""" Classes and functions for Symmetric Diffeomorphic Registration """
from __future__ import print_function
import abc
from dipy.utils.six import with_metaclass
import numpy as np
import numpy.linalg as npl
import scipy as sp
import scipy.ndimage  # 'import scipy' alone does not load the ndimage subpackage
import nibabel as nib
import dipy.align.vector_fields as vfu
from dipy.align import floating
from dipy.align import VerbosityLevels
from dipy.align import Bunch
RegistrationStages = Bunch(INIT_START=0,
INIT_END=1,
OPT_START=2,
OPT_END=3,
SCALE_START=4,
SCALE_END=5,
ITER_START=6,
ITER_END=7)
r"""Registration Stages
This enum defines the different stages the volumetric registration
may be in. The value of the stage is passed as a parameter to the
call-back function so that it can react accordingly.
INIT_START: optimizer initialization starts
INIT_END: optimizer initialization ends
OPT_START: optimization starts
OPT_END: optimization ends
SCALE_START: optimization at a new scale space resolution starts
SCALE_END: optimization at the current scale space resolution ends
ITER_START: a new iteration starts
ITER_END: the current iteration ends
"""
def mult_aff(A, B):
r"""Returns the matrix product A.dot(B) considering None as the identity
Parameters
----------
A : array, shape (n,k)
B : array, shape (k,m)
Returns
-------
The matrix product A.dot(B). If any of the input matrices is None, it is
treated as the identity matrix. If both matrices are None, None is returned.
"""
if A is None:
return B
elif B is None:
return A
return A.dot(B)
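# Illustrative example (not part of the original module): mult_aff treats
# None as the identity, so optional affine matrices compose without
# special-casing. Wrapped in a function so importing the module has no
# side effects.
def _example_mult_aff():
    A = np.array([[2.0, 0.0, 1.0],
                  [0.0, 2.0, 2.0],
                  [0.0, 0.0, 1.0]])
    assert mult_aff(A, None) is A        # None on the right acts as identity
    assert mult_aff(None, A) is A        # None on the left acts as identity
    assert mult_aff(None, None) is None  # identity composed with identity
    np.testing.assert_allclose(mult_aff(A, npl.inv(A)), np.eye(3), atol=1e-12)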
def get_direction_and_spacings(affine, dim):
r"""Extracts the rotational and spacing components from a matrix
Extracts the rotational and spacing (voxel dimensions) components from a
matrix. An image gradient represents the local variation of the image's gray
values per voxel. Since we are iterating on the physical space, we need to
compute the gradients as variation per millimeter, so we need to divide each
gradient's component by the voxel size along the corresponding axis, that's
what the spacings are used for. Since the image's gradients are oriented
along the grid axes, we also need to re-orient the gradients to be given
in physical space coordinates.
    Parameters
    ----------
    affine : array, shape (k, k), k = 3, 4
        the matrix transforming grid coordinates to physical space.
    dim : int
        the expected dimension of the transform; used to return the identity
        direction and unit spacings when affine is None.
Returns
-------
direction : array, shape (k-1, k-1)
the rotational component of the input matrix
spacings : array, shape (k-1,)
the scaling component (voxel size) of the matrix
"""
if affine is None:
return np.eye(dim), np.ones(dim)
dim = affine.shape[1]-1
#Temporary hack: get the zooms by building a nifti image
affine4x4 = np.eye(4)
empty_volume = np.zeros((0,0,0))
affine4x4[:dim, :dim] = affine[:dim, :dim]
    affine4x4[:dim, 3] = affine[:dim, dim]  # copy the translation column
nib_nifti = nib.Nifti1Image(empty_volume, affine4x4)
scalings = np.asarray(nib_nifti.get_header().get_zooms())
scalings = np.asarray(scalings[:dim], dtype = np.float64)
A = affine[:dim,:dim]
return A.dot(np.diag(1.0/scalings)), scalings
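# Illustrative example (not part of the original module): for a diagonal
# voxel-to-space matrix, the spacings are the voxel sizes along each axis
# and the rotational component is the identity.
def _example_get_direction_and_spacings():
    affine = np.diag([2.0, 3.0, 4.0, 1.0])  # anisotropic 3D voxels
    direction, spacings = get_direction_and_spacings(affine, 3)
    np.testing.assert_allclose(spacings, [2.0, 3.0, 4.0])
    np.testing.assert_allclose(direction, np.eye(3))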
class ScaleSpace(object):
def __init__(self, image, num_levels,
codomain_affine=None,
input_spacing=None,
sigma_factor=0.2,
mask0=False):
r""" ScaleSpace
Computes the Scale Space representation of an image. The scale space is
simply a list of images produced by smoothing the input image with a
Gaussian kernel with increasing smoothing parameter. If the image's
voxels are isotropic, the smoothing will be the same along all
directions: at level L = 0,1,..., the sigma is given by s * ( 2^L - 1 ).
If the voxel dimensions are not isotropic, then the smoothing is
weaker along low resolution directions.
Parameters
----------
image : array, shape (r,c) or (s, r, c) where s is the number of slices,
r is the number of rows and c is the number of columns of the input
image.
num_levels : int
the desired number of levels (resolutions) of the scale space
codomain_affine : array, shape (k, k), k=3,4 (for either 2D or 3D images)
the matrix transforming voxel coordinates to space coordinates in
the input image discretization
input_spacing : array, shape (k-1,)
the spacing (voxel size) between voxels in physical space
sigma_factor : float
the smoothing factor to be used in the construction of the scale
space.
mask0 : Boolean
if True, all smoothed images will be zero at all voxels that are
zero in the input image.
"""
self.dim = len(image.shape)
self.num_levels = num_levels
input_size = np.array(image.shape)
if mask0:
mask = np.asarray(image>0, dtype=np.int32)
#normalize input image to [0,1]
img = (image - image.min())/(image.max() - image.min())
if mask0:
img *= mask
#The properties are saved in separate lists. Insert input image
#properties at the first level of the scale space
self.images = [img.astype(floating)]
self.domain_shapes = [input_size.astype(np.int32)]
if input_spacing is None:
input_spacing = np.ones((self.dim,), dtype = np.int32)
self.spacings = [input_spacing]
self.scalings = [np.ones(self.dim)]
self.affines = [codomain_affine]
self.sigmas = [np.zeros(self.dim)]
if codomain_affine is not None:
self.affine_invs = [npl.inv(codomain_affine)]
else:
self.affine_invs = [None]
#compute the rest of the levels
min_spacing = np.min(input_spacing)
for i in range(1, num_levels):
scaling_factor = 2**i
#Note: the minimum below is present in ANTS to prevent the scaling
#from being too large (making the sub-sampled image to be too small)
#this makes the sub-sampled image at least 32 voxels at each
#direction it is risky to make this decision based on image size,
#though (we need to investigate more the effect of this)
#scaling = np.minimum(scaling_factor * min_spacing / input_spacing,
# input_size / 32)
scaling = scaling_factor * min_spacing / input_spacing
output_spacing = input_spacing * scaling
extended = np.append(scaling, [1])
            if codomain_affine is not None:
affine = codomain_affine.dot(np.diag(extended))
else:
affine = np.diag(extended)
output_size = input_size * (input_spacing / output_spacing) + 0.5
output_size = output_size.astype(np.int32)
sigmas = sigma_factor * (output_spacing / input_spacing - 1.0)
#filter along each direction with the appropriate sigma
filtered = sp.ndimage.filters.gaussian_filter(image, sigmas)
filtered = ((filtered - filtered.min())/
(filtered.max() - filtered.min()))
if mask0:
filtered *= mask
#Add current level to the scale space
self.images.append(filtered.astype(floating))
self.domain_shapes.append(output_size)
self.spacings.append(output_spacing)
self.scalings.append(scaling)
self.affines.append(affine)
self.affine_invs.append(npl.inv(affine))
self.sigmas.append(sigmas)
def get_expand_factors(self, from_level, to_level):
r"""Ratio of voxel size from pyramid level from_level to to_level
Given two scale space resolutions a = from_level, b = to_level,
        returns the ratio of the voxel size at level b to the voxel size at
        level a (the factor by which voxel sizes at level a must be
        multiplied to 'expand' them to level b).
Parameters
----------
from_level : int, 0 <= from_level < L, (L = number of resolutions)
the resolution to expand voxels from
to_level : int, 0 <= to_level < from_level
the resolution to expand voxels to
Returns
-------
factors : array, shape (k,), k = 2, 3
the expand factors (a scalar for each voxel dimension)
"""
factors = (np.array(self.spacings[to_level]) /
np.array(self.spacings[from_level]) )
return factors
def print_level(self, level):
r"""Prints properties of a pyramid level
Prints the properties of a level of this scale space to standard output
Parameters
----------
        level : int, 0 <= level < L (L = number of resolutions)
the scale space level to be printed
"""
print('Domain shape: ', self.get_domain_shape(level))
print('Spacing: ', self.get_spacing(level))
print('Scaling: ', self.get_scaling(level))
print('Affine: ', self.get_affine(level))
print('Sigmas: ', self.get_sigmas(level))
def _get_attribute(self, attribute, level):
r"""Returns an attribute from the Scale Space at a given level
Returns the level-th element of attribute if level is a valid level
        of this scale space; otherwise, a ValueError is raised.
Parameters
----------
attribute : list
the attribute to retrieve the level-th element from
level : int,
the index of the required element from attribute.
Returns
-------
attribute[level] : object
the requested attribute if level is valid, else it raises
a ValueError
"""
if 0 <= level < self.num_levels:
return attribute[level]
raise ValueError('Invalid pyramid level: '+str(level))
def get_image(self, level):
r"""Smoothed image at a given level
Returns the smoothed image at the requested level in the Scale Space.
Parameters
----------
        level : int, 0 <= level < L (L = number of resolutions)
            the scale space level to get the smoothed image from
        Returns
        -------
        the smoothed image at the requested resolution; a ValueError is
        raised if an invalid level is requested
"""
return self._get_attribute(self.images, level)
def get_domain_shape(self, level):
r"""Shape the sub-sampled image must have at a particular level
Returns the shape the sub-sampled image must have at a particular
resolution of the scale space (note that this object does not explicitly
subsample the smoothed images, but only provides the properties
the sub-sampled images must have).
Parameters
----------
        level : int, 0 <= level < L (L = number of resolutions)
            the scale space level to get the sub-sampled shape from
        Returns
        -------
        the sub-sampled shape at the requested resolution; a ValueError is
        raised if an invalid level is requested
"""
return self._get_attribute(self.domain_shapes, level)
def get_spacing(self, level):
r"""Spacings the sub-sampled image must have at a particular level
Returns the spacings (voxel sizes) the sub-sampled image must have at a
particular resolution of the scale space (note that this object does
not explicitly subsample the smoothed images, but only provides the
properties the sub-sampled images must have).
Parameters
----------
        level : int, 0 <= level < L (L = number of resolutions)
            the scale space level to get the spacings from
        Returns
        -------
        the spacings (voxel sizes) at the requested resolution; a ValueError
        is raised if an invalid level is requested
"""
return self._get_attribute(self.spacings, level)
def get_scaling(self, level):
r"""Adjustment factor for input-spacing to reflect voxel sizes at level
Returns the scaling factor that needs to be applied to the input spacing
(the voxel sizes of the image at level 0 of the scale space) to
transform them to voxel sizes at the requested level.
Parameters
----------
        level : int, 0 <= level < L (L = number of resolutions)
the scale space level to get the scalings from
Returns
-------
the scaling factors from the original spacing to the spacings at the
requested level
"""
return self._get_attribute(self.scalings, level)
def get_affine(self, level):
r"""Voxel-to-space transformation at a given level
Returns the voxel-to-space transformation associated to the sub-sampled
image at a particular resolution of the scale space (note that this
object does not explicitly subsample the smoothed images, but only
provides the properties the sub-sampled images must have).
Parameters
----------
        level : int, 0 <= level < L (L = number of resolutions)
            the scale space level to get the affine transform from
        Returns
        -------
        the affine (voxel-to-space) transform at the requested resolution;
        a ValueError is raised if an invalid level is requested
"""
return self._get_attribute(self.affines, level)
def get_affine_inv(self, level):
r"""Space-to-voxel transformation at a given level
Returns the space-to-voxel transformation associated to the sub-sampled
image at a particular resolution of the scale space (note that this
object does not explicitly subsample the smoothed images, but only
provides the properties the sub-sampled images must have).
Parameters
----------
        level : int, 0 <= level < L (L = number of resolutions)
            the scale space level to get the inverse transform from
        Returns
        -------
        the inverse (space-to-voxel) transform at the requested resolution;
        a ValueError is raised if an invalid level is requested
"""
return self._get_attribute(self.affine_invs, level)
def get_sigmas(self, level):
r"""Smoothing parameters used at a given level
Returns the smoothing parameters (a scalar for each axis) used at the
requested level of the scale space
Parameters
----------
        level : int, 0 <= level < L (L = number of resolutions)
the scale space level to get the smoothing parameters from
Returns
-------
the smoothing parameters at the requested level
"""
return self._get_attribute(self.sigmas, level)
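# Illustrative sketch (not part of the original module): a 3-level scale
# space of a smooth 2D image. Level 0 stores the normalized input; in the
# isotropic case each coarser level halves the resolution and doubles the
# spacing, with the smoothing sigmas growing accordingly.
def _example_scale_space():
    x = np.linspace(-1, 1, 64)
    image = np.exp(-(x[:, None] ** 2 + x[None, :] ** 2) / 0.5)
    ss = ScaleSpace(image, num_levels=3, input_spacing=np.ones(2))
    for level in range(3):
        print('level %d: shape=%s spacing=%s sigmas=%s' %
              (level, ss.get_domain_shape(level), ss.get_spacing(level),
               ss.get_sigmas(level)))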
class DiffeomorphicMap(object):
def __init__(self,
dim,
disp_shape,
disp_affine=None,
domain_shape=None,
domain_affine=None,
codomain_shape=None,
codomain_affine=None,
prealign=None):
r""" DiffeomorphicMap
Implements a diffeomorphic transformation on the physical space. The
deformation fields encoding the direct and inverse transformations
share the same domain discretization (both the discretization grid shape
and voxel-to-space matrix). The input coordinates (physical coordinates)
are first aligned using prealign, and then displaced using the
corresponding vector field interpolated at the aligned coordinates.
Parameters
----------
dim : int, 2 or 3
the transformation's dimension
disp_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the deformation
field's discretization
        disp_affine : array, shape (dim+1, dim+1)
            the voxel-to-space transformation between the deformation
            field's grid and space
domain_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the default
            discretization of this map's domain
domain_affine : array, shape (dim+1, dim+1)
the default voxel-to-space transformation between this map's
discretization and physical space
codomain_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the images that
are 'normally' warped using this transformation in the forward
direction (this will provide default transformation parameters to
warp images under this transformation). By default, we assume that
the inverse transformation is 'normally' used to warp images with
the same discretization and voxel-to-space transformation as the
deformation field grid.
codomain_affine : array, shape (dim+1, dim+1)
the voxel-to-space transformation of images that are 'normally'
warped using this transformation (in the forward direction).
prealign : array, shape (dim+1, dim+1)
the linear transformation to be applied to align input images to
the reference space before warping under the deformation field.
"""
self.dim = dim
if(disp_shape is None):
raise ValueError("Invalid displacement field discretization")
self.disp_shape = np.asarray(disp_shape, dtype = np.int32)
# If the discretization affine is None, we assume it's the identity
self.disp_affine = disp_affine
if(self.disp_affine is None):
self.disp_affine_inv = None
else:
self.disp_affine_inv = npl.inv(self.disp_affine)
# If domain_shape is not provided, we use the map's discretization shape
if(domain_shape is None):
self.domain_shape = self.disp_shape
else:
self.domain_shape = np.asarray(domain_shape, dtype = np.int32)
self.domain_affine = domain_affine
if(domain_affine is None):
self.domain_affine_inv = None
else:
self.domain_affine_inv = npl.inv(domain_affine)
# If codomain shape was not provided, we assume it is an endomorphism:
# use the same domain_shape and codomain_affine as the field domain
if codomain_shape is None:
self.codomain_shape = self.domain_shape
else:
self.codomain_shape = np.asarray(codomain_shape, dtype = np.int32)
self.codomain_affine = codomain_affine
if codomain_affine is None:
self.codomain_affine_inv = None
else:
self.codomain_affine_inv = npl.inv(codomain_affine)
self.prealign = prealign
if prealign is None:
self.prealign_inv = None
else:
self.prealign_inv = npl.inv(prealign)
self.is_inverse = False
self.forward = None
self.backward = None
def get_forward_field(self):
r"""Deformation field to transform an image in the forward direction
Returns the deformation field that must be used to warp an image under
this transformation in the forward direction (note the 'is_inverse'
flag).
"""
if self.is_inverse:
return self.backward
else:
return self.forward
def get_backward_field(self):
r"""Deformation field to transform an image in the backward direction
Returns the deformation field that must be used to warp an image under
this transformation in the backward direction (note the 'is_inverse'
flag).
"""
if self.is_inverse:
return self.forward
else:
return self.backward
def allocate(self):
r"""Creates a zero displacement field
Creates a zero displacement field (the identity transformation).
"""
self.forward = np.zeros(tuple(self.disp_shape)+(self.dim,),
dtype=floating)
self.backward = np.zeros(tuple(self.disp_shape)+(self.dim,),
dtype=floating)
def _get_warping_function(self, interpolation):
r"""Appropriate warping function for the given interpolation type
Returns the right warping function from vector_fields that must be
called for the specified data dimension and interpolation type
"""
if self.dim == 2:
if interpolation == 'linear':
return vfu.warp_2d
else:
return vfu.warp_2d_nn
else:
if interpolation == 'linear':
return vfu.warp_3d
else:
return vfu.warp_3d_nn
def _warp_forward(self, image, interpolation='linear', world_to_image=None,
sampling_shape=None, sampling_affine=None):
r"""Warps an image in the forward direction
Deforms the input image under this diffeomorphic map in the forward
direction. Since the mapping is defined in the physical space, the user
must specify the sampling grid shape and its space-to-voxel mapping.
By default, the transformation will use the discretization information
given at initialization.
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
world_to_image : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
sampling_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
        sampling_affine : array, shape (dim+1, dim+1)
            the transformation bringing voxel coordinates of the warped
            image to physical space
Returns
-------
        warped : array, shape = sampling_shape or self.domain_shape if None
the warped image under this transformation in the forward direction
Notes
-----
        A diffeomorphic map must be thought of as a mapping between points
in space. Warping an image J towards an image I means transforming
each voxel with (discrete) coordinates i in I to (floating-point) voxel
coordinates j in J. The transformation we consider 'forward' is
precisely mapping coordinates i from the input image to coordinates j
from reference image, which has the effect of warping an image with
reference discretization (typically, the "static image") "towards" an
image with input discretization (typically, the "moving image"). More
precisely, the warped image is produced by the following interpolation:
        warped[i] = image[W * forward[Dinv * P * S * i] + W * P * S * i]
where i denotes the coordinates of a voxel in the input grid, W is
the world-to-grid transformation of the image given as input, Dinv
is the world-to-grid transformation of the deformation field
discretization, P is the pre-aligning matrix (transforming input
points to reference points), S is the voxel-to-space transformation of
the sampling grid (see comment below) and forward is the forward
deformation field.
        If we want to warp an image, we also must specify on what grid we
        want to sample the resulting warped image (an image is considered a
        function on physical space, and its representation on a grid depends
        on the grid-to-space transform telling us, for each grid voxel, what
        point in space we need to bring via interpolation). So, S is the
        matrix that converts the sampling grid (whose shape is given as the
        parameter 'sampling_shape') to space coordinates.
"""
#if no world-to-image transform is provided, we use the codomain info
if world_to_image is None:
world_to_image = self.codomain_affine_inv
#if no sampling info is provided, we use the domain info
if sampling_shape is None:
if self.domain_shape is None:
raise ValueError('Unable to infer sampling info. '
'Provide a valid sampling_shape.')
sampling_shape = self.domain_shape
else:
sampling_shape = np.asarray(sampling_shape, dtype=np.int32)
if sampling_affine is None:
sampling_affine = self.domain_affine
W = None if world_to_image == 'identity' else world_to_image
Dinv = self.disp_affine_inv
P = self.prealign
S = None if sampling_affine == 'identity' else sampling_affine
#this is the matrix which we need to multiply the voxel coordinates
#to interpolate on the forward displacement field ("in"side the
#'forward' brackets in the expression above)
affine_idx_in = mult_aff(Dinv, mult_aff(P, S))
#this is the matrix which we need to multiply the voxel coordinates
#to add to the displacement ("out"side the 'forward' brackets in the
#expression above)
affine_idx_out = mult_aff(W, mult_aff(P, S))
#this is the matrix which we need to multiply the displacement vector
#prior to adding to the transformed input point
affine_disp = W
#Convert the data to the required types to use the cythonized functions
if interpolation == 'nearest':
if image.dtype is np.dtype('float64') and floating is np.float32:
image = image.astype(floating)
elif image.dtype is np.dtype('int64'):
image = image.astype(np.int32)
else:
image = np.asarray(image, dtype=floating)
warp_f = self._get_warping_function(interpolation)
warped = warp_f(image, self.forward, affine_idx_in, affine_idx_out,
affine_disp, sampling_shape)
return warped
def _warp_backward(self, image, interpolation='linear', world_to_image=None,
sampling_shape=None, sampling_affine=None):
r"""Warps an image in the backward direction
Deforms the input image under this diffeomorphic map in the backward
direction. Since the mapping is defined in the physical space, the user
must specify the sampling grid shape and its space-to-voxel mapping.
By default, the transformation will use the discretization information
given at initialization.
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the backward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
world_to_image : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
sampling_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
        sampling_affine : array, shape (dim+1, dim+1)
            the transformation bringing voxel coordinates of the warped
            image to physical space
Returns
-------
        warped : array, shape = sampling_shape or self.codomain_shape if None
the warped image under this transformation in the backward direction
Notes
-----
        A diffeomorphic map must be thought of as a mapping between points
in space. Warping an image J towards an image I means transforming
each voxel with (discrete) coordinates i in I to (floating-point) voxel
coordinates j in J. The transformation we consider 'backward' is
precisely mapping coordinates i from the reference grid to coordinates j
from the input image (that's why it's "backward"), which has the effect
of warping the input image (moving) "towards" the reference. More
precisely, the warped image is produced by the following interpolation:
        warped[i] = image[W * Pinv * backward[Dinv * S * i] + W * Pinv * S * i]
where i denotes the coordinates of a voxel in the input grid, W is
the world-to-grid transformation of the image given as input, Dinv
is the world-to-grid transformation of the deformation field
discretization, Pinv is the pre-aligning matrix's inverse (transforming
reference points to input points), S is the grid-to-space transformation
of the sampling grid (see comment below) and backward is the backward
deformation field.
        If we want to warp an image, we also must specify on what grid we
        want to sample the resulting warped image (an image is considered a
        function on physical space, and its representation on a grid depends
        on the grid-to-space transform telling us, for each grid voxel, what
        point in space we need to bring via interpolation). So, S is the
        matrix that converts the sampling grid (whose shape is given as the
        parameter 'sampling_shape') to space coordinates.
"""
#if no world-to-image transform is provided, we use the domain info
if world_to_image is None:
world_to_image = self.domain_affine_inv
#if no sampling info is provided, we use the codomain info
if sampling_shape is None:
if self.codomain_shape is None:
                raise ValueError('Unable to infer sampling info. '
                                 'Provide a valid sampling_shape.')
sampling_shape = self.codomain_shape
if sampling_affine is None:
sampling_affine = self.codomain_affine
W = None if world_to_image == 'identity' else world_to_image
Dinv = self.disp_affine_inv
Pinv = self.prealign_inv
S = None if sampling_affine == 'identity' else sampling_affine
#this is the matrix which we need to multiply the voxel coordinates
#to interpolate on the backward displacement field ("in"side the
#'backward' brackets in the expression above)
affine_idx_in = mult_aff(Dinv, S)
#this is the matrix which we need to multiply the voxel coordinates
#to add to the displacement ("out"side the 'backward' brackets in the
#expression above)
affine_idx_out = mult_aff(W, mult_aff(Pinv, S))
#this is the matrix which we need to multiply the displacement vector
#prior to adding to the transformed input point
affine_disp = mult_aff(W, Pinv)
if interpolation == 'nearest':
if image.dtype is np.dtype('float64') and floating is np.float32:
image = image.astype(floating)
elif image.dtype is np.dtype('int64'):
image = image.astype(np.int32)
else:
image = np.asarray(image, dtype=floating)
warp_f = self._get_warping_function(interpolation)
warped = warp_f(image, self.backward, affine_idx_in, affine_idx_out,
affine_disp, sampling_shape)
return warped
def transform(self, image, interpolation='linear', world_to_image=None,
sampling_shape=None, sampling_affine=None):
r"""Warps an image in the forward direction
Transforms the input image under this transformation in the forward
direction. It uses the "is_inverse" flag to switch between "forward"
and "backward" (if is_inverse is False, then transform(...) warps the
image forwards, else it warps the image backwards).
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
world_to_image : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
sampling_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
        sampling_affine : array, shape (dim+1, dim+1)
            the transformation bringing voxel coordinates of the warped
            image to physical space
Returns
-------
        warped : array, shape = sampling_shape or self.domain_shape if None
the warped image under this transformation in the forward direction
Notes
-----
See _warp_forward and _warp_backward documentation for further
information.
"""
if sampling_shape is not None:
sampling_shape = np.asarray(sampling_shape, dtype=np.int32)
if self.is_inverse:
warped = self._warp_backward(image, interpolation, world_to_image,
sampling_shape, sampling_affine)
else:
warped = self._warp_forward(image, interpolation, world_to_image,
sampling_shape, sampling_affine)
return np.asarray(warped)
def transform_inverse(self, image, interpolation='linear', world_to_image=None,
sampling_shape=None, sampling_affine=None):
r"""Warps an image in the backward direction
Transforms the input image under this transformation in the backward
direction. It uses the "is_inverse" flag to switch between "forward"
and "backward" (if is_inverse is False, then transform_inverse(...)
warps the image backwards, else it warps the image forwards)
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
            the image to be warped under this transformation in the backward
            direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
world_to_image : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
sampling_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
        sampling_affine : array, shape (dim+1, dim+1)
            the transformation bringing voxel coordinates of the warped
            image to physical space
Returns
-------
warped : array, shape = sampling_shape or self.codomain_shape if None
the warped image under this transformation in the backward direction
Notes
-----
See _warp_forward and _warp_backward documentation for further
information.
"""
if self.is_inverse:
warped = self._warp_forward(image, interpolation, world_to_image,
sampling_shape, sampling_affine)
else:
warped = self._warp_backward(image, interpolation, world_to_image,
sampling_shape, sampling_affine)
return np.asarray(warped)
def inverse(self):
r"""Inverse of this DiffeomorphicMap instance
Returns a diffeomorphic map object representing the inverse of this
transformation. The internal arrays are not copied but just referenced.
Returns
-------
inv : DiffeomorphicMap object
the inverse of this diffeomorphic map.
"""
inv = DiffeomorphicMap(self.dim,
self.disp_shape,
self.disp_affine,
self.domain_shape,
self.domain_affine,
self.codomain_shape,
self.codomain_affine,
self.prealign)
inv.forward = self.forward
inv.backward = self.backward
inv.is_inverse = True
return inv
def expand_fields(self, expand_factors, new_shape):
r"""Expands the displacement fields from current shape to new_shape
Up-samples the discretization of the displacement fields to be of
new_shape shape.
Parameters
----------
expand_factors : array, shape (dim,)
the factors scaling current spacings (voxel sizes) to spacings in
the expanded discretization.
new_shape : array, shape (dim,)
the shape of the arrays holding the up-sampled discretization
"""
if self.dim == 2:
expand_f = vfu.resample_displacement_field_2d
else:
expand_f = vfu.resample_displacement_field_3d
expanded_forward = expand_f(self.forward, expand_factors, new_shape)
expanded_backward = expand_f(self.backward, expand_factors, new_shape)
expand_factors = np.append(expand_factors, [1])
expanded_affine = mult_aff(self.disp_affine, np.diag(expand_factors))
expanded_affine_inv = npl.inv(expanded_affine)
self.forward = expanded_forward
self.backward = expanded_backward
self.disp_shape = new_shape
self.disp_affine = expanded_affine
self.disp_affine_inv = expanded_affine_inv
def compute_inversion_error(self):
r"""Inversion error of the displacement fields
Estimates the inversion error of the displacement fields by computing
statistics of the residual vectors obtained after composing the forward
and backward displacement fields.
Returns
-------
residual : array, shape (R, C) or (S, R, C)
the displacement field resulting from composing the forward and
backward displacement fields of this transformation (the residual
should be zero for a perfect diffeomorphism)
stats : array, shape (3,)
statistics from the norms of the vectors of the residual
displacement field: maximum, mean and standard deviation
Notes
-----
Since the forward and backward displacement fields have the same
discretization, the final composition is given by
comp[i] = forward[ i + Dinv * backward[i]]
where Dinv is the space-to-grid transformation of the displacement
fields
"""
Dinv = self.disp_affine_inv
if self.dim == 2:
compose_f = vfu.compose_vector_fields_2d
else:
compose_f = vfu.compose_vector_fields_3d
residual, stats = compose_f(self.backward, self.forward,
None, Dinv, 1.0, None)
return np.asarray(residual), np.asarray(stats)
def shallow_copy(self):
r"""Shallow copy of this DiffeomorphicMap instance
Creates a shallow copy of this diffeomorphic map (the arrays are not
copied but just referenced)
Returns
-------
new_map : DiffeomorphicMap object
the shallow copy of this diffeomorphic map
"""
new_map = DiffeomorphicMap(self.dim,
self.disp_shape,
self.disp_affine,
self.domain_shape,
self.domain_affine,
self.codomain_shape,
self.codomain_affine,
self.prealign)
new_map.forward = self.forward
new_map.backward = self.backward
new_map.is_inverse = self.is_inverse
return new_map
def warp_endomorphism(self, phi):
r"""Composition of this DiffeomorphicMap with a given endomorphism
Creates a new DiffeomorphicMap C with the same properties as self and
composes its displacement fields with phi's corresponding fields.
The resulting diffeomorphism is of the form C(x) = phi(self(x)) with
inverse C^{-1}(y) = self^{-1}(phi^{-1}(y)). We assume that phi is an
endomorphism with the same discretization and domain affine as self
to ensure that the composition inherits self's properties (we also
assume that the pre-aligning matrix of phi is None or identity).
Parameters
----------
phi : DiffeomorphicMap object
the endomorphism to be warped by this diffeomorphic map
Returns
-------
        composition : DiffeomorphicMap object
            the composition of this diffeomorphic map with the endomorphism
            given as input
Notes
-----
The problem with our current representation of a DiffeomorphicMap is
        that the set of diffeomorphisms that can be represented this way (a
pre-aligning matrix followed by a non-linear endomorphism given as a
displacement field) is not closed under the composition operation.
Supporting a general DiffeomorphicMap class, closed under composition,
may be extremely costly computationally, and the kind of transformations
we actually need for Avants' mid-point algorithm (SyN) are much simpler.
"""
#Compose the forward deformation fields
d1 = self.get_forward_field()
d2 = phi.get_forward_field()
d1_inv = self.get_backward_field()
d2_inv = phi.get_backward_field()
premult_disp = self.disp_affine_inv
if self.dim == 2:
compose_f = vfu.compose_vector_fields_2d
else:
compose_f = vfu.compose_vector_fields_3d
forward, stats = compose_f(d1, d2, None, premult_disp, 1.0, None)
        backward, stats = compose_f(d2_inv, d1_inv, None, premult_disp,
                                    1.0, None)
composition = self.shallow_copy()
composition.forward = forward
composition.backward = backward
return composition
def get_simplified_transform(self):
r""" Constructs a simplified version of this Diffeomorhic Map
The simplified version incorporates the pre-align transform, as well as
the domain and codomain affine transforms into the displacement field.
The resulting transformation may be regarded as operating on the
image spaces given by the domain and codomain discretization. As a
result, self.prealign, self.disp_affine, self.domain_affine and
        self.codomain_affine will be None (denoting identity) in the resulting
diffeomorphic map.
"""
if self.dim == 2:
simplify_f = vfu.simplify_warp_function_2d
else:
simplify_f = vfu.simplify_warp_function_3d
# Simplify the forward transform
D = self.domain_affine
P = self.prealign
Rinv = self.disp_affine_inv
Cinv = self.codomain_affine_inv
#this is the matrix which we need to multiply the voxel coordinates
#to interpolate on the forward displacement field ("in"side the
#'forward' brackets in the expression above)
affine_idx_in = mult_aff(Rinv, mult_aff(P, D))
#this is the matrix which we need to multiply the voxel coordinates
#to add to the displacement ("out"side the 'forward' brackets in the
#expression above)
affine_idx_out = mult_aff(Cinv, mult_aff(P, D))
#this is the matrix which we need to multiply the displacement vector
#prior to adding to the transformed input point
affine_disp = Cinv
new_forward = simplify_f(self.forward, affine_idx_in,
affine_idx_out, affine_disp,
self.domain_shape)
# Simplify the backward transform
        # C maps the codomain sampling grid to world coordinates (it plays
        # the role of S in _warp_backward)
        C = self.codomain_affine
Pinv = self.prealign_inv
Dinv = self.domain_affine_inv
affine_idx_in = mult_aff(Rinv, C)
affine_idx_out = mult_aff(Dinv, mult_aff(Pinv, C))
affine_disp = mult_aff(Dinv, Pinv)
new_backward = simplify_f(self.backward, affine_idx_in,
affine_idx_out, affine_disp,
self.codomain_shape)
simplified = DiffeomorphicMap(self.dim,
self.disp_shape,
None,
self.domain_shape,
None,
self.codomain_shape,
None,
None)
simplified.forward = new_forward
simplified.backward = new_backward
return simplified
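# Illustrative sketch (not part of the original module): a freshly allocated
# DiffeomorphicMap holds zero displacement fields, so it warps images as the
# identity (all grid-to-space matrices default to None, i.e. identity).
def _example_identity_map():
    image = np.random.rand(32, 32).astype(floating)
    identity_map = DiffeomorphicMap(2, image.shape)
    identity_map.allocate()
    warped = identity_map.transform(image)
    np.testing.assert_allclose(warped, image, atol=1e-5)
    # composing the zero forward and backward fields gives a zero residual
    residual, stats = identity_map.compute_inversion_error()
    assert stats[0] == 0.0  # maximum residual norm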
class DiffeomorphicRegistration(with_metaclass(abc.ABCMeta, object)):
def __init__(self, metric=None):
r""" Diffeomorphic Registration
This abstract class defines the interface to be implemented by any
optimization algorithm for diffeomorphic registration.
Parameters
----------
metric : SimilarityMetric object
the object measuring the similarity of the two images. The
registration algorithm will minimize (or maximize) the provided
similarity.
"""
if metric is None:
raise ValueError('The metric cannot be None')
self.metric = metric
self.dim = metric.dim
def set_level_iters(self, level_iters):
r"""Sets the number of iterations at each pyramid level
Establishes the maximum number of iterations to be performed at each
level of the Gaussian pyramid, similar to ANTS.
Parameters
----------
level_iters : list
the number of iterations at each level of the Gaussian pyramid.
            level_iters[0] corresponds to the coarsest level and
            level_iters[n-1] to the finest, where n is the length of the
            list (see the level loop in _optimize)
"""
self.levels = len(level_iters) if level_iters else 0
self.level_iters = level_iters
@abc.abstractmethod
def optimize(self):
r"""Starts the metric optimization
This is the main function each specialized class derived from this must
implement. Upon completion, the deformation field must be available from
the forward transformation model.
"""
@abc.abstractmethod
def get_map(self):
r"""
Returns the resulting diffeomorphic map after optimization
"""
class SymmetricDiffeomorphicRegistration(DiffeomorphicRegistration):
def __init__(self,
metric,
level_iters=None,
step_length=0.25,
ss_sigma_factor=0.2,
opt_tol=1e-5,
inv_iter=20,
inv_tol=1e-3,
callback=None):
r""" Symmetric Diffeomorphic Registration (SyN) Algorithm
Performs the multi-resolution optimization algorithm for non-linear
registration using a given similarity metric.
Parameters
----------
metric : SimilarityMetric object
the metric to be optimized
level_iters : list of int
the number of iterations at each level of the Gaussian Pyramid (the
length of the list defines the number of pyramid levels to be
used)
opt_tol : float
the optimization will stop when the estimated derivative of the
energy profile w.r.t. time falls below this threshold
inv_iter : int
the number of iterations to be performed by the displacement field
inversion algorithm
step_length : float
the length of the maximum displacement vector of the update
displacement field at each iteration
ss_sigma_factor : float
parameter of the scale-space smoothing kernel. For example, the
std. dev. of the kernel will be factor*(2^i) in the isotropic case
where i = 0, 1, ..., n_scales is the scale
inv_tol : float
the displacement field inversion algorithm will stop iterating
when the inversion error falls below this threshold
callback : function(SymmetricDiffeomorphicRegistration)
a function receiving a SymmetricDiffeomorphicRegistration object
to be called after each iteration (this optimizer will call this
function passing self as parameter)
"""
super(SymmetricDiffeomorphicRegistration, self).__init__(metric)
if level_iters is None:
level_iters = [100, 100, 25]
if len(level_iters) == 0:
raise ValueError('The iterations list cannot be empty')
self.set_level_iters(level_iters)
self.step_length = step_length
self.ss_sigma_factor = ss_sigma_factor
self.opt_tol = opt_tol
self.inv_tol = inv_tol
self.inv_iter = inv_iter
self.energy_window = 12
self.energy_list = []
self.full_energy_profile = []
self.verbosity = VerbosityLevels.STATUS
self.callback = callback
self.moving_ss = None
self.static_ss = None
self.static_direction = None
self.moving_direction = None
self.mask0 = metric.mask0
def update(self, current_displacement, new_displacement,
affine_inv, time_scaling):
r"""Composition of the current displacement field with the given field
        Interpolates new_displacement at the locations defined by
        current_displacement. Equivalently, computes the composition C of the
        given displacement fields as C(x) = B(A(x)), where A is
        current_displacement and B is new_displacement. This function is
        intended to be used with deformation fields of the same sampling
        (e.g. to be called by a registration algorithm).
        Parameters
        ----------
        current_displacement : array, shape (R', C', 2) or (S', R', C', 3)
            the displacement field defining where to interpolate
            new_displacement
        new_displacement : array, shape (R, C, 2) or (S, R, C, 3)
            the displacement field to be warped by current_displacement
        affine_inv : array, shape (dim+1, dim+1)
            the space-to-grid transform of the displacement fields'
            discretization
        time_scaling : float
            the scaling factor applied to new_displacement before the
            composition (e.g. the step length)
        Returns
        -------
        updated : array, shape (the same as current_displacement)
            the warped displacement field
        mean_norm : float
            the mean norm of the vectors of the input current_displacement
            (computed before the composition)
"""
mean_norm = np.sqrt(np.sum((np.array(current_displacement) ** 2), -1)).mean()
self.compose(current_displacement, new_displacement, None,
affine_inv, time_scaling, current_displacement)
return np.array(current_displacement), np.array(mean_norm)
def get_map(self):
r"""Returns the resulting diffeomorphic map
Returns the DiffeomorphicMap registering the moving image towards
the static image.
"""
return self.static_to_ref
def _connect_functions(self):
r"""Assign the methods to be called according to the image dimension
Assigns the appropriate functions to be called for displacement field
inversion, Gaussian pyramid, and affine / dense deformation composition
according to the dimension of the input images e.g. 2D or 3D.
"""
if self.dim == 2:
self.invert_vector_field = vfu.invert_vector_field_fixed_point_2d
self.compose = vfu.compose_vector_fields_2d
else:
self.invert_vector_field = vfu.invert_vector_field_fixed_point_3d
self.compose = vfu.compose_vector_fields_3d
def _init_optimizer(self, static, moving,
static_affine, moving_affine, prealign):
r"""Initializes the registration optimizer
Initializes the optimizer by computing the scale space of the input
images and allocating the required memory for the transformation models
at the coarsest scale.
Parameters
----------
static: array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization. The
displacement fields will have the same discretization as the static
image.
moving: array, shape (S, R, C) or (R, C)
the image to be used as "moving" during optimization. Since the
deformation fields' discretization is the same as the static image,
it is necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed to
be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the 'prealign' matrix
static_affine: array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the static image
moving_affine: array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the moving image
prealign: array, shape (dim+1, dim+1)
the affine transformation (operating on the physical space)
pre-aligning the moving image towards the static
"""
self._connect_functions()
#Extract information from the affine matrices to create the scale space
static_direction, static_spacing = \
get_direction_and_spacings(static_affine, self.dim)
moving_direction, moving_spacing = \
get_direction_and_spacings(moving_affine, self.dim)
#the images' directions don't change with scale
self.static_direction = np.eye(self.dim + 1)
self.moving_direction = np.eye(self.dim + 1)
self.static_direction[:self.dim, :self.dim] = static_direction
self.moving_direction[:self.dim, :self.dim] = moving_direction
#Build the scale space of the input images
if self.verbosity >= VerbosityLevels.DIAGNOSE:
print('Applying zero mask: ' + str(self.mask0))
if self.verbosity >= VerbosityLevels.STATUS:
print('Creating scale space from the moving image. Levels: %d. '
'Sigma factor: %f.' % (self.levels, self.ss_sigma_factor))
self.moving_ss = ScaleSpace(moving, self.levels, moving_affine,
moving_spacing, self.ss_sigma_factor,
self.mask0)
if self.verbosity >= VerbosityLevels.STATUS:
print('Creating scale space from the static image. Levels: %d. '
'Sigma factor: %f.' % (self.levels, self.ss_sigma_factor))
self.static_ss = ScaleSpace(static, self.levels, static_affine,
static_spacing, self.ss_sigma_factor,
self.mask0)
if self.verbosity >= VerbosityLevels.DEBUG:
print('Moving scale space:')
for level in range(self.levels):
self.moving_ss.print_level(level)
print('Static scale space:')
for level in range(self.levels):
self.static_ss.print_level(level)
#Get the properties of the coarsest level from the static image. These
#properties will be taken as the reference discretization.
disp_shape = self.static_ss.get_domain_shape(self.levels-1)
disp_affine = self.static_ss.get_affine(self.levels-1)
# The codomain discretization of both diffeomorphic maps is
# precisely the discretization of the static image
codomain_shape = static.shape
codomain_affine = static_affine
#The forward model transforms points from the static image
#to points on the reference (which is the static as well). So the domain
        #properties are taken from the static image. Since it's the same as the
#reference, we don't need to pre-align.
domain_shape = static.shape
domain_affine = static_affine
self.static_to_ref = DiffeomorphicMap(self.dim,
disp_shape,
disp_affine,
domain_shape,
domain_affine,
codomain_shape,
codomain_affine,
None)
self.static_to_ref.allocate()
#The backward model transforms points from the moving image
#to points on the reference (which is the static). So the input
#properties are taken from the moving image, and we need to pre-align
#points on the moving physical space to the reference physical space by
#applying the inverse of pre-align. This is done this way to make it
#clear for the user: the pre-align matrix is usually obtained by doing
#affine registration of the moving image towards the static image, which
#results in a matrix transforming points in the static physical space to
#points in the moving physical space
prealign_inv = None if prealign is None else npl.inv(prealign)
domain_shape = moving.shape
domain_affine = moving_affine
self.moving_to_ref = DiffeomorphicMap(self.dim,
disp_shape,
disp_affine,
domain_shape,
domain_affine,
codomain_shape,
codomain_affine,
prealign_inv)
self.moving_to_ref.allocate()
def _end_optimizer(self):
r"""Frees the resources allocated during initialization
"""
del self.moving_ss
del self.static_ss
def _iterate(self):
r"""Performs one symmetric iteration
Performs one iteration of the SyN algorithm:
        1. Compute forward
        2. Compute backward
        3. Update forward
        4. Update backward
        5. Compute inverses
        6. Invert the inverses
Returns
-------
der : float
the derivative of the energy profile, computed by fitting a
quadratic function to the energy values at the latest T iterations,
where T = self.energy_window. If the current iteration is less than
T then np.inf is returned instead.
"""
#Acquire current resolution information from scale spaces
current_moving = self.moving_ss.get_image(self.current_level)
current_static = self.static_ss.get_image(self.current_level)
current_disp_shape = \
self.static_ss.get_domain_shape(self.current_level)
current_disp_affine = \
self.static_ss.get_affine(self.current_level)
current_disp_affine_inv = \
self.static_ss.get_affine_inv(self.current_level)
current_disp_spacing = \
self.static_ss.get_spacing(self.current_level)
#Warp the input images (smoothed to the current scale) to the common
#(reference) space at the current resolution
wstatic = self.static_to_ref.transform_inverse(current_static, 'linear',
None,
current_disp_shape,
current_disp_affine)
wmoving = self.moving_to_ref.transform_inverse(current_moving, 'linear',
None,
current_disp_shape,
current_disp_affine)
#Pass both images to the metric. Now both images are sampled on the
#reference grid (equal to the static image's grid) and the direction
#doesn't change across scales
self.metric.set_moving_image(wmoving, current_disp_affine,
current_disp_spacing, self.static_direction)
self.metric.use_moving_image_dynamics(
current_moving, self.moving_to_ref.inverse())
self.metric.set_static_image(wstatic, current_disp_affine,
current_disp_spacing, self.static_direction)
self.metric.use_static_image_dynamics(
current_static, self.static_to_ref.inverse())
#Initialize the metric for a new iteration
self.metric.initialize_iteration()
if self.callback is not None:
self.callback(self, RegistrationStages.ITER_START)
#Compute the forward step (to be used to update the forward transform)
fw_step = np.array(self.metric.compute_forward())
#set zero displacements at the boundary
fw_step[0, ...] = 0
fw_step[:, 0, ...] = 0
fw_step[-1, ...] = 0
fw_step[:, -1, ...] = 0
if(self.dim == 3):
fw_step[:, :, 0, ...] = 0
fw_step[:, :, -1, ...] = 0
#Normalize the forward step
nrm = np.sqrt(np.sum((fw_step/current_disp_spacing)**2, -1)).max()
if nrm>0:
fw_step /= nrm
#Add to current total field
self.static_to_ref.forward, md_forward = self.update(
self.static_to_ref.forward, fw_step,
current_disp_affine_inv, self.step_length)
del fw_step
#Keep track of the forward energy
fw_energy = self.metric.get_energy()
        #Compute the backward step (to be used to update the backward transform)
bw_step = np.array(self.metric.compute_backward())
        #set zero displacements at the boundary (symmetric with the forward
        #step above)
        bw_step[0, ...] = 0
        bw_step[:, 0, ...] = 0
        bw_step[-1, ...] = 0
        bw_step[:, -1, ...] = 0
        if(self.dim == 3):
            bw_step[:, :, 0, ...] = 0
            bw_step[:, :, -1, ...] = 0
#Normalize the backward step
nrm = np.sqrt(np.sum((bw_step/current_disp_spacing)**2, -1)).max()
if nrm>0:
bw_step /= nrm
#Add to current total field
self.moving_to_ref.forward, md_backward = self.update(
self.moving_to_ref.forward, bw_step,
current_disp_affine_inv, self.step_length)
del bw_step
#Keep track of the energy
bw_energy = self.metric.get_energy()
der = np.inf
n_iter = len(self.energy_list)
if len(self.energy_list) >= self.energy_window:
der = self._get_energy_derivative()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
ch = '-' if np.isnan(der) else der
print('%d:\t%0.6f\t%0.6f\t%0.6f\t%s' %
(n_iter, fw_energy, bw_energy, fw_energy + bw_energy, ch))
self.energy_list.append(fw_energy + bw_energy)
#Invert the forward model's forward field
self.static_to_ref.backward = np.array(
self.invert_vector_field(
self.static_to_ref.forward,
current_disp_affine_inv,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.static_to_ref.backward))
#Invert the backward model's forward field
self.moving_to_ref.backward = np.array(
self.invert_vector_field(
self.moving_to_ref.forward,
current_disp_affine_inv,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.moving_to_ref.backward))
#Invert the forward model's backward field
self.static_to_ref.forward = np.array(
self.invert_vector_field(
self.static_to_ref.backward,
current_disp_affine_inv,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.static_to_ref.forward))
#Invert the backward model's backward field
self.moving_to_ref.forward = np.array(
self.invert_vector_field(
self.moving_to_ref.backward,
current_disp_affine_inv,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.moving_to_ref.forward))
#Free resources no longer needed to compute the forward and backward
#steps
if self.callback is not None:
self.callback(self, RegistrationStages.ITER_END)
self.metric.free_iteration()
return der
def _approximate_derivative_direct(self, x, y):
r"""Derivative of the degree-2 polynomial fit of the given x, y pairs
Directly computes the derivative of the least-squares-fit quadratic
function estimated from (x[...],y[...]) pairs.
Parameters
----------
x : array, shape(n,)
increasing array representing the x-coordinates of the points to
be fit
y : array, shape(n,)
array representing the y-coordinates of the points to be fit
Returns
-------
y0 : float
the estimated derivative at x0 = 0.5*len(x)
"""
x = np.asarray(x)
y = np.asarray(y)
X = np.row_stack((x**2, x, np.ones_like(x)))
XX = (X).dot(X.T)
b = X.dot(y)
beta = npl.solve(XX,b)
x0 = 0.5 * len(x)
y0 = 2.0 * beta[0] * (x0) + beta[1]
return y0
def _get_energy_derivative(self):
r"""Approximate derivative of the energy profile
Returns the derivative of the estimated energy as a function of "time"
(iterations) at the last iteration
"""
n_iter = len(self.energy_list)
if n_iter < self.energy_window:
raise ValueError('Not enough data to fit the energy profile')
x = range(self.energy_window)
y = self.energy_list[(n_iter - self.energy_window):n_iter]
ss = sum(y)
if(ss > 0):
ss *= -1
y = [v / ss for v in y]
der = self._approximate_derivative_direct(x,y)
return der
def _optimize(self):
r"""Starts the optimization
The main multi-scale symmetric optimization algorithm
"""
self.full_energy_profile = []
if self.callback is not None:
self.callback(self, RegistrationStages.OPT_START)
for level in range(self.levels - 1, -1, -1):
if self.verbosity >= VerbosityLevels.STATUS:
print('Optimizing level %d'%level)
self.current_level = level
self.metric.set_levels_below(self.levels - level)
self.metric.set_levels_above(level)
if level < self.levels - 1:
expand_factors = \
self.static_ss.get_expand_factors(level+1, level)
new_shape = self.static_ss.get_domain_shape(level)
self.static_to_ref.expand_fields(expand_factors, new_shape)
self.moving_to_ref.expand_fields(expand_factors, new_shape)
self.niter = 0
self.energy_list = []
derivative = np.inf
if self.callback is not None:
self.callback(self, RegistrationStages.SCALE_START)
while ((self.niter < self.level_iters[self.levels - 1 - level]) and
(self.opt_tol < derivative)):
derivative = self._iterate()
self.niter += 1
self.full_energy_profile.extend(self.energy_list)
if self.callback is not None:
self.callback(self, RegistrationStages.SCALE_END)
# Reporting mean and std in stats[1] and stats[2]
residual, stats = self.static_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
print('Static-Reference Residual error: %0.6f (%0.6f)'
% (stats[1], stats[2]))
residual, stats = self.moving_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
            print('Moving-Reference Residual error: %0.6f (%0.6f)'
% (stats[1], stats[2]))
#Compose the two partial transformations
self.static_to_ref = self.moving_to_ref.warp_endomorphism(
self.static_to_ref.inverse()).inverse()
# Report mean and std for the composed deformation field
residual, stats = self.static_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
print('Final residual error: %0.6f (%0.6f)' % (stats[1], stats[2]))
if self.callback is not None:
self.callback(self, RegistrationStages.OPT_END)
def optimize(self, static, moving, static_affine=None, moving_affine=None,
prealign=None):
r"""
Starts the optimization
Parameters
----------
static: array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization. The
displacement fields will have the same discretization as the static
image.
moving: array, shape (S, R, C) or (R, C)
the image to be used as "moving" during optimization. Since the
deformation fields' discretization is the same as the static image,
it is necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed to
be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the 'prealign' matrix
static_affine: array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the static image
moving_affine: array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the moving image
prealign: array, shape (dim+1, dim+1)
the affine transformation (operating on the physical space)
pre-aligning the moving image towards the static
Returns
-------
static_to_ref : DiffeomorphicMap object
the diffeomorphic map that brings the moving image towards the
static one in the forward direction (i.e. by calling
static_to_ref.transform) and the static image towards the
moving one in the backward direction (i.e. by calling
static_to_ref.transform_inverse).
"""
if self.verbosity >= VerbosityLevels.DEBUG:
print("Pre-align:", prealign)
self._init_optimizer(static.astype(floating), moving.astype(floating),
static_affine, moving_affine, prealign)
self._optimize()
self._end_optimizer()
return self.static_to_ref
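# Illustrative usage sketch (not part of the original module): running SyN
# on a pair of synthetic 2D images. SSDMetric is assumed to be available
# from dipy.align.metrics; level_iters is ordered from the coarsest level
# to the finest one (see set_level_iters).
def _example_syn_registration():
    from dipy.align.metrics import SSDMetric
    x = np.linspace(-1, 1, 64)
    static = np.exp(-(x[:, None] ** 2 + x[None, :] ** 2) / 0.1)
    moving = np.exp(-((x[:, None] - 0.2) ** 2 + x[None, :] ** 2) / 0.1)
    metric = SSDMetric(2)
    sdr = SymmetricDiffeomorphicRegistration(metric, level_iters=[20, 10, 5])
    mapping = sdr.optimize(static, moving)
    warped_moving = mapping.transform(moving)          # moving -> static grid
    warped_static = mapping.transform_inverse(static)  # static -> moving grid
    return warped_moving, warped_static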
# sql/util.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""High level utilities which build upon other modules here.
"""
from .. import exc, util
from .base import _from_objects, ColumnSet
from . import operators, visitors
from itertools import chain
from collections import deque
from .elements import BindParameter, ColumnClause, ColumnElement, \
Null, UnaryExpression, literal_column, Label
from .selectable import ScalarSelect, Join, FromClause, FromGrouping
from .schema import Column
join_condition = util.langhelpers.public_factory(
Join._join_condition,
".sql.util.join_condition")
# names that are still being imported from the outside
from .annotation import _shallow_annotate, _deep_annotate, _deep_deannotate
from .elements import _find_columns
from .ddl import sort_tables
def find_join_source(clauses, join_to):
"""Given a list of FROM clauses and a selectable,
return the first index and element from the list of
    clauses which can be joined against the selectable. Returns
None, None if no match is found.
e.g.::
clause1 = table1.join(table2)
clause2 = table4.join(table5)
join_to = table2.join(table3)
find_join_source([clause1, clause2], join_to) == clause1
"""
selectables = list(_from_objects(join_to))
for i, f in enumerate(clauses):
for s in selectables:
if f.is_derived_from(s):
return i, f
else:
return None, None
def visit_binary_product(fn, expr):
"""Produce a traversal of the given expression, delivering
column comparisons to the given function.
The function is of the form::
def my_fn(binary, left, right)
For each binary expression located which has a
comparison operator, the product of "left" and
"right" will be delivered to that function,
in terms of that binary.
Hence an expression like::
and_(
(a + b) == q + func.sum(e + f),
j == r
)
would have the traversal::
a <eq> q
a <eq> e
a <eq> f
b <eq> q
b <eq> e
b <eq> f
j <eq> r
That is, every combination of "left" and
"right" that doesn't further contain
a binary comparison is passed as pairs.
"""
stack = []
def visit(element):
if isinstance(element, ScalarSelect):
# we don't want to dig into correlated subqueries,
# those are just column elements by themselves
yield element
elif element.__visit_name__ == 'binary' and \
operators.is_comparison(element.operator):
stack.insert(0, element)
for l in visit(element.left):
for r in visit(element.right):
fn(stack[0], l, r)
stack.pop(0)
for elem in element.get_children():
visit(elem)
else:
if isinstance(element, ColumnClause):
yield element
for elem in element.get_children():
for e in visit(elem):
yield e
list(visit(expr))
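# Editor's sketch (not in the original module): collecting the delivered
# pairs with a plain callback, using an illustrative Core table. Kept as a
# comment so the module's import-time behavior is unchanged.
#
#     from sqlalchemy import MetaData, Table, Column, Integer, and_, func
#
#     md = MetaData()
#     t = Table('t', md, Column('a', Integer), Column('b', Integer),
#               Column('q', Integer), Column('j', Integer))
#     expr = and_((t.c.a + t.c.b) == t.c.q + func.sum(t.c.b),
#                 t.c.j == t.c.q)
#     pairs = []
#     visit_binary_product(lambda binary, l, r: pairs.append((l, r)), expr)
#     # pairs now holds each (left, right) column combination described in
#     # the docstring, e.g. (t.c.a, t.c.q), (t.c.a, t.c.b), (t.c.j, t.c.q).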
def find_tables(clause, check_columns=False,
include_aliases=False, include_joins=False,
include_selects=False, include_crud=False):
"""locate Table objects within the given expression."""
tables = []
_visitors = {}
if include_selects:
_visitors['select'] = _visitors['compound_select'] = tables.append
if include_joins:
_visitors['join'] = tables.append
if include_aliases:
_visitors['alias'] = tables.append
if include_crud:
_visitors['insert'] = _visitors['update'] = \
_visitors['delete'] = lambda ent: tables.append(ent.table)
if check_columns:
def visit_column(column):
tables.append(column.table)
_visitors['column'] = visit_column
_visitors['table'] = tables.append
visitors.traverse(clause, {'column_collections': False}, _visitors)
return tables
def unwrap_order_by(clause):
"""Break up an 'order by' expression into individual column-expressions,
without DESC/ASC/NULLS FIRST/NULLS LAST"""
cols = util.column_set()
stack = deque([clause])
while stack:
t = stack.popleft()
if isinstance(t, ColumnElement) and \
(
not isinstance(t, UnaryExpression) or \
not operators.is_ordering_modifier(t.modifier)
):
cols.add(t)
else:
for c in t.get_children():
stack.append(c)
return cols
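# Editor's sketch: ordering modifiers are stripped, so an ORDER BY element
# such as t.c.a.desc() (table/column names illustrative) unwraps to the bare
# column:
#
#     unwrap_order_by(t.c.a.desc())   # -> column set containing t.c.a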
def clause_is_present(clause, search):
"""Given a target clause and a second to search within, return True
if the target is plainly present in the search without any
subqueries or aliases involved.
Basically descends through Joins.
"""
for elem in surface_selectables(search):
if clause == elem: # use == here so that Annotated's compare
return True
else:
return False
def surface_selectables(clause):
stack = [clause]
while stack:
elem = stack.pop()
yield elem
if isinstance(elem, Join):
stack.extend((elem.left, elem.right))
elif isinstance(elem, FromGrouping):
stack.append(elem.element)
def selectables_overlap(left, right):
"""Return True if left/right have some overlapping selectable"""
return bool(
set(surface_selectables(left)).intersection(
surface_selectables(right)
)
)
def bind_values(clause):
"""Return an ordered list of "bound" values in the given clause.
E.g.::
>>> expr = and_(
... table.c.foo==5, table.c.foo==7
... )
>>> bind_values(expr)
[5, 7]
"""
v = []
def visit_bindparam(bind):
v.append(bind.effective_value)
visitors.traverse(clause, {}, {'bindparam': visit_bindparam})
return v
def _quote_ddl_expr(element):
if isinstance(element, util.string_types):
element = element.replace("'", "''")
return "'%s'" % element
else:
return repr(element)
class _repr_params(object):
"""A string view of bound parameters, truncating
display to the given number of 'multi' parameter sets.
"""
def __init__(self, params, batches):
self.params = params
self.batches = batches
def __repr__(self):
if isinstance(self.params, (list, tuple)) and \
len(self.params) > self.batches and \
isinstance(self.params[0], (list, dict, tuple)):
msg = " ... displaying %i of %i total bound parameter sets ... "
return ' '.join((
repr(self.params[:self.batches - 2])[0:-1],
msg % (self.batches, len(self.params)),
repr(self.params[-2:])[1:]
))
else:
return repr(self.params)
def adapt_criterion_to_null(crit, nulls):
"""given criterion containing bind params, convert selected elements
to IS NULL.
"""
def visit_binary(binary):
if isinstance(binary.left, BindParameter) \
and binary.left._identifying_key in nulls:
# reverse order if the NULL is on the left side
binary.left = binary.right
binary.right = Null()
binary.operator = operators.is_
binary.negate = operators.isnot
elif isinstance(binary.right, BindParameter) \
and binary.right._identifying_key in nulls:
binary.right = Null()
binary.operator = operators.is_
binary.negate = operators.isnot
return visitors.cloned_traverse(crit, {}, {'binary': visit_binary})
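# Editor's sketch: a criterion whose bind parameter key appears in `nulls`
# is rewritten to an IS NULL comparison (identifiers illustrative):
#
#     from sqlalchemy import bindparam
#
#     crit = t.c.a == bindparam('x')
#     adapt_criterion_to_null(crit, {'x'})   # roughly "t.a IS NULL"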
def splice_joins(left, right, stop_on=None):
if left is None:
return right
stack = [(right, None)]
adapter = ClauseAdapter(left)
ret = None
while stack:
(right, prevright) = stack.pop()
if isinstance(right, Join) and right is not stop_on:
right = right._clone()
right._reset_exported()
right.onclause = adapter.traverse(right.onclause)
stack.append((right.left, right))
else:
right = adapter.traverse(right)
if prevright is not None:
prevright.left = right
if ret is None:
ret = right
return ret
def reduce_columns(columns, *clauses, **kw):
"""given a list of columns, return a 'reduced' set based on natural
equivalents.
the set is reduced to the smallest list of columns which have no natural
equivalent present in the list. A "natural equivalent" means that two
columns will ultimately represent the same value because they are related
by a foreign key.
\*clauses is an optional list of join clauses which will be traversed
to further identify columns that are "equivalent".
\**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
whose tables are not yet configured, or columns that aren't yet present.
This function is primarily used to determine the most minimal "primary key"
from a selectable, by reducing the set of primary key columns present
in the selectable to just those that are not repeated.
"""
ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False)
only_synonyms = kw.pop('only_synonyms', False)
columns = util.ordered_column_set(columns)
omit = util.column_set()
for col in columns:
for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
for c in columns:
if c is col:
continue
try:
fk_col = fk.column
except exc.NoReferencedColumnError:
# TODO: add specific coverage here
# to test/sql/test_selectable ReduceTest
if ignore_nonexistent_tables:
continue
else:
raise
except exc.NoReferencedTableError:
# TODO: add specific coverage here
# to test/sql/test_selectable ReduceTest
if ignore_nonexistent_tables:
continue
else:
raise
if fk_col.shares_lineage(c) and \
(not only_synonyms or \
c.name == col.name):
omit.add(col)
break
if clauses:
def visit_binary(binary):
if binary.operator == operators.eq:
cols = util.column_set(chain(*[c.proxy_set
for c in columns.difference(omit)]))
if binary.left in cols and binary.right in cols:
for c in reversed(columns):
if c.shares_lineage(binary.right) and \
(not only_synonyms or \
c.name == binary.left.name):
omit.add(c)
break
for clause in clauses:
if clause is not None:
visitors.traverse(clause, {}, {'binary': visit_binary})
return ColumnSet(columns.difference(omit))
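# Editor's sketch: with a foreign key relating the two id columns, the
# referring column is dropped as a natural equivalent (schema illustrative):
#
#     from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
#
#     md = MetaData()
#     parent = Table('parent', md, Column('id', Integer, primary_key=True))
#     child = Table('child', md,
#                   Column('id', Integer, ForeignKey('parent.id'),
#                          primary_key=True))
#     reduce_columns([parent.c.id, child.c.id])  # -> ColumnSet([parent.c.id])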
def criterion_as_pairs(expression, consider_as_foreign_keys=None,
consider_as_referenced_keys=None, any_operator=False):
"""traverse an expression and locate binary criterion pairs."""
if consider_as_foreign_keys and consider_as_referenced_keys:
raise exc.ArgumentError("Can only specify one of "
"'consider_as_foreign_keys' or "
"'consider_as_referenced_keys'")
def col_is(a, b):
#return a is b
return a.compare(b)
def visit_binary(binary):
if not any_operator and binary.operator is not operators.eq:
return
if not isinstance(binary.left, ColumnElement) or \
not isinstance(binary.right, ColumnElement):
return
if consider_as_foreign_keys:
if binary.left in consider_as_foreign_keys and \
(col_is(binary.right, binary.left) or
binary.right not in consider_as_foreign_keys):
pairs.append((binary.right, binary.left))
elif binary.right in consider_as_foreign_keys and \
(col_is(binary.left, binary.right) or
binary.left not in consider_as_foreign_keys):
pairs.append((binary.left, binary.right))
elif consider_as_referenced_keys:
if binary.left in consider_as_referenced_keys and \
(col_is(binary.right, binary.left) or
binary.right not in consider_as_referenced_keys):
pairs.append((binary.left, binary.right))
elif binary.right in consider_as_referenced_keys and \
(col_is(binary.left, binary.right) or
binary.left not in consider_as_referenced_keys):
pairs.append((binary.right, binary.left))
else:
if isinstance(binary.left, Column) and \
isinstance(binary.right, Column):
if binary.left.references(binary.right):
pairs.append((binary.right, binary.left))
elif binary.right.references(binary.left):
pairs.append((binary.left, binary.right))
pairs = []
visitors.traverse(expression, {}, {'binary': visit_binary})
return pairs
class AliasedRow(object):
"""Wrap a RowProxy with a translation map.
This object allows a set of keys to be translated
to those present in a RowProxy.
"""
def __init__(self, row, map):
# AliasedRow objects don't nest, so un-nest
# if another AliasedRow was passed
if isinstance(row, AliasedRow):
self.row = row.row
else:
self.row = row
self.map = map
def __contains__(self, key):
return self.map[key] in self.row
def has_key(self, key):
return key in self
def __getitem__(self, key):
return self.row[self.map[key]]
def keys(self):
return self.row.keys()
class ClauseAdapter(visitors.ReplacingCloningVisitor):
"""Clones and modifies clauses based on column correspondence.
E.g.::
table1 = Table('sometable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table2 = Table('someothertable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
condition = table1.c.col1 == table2.c.col1
make an alias of table1::
s = table1.alias('foo')
calling ``ClauseAdapter(s).traverse(condition)`` converts
condition to read::
s.c.col1 == table2.c.col1
"""
def __init__(self, selectable, equivalents=None,
include=None, exclude=None,
include_fn=None, exclude_fn=None,
adapt_on_names=False):
self.__traverse_options__ = {'stop_on': [selectable]}
self.selectable = selectable
if include:
assert not include_fn
self.include_fn = lambda e: e in include
else:
self.include_fn = include_fn
if exclude:
assert not exclude_fn
self.exclude_fn = lambda e: e in exclude
else:
self.exclude_fn = exclude_fn
self.equivalents = util.column_dict(equivalents or {})
self.adapt_on_names = adapt_on_names
def _corresponding_column(self, col, require_embedded,
_seen=util.EMPTY_SET):
newcol = self.selectable.corresponding_column(
col,
require_embedded=require_embedded)
if newcol is None and col in self.equivalents and col not in _seen:
for equiv in self.equivalents[col]:
newcol = self._corresponding_column(equiv,
require_embedded=require_embedded,
_seen=_seen.union([col]))
if newcol is not None:
return newcol
if self.adapt_on_names and newcol is None:
newcol = self.selectable.c.get(col.name)
return newcol
magic_flag = False
def replace(self, col):
if not self.magic_flag and isinstance(col, FromClause) and \
self.selectable.is_derived_from(col):
return self.selectable
elif not isinstance(col, ColumnElement):
return None
elif self.include_fn and not self.include_fn(col):
return None
elif self.exclude_fn and self.exclude_fn(col):
return None
else:
return self._corresponding_column(col, True)
class ColumnAdapter(ClauseAdapter):
"""Extends ClauseAdapter with extra utility functions.
Provides the ability to "wrap" this ClauseAdapter
around another, a columns dictionary which returns
adapted elements given an original, and an
adapted_row() factory.
"""
def __init__(self, selectable, equivalents=None,
chain_to=None, include=None,
exclude=None, adapt_required=False):
ClauseAdapter.__init__(self, selectable, equivalents, include, exclude)
if chain_to:
self.chain(chain_to)
self.columns = util.populate_column_dict(self._locate_col)
self.adapt_required = adapt_required
def wrap(self, adapter):
ac = self.__class__.__new__(self.__class__)
ac.__dict__ = self.__dict__.copy()
ac._locate_col = ac._wrap(ac._locate_col, adapter._locate_col)
ac.adapt_clause = ac._wrap(ac.adapt_clause, adapter.adapt_clause)
ac.adapt_list = ac._wrap(ac.adapt_list, adapter.adapt_list)
ac.columns = util.populate_column_dict(ac._locate_col)
return ac
adapt_clause = ClauseAdapter.traverse
adapt_list = ClauseAdapter.copy_and_process
def _wrap(self, local, wrapped):
def locate(col):
col = local(col)
return wrapped(col)
return locate
def _locate_col(self, col):
c = self._corresponding_column(col, True)
if c is None:
c = self.adapt_clause(col)
# anonymize labels in case they have a hardcoded name
if isinstance(c, Label):
c = c.label(None)
# adapt_required used by eager loading to indicate that
# we don't trust a result row column that is not translated.
# this is to prevent a column from being interpreted as that
# of the child row in a self-referential scenario, see
# inheritance/test_basic.py->EagerTargetingTest.test_adapt_stringency
if self.adapt_required and c is col:
return None
return c
def adapted_row(self, row):
return AliasedRow(row, self.columns)
def __getstate__(self):
d = self.__dict__.copy()
del d['columns']
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.columns = util.PopulateDict(self._locate_col)
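# Editor's sketch: ColumnAdapter's `columns` mapping memoizes _locate_col, so
# adapted elements can be fetched dict-style (reusing the illustrative
# table1/alias names from the ClauseAdapter docstring above):
#
#     s = table1.alias('foo')
#     adapter = ColumnAdapter(s)
#     adapter.columns[table1.c.col1]   # -> s.c.col1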
|
michaelBenin/sqlalchemy
|
lib/sqlalchemy/sql/util.py
|
Python
|
mit
| 19,766
|
[
"VisIt"
] |
d927ca6e98b3e2c44e65f0aa4b36f7e3e75b8c3fae515f2f875801da27cea4e9
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_virtualservice
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of VirtualService Avi RESTful Object
description:
- This module is used to configure VirtualService object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
active_standby_se_tag:
description:
- This configuration only applies if the virtualservice is in legacy active standby ha mode and load distribution among active standby is enabled.
- This field is used to tag the virtualservice so that virtualservices with the same tag will share the same active serviceengine.
- Virtualservices with different tags will have different active serviceengines.
- If one of the serviceengine's in the serviceenginegroup fails, all virtualservices will end up using the same active serviceengine.
- Redistribution of the virtualservices can be either manual or automated when the failed serviceengine recovers.
- Redistribution is based on the auto redistribute property of the serviceenginegroup.
- Enum options - ACTIVE_STANDBY_SE_1, ACTIVE_STANDBY_SE_2.
- Default value when not specified in API or module is interpreted by Avi Controller as ACTIVE_STANDBY_SE_1.
analytics_policy:
description:
- Determines analytics settings for the application.
analytics_profile_ref:
description:
- Specifies settings related to analytics.
- It is a reference to an object of type analyticsprofile.
application_profile_ref:
description:
- Enable application layer specific features for the virtual service.
- It is a reference to an object of type applicationprofile.
auto_allocate_floating_ip:
description:
- Auto-allocate floating/elastic ip from the cloud infrastructure.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
auto_allocate_ip:
description:
- Auto-allocate vip from the provided subnet.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
availability_zone:
description:
- Availability-zone to place the virtual service.
- Field deprecated in 17.1.1.
avi_allocated_fip:
description:
- (internal-use) fip allocated by avi in the cloud infrastructure.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
avi_allocated_vip:
description:
- (internal-use) vip allocated by avi in the cloud infrastructure.
- Field deprecated in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
client_auth:
description:
- Http authentication configuration for protected resources.
cloud_config_cksum:
description:
- Checksum of cloud configuration for vs.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
cloud_type:
description:
- Enum options - cloud_none, cloud_vcenter, cloud_openstack, cloud_aws, cloud_vca, cloud_apic, cloud_mesos, cloud_linuxserver, cloud_docker_ucp,
- cloud_rancher, cloud_oshift_k8s.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
connections_rate_limit:
description:
- Rate limit the incoming connections to this virtual service.
content_rewrite:
description:
- Profile used to match and rewrite strings in request and/or response body.
created_by:
description:
- Creator name.
delay_fairness:
description:
- Select the algorithm for qos fairness.
- This determines how multiple virtual services sharing the same service engines will prioritize traffic over a congested network.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
description:
description:
- User defined description for the object.
discovered_network_ref:
description:
- (internal-use) discovered networks providing reachability for client facing virtual service ip.
- This field is deprecated.
- It is a reference to an object of type network.
- Field deprecated in 17.1.1.
discovered_networks:
description:
- (internal-use) discovered networks providing reachability for client facing virtual service ip.
- This field is used internally by avi, not editable by the user.
- Field deprecated in 17.1.1.
discovered_subnet:
description:
- (internal-use) discovered subnets providing reachability for client facing virtual service ip.
- This field is deprecated.
- Field deprecated in 17.1.1.
dns_info:
description:
- Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record.
            - Note that only one of the fqdn and dns_info settings is allowed.
dns_policies:
description:
- Dns policies applied on the dns traffic of the virtual service.
- Field introduced in 17.1.1.
version_added: "2.4"
east_west_placement:
description:
- Force placement on all se's in service group (mesos mode only).
- Default value when not specified in API or module is interpreted by Avi Controller as False.
enable_autogw:
description:
- Response traffic to clients will be sent back to the source mac address of the connection, rather than statically sent to a default gateway.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
enable_rhi:
description:
- Enable route health injection using the bgp config in the vrf context.
enable_rhi_snat:
description:
- Enable route health injection for source nat'ted floating ip address using the bgp config in the vrf context.
enabled:
description:
- Enable or disable the virtual service.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
floating_ip:
description:
- Floating ip to associate with this virtual service.
- Field deprecated in 17.1.1.
floating_subnet_uuid:
description:
- If auto_allocate_floating_ip is true and more than one floating-ip subnets exist, then the subnet for the floating ip address allocation.
- This field is applicable only if the virtualservice belongs to an openstack or aws cloud.
- In openstack or aws cloud it is required when auto_allocate_floating_ip is selected.
- Field deprecated in 17.1.1.
flow_dist:
description:
- Criteria for flow distribution among ses.
- Enum options - LOAD_AWARE, CONSISTENT_HASH_SOURCE_IP_ADDRESS, CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT.
- Default value when not specified in API or module is interpreted by Avi Controller as LOAD_AWARE.
flow_label_type:
description:
- Criteria for flow labelling.
- Enum options - NO_LABEL, SERVICE_LABEL.
- Default value when not specified in API or module is interpreted by Avi Controller as NO_LABEL.
fqdn:
description:
- Dns resolvable, fully qualified domain name of the virtualservice.
- Only one of 'fqdn' and 'dns_info' configuration is allowed.
host_name_xlate:
description:
- Translate the host name sent to the servers to this value.
- Translate the host name sent from servers back to the value used by the client.
http_policies:
description:
- Http policies applied on the data traffic of the virtual service.
ign_pool_net_reach:
description:
- Ignore pool servers network reachability constraints for virtual service placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
ip_address:
description:
- Ip address of the virtual service.
- Field deprecated in 17.1.1.
ipam_network_subnet:
description:
- Subnet and/or network for allocating virtualservice ip by ipam provider module.
limit_doser:
description:
- Limit potential dos attackers who exceed max_cps_per_client significantly to a fraction of max_cps_per_client for a while.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
max_cps_per_client:
description:
- Maximum connections per second per client ip.
- Allowed values are 10-1000.
- Special values are 0- 'unlimited'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
microservice_ref:
description:
- Microservice representing the virtual service.
- It is a reference to an object of type microservice.
name:
description:
- Name for the virtual service.
required: true
network_profile_ref:
description:
- Determines network settings such as protocol, tcp or udp, and related options for the protocol.
- It is a reference to an object of type networkprofile.
network_ref:
description:
- Manually override the network on which the virtual service is placed.
- It is a reference to an object of type network.
- Field deprecated in 17.1.1.
network_security_policy_ref:
description:
- Network security policies for the virtual service.
- It is a reference to an object of type networksecuritypolicy.
nsx_securitygroup:
description:
- A list of nsx service groups representing the clients which can access the virtual ip of the virtual service.
- Field introduced in 17.1.1.
version_added: "2.4"
performance_limits:
description:
            - Optional settings that determine performance limits such as max connections or bandwidth.
pool_group_ref:
description:
- The pool group is an object that contains pools.
- It is a reference to an object of type poolgroup.
pool_ref:
description:
- The pool is an object that contains destination servers and related attributes such as load-balancing and persistence.
- It is a reference to an object of type pool.
port_uuid:
description:
- (internal-use) network port assigned to the virtual service ip address.
- Field deprecated in 17.1.1.
remove_listening_port_on_vs_down:
description:
- Remove listening port if virtualservice is down.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
requests_rate_limit:
description:
- Rate limit the incoming requests to this virtual service.
scaleout_ecmp:
description:
- Disable re-distribution of flows across service engines for a virtual service.
- Enable if the network itself performs flow hashing with ecmp in environments such as gcp.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
se_group_ref:
description:
- The service engine group to use for this virtual service.
- Moving to a new se group is disruptive to existing connections for this vs.
- It is a reference to an object of type serviceenginegroup.
server_network_profile_ref:
description:
- Determines the network settings profile for the server side of tcp proxied connections.
- Leave blank to use the same settings as the client to vs side of the connection.
- It is a reference to an object of type networkprofile.
service_metadata:
description:
- Metadata pertaining to the service provided by this virtual service.
- In openshift/kubernetes environments, egress pod info is stored.
- Any user input to this field will be overwritten by avi vantage.
version_added: "2.4"
service_pool_select:
description:
- Select pool based on destination port.
services:
description:
- List of services defined for this virtual service.
sideband_profile:
description:
            - Sideband configuration to be used for this virtualservice. It can be used for sending traffic to sideband vips for external inspection etc.
version_added: "2.4"
snat_ip:
description:
- Nat'ted floating source ip address(es) for upstream connection to servers.
ssl_key_and_certificate_refs:
description:
- Select or create one or two certificates, ec and/or rsa, that will be presented to ssl/tls terminated connections.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- Determines the set of ssl versions and ciphers to accept for ssl/tls terminated connections.
- It is a reference to an object of type sslprofile.
ssl_sess_cache_avg_size:
description:
- Expected number of ssl session cache entries (may be exceeded).
- Allowed values are 1024-16383.
- Default value when not specified in API or module is interpreted by Avi Controller as 1024.
static_dns_records:
description:
- List of static dns records applied to this virtual service.
- These are static entries and no health monitoring is performed against the ip addresses.
subnet:
description:
- Subnet providing reachability for client facing virtual service ip.
- Field deprecated in 17.1.1.
subnet_uuid:
description:
            - It represents subnet for the virtual service ip address allocation when auto_allocate_ip is true. It is only applicable in openstack or aws cloud.
- This field is required if auto_allocate_ip is true.
- Field deprecated in 17.1.1.
tenant_ref:
description:
- It is a reference to an object of type tenant.
traffic_clone_profile_ref:
description:
- Server network or list of servers for cloning traffic.
- It is a reference to an object of type trafficcloneprofile.
- Field introduced in 17.1.1.
version_added: "2.4"
type:
description:
- Specify if this is a normal virtual service, or if it is the parent or child of an sni-enabled virtual hosted virtual service.
- Enum options - VS_TYPE_NORMAL, VS_TYPE_VH_PARENT, VS_TYPE_VH_CHILD.
- Default value when not specified in API or module is interpreted by Avi Controller as VS_TYPE_NORMAL.
url:
description:
- Avi controller URL of the object.
use_bridge_ip_as_vip:
description:
- Use bridge ip as vip on each host in mesos deployments.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
uuid:
description:
- Uuid of the virtualservice.
vh_domain_name:
description:
- The exact name requested from the client's sni-enabled tls hello domain name field.
- If this is a match, the parent vs will forward the connection to this child vs.
vh_parent_vs_uuid:
description:
- Specifies the virtual service acting as virtual hosting (sni) parent.
vip:
description:
- List of virtual service ips.
            - While creating a 'shared vs', please use vsvip_ref to point to the shared entities.
- Field introduced in 17.1.1.
version_added: "2.4"
vrf_context_ref:
description:
- Virtual routing context that the virtual service is bound to.
- This is used to provide the isolation of the set of networks the application is attached to.
- It is a reference to an object of type vrfcontext.
vs_datascripts:
description:
- Datascripts applied on the data traffic of the virtual service.
vsvip_ref:
description:
            - Mostly used during the creation of shared vs, this field refers to entities that can be shared across virtual services.
- It is a reference to an object of type vsvip.
- Field introduced in 17.1.1.
version_added: "2.4"
weight:
description:
- The quality of service weight to assign to traffic transmitted from this virtual service.
- A higher weight will prioritize traffic versus other virtual services sharing the same service engines.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create SSL Virtual Service using Pool testpool2
avi_virtualservice:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
name: newtestvs
state: present
performance_limits:
max_concurrent_connections: 1000
services:
- port: 443
enable_ssl: true
- port: 80
ssl_profile_ref: '/api/sslprofile?name=System-Standard'
application_profile_ref: '/api/applicationprofile?name=System-Secure-HTTP'
ssl_key_and_certificate_refs:
- '/api/sslkeyandcertificate?name=System-Default-Cert'
ip_address:
addr: 10.90.131.103
type: V4
pool_ref: '/api/pool?name=testpool2'
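# Editor's sketch: removing the same virtual service; the controller
# credentials are placeholders carried over from the example above.
- name: Remove SSL Virtual Service
  avi_virtualservice:
    controller: 10.10.27.90
    username: admin
    password: AviNetworks123!
    name: newtestvs
    state: absent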
'''
RETURN = '''
obj:
description: VirtualService (api/virtualservice) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
active_standby_se_tag=dict(type='str',),
analytics_policy=dict(type='dict',),
analytics_profile_ref=dict(type='str',),
application_profile_ref=dict(type='str',),
auto_allocate_floating_ip=dict(type='bool',),
auto_allocate_ip=dict(type='bool',),
availability_zone=dict(type='str',),
avi_allocated_fip=dict(type='bool',),
avi_allocated_vip=dict(type='bool',),
client_auth=dict(type='dict',),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
cloud_type=dict(type='str',),
connections_rate_limit=dict(type='dict',),
content_rewrite=dict(type='dict',),
created_by=dict(type='str',),
delay_fairness=dict(type='bool',),
description=dict(type='str',),
discovered_network_ref=dict(type='list',),
discovered_networks=dict(type='list',),
discovered_subnet=dict(type='list',),
dns_info=dict(type='list',),
dns_policies=dict(type='list',),
east_west_placement=dict(type='bool',),
enable_autogw=dict(type='bool',),
enable_rhi=dict(type='bool',),
enable_rhi_snat=dict(type='bool',),
enabled=dict(type='bool',),
floating_ip=dict(type='dict',),
floating_subnet_uuid=dict(type='str',),
flow_dist=dict(type='str',),
flow_label_type=dict(type='str',),
fqdn=dict(type='str',),
host_name_xlate=dict(type='str',),
http_policies=dict(type='list',),
ign_pool_net_reach=dict(type='bool',),
ip_address=dict(type='dict',),
ipam_network_subnet=dict(type='dict',),
limit_doser=dict(type='bool',),
max_cps_per_client=dict(type='int',),
microservice_ref=dict(type='str',),
name=dict(type='str', required=True),
network_profile_ref=dict(type='str',),
network_ref=dict(type='str',),
network_security_policy_ref=dict(type='str',),
nsx_securitygroup=dict(type='list',),
performance_limits=dict(type='dict',),
pool_group_ref=dict(type='str',),
pool_ref=dict(type='str',),
port_uuid=dict(type='str',),
remove_listening_port_on_vs_down=dict(type='bool',),
requests_rate_limit=dict(type='dict',),
scaleout_ecmp=dict(type='bool',),
se_group_ref=dict(type='str',),
server_network_profile_ref=dict(type='str',),
service_metadata=dict(type='str',),
service_pool_select=dict(type='list',),
services=dict(type='list',),
sideband_profile=dict(type='dict',),
snat_ip=dict(type='list',),
ssl_key_and_certificate_refs=dict(type='list',),
ssl_profile_ref=dict(type='str',),
ssl_sess_cache_avg_size=dict(type='int',),
static_dns_records=dict(type='list',),
subnet=dict(type='dict',),
subnet_uuid=dict(type='str',),
tenant_ref=dict(type='str',),
traffic_clone_profile_ref=dict(type='str',),
type=dict(type='str',),
url=dict(type='str',),
use_bridge_ip_as_vip=dict(type='bool',),
uuid=dict(type='str',),
vh_domain_name=dict(type='list',),
vh_parent_vs_uuid=dict(type='str',),
vip=dict(type='list',),
vrf_context_ref=dict(type='str',),
vs_datascripts=dict(type='list',),
vsvip_ref=dict(type='str',),
weight=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'virtualservice',
set([]))
if __name__ == '__main__':
main()
|
kbrebanov/ansible
|
lib/ansible/modules/network/avi/avi_virtualservice.py
|
Python
|
gpl-3.0
| 24,059
|
[
"VisIt"
] |
989809b28160024b8c943f13ed96029646e0b2bd2ad4b12ae9812ff18f7e87ae
|
import uuid
from typing import Any, Callable
from Firefly import aliases, logging
from Firefly.const import API_ALEXA_VIEW, API_FIREBASE_VIEW, API_INFO_REQUEST, EVENT_TYPE_BROADCAST, TYPE_DEVICE
from Firefly.helpers.events import Command, Event, Request
from Firefly.helpers.metadata import EXPORT_UI, FF_ID, HIDDEN_BY_USER, action_text
from Firefly.helpers.metadata.settings import settings_alias, settings_device_tags
class Device(object):
def __init__(self, firefly, package, title, author, commands, requests, device_type, **kwargs):
device_id = kwargs.get('ff_id')
alias = kwargs.get('alias')
self._firefly = firefly
self._title = title
self._author = author
self._package = package
# TODO: Change commands and requests to set
self._commands = list(set(commands))
self._requests = list(set(requests))
self._device_type = device_type
self._export_ui = True
self._hidden_by_system = False
self._initial_values = kwargs.get('initial_values')
self._command_mapping = {}
self._request_mapping = {}
self._security_monitoring = kwargs.get('security_monitoring', False)
self._metadata = {
'title': self._title,
'author': self._author,
'package': self._package,
'actions': {}
}
self._settings = {
'values': {},
'metadata': {},
'requests': {}
}
self._last_command_source = 'none'
self._last_update_time = self.firefly.location.now
    # If an alias is given but no ID, look up the ID in the config files.
if not device_id and alias:
if aliases.get_device_id(alias) is not None:
device_id = aliases.get_device_id(alias)
if device_id in self.firefly.components:
device_id = None
elif device_id and not alias:
if aliases.get_alias(device_id):
alias = aliases.get_alias(device_id)
    # If no ff_id given -> generate a random ID.
if device_id is None:
device_id = str(uuid.uuid4())
self._id = device_id
self._alias = alias if alias else device_id
self._alias = aliases.set_alias(self._id, self._alias)
self._habridge_export = kwargs.get('habridge_export', True)
self._habridge_alias = kwargs.get('habridge_alias', self._alias)
self._homekit_export = kwargs.get('homekit_export', True)
self._homekit_alias = kwargs.get('homekit_alias', self._alias)
self._homekit_types = {}
self._alexa_export = kwargs.get('alexa_export', True)
self._alexa_categories = kwargs.get('alexa_categories', [])
self._alexa_capabilities = []
self._alexa_manufacturer_name = kwargs.get('alexa_manufacturer_name', 'Firefly')
self._alexa_description = kwargs.get('alexa_description', 'Firefly Home Device')
self._room = kwargs.get('room', '')
self.room_id = kwargs.get('room_id', '')
self._tags = kwargs.get('tags', [])
self.add_command('set_alias', self.set_alias)
self.add_command('set_room', self.set_room)
self.add_command('delete', self.delete_device)
self.add_action('z_last_update', action_text(title='Last Update', request='last_update'))
self.add_request('last_update', self.last_update)
self.add_setting('name', '_alias', 'alias', settings_alias())
self.add_setting('tags', '_tags', 'tags', settings_device_tags())
# Set initial values
for prop, val in self._initial_values.items():
self.__setattr__(prop, val)
self._before_state = None
self._after_state = None
def __str__(self):
return '< FIREFLY DEVICE - ID: %s | PACKAGE: %s >' % (self.id, self._package)
def add_setting(self, setting_index, setting_request, setting_param, setting_metadata):
self._settings['metadata'][setting_index] = setting_metadata
self._settings['requests'][setting_param] = setting_request
def get_settings_values(self):
for param in list(self._settings['requests'].keys()):
self._settings['values'][param] = self.get_setting_value(param)
def get_setting_value(self, setting_param):
return self.__getattribute__(self._settings['requests'][setting_param])
def get_settings_view(self):
self.get_settings_values()
return self._settings
def store_before_state(self, **kwargs):
self._before_state = self.get_all_request_values(True, True)
def store_after_state(self, **kwargs):
self._after_state = self.get_all_request_values(True, True)
def broadcast_change(self, **kwargs):
if self._before_state is None:
      logging.error('before_state not recorded')
return False
if self._after_state is None:
self.store_after_state()
self.broadcast_changes(self._before_state, self._after_state)
self._before_state = None
self._after_state = None
return True
def set_alias(self, **kwargs):
new_alias = kwargs.get('alias')
if new_alias is None:
return
self._alias = aliases.set_alias(self._id, new_alias)
self._habridge_alias = self._alias
self._homekit_alias = self._alias
self.firefly.refresh_firebase()
def set_room(self, **kwargs):
new_room = kwargs.get('room')
if new_room is None:
return
self._room = new_room
self.firefly._rooms.build_rooms()
def delete_device(self):
self.firefly.delete_device(self.id)
return
def last_update(self):
return '%s (%s)' % (self._last_update_time.strftime("%B %d %Y %I:%M:%S %p"), self._last_command_source)
def export(self, current_values: bool = True, api_view: bool = False) -> dict:
"""
    Export the ff_id config, optionally including current values, to a dictionary.
Args:
current_values (bool): Include current values as new initial values.
Returns:
(dict): A dict of the ff_id config.
"""
export_data = {
'package': self._package,
'ff_id': self.id,
'alias': self._alias,
'type': self.type,
'homekit_export': self._homekit_export,
'homekit_alias': self._homekit_alias,
'homekit_types': self._homekit_types,
'habridge_export': self._habridge_export,
'habridge_alias': self._habridge_alias,
'export_ui': self._export_ui,
'tags': self._tags,
'room': self._room,
'alexa_export': self._alexa_export,
'security_monitoring': self.security,
'room_id': self.room_id
}
if current_values:
current_vals = {}
for item in self._initial_values.keys():
current_vals[item] = self.__getattribute__(item)
export_data['initial_values'] = current_vals
return export_data
def get_alexa_view(self):
if self._alexa_export is False:
return None
return {
'endpointId': self.id,
'friendlyName': self.alias,
'description': self._alexa_description,
'manufacturerName': self._alexa_manufacturer_name,
'displayCategories': self._alexa_categories,
'cookie': {},
'capabilities': self._alexa_capabilities
}
def add_alexa_capabilities(self, capabilities):
if type(capabilities) is not list:
capabilities = [capabilities]
for capability in capabilities:
if capability not in self._alexa_capabilities:
self._alexa_capabilities.append(capability)
def add_alexa_categories(self, categories):
if type(categories) is not list:
categories = [categories]
for category in categories:
if category not in self._alexa_categories:
self._alexa_categories.append(category)
def set_alexa_categories(self, categories):
if type(categories) is not list:
categories = [categories]
self._alexa_categories = categories
def add_command(self, command: str, function: Callable) -> None:
"""
Adds a command to the list of supported ff_id commands.
Args:
command (str): The string of the command
function (Callable): The function to be executed.
"""
# TODO: Remove this, just use command_map for verification
if command not in self._commands:
self._commands.append(command)
self._command_mapping[command] = function
def add_request(self, request: str, function: Callable) -> None:
"""
Adds a request to the list of supported ff_id requests.
Args:
request (str): The string of the request
function (Callable): The function to be executed.
"""
# TODO: Remove this, just use request_map for verification
if request not in self._requests:
self._requests.append(request)
self._request_mapping[request] = function
def add_action(self, action, action_meta):
self._metadata['actions'][action] = action_meta
if action_meta.get('primary') is True:
self._metadata['primary'] = action
def add_homekit_export(self, homekit_type, action):
self._homekit_types[homekit_type] = action
def command(self, command: Command) -> bool:
"""
Function that is called to send a command to a ff_id.
Args:
command (Command): The command to be sent in a Command object
Returns:
(bool): Command successful.
"""
# state_before = self.get_all_request_values(True, True)
self.store_before_state()
logging.debug('%s: Got Command: %s' % (self.id, command.command))
if command.command in self.command_map.keys():
self._last_command_source = command.source
self._last_update_time = self.firefly.location.now
try:
self.command_map[command.command](**command.args)
except:
return False
# state_after = self.get_all_request_values(True, True)
# self.broadcast_changes(state_before, state_after)
self.broadcast_change()
# scheduler.runInMCS(5, self.broadcast_change, job_id='%s-b' % self.id, max_instances=1)
return True
return False
def broadcast_changes(self, before: dict, after: dict) -> None:
"""Find changes from before and after states and broadcast the changes.
Args:
before (dict): before state.
after (dict): after state.
Returns:
"""
if before == after:
logging.debug('No change detected. %s' % self)
return
logging.debug('Change detected. %s' % self)
changed = {}
for item, val in after.items():
if after.get(item) != before.get(item):
changed[item] = after.get(item)
changed['last_update'] = self.last_update()
logging.debug("Items changed: %s %s" % (str(changed), self))
broadcast = Event(self.id, EVENT_TYPE_BROADCAST, event_action=changed)
logging.info(broadcast)
self._firefly.send_event(broadcast)
return
def request(self, request: Request) -> Any:
"""Function to request data from the ff_id.
The returned data can be in any format. Common formats should be:
str, int, dict
Args:
request (Request): Request object
Returns:
Requested Data
"""
logging.debug('%s: Got Request %s' % (self.id, request))
if request.request == API_INFO_REQUEST:
return self.get_api_info()
if request.request == API_FIREBASE_VIEW:
return self.get_firebase_views()
if request.request == API_ALEXA_VIEW:
return self.get_alexa_view()
if request.request in self.request_map.keys():
return self.request_map[request.request](**request.args)
return None
def event(self, event: Event) -> None:
    logging.error(code='FF.DEV.EVE.001')  # devices currently don't support events
def get_api_info(self) -> dict:
"""
Function to get view for API.
Returns (dict): JSON for API view.
"""
return_data = {}
return_data.update(self.export(api_view=True))
return_data['alexa_view'] = self.get_alexa_view()
return_data['commands'] = self._commands
return_data['requests'] = self._requests
return_data['device_type'] = self._device_type
return_data['metadata'] = self._metadata
return_data['current_values'] = return_data['initial_values']
return_data['last_update_time'] = str(self._last_update_time)
return_data['last_command_source'] = self._last_command_source
return_data.pop('initial_values')
return_data['request_values'] = {}
for r in self._requests:
return_data['request_values'][r] = self.request_map[r]()
return return_data
def get_firebase_views(self, **kwargs) -> dict:
"""
Get the minimum data needed for the web ui for firebase.
Args:
**kwargs:
Returns: (dict) firebase view.
"""
return_data = {
FF_ID: self.id,
'alias': self._alias,
'metadata': self._metadata,
'deviceType': self._device_type,
'tags': self._tags,
'room': self._room,
EXPORT_UI: self._export_ui,
HIDDEN_BY_USER: self._export_ui
}
return return_data
def get_all_request_values(self, min_data=False, diff_check=False, **kwargs) -> dict:
"""Function to get all requestable values.
Returns (dict): All requests and values.
Args:
min_data (bool): only get requests that are lowercase. This is used for firebase and filtering out unneeded data.
"""
request_values = {}
for r in self._requests:
if diff_check and r == 'last_update':
continue
try:
if not min_data:
request_values[r] = self.request_map[r]()
continue
if min_data and r.islower():
value = self.request_map[r]()
if type(value) is float:
value = round(value, 2)
request_values[r] = value
except:
pass
return request_values
def member_set(self, key: str, val: Any) -> None:
"""Function for setting member values when you want it to be broadcasted.
    This is mainly used for time delay functions and is similar to the builtin __setattr__.
Args:
key (string): Value to be changed.
val (Any): New value.
Returns:
"""
logging.info("Setting %s to %s" % (key, val))
state_before = self.get_all_request_values(True, True)
self.__setattr__(key, val)
state_after = self.get_all_request_values(True, True)
self.broadcast_changes(state_before, state_after)
# TODO: Add runInX functions to devices. These functions have to be similar to member_set and should be able to
# replace it.
@property
def id(self):
return self._id
@property
def alias(self):
return self._alias
@property
def firefly(self):
return self._firefly
@property
def command_map(self):
return self._command_mapping
@property
def request_map(self):
return self._request_mapping
@property
def tags(self):
return self._tags
@property
def room(self):
return self._room
@property
def type(self):
return TYPE_DEVICE
@property
def security(self):
return self._security_monitoring
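# Editor's sketch (not part of the original module): a minimal subclass
# showing the add_command/add_request wiring documented above. The package
# name and device type are hypothetical, and instantiation requires a running
# Firefly core, so this class is illustrative only.
class ExampleSwitch(Device):
  def __init__(self, firefly, package, **kwargs):
    commands = ['on', 'off']
    requests = ['state']
    # Device.__init__ iterates initial_values, so supply a default here.
    kwargs.setdefault('initial_values', {'_state': 'off'})
    super().__init__(firefly, package, 'Example Switch', 'editor sketch',
                     commands, requests, 'switch', **kwargs)
    self.add_command('on', self.turn_on)
    self.add_command('off', self.turn_off)
    self.add_request('state', self.get_state)
  def turn_on(self, **kwargs):
    self._state = 'on'
  def turn_off(self, **kwargs):
    self._state = 'off'
  def get_state(self, **kwargs):
    return self._state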
|
Firefly-Automation/Firefly
|
Firefly/helpers/device/device.py
|
Python
|
apache-2.0
| 14,844
|
[
"Firefly"
] |
3c303fea70fc9c2e44e2c843407a40c234fd25d06df4541fe858ee2fb942480e
|
import os
import logging
# Set up logging here so we can log in the settings module.
# After initialization, Django's LOGGING settings are used.
logging.basicConfig(
format='%(asctime)s %(module)s %(message)s',
level=logging.DEBUG
)
logger = logging.getLogger(__name__)
DEBUG = True
TEMPLATE_DEBUG = True
# Set matplotlib defaults.
# Import specific matplotlib settings for this app.
from lizard_kml.jarkus.matplotlib_settings import set_matplotlib_defaults
set_matplotlib_defaults()
# SETTINGS_DIR allows media paths and so on to be relative to this settings file
# instead of hardcoded to c:\only\on\my\computer.
SETTINGS_DIR = os.path.dirname(os.path.realpath(__file__))
# BUILDOUT_DIR is for access to the "surrounding" buildout, for instance for
# BUILDOUT_DIR/var/static files to give django-staticfiles a proper place
# to place all collected static files.
BUILDOUT_DIR = os.path.abspath(os.path.join(SETTINGS_DIR, '..'))
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(asctime)s %(name)s %(levelname)s\n%(message)s',
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'': {
'handlers': ['console'],
'propagate': True,
'level': 'DEBUG',
},
'django.db.backends': {
'handlers': ['null'], # Quiet by default!
'propagate': False,
'level': 'DEBUG',
},
},
}
# ENGINE: 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
# In case of geodatabase, prepend with:
# django.contrib.gis.db.backends.(postgis)
DATABASES = {
# If you want to use another database, consider putting the database
# settings in localsettings.py. Otherwise, if you change the settings in
# the current file and commit them to the repository, other developers will
# also use these settings whether they have that database or not.
# One of those other developers is Jenkins, our continuous integration
# solution. Jenkins can only run the tests of the current application when
# the specified database exists. When the tests cannot run, Jenkins sees
# that as an error.
'default': {
'NAME': os.path.join(BUILDOUT_DIR, 'var', 'sqlite', 'test.db'),
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '', # empty string for localhost.
'PORT': '', # empty string for default.
}
}
SITE_ID = 1
INSTALLED_APPS = [
'lizard_kml',
'south',
'django_nose',
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.gis',
'django.contrib.sites',
'django.contrib.staticfiles',
]
ROOT_URLCONF = 'lizard_kml.urls'
TEMPLATE_CONTEXT_PROCESSORS = (
# Default django 1.4.5 processors.
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages"
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(BUILDOUT_DIR, 'var', 'cache'),
}
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--with-doctest', '--verbosity=3']
SOUTH_TESTS_MIGRATE = False # To disable migrations and use syncdb instead
SKIP_SOUTH_TESTS = True # To disable South's own unit tests
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'nl-NL'
# For at-runtime language switching. Note: they're shown in reverse order in
# the interface!
LANGUAGES = [
# ('en', 'English'),
('nl', 'Nederlands'),
]
# If you set this to False, Django will make some optimizations so as not to
# load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = (os.path.join(SETTINGS_DIR, 'locale'),)
SECRET_KEY = 'testsettings'
# Used for django-staticfiles (and for media files)
STATIC_URL = '/static_media/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'static')
MEDIA_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'media')
LIZARD_KML_STANDALONE = True
# NetCDF databases containing transect (coastal) data, for example.
# Local copies are highly recommended:
# $ bin/django sync_netcdfs
NC_RESOURCE_LOCAL_DIR = os.path.join(BUILDOUT_DIR, 'var', 'netcdf')
NC_RESOURCE_URLS = {
'transect': 'http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/profiles/transect.nc',
# Use this if we break the deltares server:
#'transect': 'http://opendap.tudelft.nl/thredds/dodsC/data2/deltares/rijkswaterstaat/jarkus/profiles/transect.nc',
'BKL_TKL_TND': 'http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/BKL_TKL_MKL/BKL_TKL_TND.nc',
'DF': 'http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/DuneFoot/DF.nc',
'MKL': 'http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/BKL_TKL_MKL/MKL.nc',
'strandbreedte': 'http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/strandbreedte/strandbreedte.nc',
'strandlijnen': 'http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/MHW_MLW/MHW_MLW.nc',
'suppleties': 'http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/suppleties/suppleties.nc',
'faalkans': 'http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/faalkans_PC-Ring/faalkans.nc',
}
# by default, just use the remote versions
NC_RESOURCE = dict(NC_RESOURCE_URLS)
# when a local copy of the .nc file is provided, use that instead
for key in NC_RESOURCE:
fn = key + '.nc'
path = os.path.join(NC_RESOURCE_LOCAL_DIR, fn)
if os.path.isfile(path):
logger.info('Using %s', path)
NC_RESOURCE[key] = path
else:
logger.info('Using %s', NC_RESOURCE[key])
try:
# Import local settings that aren't stored in svn/git.
from lizard_kml.local_testsettings import *
except ImportError:
pass
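# Editor's sketch: a hypothetical lizard_kml/local_testsettings.py that
# overrides the defaults above without committing machine-specific values.
#
#     DEBUG = False
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.contrib.gis.db.backends.postgis',
#             'NAME': 'lizard_kml',
#             'USER': 'buildout',
#             'PASSWORD': 'buildout',
#             'HOST': 'localhost',
#             'PORT': '5432',
#         }
#     }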
|
lizardsystem/lizard-kml
|
lizard_kml/testsettings.py
|
Python
|
gpl-3.0
| 6,592
|
[
"NetCDF"
] |
35903e9f7a2cfd10000550f60500563b12ee0979f256ef0eedd2c2817c7b0bf6
|
"""
Base Page for fitting
"""
from __future__ import print_function
import sys
import os
import wx
import numpy as np
import time
import copy
import math
import json
import logging
import traceback
from collections import defaultdict
from wx.lib.scrolledpanel import ScrolledPanel
from sasmodels.weights import MODELS as POLYDISPERSITY_MODELS
from sas.sasgui.guiframe.panel_base import PanelBase
from sas.sasgui.guiframe.utils import format_number, check_float, IdList, \
check_int
from sas.sasgui.guiframe.events import PanelOnFocusEvent
from sas.sasgui.guiframe.events import StatusEvent
from sas.sasgui.guiframe.events import AppendBookmarkEvent
from sas.sasgui.guiframe.dataFitting import Data2D
from sas.sasgui.guiframe.dataFitting import Data1D
from sas.sasgui.guiframe.dataFitting import check_data_validity
from sas.sasgui.guiframe.gui_style import GUIFRAME_ID
from sas.sascalc.dataloader.data_info import Detector
from sas.sascalc.dataloader.data_info import Source
from sas.sasgui.perspectives.fitting.pagestate import PageState
from sas.sasgui.guiframe.CategoryInstaller import CategoryInstaller
from sas.sasgui.guiframe.documentation_window import DocumentationWindow
logger = logging.getLogger(__name__)
(PageInfoEvent, EVT_PAGE_INFO) = wx.lib.newevent.NewEvent()
(PreviousStateEvent, EVT_PREVIOUS_STATE) = wx.lib.newevent.NewEvent()
(NextStateEvent, EVT_NEXT_STATE) = wx.lib.newevent.NewEvent()
_BOX_WIDTH = 76
_QMIN_DEFAULT = 0.0005
_QMAX_DEFAULT = 0.5
_NPTS_DEFAULT = 50
# Control panel width
if sys.platform.count("win32") > 0:
PANEL_WIDTH = 450
FONT_VARIANT = 0
ON_MAC = False
else:
PANEL_WIDTH = 500
FONT_VARIANT = 1
ON_MAC = True
CUSTOM_MODEL = 'Plugin Models'
class BasicPage(ScrolledPanel, PanelBase):
"""
    This class provides the general structure of the fitpanel page
"""
# Internal name for the AUI manager
window_name = "Fit Page"
# Title to appear on top of the window
window_caption = "Fit Page "
    # These two buttons have fixed IDs since they seem to be created more
    # frequently than they need to be. In particular, set_dispers_sizer()
    # is called by _on_select_model
ID_BOOKMARK = wx.NewId()
ID_DISPERSER_HELP = wx.NewId()
_id_pool = IdList()
def __init__(self, parent, color='blue', **kwargs):
"""
"""
ScrolledPanel.__init__(self, parent, **kwargs)
PanelBase.__init__(self, parent)
self.SetupScrolling()
# Set window's font size
self.SetWindowVariant(variant=FONT_VARIANT)
self.SetBackgroundColour(color)
self._ids = iter(self._id_pool)
# parent of the page
self.parent = parent
# manager is the fitting plugin
# owner of the page (fitting plugin)
self.event_owner = None
# current model
self.model = None
self.m_name = None
self.index_model = None
self.panel = None
# data
self.data = None
# list of available data
self.data_list = []
self.mask = None
self.uid = wx.NewId()
self.graph_id = None
# Q range for data set
self.qmin_data_set = np.inf
self.qmax_data_set = None
self.npts_data_set = 0
# Q range
self.qmin = None
self.qmax = None
self.qmax_x = _QMAX_DEFAULT
self.qmin_x = _QMIN_DEFAULT
self.npts_x = _NPTS_DEFAULT
        # total number of points: float
self.npts = None
self.num_points = None
# smear default
self.current_smearer = None
# 2D smear accuracy default
self.smear2d_accuracy = 'Low'
# slit smear:
self.dxl = None
self.dxw = None
# pinhole smear
self.dx_percent = None
        # smearing attributes
self.enable_smearer = None
self.disable_smearer = None
self.pinhole_smearer = None
self.slit_smearer = None
        # weighting attributes
self.dI_noweight = None
self.dI_didata = None
self.dI_sqrdata = None
self.dI_idata = None
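        # Note: these four radio buttons select the fit weighting scheme:
        # no weighting, the dI column from the data, sqrt(I), or I itself.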
        # other attributes
self.dq_l = None
self.dq_r = None
self.tcChi = None
self.disp_box = None
self.model_disp = None
self.Npts_fit = None
self.Npts_total = None
self.theory_qmin = None
self.theory_qmax = None
self.theory_qmin_x = None
self.theory_qmax_x = None
self.btEditMask = None
self.btFit = None
self.sld_axes = None
self.multi_factor = None
self.disp_cb_dict = {}
# self.state = PageState(parent=parent)
# dictionary containing list of models
self.model_list_box = {}
# Data member to store the dispersion object created
self._disp_obj_dict = {}
# selected parameters to apply dispersion
self.disp_cb_dict = {}
# smearer object
self.enable2D = False
self._has_magnetic = False
self.magnetic_on = False
self.is_mac = ON_MAC
self.formfactorbox = None
self.structurebox = None
self.categorybox = None
        # list of model parameters. Each item must have the same length and
        # describes one parameter:
        # [cb state, name, value, "+/-", error of fit, min, max, units]
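        # i.e. item[0]=checkbox, item[1]=name, item[2]=value ctrl,
        # item[3]="+/-" text, item[4]=error ctrl, item[5]=min ctrl,
        # item[6]=max ctrl, item[7]=units; later code indexes rows this way.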
self.parameters = []
        # non-fittable parameters whose value is a string
self.str_parameters = []
        # list of parameters to fit; must have the same layout as self.parameters
self.param_toFit = []
        # parameter-like rows holding non-fittable parameter info
        self.fixed_param = []
        # parameter-like rows holding fittable parameter info
self.fittable_param = []
# list of dispersion parameters
self.disp_list = []
self.disp_name = ""
# list of orientation parameters
self.orientation_params = []
self.orientation_params_disp = []
        # self.model should ALWAYS be None here: it was set to None above in
        # this long __init__ and there is no obvious function call in between
        # that could change it - commented out on 4/8/2014 by PDB. Remove once
        # it is clear the check is pointless.
# if self.model is not None:
# self.disp_list = self.model.getDispParamList()
self.temp_multi_functional = False
# enable model 2D draw
self.enable2D = False
# check that the fit range is correct to plot the model again
self.fitrange = True
# Create memento to save the current state
self.state = PageState(parent=self.parent,
model=self.model, data=self.data)
# flag to determine if state has change
self.state_change = False
# save customized array
self.values = {} # type: Dict[str, List[float, ...]]
self.weights = {} # type: Dict[str, List[float, ...]]
# retrieve saved state
self.number_saved_state = 0
# dictionary of saved state
self.saved_states = {}
# Create context menu for page
self.popUpMenu = wx.Menu()
wx_id = self._ids.next()
self._keep = wx.MenuItem(self.popUpMenu, wx_id, "Add bookmark",
" Keep the panel status to recall it later")
self.popUpMenu.AppendItem(self._keep)
self._keep.Enable(False)
self._set_bookmark_flag(False)
self._set_save_flag(False)
wx.EVT_MENU(self, wx_id, self.on_bookmark)
self.popUpMenu.AppendSeparator()
# Default locations
self._default_save_location = os.getcwd()
# save initial state on context menu
# self.onSave(event=None)
self.Bind(wx.EVT_CONTEXT_MENU, self.onContextMenu)
# bind key event
self.Bind(wx.EVT_LEFT_DOWN, self.on_left_down)
# create the basic structure of the panel with empty sizer
self.define_page_structure()
# drawing Initial dispersion parameters sizer
self.set_dispers_sizer()
# layout
self.set_layout()
def set_index_model(self, index):
"""
Index related to this page
"""
self.index_model = index
def create_default_data(self):
"""
        Given the user selection, create 1D or 2D data.
        Only used when the page is in theory mode.
"""
if not hasattr(self, "model_view"):
return
toggle_mode_on = self.model_view.IsEnabled() or self.data is None
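        # theory mode: the 1D/2D toggle is enabled or no data is loaded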
if toggle_mode_on:
if self.enable2D and not check_data_validity(self.data):
self._create_default_2d_data()
else:
if self.pointsbox.GetValue():
self._create_log_1d_data()
else:
self._create_default_1d_data()
if self.model is not None:
if not self.data.is_data:
self._manager.page_finder[self.uid].set_fit_data(
data=[self.data])
self.on_smear_helper(update=True)
self.state.enable_smearer = self.enable_smearer.GetValue()
self.state.disable_smearer = self.disable_smearer.GetValue()
self.state.pinhole_smearer = self.pinhole_smearer.GetValue()
self.state.slit_smearer = self.slit_smearer.GetValue()
def _create_default_1d_data(self):
"""
        Create default 1D data for the fitting perspective.
        Only used when the page is in theory mode.
:warning: This data is never plotted.
"""
x = np.linspace(start=self.qmin_x, stop=self.qmax_x,
num=self.npts_x, endpoint=True)
self.data = Data1D(x=x)
self.data.xaxis('\\rm{Q}', "A^{-1}")
self.data.yaxis('\\rm{Intensity}', "cm^{-1}")
self.data.is_data = False
self.data.id = str(self.uid) + " data"
self.data.group_id = str(self.uid) + " Model1D"
def _create_log_1d_data(self):
"""
        Create log-spaced 1D data for the fitting perspective.
        Only used when the page is in theory mode.
:warning: This data is never plotted.
"""
if self.qmin_x >= 1.e-10:
qmin = np.log10(self.qmin_x)
else:
qmin = -10.
if self.qmax_x <= 1.e10:
qmax = np.log10(self.qmax_x)
else:
qmax = 10.
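        # The exponents are clamped to [-10, 10] decades so that np.logspace
        # stays finite even for degenerate Q limits.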
x = np.logspace(start=qmin, stop=qmax,
num=self.npts_x, endpoint=True, base=10.0)
self.data = Data1D(x=x)
self.data.xaxis('\\rm{Q}', "A^{-1}")
self.data.yaxis('\\rm{Intensity}', "cm^{-1}")
self.data.is_data = False
self.data.id = str(self.uid) + " data"
self.data.group_id = str(self.uid) + " Model1D"
def _create_default_2d_data(self):
"""
        Create default 2D data.
        Only used when the page is in theory mode.
:warning: This data is never plotted.
"""
self.data = Data2D()
qmax = self.qmax_x / math.sqrt(2)
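        # per-axis limit: the corners of the square detector grid then
        # reach self.qmax_x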
self.data.xaxis('\\rm{Q_{x}}', 'A^{-1}')
self.data.yaxis('\\rm{Q_{y}}', 'A^{-1}')
self.data.is_data = False
self.data.id = str(self.uid) + " data"
self.data.group_id = str(self.uid) + " Model2D"
# Default values
self.data.detector.append(Detector())
index = len(self.data.detector) - 1
self.data.detector[index].distance = 8000 # mm
self.data.source.wavelength = 6 # A
self.data.detector[index].pixel_size.x = 5 # mm
self.data.detector[index].pixel_size.y = 5 # mm
self.data.detector[index].beam_center.x = qmax
self.data.detector[index].beam_center.y = qmax
xmax = qmax
xmin = -qmax
ymax = qmax
ymin = -qmax
qstep = self.npts_x
x = np.linspace(start=xmin, stop=xmax, num=qstep, endpoint=True)
y = np.linspace(start=ymin, stop=ymax, num=qstep, endpoint=True)
# use data info instead
new_x = np.tile(x, (len(y), 1))
new_y = np.tile(y, (len(x), 1))
new_y = new_y.swapaxes(0, 1)
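        # the tiling above is equivalent to np.meshgrid(x, y): new_x varies
        # along columns and new_y along rows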
        # all data are now required as flat 1D arrays
qx_data = new_x.flatten()
qy_data = new_y.flatten()
q_data = np.sqrt(qx_data * qx_data + qy_data * qy_data)
# set all True (standing for unmasked) as default
mask = np.ones(len(qx_data), dtype=bool)
# store x and y bin centers in q space
x_bins = x
y_bins = y
self.data.source = Source()
self.data.data = np.ones(len(mask))
self.data.err_data = np.ones(len(mask))
self.data.qx_data = qx_data
self.data.qy_data = qy_data
self.data.q_data = q_data
self.data.mask = mask
self.data.x_bins = x_bins
self.data.y_bins = y_bins
        # max and min taking the bin sizes into account
self.data.xmin = xmin
self.data.xmax = xmax
self.data.ymin = ymin
self.data.ymax = ymax
def on_set_focus(self, event):
"""
        On set focus, update the gui manager and menu
"""
if self._manager is not None:
wx.PostEvent(self._manager.parent, PanelOnFocusEvent(panel=self))
self.on_tap_focus()
def on_tap_focus(self):
"""
        Update menu1 when clicking the page tab
"""
if self._manager.menu1 is not None:
chain_menu = self._manager.menu1.FindItemById(
self._manager.id_reset_flag)
chain_menu.Enable(self.batch_on)
sim_menu = self._manager.menu1.FindItemById(self._manager.id_simfit)
flag = self.data.is_data\
and (self.model is not None)
sim_menu.Enable(not self.batch_on and flag)
batch_menu = \
self._manager.menu1.FindItemById(self._manager.id_batchfit)
batch_menu.Enable(self.batch_on and flag)
def onContextMenu(self, event):
"""
        Show the context menu used to retrieve a saved state
"""
pos = event.GetPosition()
pos = self.ScreenToClient(pos)
self.PopupMenu(self.popUpMenu, pos)
def onUndo(self, event):
"""
Cancel the previous action
"""
event = PreviousStateEvent(page=self)
wx.PostEvent(self.parent, event)
def onRedo(self, event):
"""
Restore the previous action cancelled
"""
event = NextStateEvent(page=self)
wx.PostEvent(self.parent, event)
def define_page_structure(self):
"""
Create empty sizer for a panel
"""
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.sizer0 = wx.BoxSizer(wx.VERTICAL)
self.sizer1 = wx.BoxSizer(wx.VERTICAL)
self.sizer2 = wx.BoxSizer(wx.VERTICAL)
self.sizer3 = wx.BoxSizer(wx.VERTICAL)
self.sizer4 = wx.BoxSizer(wx.VERTICAL)
self.sizer5 = wx.BoxSizer(wx.VERTICAL)
self.sizer6 = wx.BoxSizer(wx.VERTICAL)
self.sizer0.SetMinSize((PANEL_WIDTH, -1))
self.sizer1.SetMinSize((PANEL_WIDTH, -1))
self.sizer2.SetMinSize((PANEL_WIDTH, -1))
self.sizer3.SetMinSize((PANEL_WIDTH, -1))
self.sizer4.SetMinSize((PANEL_WIDTH, -1))
self.sizer5.SetMinSize((PANEL_WIDTH, -1))
self.sizer6.SetMinSize((PANEL_WIDTH, -1))
self.vbox.Add(self.sizer0)
self.vbox.Add(self.sizer1)
self.vbox.Add(self.sizer2)
self.vbox.Add(self.sizer3)
self.vbox.Add(self.sizer4)
self.vbox.Add(self.sizer5)
self.vbox.Add(self.sizer6)
def set_layout(self):
"""
layout
"""
self.vbox.Layout()
self.vbox.Fit(self)
self.SetSizer(self.vbox)
self.Centre()
def set_owner(self, owner):
"""
set owner of fitpage
        :param owner: the class responsible for plotting
"""
self.event_owner = owner
self.state.event_owner = owner
def get_state(self):
"""
return the current page state
"""
return self.state
def get_data(self):
"""
return the current data
"""
return self.data
def get_data_list(self):
"""
        return the current list of data
"""
return self.data_list
def set_manager(self, manager):
"""
set panel manager
:param manager: instance of plugin fitting
"""
self._manager = manager
self.state.manager = manager
def populate_box(self, model_dict):
"""
Store list of model
:param model_dict: dictionary containing list of models
"""
self.model_list_box = model_dict
self.state.model_list_box = self.model_list_box
self.initialize_combox()
def set_model_dictionary(self, model_dict):
"""
Store a dictionary linking model name -> model object
:param model_dict: dictionary containing list of models
"""
self.model_dict = model_dict
def initialize_combox(self):
"""
put default value in the combo box
"""
if self.model_list_box is not None and len(self.model_list_box) > 0:
self._populate_box(self.structurebox,
self.model_list_box["Structure Factors"])
self.structurebox.Insert("None", 0, None)
self.structurebox.SetSelection(0)
self.structurebox.Hide()
self.text2.Hide()
self.structurebox.Disable()
self.text2.Disable()
def set_dispers_sizer(self):
"""
        fill the sizer containing the polydispersity info
"""
# print "==== entering set_dispers_sizer ==="
self.sizer4.Clear(True)
name = "Polydispersity and Orientational Distribution"
box_description = wx.StaticBox(self, wx.ID_ANY, name)
box_description.SetForegroundColour(wx.BLUE)
boxsizer1 = wx.StaticBoxSizer(box_description, wx.VERTICAL)
# ----------------------------------------------------
self.disable_disp = wx.RadioButton(self, wx.ID_ANY, 'Off', (10, 10),
style=wx.RB_GROUP)
self.enable_disp = wx.RadioButton(self, wx.ID_ANY, 'On', (10, 30))
# best size for MAC and PC
if ON_MAC:
size_q = (30, 20)
else:
size_q = (20, 15)
self.disp_help_bt = wx.Button(self, self.ID_DISPERSER_HELP, '?',
style=wx.BU_EXACTFIT,
size=size_q)
self.disp_help_bt.Bind(wx.EVT_BUTTON, self.on_pd_help_clicked,
id=self.disp_help_bt.GetId())
        self.disp_help_bt.SetToolTipString("Help for polydispersity.")
self.Bind(wx.EVT_RADIOBUTTON, self._set_dipers_Param,
id=self.disable_disp.GetId())
self.Bind(wx.EVT_RADIOBUTTON, self._set_dipers_Param,
id=self.enable_disp.GetId())
# MAC needs SetValue
self.disable_disp.SetValue(True)
sizer_dispersion = wx.BoxSizer(wx.HORIZONTAL)
sizer_dispersion.Add((20, 20))
name = "" # Polydispersity and \nOrientational Distribution "
sizer_dispersion.Add(wx.StaticText(self, wx.ID_ANY, name))
sizer_dispersion.Add(self.enable_disp)
sizer_dispersion.Add((20, 20))
sizer_dispersion.Add(self.disable_disp)
sizer_dispersion.Add((25, 20))
sizer_dispersion.Add(self.disp_help_bt)
# fill a sizer for dispersion
boxsizer1.Add(sizer_dispersion, 0,
wx.TOP|wx.BOTTOM|wx.LEFT|wx.EXPAND|wx.ADJUST_MINSIZE,
border=5)
self.sizer4_4 = wx.GridBagSizer(6, 5)
boxsizer1.Add(self.sizer4_4)
# -----------------------------------------------------
self.sizer4.Add(boxsizer1, 0, wx.EXPAND | wx.ALL, 10)
self.sizer4_4.Layout()
self.sizer4.Layout()
self.Layout()
self.Refresh()
        # save the state of the enable/disable dispersion buttons
self.state.enable_disp = self.enable_disp.GetValue()
self.state.disable_disp = self.disable_disp.GetValue()
self.SetupScrolling()
def onResetModel(self, event):
"""
Reset model state
"""
menu = event.GetEventObject()
# post help message for the selected model
msg = menu.GetHelpString(event.GetId())
msg += " reloaded"
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
self.Show(False)
name = menu.GetLabel(event.GetId())
self._on_select_model_helper()
if self.model is not None:
self.m_name = self.model.name
if name in self.saved_states.keys():
previous_state = self.saved_states[name]
            # reset state of checkbox, textctrl and regular parameter values
self.reset_page(previous_state)
self.state.m_name = self.m_name
self.Show(True)
def on_preview(self, event):
"""
Report the current fit results
"""
# Get plot image from plotpanel
images, canvases = self.get_images()
# get the report dialog
self.state.report(images, canvases)
def on_save(self, event):
"""
Save the current state into file
"""
self.save_current_state()
new_state = self.state.clone()
# Ask the user the location of the file to write to.
path = None
if self.parent is not None:
self._default_save_location = \
self._manager.parent._default_save_location
dlg = wx.FileDialog(self, "Choose a file", self._default_save_location,
self.window_caption, "*.fitv", wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self._default_save_location = os.path.dirname(path)
self._manager.parent._default_save_location = \
self._default_save_location
else:
return None
# MAC always needs the extension for saving
extens = ".fitv"
        # Make sure the extension is included in the file name
fName = os.path.splitext(path)[0] + extens
        # the manager writes the state into the file
self._manager.save_fit_state(filepath=fName, fitstate=new_state)
return new_state
def on_copy(self, event):
"""
        Copy parameter values to the clipboard
"""
if event is not None:
event.Skip()
# It seems MAC needs wxCallAfter
if event.GetId() == GUIFRAME_ID.COPYEX_ID:
print("copy excel")
wx.CallAfter(self.get_copy_excel)
elif event.GetId() == GUIFRAME_ID.COPYLAT_ID:
print("copy latex")
wx.CallAfter(self.get_copy_latex)
else:
wx.CallAfter(self.get_copy)
def on_paste(self, event):
"""
Paste Parameter values to the panel if possible
"""
# if event is not None:
# event.Skip()
# It seems MAC needs wxCallAfter for the setvalues
# for multiple textctrl items, otherwise it tends to crash once a while
wx.CallAfter(self.get_paste)
# messages depending on the flag
# self._copy_info(True)
def _copy_info(self, flag):
"""
Send event depending on flag
        :param flag: flag that distinguishes the event
"""
# messages depending on the flag
if flag is None:
msg = " Parameter values are copied to the clipboard..."
infor = 'warning'
elif flag:
msg = " Parameter values are pasted from the clipboard..."
infor = "warning"
else:
msg = "Error occurred: "
msg += "No valid parameter values to paste from the clipboard..."
infor = "warning"
# inform msg to wx
wx.PostEvent(self._manager.parent,
StatusEvent(status=msg, info=infor))
def _get_time_stamp(self):
"""
        return time and date strings
"""
# date and time
year, month, day, hour, minute, second, _, _, _ = time.localtime()
current_time = str(hour) + ":" + str(minute) + ":" + str(second)
current_date = str(month) + "/" + str(day) + "/" + str(year)
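        # e.g. "14:5:9" and "4/8/2014"; the fields are not zero-padded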
return current_time, current_date
def on_bookmark(self, event):
"""
save history of the data and model
"""
if self.model is None:
msg = "Can not bookmark; Please select Data and Model first..."
wx.MessageBox(msg, 'Info')
return
self.save_current_state()
new_state = self.state.clone()
# Add model state on context menu
self.number_saved_state += 1
current_time, current_date = self._get_time_stamp()
        # name = self.model.name + "[%g]" % self.number_saved_state
        name = "Fitting: [%g] " % self.number_saved_state
        name += self.model.__class__.__name__
        name += " bookmarked at %s on %s" % (current_time, current_date)
self.saved_states[name] = new_state
# Add item in the context menu
msg = "Model saved at %s on %s" % (current_time, current_date)
# post help message for the selected model
msg += " Saved! right click on this page to retrieve this model"
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
self.popUpMenu.Append(self.ID_BOOKMARK, name, str(msg))
wx.EVT_MENU(self, self.ID_BOOKMARK, self.onResetModel)
wx.PostEvent(self._manager.parent,
AppendBookmarkEvent(title=name,
hint=str(msg),
handler=self._back_to_bookmark))
def _back_to_bookmark(self, event):
"""
Back to bookmark
"""
self._manager.on_perspective(event)
self.onResetModel(event)
self._draw_model()
def onSetFocus(self, evt):
"""
        highlight the current textctrl and hide the error text control shown
after fitting
"""
return
def read_file(self, path):
"""
Read two columns file
:param path: the path to the file to read
"""
try:
if path is None:
status = " Selected Distribution was not loaded: %s" % path
wx.PostEvent(self._manager.parent,
StatusEvent(status=status))
return None, None
input_f = open(path, 'r')
buff = input_f.read()
lines = buff.split('\n')
input_f.close()
angles = []
weights = []
for line in lines:
toks = line.split()
try:
angle = float(toks[0])
weight = float(toks[1])
angles.append(angle)
weights.append(weight)
except Exception:
# Skip non-data lines
logger.error(traceback.format_exc())
return np.array(angles), np.array(weights)
except:
raise
def createMemento(self):
"""
return the current state of the page
"""
return self.state.clone()
def save_current_state(self):
"""
Store current state
"""
# save model option
if self.model is not None:
self.disp_list = self.model.getDispParamList()
self.state.disp_list = copy.deepcopy(self.disp_list)
self.state.model = self.model.clone()
# model combobox: complex code because of mac's silent error
if self.structurebox is not None:
if self.structurebox.IsShown():
self.state.structurecombobox = 'None'
s_select = self.structurebox.GetSelection()
if s_select > 0:
self.state.structurecombobox = \
self.structurebox.GetString(s_select)
if self.formfactorbox is not None:
f_select = self.formfactorbox.GetSelection()
if f_select > 0:
self.state.formfactorcombobox = \
self.formfactorbox.GetString(f_select)
if self.categorybox is not None:
cb_select = self.categorybox.GetSelection()
if cb_select > 0:
self.state.categorycombobox = \
self.categorybox.GetString(cb_select)
self.state.enable2D = copy.deepcopy(self.enable2D)
self.state.values = copy.deepcopy(self.values)
self.state.weights = copy.deepcopy(self.weights)
# save data
self.state.data = copy.deepcopy(self.data)
self.state.qmax_x = self.qmax_x
self.state.qmin_x = self.qmin_x
self.state.dI_noweight = copy.deepcopy(self.dI_noweight.GetValue())
self.state.dI_didata = copy.deepcopy(self.dI_didata.GetValue())
self.state.dI_sqrdata = copy.deepcopy(self.dI_sqrdata.GetValue())
self.state.dI_idata = copy.deepcopy(self.dI_idata.GetValue())
self.state.dq_l = self.dq_l
self.state.dq_r = self.dq_r
if hasattr(self, "enable_disp"):
self.state.enable_disp = self.enable_disp.GetValue()
self.state.disable_disp = self.disable_disp.GetValue()
self.state.smearer = copy.deepcopy(self.current_smearer)
if hasattr(self, "enable_smearer"):
self.state.enable_smearer = \
copy.deepcopy(self.enable_smearer.GetValue())
self.state.disable_smearer = \
copy.deepcopy(self.disable_smearer.GetValue())
self.state.pinhole_smearer = \
copy.deepcopy(self.pinhole_smearer.GetValue())
self.state.dx_percent = copy.deepcopy(self.dx_percent)
self.state.dxl = copy.deepcopy(self.dxl)
self.state.dxw = copy.deepcopy(self.dxw)
self.state.slit_smearer = copy.deepcopy(self.slit_smearer.GetValue())
if len(self._disp_obj_dict) > 0:
for k, v in self._disp_obj_dict.iteritems():
self.state._disp_obj_dict[k] = v.type
self.state.values = copy.deepcopy(self.values)
self.state.weights = copy.deepcopy(self.weights)
# save plotting range
self._save_plotting_range()
self.state.orientation_params = []
self.state.orientation_params_disp = []
self.state.parameters = []
self.state.fittable_param = []
self.state.fixed_param = []
self.state.str_parameters = []
        # save checkbutton states and textctrl values
self._copy_parameters_state(self.str_parameters,
self.state.str_parameters)
self._copy_parameters_state(self.orientation_params,
self.state.orientation_params)
self._copy_parameters_state(self.orientation_params_disp,
self.state.orientation_params_disp)
self._copy_parameters_state(self.parameters, self.state.parameters)
self._copy_parameters_state(self.fittable_param,
self.state.fittable_param)
self._copy_parameters_state(self.fixed_param, self.state.fixed_param)
# save chisqr
self.state.tcChi = self.tcChi.GetValue()
def save_current_state_fit(self):
"""
Store current state for fit_page
"""
# save model option
if self.model is not None:
self.disp_list = self.model.getDispParamList()
self.state.disp_list = copy.deepcopy(self.disp_list)
self.state.model = self.model.clone()
self.state.enable2D = copy.deepcopy(self.enable2D)
self.state.values = copy.deepcopy(self.values)
self.state.weights = copy.deepcopy(self.weights)
# save data
self.state.data = copy.deepcopy(self.data)
if hasattr(self, "enable_disp"):
self.state.enable_disp = self.enable_disp.GetValue()
self.state.disable_disp = self.disable_disp.GetValue()
self.state.smearer = copy.deepcopy(self.current_smearer)
if hasattr(self, "enable_smearer"):
self.state.enable_smearer = \
copy.deepcopy(self.enable_smearer.GetValue())
self.state.disable_smearer = \
copy.deepcopy(self.disable_smearer.GetValue())
self.state.pinhole_smearer = \
copy.deepcopy(self.pinhole_smearer.GetValue())
self.state.slit_smearer = copy.deepcopy(self.slit_smearer.GetValue())
self.state.dI_noweight = copy.deepcopy(self.dI_noweight.GetValue())
self.state.dI_didata = copy.deepcopy(self.dI_didata.GetValue())
self.state.dI_sqrdata = copy.deepcopy(self.dI_sqrdata.GetValue())
self.state.dI_idata = copy.deepcopy(self.dI_idata.GetValue())
if hasattr(self, "disp_box") and self.disp_box is not None:
self.state.disp_box = self.disp_box.GetCurrentSelection()
if len(self.disp_cb_dict) > 0:
for k, v in self.disp_cb_dict.iteritems():
if v is None:
self.state.disp_cb_dict[k] = v
else:
try:
self.state.disp_cb_dict[k] = v.GetValue()
except:
self.state.disp_cb_dict[k] = None
if len(self._disp_obj_dict) > 0:
for k, v in self._disp_obj_dict.iteritems():
self.state._disp_obj_dict[k] = v.type
self.state.values = copy.deepcopy(self.values)
self.state.weights = copy.deepcopy(self.weights)
# save plotting range
self._save_plotting_range()
        # save checkbutton states and textctrl values
self._copy_parameters_state(self.orientation_params,
self.state.orientation_params)
self._copy_parameters_state(self.orientation_params_disp,
self.state.orientation_params_disp)
self._copy_parameters_state(self.parameters, self.state.parameters)
self._copy_parameters_state(self.fittable_param,
self.state.fittable_param)
self._copy_parameters_state(self.fixed_param, self.state.fixed_param)
def check_invalid_panel(self):
"""
check if the user can already perform some action with this panel
"""
if self.data is None:
self.disable_smearer.SetValue(True)
self.disable_disp.SetValue(True)
msg = "Please load Data and select Model to start..."
wx.MessageBox(msg, 'Info')
return True
def set_model_state(self, state):
"""
reset page given a model state
"""
self.disp_cb_dict = state.disp_cb_dict
self.disp_list = state.disp_list
# fill model combobox
self._show_combox_helper()
# select the current model
try:
# to support older version
category_pos = int(state.categorycombobox)
except:
category_pos = 0
for ind_cat in range(self.categorybox.GetCount()):
            if self.categorybox.GetString(ind_cat) == \
state.categorycombobox:
category_pos = int(ind_cat)
break
self.categorybox.Select(category_pos)
try:
# to support older version
formfactor_pos = int(state.formfactorcombobox)
except:
formfactor_pos = 0
for ind_form in range(self.formfactorbox.GetCount()):
if self.formfactorbox.GetString(ind_form) == \
state.formfactorcombobox:
formfactor_pos = int(ind_form)
break
self.formfactorbox.Select(formfactor_pos)
try:
# to support older version
structfactor_pos = int(state.structurecombobox)
except:
structfactor_pos = 0
for ind_struct in range(self.structurebox.GetCount()):
if self.structurebox.GetString(ind_struct) == \
state.structurecombobox:
structfactor_pos = int(ind_struct)
break
self.structurebox.SetSelection(structfactor_pos)
if state.multi_factor is not None:
self.multifactorbox.SetSelection(state.multi_factor)
        # reset state of checkbox, textctrl and regular parameter values
self._reset_parameters_state(self.orientation_params_disp,
state.orientation_params_disp)
self._reset_parameters_state(self.orientation_params,
state.orientation_params)
self._reset_parameters_state(self.str_parameters,
state.str_parameters)
self._reset_parameters_state(self.parameters, state.parameters)
# display dispersion info layer
self.enable_disp.SetValue(state.enable_disp)
self.disable_disp.SetValue(state.disable_disp)
if hasattr(self, "disp_box") and self.disp_box is not None:
self.disp_box.SetSelection(state.disp_box)
n = self.disp_box.GetCurrentSelection()
dispersity = self.disp_box.GetClientData(n)
name = dispersity.__name__
self._set_dipers_Param(event=None)
if name == "ArrayDispersion":
for item in self.disp_cb_dict.keys():
if hasattr(self.disp_cb_dict[item], "SetValue"):
self.disp_cb_dict[item].SetValue(
state.disp_cb_dict[item])
# Create the dispersion objects
disp_model = POLYDISPERSITY_MODELS['array']()
if hasattr(state, "values") and \
self.disp_cb_dict[item].GetValue():
if len(state.values) > 0:
self.values = state.values
self.weights = state.weights
disp_model.set_weights(self.values,
state.weights)
else:
self._reset_dispersity()
self._disp_obj_dict[item] = disp_model
# Set the new model as the dispersion object
# for the selected parameter
self.model.set_dispersion(item, disp_model)
self.model._persistency_dict[item] = \
[state.values, state.weights]
else:
keys = self.model.getParamList()
for item in keys:
if item in self.disp_list and \
item not in self.model.details:
self.model.details[item] = ["", None, None]
self.disp_cb_dict = copy.deepcopy(state.disp_cb_dict)
self.state.disp_cb_dict = copy.deepcopy(state.disp_cb_dict)
# smearing info restore
if hasattr(self, "enable_smearer"):
# set smearing value whether or not the data
# contain the smearing info
self.enable_smearer.SetValue(state.enable_smearer)
self.disable_smearer.SetValue(state.disable_smearer)
self.onSmear(event=None)
self.pinhole_smearer.SetValue(state.pinhole_smearer)
self.slit_smearer.SetValue(state.slit_smearer)
self.dI_noweight.SetValue(state.dI_noweight)
self.dI_didata.SetValue(state.dI_didata)
self.dI_sqrdata.SetValue(state.dI_sqrdata)
self.dI_idata.SetValue(state.dI_idata)
# we have two more options for smearing
if self.pinhole_smearer.GetValue():
self.onPinholeSmear(event=None)
elif self.slit_smearer.GetValue():
self.onSlitSmear(event=None)
        # reset state of checkbox, textctrl and dispersity parameter values
self._reset_parameters_state(self.fittable_param, state.fittable_param)
self._reset_parameters_state(self.fixed_param, state.fixed_param)
# draw the model with previous parameters value
self._onparamEnter_helper()
self.select_param(event=None)
# Save state_fit
self.save_current_state_fit()
self._lay_out()
self.Refresh()
def get_cat_combo_box_pos(self, state):
"""
        Iterate through the categories to find the one containing the form factor
:return: combo_box_position
"""
for key, value in self.master_category_dict.iteritems():
formfactor = state.formfactorcombobox.split(":")
if isinstance(formfactor, list):
formfactor = formfactor[0]
for list_item in value:
if formfactor in list_item:
return self.categorybox.Items.index(key)
return 0
def reset_page_helper(self, state):
"""
Use page_state and change the state of existing page
:precondition: the page is already drawn or created
:postcondition: the state of the underlying data changes as well as the
state of the graphic interface
"""
if state is None:
return
# set data, etc. from the state
# reset page between theory and fitting from bookmarking
data = state.data
if data is None:
data_min = state.qmin
data_max = state.qmax
self.qmin_x = data_min
self.qmax_x = data_max
self.qmin.SetValue(str(data_min))
self.qmax.SetValue(str(data_max))
self.state.data = data
self.state.qmin = self.qmin_x
self.state.qmax = self.qmax_x
else:
self.set_data(data)
self.enable2D = state.enable2D
try:
self.magnetic_on = state.magnetic_on
except:
# Backward compatibility (for older state files)
self.magnetic_on = False
self.disp_cb_dict = state.disp_cb_dict
self.disp_list = state.disp_list
# fill model combobox
self._show_combox_helper()
# select the current model
state._convert_to_sasmodels()
state.categorycombobox = unicode(state.categorycombobox)
if state.categorycombobox in self.categorybox.Items:
category_pos = self.categorybox.Items.index(
state.categorycombobox)
else:
# Look in master list for model name (model.lower)
category_pos = self.get_cat_combo_box_pos(state)
self.categorybox.Select(category_pos)
self._show_combox(None)
from models import PLUGIN_NAME_BASE
if self.categorybox.GetValue() == CUSTOM_MODEL \
and PLUGIN_NAME_BASE not in state.formfactorcombobox:
state.formfactorcombobox = \
PLUGIN_NAME_BASE + state.formfactorcombobox
formfactor_pos = 0
for ind_form in range(self.formfactorbox.GetCount()):
if self.formfactorbox.GetString(ind_form) == \
(state.formfactorcombobox):
formfactor_pos = int(ind_form)
break
self.formfactorbox.Select(formfactor_pos)
structfactor_pos = 0
if state.structurecombobox is not None:
state.structurecombobox = unicode(state.structurecombobox)
for ind_struct in range(self.structurebox.GetCount()):
if self.structurebox.GetString(ind_struct) == \
(state.structurecombobox):
structfactor_pos = int(ind_struct)
break
self.structurebox.SetSelection(structfactor_pos)
if state.multi_factor is not None:
self.multifactorbox.SetSelection(state.multi_factor)
# draw the panel according to the new model parameter
self._on_select_model(event=None)
# take care of 2D button
if data is None and self.model_view.IsEnabled():
if self.enable2D:
self.model_view.SetLabel("2D Mode")
else:
self.model_view.SetLabel("1D Mode")
        # reset state of checkbox, textctrl and regular parameter values
self._reset_parameters_state(self.orientation_params_disp,
state.orientation_params_disp)
self._reset_parameters_state(self.orientation_params,
state.orientation_params)
self._reset_parameters_state(self.str_parameters,
state.str_parameters)
self._reset_parameters_state(self.parameters, state.parameters)
# display dispersion info layer
self.enable_disp.SetValue(state.enable_disp)
self.disable_disp.SetValue(state.disable_disp)
        # If the polydispersity is ON
if state.enable_disp:
            # reset the dispersion according to the state
self._set_dipers_Param(event=None)
self._reset_page_disp_helper(state)
# plotting range restore
self._reset_plotting_range(state)
# smearing info restore
if hasattr(self, "enable_smearer"):
# set smearing value whether or not the data
# contain the smearing info
self.enable_smearer.SetValue(state.enable_smearer)
self.disable_smearer.SetValue(state.disable_smearer)
self.onSmear(event=None)
self.pinhole_smearer.SetValue(state.pinhole_smearer)
self.slit_smearer.SetValue(state.slit_smearer)
try:
self.dI_noweight.SetValue(state.dI_noweight)
self.dI_didata.SetValue(state.dI_didata)
self.dI_sqrdata.SetValue(state.dI_sqrdata)
self.dI_idata.SetValue(state.dI_idata)
except:
# to support older state file formats
self.dI_noweight.SetValue(False)
self.dI_didata.SetValue(True)
self.dI_sqrdata.SetValue(False)
self.dI_idata.SetValue(False)
# we have two more options for smearing
if self.pinhole_smearer.GetValue():
self.dx_percent = state.dx_percent
if self.dx_percent is not None:
if state.dx_old:
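                    # older state files stored dx in absolute Q units;
                    # convert it to a percentage of the first Q point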
self.dx_percent = 100 * (self.dx_percent / self.data.x[0])
self.smear_pinhole_percent.SetValue("%.2f" % self.dx_percent)
self.onPinholeSmear(event=None)
elif self.slit_smearer.GetValue():
self.dxl = state.dxl
self.dxw = state.dxw
if self.dxl is not None:
self.smear_slit_height.SetValue(str(self.dxl))
if self.dxw is not None:
self.smear_slit_width.SetValue(str(self.dxw))
else:
self.smear_slit_width.SetValue('')
self.onSlitSmear(event=None)
        # reset state of checkbox, textctrl and dispersity parameter values
self._reset_parameters_state(self.fittable_param, state.fittable_param)
self._reset_parameters_state(self.fixed_param, state.fixed_param)
# draw the model with previous parameters value
self._onparamEnter_helper()
# reset the value of chisqr when not consistent with the value computed
self.tcChi.SetValue(str(self.state.tcChi))
# reset context menu items
self._reset_context_menu()
# set the value of the current state to the state given as parameter
self.state = state.clone()
self.state.m_name = self.m_name
def _reset_page_disp_helper(self, state):
"""
        Helper to reset the page's dispersion state
"""
keys = self.model.getParamList()
for item in keys:
if item in self.disp_list and \
item not in self.model.details:
self.model.details[item] = ["", None, None]
# for k,v in self.state.disp_cb_dict.iteritems():
self.disp_cb_dict = copy.deepcopy(state.disp_cb_dict)
self.state.disp_cb_dict = copy.deepcopy(state.disp_cb_dict)
self.values = copy.deepcopy(state.values)
self.weights = copy.deepcopy(state.weights)
for key, disp_type in state._disp_obj_dict.iteritems():
# disp_model = disp
disp_model = POLYDISPERSITY_MODELS[disp_type]()
self._disp_obj_dict[key] = disp_model
param_name = key.split('.')[0]
# Try to set dispersion only when available
            # e.g., skip the orientation angles for a 1D calculation
try:
self.model.set_dispersion(param_name, disp_model)
self.model._persistency_dict[key] = \
[state.values, state.weights]
except Exception:
logger.error(traceback.format_exc())
selection = self._find_polyfunc_selection(disp_model)
            for row in self.fittable_param:
                if row[1] == key and row[7] is not None:
                    row[7].SetSelection(selection)
                    # For the array disp_model, set the values and weights
                    if selection == 1:
                        disp_model.set_weights(self.values[key],
                                               self.weights[key])
                        try:
                            # Disable all fittable params for array
                            row[0].SetValue(False)
                            row[0].Disable()
                            row[2].Disable()
                            row[5].Disable()
                            row[6].Disable()
except Exception:
logger.error(traceback.format_exc())
# For array, disable all fixed params
if selection == 1:
for item in self.fixed_param:
if item[1].split(".")[0] == key.split(".")[0]:
                        # try it and skip it for the orientation params in 1D
try:
item[2].Disable()
except Exception:
logger.error(traceback.format_exc())
def _selectDlg(self):
"""
        open a file dialog to select a customized polydispersity function
"""
if self.parent is not None:
self._default_save_location = \
self._manager.parent.get_save_location()
dlg = wx.FileDialog(self, "Choose a weight file",
self._default_save_location, "",
"*.*", wx.OPEN)
path = None
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
dlg.Destroy()
return path
def _reset_context_menu(self):
"""
reset the context menu
"""
ids = iter(self._id_pool) # Reusing ids for context menu
for name, _ in self.state.saved_states.iteritems():
self.number_saved_state += 1
# Add item in the context menu
wx_id = ids.next()
msg = 'Save model and state %g' % self.number_saved_state
self.popUpMenu.Append(wx_id, name, msg)
wx.EVT_MENU(self, wx_id, self.onResetModel)
def _reset_plotting_range(self, state):
"""
Reset the plotting range to a given state
"""
self.qmin.SetValue(str(state.qmin))
self.qmax.SetValue(str(state.qmax))
def _save_typeOfmodel(self):
"""
        save the combo box selections for the type of model that can be selected
"""
# self.state.shape_rbutton = self.shape_rbutton.GetValue()
# self.state.shape_indep_rbutton = self.shape_indep_rbutton.GetValue()
# self.state.struct_rbutton = self.struct_rbutton.GetValue()
# self.state.plugin_rbutton = self.plugin_rbutton.GetValue()
self.state.structurecombobox = self.structurebox.GetValue()
self.state.formfactorcombobox = self.formfactorbox.GetValue()
self.state.categorycombobox = self.categorybox.GetValue()
# post state to fit panel
event = PageInfoEvent(page=self)
wx.PostEvent(self.parent, event)
def _save_plotting_range(self):
"""
save the state of plotting range
"""
self.state.qmin = self.qmin_x
self.state.qmax = self.qmax_x
self.state.npts = self.npts_x
def _onparamEnter_helper(self, is_modified=False):
"""
        check if the values entered by the user have changed and are valid
        for replotting the model
"""
# Flag to register when a parameter has changed.
# is_modified = False
self.fitrange = True
is_2Ddata = False
# self._undo.Enable(True)
# check if 2d data
if self.data.__class__.__name__ == "Data2D":
is_2Ddata = True
if self.model is not None:
# Either we get a is_modified = True passed in because
# _update_paramv_on_fit() has been called already or
# we need to check here ourselves.
if not is_modified:
is_modified = (self._check_value_enter(self.fittable_param)
or self._check_value_enter(self.fixed_param)
or self._check_value_enter(self.parameters))
# Here we should check whether the boundaries have been modified.
# If qmin and qmax have been modified, update qmin and qmax and
# set the is_modified flag to True
if self._validate_qrange(self.qmin, self.qmax):
tempmin = float(self.qmin.GetValue())
if tempmin != self.qmin_x:
self.qmin_x = tempmin
is_modified = True
tempmax = float(self.qmax.GetValue())
if tempmax != self.qmax_x:
self.qmax_x = tempmax
is_modified = True
if is_2Ddata:
is_modified = self._validate_Npts()
else:
is_modified = self._validate_Npts_1D()
else:
self.fitrange = False
        # if any value was modified, draw the model with the new values
if not self.fitrange:
# self.btFit.Disable()
if is_2Ddata:
self.btEditMask.Disable()
else:
if is_2Ddata and self.data.is_data and not self.batch_on:
self.btEditMask.Enable(True)
if is_modified and self.fitrange:
# Theory case: need to get npts value to draw
self.npts_x = float(self.Npts_total.GetValue())
self.Npts_fit.SetValue(str(self.Npts_total.GetValue()))
self._save_plotting_range()
self.create_default_data()
self.state_change = True
self._draw_model()
self.Refresh()
# logger.info("is_modified flag set to %g",is_modified)
return is_modified
def _update_paramv_on_fit(self):
"""
        make sure the param values are updated just before fitting
"""
# flag for qmin qmax check values
flag = True
self.fitrange = True
is_modified = False
# wx.PostEvent(self._manager.parent, StatusEvent(status=" \
# updating ... ",type="update"))
# So make sure that update param values on_Fit.
# self._undo.Enable(True)
if self.model is not None:
if self.Npts_total.GetValue() != self.Npts_fit.GetValue():
if not self.data.is_data:
self._manager.page_finder[self.uid].set_fit_data(
data=[self.data])
# Check the values
is_modified = (self._check_value_enter(self.fittable_param)
or self._check_value_enter(self.fixed_param)
or self._check_value_enter(self.parameters))
            # Here we should check whether the boundaries have been modified.
            # If qmin and qmax have been modified, update qmin and qmax and
            # set the is_modified flag to True
self.fitrange = self._validate_qrange(self.qmin, self.qmax)
if self.fitrange:
tempmin = float(self.qmin.GetValue())
if tempmin != self.qmin_x:
self.qmin_x = tempmin
tempmax = float(self.qmax.GetValue())
if tempmax != self.qmax_x:
self.qmax_x = tempmax
if tempmax == tempmin:
flag = False
temp_smearer = None
if not self.disable_smearer.GetValue():
temp_smearer = self.current_smearer
if self.slit_smearer.GetValue():
flag = self.update_slit_smear()
elif self.pinhole_smearer.GetValue():
flag = self.update_pinhole_smear()
else:
enable_smearer = not self.disable_smearer.GetValue()
self._manager.set_smearer(smearer=temp_smearer,
uid=self.uid,
fid=self.data.id,
qmin=float(self.qmin_x),
qmax=float(self.qmax_x),
enable_smearer=enable_smearer,
draw=False)
elif not self._is_2D():
enable_smearer = not self.disable_smearer.GetValue()
self._manager.set_smearer(smearer=temp_smearer,
qmin=float(self.qmin_x),
uid=self.uid,
fid=self.data.id,
qmax=float(self.qmax_x),
enable_smearer=enable_smearer,
draw=False)
if self.data is not None:
index_data = ((self.qmin_x <= self.data.x) &
(self.data.x <= self.qmax_x))
val = str(len(self.data.x[index_data]))
self.Npts_fit.SetValue(val)
else:
# No data in the panel
try:
self.npts_x = float(self.Npts_total.GetValue())
except:
flag = False
return flag
flag = True
if self._is_2D():
                # only the 2D case sets the mask
flag = self._validate_Npts()
if not flag:
return flag
else:
flag = False
else:
flag = False
        # For an invalid q range, disable the mask editor and fit button, etc.
if not self.fitrange:
if self._is_2D():
self.btEditMask.Disable()
else:
if self._is_2D() and self.data.is_data and not self.batch_on:
self.btEditMask.Enable(True)
if not flag:
msg = "Cannot Plot or Fit :Must select a "
msg += " model or Fitting range is not valid!!! "
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
try:
self.save_current_state()
except Exception:
logger.error(traceback.format_exc())
return flag, is_modified
def _reset_parameters_state(self, listtorestore, statelist):
"""
Reset the parameters at the given state
"""
if len(statelist) == 0 or len(listtorestore) == 0:
return
for j in range(len(listtorestore)):
for param in statelist:
if param[1] == listtorestore[j][1]:
item_page = listtorestore[j]
item_page_info = param
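                    # orientation angles are irrelevant for 1D fits,
                    # so leave them untouched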
if (item_page_info[1] == "theta" or item_page_info[1] ==
"phi") and not self._is_2D():
break
# change the state of the check box for simple parameters
if item_page[0] is not None:
item_page[0].SetValue(item_page_info[0])
if item_page[2] is not None:
item_page[2].SetValue(item_page_info[2])
if item_page[2].__class__.__name__ == "ComboBox":
if item_page_info[2] in self.model.fun_list:
fun_val = self.model.fun_list[item_page_info[2]]
self.model.setParam(item_page_info[1], fun_val)
if item_page[3] is not None:
# show or hide text +/-
if item_page_info[2]:
item_page[3].Show(True)
else:
item_page[3].Hide()
                    if item_page[4] is not None:
                        # show or hide the text ctrl for the fitting error
                        if item_page_info[4][0]:
                            item_page[4].Show(True)
                            item_page[4].SetValue(str(item_page_info[4][1]))
                        else:
                            item_page[4].Hide()
if item_page[5] is not None:
                        # show the text ctrl for the minimum value
item_page[5].Show(True)
item_page[5].SetValue(str(item_page_info[5][1]))
if item_page[6] is not None:
                        # show the text ctrl for the maximum value
item_page[6].Show(True)
item_page[6].SetValue(str(item_page_info[6][1]))
break
def _reset_strparam_state(self, listtorestore, statelist):
"""
Reset the string parameters at the given state
"""
if len(statelist) == 0:
return
listtorestore = copy.deepcopy(statelist)
for j in range(len(listtorestore)):
item_page = listtorestore[j]
item_page_info = statelist[j]
# change the state of the check box for simple parameters
if item_page[0] is not None:
item_page[0].SetValue(format_number(item_page_info[0], True))
if item_page[2] is not None:
param_name = item_page_info[1]
value = item_page_info[2]
selection = value
if value in self.model.fun_list:
selection = self.model.fun_list[value]
item_page[2].SetValue(selection)
self.model.setParam(param_name, selection)
def _copy_parameters_state(self, listtocopy, statelist):
"""
        copy the state of the buttons
:param listtocopy: the list of check button to copy
:param statelist: list of state object to store the current state
"""
if len(listtocopy) == 0:
return
for item in listtocopy:
checkbox_state = None
if item[0] is not None:
checkbox_state = item[0].GetValue()
parameter_name = item[1]
parameter_value = None
if item[2] is not None:
parameter_value = item[2].GetValue()
static_text = None
if item[3] is not None:
static_text = item[3].IsShown()
error_value = None
error_state = None
if item[4] is not None:
error_value = item[4].GetValue()
error_state = item[4].IsShown()
min_value = None
min_state = None
if item[5] is not None:
min_value = item[5].GetValue()
min_state = item[5].IsShown()
max_value = None
max_state = None
if item[6] is not None:
max_value = item[6].GetValue()
max_state = item[6].IsShown()
unit = None
if item[7] is not None:
unit = item[7].GetLabel()
statelist.append([checkbox_state, parameter_name, parameter_value,
static_text, [error_state, error_value],
[min_state, min_value],
[max_state, max_value], unit])
def _draw_model(self, update_chisqr=True, source='model'):
"""
Method to draw or refresh a plotted model.
The method will use the data member from the model page
to build a call to the fitting perspective manager.
        :param update_chisqr: update chisqr value [bool]
"""
wx.CallAfter(self._draw_model_after, update_chisqr, source)
def _draw_model_after(self, update_chisqr=True, source='model'):
"""
Method to draw or refresh a plotted model.
The method will use the data member from the model page
to build a call to the fitting perspective manager.
        :param update_chisqr: update chisqr value [bool]
"""
# if self.check_invalid_panel():
# return
if self.model is not None:
temp_smear = None
if hasattr(self, "enable_smearer"):
if not self.disable_smearer.GetValue():
temp_smear = self.current_smearer
# compute weight for the current data
from sas.sasgui.perspectives.fitting.utils import get_weight
flag = self.get_weight_flag()
weight = get_weight(data=self.data, is2d=self._is_2D(), flag=flag)
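            # weight holds the per-point uncertainties selected via the
            # dI_* weighting options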
toggle_mode_on = self.model_view.IsEnabled()
is_2d = self._is_2D()
self._manager.draw_model(self.model,
data=self.data,
smearer=temp_smear,
qmin=float(self.qmin_x),
qmax=float(self.qmax_x),
page_id=self.uid,
toggle_mode_on=toggle_mode_on,
state=self.state,
enable2D=is_2d,
update_chisqr=update_chisqr,
                                     source=source,
weight=weight)
def _on_show_sld(self, event=None):
"""
Plot SLD profile
"""
# get profile data
x, y = self.model.getProfile()
from sas.sasgui.plottools import Data1D as pf_data1d
# from sas.sasgui.perspectives.theory.profile_dialog import SLDPanel
from sas.sasgui.guiframe.local_perspectives.plotting.profile_dialog \
import SLDPanel
sld_data = pf_data1d(x, y)
sld_data.name = 'SLD'
sld_data.axes = self.sld_axes
self.panel = SLDPanel(self, data=sld_data, axes=self.sld_axes,
id=wx.ID_ANY)
self.panel.ShowModal()
def _set_multfactor_combobox(self, multiplicity=10):
"""
        Set the combo box for the multifactor of CoreMultiShellModel
        :param multiplicity: no. of multi-functionality entries
"""
# build content of the combobox
for idx in range(0, multiplicity):
self.multifactorbox.Append(str(idx), int(idx))
self._hide_multfactor_combobox()
def _show_multfactor_combobox(self):
"""
        Show the combo box for the multifactor of CoreMultiShellModel
"""
if not self.mutifactor_text.IsShown():
self.mutifactor_text.Show(True)
self.mutifactor_text1.Show(True)
if not self.multifactorbox.IsShown():
self.multifactorbox.Show(True)
def _hide_multfactor_combobox(self):
"""
        Hide the combo box for the multifactor of CoreMultiShellModel
"""
if self.mutifactor_text.IsShown():
self.mutifactor_text.Hide()
self.mutifactor_text1.Hide()
if self.multifactorbox.IsShown():
self.multifactorbox.Hide()
def formfactor_combo_init(self):
"""
First time calls _show_combox_helper
"""
self._show_combox(None)
def _show_combox_helper(self):
"""
Fill panel's combo box according to the type of model selected
"""
mod_cat = self.categorybox.GetStringSelection()
self.structurebox.SetSelection(0)
self.structurebox.Disable()
self.formfactorbox.Clear()
if mod_cat is None:
return
m_list = []
try:
if mod_cat == CUSTOM_MODEL:
for model in self.model_list_box[mod_cat]:
m_list.append(self.model_dict[model.name])
else:
cat_dic = self.master_category_dict[mod_cat]
for (model, enabled) in cat_dic:
if enabled:
m_list.append(self.model_dict[model])
except Exception:
msg = traceback.format_exc()
wx.PostEvent(self._manager.parent,
StatusEvent(status=msg, info="error"))
self._populate_box(self.formfactorbox, m_list)
def _on_modify_cat(self, event=None):
"""
Called when category manager is opened
"""
self._manager.parent.on_category_panel(event)
def _show_combox(self, event=None):
"""
        Show the combo box associated with the type of model selected
"""
self.Show(False)
self._show_combox_helper()
self._on_select_model(event=None)
self.Show(True)
self._save_typeOfmodel()
self.sizer4_4.Layout()
self.sizer4.Layout()
self.Layout()
self.Refresh()
    def _populate_box(self, combobox, model_list):
        """
        fill the combo box with model items
        :param model_list: list of model classes used to fill the combo box
        """
        mlist = []
        for models in model_list:
if models.name != "NoStructure":
mlist.append((models.name, models))
# Sort the models
mlist_sorted = sorted(mlist)
for item in mlist_sorted:
combobox.Append(item[0], item[1])
return 0
def _onQrangeEnter(self, event):
"""
Check validity of value enter in the Q range field
"""
tcrtl = event.GetEventObject()
# Clear msg if previously shown.
msg = ""
wx.PostEvent(self.parent, StatusEvent(status=msg))
# Flag to register when a parameter has changed.
if tcrtl.GetValue().lstrip().rstrip() != "":
try:
float(tcrtl.GetValue())
tcrtl.SetBackgroundColour(wx.WHITE)
# If qmin and qmax have been modified, update qmin and qmax
if self._validate_qrange(self.qmin, self.qmax):
tempmin = float(self.qmin.GetValue())
if tempmin != self.qmin_x:
self.qmin_x = tempmin
tempmax = float(self.qmax.GetValue())
if tempmax != self.qmax_x:
self.qmax_x = tempmax
else:
tcrtl.SetBackgroundColour("pink")
msg = "Model Error: wrong value entered: %s" % \
sys.exc_info()[1]
wx.PostEvent(self.parent, StatusEvent(status=msg))
return
except:
tcrtl.SetBackgroundColour("pink")
msg = "Model Error: wrong value entered: %s" % sys.exc_info()[1]
wx.PostEvent(self.parent, StatusEvent(status=msg))
return
        # Check if the # of points for the theory model is valid (> 0).
if self.npts is not None:
if check_float(self.npts):
temp_npts = float(self.npts.GetValue())
if temp_npts != self.num_points:
self.num_points = temp_npts
else:
msg = "Cannot plot: No points in Q range!!! "
wx.PostEvent(self.parent, StatusEvent(status=msg))
else:
tcrtl.SetBackgroundColour("pink")
msg = "Model Error: wrong value entered!!!"
wx.PostEvent(self.parent, StatusEvent(status=msg))
self.save_current_state()
event = PageInfoEvent(page=self)
wx.PostEvent(self.parent, event)
self.state_change = False
# Draw the model for a different range
if not self.data.is_data:
self.create_default_data()
self._draw_model()
def _theory_qrange_enter(self, event):
"""
Check validity of value enter in the Q range field
"""
tcrtl = event.GetEventObject()
# Clear msg if previously shown.
msg = ""
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
# Flag to register when a parameter has changed.
is_modified = False
if tcrtl.GetValue().lstrip().rstrip() != "":
try:
value = float(tcrtl.GetValue())
tcrtl.SetBackgroundColour(wx.WHITE)
# If qmin and qmax have been modified, update qmin and qmax
if self._validate_qrange(self.theory_qmin, self.theory_qmax):
tempmin = float(self.theory_qmin.GetValue())
if tempmin != self.theory_qmin_x:
self.theory_qmin_x = tempmin
tempmax = float(self.theory_qmax.GetValue())
                    if tempmax != self.theory_qmax_x:
self.theory_qmax_x = tempmax
else:
tcrtl.SetBackgroundColour("pink")
msg = "Model Error: wrong value entered: %s" % \
sys.exc_info()[1]
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
return
except:
tcrtl.SetBackgroundColour("pink")
msg = "Model Error: wrong value entered: %s" % sys.exc_info()[1]
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
return
        # Check if the # of points for the theory model is valid (> 0).
if self.Npts_total.IsEditable():
if check_float(self.Npts_total):
temp_npts = float(self.Npts_total.GetValue())
if temp_npts != self.num_points:
self.num_points = temp_npts
is_modified = True
else:
msg = "Cannot Plot: No points in Q range!!! "
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
else:
tcrtl.SetBackgroundColour("pink")
msg = "Model Error: wrong value entered!!!"
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
self.save_current_state()
event = PageInfoEvent(page=self)
wx.PostEvent(self.parent, event)
self.state_change = False
# Draw the model for a different range
self.create_default_data()
self._draw_model()
def _on_select_model_helper(self):
"""
call back for model selection
"""
# reset dictionary containing reference to dispersion
self._disp_obj_dict = {}
self.disp_cb_dict = {}
self.temp_multi_functional = False
f_id = self.formfactorbox.GetCurrentSelection()
# For MAC
form_factor = None
if f_id >= 0:
form_factor = self.formfactorbox.GetClientData(f_id)
if form_factor is None or \
not hasattr(form_factor, 'is_form_factor') or \
not form_factor.is_form_factor:
self.structurebox.Hide()
self.text2.Hide()
self.structurebox.Disable()
self.structurebox.SetSelection(0)
self.text2.Disable()
else:
self.structurebox.Show()
self.text2.Show()
self.structurebox.Enable()
self.text2.Enable()
if form_factor is not None:
            # set multifactor for multifunctional models
if form_factor.is_multiplicity_model:
m_id = self.multifactorbox.GetCurrentSelection()
multiplicity = form_factor.multiplicity_info[0]
self.multifactorbox.Clear()
self._set_multfactor_combobox(multiplicity)
self._show_multfactor_combobox()
# ToDo: this info should be called directly from the model
text = form_factor.multiplicity_info[1] # 'No. of Shells: '
self.mutifactor_text.SetLabel(text)
if m_id > multiplicity - 1:
# default value
m_id = 1
self.multi_factor = self.multifactorbox.GetClientData(m_id)
if self.multi_factor is None:
self.multi_factor = 0
self.multifactorbox.SetSelection(m_id)
# Check len of the text1 and max_multiplicity
text = ''
if form_factor.multiplicity_info[0] == \
len(form_factor.multiplicity_info[2]):
text = form_factor.multiplicity_info[2][self.multi_factor]
self.mutifactor_text1.SetLabel(text)
                # Check if the model has an SLD profile.
if len(form_factor.multiplicity_info[3]) > 0:
self.sld_axes = form_factor.multiplicity_info[3]
self.show_sld_button.Show(True)
else:
self.sld_axes = ""
else:
self._hide_multfactor_combobox()
self.show_sld_button.Hide()
self.multi_factor = None
else:
self._hide_multfactor_combobox()
self.show_sld_button.Hide()
self.multi_factor = None
s_id = self.structurebox.GetCurrentSelection()
struct_factor = self.structurebox.GetClientData(s_id)
if struct_factor is not None:
from sasmodels.sasview_model import MultiplicationModel
self.model = MultiplicationModel(form_factor(self.multi_factor),
struct_factor())
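            # the combined model evaluates the product P(Q)*S(Q)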
# multifunctional form factor
if len(form_factor.non_fittable) > 0:
self.temp_multi_functional = True
elif form_factor is not None:
if self.multi_factor is not None:
self.model = form_factor(self.multi_factor)
else:
# old style plugin models do not accept a multiplicity argument
self.model = form_factor()
else:
self.model = None
return
# check if model has magnetic parameters
if len(self.model.magnetic_params) > 0:
self._has_magnetic = True
else:
self._has_magnetic = False
# post state to fit panel
self.state.parameters = []
self.state.model = self.model
self.state.qmin = self.qmin_x
self.state.multi_factor = self.multi_factor
self.disp_list = self.model.getDispParamList()
self.state.disp_list = self.disp_list
self.on_set_focus(None)
self.Layout()
def _validate_qrange(self, qmin_ctrl, qmax_ctrl):
"""
Verify that the Q range controls have valid values
and that Qmin < Qmax.
:param qmin_ctrl: text control for Qmin
:param qmax_ctrl: text control for Qmax
        :return: True if the Q range is valid, False otherwise
"""
qmin_validity = check_float(qmin_ctrl)
qmax_validity = check_float(qmax_ctrl)
if not (qmin_validity and qmax_validity):
return False
else:
qmin = float(qmin_ctrl.GetValue())
qmax = float(qmax_ctrl.GetValue())
if qmin < qmax:
# Make sure to set both colours white.
qmin_ctrl.SetBackgroundColour(wx.WHITE)
qmin_ctrl.Refresh()
qmax_ctrl.SetBackgroundColour(wx.WHITE)
qmax_ctrl.Refresh()
else:
qmin_ctrl.SetBackgroundColour("pink")
qmin_ctrl.Refresh()
qmax_ctrl.SetBackgroundColour("pink")
qmax_ctrl.Refresh()
msg = "Invalid Q range: Q min must be smaller than Q max"
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
return False
return True
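# Editor's note: a minimal, wx-free sketch (not original SasView code) of
# the Q-range rule the method above enforces: both bounds must parse as
# floats and Qmin must be smaller than Qmax.
def _qrange_is_valid(qmin_text, qmax_text):
    """Return True if both bounds are floats and qmin < qmax."""
    try:
        qmin, qmax = float(qmin_text), float(qmax_text)
    except ValueError:
        return False
    return qmin < qmax
assert _qrange_is_valid("0.001", "0.5")
assert not _qrange_is_valid("0.5", "0.001")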
def _validate_Npts(self):
"""
Validate the number of points for fitting is more than 10 points.
If valid, set the value of Npts_fit; otherwise post an error message.
"""
# default flag
flag = True
# Theory
if self.data is None and self.enable2D:
return flag
for data in self.data_list:
# q value from qx and qy
radius = np.sqrt(data.qx_data * data.qx_data +
data.qy_data * data.qy_data)
# get unmasked index
index_data = (float(self.qmin.GetValue()) <= radius) & \
(radius <= float(self.qmax.GetValue()))
index_data = (index_data) & (data.mask)
index_data = (index_data) & (np.isfinite(data.data))
if len(index_data[index_data]) < 10:
# change the color pink.
self.qmin.SetBackgroundColour("pink")
self.qmin.Refresh()
self.qmax.SetBackgroundColour("pink")
self.qmax.Refresh()
msg = "Data Error: "
msg += "Too few points in %s." % data.name
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
self.fitrange = False
flag = False
else:
self.Npts_fit.SetValue(str(len(index_data[index_data])))
self.fitrange = True
return flag
def _validate_Npts_1D(self):
"""
Validate the number of points for fitting is more than 5 points.
If valid, set the value of Npts_fit; otherwise post an error message.
"""
# default flag
flag = True
# Theory
if self.data is None:
return flag
for data in self.data_list:
# q value from qx and qy
radius = data.x
# get unmasked index
index_data = (float(self.qmin.GetValue()) <= radius) & \
(radius <= float(self.qmax.GetValue()))
index_data = (index_data) & (np.isfinite(data.y))
if len(index_data[index_data]) < 5:
# change the color pink.
self.qmin.SetBackgroundColour("pink")
self.qmin.Refresh()
self.qmax.SetBackgroundColour("pink")
self.qmax.Refresh()
msg = "Data Error: "
msg += "Too few points in %s." % data.name
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
self.fitrange = False
flag = False
else:
self.Npts_fit.SetValue(str(len(index_data[index_data])))
self.fitrange = True
return flag
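# Editor's note: a standalone illustration (invented data, not original
# code) of the boolean-mask counting idiom used by both validators above;
# len(mask[mask]) counts the True entries, i.e. np.count_nonzero(mask).
import numpy as np
_radius = np.array([0.01, 0.05, 0.2, 0.8])
_mask = (0.02 <= _radius) & (_radius <= 0.5)
assert len(_mask[_mask]) == np.count_nonzero(_mask) == 2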
def _check_value_enter(self, list):
"""
:param list: model parameter and panel info
:Note: each item of the list should be as follows:
item=[check button state, parameter's name,
parameter's value, string="+/-",
parameter's error of fit,
parameter's minimum value,
parameter's maximum value,
parameter's units]
Returns True if the model parameters have changed.
"""
is_modified = False
for item in list:
# skip angle parameters for 1D
if not self.enable2D and item in self.orientation_params:
continue
value_ctrl = item[2]
if not value_ctrl.IsEnabled():
# ArrayDispersion disables PD, Min, Max, Npts, Nsigs
continue
name = item[1]
value_str = value_ctrl.GetValue().strip()
if name.endswith(".npts"):
validity = check_int(value_ctrl)
if not validity:
continue
value = int(value_str)
elif name.endswith(".nsigmas"):
validity = check_float(value_ctrl)
if not validity:
continue
value = float(value_str)
else: # value or polydispersity
# Check that min, max and value are floats
min_ctrl, max_ctrl = item[5], item[6]
min_str = min_ctrl.GetValue().strip()
max_str = max_ctrl.GetValue().strip()
validity = check_float(value_ctrl)
if min_str != "":
validity = validity and check_float(min_ctrl)
if max_str != "":
validity = validity and check_float(max_ctrl)
if not validity:
continue
# Check that min is less than max
low = -np.inf if min_str == "" else float(min_str)
high = np.inf if max_str == "" else float(max_str)
if high < low:
min_ctrl.SetBackgroundColour("pink")
min_ctrl.Refresh()
max_ctrl.SetBackgroundColour("pink")
max_ctrl.Refresh()
# msg = "Invalid fit range for %s: min must be smaller
# than max"%name
# wx.PostEvent(self._manager.parent,
# StatusEvent(status=msg))
continue
# Force value between min and max
value = float(value_str)
if value < low:
value = low
value_ctrl.SetValue(format_number(value))
elif value > high:
value = high
value_ctrl.SetValue(format_number(value))
if name not in self.model.details.keys():
self.model.details[name] = ["", None, None]
old_low, old_high = self.model.details[name][1:3]
if old_low != low or old_high != high:
# The configuration has changed but it won't change the
# computed curve so no need to set is_modified to True
# is_modified = True
self.model.details[name][1:3] = low, high
# Update value in model if it has changed
if value != self.model.getParam(name):
self.model.setParam(name, value)
is_modified = True
return is_modified
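# Editor's note: the clamping rule of _check_value_enter in isolation --
# empty bound strings mean unbounded, and the value is forced into
# [low, high]. A hypothetical sketch, not original SasView code.
import numpy as np
def _clamp(value_str, min_str, max_str):
    low = -np.inf if min_str == "" else float(min_str)
    high = np.inf if max_str == "" else float(max_str)
    return min(max(float(value_str), low), high)
assert _clamp("5.0", "", "2.0") == 2.0
assert _clamp("-3.0", "0.0", "") == 0.0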
def _set_dipers_Param(self, event):
"""
respond to self.enable_disp and self.disable_disp radio box.
The dispersity object inside the model is reset to Gaussian.
When the user selects Yes, this method displays a combo box for
further selection; when the user selects No, the combo box disappears.
Redraw the model with the default dispersity (Gaussian).
"""
# On selection if no model exists.
if self.model is None:
self.disable_disp.SetValue(True)
msg = "Please select a Model first..."
wx.MessageBox(msg, 'Info')
wx.PostEvent(self._manager.parent,
StatusEvent(status="Polydispersion: %s" % msg))
return
self._reset_dispersity()
if self.model is None:
self.model_disp.Hide()
self.sizer4_4.Clear(True)
return
if self.enable_disp.GetValue():
# layout for model containing no dispersity parameters
self.disp_list = self.model.getDispParamList()
if len(self.disp_list) == 0 and len(self.disp_cb_dict) == 0:
self._layout_sizer_noDipers()
else:
# set gaussian sizer
self._on_select_Disp(event=None)
else:
self.sizer4_4.Clear(True)
# post state to fit panel
self.save_current_state()
if event is not None:
event = PageInfoEvent(page=self)
wx.PostEvent(self.parent, event)
# draw the model with the current dispersity
# Wojtek P, Oct 8, 2016: Calling draw_model seems to be unnecessary.
# By commenting it out we save an extra Iq calculation.
# self._draw_model()
# Need to use FitInside again here to replace the next four lines.
# Otherwise turning polydispersity off does not resize the scroll window.
# PDB Nov 28, 2015
self.FitInside()
# self.sizer4_4.Layout()
# self.sizer5.Layout()
# self.Layout()
# self.Refresh()
def _layout_sizer_noDipers(self):
"""
Draw a sizer with no dispersity info
"""
ix = 0
iy = 1
self.fittable_param = []
self.fixed_param = []
self.orientation_params_disp = []
self.sizer4_4.Clear(True)
text = "No polydispersity available for this model"
model_disp = wx.StaticText(self, wx.ID_ANY, text)
self.sizer4_4.Add(model_disp, (iy, ix), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 10)
self.sizer4_4.Layout()
self.sizer4.Layout()
def _reset_dispersity(self):
"""
put gaussian dispersity into current model
"""
if len(self.param_toFit) > 0:
for item in self.fittable_param:
if item in self.param_toFit:
self.param_toFit.remove(item)
for item in self.orientation_params_disp:
if item in self.param_toFit:
self.param_toFit.remove(item)
self.fittable_param = []
self.fixed_param = []
self.orientation_params_disp = []
self.values = {}
self.weights = {}
# from sas.models.dispersion_models import GaussianDispersion
from sasmodels.weights import GaussianDispersion
if len(self.disp_cb_dict) == 0:
self.save_current_state()
self.sizer4_4.Clear(True)
self.Layout()
return
if (len(self.disp_cb_dict) > 0):
for p in self.disp_cb_dict:
# The parameter was un-selected.
# Go back to Gaussian model (with 0 pts)
disp_model = GaussianDispersion()
self._disp_obj_dict[p] = disp_model
# Set the new model as the dispersion object
# for the selected parameter
try:
self.model.set_dispersion(p, disp_model)
except Exception:
logger.error(traceback.format_exc())
# save current state
self.save_current_state()
self.Layout()
self.Refresh()
def _on_select_Disp(self, event):
"""
allow selecting different dispersion
self.disp_list should support more types later; for now only Gaussian
"""
self._set_sizer_dispersion()
# Redraw the model
# Wojtek P. Nov 7, 2016: Redrawing seems to be unnecessary here
# self._draw_model()
# self._undo.Enable(True)
event = PageInfoEvent(page=self)
wx.PostEvent(self.parent, event)
self.sizer4_4.Layout()
self.sizer4.Layout()
self.SetupScrolling()
def _on_disp_func(self, event=None):
"""
Select a distribution function for the polydispersion
:param event: ComboBox event
"""
# get ready for new event
if event is not None:
event.Skip()
# Get event object
disp_box = event.GetEventObject()
# Try to select a Distr. function
try:
disp_box.SetBackgroundColour("white")
selection = disp_box.GetCurrentSelection()
param_name = disp_box.Name.split('.')[0]
disp_name = disp_box.GetValue()
dispersity = disp_box.GetClientData(selection)
# disp_model = GaussianDispersion()
disp_model = dispersity()
# Get param names to reset the values of the param
name1 = param_name + ".width"
name2 = param_name + ".npts"
name3 = param_name + ".nsigmas"
# Check Disp. function whether or not it is 'array'
if disp_name.lower() == "array":
value2 = ""
value3 = ""
value1 = self._set_array_disp(name=name1, disp=disp_model)
else:
self._del_array_values(name1)
# self._reset_array_disp(param_name)
self._disp_obj_dict[name1] = disp_model
self.model.set_dispersion(param_name, disp_model)
self.state._disp_obj_dict[name1] = disp_model.type
value1 = str(format_number(self.model.getParam(name1), True))
value2 = str(format_number(self.model.getParam(name2)))
value3 = str(format_number(self.model.getParam(name3)))
# Reset fittable polydispersity parameter value
for item in self.fittable_param:
if item[1] == name1:
item[2].SetValue(value1)
item[5].SetValue("")
item[6].SetValue("")
# Disable for array
if disp_name.lower() == "array":
item[0].SetValue(False)
item[0].Disable()
item[2].Disable()
item[3].Show(False)
item[4].Show(False)
item[5].Disable()
item[6].Disable()
else:
item[0].Enable()
item[2].Enable()
item[3].Show(True)
item[4].Show(True)
item[5].Enable()
item[6].Enable()
break
# Reset fixed polydispersion params
for item in self.fixed_param:
if item[1] == name2:
item[2].SetValue(value2)
# Disable Npts for array
if disp_name.lower() == "array":
item[2].Disable()
else:
item[2].Enable()
if item[1] == name3:
item[2].SetValue(value3)
# Disable Nsigs for array
if disp_name.lower() == "array":
item[2].Disable()
else:
item[2].Enable()
# Make sure the check box updated
self.get_all_checked_params()
# update params
self._update_paramv_on_fit()
# draw
self._draw_model()
self.Refresh()
except Exception:
logger.error(traceback.format_exc())
# Error msg
msg = "Error occurred:"
msg += " Could not select the distribution function..."
msg += " Please select another distribution function."
disp_box.SetBackgroundColour("pink")
# Focus on Fit button so that users can see the pinky box
self.btFit.SetFocus()
wx.PostEvent(self._manager.parent,
StatusEvent(status=msg, info="error"))
def _set_array_disp(self, name=None, disp=None):
"""
Set array dispersion
:param name: name of the parameter for the dispersion to be set
:param disp: the polydispersion object
"""
# The user wants this parameter to be averaged.
# Pop up the file selection dialog.
path = self._selectDlg()
# Array data
values = []
weights = []
# If nothing was selected, just return
if path is None:
self.disp_cb_dict[name].SetValue(False)
# self.noDisper_rbox.SetValue(True)
return
self._default_save_location = os.path.dirname(path)
if self._manager is not None:
self._manager.parent._default_save_location = \
self._default_save_location
basename = os.path.basename(path)
values, weights = self.read_file(path)
# If either of the two arrays is empty, notify the user that we
# won't proceed
if len(self.param_toFit) > 0:
if name in self.param_toFit:
self.param_toFit.remove(name)
# Tell the user that we are about to apply the distribution
msg = "Applying loaded %s distribution: %s" % (name, path)
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
self._set_array_disp_model(name=name, disp=disp,
values=values, weights=weights)
return basename
def _set_array_disp_model(self, name=None, disp=None,
values=[], weights=[]):
"""
Set array dispersion model
:param name: name of the parameter for the dispersion to be set
:param disp: the polydispersion object
"""
disp.set_weights(values, weights)
self._disp_obj_dict[name] = disp
self.model.set_dispersion(name.split('.')[0], disp)
self.state._disp_obj_dict[name] = disp.type
self.values[name] = values
self.weights[name] = weights
# Store the object to make it persist outside the
# scope of this method
# TODO: refactor model to clean this up?
self.state.values = {}
self.state.weights = {}
self.state.values = copy.deepcopy(self.values)
self.state.weights = copy.deepcopy(self.weights)
# Set the new model as the dispersion object for the
# selected parameter
# self.model.set_dispersion(p, disp_model)
# Store a reference to the weights in the model object
# so that
# it's not lost when we use the model within another thread.
self.state.model = self.model.clone()
self.model._persistency_dict[name.split('.')[0]] = \
[values, weights]
self.state.model._persistency_dict[name.split('.')[0]] = \
[values, weights]
def _del_array_values(self, name=None):
"""
Reset array dispersion
:param name: name of the parameter for the dispersion to be set
"""
# Try to delete values and weight of the names array dic if exists
try:
if name in self.values:
del self.values[name]
del self.weights[name]
# delete all other dic
del self.state.values[name]
del self.state.weights[name]
del self.model._persistency_dict[name.split('.')[0]]
del self.state.model._persistency_dict[name.split('.')[0]]
except Exception:
logger.error(traceback.format_exc())
def _lay_out(self):
"""
Calls self.Layout().
:Note: Mac seems to like this better when
self.Layout is called after fitting.
"""
self.Layout()
return
def _find_polyfunc_selection(self, disp_func=None):
"""
Find ComboBox selection from disp_func
:param disp_func: dispersion distr. function
"""
# Find the selection
if disp_func is not None:
try:
return POLYDISPERSITY_MODELS.values().index(disp_func.__class__)
except ValueError:
pass # Fall through to default class
return POLYDISPERSITY_MODELS.keys().index('gaussian')
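# Editor's note: .values().index() and .keys().index() above work only on
# Python 2, where dict.keys()/values() return lists; Python 3 dict views
# have no .index(). A version-agnostic sketch of the same lookup (the
# mapping name is taken from the code above, the helper name is invented):
def _find_selection_index(models, disp_func=None):
    classes = list(models.values())
    if disp_func is not None and disp_func.__class__ in classes:
        return classes.index(disp_func.__class__)
    return list(models.keys()).index('gaussian')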
def on_reset_clicked(self, event):
"""
On 'Reset' button for Q range clicked
"""
flag = True
# For 3 different cases: Data2D, Data1D, and theory
if self.model is None:
msg = "Please select a model first..."
wx.MessageBox(msg, 'Info')
flag = False
return
elif self.data.__class__.__name__ == "Data2D":
data_min = 0
x = max(math.fabs(self.data.xmin), math.fabs(self.data.xmax))
y = max(math.fabs(self.data.ymin), math.fabs(self.data.ymax))
self.qmin_x = data_min
self.qmax_x = math.sqrt(x * x + y * y)
# self.data.mask = np.ones(len(self.data.data),dtype=bool)
# check smearing
if not self.disable_smearer.GetValue():
# set smearing value whether or
# not the data contain the smearing info
if self.pinhole_smearer.GetValue():
flag = self.update_pinhole_smear()
else:
flag = True
elif self.data is None:
self.qmin_x = _QMIN_DEFAULT
self.qmax_x = _QMAX_DEFAULT
self.num_points = _NPTS_DEFAULT
self.state.npts = self.num_points
elif self.data.__class__.__name__ != "Data2D":
self.qmin_x = min(self.data.x)
self.qmax_x = max(self.data.x)
# check smearing
if not self.disable_smearer.GetValue():
# set smearing value whether or
# not the data contain the smearing info
if self.slit_smearer.GetValue():
flag = self.update_slit_smear()
elif self.pinhole_smearer.GetValue():
flag = self.update_pinhole_smear()
else:
flag = True
else:
flag = False
if flag is False:
msg = "Cannot Plot :Must enter a number!!! "
wx.PostEvent(self._manager.parent, StatusEvent(status=msg))
else:
# set relative text ctrs.
self.qmin.SetValue(str(self.qmin_x))
self.qmax.SetValue(str(self.qmax_x))
self.show_npts2fit()
# At this point, some button and variable status (disabled?)
# should be checked such as color that should be reset to
# white in case that it was pink.
self._onparamEnter_helper()
self.save_current_state()
self.state.qmin = self.qmin_x
self.state.qmax = self.qmax_x
# reset the q range values
self._reset_plotting_range(self.state)
self._draw_model()
def select_log(self, event):
"""
Log checked to generate log spaced points for theory model
"""
def get_images(self):
"""
Get the images of the plots corresponding to this panel for the report
: return graphs: list of figures
: Needs to move to guiframe
"""
# set list of graphs
graphs = []
canvases = []
res_item = None
# call gui_manager
gui_manager = self._manager.parent
# loops through the panels [dic]
for _, item2 in gui_manager.plot_panels.iteritems():
data_title = self.data.group_id
# try to get all plots belonging to this control panel
try:
g_id = item2.group_id
if g_id == data_title or \
str(g_id).count("res" + str(self.graph_id)) or \
str(g_id).count(str(self.uid)) > 0:
if str(g_id).count("res" + str(self.graph_id)) > 0:
res_item = [item2.figure, item2.canvas]
else:
# append to the list
graphs.append(item2.figure)
canvases.append(item2.canvas)
except Exception:
# Not for control panels
logger.error(traceback.format_exc())
# Make sure the residuals plot goes last
if res_item is not None:
graphs.append(res_item[0])
canvases.append(res_item[1])
# return the list of graphs
return graphs, canvases
def on_function_help_clicked(self, event):
"""
Function called when 'Help' button is pressed next to model
of interest. This calls DocumentationWindow from
documentation_window.py. It will load the top level of the
Sphinx-generated model help documentation if no model is selected.
If a model IS present and documentation for that model exists,
it will load to that point; otherwise it again goes to the top.
If Wx2.8 or below is used (i.e. not released through the installer),
a browser is loaded and only the top of the model documentation is
accessible, because the webbrowser module does not pass anything after
the # to the browser.
:param event: on Help Button pressed event
"""
if self.model is not None:
name = self.formfactorbox.GetValue()
_TreeLocation = 'user/models/' + name.lower()+'.html'
_doc_viewer = DocumentationWindow(self, wx.ID_ANY, _TreeLocation,
"", name + " Help")
else:
_TreeLocation = 'user/index.html'
_doc_viewer = DocumentationWindow(self, wx.ID_ANY, _TreeLocation,
"", "General Model Help")
def on_model_help_clicked(self, event):
"""
Function called when 'Description' button is pressed next to model
of interest. This calls the Description embedded in the model. This
should work with Wx2.8 and lower as well as higher versions. If no model is
selected it will give the message that a model must be chosen first
in the box that would normally contain the description. If a badly
behaved model is encountered which has no description then it will
give the message that none is available.
:param event: on Description Button pressed event
"""
if self.model is None:
name = 'index.html'
else:
name = self.formfactorbox.GetValue()
msg = 'Model description:\n'
info = "Info"
if self.model is not None:
# frame.Destroy()
if str(self.model.description).rstrip().lstrip() == '':
msg += "Sorry, no information is available for this model."
else:
msg += self.model.description + '\n'
wx.MessageBox(msg, info)
else:
msg += "You must select a model to get information on this"
wx.MessageBox(msg, info)
def _on_mag_angle_help(self, event):
"""
Bring up Magnetic Angle definition bmp image whenever the ? button
is clicked. Calls DocumentationWindow with the path of the location
within the documentation tree (after /doc/...). When using old
versions of Wx (i.e. before 2.9 and therefore not part of release
versions distributed via installer) it brings up an image viewer
box which allows the user to click through the rest of the images in
the directory. Not ideal, but probably better than the alternative,
which would bring up the entire discussion of how magnetic models work,
especially since it is not likely to be accessed. The normal release
versions bring up the normal image box.
:param evt: Triggers on clicking ? in Magnetic Angles? box
"""
_TreeLocation = "_images/M_angles_pic.bmp"
_doc_viewer = DocumentationWindow(self, wx.ID_ANY, _TreeLocation, "",
"Magnetic Angle Defintions")
def _on_mag_help(self, event):
"""
Bring up Magnetic Angle definition bmp image whenever the ? button
is clicked. Calls DocumentationWindow with the path of the location
within the documentation tree (after /doc/...). When using old
versions of Wx (i.e. before 2.9 and therefore not part of release
versions distributed via installer) it brings up an image viewer
box which allows the user to click through the rest of the images in
the directory. Not ideal, but probably better than the alternative,
which would bring up the entire discussion of how magnetic models work,
especially since it is not likely to be accessed. The normal release
versions bring up the normal image box.
:param evt: Triggers on clicking ? in Magnetic Angles? box
"""
_TreeLocation = "user/magnetism.html"
_doc_viewer = DocumentationWindow(self, wx.ID_ANY, _TreeLocation, "",
"Polarized Beam/Magnetc Help")
def _on_mag_on(self, event):
"""
Magnetic Parameters ON/OFF
"""
button = event.GetEventObject()
if button.GetLabel().count('ON') > 0:
self.magnetic_on = True
button.SetLabel("Magnetic OFF")
m_value = 1.0e-06
for key in self.model.magnetic_params:
if key.count('M0') > 0:
self.model.setParam(key, m_value)
m_value += 0.5e-06
else:
self.magnetic_on = False
button.SetLabel("Magnetic ON")
for key in self.model.magnetic_params:
if key.count('M0') > 0:
# reset mag value to zero for safety
self.model.setParam(key, 0.0)
self.Show(False)
self.set_model_param_sizer(self.model)
# self._set_sizer_dispersion()
self.state.magnetic_on = self.magnetic_on
self.SetupScrolling()
self.Show(True)
def on_pd_help_clicked(self, event):
"""
Bring up Polydispersity Documentation whenever the ? button is clicked.
Calls DocumentationWindow with the path of the location within the
documentation tree (after /doc/...). Note that when using old
versions of Wx (before 2.9), and thus not the release version from
installers, the help comes up at the top level of the file, as
webbrowser does not pass anything past the # to the browser when it is
running "file:///...."
:param event: Triggers on clicking ? in polydispersity box
"""
_TreeLocation = "user/sasgui/perspectives/fitting/pd_help.html"
_PageAnchor = ""
_doc_viewer = DocumentationWindow(self, wx.ID_ANY, _TreeLocation,
_PageAnchor, "Polydispersity Help")
def on_left_down(self, event):
"""
Get key stroke event
"""
# Figuring out key combo: Cmd for copy, Alt for paste
if event.CmdDown() and event.ShiftDown():
self.get_paste()
elif event.CmdDown():
self.get_copy()
else:
event.Skip()
return
# make event free
event.Skip()
def get_copy(self):
"""
Get copy params to clipboard
"""
content = self.get_copy_params()
flag = self.set_clipboard(content)
self._copy_info(flag)
return flag
def get_copy_params(self):
"""
Get the string copies of the param names and values in the tab
"""
content = 'sasview_parameter_values:'
# Do it if params exist
if self.parameters:
# go through the parameters
strings = self._get_copy_helper(self.parameters,
self.orientation_params)
content += strings
# go through the fittables
strings = self._get_copy_helper(self.fittable_param,
self.orientation_params_disp)
content += strings
# go through the fixed params
strings = self._get_copy_helper(self.fixed_param,
self.orientation_params_disp)
content += strings
# go through the str params
strings = self._get_copy_helper(self.str_parameters,
self.orientation_params)
content += strings
return content
else:
return False
def get_copy_excel(self):
"""
Get copy params to clipboard
"""
content = self.get_copy_params_excel()
flag = self.set_clipboard(content)
self._copy_info(flag)
return flag
def get_copy_params_excel(self):
"""
Get the string copies of the param names and values in the tab
"""
content = ''
crlf = chr(13) + chr(10)
tab = chr(9)
# Do it if params exist
if self.parameters:
for param in self.parameters:
content += param[1] # parameter name
content += tab
content += param[1] + "_err"
content += tab
content += crlf
# row of values and errors...
for param in self.parameters:
content += param[2].GetValue() # value
content += tab
content += param[4].GetValue() # error
content += tab
return content
else:
return False
def get_copy_latex(self):
"""
Get copy params to clipboard
"""
content = self.get_copy_params_latex()
flag = self.set_clipboard(content)
self._copy_info(flag)
return flag
def get_copy_params_latex(self):
"""
Get the string copies of the param names and values in the tab
"""
content = '\\begin{table}'
content += '\\begin{tabular}[h]'
crlf = chr(13) + chr(10)
tab = chr(9)
# Do it if params exist
if self.parameters:
content += '{|'
for param in self.parameters:
content += 'l|l|'
content += '}\hline'
content += crlf
for index, param in enumerate(self.parameters):
content += param[1].replace('_', '\_') # parameter name
content += ' & '
content += param[1].replace('_', '\_') + "\_err"
if index < len(self.parameters) - 1:
content += ' & '
content += '\\\\ \\hline'
content += crlf
# row of values and errors...
for index, param in enumerate(self.parameters):
content += param[2].GetValue() # parameter value
content += ' & '
content += param[4].GetValue() # parameter error
if index < len(self.parameters) - 1:
content += ' & '
content += '\\\\ \\hline'
content += crlf
content += '\\end{tabular}'
content += '\\end{table}'
return content
else:
return False
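# Editor's note: for two hypothetical parameters "scale" and "radius" the
# method above would emit roughly (line breaks aside):
#
#   \begin{table}\begin{tabular}[h]{|l|l|l|l|}\hline
#   scale & scale\_err & radius & radius\_err \\ \hline
#   1.0 & 0.1 & 60.0 & 2.5 \\ \hline
#   \end{tabular}\end{table}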
def set_clipboard(self, content=None):
"""
Put the string to the clipboard
"""
if not content:
return False
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(wx.TextDataObject(str(content)))
wx.TheClipboard.Close()
return True
return None
def _get_copy_helper(self, param, orient_param):
"""
Helping get value and name of the params
: param param: parameters
: param orient_param: orientational params
: return content: strings [list] [name,value:....]
"""
content = ''
bound_hi = ''
bound_lo = ''
# go through the str params
for item in param:
# copy only the params shown
if not item[2].IsShown():
continue
disfunc = ''
try:
if item[7].__class__.__name__ == 'ComboBox':
disfunc = str(item[7].GetValue())
except Exception:
logger.error(traceback.format_exc())
# 2D
if self.data.__class__.__name__ == "Data2D":
try:
check = item[0].GetValue()
except Exception:
check = None
name = item[1]
value = item[2].GetValue()
# 1D
else:
# for 1D all parameters except orientation
if not item[1] in orient_param:
try:
check = item[0].GetValue()
except:
check = None
name = item[1]
value = item[2].GetValue()
# Bounds
try:
bound_lo = item[5].GetValue()
bound_hi = item[6].GetValue()
except Exception:
# harmless - need to just pass
pass
# add to the content
if disfunc != '':
disfunc = ',' + disfunc
# Need to support array func for copy/paste
try:
if disfunc.count('array') > 0:
disfunc += ','
for val in self.values[name]:
disfunc += ' ' + str(val)
disfunc += ','
for weight in self.weights[name]:
disfunc += ' ' + str(weight)
except Exception:
logger.error(traceback.format_exc())
content += name + ',' + str(check) + ',' + value + disfunc + ',' + \
bound_lo + ',' + bound_hi + ':'
return content
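# Editor's note: a tiny sketch (invented values, not original code) of the
# flat record format built above -- "name,check,value[,dispersion...],lo,hi"
# per parameter, with records terminated by ':'.
_record = "radius,True,60.0,0.0,inf:"
_name, _check, _value, _lo, _hi = _record.rstrip(':').split(',')
assert (_name, _value, _hi) == ("radius", "60.0", "inf")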
def get_clipboard(self):
"""
Get strings in the clipboard
"""
text = ""
# Get text from the clip board
if wx.TheClipboard.Open():
if wx.TheClipboard.IsSupported(wx.DataFormat(wx.DF_TEXT)):
data = wx.TextDataObject()
# get wx dataobject
success = wx.TheClipboard.GetData(data)
# get text
if success:
text = data.GetText()
else:
text = ''
# close clipboard
wx.TheClipboard.Close()
return text
def get_paste(self):
"""
Paste params from the clipboard
"""
text = self.get_clipboard()
flag = self.get_paste_params(text)
self._copy_info(flag)
return flag
def get_paste_params(self, text=''):
"""
Set the params from the string copied to the clipboard
"""
context = {}
# put the text into dictionary
lines = text.split(':')
if lines[0] != 'sasview_parameter_values':
self._copy_info(False)
return False
for line in lines[1:-1]:
if len(line) != 0:
item = line.split(',')
check = item[1]
name = item[0]
value = item[2]
# Transfer the text to content[dictionary]
context[name] = [check, value]
# limits
limit_lo = item[3]
context[name].append(limit_lo)
limit_hi = item[4]
context[name].append(limit_hi)
# ToDo: PlugIn this poly disp function for pasting
try:
poly_func = item[5]
context[name].append(poly_func)
try:
# take the vals and weights for array
array_values = item[6].split(' ')
array_weights = item[7].split(' ')
val = [float(a_val) for a_val in array_values[1:]]
weit = [float(a_weit) for a_weit in array_weights[1:]]
context[name].append(val)
context[name].append(weit)
except:
raise
except:
poly_func = ''
context[name].append(poly_func)
# Do it if params exist
if self.parameters:
# go through the parameters
self._get_paste_helper(self.parameters,
self.orientation_params, context)
# go through the fittables
self._get_paste_helper(self.fittable_param,
self.orientation_params_disp,
context)
# go through the fixed params
self._get_paste_helper(self.fixed_param,
self.orientation_params_disp, context)
# go through the str params
self._get_paste_helper(self.str_parameters,
self.orientation_params, context)
return True
return None
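# Editor's note: a minimal round trip through the clipboard grammar that
# get_paste_params expects (hypothetical content, not original code):
_text = "sasview_parameter_values:scale,True,1.0,0.0,10.0:"
_lines = _text.split(':')
assert _lines[0] == 'sasview_parameter_values'
_check, _value = _lines[1].split(',')[1:3]
assert (_check, _value) == ('True', '1.0')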
def _get_paste_helper(self, param, orient_param, content):
"""
Helping set values of the params
: param param: parameters
: param orient_param: orientational params
: param content: dictionary [ name, value: name1.value1,...]
"""
# go through the str params
for item in param:
# 2D
if self.data.__class__.__name__ == "Data2D":
name = item[1]
if name in content.keys():
values = content[name]
check = values[0]
pd = values[1]
if name.count('.') > 0:
# If this is parameter.width, then pd may be a floating
# point value or it may be an array distribution.
# Nothing to do for parameter.npts or parameter.nsigmas.
try:
float(pd)
if name.endswith('.npts'):
pd = int(pd)
except Exception:
# continue
if not pd and pd != '':
continue
item[2].SetValue(str(pd))
if item in self.fixed_param and pd == '':
# Only array func has pd == '' case.
item[2].Enable(False)
else:
item[2].Enable(True)
if item[2].__class__.__name__ == "ComboBox":
if content[name][1] in self.model.fun_list:
fun_val = self.model.fun_list[content[name][1]]
self.model.setParam(name, fun_val)
try:
item[5].SetValue(str(values[-3]))
item[6].SetValue(str(values[-2]))
except Exception:
# passing as harmless non-update
pass
value = content[name][1:]
self._paste_poly_help(item, value)
if check == 'True':
is_true = True
elif check == 'False':
is_true = False
else:
is_true = None
if is_true is not None:
item[0].SetValue(is_true)
# 1D
else:
# for 1D all parameters except orientation
if not item[1] in orient_param:
name = item[1]
if name in content.keys():
check = content[name][0]
# Avoid changing combox content
value = content[name][1:]
pd = value[0]
if name.count('.') > 0:
# If this is parameter.width, then pd may be a
# floating point value or it may be an array
# distribution. Nothing to do for parameter.npts or
# parameter.nsigmas.
try:
pd = float(pd)
if name.endswith('.npts'):
pd = int(pd)
except:
# continue
if not pd and pd != '':
continue
item[2].SetValue(str(pd))
if item in self.fixed_param and pd == '':
# Only array func has pd == '' case.
item[2].Enable(False)
else:
item[2].Enable(True)
if item[2].__class__.__name__ == "ComboBox":
if value[0] in self.model.fun_list:
fun_val = self.model.fun_list[value[0]]
self.model.setParam(name, fun_val)
# save state
try:
item[5].SetValue(str(value[-3]))
item[6].SetValue(str(value[-2]))
except Exception:
# passing as harmless non-update
pass
self._paste_poly_help(item, value)
if check == 'True':
is_true = True
elif check == 'False':
is_true = False
else:
is_true = None
if is_true is not None:
item[0].SetValue(is_true)
self.select_param(event=None)
self.Refresh()
def _paste_poly_help(self, item, value):
"""
Helps get paste for poly function
*item* is the row of widgets for the parameter
*value* depends on which parameter is being processed, and whether it
has array polydispersity.
For parameters without array polydispersity:
parameter => ['FLOAT', '']
parameter.width => ['FLOAT', 'DISTRIBUTION', '']
parameter.npts => ['FLOAT', '']
parameter.nsigmas => ['FLOAT', '']
For parameters with array polydispersity:
parameter => ['FLOAT', '']
parameter.width => ['FILENAME', 'array', [x1, ...], [w1, ...]]
parameter.npts => ['FLOAT', '']
parameter.nsigmas => ['FLOAT', '']
"""
# Do nothing if not setting polydispersity
if len(value[3]) == 0:
return
try:
name = item[7].Name
param_name = name.split('.')[0]
item[7].SetValue(value[1])
selection = item[7].GetCurrentSelection()
dispersity = item[7].GetClientData(selection)
disp_model = dispersity()
if value[1] == 'array':
pd_vals = np.array(value[2])
pd_weights = np.array(value[3])
if len(pd_vals) == 0 or len(pd_vals) != len(pd_weights):
msg = ("bad array distribution parameters for %s"
% param_name)
raise ValueError(msg)
self._set_disp_cb(True, item=item)
self._set_array_disp_model(name=name,
disp=disp_model,
values=pd_vals,
weights=pd_weights)
else:
self._set_disp_cb(False, item=item)
self._disp_obj_dict[name] = disp_model
self.model.set_dispersion(param_name, disp_model)
self.state._disp_obj_dict[name] = disp_model.type
# TODO: It's not an array, why update values and weights?
self.model._persistency_dict[param_name] = \
[self.values, self.weights]
self.state.values = self.values
self.state.weights = self.weights
except Exception:
logger.error(traceback.format_exc())
print("Error in BasePage._paste_poly_help: %s" % \
sys.exc_info()[1])
def _set_disp_cb(self, isarray, item):
"""
Set cb for array disp
"""
if isarray:
item[0].SetValue(False)
item[0].Enable(False)
item[2].Enable(False)
item[3].Show(False)
item[4].Show(False)
item[5].SetValue('')
item[5].Enable(False)
item[6].SetValue('')
item[6].Enable(False)
else:
item[0].Enable()
item[2].Enable()
item[3].Show(True)
item[4].Show(True)
item[5].Enable()
item[6].Enable()
def update_pinhole_smear(self):
"""
Method to be called by sub-classes
Move it; this method doesn't belong here
"""
print("BasicPage.update_pinhole_smear was called: skipping")
return
def _read_category_info(self):
"""
Reads the categories in from file
"""
# # ILL mod starts here - July 2012 kieranrcampbell@gmail.com
self.master_category_dict = defaultdict(list)
self.by_model_dict = defaultdict(list)
self.model_enabled_dict = defaultdict(bool)
categorization_file = CategoryInstaller.get_user_file()
with open(categorization_file, 'rb') as f:
self.master_category_dict = json.load(f)
self._regenerate_model_dict()
def _regenerate_model_dict(self):
"""
regenerates self.by_model_dict which has each model name as the
key and the list of categories belonging to that model
along with the enabled mapping
"""
self.by_model_dict = defaultdict(list)
for category in self.master_category_dict:
for (model, enabled) in self.master_category_dict[category]:
self.by_model_dict[model].append(category)
self.model_enabled_dict[model] = enabled
def _populate_listbox(self):
"""
fills out the category list box
"""
uncat_str = 'Plugin Models'
self._read_category_info()
self.categorybox.Clear()
cat_list = sorted(self.master_category_dict.keys())
if uncat_str not in cat_list:
cat_list.append(uncat_str)
for category in cat_list:
if category != '':
self.categorybox.Append(category)
if self.categorybox.GetSelection() == wx.NOT_FOUND:
self.categorybox.SetSelection(0)
else:
self.categorybox.SetSelection(
self.categorybox.GetSelection())
# self._on_change_cat(None)
def _on_change_cat(self, event):
"""
Callback for category change action
"""
self.model_name = None
category = self.categorybox.GetStringSelection()
if category is None:
return
self.model_box.Clear()
if category == 'Plugin Models':
for model in self.model_list_box[category]:
str_m = str(model).split(".")[0]
self.model_box.Append(str_m)
else:
for (model, enabled) in sorted(self.master_category_dict[category],
key=lambda name: name[0]):
if(enabled):
self.model_box.Append(model)
def _fill_model_sizer(self, sizer):
"""
fill sizer containing model info
"""
# This should only be called once per fit tab
# print "==== Entering _fill_model_sizer"
# Add model function Details button in fitpanel.
# The following 3 lines are for Mac. Let JHC know before modifying...
title = "Model"
self.formfactorbox = None
self.multifactorbox = None
self.mbox_description = wx.StaticBox(self, wx.ID_ANY, str(title))
boxsizer1 = wx.StaticBoxSizer(self.mbox_description, wx.VERTICAL)
sizer_cat = wx.BoxSizer(wx.HORIZONTAL)
self.mbox_description.SetForegroundColour(wx.RED)
wx_id = self._ids.next()
self.model_func = wx.Button(self, wx_id, 'Help', size=(80, 23))
self.model_func.Bind(wx.EVT_BUTTON, self.on_function_help_clicked,
id=wx_id)
self.model_func.SetToolTipString("Full Model Function Help")
wx_id = self._ids.next()
self.model_help = wx.Button(self, wx_id, 'Description', size=(80, 23))
self.model_help.Bind(wx.EVT_BUTTON, self.on_model_help_clicked,
id=wx_id)
self.model_help.SetToolTipString("Short Model Function Description")
wx_id = self._ids.next()
self.model_view = wx.Button(self, wx_id, "Show 2D", size=(80, 23))
self.model_view.Bind(wx.EVT_BUTTON, self._onModel2D, id=wx_id)
hint = "toggle view of model from 1D to 2D or 2D to 1D"
self.model_view.SetToolTipString(hint)
cat_set_box = wx.StaticBox(self, wx.ID_ANY, 'Category')
sizer_cat_box = wx.StaticBoxSizer(cat_set_box, wx.HORIZONTAL)
sizer_cat_box.SetMinSize((200, 50))
self.categorybox = wx.ComboBox(self, wx.ID_ANY,
style=wx.CB_READONLY)
self.categorybox.SetToolTip(wx.ToolTip("Select a Category/Type"))
self._populate_listbox()
wx.EVT_COMBOBOX(self.categorybox, wx.ID_ANY, self._show_combox)
# self.shape_rbutton = wx.RadioButton(self, wx.ID_ANY, 'Shapes',
# style=wx.RB_GROUP)
# self.shape_indep_rbutton = wx.RadioButton(self, wx.ID_ANY,
# "Shape-Independent")
# self.struct_rbutton = wx.RadioButton(self, wx.ID_ANY,
# "Structure Factor ")
# self.plugin_rbutton = wx.RadioButton(self, wx.ID_ANY,
# "Uncategorized")
# self.Bind(wx.EVT_RADIOBUTTON, self._show_combox,
# id=self.shape_rbutton.GetId())
# self.Bind(wx.EVT_RADIOBUTTON, self._show_combox,
# id=self.shape_indep_rbutton.GetId())
# self.Bind(wx.EVT_RADIOBUTTON, self._show_combox,
# id=self.struct_rbutton.GetId())
# self.Bind(wx.EVT_RADIOBUTTON, self._show_combox,
# id=self.plugin_rbutton.GetId())
# MAC needs SetValue
show_cat_button = wx.Button(self, wx.ID_ANY, "Modify")
cat_tip = "Modify model categories \n"
cat_tip += "(also accessible from the menu bar)."
show_cat_button.SetToolTip(wx.ToolTip(cat_tip))
show_cat_button.Bind(wx.EVT_BUTTON, self._on_modify_cat)
sizer_cat_box.Add(self.categorybox, 1, wx.RIGHT, 3)
sizer_cat_box.Add((10, 10))
sizer_cat_box.Add(show_cat_button)
# self.shape_rbutton.SetValue(True)
sizer_radiobutton = wx.GridSizer(2, 2, 5, 5)
# sizer_radiobutton.Add(self.shape_rbutton)
# sizer_radiobutton.Add(self.shape_indep_rbutton)
sizer_radiobutton.Add((5, 5))
sizer_radiobutton.Add(self.model_view, 1, wx.RIGHT, 5)
# sizer_radiobutton.Add(self.plugin_rbutton)
# sizer_radiobutton.Add(self.struct_rbutton)
# sizer_radiobutton.Add((5,5))
sizer_radiobutton.Add(self.model_help, 1, wx.RIGHT | wx.LEFT, 5)
# sizer_radiobutton.Add((5,5))
sizer_radiobutton.Add(self.model_func, 1, wx.RIGHT, 5)
sizer_cat.Add(sizer_cat_box, 1, wx.LEFT, 2.5)
sizer_cat.Add(sizer_radiobutton)
sizer_selection = wx.BoxSizer(wx.HORIZONTAL)
mutifactor_selection = wx.BoxSizer(wx.HORIZONTAL)
self.text1 = wx.StaticText(self, wx.ID_ANY, "")
self.text2 = wx.StaticText(self, wx.ID_ANY, "P(Q)*S(Q)")
self.mutifactor_text = wx.StaticText(self, wx.ID_ANY, "No. of Shells: ")
self.mutifactor_text1 = wx.StaticText(self, wx.ID_ANY, "")
self.show_sld_button = wx.Button(self, wx.ID_ANY, "Show SLD Profile")
self.show_sld_button.Bind(wx.EVT_BUTTON, self._on_show_sld)
self.formfactorbox = wx.ComboBox(self, wx.ID_ANY, style=wx.CB_READONLY)
self.formfactorbox.SetToolTip(wx.ToolTip("Select a Model"))
if self.model is not None:
self.formfactorbox.SetValue(self.model.name)
self.structurebox = wx.ComboBox(self, wx.ID_ANY, style=wx.CB_READONLY)
self.multifactorbox = wx.ComboBox(self, wx.ID_ANY, style=wx.CB_READONLY)
self.initialize_combox()
wx.EVT_COMBOBOX(self.formfactorbox, wx.ID_ANY, self._on_select_model)
wx.EVT_COMBOBOX(self.structurebox, wx.ID_ANY, self._on_select_model)
wx.EVT_COMBOBOX(self.multifactorbox, wx.ID_ANY, self._on_select_model)
# check model type to show sizer
if self.model is not None:
print("_set_model_sizer_selection: disabled.")
# self._set_model_sizer_selection(self.model)
sizer_selection.Add(self.text1)
sizer_selection.Add((10, 5))
sizer_selection.Add(self.formfactorbox)
sizer_selection.Add((5, 5))
sizer_selection.Add(self.text2)
sizer_selection.Add((5, 5))
sizer_selection.Add(self.structurebox)
mutifactor_selection.Add((13, 5))
mutifactor_selection.Add(self.mutifactor_text)
mutifactor_selection.Add(self.multifactorbox)
mutifactor_selection.Add((5, 5))
mutifactor_selection.Add(self.mutifactor_text1)
mutifactor_selection.Add((10, 5))
mutifactor_selection.Add(self.show_sld_button)
boxsizer1.Add(sizer_cat)
boxsizer1.Add((10, 10))
boxsizer1.Add(sizer_selection)
boxsizer1.Add((10, 10))
boxsizer1.Add(mutifactor_selection)
self._set_multfactor_combobox()
self.multifactorbox.SetSelection(1)
self.show_sld_button.Hide()
sizer.Add(boxsizer1, 0, wx.EXPAND | wx.ALL, 10)
sizer.Layout()
def on_smear_helper(self, update=False):
"""
Help for onSmear if implemented
:param update: force or not to update
"""
def reset_page(self, state, first=False):
"""
reset the state if implemented
"""
def onSmear(self, event):
"""
Create a smear object if implemented
"""
def onPinholeSmear(self, event):
"""
Create a custom pinhole smear object if implemented
"""
def onSlitSmear(self, event):
"""
Create a custom slit smear object if implemented
"""
def update_slit_smear(self):
"""
called by kill_focus on pinhole TextCntrl
to update the changes if implemented
"""
def select_param(self, event):
"""
Select TextCtrl checked if implemented
"""
def set_data(self, data=None):
"""
Sets data if implemented
"""
def _is_2D(self):
"""
Check if data_name is Data2D if implemented
"""
def _on_select_model(self, event=None):
"""
call back for model selection if implemented
"""
def get_weight_flag(self):
"""
Get flag corresponding to a given weighting dI data if implemented
"""
def _set_sizer_dispersion(self):
"""
draw sizer for dispersity if implemented
"""
def get_all_checked_params(self):
"""
Found all parameters current check and add them to list of parameters
to fit if implemented
"""
def show_npts2fit(self):
"""
setValue Npts for fitting if implemented
"""
def _onModel2D(self, event):
"""
toggle view of model from 1D to 2D or 2D to 1D if implemented
"""
class ModelTextCtrl(wx.TextCtrl):
"""
Text control for model and fit parameters.
Binds the appropriate events for user interactions.
Default callback methods can be overwritten on initialization
:param kill_focus_callback: callback method for EVT_KILL_FOCUS event
:param set_focus_callback: callback method for EVT_SET_FOCUS event
:param mouse_up_callback: callback method for EVT_LEFT_UP event
:param text_enter_callback: callback method for EVT_TEXT_ENTER event
"""
# Set to True when the mouse is clicked while whole string is selected
full_selection = False
# Call back for EVT_SET_FOCUS events
_on_set_focus_callback = None
def __init__(self, parent, id=-1,
value=wx.EmptyString,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=0,
validator=wx.DefaultValidator,
name=wx.TextCtrlNameStr,
kill_focus_callback=None,
set_focus_callback=None,
mouse_up_callback=None,
text_enter_callback=None):
wx.TextCtrl.__init__(self, parent, id, value, pos,
size, style, validator, name)
# Bind appropriate events
self._on_set_focus_callback = parent.onSetFocus \
if set_focus_callback is None else set_focus_callback
self.Bind(wx.EVT_SET_FOCUS, self._on_set_focus)
self.Bind(wx.EVT_KILL_FOCUS, self._silent_kill_focus
if kill_focus_callback is None else kill_focus_callback)
self.Bind(wx.EVT_TEXT_ENTER, parent._onparamEnter
if text_enter_callback is None else text_enter_callback)
if not ON_MAC:
self.Bind(wx.EVT_LEFT_UP, self._highlight_text
if mouse_up_callback is None else mouse_up_callback)
def _on_set_focus(self, event):
"""
Catch when the text control is set in focus to highlight the whole
text if necessary
:param event: mouse event
"""
event.Skip()
self.full_selection = True
return self._on_set_focus_callback(event)
def _highlight_text(self, event):
"""
Highlight text of a TextCtrl only if no text has been selected
:param event: mouse event
"""
# Make sure the mouse event is available to other listeners
event.Skip()
control = event.GetEventObject()
if self.full_selection:
self.full_selection = False
# Check that we have a TextCtrl
if issubclass(control.__class__, wx.TextCtrl):
# Check whether text has been selected,
# if not, select the whole string
(start, end) = control.GetSelection()
if start == end:
control.SetSelection(-1, -1)
def _silent_kill_focus(self, event):
"""
Save the state of the page
"""
event.Skip()
# pass
| lewisodriscoll/sasview | src/sas/sasgui/perspectives/fitting/basepage.py | Python | bsd-3-clause | 145,362 | ["Gaussian"] | 9d96cb66f9d3e8cc8400a0bef815e8b124667757cc666d9ed7babb2ad43a2710 |
#
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2020 Wolfram G. Nöhring (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .io import EAMParameters
import numpy as np
def average_potential(
concentrations,
parameters,
F,
f,
rep,
kind="eam/alloy",
avg_atom="A",
atomic_number=999,
crystal_structure="unknown",
lattice_constant=1.0,
):
r"""Generate Average-atom potential
The Average-atom (A-atom) potential is a mean-field approximation
for random alloys, see Ref. `1`_. The purpose is to replace the true
elements by a single fictitious element, the A-atom. A configuration
of A-atoms yields approximately the same potential energy as the
corresponding random alloy configuration. Other average material
properties, e.g. the elastic constants, are reproduced as well.
For a full derivation, see Ref. `1`.
The A-atom potential has standard EAM form, i.e. it can be tabulated
just like any other EAM potential. The potential functions are simply
the concentration-weighted averages of the pure element functions:
.. math::
\phi_{AA}\left(r_{\gamma\delta}\right)
&= \sum_{X}^{N_T}\sum_{Y}^{N_T} c_{X}c_{Y}
\phi_{XY}\left(r_{\gamma\delta}\right) \quad\text{(pair potential)}, \\
U_{A}\left(\rho_\gamma\right)
&= \sum_{X}^{N_T}c_{X}U_{X}\left(\rho_\gamma\right) \quad\text{(embedding energy)}, \\
g_A\left(r_{\gamma\delta}\right)
&= \sum_{X}^{N_T}c_X g_X\left(r_{\gamma\delta}\right)\quad\text{(electron density)},\;\text{and}\\
m_A &= \sum_{X}^{N_T}c_X m_X\quad\text{(mass)}.
.. note::
Currently, only eam/alloy-style potentials can be averaged.
The extension to eam/fs should be straightforward, however.
Parameters
----------
concentrations: array_like
concentrations of the elements in the A-atom
parameters: EAMParameters
EAM potential parameters
F : array_like
tabulated embedding energy functionals
f : array_like
tabulated electron density functions
rep : array_like
tabulated pair potential energy functions
Returns
-------
parameters : EAMParameters
EAM potential parameters
new_F : array_like
tabulated embedding energy functionals, including A-atom functional
new_f : array_like
tabulated electron density functions, including A-atom function(s)
new_rep : array_like
tabulated pair potential energy functions, including pairs with A-atom
Examples
--------
>>> from matscipy.calculators.eam import io, average_atom
>>> source, parameters, F, f, rep = io.read_eam(
>>> "ZrCu.onecolumn.eam.alloy"
>>> )
>>> concentrations = [0.5, 0.5]
>>> (new_parameters, new_F, new_f, new_rep) = average_atom.average_potential(
>>> concentrations, parameters, F, f, rep
>>> )
>>> composition = " ".join(
>>> [str(c * 100.0) + "% {0},".format(e) for c, e in zip(concentrations, parameters.symbols)]
>>> )
>>> composition = composition.rstrip(",")
>>> source += ", averaged for composition {0}".format(composition)
>>> io.write_eam(
>>> source,
>>> new_parameters,
>>> new_F,
>>> new_f,
>>> new_rep,
>>> "ZrCu.onecolumn.averaged.eam.alloy",
>>> kind="eam/alloy",
>>> )
Read an EAM potential table for two elements in eam/alloy format, and create
a new table with additional A-atom functions for the equicomposition alloy.
References
----------
.. [1] Varvenne, C., Luque, A., Nöhring, W. G. & Curtin, W. A.
Average-atom interatomic potential for random alloys.
Physical Review B 93, 104201 (2016).
Notes
-----
Notation:
* :math:`N` Number of atoms
* :math:`N_T` Number of elements
* :math:`r_{\nu\mu{}}` Pair distance of atoms :math:`\nu` and :math:`\mu`
* :math:`\phi_{\nu\mu}(r_{\nu\mu{}})` Pair potential energy of atoms :math:`\nu` and :math:`\mu`
* :math:`\rho_{\nu}` Total electron density of atom :math:`\nu`
* :math:`U_\nu(\rho_\nu)` Embedding energy of atom :math:`\nu`
* :math:`g_{\delta}\left(r_{\gamma\delta}\right) \equiv g_{\gamma\delta}` Contribution from atom :math:`\delta` to :math:`\rho_\gamma`
* :math:`m_\nu` mass of atom :math:`\nu`
"""
if kind == "eam" or kind == "eam/fs":
raise NotImplementedError
assert np.isclose(np.sum(concentrations), 1)
symbols = [s for s in parameters.symbols] + [avg_atom]
atomic_numbers = np.r_[parameters.atomic_numbers, atomic_number]
atomic_masses = np.r_[
parameters.atomic_masses, np.average(parameters[2], weights=concentrations)
]
lattice_constants = np.r_[parameters.lattice_constants, lattice_constant]
crystal_structures = np.r_[
parameters.crystal_structures, np.array(crystal_structure)
]
new_parameters = EAMParameters(
symbols,
atomic_numbers,
atomic_masses,
lattice_constants,
crystal_structures,
parameters.number_of_density_grid_points,
parameters.number_of_distance_grid_points,
parameters.density_grid_spacing,
parameters.distance_grid_spacing,
parameters.cutoff,
)
# Average embedding energy and electron density functions
new_F = np.r_[F, np.zeros((1, F.shape[1]), dtype=F.dtype)]
new_f = np.r_[f, np.zeros((1, f.shape[1]), dtype=f.dtype)]
new_F[-1, :] = np.average(F, axis=0, weights=concentrations)
new_f[-1, :] = np.average(f, axis=0, weights=concentrations)
# Average the pair potential
new_rep = np.concatenate(
(rep, np.zeros((rep.shape[0], 1, rep.shape[2]), dtype=rep.dtype)), axis=1
)
new_rep = np.concatenate(
(new_rep, np.zeros((1, new_rep.shape[1], rep.shape[2]), dtype=rep.dtype)),
axis=0,
)
# Consider the matrix Vij of pair functions
# i: rows, each corresponding to an element
# j: columns, each corresponding to an element
# Each element corresponds to the function
# for the atom pair i,j
#
# Compute the last row of the new Vij matrix, excluding the
# value on the diagonal. Each column j corresponds to the
# interaction of a particular type j with the homogeneous material.
new_rep[-1, :-1, :] = np.average(rep, axis=0, weights=concentrations)
new_rep[:-1, -1, :] = new_rep[-1, :-1, :]
# Compute the last potential on the diagonal. This is the
# interaction of the homogeneous material with itself.
column = new_rep[:-1, -1, :]
new_rep[-1, -1, :] = np.average(column, axis=0, weights=concentrations)
return new_parameters, new_F, new_f, new_rep
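# Editor's note: a toy check (invented numbers, not part of matscipy) of
# the concentration-weighted averaging rules in the docstring above: the
# A-atom embedding function is the weighted mean of the element functions.
if __name__ == "__main__":
    c = np.array([0.25, 0.75])  # concentrations of a hypothetical binary alloy
    F_demo = np.array([[1.0, 2.0], [3.0, 4.0]])  # two tabulated embedding curves
    F_avg = np.average(F_demo, axis=0, weights=c)
    assert np.allclose(F_avg, 0.25 * F_demo[0] + 0.75 * F_demo[1])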
| libAtoms/matscipy | matscipy/calculators/eam/average_atom.py | Python | lgpl-2.1 | 7,507 | ["Matscipy"] | 9f8650b8aeebd0008769183e30566b32ed5b65f6b3495c845e480cba99f6aa3d |
|
# -*- coding: utf-8 -*-
#
# __init__.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""PyNEST - Python interface for the NEST simulator
* ``nest.helpdesk()`` opens the NEST documentation in your browser.
* ``nest.__version__`` displays the NEST version.
* ``nest.Models()`` shows all available neuron, device and synapse models.
* ``nest.help('model_name')`` displays help for the given model, e.g., ``nest.help('iaf_psc_exp')``
* To get help on functions in the ``nest`` package, use Python's ``help()`` function
or IPython's ``?``, e.g.
- ``help(nest.Create)``
- ``nest.Connect?``
For more information visit https://www.nest-simulator.org.
"""
import sys
if sys.version_info[0] == 2:
msg = "Python 2 is no longer supported. Please use Python >= 3.6."
raise Exception(msg)
from . import ll_api # noqa
from .ll_api import set_communicator # noqa
from . import pynestkernel as kernel # noqa
from .hl_api import * # noqa
from . import random # noqa
from . import math # noqa
from . import spatial_distributions # noqa
from . import logic # noqa
# spatial needs to be imported last because of documentation generation
from . import spatial # noqa
try:
from . import server # noqa
except ImportError:
pass
__version__ = ll_api.sli_func("statusdict /version get")
| lekshmideepu/nest-simulator | pynest/nest/__init__.py | Python | gpl-2.0 | 2,063 | ["NEURON", "VisIt"] | f8f4785890d73e909bc92a8280fac0ed3faab2844a8911b49a24b3984b2fe304 |
|
#! /usr/bin/python
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
__author__="jon"
__date__ ="$Apr 12, 2014 12:57:32 PM$"
import flickrapi
import json
import urllib
import os.path
from pprint import pprint
from pygeocoder import Geocoder
from slugify import slugify
import os, errno
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
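# Editor's note: on Python >= 3.2 the helper above reduces to the standard
# library call (sketch; the EAFP version is kept since this script targets
# Python 2):
#
#   os.makedirs(path, exist_ok=True)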
api_key = "da3cea7cccd3240398c6af0630474dd7"
flickr = flickrapi.FlickrAPI(api_key, cache=True)
cities = ["Des Moines,United States","Nipomo,United States","Las Vegas,United States","Paso Robles,United States","Atascadero,United States","San Luis Obispo,United States","Morro Bay,United States","Red Hills,United States","El Paso,United States","Springfield,United States","Laredo,United States","Austin,United States","Beaumont-Port Arthur,United States","Houston-Galveston-Brazoria,United States","Oxnard,United States","Waco-Killeen,United States","Phoenix,United States","Tyler-Longview-Marshall,United States","Columbus,United States","Brownsville-McAllen,United States","Dallas-Fort Worth,United States","San Antonio,United States","Youngstown,United States","Simi Valley,United States","Lake Elsinore,United States","Louisville,United States","Central LA CO,United States","Grand Junction,United States","Norco/Corona,United States"]
limit = 30
for city in cities:
sum = 0
results = Geocoder.geocode(city)
r_lat = str(results[0].coordinates[0])
r_long = str(results[0].coordinates[1])
name_city = slugify(str(results[0]))
print "retrieving data for city " + name_city + " lat " + r_lat + " long " + r_long
base_meta_dir = "metadata/"
base_img_dir = "photos/"
mkdir_p(base_meta_dir)
mkdir_p(base_img_dir)
for index in range(1,3):
if sum >= limit:
break
photos = flickr.photos_search(tags='cielo, sky, paisaje, landscape', has_geo=1, page=index, per_page= 100, lat=r_lat, lon=r_long, radius='20')
for photo in photos[0]:
sum = sum + 1
if sum > limit:
break
if os.path.isfile(base_meta_dir+photo.attrib['id']+'.json'):
continue
print("downloading file "+photo.attrib['id'] + " to dir " + name_city)
photo_location = flickr.photos_geo_getLocation(photo_id=photo.attrib['id'])
photo_size = flickr.photos_getSizes(photo_id=photo.attrib['id'])
imgloc = ""
for indx in range(6, 1, -1):
#download image
try:
imgloc = photo_size[0][indx].attrib['source']
break
except:
continue
data_json = {
"id": photo.attrib['id'],
"title": photo.attrib['title'],
"lat": photo_location[0][0].attrib['latitude'],
"long": photo_location[0][0].attrib['longitude'],
"imgloc": imgloc
}
#save image file
with open(base_meta_dir+photo.attrib['id']+'.json', 'w') as outfile:
json.dump(data_json, outfile)
#urllib.urlretrieve(imgloc, base_img_dir+photo.attrib['id']+'.jpg')
# exit(0)
|
wayra-nita/aqi-platform
|
crawler/src/base.py
|
Python
|
gpl-3.0
| 3,303
|
[
"COLUMBUS"
] |
0d3be1c488c2ce3172a5c8c2e073c58e841f1bbe34fca8088e95b86d92e2dd09
|
from __future__ import division
import MDAnalysis as mda
import datreant as dtr
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pdbionsurvey.coordination
import json
import seaborn as sns
import scipy
import mmtf
import pdbionsurvey.collection
from matplotlib.ticker import MaxNLocator
# import pdbionsurvey.analysis
from os import path as pth
import os
import shutil
from glob import glob
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
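# NOTE: this module assumes notebook-style globals defined elsewhere
# (e.g. b, csvpath, impath, propernames, bulkdensity, shells, ATOMNAMES).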
def individualdee(prot, ionname='NA', atomname='O', bs=.1, mindistance=True, maxdistance=15, ts=1):
u = mda.Universe(prot[prot.name+'.pdb'].abspath)
ions = u.select_atoms('name '+ionname+' and resname '+ionname)
for ion in ions:
frames = []
for csv in prot.glob('coordination/'+ionname.upper()+'/'+atomname+'/*.csv'):
df = pd.read_csv(csv.abspath)
frames.append(df)
dataframe = pd.concat(frames)
h, e = np.histogram(dataframe['distance'], bins=np.arange(0, max(dataframe['distance']), bs))
m = .5 * (e[:-1] + e[1:])
V = 4. / 3 * np.pi * (e[1:] ** 3 - e[:-1] ** 3)
density = h / V
gdf = pd.DataFrame({'radius': m, 'density': density}, columns=['radius', 'density'])
gdf = gdf[gdf['radius'] < maxdistance]
if not mindistance:
gdf.to_csv(csvpath.abspath+'individual/d-'+prot.name+'-'+ionname.upper()+'-'+str(ion.resnum)+'-'+atomname+'-'+str(int(bs*100))+'pmbins.csv')
else:
mindistance = .5
gdf['density'] = [gdf['density'][i] if gdf['radius'][i] > mindistance else 0 for i in range(len(gdf['density']))]
gdf.to_csv(csvpath.abspath+'individual/d-'+prot.name+'-'+ionname.upper()+'-'+str(ion.resnum)+'-'+atomname+'-'+str(int(bs*100))+'pmbins-withmin.csv')
def make_dees(ionname, atomnames=ATOMNAMES, bs=.1, mindistance=True, maxdistance=15, ts=1):
for atomname in atomnames:
print('started d '+ionname+' with '+atomname)
gdf = pdbionsurvey.coordination.gee(b, ionname, atomname=atomname, binsize=bs)
gdf = gdf[gdf['radius'] < maxdistance]
print('made d '+ionname+' with '+atomname)
if not mindistance:
gdf.to_csv(csvpath.abspath+'d-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins.csv')
else:
mindistance = .5
gdf['density'] = [gdf['density'][i] if gdf['radius'][i]>mindistance else 0 for i in range(len(gdf['density']))]
gdf.to_csv(csvpath.abspath+'d-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-withmin.csv')
print('saved d '+ionname+' with '+atomname)
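# getbins: choose an axis tick spacing of roughly one tenth of `num`,
# snapped to .5, .2 or .1 when the range is small.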
def getbins(num):
ts = int(num/10)
if ts == 0:
ts = num/10
if ts < 1 and ts >= .5:
ts = .5
elif ts < .5 and ts >= .2:
ts = .2
elif ts < .2:
ts = .1
return ts
def make_gees(ionname, atomname='O', maxdistance=15, bs=.1, bundle=b, path=csvpath):
fig = plt.figure(figsize=(4,3))
ax = fig.add_subplot(111)
fig.set_tight_layout(True)
fig1 = plt.figure(figsize=(4,3))
ax1 = fig1.add_subplot(111)
fig1.set_tight_layout(True)
print('started g '+ionname+' with '+atomname)
gdf = pd.read_csv(path.abspath+'d-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-withmin.csv')
gdf['density'] = [gdf['density'][i] if gdf['radius'][i] > .5 else 0 for i in range(len(gdf['density']))]  # zero out densities below the .5 A minimum
gdf = gdf[gdf['radius'] < maxdistance]
ax.plot(gdf['radius'], gdf['density'], label=propernames[ionname], linewidth=2)
yts = getbins(max(gdf['density']))
ts = getbins(max(gdf['radius']))
ax.set_xlabel(r'distance ($\mathrm{\AA}$)')
ax.set_ylabel(r'density ($\mathrm{\AA}^{-3}$)')
ax.xaxis.set_major_locator(MultipleLocator(5*ts))
ax.xaxis.set_minor_locator(MultipleLocator(ts))
ax1.yaxis.set_major_locator(MultipleLocator(.005))
ax1.yaxis.set_minor_locator(MultipleLocator(yts*.005))
sns.despine(offset=10, ax=ax)
ax.legend()
ax.figure.savefig(impath.abspath+'d-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-'+str(maxdistance)+'.png')
ax.figure.savefig(impath.abspath+'d-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-'+str(maxdistance)+'.pdf')
y = gdf['density']/bulkdensity[atomname]
ax1.plot(gdf['radius'], y, label=propernames[ionname], linewidth=2)
yts = getbins(max(y))
ts = getbins(max(gdf['radius']))
ax1.set_xlabel(r'distance ($\mathrm{\AA}$)')
ax1.set_ylabel(r'$g(r)$')
ax1.xaxis.set_major_locator(MultipleLocator(5*ts))
ax1.xaxis.set_minor_locator(MultipleLocator(ts))
ax1.yaxis.set_major_locator(MultipleLocator(5*yts))
ax1.yaxis.set_minor_locator(MultipleLocator(yts))
ax1.set_xlim(0, maxdistance)
ax1.set_ylim(0, max(y))
sns.despine(offset=10, ax=ax1)
ax1.legend()
ax1.plot(gdf['radius'], np.array([1 for i in range(len(gdf['radius']))]), color=(0,0,0), ls='dotted', alpha=.5)
ax1.figure.savefig(impath.abspath+'g-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-'+str(maxdistance)+'.png')
ax1.figure.savefig(impath.abspath+'g-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-'+str(maxdistance)+'.pdf')
df = pd.DataFrame({'radius': gdf['radius'], 'density': y}, columns=['radius', 'density'])
df.to_csv(csvpath.abspath+'g-'+ionname+'-'+atomname+'-'+str(int(bs*100))+'pmbins-'+str(maxdistance)+'.csv')
print('finished g '+ionname+' with '+atomname)
IONNAME = 'CL'
ATOMNAME = 'O'
def individualen(prot, ionname=IONNAME, atomname=ATOMNAME, bs=.1, mindistance=True, ts=1):
shellsize = shells['first min'][(IONNAME, ATOMNAME)]
maxdistance = shellsize
u = mda.Universe(prot[prot.name+'.pdb'].abspath)
coordnums = []
for csv in prot.glob('coordination/'+ionname.upper()+'/'+atomname+'/*.csv'):
df = pd.read_csv(csv.abspath)
try:
gdf = df[df['distance'] < maxdistance]
if mindistance:
gdf = gdf[gdf['distance'] > .5]
coordnum = len(gdf['distance'])
except TypeError:
coordnum = 'N/A'
continue
coordnums.append(coordnum)
atomnum = csv.name[:-4]
prot.categories['coordnum_'+atomnum] = coordnum
prot.categories[IONNAME+'_'+ATOMNAME+'_coordnums'] = str(coordnums)
return coordnums
|
Becksteinlab/PDB_Ion_Survey
|
src/pdbionsurvey/bulkcoord.py
|
Python
|
gpl-3.0
| 6,385
|
[
"MDAnalysis"
] |
e621b08c8a8c7385f95f511a5bfe72a3f586abc8063afaff7ce71f7c9642d84b
|
from __future__ import division
from warnings import warn
import numpy as np
from scipy.sparse import csr_matrix
from pybasicbayes.util.general import objarray
from pylds.lds_messages_interface import info_E_step, info_sample, kalman_info_filter, kalman_filter, E_step
# TODO on instantiating, maybe gaussian states should be resampled
# TODO make niter an __init__ arg instead of a method arg
###########
# bases #
###########
class _LDSStates(object):
def __init__(self, model, T=None, data=None, inputs=None, stateseq=None,
initialize_from_prior=False,
initialize_to_noise=True):
self.model = model
self.T = T if T is not None else data.shape[0]
self.data = data
self.inputs = np.zeros((self.T, 0)) if inputs is None else inputs
self._normalizer = None
if stateseq is not None:
self.gaussian_states = stateseq
elif initialize_from_prior:
self.generate_states()
elif initialize_to_noise:
self.gaussian_states = np.random.normal(size=(self.T, self.D_latent))
elif data is not None:
self.resample()
else:
raise Exception("Invalid options. Must specify how states are initialized.")
### Basics
def log_likelihood(self):
if self._normalizer is None:
self._normalizer, _, _ = kalman_info_filter(*self.info_params)
# self._normalizer += self._info_extra_loglike_terms(
# *self.extra_info_params,
# isdiag=self.diagonal_noise)
return self._normalizer
def generate_states(self):
# Generate from the prior and raise exception if unstable
T, n = self.T, self.D_latent
gss = np.empty((T,n),dtype='double')
gss[0] = np.random.multivariate_normal(self.mu_init, self.sigma_init)
for t in range(1,T):
gss[t] = self.dynamics_distn.\
rvs(x=np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])),
return_xy=False)
assert np.all(np.isfinite(gss[t])), "LDS appears to be unstable!"
self.gaussian_states = gss
def generate_obs(self):
# Go through each time bin, get the discrete latent state,
# use that to index into the emission_distns to get samples
T, p = self.T, self.D_emission
ed = self.emission_distn
gss = self.gaussian_states
data = np.empty((T,p),dtype='double')
for t in range(self.T):
data[t] = \
ed.rvs(x=np.hstack((gss[t][None, :], self.inputs[t][None,:])),
return_xy=False)
return data
def sample_predictions(self, Tpred, inputs=None, states_noise=False, obs_noise=False):
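# Condition on the observed data with a Kalman filter pass, then roll the
# dynamics forward Tpred steps, optionally adding state and observation noise.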
inputs = np.zeros((Tpred, self.D_input)) if inputs is None else inputs
_, filtered_mus, filtered_sigmas = kalman_filter(
self.mu_init, self.sigma_init,
self.A, self.B, self.sigma_states,
self.C, self.D, self.sigma_obs,
self.inputs, self.data)
init_mu = self.A.dot(filtered_mus[-1]) + self.B.dot(self.inputs[-1])
init_sigma = self.sigma_states + self.A.dot(
filtered_sigmas[-1]).dot(self.A.T)
randseq = np.zeros((Tpred - 1, self.D_latent))
if states_noise:
L = np.linalg.cholesky(self.sigma_states)
randseq += np.random.randn(Tpred - 1, self.D_latent).dot(L.T)
states = np.empty((Tpred, self.D_latent))
if states_noise:
states[0] = np.random.multivariate_normal(init_mu, init_sigma)
else:
states[0] = init_mu
for t in range(1, Tpred):
states[t] = self.A.dot(states[t - 1]) + \
self.B.dot(inputs[t - 1]) + \
randseq[t - 1]
obs = states.dot(self.C.T) + inputs.dot(self.D.T)
if obs_noise:
L = np.linalg.cholesky(self.sigma_obs)
obs += np.random.randn(Tpred, self.D_emission).dot(L.T)
return obs
## convenience properties
@property
def D_latent(self):
return self.dynamics_distn.D_out
@property
def D_input(self):
return self.dynamics_distn.D_in - self.dynamics_distn.D_out
@property
def D_emission(self):
return self.emission_distn.D_out
@property
def dynamics_distn(self):
return self.model.dynamics_distn
@property
def emission_distn(self):
return self.model.emission_distn
@property
def diagonal_noise(self):
return self.model.diagonal_noise
@property
def mu_init(self):
return self.model.mu_init
@property
def sigma_init(self):
return self.model.sigma_init
@property
def A(self):
return self.dynamics_distn.A[:, :self.D_latent]
@property
def B(self):
return self.dynamics_distn.A[:, self.D_latent:]
@property
def sigma_states(self):
return self.dynamics_distn.sigma
@property
def C(self):
return self.emission_distn.A[:,:self.D_latent]
@property
def D(self):
return self.emission_distn.A[:, self.D_latent:]
@property
def sigma_obs(self):
return self.emission_distn.sigma
@property
def _kwargs(self):
return dict(super(_LDSStates, self)._kwargs,
gaussian_states=self.gaussian_states)
@property
def info_init_params(self):
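# Information form of the initial-state prior: J = Sigma_0^{-1}, h = J mu_0,
# plus the corresponding Gaussian log normalizer.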
J_init = np.linalg.inv(self.sigma_init)
h_init = np.linalg.solve(self.sigma_init, self.mu_init)
log_Z_init = -1. / 2 * h_init.dot(np.linalg.solve(J_init, h_init))
log_Z_init += 1. / 2 * np.linalg.slogdet(J_init)[1]
log_Z_init -= self.D_latent / 2. * np.log(2 * np.pi)
return J_init, h_init, log_Z_init
@property
def info_dynamics_params(self):
A = self.A
B = self.B
Q = self.sigma_states
# Get the pairwise potentials
# TODO: Check for diagonal before inverting
J_pair_22 = np.linalg.inv(Q)
J_pair_21 = -J_pair_22.dot(A)
J_pair_11 = A.T.dot(-J_pair_21)
# Check if diagonal and avoid inverting D_obs x D_obs matrix
mBTQiA = B.T.dot(J_pair_21)
BTQi = B.T.dot(J_pair_22)
h_pair_1 = self.inputs[:-1].dot(mBTQiA)
h_pair_2 = self.inputs[:-1].dot(BTQi)
log_Z_pair = -1. / 2 * np.linalg.slogdet(Q)[1]
log_Z_pair -= self.D_latent / 2. * np.log(2 * np.pi)
hJh_pair = B.T.dot(np.linalg.solve(Q, B))
log_Z_pair -= 1. / 2 * np.einsum('ij,ti,tj->t', hJh_pair, self.inputs[:-1], self.inputs[:-1])
return J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair
@property
def info_emission_params(self):
C = self.C
centered_data = self.data - self.inputs.dot(self.D.T)
# Observations
log_Z_node = -self.D_emission / 2. * np.log(2 * np.pi) * np.ones(self.T)
if self.diagonal_noise:
# Use the fact that the diagonal regression prior is factorized
rsq = self.emission_distn.sigmasq_flat
RinvC = (1/rsq)[:,None] * C
J_node = C.T.dot(RinvC)
h_node = centered_data.dot(RinvC)
log_Z_node -= 1./2 * np.sum(np.log(rsq))
log_Z_node -= 1./2 * np.sum(centered_data**2 * 1./rsq, axis=1)
else:
Rinv = np.linalg.inv(self.sigma_obs)
RinvC = Rinv.dot(C)
J_node = C.T.dot(RinvC)
h_node = centered_data.dot(RinvC)
log_Z_node += 1./2 * np.linalg.slogdet(Rinv)[1]
log_Z_node -= 1./2 * np.einsum('ij,ti,tj->t', Rinv,
centered_data, centered_data)
return J_node, h_node, log_Z_node
@property
def info_params(self):
return self.info_init_params + self.info_dynamics_params + self.info_emission_params
def info_filter(self):
self._normalizer, filtered_Js, filtered_hs = \
kalman_info_filter(*self.info_params)
return filtered_Js, filtered_hs
def kalman_filter(self):
self._normalizer, filtered_mus, filtered_sigmas = kalman_filter(
self.mu_init, self.sigma_init,
self.A, self.B, self.sigma_states,
self.C, self.D, self.sigma_obs,
self.inputs, self.data)
# Update the normalization constant
# self._gaussian_normalizer += self._info_extra_loglike_terms(
# *self.extra_info_params,
# isdiag=self.diagonal_noise)
return filtered_mus, filtered_sigmas
def smooth(self):
# Use the info E step because it can take advantage of diagonal noise
# The standard E step could but we have not implemented it
self.info_E_step()
return self.smoothed_mus.dot(self.C.T) + self.inputs.dot(self.D.T)
### Expectations
def E_step(self):
return self.info_E_step()
def std_E_step(self):
self._normalizer, self.smoothed_mus, self.smoothed_sigmas, \
E_xtp1_xtT = E_step(
self.mu_init, self.sigma_init,
self.A, self.B, self.sigma_states,
self.C, self.D, self.sigma_obs,
self.inputs, self.data)
self._set_expected_stats(
self.smoothed_mus, self.smoothed_sigmas, E_xtp1_xtT)
def info_E_step(self):
self._normalizer, self.smoothed_mus, \
self.smoothed_sigmas, E_xtp1_xtT = \
info_E_step(*self.info_params)
self._set_expected_stats(
self.smoothed_mus, self.smoothed_sigmas, E_xtp1_xtT)
def _set_expected_stats(self, smoothed_mus, smoothed_sigmas, E_xtp1_xtT):
# Get the emission stats
p, n, d, T, inputs, data = \
self.D_emission, self.D_latent, self.D_input, self.T, \
self.inputs, self.data
E_x_xT = smoothed_sigmas + self.smoothed_mus[:, :, None] * self.smoothed_mus[:, None, :]
E_x_uT = smoothed_mus[:, :, None] * self.inputs[:, None, :]
E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1].sum(0)
E_xtp1_xtp1T = E_x_xT[1:].sum(0)
E_xtp1_xtT = E_xtp1_xtT.sum(0)
E_xtp1_utT = (smoothed_mus[1:, :, None] * inputs[:-1, None, :]).sum(0)
E_xtp1_xutT = np.hstack((E_xtp1_xtT, E_xtp1_utT))
# def is_symmetric(A):
# return np.allclose(A, A.T)
# assert is_symmetric(E_xt_xtT)
# assert is_symmetric(E_xtp1_xtp1T)
self.E_dynamics_stats = np.array(
[E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, self.T - 1])
# Emission statistics
E_yyT = np.sum(data**2, axis=0) if self.diagonal_noise else data.T.dot(data)
E_yxT = data.T.dot(smoothed_mus)
E_yuT = data.T.dot(inputs)
E_yxuT = np.hstack((E_yxT, E_yuT))
self.E_emission_stats = objarray([E_yyT, E_yxuT, E_xu_xuT.sum(0), T])
######################
# algorithm mixins #
######################
class _LDSStatesGibbs(_LDSStates):
def resample(self, niter=1):
self.resample_gaussian_states()
def _init_gibbs_from_mf(self):
raise NotImplementedError # TODO
def resample_gaussian_states(self):
self._normalizer, self.gaussian_states = \
info_sample(*self.info_params)
class _LDSStatesMeanField(_LDSStates):
@property
def expected_info_dynamics_params(self):
J_pair_22, J_pair_21, J_pair_11, logdet_pair = \
self.dynamics_distn.meanfield_expectedstats()
# Compute E[B^T Q^{-1}] and E[B^T Q^{-1} A]
n = self.D_latent
E_Qinv = J_pair_22.copy("C")
E_AT_Qinv = (J_pair_21[:,:n].T).copy("C")
E_BT_Qinv = (J_pair_21[:,n:].T).copy("C")
E_AT_Qinv_A = J_pair_11[:n,:n].copy("C")
E_BT_Qinv_A = J_pair_11[n:,:n].copy("C")
E_BT_Qinv_B = J_pair_11[n:,n:].copy("C")
h_pair_1 = (-self.inputs[:-1].dot(E_BT_Qinv_A)).copy("C")
h_pair_2 = (self.inputs[:-1].dot(E_BT_Qinv)).copy("C")
log_Z_pair = 1./2 * logdet_pair * np.ones(self.T-1)
log_Z_pair -= self.D_latent / 2. * np.log(2 * np.pi)
log_Z_pair -= 1. / 2 * np.einsum('ij,ti,tj->t', E_BT_Qinv_B, self.inputs[:-1], self.inputs[:-1])
return E_AT_Qinv_A, -E_AT_Qinv, E_Qinv, h_pair_1, h_pair_2, log_Z_pair
@property
def expected_info_emission_params(self):
J_yy, J_yx, J_node, logdet_node = \
self.emission_distn.meanfield_expectedstats()
n = self.D_latent
E_Rinv = J_yy
E_Rinv_C = J_yx[:,:n].copy("C")
E_Rinv_D = J_yx[:,n:].copy("C")
E_CT_Rinv_C = (J_node[:n,:n]).copy("C")
E_DT_Rinv_C = (J_node[n:,:n]).copy("C")
E_DT_Rinv_D = (J_node[n:,n:]).copy("C")
h_node = self.data.dot(E_Rinv_C)
h_node -= self.inputs.dot(E_DT_Rinv_C)
log_Z_node = -self.D_emission / 2. * np.log(2 * np.pi) * np.ones(self.T)
log_Z_node += 1. / 2 * logdet_node
# E[(y-Du)^T R^{-1} (y-Du)]
log_Z_node -= 1. / 2 * np.einsum('ij,ti,tj->t', E_Rinv,
self.data, self.data)
log_Z_node -= 1. / 2 * np.einsum('ij,ti,tj->t', -2*E_Rinv_D,
self.data, self.inputs)
log_Z_node -= 1. / 2 * np.einsum('ij,ti,tj->t', E_DT_Rinv_D,
self.inputs, self.inputs)
return E_CT_Rinv_C, h_node, log_Z_node
@property
def expected_info_params(self):
return self.info_init_params + \
self.expected_info_dynamics_params + \
self.expected_info_emission_params
def meanfieldupdate(self):
self._mf_lds_normalizer, self.smoothed_mus, self.smoothed_sigmas, \
E_xtp1_xtT = info_E_step(*self.expected_info_params)
self._set_expected_stats(
self.smoothed_mus,self.smoothed_sigmas,E_xtp1_xtT)
def get_vlb(self):
return self._mf_lds_normalizer
def meanfield_smooth(self):
if self.diagonal_noise:
E_C, _, _, _ = self.emission_distn.mf_expectations
else:
ed = self.emission_distn
_,_,E_C,_ = ed._natural_to_standard(ed.mf_natural_hypparam)
return np.hstack((self.smoothed_mus, self.inputs)).dot(E_C.T)
####################
# states classes #
####################
class LDSStates(
_LDSStatesGibbs,
_LDSStatesMeanField):
pass
class LDSStatesMissingData(_LDSStatesGibbs, _LDSStatesMeanField):
def __init__(self, model, T=None, data=None, mask=None, **kwargs):
if mask is not None:
assert mask.shape == data.shape
self.mask = mask
elif (data is not None) and isinstance(data, np.ndarray):
if np.any(np.isnan(data)):
warn("data includes NaN's. Treating these as missing data.")
self.mask = ~np.isnan(data)
data[np.isnan(data)] = 0
else:
self.mask = np.ones_like(data, dtype=bool)
else:
self.mask = np.ones((T, model.emission_distn.D_out), dtype=bool)
super(LDSStatesMissingData, self).__init__(model, T=T, data=data, **kwargs)
@property
def info_emission_params(self):
if self.mask is None:
return super(LDSStatesMissingData, self).info_emission_params
if self.diagonal_noise:
return self._info_emission_params_diag
else:
return self._info_emission_params_dense
@property
def _info_emission_params_diag(self):
C, D = self.C, self.D
sigmasq = self.emission_distn.sigmasq_flat
J_obs = self.mask / sigmasq
centered_data = self.data - self.inputs.dot(D.T)
CCT = np.array([np.outer(cp, cp) for cp in C]).\
reshape((self.D_emission, self.D_latent ** 2))
J_node = np.dot(J_obs, CCT)
J_node = J_node.reshape((self.T, self.D_latent, self.D_latent))
# h_node = y^T R^{-1} C - u^T D^T R^{-1} C
h_node = (centered_data * J_obs).dot(C)
log_Z_node = -self.mask.sum(1) / 2. * np.log(2 * np.pi)
log_Z_node -= 1. / 2 * np.sum(self.mask * np.log(sigmasq), axis=1)
log_Z_node -= 1. / 2 * np.sum(centered_data ** 2 * J_obs, axis=1)
return J_node, h_node, log_Z_node
@property
def _info_emission_params_dense(self):
# raise Exception("This must be updated with log normalizers")
T, D_latent = self.T, self.D_latent
data, inputs, mask = self.data, self.inputs, self.mask
C, D, R = self.C, self.D, self.sigma_obs
centered_data = data - inputs.dot(D.T)
# Sloowwwwww
J_node = np.zeros((T, D_latent, D_latent))
h_node = np.zeros((T, D_latent))
log_Z_node = np.zeros(T)
for t in range(T):
m_t = mask[t].sum()
if m_t == 0:
continue
centered_data_t = centered_data[t][mask[t]]
C_t = C[mask[t]]
R_t = R[np.ix_(mask[t], mask[t])]
Rinv_t = np.linalg.inv(R_t)
J_node[t] = C_t.T.dot(Rinv_t).dot(C_t)
h_node[t] = (centered_data_t).dot(Rinv_t).dot(C_t)
log_Z_node[t] -= m_t / 2. * np.log(2 * np.pi)
log_Z_node[t] -= 1. / 2 * np.linalg.slogdet(R_t)[1]
log_Z_node[t] -= 1. / 2 * centered_data_t.dot(Rinv_t).dot(centered_data_t)
J_node = J_node.reshape((self.T, self.D_latent, self.D_latent))
return J_node, h_node, log_Z_node
@property
def expected_info_emission_params(self):
n = self.D_latent
if self.mask is None:
return super(LDSStatesMissingData, self).expected_info_emission_params
raise Exception("Mean field for masked data is not implemented correctly. We need to handle "
"inputs properly, and we need to ravel E_CCT to make the dot product work.")
if self.diagonal_noise:
E_C, E_CDCDT, E_sigmasq_inv, E_logdet_node = self.emission_distn.mf_expectations
E_C, E_D = E_C[:,:n], E_C[:,n:]
E_CCT = E_CDCDT[:n, :n]
J_obs = self.mask * E_sigmasq_inv
J_node = np.dot(J_obs, E_CCT)
h_node = (self.data * J_obs).dot(E_C)
h_node -= (self.inputs.dot(E_D.T) * J_obs).dot(E_C)
J_node = J_node.reshape((self.T, self.D_latent, self.D_latent))
log_Z_node = -self.D_emission / 2. * np.log(2 * np.pi) * np.ones(self.T)
log_Z_node += 1. / 2 * E_logdet_node
# E[(y-Du)^T R^{-1} (y-Du)]
# E[yT R^{-1}y -2 y^T R^{-1} Du + u^T D^T R^{-1} D u ]
log_Z_node -= 1./2 * (self.data * J_obs).T.dot(self.data)
log_Z_node -= 1. / 2 * np.einsum('ij,ti,tj->t', -2 * E_Rinv_D,
self.data, self.inputs)
log_Z_node -= 1. / 2 * np.einsum('ij,ti,tj->t', E_DT_Rinv_D,
self.inputs, self.inputs)
else:
raise NotImplementedError("Only supporting diagonal regression class with missing data now")
return J_node, h_node
def _set_expected_stats(self, smoothed_mus, smoothed_sigmas, E_xtp1_xtT):
if self.mask is None:
return super(LDSStatesMissingData, self).\
_set_expected_stats(smoothed_mus, smoothed_sigmas, E_xtp1_xtT)
# Get the emission stats
p, n, d, T, mask, inputs, data = \
self.D_emission, self.D_latent, self.D_input, self.T, \
self.mask, self.inputs, self.data
E_x_xT = smoothed_sigmas + self.smoothed_mus[:, :, None] * self.smoothed_mus[:, None, :]
E_x_uT = smoothed_mus[:,:,None] * self.inputs[:,None,:]
E_u_uT = self.inputs[:,:,None] * self.inputs[:,None,:]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0,2,1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1].sum(0)
E_xtp1_xtp1T = E_x_xT[1:].sum(0)
E_xt_xtT = E_x_xT[:-1].sum(0)
E_xtp1_xtT = E_xtp1_xtT.sum(0)
E_xtp1_utT = (smoothed_mus[1:,:,None] * inputs[:-1, None, :]).sum(0)
E_xtp1_xutT = np.hstack((E_xtp1_xtT, E_xtp1_utT))
def is_symmetric(A):
return np.allclose(A, A.T)
assert is_symmetric(E_xt_xtT)
assert is_symmetric(E_xtp1_xtp1T)
self.E_dynamics_stats = np.array(
[E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, self.T - 1])
# Emission statistics
E_ysq = np.sum(data**2 * mask, axis=0)
E_yxT = (data * mask).T.dot(smoothed_mus)
E_yuT = (data * mask).T.dot(inputs)
E_yxuT = np.hstack((E_yxT, E_yuT))
E_xuxuT_vec = E_xu_xuT.reshape((T, -1))
E_xuxuT = np.array([np.dot(self.mask[:, i], E_xuxuT_vec).reshape((n+d, n+d))
for i in range(p)])
Tp = np.sum(self.mask, axis=0)
self.E_emission_stats = objarray([E_ysq, E_yxuT, E_xuxuT, Tp])
class LDSStatesCountData(LDSStatesMissingData, _LDSStatesGibbs):
def __init__(self, model, data=None, mask=None, **kwargs):
super(LDSStatesCountData, self). \
__init__(model, data=data, mask=mask, **kwargs)
# Check if the emission matrix is a count regression
from pypolyagamma.distributions import _PGLogisticRegressionBase
if isinstance(self.emission_distn, _PGLogisticRegressionBase):
self.has_count_data = True
# Initialize the Polya-gamma samplers
import pypolyagamma as ppg
num_threads = ppg.get_omp_num_threads()
seeds = np.random.randint(2 ** 16, size=num_threads)
self.ppgs = [ppg.PyPolyaGamma(seed) for seed in seeds]
# Initialize auxiliary variables, omega
self.omega = np.ones((self.T, self.D_emission), dtype=float)  # np.float was removed in NumPy 1.24
else:
self.has_count_data = False
@property
def sigma_obs(self):
if self.has_count_data:
raise Exception("Count data does not have sigma_obs")
return super(LDSStatesCountData, self).sigma_obs
@property
def info_emission_params(self):
if not self.has_count_data:
return super(LDSStatesCountData, self).info_emission_params
# Otherwise, use the Polya-gamma augmentation
# log p(y_{tn} | x, om)
# = -0.5 * om_{tn} * (c_n^T x_t + d_n^T u_t + b_n)**2
# + kappa * (c_n * x_t + d_n^Tu_t + b_n)
# = -0.5 * om_{tn} * (x_t^T c_n c_n^T x_t
# + 2 x_t^T c_n d_n^T u_t
# + 2 x_t^T c_n b_n)
# + x_t^T (kappa_{tn} * c_n)
# = -0.5 x_t^T (c_n c_n^T * om_{tn}) x_t
# + x_t^T * (kappa_{tn} - d_n^T u_t * om_{tn} -b_n * om_{tn}) * c_n
#
# Thus
# J = (om * mask).dot(CCT)
# h = ((kappa - om * d) * mask).dot(C)
T, D_latent, D_emission = self.T, self.D_latent, self.D_emission
data, inputs, mask, omega = self.data, self.inputs, self.mask, self.omega
# TODO: This is hacky...
mask = self.mask if self.mask is not None else np.ones_like(self.data)
emission_distn = self.emission_distn
C = emission_distn.A[:, :D_latent]
D = emission_distn.A[:,D_latent:]
b = emission_distn.b
CCT = np.array([np.outer(cp, cp) for cp in C]).\
reshape((D_emission, D_latent ** 2))
J_node = np.dot(omega * mask, CCT)
J_node = J_node.reshape((T, D_latent, D_latent))
kappa = emission_distn.kappa_func(data)
h_node = ((kappa - omega * b.T - omega * inputs.dot(D.T)) * mask).dot(C)
# TODO: Implement the log normalizer for the Polya-gamma augmentation
# after augmentation, the normalizer includes the terms in the PG
# augmented density that do not contain x. Specifically, we have,
#
# logZ = -b(y) log 2 - log PG(omega | b(y), 0) - log c(y),
#
# where b(y) and c(y) come from the count distribution. The hard part is
# that the PG density is expensive to evaluate. For now, we will just
# ignore all these terms.
log_Z_node = np.zeros(self.T)
return J_node, h_node, log_Z_node
@property
def expected_info_emission_params(self):
if self.has_count_data:
raise NotImplementedError("Mean field with count observations is not yet supported")
return super(LDSStatesCountData, self).expected_info_emission_params
def log_likelihood(self):
if self.has_count_data:
ll = self.emission_distn.log_likelihood(
(np.hstack((self.gaussian_states, self.inputs)), self.data),
mask=self.mask).sum()
return ll
else:
return super(LDSStatesCountData, self).log_likelihood()
def resample(self, niter=1):
self.resample_gaussian_states()
if self.has_count_data:
self.resample_auxiliary_variables()
def resample_auxiliary_variables(self):
C, D, ed = self.C, self.D, self.emission_distn
psi = self.gaussian_states.dot(C.T) + self.inputs.dot(D.T) + ed.b.T
b = ed.b_func(self.data)
import pypolyagamma as ppg
ppg.pgdrawvpar(self.ppgs, b.ravel(), psi.ravel(), self.omega.ravel())
def smooth(self):
if not self.has_count_data:
return super(LDSStatesCountData, self).smooth()
X = np.column_stack((self.gaussian_states, self.inputs))
mean = self.emission_distn.mean(X)
return mean
class LDSStatesZeroInflatedCountData(LDSStatesMissingData, _LDSStatesGibbs):
"""
In many cases, the observation dimension is so large and so sparse
that a Bernoulli, Poisson, etc. is not a good model. Moreover, it
is computationally demanding to compute the likelihood for so many
terms. Zero-inflated models address both concerns. Let,
z_{t,n} ~ Bern(rho)
y_{t,n} ~ p(y_{t,n} | c_n.dot(x_t) + d_n)) if z_{t,n} = 1
= 0 o.w.
If z_{t,n} = 1, we say that datapoint was "exposed." That is, the
observation y_{t,n} reflects the underlying latent state. The
observation may be zero, but that is still informative. However,
if the datapoint was not exposed (which can only happen if y_{t,n}=0),
then this term does not reflect the underlying state.
Thus, Z is effectively a mask on the data, and the likelihood only
depends on places where z_{t,n} = 1. Moreover, we only have to
introduce auxiliary variables for the entries that are unmasked.
"""
def __init__(self,model, data=None, **kwargs):
# The data must be provided in sparse row format
# This makes it easy to iterate over rows. Basically,
# for each row, t, it is easy to get the output dimensions, n,
# such that y_{t,n} > 0.
super(LDSStatesZeroInflatedCountData, self).\
__init__(model, data=data, **kwargs)
# Initialize the Polya-gamma samplers
import pypolyagamma as ppg
num_threads = ppg.get_omp_num_threads()
seeds = np.random.randint(2 ** 16, size=num_threads)
self.ppgs = [ppg.PyPolyaGamma(seed) for seed in seeds]
# Initialize the masked data
if data is not None:
assert isinstance(data, csr_matrix), "Data must be a sparse row matrix for zero-inflated models"
# Initialize a sparse matrix of masked data. The mask
# specifies which observations were "exposed" and which
# were deterministically zero. In other words, the mask
# gives the data values at the places where z_{t,n} = 1.
T, N, C, D, b = self.T, self.D_emission, self.C, self.D, self.emission_distn.b
indptr = [0]
indices = []
vals = []
offset = 0
for t in range(T):
# Get the nonzero entries in the t-th row
ns_t = data.indices[data.indptr[t]:data.indptr[t + 1]]
y_t = np.zeros(N)
y_t[ns_t] = data.data[data.indptr[t]:data.indptr[t + 1]]
# Sample zero inflation mask
z_t = np.random.rand(N) < self.rho
z_t[ns_t] = True
# Construct the sparse matrix
t_inds = np.where(z_t)[0]
indices.append(t_inds)
vals.append(y_t[t_inds])
offset += t_inds.size
indptr.append(offset)
# Construct a sparse matrix
vals = np.concatenate(vals)
indices = np.concatenate(indices)
indptr = np.array(indptr)
self.masked_data = csr_matrix((vals, indices, indptr), shape=(T, N))
# DEBUG: Start with all the data
# dense_data = data.toarray()
# values = dense_data.ravel()
# indices = np.tile(np.arange(self.D_emission), (self.T,))
# indptrs = np.arange(self.T+1) * self.D_emission
# self.masked_data = csr_matrix((values, indices, indptrs), (self.T, self.D_emission))
# assert np.allclose(self.masked_data.toarray(), dense_data)
self.resample_auxiliary_variables()
else:
self.masked_data = None
self.omega = None
@property
def rho(self):
return self.model.rho
@property
def sigma_obs(self):
raise Exception("Count data does not have sigma_obs")
def generate_obs(self):
# Go through each time bin, get the discrete latent state,
# use that to index into the emission_distns to get samples
T, p = self.T, self.D_emission
ed = self.emission_distn
gss = self.gaussian_states
data = np.empty((T,p),dtype='double')
# TODO: Do this sparsely
for t in range(self.T):
data[t] = \
ed.rvs(x=np.hstack((gss[t][None, :], self.inputs[t][None,:])),
return_xy=False)
# Zero out data
zeros = np.random.rand(p) > self.rho
data[t][zeros] = 0
data = csr_matrix(data)
return data
@property
def info_emission_params(self):
T, D_latent, D_emission = self.T, self.D_latent, self.D_emission
masked_data, inputs, omega = self.masked_data, self.inputs, self.omega
emission_distn = self.emission_distn
C = emission_distn.A[:, :D_latent]
CCT = np.array([np.outer(c, c) for c in C]).reshape((D_emission, D_latent**2))
D = emission_distn.A[:,D_latent:]
b = emission_distn.b
J_node = omega.dot(CCT).reshape((T, D_latent, D_latent))
kappa = emission_distn.kappa_func(masked_data.data)
kappa = csr_matrix((kappa, masked_data.indices, masked_data.indptr), shape=masked_data.shape)
h_node = kappa.dot(C)
# Unfortunately, the following operations would require dense arrays of size (T x D_emission)
# h_node += -(omega * b.T).dot(C)
# h_node += -(omega * inputs.dot(D.T)).dot(C)
# This might not be much faster, but it should avoid making
# dense arrays
for t in range(T):
ns_t = masked_data.indices[masked_data.indptr[t]:masked_data.indptr[t+1]]
om_t = omega.data[omega.indptr[t]:omega.indptr[t+1]]
h_node[t] -= (om_t * b[ns_t][:,0]).dot(C[ns_t])
h_node[t] -= (om_t * inputs[t].dot(D[ns_t].T)).dot(C[ns_t])
# TODO: See comment in _LDSStatesCountData for info on the log normalizers
# The same applies to this zero-inflated data
log_Z_node = np.zeros(self.T)
return J_node, h_node, log_Z_node
@property
def expected_info_emission_params(self):
raise NotImplementedError("Mean field with count observations is not yet supported")
@property
def expected_extra_info_params(self):
raise NotImplementedError("Mean field with count observations is not yet supported")
@property
def psi(self):
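# Linear predictor psi[t, n] = c_n^T x_t + d_n^T u_t + b_n, evaluated only
# at the entries retained in masked_data and returned in the same CSR layout.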
T, C, D, ed = self.T, self.C, self.D, self.emission_distn
data, size, indices, indptr \
= self.masked_data, self.masked_data.size, \
self.masked_data.indices, self.masked_data.indptr
psi = np.zeros(size)
offset = 0
for t in range(T):
for n in indices[indptr[t]:indptr[t + 1]]:
psi[offset] = self.gaussian_states[t].dot(C[n])
psi[offset] += self.inputs[t].dot(D[n])
psi[offset] += ed.b[n]
offset += 1
return csr_matrix((psi, indices, indptr), shape=data.shape)
def resample(self, niter=1):
self.resample_zeroinflation_variables()
self.resample_auxiliary_variables()
self.resample_gaussian_states()
def resample_zeroinflation_variables(self):
"""
There's no way around the fact that we have to look at every
data point, even the zeros here.
"""
# TODO: move this to cython?
T, N, C, D, b = self.T, self.D_emission, self.C, self.D, self.emission_distn.b
indptr = [0]
indices = []
vals = []
offset = 0
X = np.hstack((self.gaussian_states, self.inputs))
for t in range(T):
# Evaluate probability of data
y_t = np.zeros(N)
ns_t = self.data.indices[self.data.indptr[t]:self.data.indptr[t+1]]
y_t[ns_t] = self.data.data[self.data.indptr[t]:self.data.indptr[t+1]]
ll = self.emission_distn._elementwise_log_likelihood((X[t], y_t))
ll = ll.ravel()
# Evaluate the probability that each emission was "exposed",
# i.e. p(z_tn = 1 | y_tn, x_tn)
log_p_exposed = np.log(self.rho) + ll
log_p_exposed -= np.log(np.exp(log_p_exposed) + (1-self.rho) * (y_t == 0))
# Sample zero inflation mask
z_t = np.random.rand(N) < np.exp(log_p_exposed)
# Construct the sparse matrix
t_inds = np.where(z_t)[0]
indices.append(t_inds)
vals.append(y_t[t_inds])
offset += t_inds.size
indptr.append(offset)
# Construct a sparse matrix
vals = np.concatenate(vals)
indices = np.concatenate(indices)
indptr = np.array(indptr)
self.masked_data = csr_matrix((vals, indices, indptr), shape=(T, N))
def resample_auxiliary_variables(self):
# TODO: move this to cython
import pypolyagamma as ppg
T, C, D, ed = self.T, self.C, self.D, self.emission_distn
data, size, indices, indptr \
= self.masked_data, self.masked_data.size, \
self.masked_data.indices, self.masked_data.indptr
psi = np.zeros(size)
offset = 0
for t in range(T):
for n in indices[indptr[t]:indptr[t+1]]:
psi[offset] = self.gaussian_states[t].dot(C[n])
psi[offset] += self.inputs[t].dot(D[n])
psi[offset] += ed.b[n]
offset += 1
psi = csr_matrix((psi, indices, indptr), shape=data.shape)
b = ed.b_func(data)
# Allocate vector for omega
self.omega = np.zeros(size)
ppg.pgdrawvpar(self.ppgs, b.data, psi.data, self.omega)
self.omega = csr_matrix((self.omega, indices, indptr), shape=data.shape)
def smooth(self):
# TODO: By assumption, the data is too large to construct
# TODO: a dense smoothing matrix. Let's support a column-wise
# TODO: smoothing operation instead.
warn("Zero inflated smoothing is instantiating a dense matrix!")
X = np.column_stack((self.gaussian_states, self.inputs))
mean = self.rho * self.emission_distn.mean(X)
return mean
|
mattjj/pylds
|
pylds/states.py
|
Python
|
mit
| 36,176
|
[
"Gaussian"
] |
98ae17365ed5ea1611dbb29961b5975bc7deffac51f04fc32e9d1a76a850465e
|
from distutils.core import setup
setup(
name='Raspberry-Pi-IO',
version='0.0.2',
author='Brian Hines',
author_email='brian@projectweekend.net',
packages=['raspberry_pi_io'],
url='https://github.com/exitcodezero/raspberry-pi-io',
license='LICENSE.txt',
description='A Python service for remotely controlling GPIO.',
long_description=open('README.txt').read(),
install_requires=[
"pika == 0.9.14",
"requests == 2.6.0",
],
)
|
projectweekend/raspberry-pi-io
|
setup.py
|
Python
|
mit
| 483
|
[
"Brian"
] |
f942f1cb134857b3f2c29a1f0e47b68329f848854be2d36371b3e4175047a398
|
# Basic application to load a mesh from file and view it in a window
# Python imports
import sys, os
import euclid as eu
## Imports from this project
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'core')) # hack to allow local imports without creating a module or modifying the path variable
from InputOutput import *
from MeshDisplay import MeshDisplay
from HalfEdgeMesh import *
from Utilities import *
def main():
# Get the path for the mesh to load, either from the program argument if
# one was given, or a dialog otherwise
if(len(sys.argv) > 1):
filename = sys.argv[1]
else:
print("ERROR: No file name specified. Proper syntax is 'python Assignment2.py path/to/your/mesh.obj'.")
exit()
# Read in the mesh
mesh = HalfEdgeMesh(readMesh(filename))
# Create a viewer object
winName = 'DDG Assignment2 -- ' + os.path.basename(filename)
meshDisplay = MeshDisplay(windowTitle=winName)
meshDisplay.setMesh(mesh)
###################### BEGIN YOUR CODE
# implement the body of each of these functions
@property
@cacheGeometry
def faceArea(self):
"""
Compute the area of a face. Though not directly requested, this will be
useful when computing face-area weighted normals below.
This method gets called on a face, so 'self' is a reference to the
face at which we will compute the area.
"""
return 0.0 # placeholder value
@property
@cacheGeometry
def faceNormal(self):
"""
Compute normal at a face of the mesh. Unlike at vertices, there is one very
obvious way to do this, since a face uniquely defines a plane.
This method gets called on a face, so 'self' is a reference to the
face at which we will compute the normal.
"""
return Vector3D(0.0,0.0,0.0) # placeholder value
@property
@cacheGeometry
def vertexNormal_EquallyWeighted(self):
"""
Compute a vertex normal using the 'equally weighted' method.
This method gets called on a vertex, so 'self' is a reference to the
vertex at which we will compute the normal.
"""
return Vector3D(0.0,0.0,0.0) # placeholder value
@property
@cacheGeometry
def vertexNormal_AreaWeighted(self):
"""
Compute a vertex normal using the 'face area weights' method.
This method gets called on a vertex, so 'self' is a reference to the
vertex at which we will compute the normal.
"""
return Vector3D(0.0,0.0,0.0) # placeholder value
@property
@cacheGeometry
def vertexNormal_AngleWeighted(self):
"""
Compute a vertex normal using the 'tip angle weights' method.
This method gets called on a vertex, so 'self' is a reference to the
vertex at which we will compute the normal.
"""
return Vector3D(0.0,0.0,0.0) # placeholder value
@property
@cacheGeometry
def cotan(self):
"""
Compute the cotangent of the angle opposite a halfedge. This is not
directly required, but will be useful when computing the mean curvature
normals below.
This method gets called on a halfedge, so 'self' is a reference to the
halfedge at which we will compute the cotangent.
"""
return 0.0 # placeholder value
@property
@cacheGeometry
def vertexNormal_MeanCurvature(self):
"""
Compute a vertex normal using the 'mean curvature' method.
Be sure to understand the relationship between this method and the
area gradient method.
This method gets called on a vertex, so 'self' is a reference to the
vertex at which we will compute the normal.
"""
return Vector3D(0.0,0.0,0.0) # placeholder value
@property
@cacheGeometry
def vertexNormal_SphereInscribed(self):
"""
Compute a vertex normal using the 'inscribed sphere' method.
This method gets called on a vertex, so 'self' is a reference to the
vertex at which we will compute the normal.
"""
return Vector3D(0.0,0.0,0.0) # placeholder value
@property
@cacheGeometry
def angleDefect(self):
"""
Compute the angle defect of a vertex, d(v) (see Assignment 1 Exercise 8).
This method gets called on a vertex, so 'self' is a reference to the
vertex at which we will compute the angle defect.
"""
return 0.0 # placeholder value
def totalGaussianCurvature():
"""
Compute the total Gaussian curvature in the mesh, meaning the sum of Gaussian
curvature at each vertex.
Note that you can access the mesh with the 'mesh' variable.
"""
return 0.0 # placeholder value
def gaussianCurvatureFromGaussBonnet():
"""
Compute the total Gaussian curvature that the mesh should have, given that the
Gauss-Bonnet theorem holds (see Assignment 1 Exercise 9).
Note that you can access the mesh with the 'mesh' variable. The
mesh includes members like 'mesh.verts' and 'mesh.faces', which are
sets of the vertices (resp. faces) in the mesh.
"""
return 0.0 # placeholder value
###################### END YOUR CODE
# Set these newly-defined methods as the methods to use in the classes
Face.normal = faceNormal
Face.area = faceArea
Vertex.normal = vertexNormal_AreaWeighted
Vertex.vertexNormal_EquallyWeighted = vertexNormal_EquallyWeighted
Vertex.vertexNormal_AreaWeighted = vertexNormal_AreaWeighted
Vertex.vertexNormal_AngleWeighted = vertexNormal_AngleWeighted
Vertex.vertexNormal_MeanCurvature = vertexNormal_MeanCurvature
Vertex.vertexNormal_SphereInscribed = vertexNormal_SphereInscribed
Vertex.angleDefect = angleDefect
HalfEdge.cotan = cotan
## Functions which will be called by keypresses to visualize these definitions
def toggleFaceVectors():
print("\nToggling vertex vector display")
if toggleFaceVectors.val:
toggleFaceVectors.val = False
meshDisplay.setVectors(None)
else:
toggleFaceVectors.val = True
meshDisplay.setVectors('normal', vectorDefinedAt='face')
meshDisplay.generateVectorData()
toggleFaceVectors.val = False # ridiculous Python scoping hack
meshDisplay.registerKeyCallback('1', toggleFaceVectors, docstring="Toggle drawing face normal vectors")
def toggleVertexVectors():
print("\nToggling vertex vector display")
if toggleVertexVectors.val:
toggleVertexVectors.val = False
meshDisplay.setVectors(None)
else:
toggleVertexVectors.val = True
meshDisplay.setVectors('normal', vectorDefinedAt='vertex')
meshDisplay.generateVectorData()
toggleVertexVectors.val = False # ridiculous Python scoping hack
meshDisplay.registerKeyCallback('2', toggleVertexVectors, docstring="Toggle drawing vertex normal vectors")
def toggleDefect():
print("\nToggling angle defect display")
if toggleDefect.val:
toggleDefect.val = False
meshDisplay.setShapeColorToDefault()
else:
toggleDefect.val = True
meshDisplay.setShapeColorFromScalar("angleDefect", cmapName="seismic",vMinMax=[-pi/8,pi/8])
meshDisplay.generateFaceData()
toggleDefect.val = False
meshDisplay.registerKeyCallback('3', toggleDefect, docstring="Toggle drawing angle defect coloring")
def useEquallyWeightedNormals():
mesh.staticGeometry = False
print("\nUsing equally-weighted normals")
Vertex.normal = vertexNormal_EquallyWeighted
mesh.staticGeometry = True
meshDisplay.generateAllMeshValues()
meshDisplay.registerKeyCallback('4', useEquallyWeightedNormals, docstring="Use equally-weighted normal computation")
def useAreaWeightedNormals():
mesh.staticGeometry = False
print("\nUsing area-weighted normals")
Vertex.normal = vertexNormal_AreaWeighted
mesh.staticGeometry = True
meshDisplay.generateAllMeshValues()
meshDisplay.registerKeyCallback('5', useAreaWeightedNormals, docstring="Use area-weighted normal computation")
def useAngleWeightedNormals():
mesh.staticGeometry = False
print("\nUsing angle-weighted normals")
Vertex.normal = vertexNormal_AngleWeighted
mesh.staticGeometry = True
meshDisplay.generateAllMeshValues()
meshDisplay.registerKeyCallback('6', useAngleWeightedNormals, docstring="Use angle-weighted normal computation")
def useMeanCurvatureNormals():
mesh.staticGeometry = False
print("\nUsing mean curvature normals")
Vertex.normal = vertexNormal_MeanCurvature
mesh.staticGeometry = True
meshDisplay.generateAllMeshValues()
meshDisplay.registerKeyCallback('7', useMeanCurvatureNormals, docstring="Use mean curvature normal computation")
def useSphereInscribedNormals():
mesh.staticGeometry = False
print("\nUsing sphere-inscribed normals")
Vertex.normal = vertexNormal_SphereInscribed
mesh.staticGeometry = True
meshDisplay.generateAllMeshValues()
meshDisplay.registerKeyCallback('8', useSphereInscribedNormals, docstring="Use sphere-inscribed normal computation")
def computeDiscreteGaussBonnet():
print("\nComputing total curvature:")
computed = totalGaussianCurvature()
predicted = gaussianCurvatureFromGaussBonnet()
print(" Total computed curvature: " + str(computed))
print(" Predicted value from Gauss-Bonnet is: " + str(predicted))
print(" Error is: " + str(abs(computed - predicted)))
meshDisplay.registerKeyCallback('z', computeDiscreteGaussBonnet, docstring="Compute total curvature")
def deformShape():
print("\nDeforming shape")
mesh.staticGeometry = False
# Get the center and scale of the shape
center = meshDisplay.dataCenter
scale = meshDisplay.scaleFactor
# Rotate according to swirly function
ax = eu.Vector3(-1.0,.75,0.5)
for v in mesh.verts:
vec = v.position - center
theta = 0.8 * norm(vec) / scale
newVec = np.array(eu.Vector3(*vec).rotate_around(ax, theta))
v.position = center + newVec
mesh.staticGeometry = True
meshDisplay.generateAllMeshValues()
meshDisplay.registerKeyCallback('x', deformShape, docstring="Apply a swirly deformation to the shape")
## Register pick functions that output useful information on click
def pickVert(vert):
print(" Position:" + printVec3(vert.position))
print(" Angle defect: {:.5f}".format(vert.angleDefect))
print(" Normal (equally weighted): " + printVec3(vert.vertexNormal_EquallyWeighted))
print(" Normal (area weighted): " + printVec3(vert.vertexNormal_AreaWeighted))
print(" Normal (angle weighted): " + printVec3(vert.vertexNormal_AngleWeighted))
print(" Normal (sphere-inscribed): " + printVec3(vert.vertexNormal_SphereInscribed))
print(" Normal (mean curvature): " + printVec3(vert.vertexNormal_MeanCurvature))
meshDisplay.pickVertexCallback = pickVert
def pickFace(face):
print(" Face area: {:.5f}".format(face.area))
print(" Normal: " + printVec3(face.normal))
print(" Vertex positions: ")
for (i, vert) in enumerate(face.adjacentVerts()):
print(" v{}: {}".format((i+1),printVec3(vert.position)))
meshDisplay.pickFaceCallback = pickFace
# Start the viewer running
meshDisplay.startMainLoop()
if __name__ == "__main__": main()
|
nmwsharp/DDGSpring2016
|
Assignment2/Assignment2.py
|
Python
|
mit
| 11,896
|
[
"Gaussian"
] |
7a6b706ab7398004db7a06c6c089a72b57e7cd46a661868698bc1dfcbc8de662
|
# Copyright Anne M. Archibald 2008
# Released under the scipy license
import numpy as np
import warnings
from .ckdtree import cKDTree, cKDTreeNode
__all__ = ['minkowski_distance_p', 'minkowski_distance',
'distance_matrix',
'Rectangle', 'KDTree']
def minkowski_distance_p(x, y, p=2):
"""Compute the pth power of the L**p distance between two arrays.
For efficiency, this function computes the L**p distance but does
not extract the pth root. If `p` is 1 or infinity, this is equal to
the actual L**p distance.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance_p
>>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
array([2, 1])
"""
x = np.asarray(x)
y = np.asarray(y)
# Find smallest common datatype with float64 (return type of this function) - addresses #10262.
# Don't just cast to float64 for complex input case.
common_datatype = np.promote_types(np.promote_types(x.dtype, y.dtype), 'float64')
# Make sure x and y are NumPy arrays of correct datatype.
x = x.astype(common_datatype)
y = y.astype(common_datatype)
if p == np.inf:
return np.amax(np.abs(y-x), axis=-1)
elif p == 1:
return np.sum(np.abs(y-x), axis=-1)
else:
return np.sum(np.abs(y-x)**p, axis=-1)
def minkowski_distance(x, y, p=2):
"""Compute the L**p distance between two arrays.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance
>>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
array([ 1.41421356, 1. ])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf or p == 1:
return minkowski_distance_p(x, y, p)
else:
return minkowski_distance_p(x, y, p)**(1./p)
class Rectangle:
"""Hyperrectangle class.
Represents a Cartesian product of intervals.
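Examples
--------
A brief sketch:
>>> r = Rectangle([1., 2.], [0., 0.])  # the box [0, 1] x [0, 2]
>>> less, greater = r.split(0, 0.5)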
"""
def __init__(self, maxes, mins):
"""Construct a hyperrectangle."""
self.maxes = np.maximum(maxes,mins).astype(float)
self.mins = np.minimum(maxes,mins).astype(float)
self.m, = self.maxes.shape
def __repr__(self):
return "<Rectangle %s>" % list(zip(self.mins, self.maxes))
def volume(self):
"""Total volume."""
return np.prod(self.maxes-self.mins)
def split(self, d, split):
"""Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split : float
Position along axis `d` to split at.
"""
mid = np.copy(self.maxes)
mid[d] = split
less = Rectangle(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = Rectangle(mid, self.maxes)
return less, greater
def min_distance_point(self, x, p=2.):
"""
Return the minimum distance between input and points in the
hyperrectangle.
Parameters
----------
x : array_like
Input.
p : float, optional
Input.
"""
return minkowski_distance(
0, np.maximum(0, np.maximum(self.mins-x, x-self.maxes)),
p
)
def max_distance_point(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input array.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-x, x-self.mins), p)
def min_distance_rectangle(self, other, p=2.):
"""
Compute the minimum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float
Input.
"""
return minkowski_distance(
0,
np.maximum(0, np.maximum(self.mins-other.maxes,
other.mins-self.maxes)),
p
)
def max_distance_rectangle(self, other, p=2.):
"""
Compute the maximum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float, optional
Input.
"""
return minkowski_distance(
0, np.maximum(self.maxes-other.mins, other.maxes-self.mins), p)
class KDTree(cKDTree):
"""kd-tree for quick nearest-neighbor lookup.
This class provides an index into a set of k-dimensional points
which can be used to rapidly look up the nearest neighbors of any
point.
Parameters
----------
data : array_like, shape (n,m)
The n data points of dimension m to be indexed. This array is
not copied unless this is necessary to produce a contiguous
array of doubles, and so modifying this data will result in
bogus results. The data are also copied if the kd-tree is built
with copy_data=True.
leafsize : positive int, optional
The number of points at which the algorithm switches over to
brute-force. Default: 10.
compact_nodes : bool, optional
If True, the kd-tree is built to shrink the hyperrectangles to
the actual data range. This usually gives a more compact tree that
is robust against degenerated input data and gives faster queries
at the expense of longer build time. Default: True.
copy_data : bool, optional
If True the data is always copied to protect the kd-tree against
data corruption. Default: False.
balanced_tree : bool, optional
If True, the median is used to split the hyperrectangles instead of
the midpoint. This usually gives a more compact tree and
faster queries at the expense of longer build time. Default: True.
boxsize : array_like or scalar, optional
Apply an m-d toroidal topology to the KDTree. The topology is generated
by :math:`x_i + n_i L_i` where :math:`n_i` are integers and :math:`L_i`
is the boxsize along i-th dimension. The input data shall be wrapped
into :math:`[0, L_i)`. A ValueError is raised if any of the data is
outside of this bound.
Notes
-----
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
Attributes
----------
data : ndarray, shape (n,m)
The n data points of dimension m to be indexed. This array is
not copied unless this is necessary to produce a contiguous
array of doubles. The data are also copied if the kd-tree is built
with `copy_data=True`.
leafsize : positive int
The number of points at which the algorithm switches over to
brute-force.
m : int
The dimension of a single data-point.
n : int
The number of data points.
maxes : ndarray, shape (m,)
The maximum value in each dimension of the n data points.
mins : ndarray, shape (m,)
The minimum value in each dimension of the n data points.
size : int
The number of nodes in the tree.
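Examples
--------
A minimal usage sketch:
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> rng = np.random.default_rng(0)
>>> tree = KDTree(rng.random((100, 3)))
>>> dist, idx = tree.query([0.5, 0.5, 0.5], k=4)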
"""
class node:
@staticmethod
def _create(ckdtree_node=None):
"""Create either an inner or leaf node, wrapping a cKDTreeNode instance"""
if ckdtree_node is None:
return KDTree.node(ckdtree_node)
elif ckdtree_node.split_dim == -1:
return KDTree.leafnode(ckdtree_node)
else:
return KDTree.innernode(ckdtree_node)
def __init__(self, ckdtree_node=None):
if ckdtree_node is None:
ckdtree_node = cKDTreeNode()
self._node = ckdtree_node
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return id(self) == id(other)
class leafnode(node):
@property
def idx(self):
return self._node.indices
@property
def children(self):
return self._node.children
class innernode(node):
def __init__(self, ckdtreenode):
assert isinstance(ckdtreenode, cKDTreeNode)
super().__init__(ckdtreenode)
self.less = KDTree.node._create(ckdtreenode.lesser)
self.greater = KDTree.node._create(ckdtreenode.greater)
@property
def split_dim(self):
return self._node.split_dim
@property
def split(self):
return self._node.split
@property
def children(self):
return self._node.children
@property
def tree(self):
if not hasattr(self, "_tree"):
self._tree = KDTree.node._create(super().tree)
return self._tree
def __init__(self, data, leafsize=10, compact_nodes=True, copy_data=False,
balanced_tree=True, boxsize=None):
data = np.asarray(data)
if data.dtype.kind == 'c':
raise TypeError("KDTree does not work with complex data")
# Note KDTree has different default leafsize from cKDTree
super().__init__(data, leafsize, compact_nodes, copy_data,
balanced_tree, boxsize)
def query(
self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf, workers=1):
"""Query the kd-tree for nearest neighbors.
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : int or Sequence[int], optional
Either the number of nearest neighbors to return, or a list of the
k-th nearest neighbors to return, starting from 1.
eps : nonnegative float, optional
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values distance ("Manhattan" distance).
2 is the usual Euclidean distance.
infinity is the maximum-coordinate-difference distance.
A large, finite p may cause a ValueError if overflow can occur.
distance_upper_bound : nonnegative float, optional
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
workers : int, optional
Number of workers to use for parallel processing. If -1 is given
all CPU threads are used. Default: 1.
.. versionadded:: 1.6.0
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If ``x`` has shape ``tuple+(self.m,)``, then ``d`` has shape
``tuple+(k,)``.
When k == 1, the last dimension of the output is squeezed.
Missing neighbors are indicated with infinite distances.
Hits are sorted by distance (nearest first).
.. deprecated:: 1.6.0
If ``k=None``, then ``d`` is an object array of shape ``tuple``,
containing lists of distances. This behavior is deprecated and
will be removed in SciPy 1.8.0, use ``query_ball_point``
instead.
i : integer or array of integers
The index of each neighbor in ``self.data``.
``i`` is the same shape as d.
Missing neighbors are indicated with ``self.n``.
Examples
--------
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> x, y = np.mgrid[0:5, 2:8]
>>> tree = KDTree(np.c_[x.ravel(), y.ravel()])
To query the nearest neighbours and return squeezed result, use
>>> dd, ii = tree.query([[0, 0], [2.1, 2.9]], k=1)
>>> print(dd, ii)
[2. 0.14142136] [ 0 13]
To query the nearest neighbours and return unsqueezed result, use
>>> dd, ii = tree.query([[0, 0], [2.1, 2.9]], k=[1])
>>> print(dd, ii)
[[2. ]
[0.14142136]] [[ 0]
[13]]
To query the second nearest neighbours and return unsqueezed result,
use
>>> dd, ii = tree.query([[0, 0], [2.1, 2.9]], k=[2])
>>> print(dd, ii)
[[2.23606798]
[0.90553851]] [[ 6]
[12]]
To query the first and second nearest neighbours, use
>>> dd, ii = tree.query([[0, 0], [2.1, 2.9]], k=2)
>>> print(dd, ii)
[[2. 2.23606798]
[0.14142136 0.90553851]] [[ 0 6]
[13 12]]
or, be more specific
>>> dd, ii = tree.query([[0, 0], [2.1, 2.9]], k=[1, 2])
>>> print(dd, ii)
[[2. 2.23606798]
[0.14142136 0.90553851]] [[ 0 6]
[13 12]]
"""
x = np.asarray(x)
if x.dtype.kind == 'c':
raise TypeError("KDTree does not work with complex data")
if k is None:
# k=None, return all neighbors
warnings.warn(
"KDTree.query with k=None is deprecated and will be removed "
"in SciPy 1.8.0. Use KDTree.query_ball_point instead.",
DeprecationWarning)
            # Convert index query to lists of distances and indices,
            # sorted by distance
def inds_to_hits(point, neighbors):
dist = minkowski_distance(point, self.data[neighbors], p)
hits = sorted([(d, i) for d, i in zip(dist, neighbors)])
return [d for d, i in hits], [i for d, i in hits]
x = np.asarray(x, dtype=np.float64)
inds = super().query_ball_point(
x, distance_upper_bound, p, eps, workers)
if isinstance(inds, list):
return inds_to_hits(x, inds)
dists = np.empty_like(inds)
for idx in np.ndindex(inds.shape):
dists[idx], inds[idx] = inds_to_hits(x[idx], inds[idx])
return dists, inds
d, i = super().query(x, k, eps, p, distance_upper_bound, workers)
if isinstance(i, int):
i = np.intp(i)
return d, i
def query_ball_point(self, x, r, p=2., eps=0, workers=1,
return_sorted=None, return_length=False):
"""Find all points within distance r of point(s) x.
Parameters
----------
x : array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
r : array_like, float
The radius of points to return, must broadcast to the length of x.
p : float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
A finite large p may cause a ValueError if overflow can occur.
eps : nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
workers : int, optional
Number of jobs to schedule for parallel processing. If -1 is given
all processors are used. Default: 1.
.. versionadded:: 1.6.0
return_sorted : bool, optional
            Sorts returned indices if True and does not sort them if False. If
            None, does not sort single point queries, but does sort
            multi-point queries, which was the behavior before this option
            was added.
.. versionadded:: 1.6.0
return_length: bool, optional
Return the number of points inside the radius instead of a list
of the indices.
.. versionadded:: 1.6.0
Returns
-------
results : list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
Notes
-----
If you have many points whose neighbors you want to find, you may save
substantial amounts of time by putting them in a KDTree and using
query_ball_tree.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 0:5]
>>> points = np.c_[x.ravel(), y.ravel()]
>>> tree = spatial.KDTree(points)
>>> sorted(tree.query_ball_point([2, 0], 1))
[5, 10, 11, 15]
Query multiple points and plot the results:
>>> import matplotlib.pyplot as plt
>>> points = np.asarray(points)
>>> plt.plot(points[:,0], points[:,1], '.')
>>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
... nearby_points = points[results]
... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
>>> plt.margins(0.1, 0.1)
>>> plt.show()
"""
x = np.asarray(x)
if x.dtype.kind == 'c':
raise TypeError("KDTree does not work with complex data")
return super().query_ball_point(
x, r, p, eps, workers, return_sorted, return_length)
def query_ball_tree(self, other, r, p=2., eps=0):
"""
Find all pairs of points between `self` and `other` whose distance is
at most r.
Parameters
----------
other : KDTree instance
The tree containing points to search against.
r : float
The maximum distance, has to be positive.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : list of lists
For each element ``self.data[i]`` of this tree, ``results[i]`` is a
list of the indices of its neighbors in ``other.data``.
Examples
--------
You can search all pairs of points between two kd-trees within a distance:
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> rng = np.random.default_rng()
>>> points1 = rng.random((15, 2))
>>> points2 = rng.random((15, 2))
>>> plt.figure(figsize=(6, 6))
>>> plt.plot(points1[:, 0], points1[:, 1], "xk", markersize=14)
>>> plt.plot(points2[:, 0], points2[:, 1], "og", markersize=14)
>>> kd_tree1 = KDTree(points1)
>>> kd_tree2 = KDTree(points2)
>>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
>>> for i in range(len(indexes)):
... for j in indexes[i]:
... plt.plot([points1[i, 0], points2[j, 0]],
... [points1[i, 1], points2[j, 1]], "-r")
>>> plt.show()
"""
return super().query_ball_tree(other, r, p, eps)
def query_pairs(self, r, p=2., eps=0, output_type='set'):
"""Find all pairs of points in `self` whose distance is at most r.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
output_type : string, optional
Choose the output container, 'set' or 'ndarray'. Default: 'set'
.. versionadded:: 1.6.0
Returns
-------
results : set or ndarray
            Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
            positions are close. If output_type is 'ndarray', an ndarray is
            returned instead of a set.
Examples
--------
You can search all pairs of points in a kd-tree within a distance:
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> rng = np.random.default_rng()
>>> points = rng.random((20, 2))
>>> plt.figure(figsize=(6, 6))
>>> plt.plot(points[:, 0], points[:, 1], "xk", markersize=14)
>>> kd_tree = KDTree(points)
>>> pairs = kd_tree.query_pairs(r=0.2)
>>> for (i, j) in pairs:
... plt.plot([points[i, 0], points[j, 0]],
... [points[i, 1], points[j, 1]], "-r")
>>> plt.show()
"""
return super().query_pairs(r, p, eps, output_type)
def count_neighbors(self, other, r, p=2., weights=None, cumulative=True):
"""Count how many nearby pairs can be formed.
        Count the number of pairs ``(x1,x2)`` that can be formed, with ``x1`` drawn
from ``self`` and ``x2`` drawn from ``other``, and where
``distance(x1, x2, p) <= r``.
Data points on ``self`` and ``other`` are optionally weighted by the
``weights`` argument. (See below)
This is adapted from the "two-point correlation" algorithm described by
Gray and Moore [1]_. See notes for further discussion.
Parameters
----------
other : KDTree
The other tree to draw points from, can be the same tree as self.
r : float or one-dimensional array of floats
The radius to produce a count for. Multiple radii are searched with
a single tree traversal.
            If the count is non-cumulative (``cumulative=False``), ``r`` defines
the edges of the bins, and must be non-decreasing.
p : float, optional
1<=p<=infinity.
Which Minkowski p-norm to use.
Default 2.0.
A finite large p may cause a ValueError if overflow can occur.
weights : tuple, array_like, or None, optional
If None, the pair-counting is unweighted.
If given as a tuple, weights[0] is the weights of points in
``self``, and weights[1] is the weights of points in ``other``;
either can be None to indicate the points are unweighted.
If given as an array_like, weights is the weights of points in
``self`` and ``other``. For this to make sense, ``self`` and
``other`` must be the same tree. If ``self`` and ``other`` are two
different trees, a ``ValueError`` is raised.
Default: None
.. versionadded:: 1.6.0
cumulative : bool, optional
Whether the returned counts are cumulative. When cumulative is set
to ``False`` the algorithm is optimized to work with a large number
of bins (>10) specified by ``r``. When ``cumulative`` is set to
True, the algorithm is optimized to work with a small number of
``r``. Default: True
.. versionadded:: 1.6.0
Returns
-------
result : scalar or 1-D array
The number of pairs. For unweighted counts, the result is integer.
For weighted counts, the result is float.
If cumulative is False, ``result[i]`` contains the counts with
``(-inf if i == 0 else r[i-1]) < R <= r[i]``
Notes
-----
Pair-counting is the basic operation used to calculate the two point
correlation functions from a data set composed of position of objects.
Two point correlation function measures the clustering of objects and
is widely used in cosmology to quantify the large scale structure
in our Universe, but it may be useful for data analysis in other fields
where self-similar assembly of objects also occur.
The Landy-Szalay estimator for the two point correlation function of
``D`` measures the clustering signal in ``D``. [2]_
For example, given the position of two sets of objects,
- objects ``D`` (data) contains the clustering signal, and
- objects ``R`` (random) that contains no signal,
.. math::
\\xi(r) = \\frac{<D, D> - 2 f <D, R> + f^2<R, R>}{f^2<R, R>},
where the brackets represents counting pairs between two data sets
in a finite bin around ``r`` (distance), corresponding to setting
`cumulative=False`, and ``f = float(len(D)) / float(len(R))`` is the
ratio between number of objects from data and random.
The algorithm implemented here is loosely based on the dual-tree
algorithm described in [1]_. We switch between two different
        pair-cumulation schemes depending on the setting of ``cumulative``.
        The computing time of the method we use for
``cumulative == False`` does not scale with the total number of bins.
The algorithm for ``cumulative == True`` scales linearly with the
number of bins, though it is slightly faster when only
1 or 2 bins are used. [5]_.
As an extension to the naive pair-counting,
weighted pair-counting counts the product of weights instead
of number of pairs.
Weighted pair-counting is used to estimate marked correlation functions
([3]_, section 2.2),
or to properly calculate the average of data per distance bin
(e.g. [4]_, section 2.1 on redshift).
.. [1] Gray and Moore,
"N-body problems in statistical learning",
Mining the sky, 2000,
https://arxiv.org/abs/astro-ph/0012333
.. [2] Landy and Szalay,
"Bias and variance of angular correlation functions",
The Astrophysical Journal, 1993,
http://adsabs.harvard.edu/abs/1993ApJ...412...64L
.. [3] Sheth, Connolly and Skibba,
"Marked correlations in galaxy formation models",
Arxiv e-print, 2005,
https://arxiv.org/abs/astro-ph/0511773
.. [4] Hawkins, et al.,
"The 2dF Galaxy Redshift Survey: correlation functions,
peculiar velocities and the matter density of the Universe",
Monthly Notices of the Royal Astronomical Society, 2002,
http://adsabs.harvard.edu/abs/2003MNRAS.346...78H
.. [5] https://github.com/scipy/scipy/pull/5647#issuecomment-168474926
Examples
--------
        You can count the number of neighbor pairs between two kd-trees within a distance:
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> rng = np.random.default_rng()
>>> points1 = rng.random((5, 2))
>>> points2 = rng.random((5, 2))
>>> kd_tree1 = KDTree(points1)
>>> kd_tree2 = KDTree(points2)
>>> kd_tree1.count_neighbors(kd_tree2, 0.2)
1
        This number is the same as the total pair number calculated by
`query_ball_tree`:
>>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
>>> sum([len(i) for i in indexes])
1
"""
return super().count_neighbors(other, r, p, weights, cumulative)
def sparse_distance_matrix(
self, other, max_distance, p=2., output_type='dok_matrix'):
"""Compute a sparse distance matrix.
Computes a distance matrix between two KDTrees, leaving as zero
any distance greater than max_distance.
Parameters
----------
other : KDTree
max_distance : positive float
p : float, 1<=p<=infinity
Which Minkowski p-norm to use.
A finite large p may cause a ValueError if overflow can occur.
output_type : string, optional
Which container to use for output data. Options: 'dok_matrix',
'coo_matrix', 'dict', or 'ndarray'. Default: 'dok_matrix'.
.. versionadded:: 1.6.0
Returns
-------
result : dok_matrix, coo_matrix, dict or ndarray
Sparse matrix representing the results in "dictionary of keys"
            format. If a dict is returned, the keys are (i,j) tuples of indices.
            If output_type is 'ndarray', a record array with fields 'i', 'j',
            and 'v' is returned.
Examples
--------
You can compute a sparse distance matrix between two kd-trees:
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> rng = np.random.default_rng()
>>> points1 = rng.random((5, 2))
>>> points2 = rng.random((5, 2))
>>> kd_tree1 = KDTree(points1)
>>> kd_tree2 = KDTree(points2)
>>> sdm = kd_tree1.sparse_distance_matrix(kd_tree2, 0.3)
>>> sdm.toarray()
array([[0. , 0. , 0.12295571, 0. , 0. ],
[0. , 0. , 0. , 0. , 0. ],
[0.28942611, 0. , 0. , 0.2333084 , 0. ],
[0. , 0. , 0. , 0. , 0. ],
[0.24617575, 0.29571802, 0.26836782, 0. , 0. ]])
        You can check that distances above `max_distance` are zeros:
>>> from scipy.spatial import distance_matrix
>>> distance_matrix(points1, points2)
array([[0.56906522, 0.39923701, 0.12295571, 0.8658745 , 0.79428925],
[0.37327919, 0.7225693 , 0.87665969, 0.32580855, 0.75679479],
[0.28942611, 0.30088013, 0.6395831 , 0.2333084 , 0.33630734],
[0.31994999, 0.72658602, 0.71124834, 0.55396483, 0.90785663],
[0.24617575, 0.29571802, 0.26836782, 0.57714465, 0.6473269 ]])
"""
return super().sparse_distance_matrix(
other, max_distance, p, output_type)
def distance_matrix(x, y, p=2, threshold=1000000):
"""Compute the distance matrix.
Returns the matrix of all pair-wise distances.
Parameters
----------
x : (M, K) array_like
Matrix of M vectors in K dimensions.
y : (N, K) array_like
Matrix of N vectors in K dimensions.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
threshold : positive int
If ``M * N * K`` > `threshold`, algorithm uses a Python loop instead
of large temporary arrays.
Returns
-------
result : (M, N) ndarray
Matrix containing the distance from every vector in `x` to every vector
in `y`.
Examples
--------
>>> from scipy.spatial import distance_matrix
>>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
array([[ 1. , 1.41421356],
[ 1.41421356, 1. ]])
"""
x = np.asarray(x)
m, k = x.shape
y = np.asarray(y)
n, kk = y.shape
if k != kk:
raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))
if m*n*k <= threshold:
return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
else:
result = np.empty((m,n),dtype=float) # FIXME: figure out the best dtype
if m < n:
for i in range(m):
result[i,:] = minkowski_distance(x[i],y,p)
else:
for j in range(n):
result[:,j] = minkowski_distance(x,y[j],p)
return result
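# Illustrative check (not part of the SciPy source): both branches of
# `distance_matrix` compute the same Minkowski distances, so forcing the
# looped branch with a tiny `threshold` reproduces the vectorized result:
# >>> rng = np.random.default_rng()
# >>> a, b = rng.random((4, 3)), rng.random((5, 3))
# >>> bool(np.allclose(distance_matrix(a, b),
# ...                  distance_matrix(a, b, threshold=1)))
# True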
|
WarrenWeckesser/scipy
|
scipy/spatial/kdtree.py
|
Python
|
bsd-3-clause
| 33,807
|
[
"Galaxy"
] |
0c6fd0c04f4d07c0e368ebfef85da29b81553a1ed2a856df317cfd8aa4be0c4d
|
# ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
# Integrator code for batch processing of full data runs (incorporating parts of earlier analysis scripts)
# Data interfacing
from read_data import *
from read_param import *
# Pre-existing analysis scripts
from nematic_analysis import *
from glob import glob
# This is the structured data file hierarchy. Replace as appropriate (do not go the Yaouen way and fully automate ...)
basefolder='/home/silke/Documents/CurrentProjects/Rastko/nematic/data/'
#basefolder = '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/J_1_0_v0_1_0/'
#outfolder= '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/J_1_0_v0_1_0/'
outfolder = '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/'
#v0val=['0.3','0.5','0.7','1.5','2.0','3.0','7.0','10.0']
v0val=['0.3','0.5','0.7','1.5']
sigma=1
rval=['16.0']
nstep=10100000
nsave=5000
nsnap=int(nstep/nsave)
#skip=835
skip=1500
for r in rval:
for v0 in v0val:
#param = Param(basefolder)
files = sorted(glob(basefolder+'R_'+ r+ '/v0_' + v0 + '/sphere_*.dat'))[skip:]
defects=np.zeros((len(files),12))
ndefect=np.zeros((len(files),1))
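        # Layout note (added for clarity): row u of `defects` packs the 3D
        # positions of the first four defects found in frame u as
        # [x1,y1,z1, x2,y2,z2, x3,y3,z3, x4,y4,z4].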
u=0
for f in files:
            print(f)
outname =outfolder +'R_'+ r+ '/v0_' + v0 + '/frame_data' + str(u)+'.vtk'
defects0,ndefect0=getDefects(f,float(r),sigma,outname,False,True)
defects[u,0:3]=defects0[0,:]
defects[u,3:6]=defects0[1,:]
defects[u,6:9]=defects0[2,:]
defects[u,9:12]=defects0[3,:]
ndefect[u]=ndefect0
outname = '.'.join((f).split('.')[:-1]) + '_defects.vtk'
outname =outfolder +'R_'+ r+ '/v0_' + v0 + '/frame_defects' + str(u)+'.vtk'
            print(outname)
writeDefects(defects0,ndefect0,outname)
u+=1
#outfile2=outfolder + 'defects_v0_' + v0 + '_R_'+ r+ '.dat'
#np.savetxt(outfile2,np.concatenate((ndefect,defects),axis=1),fmt='%12.6g', header='ndefect defects')
|
sknepneklab/SAMoS
|
analysis/batch_nematic/batch_analyze_nematic_R16a.py
|
Python
|
gpl-3.0
| 2,380
|
[
"VTK"
] |
0a9f7a24db3105f23b8249d87e021b94dde5c333b9916912580e2e4f78e59181
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pymatgen.analysis.diffraction.xrd import XRDCalculator
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
"""
TODO: Modify unittest doc.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "5/22/14"
class XRDCalculatorTest(PymatgenTest):
def test_type_wavelength(self):
"""Test TypeError is raised if wavelength is unaccepted type"""
wavelength = [1.78, 2.78] # just a list
self.assertRaises(TypeError, XRDCalculator, wavelength)
def test_get_pattern(self):
s = self.get_structure("CsCl")
c = XRDCalculator()
xrd = c.get_pattern(s, two_theta_range=(0, 90))
self.assertTrue(xrd.to_json()) # Test MSONAble property
# Check the first two peaks
self.assertAlmostEqual(xrd.x[0], 21.107738329639844)
self.assertAlmostEqual(xrd.y[0], 36.483184003748946)
self.assertEqual(xrd.hkls[0], [{"hkl": (1, 0, 0), "multiplicity": 6}])
self.assertAlmostEqual(xrd.d_hkls[0], 4.2089999999999996)
self.assertAlmostEqual(xrd.x[1], 30.024695921112777)
self.assertAlmostEqual(xrd.y[1], 100)
self.assertEqual(xrd.hkls[1], [{"hkl": (1, 1, 0), "multiplicity": 12}])
self.assertAlmostEqual(xrd.d_hkls[1], 2.976212442014178)
s = self.get_structure("LiFePO4")
xrd = c.get_pattern(s, two_theta_range=(0, 90))
self.assertAlmostEqual(xrd.x[1], 17.03504233621785)
self.assertAlmostEqual(xrd.y[1], 50.400928948337075)
s = self.get_structure("Li10GeP2S12")
xrd = c.get_pattern(s, two_theta_range=(0, 90))
self.assertAlmostEqual(xrd.x[1], 14.058274883353876)
self.assertAlmostEqual(xrd.y[1], 4.4111123641667671)
# Test a hexagonal structure.
s = self.get_structure("Graphite")
xrd = c.get_pattern(s, two_theta_range=(0, 90))
self.assertAlmostEqual(xrd.x[0], 26.21057350859598)
self.assertAlmostEqual(xrd.y[0], 100)
self.assertAlmostEqual(len(xrd.hkls[0][0]["hkl"]), 4)
# Add test case with different lengths of coefficients.
# Also test d_hkl.
coords = [
[0.25, 0.25, 0.173],
[0.75, 0.75, 0.827],
[0.75, 0.25, 0],
[0.25, 0.75, 0],
[0.25, 0.25, 0.676],
[0.75, 0.75, 0.324],
]
sp = ["Si", "Si", "Ru", "Ru", "Pr", "Pr"]
s = Structure(Lattice.tetragonal(4.192, 6.88), sp, coords)
xrd = c.get_pattern(s)
self.assertAlmostEqual(xrd.x[0], 12.86727341476735)
self.assertAlmostEqual(xrd.y[0], 31.448239816769796)
self.assertAlmostEqual(xrd.d_hkls[0], 6.88)
self.assertEqual(len(xrd), 42)
xrd = c.get_pattern(s, two_theta_range=[0, 60])
self.assertEqual(len(xrd), 18)
# Test with and without Debye-Waller factor
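        # The Debye-Waller factor attenuates scattered intensity roughly as
        # exp(-B * sin^2(theta) / lambda^2) (thermal vibration damping), so
        # enabling it lowers the unscaled peak intensity below while the peak
        # positions (x) and d-spacings stay unchanged.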
tungsten = Structure(Lattice.cubic(3.1653), ["W"] * 2, [[0, 0, 0], [0.5, 0.5, 0.5]])
xrd = c.get_pattern(tungsten, scaled=False)
self.assertAlmostEqual(xrd.x[0], 40.294828554672264)
self.assertAlmostEqual(xrd.y[0], 2414237.5633093244)
self.assertAlmostEqual(xrd.d_hkls[0], 2.2382050944897789)
c = XRDCalculator(debye_waller_factors={"W": 0.1526})
xrd = c.get_pattern(tungsten, scaled=False)
self.assertAlmostEqual(xrd.x[0], 40.294828554672264)
self.assertAlmostEqual(xrd.y[0], 2377745.2296686019)
self.assertAlmostEqual(xrd.d_hkls[0], 2.2382050944897789)
if __name__ == "__main__":
unittest.main()
|
materialsproject/pymatgen
|
pymatgen/analysis/diffraction/tests/test_xrd.py
|
Python
|
mit
| 3,828
|
[
"pymatgen"
] |
17717fa28c0034b8d7ec87550d1ecebde0c6747d5b4a18e93dbe2a6d213cd7b7
|
# -*- coding: utf-8 -*-
from datetime import timedelta, datetime
from dateutil import parser as dateparser
import json
import os
import pytz
import time
from typing import Any, Dict
from django.conf import settings
from django.http import HttpRequest, JsonResponse
from django.db.models.aggregates import Count
from django.db import connection, transaction
from django.utils import timezone
from django.utils.decorators import method_decorator
from rest_framework.decorators import api_view
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from catmaid.control.authentication import requires_user_role
from catmaid.control.common import get_relation_to_id_map, get_request_bool
from catmaid.models import ClassInstance, Connector, Treenode, User, UserRole, \
Review, Relation, TreenodeConnector
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def stats_cable_length(request:HttpRequest, project_id=None) -> JsonResponse:
""" Get the largest skeletons based on cable length.
---
parameters:
    - name: n_skeletons
description: |
How many skeletons should be returned
required: false
type: integer
paramType: form
- name: name_pattern
      description: |
Optional name pattern that returned neuron names have to match. Start
with '/' for a regular expression.
required: false
type: string
paramType: form
"""
cursor = connection.cursor()
n_skeletons = int(request.GET.get('n_skeletons', '0'))
name_pattern = request.GET.get('name_pattern', '')
name_match_join = ''
name_match_where = ''
model_of_rel = ''
if name_pattern:
relations = get_relation_to_id_map(project_id, ['model_of'], cursor=cursor)
model_of_rel = relations['model_of']
name_match_join = '''
JOIN class_instance_class_instance cici
ON cici.class_instance_a = skeleton_id
JOIN class_instance ci
ON ci.id = cici.class_instance_b
'''
if name_pattern[0] == '/':
name_pattern = name_pattern[1:]
name_match_where = 'AND ci.name ~ %(name_pattern)s AND cici.relation_id = %(model_of)s'
else:
name_pattern = f'%{name_pattern}%'
name_match_where = 'AND ci.name ~~* %(name_pattern)s AND cici.relation_id = %(model_of)s'
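    # Illustrative examples (hypothetical names): '/^MB' matches neuron names
    # beginning with "MB" via the case-sensitive regex operator (~), while a
    # plain 'mb' becomes '%mb%' and matches anywhere in the name,
    # case-insensitively (~~* is Postgres ILIKE).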
cursor.execute("""
SELECT skeleton_id, cable_length
FROM catmaid_skeleton_summary css
{name_match_join}
WHERE css.project_id = %(project_id)s
{name_match_where}
ORDER BY cable_length DESC
{limit}
""".format(**{
'limit': 'LIMIT {}'.format(n_skeletons) if n_skeletons else '',
'name_match_join': name_match_join,
'name_match_where': name_match_where,
}), {
'project_id': project_id,
'name_pattern': name_pattern,
'model_of': model_of_rel,
})
result = list(cursor.fetchall())
return JsonResponse(result, safe=False)
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def stats_nodecount(request:HttpRequest, project_id=None) -> JsonResponse:
""" Get the total number of created nodes per user.
---
parameters:
- name: with_imports
description: |
Whether data added through imports should be respected.
required: false
default: false
type: boolean
paramType: form
defaultValue: false
"""
cursor = connection.cursor()
names = dict(User.objects.values_list('id', 'username'))
with_imports = get_request_bool(request.GET, 'with_imports', False)
cursor.execute('''
WITH precomputed AS (
SELECT user_id,
MAX(date) AS date,
SUM(n_treenodes) AS n_treenodes
FROM catmaid_stats_summary
WHERE project_id = %(project_id)s
GROUP BY 1
),
last_precomputation AS (
SELECT COALESCE(
date_trunc('hour', MAX(date)) + interval '1 hour',
NULL) AS max_date
FROM precomputed
),
result_with_precomputation AS (
SELECT p.user_id AS user_id,
p.n_treenodes AS n_treenodes
FROM precomputed p
                -- Don't expect duplicates when adding rows for nodes traced after the
                -- last precomputation. This is only executed if there actually was a
                -- precomputation (max_date is not null).
UNION ALL
SELECT t.user_id AS user_id,
count(*) AS n_treenodes
FROM treenode t, last_precomputation
WHERE t.project_id = %(project_id)s
AND last_precomputation.max_date IS NOT NULL
AND t.creation_time >= last_precomputation.max_date
GROUP BY t.user_id
)
SELECT user_id, SUM(n_treenodes)::float
FROM result_with_precomputation, last_precomputation
WHERE last_precomputation.max_date IS NOT NULL
GROUP BY user_id
-- If there was no precomputation (max_date is null), do a simpler
-- counting that doesn't involve date comparisons. In this case
-- duplicates are impossible.
UNION ALL
SELECT user_id, count(*)::float
FROM treenode, last_precomputation
WHERE project_id = %(project_id)s
AND last_precomputation IS NULL
GROUP BY user_id
''', dict(project_id=int(project_id)))
node_stats = dict(cursor.fetchall())
if not with_imports:
        # In case imports should be excluded, subtract the number of imported
        # nodes for each entry. Otherwise the regular node count doesn't
        # differentiate between imported and created nodes. This flag requires
        # history tracking to be enabled to work reliably.
cursor.execute('''
WITH precomputed AS (
SELECT user_id,
date,
SUM(n_imported_treenodes) AS n_imported_treenodes
FROM catmaid_stats_summary
WHERE project_id = %(project_id)s
-- This is required to not just take the last available cache
-- entry, which might not contain a valid precomputed import
-- cache field.
AND n_imported_treenodes > 0
GROUP BY 1, 2
),
last_precomputation AS (
SELECT COALESCE(
-- Select first start date after last precomputed hour/bucket
date_trunc('hour', MAX(date)) + interval '1 hour',
'-infinity') AS max_date
FROM precomputed
),
transactions AS (
SELECT cti.transaction_id, cti.execution_time
FROM last_precomputation
JOIN catmaid_transaction_info cti
ON cti.execution_time >= last_precomputation.max_date
WHERE cti.project_id = %(project_id)s
AND label = 'skeletons.import'
),
all_treenodes AS (
SELECT p.user_id AS user_id,
p.n_imported_treenodes AS n_imported_treenodes
FROM precomputed p
-- Don't expect duplicates
UNION ALL
SELECT sorted_row_history.user_id AS user_id,
1 AS n_imported_treenodes
FROM (
SELECT t.id, t.user_id,
ROW_NUMBER() OVER(PARTITION BY t.id ORDER BY t.edition_time) AS n
FROM last_precomputation,
transactions tx
JOIN treenode__with_history t
ON t.txid = tx.transaction_id
WHERE t.creation_time = tx.execution_time
AND t.creation_time >= last_precomputation.max_date
) sorted_row_history
WHERE sorted_row_history.n = 1
)
SELECT user_id,
-- Return float to make python side arithmetic easier
SUM(n_imported_treenodes)::float AS n_imported_treenodes
FROM all_treenodes
GROUP BY user_id
''', dict(project_id=int(project_id)))
for user_id, n_imported_nodes in cursor.fetchall():
created_nodes = node_stats.get(user_id)
if created_nodes:
# The lower boundary of zero shouldn't be needed, but due to the
# fact that general node counting doesn't take history into
# account (deleted nodes are not counted), there are corner
# cases in which more nodes have been imported than there are
# created (and still available).
node_stats[user_id] = max(0, created_nodes - n_imported_nodes)
# Both SUM and COUNT are represented as floating point number in the
# response, which works better with JSON than Decimal (which is converted to
# a string by the JSON encoder).
return JsonResponse(node_stats)
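# Example response shape for stats_nodecount (hypothetical IDs and counts):
#   {"12": 3410.0, "15": 27.0}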
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def stats_editor(request:HttpRequest, project_id=None) -> JsonResponse:
""" Get the total number of edited nodes per user.
"""
cursor = connection.cursor()
cursor.execute('''
SELECT editor_id, count(editor_id)::float
FROM treenode
WHERE project_id=%(project_id)s
AND editor_id != user_id
GROUP BY editor_id
''', dict(project_id=int(project_id)))
return JsonResponse(dict(cursor.fetchall()))
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def stats_summary(request:HttpRequest, project_id=None) -> JsonResponse:
startdate = datetime.today()
result = {
'treenodes_created': Treenode.objects.filter(
project=project_id,
user=request.user.id,
creation_time__year=startdate.year,
creation_time__month=startdate.month,
creation_time__day=startdate.day
).count(),
'connectors_created': Connector.objects.filter(
project=project_id,
user=request.user.id,
creation_time__year=startdate.year,
creation_time__month=startdate.month,
creation_time__day=startdate.day
).count(),
}
for key, class_name in [
('skeletons_created', 'skeleton')
]:
result[key] = ClassInstance.objects.filter(
project=project_id,
user=request.user.id,
creation_time__year=startdate.year,
creation_time__month=startdate.month,
creation_time__day=startdate.day,
class_column__class_name=class_name).count()
return JsonResponse(result)
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def stats_history(request:HttpRequest, project_id=None) -> JsonResponse:
# Get the start and end dates for the query, defaulting to the last 30
# days.
start_date = request.GET.get('start_date', timezone.now() - timedelta(30))
end_date = request.GET.get('end_date', timezone.now())
# Look up all tree nodes for the project in the given date range.
# Also add a computed field which is just the day of the last edited
# date/time.
tree_nodes = Treenode.objects \
.filter(
project=project_id,
edition_time__range=(start_date, end_date)) \
.extra(select={
'date': 'to_char("treenode"."edition_time", \'YYYYMMDD\')'}) \
.order_by('user', 'date')
# Get the count of tree nodes for each user/day combination.
stats = tree_nodes.values('user__username', 'date') \
.annotate(count=Count('id'))
# Change the 'user__username' field name to just 'name'.
# (If <https://code.djangoproject.com/ticket/12222> ever gets implemented
# then this wouldn't be necessary.)
stats = [{
'name': stat['user__username'],
'date': stat['date'],
'count': stat['count']} for stat in stats]
return JsonResponse(stats, safe=False)
def stats_user_activity(request:HttpRequest, project_id=None) -> JsonResponse:
username = request.GET.get('username', None)
all_users = User.objects.filter().values('username', 'id')
map_name_to_userid = {}
for user in all_users:
map_name_to_userid[user['username']] = user['id']
relations = dict((r.relation_name, r.id) for r in Relation.objects.filter(project=project_id))
# Retrieve all treenodes and creation time
stats = Treenode.objects \
.filter(
project=project_id,
user=map_name_to_userid[username] ) \
.order_by('creation_time') \
.values('creation_time')
# Extract the timestamps from the datetime objects
timepoints = [time.mktime(ele['creation_time'].timetuple()) for ele in stats]
# Retrieve TreenodeConnector creation times
stats_prelink = TreenodeConnector.objects \
.filter(
project=project_id,
user=map_name_to_userid[username],
relation=relations['presynaptic_to'] ) \
.order_by('creation_time').values('creation_time')
stats_postlink = TreenodeConnector.objects \
.filter(
project=project_id,
user=map_name_to_userid[username],
relation=relations['postsynaptic_to'] ) \
.order_by('creation_time').values('creation_time')
prelinks = [time.mktime(ele['creation_time'].timetuple()) for ele in stats_prelink]
postlinks = [time.mktime(ele['creation_time'].timetuple()) for ele in stats_postlink]
return JsonResponse({'skeleton_nodes': timepoints,
'presynaptic': prelinks, 'postsynaptic': postlinks})
@api_view(['GET'])
def stats_user_history(request:HttpRequest, project_id=None) -> JsonResponse:
"""Get per user contribution statistics
    A date range can be provided to limit the scope of the returned statistics.
    By default, the statistics for the last ten days are returned. The returned
data includes created cable length, the number of created synaptic
connections and the number of reviews made, per day and user.
---
parameters:
- name: start_date
description: |
If provided (YYYY-MM-DD), only statistics from this day on are returned (inclusive).
required: false
type: string
paramType: form
- name: end_date
description: |
        If provided (YYYY-MM-DD), only statistics up to this day are returned (inclusive).
required: false
type: string
paramType: form
- name: time_zone
description: |
Optional time zone for the date range, e.g. "US/Eastern"
required: false
type: string
paramType: form
- name: with_imports
description: |
Whether or not to return information on the imported number of nodes and
cable length.
required: false
defaultValue: true
type: string
paramType: form
models:
stats_user_history_cell:
id: stats_user_history_cell
properties:
new_treenodes:
description: Number of nodes created
type: integer
required: true
new_cable_length:
description: Cable length created
type: integer
required: true
new_connectors:
description: Number of new synaptic connections created
type: integer
required: true
new_reviewed_nodes:
description: Number of new node reviews
type: integer
required: true
stats_user_history_day_segment:
id: stats_user_history_day_segment
properties:
date:
description: Entries for a day, expressed as field name
$ref: stats_user_history_cell
required: true
stats_user_history_segment:
id: stats_user_history_segment
properties:
user_id:
description: Entries by day for a user (ID), expressed as field name
$ref: stats_user_history_day_segment
required: true
type:
days:
description: Returned dates in YYYYMMDD format
type: array
items:
type: string
format: date
required: true
daysformatted:
description: Returned dates in more readable format
type: array
items:
type: string
required: true
stats_table:
description: Actual history information by user and by date
$ref: stats_user_history_segment
required: true
"""
raw_time_zone = request.GET.get('time_zone', settings.TIME_ZONE)
time_zone = pytz.timezone(raw_time_zone)
with_imports = get_request_bool(request.GET, 'with_imports', True)
# Get the start date for the query, defaulting to 10 days ago.
start_date = request.GET.get('start_date', None)
if start_date:
start_date = dateparser.parse(start_date)
start_date = time_zone.localize(start_date)
else:
with timezone.override(time_zone):
start_date = timezone.now() - timedelta(10)
# Get the end date for the query, defaulting to now.
end_date = request.GET.get('end_date', None)
if end_date:
end_date = dateparser.parse(end_date)
end_date = time_zone.localize(end_date)
else:
with timezone.override(time_zone):
end_date = timezone.now()
# The API is inclusive and should return stats for the end date as
# well. The actual query is easier with an exclusive end and therefore
# the end date is set to the beginning of the next day.
end_date = end_date + timedelta(days=1)
# Calculate number of days between (including) start and end.
delta = end_date - start_date
daydelta = delta.days
    # If the original delta is bigger than its whole-day part, the day-based
    # delta has to be incremented. This can happen if start date and end date have
# different distances to UTC, e.g. if start date is in EST and end date in
# EDT.
if timedelta(daydelta) < delta:
daydelta += 1
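    # Illustrative example: a start of 2020-03-01 00:00 EST and a (shifted)
    # end of 2020-03-12 00:00 EDT give a delta of 10 days 23 hours, so
    # delta.days is 10 even though eleven calendar days are covered; the
    # increment restores the missing day.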
# To query data with raw SQL we need the UTC version of start and end time
start_date_utc = start_date.astimezone(pytz.utc)
end_date_utc = end_date.astimezone(pytz.utc)
all_users = User.objects.filter().values_list('id', flat=True)
days = []
daysformatted = []
for i in range(daydelta):
tmp_date = start_date + timedelta(days=i)
days.append(tmp_date.strftime("%Y%m%d"))
daysformatted.append(tmp_date.strftime("%a %d, %h %Y"))
stats_table:Dict = {}
for userid in all_users:
if userid == -1:
continue
userid = str(userid)
stats_table[userid] = {}
for i in range(daydelta):
date = (start_date + timedelta(days=i)).strftime("%Y%m%d")
stats_table[userid][date] = {}
cursor = connection.cursor()
treenode_stats = select_node_stats(cursor, project_id,
start_date_utc, end_date_utc, time_zone)
cable_stats = select_cable_stats(cursor, project_id,
start_date_utc, end_date_utc, time_zone)
connector_stats = select_connector_stats(cursor, project_id,
start_date_utc, end_date_utc, time_zone)
tree_reviewed_nodes = select_review_stats(cursor, project_id,
start_date_utc, end_date_utc, time_zone)
if with_imports:
import_treenode_stats = select_import_node_stats(cursor, project_id,
start_date_utc, end_date_utc, time_zone)
import_cable_stats = select_import_cable_stats(cursor, project_id,
start_date_utc, end_date_utc, time_zone)
for di in treenode_stats:
user_id = str(di[0])
date = di[1].strftime('%Y%m%d')
stats_table[user_id][date]['new_treenodes'] = di[2]
for di in cable_stats:
user_id = str(di[0])
date = di[1].strftime('%Y%m%d')
stats_table[user_id][date]['new_cable_length'] = di[2]
for di in connector_stats:
user_id = str(di[0])
date = di[1].strftime('%Y%m%d')
stats_table[user_id][date]['new_connectors'] = di[2]
for di in tree_reviewed_nodes:
user_id = str(di[0])
date = di[1].strftime('%Y%m%d')
stats_table[user_id][date]['new_reviewed_nodes'] = di[2]
if with_imports:
for di in import_treenode_stats:
user_id = str(di[0])
date = di[1].strftime('%Y%m%d')
stats_table[user_id][date]['new_import_treenodes'] = di[2]
for di in import_cable_stats:
user_id = str(di[0])
date = di[1].strftime('%Y%m%d')
stats_table[user_id][date]['new_import_cable_length'] = di[2]
return JsonResponse({
'stats_table': stats_table,
'days': days,
'daysformatted': daysformatted
})
def select_node_stats(cursor, project_id, start_date_utc, end_date_utc,
time_zone, time_unit='day'):
    # Get node count statistics by first getting all hourly precomputed
    # statistics for the requested timezone and then adding all remaining
    # statistics on demand. The result sum is returned as float, to not
    # require Decimal-to-JSON conversion.
cursor.execute('''
WITH precomputed AS (
SELECT user_id,
date,
SUM(n_treenodes) AS n_treenodes
FROM catmaid_stats_summary
WHERE project_id = %(project_id)s
AND date >= %(start_date_utc)s
AND date < %(end_date_utc)s
GROUP BY 1, 2
),
last_precomputation AS (
SELECT COALESCE(
-- Select first start date after last precomputed hour/bucket
date_trunc('hour', MAX(date)) + interval '1 hour',
%(start_date_utc)s) as max_date
FROM precomputed
),
all_treenodes AS (
SELECT p.user_id AS user_id,
p.date AS date,
p.n_treenodes AS n_treenodes
FROM precomputed p
-- Don't expect duplicates
UNION ALL
SELECT t.user_id AS user_id,
t.creation_time AS date,
count(*) AS n_treenodes
FROM treenode t, last_precomputation
WHERE t.project_id = %(project_id)s
AND t.creation_time >= last_precomputation.max_date
AND t.creation_time < %(end_date_utc)s
GROUP BY t.user_id, date
)
SELECT t.user_id,
date_trunc(%(time_unit)s, timezone(%(tz)s, t.date)) AS date,
SUM(t.n_treenodes)::float
FROM all_treenodes t
GROUP BY 1, 2
''', {
'tz': time_zone.zone,
'utc_offset': time_zone,
'project_id': project_id,
'start_date_utc': start_date_utc,
'end_date_utc': end_date_utc,
'time_unit': time_unit
})
return cursor.fetchall()
def select_cable_stats(cursor, project_id, start_date_utc, end_date_utc,
time_zone, time_unit='day'):
    # The result sum is returned as float, to not require Decimal-to-JSON
    # conversion.
cursor.execute('''
WITH precomputed AS (
SELECT user_id,
date,
SUM(cable_length) AS cable_length
FROM catmaid_stats_summary
WHERE project_id = %(project_id)s
AND date >= %(start_date_utc)s
AND date < %(end_date_utc)s
GROUP BY 1, 2
),
last_precomputation AS (
SELECT COALESCE(
-- Select first start date after last precomputed hour/bucket
date_trunc('hour', MAX(date)) + interval '1 hour',
%(start_date_utc)s) as max_date
FROM precomputed
),
all_cable_lengths AS (
SELECT p.user_id, p.date, p.cable_length
FROM precomputed p
-- Don't expect duplicates
UNION ALL
SELECT child.uid, child.date, SUM(edge.length)
FROM (
SELECT
child.user_id AS uid,
child.creation_time AS date,
child.parent_id,
child.location_x,
child.location_y,
child.location_z
FROM treenode child, last_precomputation
WHERE child.project_id = %(project_id)s
AND child.creation_time >= last_precomputation.max_date
AND child.creation_time < %(end_date_utc)s
) AS child
INNER JOIN LATERAL (
SELECT sqrt(pow(child.location_x - parent.location_x, 2)
+ pow(child.location_y - parent.location_y, 2)
+ pow(child.location_z - parent.location_z, 2)) AS length
FROM treenode parent
WHERE parent.project_id = %(project_id)s
AND parent.id = child.parent_id
LIMIT 1
) AS edge ON TRUE
GROUP BY child.uid, child.date
)
SELECT l.user_id,
date_trunc(%(time_unit)s, timezone(%(tz)s, l.date)) AS date,
ROUND(SUM(l.cable_length))::float
FROM all_cable_lengths l
GROUP BY 1, 2
''', dict(tz=time_zone.zone, project_id=project_id,
start_date_utc=start_date_utc, end_date_utc=end_date_utc,
time_unit=time_unit))
return cursor.fetchall()
def select_import_node_stats(cursor, project_id, start_date_utc, end_date_utc,
time_zone, time_unit='day'):
    # Get import statistics by first getting all hourly precomputed
    # statistics for the requested timezone and then adding all remaining
    # statistics on demand. The result sum is returned as float, to not
    # require Decimal-to-JSON conversion.
cursor.execute('''
WITH precomputed AS (
SELECT user_id,
date,
SUM(n_imported_treenodes) AS n_imported_treenodes
FROM catmaid_stats_summary
WHERE project_id = %(project_id)s
AND date >= %(start_date_utc)s
AND date < %(end_date_utc)s
-- This is required to not just take the last available cache
-- entry, which might not contain a valid precomputed import
-- cache field.
AND n_imported_treenodes > 0
GROUP BY 1, 2
),
last_precomputation AS (
SELECT COALESCE(
-- Select first start date after last precomputed hour/bucket
date_trunc('hour', MAX(date)) + interval '1 hour',
%(start_date_utc)s) as max_date
FROM precomputed
),
transactions AS (
SELECT cti.transaction_id, cti.execution_time
FROM last_precomputation
JOIN catmaid_transaction_info cti
ON cti.execution_time >= last_precomputation.max_date
WHERE cti.project_id = %(project_id)s
AND label = 'skeletons.import'
),
all_treenodes AS (
SELECT p.user_id AS user_id,
p.date AS date,
p.n_imported_treenodes AS n_imported_treenodes
FROM precomputed p
-- Don't expect duplicates
UNION ALL
SELECT sorted_row_history.user_id AS user_id,
sorted_row_history.date,
1 AS n_imported_treenodes
FROM (
SELECT t.id, t.user_id, t.creation_time AS date,
ROW_NUMBER() OVER(PARTITION BY t.id ORDER BY t.edition_time) AS n
FROM last_precomputation,
transactions tx
JOIN treenode__with_history t
ON t.txid = tx.transaction_id
WHERE t.creation_time = tx.execution_time
AND t.creation_time >= last_precomputation.max_date
) sorted_row_history
WHERE sorted_row_history.n = 1
)
SELECT t.user_id,
date_trunc(%(time_unit)s, timezone(%(tz)s, t.date)) AS date,
SUM(t.n_imported_treenodes)::float
FROM all_treenodes t
GROUP BY 1, 2
''', {
'tz': time_zone.zone,
'utc_offset': time_zone,
'project_id': project_id,
'start_date_utc': start_date_utc,
'end_date_utc': end_date_utc,
'time_unit': time_unit
})
return cursor.fetchall()
def select_import_cable_stats(cursor, project_id, start_date_utc, end_date_utc,
time_zone, time_unit='day'):
    # The result sum is returned as float, to not require Decimal-to-JSON
    # conversion.
cursor.execute('''
WITH precomputed AS (
SELECT user_id,
date,
SUM(import_cable_length) AS cable_length
FROM catmaid_stats_summary
WHERE project_id = %(project_id)s
AND date >= %(start_date_utc)s
AND date < %(end_date_utc)s
AND import_cable_length > 0
GROUP BY 1, 2
),
last_precomputation AS (
SELECT COALESCE(
-- Select first start date after last precomputed hour/bucket
date_trunc('hour', MAX(date)) + interval '1 hour',
%(start_date_utc)s) as max_date
FROM precomputed
),
cable_info AS (
SELECT p.user_id, p.date, p.cable_length
FROM precomputed p
-- Don't expect duplicates
UNION ALL
SELECT child.uid AS user_id,
child.date AS date,
SUM(edge.length) AS cable_length
FROM (
SELECT
child.user_id AS uid,
child.date,
child.parent_id,
child.location_x,
child.location_y,
child.location_z,
child.txid
FROM (
SELECT DISTINCT t.id, t.user_id, t.creation_time AS date,
t.parent_id, t.location_x, t.location_y, t.location_z, t.txid,
ROW_NUMBER() OVER(PARTITION BY t.id ORDER BY t.edition_time) AS n
FROM last_precomputation, treenode__with_history t
JOIN catmaid_transaction_info cti
ON t.txid = cti.transaction_id
WHERE cti.project_id = %(project_id)s
AND ABS(EXTRACT(EPOCH FROM t.creation_time) - EXTRACT(EPOCH FROM cti.execution_time)) < 3600
AND t.creation_time >= last_precomputation.max_date
AND label = 'skeletons.import'
) child
) AS child
INNER JOIN LATERAL (
SELECT sqrt(pow(child.location_x - parent.location_x, 2)
+ pow(child.location_y - parent.location_y, 2)
+ pow(child.location_z - parent.location_z, 2)) AS length
FROM treenode__with_history parent
WHERE parent.project_id = %(project_id)s
AND parent.id = child.parent_id
-- This is okay, because we assume one transaction per import
AND parent.txid = child.txid
LIMIT 1
) AS edge ON TRUE
GROUP BY child.uid, child.date
)
SELECT l.user_id,
date_trunc(%(time_unit)s, timezone(%(tz)s, l.date)) AS date,
ROUND(SUM(l.cable_length))::float
FROM cable_info l
GROUP BY 1, 2
''', {
'tz': time_zone.zone,
'project_id': project_id,
'start_date_utc': start_date_utc,
'end_date_utc': end_date_utc,
'time_unit': time_unit
})
return cursor.fetchall()
def select_connector_stats(cursor, project_id, start_date_utc, end_date_utc,
time_zone, time_unit='day'):
relations = get_relation_to_id_map(project_id, cursor=cursor)
pre_id, post_id = relations['presynaptic_to'], relations['postsynaptic_to']
# Retrieve a list of how many completed connector relations a user has
# created in a given time frame. A completed connector relation is either
    # one where a user created both the presynaptic and the postsynaptic side
# (one of them in the given time frame) or if a user completes an existing
# 'half connection'. To avoid duplicates, only links are counted, where the
    # second node is younger than the first one. The result sum is returned as
    # float, to not require Decimal-to-JSON conversion.
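    # Schematic example (hypothetical rows): a presynaptic link created by
    # user A at 10:00 and a postsynaptic link created by user B at 09:00 on
    # the same connector form one completed connection; it is credited to
    # user A, whose link is the younger of the pair
    # (t1.creation_time > t2.creation_time).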
cursor.execute('''
WITH precomputed AS (
SELECT user_id,
date,
SUM(n_connector_links) AS n_connector_links
FROM catmaid_stats_summary
WHERE project_id = %(project_id)s
AND date >= %(start_date_utc)s
AND date < %(end_date_utc)s
GROUP BY 1, 2
),
last_precomputation AS (
SELECT COALESCE(
-- Select first start date after last precomputed hour/bucket
date_trunc('hour', MAX(date)) + interval '1 hour',
%(start_date_utc)s) as max_date
FROM precomputed
),
all_connectors AS (
SELECT p.user_id AS user_id,
p.date AS date,
p.n_connector_links AS n_connector_links
FROM precomputed p
-- Don't expect duplicates
UNION ALL
SELECT t1.user_id,
t1.creation_time AS date,
count(*) AS n_connector_links
FROM last_precomputation, treenode_connector t1
JOIN treenode_connector t2
ON t1.connector_id = t2.connector_id
WHERE t1.project_id=%(project_id)s
AND t1.creation_time >= last_precomputation.max_date
AND t1.creation_time < %(end_date_utc)s
AND t1.relation_id <> t2.relation_id
AND (t1.relation_id = %(pre_id)s OR t1.relation_id = %(post_id)s)
AND (t2.relation_id = %(pre_id)s OR t2.relation_id = %(post_id)s)
AND t1.creation_time > t2.creation_time
GROUP BY 1, 2
)
SELECT l.user_id,
date_trunc(%(time_unit)s, timezone(%(tz)s, l.date)) AS date,
SUM(l.n_connector_links)::float
FROM all_connectors l
GROUP BY 1, 2
''', {
'tz': time_zone.zone,
'project_id': project_id,
'start_date_utc': start_date_utc,
'end_date_utc': end_date_utc,
'pre_id': pre_id,
'post_id': post_id,
'time_unit': time_unit
})
return cursor.fetchall()
def select_review_stats(cursor, project_id, start_date_utc, end_date_utc,
time_zone, time_unit='day'):
    # Get review information by first getting all hourly precomputed
    # statistics for the requested timezone and then adding all remaining
    # statistics on demand. The result sum is returned as float, to not
    # require Decimal-to-JSON conversion.
cursor.execute('''
WITH precomputed AS (
SELECT user_id,
date,
SUM(n_reviewed_nodes) AS n_reviewed_nodes
FROM catmaid_stats_summary
WHERE project_id = %(project_id)s
AND date >= %(start_date_utc)s
AND date < %(end_date_utc)s
GROUP BY 1, 2
),
last_precomputation AS (
SELECT COALESCE(
-- Select first start date after last precomputed hour/bucket
date_trunc('hour', MAX(date)) + interval '1 hour',
%(start_date_utc)s) as max_date
FROM precomputed
),
all_reviews AS (
SELECT p.user_id AS reviewer_id,
p.date AS date,
p.n_reviewed_nodes AS n_reviewed_nodes
FROM precomputed p
-- Don't expect duplicates
UNION ALL
SELECT r.reviewer_id AS reviewer_id,
r.review_time AS date,
count(*) AS n_reviewed_nodes
FROM review r, last_precomputation
WHERE r.project_id = %(project_id)s
AND r.review_time >= last_precomputation.max_date
AND r.review_time < %(end_date_utc)s
GROUP BY r.reviewer_id, date
)
SELECT r.reviewer_id,
date_trunc(%(time_unit)s, timezone(%(tz)s, r.date)) AS date,
SUM(r.n_reviewed_nodes)::float
FROM all_reviews r
GROUP BY 1, 2
''', {
'tz': time_zone.zone,
'utc_offset': time_zone,
'project_id': project_id,
'start_date_utc': start_date_utc,
'end_date_utc': end_date_utc,
'time_unit': time_unit
})
return cursor.fetchall()
@transaction.atomic
def populate_stats_summary(project_id, delete:bool=False, incremental:bool=True) -> None:
"""Create statistics summary tables from scratch until yesterday.
"""
cursor = connection.cursor()
if delete:
cursor.execute("""
DELETE FROM catmaid_stats_summary WHERE project_id = %(project_id)s
""", dict(project_id=project_id))
populate_review_stats_summary(project_id, incremental, cursor)
populate_connector_stats_summary(project_id, incremental, cursor)
populate_cable_stats_summary(project_id, incremental, cursor)
populate_nodecount_stats_summary(project_id, incremental, cursor)
populate_import_nodecount_stats_summary(project_id, incremental, cursor)
populate_import_cable_stats_summary(project_id, incremental, cursor)
def populate_review_stats_summary(project_id, incremental:bool=True, cursor=None) -> None:
"""Add review summary information to the summary table. Create hourly
    aggregates in UTC time. These aggregates can still be shifted to other
    timezones with good enough precision for our purpose. By default, this
    happens in an incremental manner, but can optionally be done for all data
from scratch (overriding existing statistics).
"""
if not cursor:
cursor = connection.cursor()
# Add reviewer info
cursor.execute("""
WITH last_precomputation AS (
SELECT CASE WHEN %(incremental)s = FALSE THEN '-infinity'
ELSE COALESCE(date_trunc('hour', MAX(date)) - interval '1 hour',
'-infinity') END AS max_date
FROM catmaid_stats_summary
WHERE project_id=%(project_id)s
AND n_reviewed_nodes > 0
),
review_info AS (
SELECT r.reviewer_id AS user_id,
date_trunc('hour', r.review_time) AS date,
count(*) AS n_reviewed_nodes
FROM review r, last_precomputation
WHERE r.project_id = %(project_id)s
AND r.review_time > last_precomputation.max_date
AND r.review_time < date_trunc('hour', CURRENT_TIMESTAMP)
GROUP BY r.reviewer_id, date
)
INSERT INTO catmaid_stats_summary (project_id, user_id, date,
n_reviewed_nodes)
SELECT %(project_id)s, ri.user_id, ri.date, ri.n_reviewed_nodes
FROM review_info ri
ON CONFLICT (project_id, user_id, date) DO UPDATE
SET n_reviewed_nodes = EXCLUDED.n_reviewed_nodes;
""", dict(project_id=project_id, incremental=incremental))
def populate_connector_stats_summary(project_id, incremental:bool=True, cursor=None) -> None:
"""Add connector summary information to the summary table. Create hourly
    aggregates in UTC time. These aggregates can still be shifted to other
    timezones with good enough precision for our purpose. By default, this
    happens in an incremental manner, but can optionally be done for all data
from scratch (overriding existing statistics).
"""
if not cursor:
cursor = connection.cursor()
relations = get_relation_to_id_map(project_id, cursor=cursor)
pre_id, post_id = relations.get('presynaptic_to'), relations.get('postsynaptic_to')
if pre_id and post_id:
cursor.execute("""
WITH last_precomputation AS (
SELECT CASE WHEN %(incremental)s = FALSE THEN '-infinity'
ELSE COALESCE(date_trunc('hour', MAX(date)) - interval '1 hour',
'-infinity') END AS max_date
FROM catmaid_stats_summary
WHERE project_id=%(project_id)s
AND n_connector_links > 0
),
connector_info AS (
SELECT t1.user_id,
date_trunc('hour', t1.creation_time) AS date,
count(*) AS n_connector_links
FROM last_precomputation, treenode_connector t1
JOIN treenode_connector t2 ON t1.connector_id = t2.connector_id
WHERE t1.project_id=%(project_id)s
AND t1.creation_time >= last_precomputation.max_date
AND t1.creation_time < date_trunc('hour', CURRENT_TIMESTAMP)
AND t1.relation_id <> t2.relation_id
AND (t1.relation_id = %(pre_id)s OR t1.relation_id = %(post_id)s)
AND (t2.relation_id = %(pre_id)s OR t2.relation_id = %(post_id)s)
AND t1.creation_time > t2.creation_time
GROUP BY t1.user_id, date
)
INSERT INTO catmaid_stats_summary (project_id, user_id, date,
n_connector_links)
SELECT %(project_id)s, ci.user_id, ci.date, ci.n_connector_links
FROM connector_info ci
ON CONFLICT (project_id, user_id, date) DO UPDATE
SET n_connector_links = EXCLUDED.n_connector_links;
""", dict(project_id=project_id, pre_id=pre_id, post_id=post_id,
incremental=incremental))
def populate_cable_stats_summary(project_id, incremental:bool=True, cursor=None) -> None:
"""Add cable length summary data to the statistics summary table. By
    default, this happens in an incremental manner, but can optionally be done
for all data from scratch (overriding existing statistics).
"""
if not cursor:
cursor = connection.cursor()
cursor.execute("""
WITH last_precomputation AS (
SELECT CASE WHEN %(incremental)s = FALSE THEN '-infinity'
ELSE COALESCE(date_trunc('hour', MAX(date)) - interval '1 hour',
'-infinity') END AS max_date
FROM catmaid_stats_summary
WHERE project_id=%(project_id)s
AND cable_length > 0
),
cable_info AS (
SELECT child.uid AS user_id,
child.date AS date,
SUM(edge.length) AS cable_length
FROM (
SELECT
child.user_id AS uid,
date_trunc('hour', child.creation_time) AS date,
child.parent_id,
child.location_x,
child.location_y,
child.location_z
FROM treenode child, last_precomputation
WHERE child.project_id = %(project_id)s
AND child.creation_time >= last_precomputation.max_date
AND child.creation_time < date_trunc('hour', CURRENT_TIMESTAMP)
) AS child
INNER JOIN LATERAL (
SELECT sqrt(pow(child.location_x - parent.location_x, 2)
+ pow(child.location_y - parent.location_y, 2)
+ pow(child.location_z - parent.location_z, 2)) AS length
FROM treenode parent
WHERE parent.project_id = %(project_id)s
AND parent.id = child.parent_id
LIMIT 1
) AS edge ON TRUE
GROUP BY child.uid, child.date
)
INSERT INTO catmaid_stats_summary (project_id, user_id, date,
cable_length)
SELECT %(project_id)s, ci.user_id, ci.date, ci.cable_length
FROM cable_info ci
ON CONFLICT (project_id, user_id, date) DO UPDATE
SET cable_length = EXCLUDED.cable_length;
""", dict(project_id=project_id, incremental=incremental))
def populate_nodecount_stats_summary(project_id, incremental:bool=True,
cursor=None) -> None:
"""Add node count summary data to the statistics summary table. By default,
    this happens in an incremental manner, but can optionally be done for all
data from scratch (overriding existing statistics).
"""
if not cursor:
cursor = connection.cursor()
# Add node count incrementally by finding the last precomputed treenode
# count value above zero for the passed-in project and (re)compute
# statistics starting one hour before. This means some statistics
# might be recomputed, which is done to increase robustness.
cursor.execute("""
WITH last_precomputation AS (
SELECT CASE WHEN %(incremental)s = FALSE THEN '-infinity'
ELSE COALESCE(date_trunc('hour', MAX(date)) - interval '1 hour',
'-infinity') END AS max_date
FROM catmaid_stats_summary
WHERE project_id=%(project_id)s
AND n_treenodes > 0
),
node_info AS (
SELECT user_id,
date_trunc('hour', creation_time) AS date,
count(*) as node_count
FROM treenode, last_precomputation
WHERE project_id=%(project_id)s
AND creation_time >= last_precomputation.max_date
GROUP BY 1, 2
)
INSERT INTO catmaid_stats_summary (project_id, user_id, date,
n_treenodes)
SELECT %(project_id)s, ni.user_id, ni.date, ni.node_count
FROM node_info ni
ON CONFLICT (project_id, user_id, date) DO UPDATE
SET n_treenodes = EXCLUDED.n_treenodes;
""", dict(project_id=project_id, incremental=incremental))
def populate_import_nodecount_stats_summary(project_id, incremental:bool=True,
cursor=None) -> None:
"""Add import node count summary data to the statistics summary table. By
default, this happens in an incremental manner, but can optionally be done
for all data from scratch (overriding existing statistics).
"""
if not cursor:
cursor = connection.cursor()
# Add import node count incrementally by finding the last precomputed
# import treenode count value above zero for the passed-in project and
# (re)compute statistics starting one hour before. This means some
# statistics might be recomputed, which is done to increase robustness.
#
# Note on line "AND ABS(EXTRACT…)": Ideally, we could query for the first
# transaction for an object by testing for equality of creation_time and
# execution_time. Unfortunately, this currently only works in cases where
# no ORM was involved. To also cover ORM-based writes, we instead assume
# the difference between both timestamps is smaller than one hour
# (3600 seconds). This should be robust enough for our use case.
cursor.execute("""
WITH last_precomputation AS (
SELECT CASE WHEN %(incremental)s = FALSE THEN '-infinity'
ELSE COALESCE(date_trunc('hour', MAX(date)) - interval '1 hour',
'-infinity') END AS max_date
FROM catmaid_stats_summary
WHERE project_id=%(project_id)s
AND n_imported_treenodes > 0
),
node_info AS (
SELECT sorted_row_history.user_id AS user_id,
date_trunc('hour', sorted_row_history.creation_time) AS date,
count(*) AS node_count
FROM (
SELECT DISTINCT t.id, t.user_id, t.creation_time,
ROW_NUMBER() OVER(PARTITION BY t.id ORDER BY t.edition_time) AS n
FROM last_precomputation, treenode__with_history t
JOIN catmaid_transaction_info cti
ON t.txid = cti.transaction_id
WHERE cti.project_id = %(project_id)s
AND ABS(EXTRACT(EPOCH FROM t.creation_time) - EXTRACT(EPOCH FROM cti.execution_time)) < 3600
AND t.creation_time >= last_precomputation.max_date
AND label = 'skeletons.import'
) sorted_row_history
WHERE sorted_row_history.n = 1
GROUP BY 1, 2
)
INSERT INTO catmaid_stats_summary (project_id, user_id, date,
n_imported_treenodes)
SELECT %(project_id)s, ni.user_id, ni.date, ni.node_count
FROM node_info ni
ON CONFLICT (project_id, user_id, date) DO UPDATE
SET n_imported_treenodes = EXCLUDED.n_imported_treenodes;
""", dict(project_id=project_id, incremental=incremental))
def populate_import_cable_stats_summary(project_id, incremental:bool=True, cursor=None) -> None:
"""Add imported cable length summary data to the statistics summary table.
By default, this happens in an incremental manner, but can optionally be
done for all data from scratch (overriding existing statistics).
"""
if not cursor:
cursor = connection.cursor()
cursor.execute("""
WITH last_precomputation AS (
SELECT CASE WHEN %(incremental)s = FALSE THEN '-infinity'
ELSE COALESCE(date_trunc('hour', MAX(date)) - interval '1 hour',
'-infinity') END AS max_date
FROM catmaid_stats_summary
WHERE project_id=%(project_id)s
AND import_cable_length > 0
),
sorted_row_history AS (
SELECT DISTINCT t.id, t.user_id, t.creation_time AS date,
t.parent_id, t.location_x, t.location_y, t.location_z, t.txid,
ROW_NUMBER() OVER(PARTITION BY t.id ORDER BY t.edition_time) AS n
FROM last_precomputation, treenode__with_history t
JOIN catmaid_transaction_info cti
ON t.txid = cti.transaction_id
WHERE cti.project_id = %(project_id)s
AND ABS(EXTRACT(EPOCH FROM t.creation_time) - EXTRACT(EPOCH FROM cti.execution_time)) < 3600
AND t.creation_time >= last_precomputation.max_date
AND t.creation_time < date_trunc('hour', CURRENT_TIMESTAMP)
AND label = 'skeletons.import'
),
cable_info AS (
SELECT child.uid AS user_id,
child.date AS date,
SUM(edge.length) AS cable_length
FROM (
SELECT
child.user_id AS uid,
child.date,
child.parent_id,
child.location_x,
child.location_y,
child.location_z,
child.txid
FROM sorted_row_history child
) AS child
INNER JOIN LATERAL (
SELECT sqrt(pow(child.location_x - parent.location_x, 2)
+ pow(child.location_y - parent.location_y, 2)
+ pow(child.location_z - parent.location_z, 2)) AS length
FROM treenode__with_history parent
WHERE parent.project_id = %(project_id)s
AND parent.id = child.parent_id
-- This is okay, because we assume one transaction per import
AND parent.txid = child.txid
LIMIT 1
) AS edge ON TRUE
GROUP BY child.uid, child.date
)
INSERT INTO catmaid_stats_summary (project_id, user_id, date,
import_cable_length)
SELECT %(project_id)s, ci.user_id, ci.date, ci.cable_length
FROM cable_info ci
ON CONFLICT (project_id, user_id, date) DO UPDATE
SET import_cable_length = EXCLUDED.cable_length;
""", dict(project_id=project_id, incremental=incremental))
class ServerStats(APIView):
@method_decorator(requires_user_role(UserRole.Admin))
def get(self, request:Request, project_id) -> Response:
"""Return an object that represents the state of various server and
database objects.
"""
return Response({
'time': self.get_current_timestamp(),
'server': self.get_server_stats(),
'database': self.get_database_stats(),
})
def get_current_timestamp(self) -> str:
return datetime.now().strftime("[%Y-%m-%d %H:%M:%S]")
def get_server_stats(self) -> Dict[str, Any]:
return {
'load_avg': os.getloadavg(),
}
def get_database_stats(self) -> Dict[str, Any]:
cursor = connection.cursor()
cursor.execute("select current_database()")
db_name = cursor.fetchone()[0]
cursor.execute("SELECT version()")
db_version = cursor.fetchone()[0]
cursor.execute("""
SELECT (xact_commit * 100) / (xact_commit + xact_rollback),
deadlocks, conflicts, temp_files, pg_size_pretty(temp_bytes),
blks_read, blks_hit
FROM pg_stat_database WHERE datname = %(db_name)s
""", {
'db_name': db_name,
})
db_stats = cursor.fetchone()
cursor.execute("""
SELECT sum(heap_blks_read) AS heap_read,
sum(heap_blks_hit) AS heap_hit,
sum(heap_blks_hit)/ (sum(heap_blks_hit) + sum(heap_blks_read)) AS ratio
FROM pg_statio_user_tables
""")
db_cache = cursor.fetchone()
cursor.execute("""
SELECT sum(idx_blks_read) AS idx_read,
sum(idx_blks_hit) AS idx_hit,
(sum(idx_blks_hit) - sum(idx_blks_read)) / sum(idx_blks_hit) AS ratio
FROM pg_statio_user_indexes
""")
db_idx_cache = cursor.fetchone()
cursor.execute("""
SELECT checkpoints_timed, checkpoints_req, buffers_clean,
maxwritten_clean, buffers_backend_fsync,
extract(epoch from now() - pg_last_xact_replay_timestamp())
FROM pg_stat_bgwriter
""")
bgwriter_stats = cursor.fetchone()
return {
'version': db_version,
# Should be above 95%
'c_ratio': db_stats[0],
# Should be < 10
'deadlocks': db_stats[1],
# Should be < 10
'conflicts': db_stats[2],
# Should be < 100
'temp_files': db_stats[3],
# Should be < 10 GB
'temp_size': db_stats[4],
# blks_hit/blks_read Should be > 90%
'blks_read': db_stats[5],
'blks_hit': db_stats[6],
'cache_hit_ratio': db_stats[6]/(db_stats[5]+db_stats[6]),
# user table hit/blks ratio should be > 90%
'user_blks_read': db_cache[0],
'user_blks_hit': db_cache[1],
'user_cache_hit_ratio': db_cache[1]/(db_cache[0]+db_cache[1]),
# user table hit/blks ratio should be > 90%
'idx_blks_read': db_idx_cache[0],
'idx_blks_hit': db_idx_cache[1],
'idx_cache_hit_ratio': db_idx_cache[1]/(db_idx_cache[0]+db_idx_cache[1]),
# Should be checkpoints_req < checkpoints_timed
'checkpoints_req': bgwriter_stats[0],
'checkpoints_timed': bgwriter_stats[1],
# Should be high
'buffers_clean': bgwriter_stats[2],
# Should be 0
'maxwritten_clean': bgwriter_stats[3],
# Should be 0
'buffers_backend_fsync': bgwriter_stats[4],
# Should be close to 0 or 0
'replication_lag': bgwriter_stats[5],
}
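# A minimal hedged sketch (not part of the original class): on a fresh
# database the *_hit and *_read counters can all be zero, so the ratio
# computations above may want a guard like this before dividing.
def safe_hit_ratio(hits, reads):
    """Return hits/(hits+reads), or None if no blocks were touched yet."""
    total = (hits or 0) + (reads or 0)
    return (hits or 0) / total if total else None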
|
tomka/CATMAID
|
django/applications/catmaid/control/stats.py
|
Python
|
gpl-3.0
| 56,004
|
[
"NEURON"
] |
5e3cfb0aa369aac0e934488ae261756f1cbdb7b31ed7a2fbaa548c06ecd10e90
|
import numpy as np
def array_almost_equal(a1, a2, tol=np.finfo(type(1.0)).eps):
"""Replacement for old numpy.testing.utils.array_almost_equal."""
return (np.abs(a1 - a2) < tol).all()
# this test should be run with abinit!
from ase.calculators.emt import EMT
from ase.io import read, write
from ase.structure import molecule
m1 = molecule('O2', pbc=True)
m1.center(2.0)
write('abinit_save.in', images=m1, format='abinit')
m1.set_calculator(EMT())
e1 = m1.get_potential_energy()
f1 = m1.get_forces()
m2 = read('abinit_save.in', format='abinit')
m2.set_calculator(EMT())
e2 = m2.get_potential_energy()
f2 = m2.get_forces()
# assume atoms definitions are the same if energy/forces are the same: can we do better?
assert abs(e1-e2) < 1.e-6, str(e1) + ' ' + str(e2)
assert array_almost_equal(f1, f2, tol=1.e-6)
|
grhawk/ASE
|
tools/ase/test/fio/abinit.py
|
Python
|
gpl-2.0
| 823
|
[
"ABINIT",
"ASE"
] |
2d11e09560a2f6cc25fa6582f2d25f8534b8cadf05790e10ccea0a26b1b87f63
|
import os
from .job import Job
class OrcaJob(Job):
""" Base class for all ORCA calculations
Subclasses are responsible for setting the correct parameters
in the calculation.
"""
def __init__(self, basename, **kwargs):
Job.__init__(self, basename, **kwargs)
self.program = 'orca'
def get_coordinates(self):
""" Returns the appropriate coordinates section
for an input file.
NB! This should return ONLY the coordinates
and nothing else
"""
s = ""
for label, coordinates in self.xyz_data:
s += "{0:s}{1[0]:20.9f}{1[1]:16.9f}{1[2]:16.9f}\n".format(label, coordinates)
return s[:-1]
def get_memory(self):
""" Orca wants memory in MB """
return self.memory_per_job
def get_basis_set(self):
""" Returns the basis set for the ORCA """
return self.basis_set.upper()
def _program_substitutions(self):
""" Load ORCA specific substitutions.
In CalcIt we have two different kinds of substitutions, namely
SHELL substitutions for the script that runs the QM program
in the `self._run_script_substitutions` variable and the INPUT
substitutions in the `self._comp_chem_substitutions` variable
In general, run script variables are handled through environment
variables such as QM program location and versions whereas QM
calculation settings are handled through options specified on
the command line.
Environment Variables:
----------------------
ORCA -- path to orca installation
"""
path = os.environ.get('ORCA')
if path is None:
raise ValueError("ORCA environment variable not set to path of Orca installation.")
# first we do run_script substitutions
self._run_script_substitutions['PROGPATH'] = path
# then input substitutions
self._comp_chem_substitutions['SCFINFO'] = "HF"
class OrcaEnergyJob(OrcaJob):
def __init__(self, basename, **kwargs):
OrcaJob.__init__(self, basename, **kwargs)
self.runtype = 'energy'
def __str__(self):
return "Orca Energy ({0:s})".format(self.basename)
def __repr__(self):
return "OrcaEnergyJob('{0:s}')".format(self.basename)
|
cstein/calcit
|
calcit/orca.py
|
Python
|
mit
| 2,398
|
[
"ORCA"
] |
16181535212c1cef8cfd0db6a5aaab52dd001eeedfdbfa0dc6799e3db563041a
|
""" MQConsumer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import queue
from DIRAC import S_ERROR, S_OK, gLogger
from DIRAC.Resources.MessageQueue.Utilities import getDestinationAddress, getMQService, generateDefaultCallback
from DIRAC.Core.Utilities.DErrno import EMQNOM
class MQConsumer(object):
def __init__(self, mqManager, mqURI, consumerId, callback=generateDefaultCallback()):
self._connectionManager = mqManager
self._mqURI = mqURI
self._destination = getDestinationAddress(self._mqURI)
self._id = consumerId
self._callback = callback
self.log = gLogger.getSubLogger(self.__class__.__name__)
# subscribing to connection
result = self._connectionManager.getConnector(getMQService(self._mqURI))
if result["OK"]:
connector = result["Value"]
if connector:
result = connector.subscribe(
parameters={"messengerId": self._id, "callback": callback, "destination": self._destination}
)
if not result["OK"]:
self.log.error("Failed to subscribe the consumer:" + self._id)
else:
self.log.error("Failed to initialize MQConsumer! No MQConnector!")
else:
self.log.error("Failed to get MQConnector!")
def get(self):
"""Function gets the message
using the default callback machinery.
This function can only be called if the default
callback function was used.
Returns:
S_OK or S_ERROR: S_ERROR if there are no messages in the
queue or another error appeared,
S_OK with the message content otherwise.
"""
if not self._callback:
return S_ERROR("No callback set!")
try:
msg = self._callback.get()
except queue.Empty:
return S_ERROR(EMQNOM, "No messages in queue")
except Exception as e:
return S_ERROR("Exception: %s" % e)
else:
return S_OK(msg)
def close(self):
"""Function closes the connection for this client.
The consumer id is removed from the connection storage.
It is not guaranteed that the connection will be
removed, because other messengers can still be using it.
Returns:
S_OK or S_ERROR: S_ERROR if the connection was already
closed for this consumer, S_OK otherwise.
"""
return self._connectionManager.stopConnection(mqURI=self._mqURI, messengerId=self._id)
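# Hedged usage sketch (assumes a configured connection manager and broker;
# the URI and consumer id below are illustrative):
def _demo_consume(mqManager):
    consumer = MQConsumer(mqManager, "mq://host:61613/Q/TestQueue", consumerId="consumer1")
    result = consumer.get()  # only meaningful with the default callback
    consumer.close()
    return result["Value"] if result["OK"] else None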
|
ic-hep/DIRAC
|
src/DIRAC/Resources/MessageQueue/MQConsumer.py
|
Python
|
gpl-3.0
| 2,672
|
[
"DIRAC"
] |
8ebc7586f032cf79fac5e5b482b16b195e718da9991403275d9aad3d109ac421
|
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
import math
import numpy as np
from scipy.spatial import distance
class Equilateral(object):
def __init__(self, class_count, normalized_low, normalized_high):
""" Create a lookup table that will be used for both Equilateral encoding and decoding.
"""
# Allocate a matrix to hold the lookup table.
self.encoded = np.ndarray(shape=(class_count, class_count - 1), dtype=float)
# Seed the result.
self.encoded[0][0] = -1
self.encoded[1][0] = 1.0
for k in range(2, class_count):
# scale the matrix so far
r = k
f = math.sqrt(r * r - 1.0) / r
for i in range(0, k):
for j in range(0, k - 1):
self.encoded[i][j] *= f
r = -1.0 / r
for i in range(0, k):
self.encoded[i][k - 1] = r
for i in range(0, k - 1):
self.encoded[k][i] = 0.0
self.encoded[k][k - 1] = 1.0
# Scale it.
min_eq = -1
max_eq = 1
for row in range(0, len(self.encoded)):
for col in range(0, len(self.encoded[row])):
self.encoded[row][col] = ((self.encoded[row][col] - min_eq) / (max_eq - min_eq)) \
* (normalized_high - normalized_low) + normalized_low
def encode(self, class_num):
""" Provide the equilateral encoded vector for the specified class.
"""
return self.encoded[class_num]
def decode(self, vec):
""" Match the specified vector to the class that it most closely fits.
"""
min_dist = float('inf')
result = -1
for i in range(0, len(self.encoded)):
dist = distance.euclidean(vec, self.encoded[i])
if dist < min_dist:
result = i
min_dist = dist
return result
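# Hedged usage sketch: three classes are encoded as the corners of an
# equilateral triangle in two dimensions, and decoding recovers the class.
def _demo_equilateral():
    eq = Equilateral(3, -1.0, 1.0)
    vec = eq.encode(1)          # 2-dimensional vector for class 1
    assert len(vec) == 2        # n classes map to (n - 1) dimensions
    assert eq.decode(vec) == 1  # nearest lookup-table row wins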
|
PeterLauris/aifh
|
vol2/vol2-python-examples/examples/Equilateral.py
|
Python
|
apache-2.0
| 2,880
|
[
"VisIt"
] |
4369326f5d3f0af8dc558666264d324c7ab649aa6a530206818d18a45d5cf9cb
|
"""Visits models."""
from django.db import models
from django.contrib.sites.models import Site
from django.shortcuts import reverse
from django.utils.timezone import now
from dry_rest_permissions.generics import authenticated_users
from markdownx.models import MarkdownxField
class VisitQuerySet(models.QuerySet):
"""Custom Visit queryset."""
def registrations_open(self, open: bool):
"""Filter visits whose registrations are open or closed.
Equivalent to: queryset.filter(registrations_open=open)
(Django does not allow this syntax because
registrations_open is a property.)
open : bool
True corresponds to open registrations,
False to closed registrations.
"""
today = now()
if open:
return self.filter(deadline__gte=today)
else:
return self.filter(deadline__lt=today)
def passed(self):
"""Return a queryset containing only passed visits.
A visit is passed if its date is strictly before today.
"""
return self.filter(date__lt=now().date())
class Participation(models.Model):
"""Represents the participation of a user to a visit.
Allows to store whether the user was present to the visit,
and whether their files were validated.
"""
user = models.ForeignKey('users.User', verbose_name='utilisateur',
related_name='participations',
on_delete=models.CASCADE, null=True)
visit = models.ForeignKey('Visit', verbose_name='sortie',
related_name='participations',
on_delete=models.CASCADE)
submitted = models.DateTimeField(
auto_now_add=True, null=True,
verbose_name='soumis le',
help_text='Date de soumission de la participation')
accepted = models.NullBooleanField(
'accepté',
help_text=(
"Cocher pour confirmer au tutoré sa participation à la sortie."))
present = models.NullBooleanField(
'présent',
help_text=(
"Une fois la sortie passée, indiquer si le lycéen était présent."
))
class Meta: # noqa
verbose_name = 'participation'
# prevent a user from participating in a visit multiple times
unique_together = (('user', 'visit'),)
ordering = ('-submitted',)
def __init__(self, *args, **kwargs):
"""Store the initial value of `accepted` to detect changes."""
super().__init__(*args, **kwargs)
self.initial_accepted = self.accepted
def accepted_changed(self):
"""Return whether the `accepted` field has changed."""
return self.initial_accepted != self.accepted
# Permissions
@staticmethod
@authenticated_users
def has_read_permission(request):
return True
@authenticated_users
def has_object_read_permission(self, request):
return True
@staticmethod
@authenticated_users
def has_write_permission(request):
return True
@authenticated_users
def has_object_write_permission(self, request):
return True
def __str__(self):
return '{} participates in {}'.format(self.user, self.visit)
class VisitOrganizer(models.Model):
"""Represent a tutor who organizes a visit."""
tutor = models.ForeignKey(
'profiles.Tutor', on_delete=models.CASCADE, verbose_name='tuteur')
visit = models.ForeignKey(
'Visit', on_delete=models.CASCADE, verbose_name='sortie')
class Meta: # noqa
verbose_name = 'organisateur'
def __str__(self):
return str(self.tutor)
class Visit(models.Model):
"""Represents a visit that users can attend."""
objects = VisitQuerySet.as_manager()
title = models.CharField(
'titre', max_length=100,
help_text=(
"Préciser si besoin le type de sortie (exposition, concert…) "
))
summary = models.CharField(
'résumé', max_length=300,
default='', blank=True,
help_text=(
"Une ou deux phrases décrivant la sortie de manière attrayante."
))
description = MarkdownxField(
blank=True, default='',
help_text=('Une description plus complète des activités proposées '
'durant la sortie. Ce champ supporte Markdown.'))
place = models.ForeignKey(
'Place',
verbose_name='lieu',
on_delete=models.SET_NULL,
null=True)
date = models.DateField(
help_text="Date de la sortie.")
start_time = models.TimeField(
'heure de début',
help_text='Heure de début de la sortie. Format : hh:mm.')
end_time = models.TimeField(
'heure de fin',
help_text='Heure de fin de la sortie. Format : hh:mm.')
meeting = models.CharField(
'lieu de rendez-vous',
max_length=100, blank=True, default='',
help_text=('Indiquez aux tutorés où ils devront vous retrouver. '
'Exemple : "devant le musée".'))
deadline = models.DateTimeField(
"date limite d'inscription",
help_text=("Note : les lycéens ne pourront plus s'inscrire "
"passée cette date. Format de l'heure : hh:mm."))
image = models.ImageField(
'illustration',
blank=True, null=True,
help_text='Une illustration représentative de la sortie.',
upload_to='visits/images/')
fact_sheet = models.FileField(
'fiche sortie', blank=True, null=True,
upload_to='visits/fact_sheets/',
help_text=('Informe le lycéen de détails sur la sortie. '
'Tous formats supportés, PDF recommandé.'))
permission = models.FileField(
'autorisation de sortie', blank=True, null=True,
upload_to='visits/visit_permissions/',
help_text=('À mettre à disposition pour que le lycéen la remplisse. '
'Tout format supporté, PDF recommandé.'))
participants = models.ManyToManyField('users.User',
through='Participation')
organizers = models.ManyToManyField('profiles.Tutor',
through='VisitOrganizer',
related_name='organized_visits')
def _registrations_open(self):
return now() < self.deadline
# display fancy icon in admin instead of True/False
_registrations_open.boolean = True
registrations_open = property(_registrations_open)
registrations_open.fget.short_description = 'Inscriptions ouvertes'
class Meta: # noqa
ordering = ('date',)
verbose_name = 'sortie'
# Read-only permissions
@staticmethod
@authenticated_users
def has_read_permission(request):
return True
@authenticated_users
def has_object_read_permission(self, request):
return True
@staticmethod
@authenticated_users
def has_write_permission(request):
return True
@authenticated_users
def has_object_write_permission(self, request):
return True
def get_absolute_url(self):
return reverse('api:visit-detail', args=[str(self.pk)])
def get_site_url(self):
site = Site.objects.get_current()
return f'https://{site.domain}/membres/sorties/{self.pk}'
def __str__(self):
return str(self.title)
class Place(models.Model):
"""Represents a place a visit happens at."""
name = models.CharField('nom', max_length=200)
address = models.ForeignKey(
'core.Address', on_delete=models.CASCADE, null=True,
verbose_name='adresse',
help_text='Adresse complète de ce lieu'
)
description = MarkdownxField(
default='', blank=True,
help_text=(
"Une description de ce lieu : de quoi s'agit-il ? "
"Ce champ supporte Markdown."
)
)
class Meta: # noqa
verbose_name = 'lieu'
verbose_name_plural = 'lieux'
ordering = ('name',)
def get_absolute_url(self):
return reverse('api:place-detail', args=[str(self.pk)])
def __str__(self):
return str(self.name)
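# Hedged usage sketch (assumes a migrated database with saved Visit rows;
# not part of the original module):
def _demo_visit_queries():
    open_visits = Visit.objects.registrations_open(True)
    closed_visits = Visit.objects.registrations_open(False)
    past_visits = Visit.objects.passed()
    return open_visits.count(), closed_visits.count(), past_visits.count()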
|
oser-cs/oser-website
|
visits/models.py
|
Python
|
gpl-3.0
| 8,253
|
[
"VisIt"
] |
8224aca7f7f3e024529c8623b7c1d6c29fc7cd7cc56e4ebf020c4330713e3b99
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cPickle
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import passlib.utils
import re
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from cStringIO import StringIO
from collections import defaultdict, Iterable, Mapping, MutableSet, OrderedDict
from itertools import islice, izip, groupby, repeat
from lxml import etree
from which import which
from threading import local
import traceback
import csv
from operator import itemgetter
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
from .parse_version import parse_version
import odoo
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from odoo.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase, etree._Entity)
# Configure default global parser
etree.set_default_parser(etree.XMLParser(resolve_entities=False))
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
path = os.environ.get('PATH', os.defpath).split(os.pathsep)
if config.get('bin_path') and config['bin_path'] != 'None':
path.append(config['bin_path'])
return which(name, path=os.pathsep.join(path))
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Command `%s` not found.' % name)
return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
"""
Force the database PostgreSQL environment variables to the database
configuration of Odoo.
Note: On systems where pg_restore/pg_dump require an explicit password
(i.e. on Windows where TCP sockets are used), it is necessary to pass the
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
"""
env = os.environ.copy()
if odoo.tools.config['db_host']:
env['PGHOST'] = odoo.tools.config['db_host']
if odoo.tools.config['db_port']:
env['PGPORT'] = str(odoo.tools.config['db_port'])
if odoo.tools.config['db_user']:
env['PGUSER'] = odoo.tools.config['db_user']
if odoo.tools.config['db_password']:
env['PGPASSWORD'] = odoo.tools.config['db_password']
return env
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
with open(os.devnull) as dn:
args2 = (prog,) + args
rc = subprocess.call(args2, env=env, stdout=dn, stderr=subprocess.STDOUT)
if rc:
raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
>>> file_open('hr/report/timesheet.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import odoo.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.normcase(os.path.join(basedir, path)))
import odoo.modules as addons
paths = addons.module.ad_paths + [config['root_path']]
for addons_path in paths:
addons_path = os.path.normpath(os.path.normcase(addons_path)) + os.sep
if name.startswith(addons_path):
break
else:
raise ValueError("Unknown path: %s" % name)
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
raise IOError('Report %r does not exist or was deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis (christophe@tinyerp.com)
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
def partition(pred, elems):
""" Return a pair equivalent to:
``filter(pred, elems), filter(lambda x: not pred(x), elems)`` """
yes, nos = [], []
for elem in elems:
(yes if pred(elem) else nos).append(elem)
return yes, nos
def topological_sort(elems):
""" Return a list of elements sorted so that their dependencies are listed
before them in the result.
:param elems: specifies the elements to sort with their dependencies; it is
a dictionary like `{element: dependencies}` where `dependencies` is a
collection of elements that must appear before `element`. The elements
of `dependencies` are not required to appear in `elems`; they will
simply not appear in the result.
:returns: a list with the keys of `elems` sorted according to their
specification.
"""
# the algorithm is inspired by [Tarjan 1976],
# http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
result = []
visited = set()
def visit(n):
if n not in visited:
visited.add(n)
if n in elems:
# first visit all dependencies of n, then append n to result
map(visit, elems[n])
result.append(n)
map(visit, elems)
return result
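# Hedged usage sketch (hypothetical module names): dependencies come out
# before their dependents; order among independent keys is unspecified.
def _demo_topological_sort():
    deps = {'sale': ['base'], 'report': ['sale'], 'base': []}
    order = topological_sort(deps)
    assert order.index('base') < order.index('sale') < order.index('report')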
try:
import xlwt
# add some sanitization to respect the Excel sheet name restrictions;
# as the sheet name is often translatable, we cannot control the input
class PatchedWorkbook(xlwt.Workbook):
def add_sheet(self, name, cell_overwrite_ok=False):
# invalid Excel character: []:*?/\
name = re.sub(r'[\[\]:*?/\\]', '', name)
# maximum size is 31 characters
name = name[:31]
return super(PatchedWorkbook, self).add_sheet(name, cell_overwrite_ok=cell_overwrite_ok)
xlwt.Workbook = PatchedWorkbook
except ImportError:
xlwt = None
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
def to_xml(s):
return s.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
csvpath = odoo.modules.module.get_resource_path('base', 'res', 'res.lang.csv')
try:
# read (code, name) from languages in base/res/res.lang.csv
result = []
with open(csvpath) as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
fields = reader.next()
code_index = fields.index("code")
name_index = fields.index("name")
for row in reader:
result.append((ustr(row[code_index]), ustr(row[name_index])))
except Exception:
_logger.error("Could not read %s", csvpath)
result = []
return sorted(result or [('en_US', u'English')], key=itemgetter(1))
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
Input number : account or invoice number
Output return: the same number completed with the recursive mod10
key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
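# Worked example (a sketch of the recursive modulo-10 scheme above): for
# "123" the running report is codec[(1+0)%10]=9, codec[(2+9)%10]=9 and
# codec[(3+9)%10]=4, so the check digit is (10-4)%10 = 6.
def _demo_mod10r():
    assert mod10r("123") == "1236"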
def str2bool(s, default=None):
s = ustr(s).lower()
y = 'y yes 1 true t on'.split()
n = 'n no 0 false f off'.split()
if s not in (y + n):
if default is None:
raise ValueError('Use 0/1/yes/no/true/false/on/off')
return bool(default)
return s in y
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is not tzinfo in a datetime value (e.g when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
# see comments above, and babel's format_datetime assumes a UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
# LDML date format patterns uses letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
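# Hedged usage sketch: the locale argument is only consulted for %x and %X,
# so it can be passed as None for purely numeric patterns.
def _demo_posix_to_ldml():
    assert posix_to_ldml('%Y-%m-%d', locale=None) == 'yyyy-MM-dd'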
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
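# Hedged usage sketch: five elements split into pairs plus a shorter tail.
def _demo_split_every():
    assert list(split_every(2, range(5))) == [(0, 1), (2, 3), (4,)]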
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('odoo.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('odoo.foo.bar'):
do_suff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, basestring),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
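# Hedged usage sketch: the index always points at the last yielded element.
def _demo_counting_stream():
    stream = CountingStream(['a', 'b'])
    assert stream.next() == 'a' and stream.index == 0
    assert stream.next() == 'b' and stream.index == 1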
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-u', '--update', '-i', '--init', '--i18n-overwrite']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
Useful as a default value for methods
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
same as len; defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
def dumpstacks(sig=None, frame=None):
""" Signal handler: dump a stack trace for each existing thread."""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = {th.ident: {'name': th.name,
'uid': getattr(th, 'uid', 'n/a'),
'dbname': getattr(th, 'dbname', 'n/a')}
for th in threading.enumerate()}
for threadId, stack in sys._current_frames().items():
thread_info = threads_info.get(threadId, {})
code.append("\n# Thread: %s (id:%s) (db:%s) (uid:%s)" %
(thread_info.get('name', 'n/a'),
threadId,
thread_info.get('dbname', 'n/a'),
thread_info.get('uid', 'n/a')))
for line in extract_stack(stack):
code.append(line)
if odoo.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
def freehash(arg):
try:
return hash(arg)
except Exception:
if isinstance(arg, Mapping):
return hash(frozendict(arg))
elif isinstance(arg, Iterable):
return hash(frozenset(map(freehash, arg)))
else:
return id(arg)
class frozendict(dict):
""" An implementation of an immutable dictionary. """
def __delitem__(self, key):
raise NotImplementedError("'__delitem__' not supported on frozendict")
def __setitem__(self, key, val):
raise NotImplementedError("'__setitem__' not supported on frozendict")
def clear(self):
raise NotImplementedError("'clear' not supported on frozendict")
def pop(self, key, default=None):
raise NotImplementedError("'pop' not supported on frozendict")
def popitem(self):
raise NotImplementedError("'popitem' not supported on frozendict")
def setdefault(self, key, default=None):
raise NotImplementedError("'setdefault' not supported on frozendict")
def update(self, *args, **kwargs):
raise NotImplementedError("'update' not supported on frozendict")
def __hash__(self):
return hash(frozenset((key, freehash(val)) for key, val in self.iteritems()))
class Collector(Mapping):
""" A mapping from keys to lists. This is essentially a space optimization
for ``defaultdict(list)``.
"""
__slots__ = ['_map']
def __init__(self):
self._map = {}
def add(self, key, val):
vals = self._map.setdefault(key, [])
if val not in vals:
vals.append(val)
def __getitem__(self, key):
return self._map.get(key, ())
def __iter__(self):
return iter(self._map)
def __len__(self):
return len(self._map)
class OrderedSet(MutableSet):
""" A set collection that remembers the elements first insertion order. """
__slots__ = ['_map']
def __init__(self, elems=()):
self._map = OrderedDict((elem, None) for elem in elems)
def __contains__(self, elem):
return elem in self._map
def __iter__(self):
return iter(self._map)
def __len__(self):
return len(self._map)
def add(self, elem):
self._map[elem] = None
def discard(self, elem):
self._map.pop(elem, None)
class LastOrderedSet(OrderedSet):
""" A set collection that remembers the elements last insertion order. """
def add(self, elem):
OrderedSet.discard(self, elem)
OrderedSet.add(self, elem)
@contextmanager
def ignore(*exc):
try:
yield
except exc:
pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
def html_escape(text):
return werkzeug.utils.escape(text, quote=True)
else:
def html_escape(text):
return werkzeug.utils.escape(text)
def formatLang(env, value, digits=None, grouping=True, monetary=False, dp=False, currency_obj=False):
"""
Assuming 'Account' decimal.precision=3:
formatLang(value) -> digits=2 (default)
formatLang(value, digits=4) -> digits=4
formatLang(value, dp='Account') -> digits=3
formatLang(value, digits=5, dp='Account') -> digits=5
"""
if digits is None:
digits = DEFAULT_DIGITS = 2
if dp:
decimal_precision_obj = env['decimal.precision']
digits = decimal_precision_obj.precision_get(dp)
elif currency_obj:
digits = currency_obj.decimal_places
elif (hasattr(value, '_field') and isinstance(value._field, (float_field, function_field)) and value._field.digits):
digits = value._field.digits[1]
if not digits and digits is not 0:
digits = DEFAULT_DIGITS
if isinstance(value, (str, unicode)) and not value:
return ''
lang = env.user.company_id.partner_id.lang or 'en_US'
lang_objs = env['res.lang'].search([('code', '=', lang)])
if not lang_objs:
lang_objs = env['res.lang'].search([], limit=1)
lang_obj = lang_objs[0]
res = lang_obj.format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
if currency_obj and currency_obj.symbol:
if currency_obj.position == 'after':
res = '%s %s' % (res, currency_obj.symbol)
elif currency_obj and currency_obj.position == 'before':
res = '%s %s' % (currency_obj.symbol, res)
return res
def _consteq(str1, str2):
""" Constant-time string comparison. Suitable to compare bytestrings of fixed,
known length only, because length difference is optimized. """
return len(str1) == len(str2) and sum(ord(x)^ord(y) for x, y in zip(str1, str2)) == 0
consteq = getattr(passlib.utils, 'consteq', _consteq)
class Pickle(object):
@classmethod
def load(cls, stream, errors=False):
unpickler = cPickle.Unpickler(stream)
# pickle builtins: str/unicode, int/long, float, bool, tuple, list, dict, None
unpickler.find_global = None
try:
return unpickler.load()
except Exception:
_logger.warning('Failed unpickling data, returning default: %r', errors, exc_info=True)
return errors
@classmethod
def loads(cls, text):
return cls.load(StringIO(text))
dumps = cPickle.dumps
dump = cPickle.dump
pickle = Pickle
|
hip-odoo/odoo
|
odoo/tools/misc.py
|
Python
|
agpl-3.0
| 38,249
|
[
"VisIt"
] |
bc1b81801ca9b86ef7473125afeb8f67abe551bc85f3191f0b07a32eaa225531
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
***************************
**espresso.FixedTupleList**
***************************
"""
from espresso import pmi
import _espresso
import espresso
from espresso.esutil import cxxinit
class FixedTupleListLocal(_espresso.FixedTupleList):
'The (local) fixed tuple list.'
def __init__(self, storage):
'Local construction of a fixed tuple list'
if pmi.workerIsActive():
cxxinit(self, _espresso.FixedTupleList, storage)
"""def addTuples(self, tuplelist):
'add tuple to fixed tuple list'
if pmi.workerIsActive():
return self.cxxclass.addTuple(self, tuplelist)"""
def size(self):
'Count the number of tuples in the global tuple list; involves a global reduction.'
if pmi.workerIsActive():
return self.cxxclass.size(self)
if pmi.isController:
class FixedTupleList(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.FixedTupleListLocal',
#localcall = [ "add" ],
pmicall = [ "addTuple", "getTuples" ],
pmiinvoke = ["size"]
)
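# Editor's note: hypothetical usage sketch of the controller proxy above,
# loosely following the ESPResSo++ examples; 'system' is assumed to be an
# espresso System whose storage already holds particles 1, 2 and 3.
def _fixed_tuple_list_demo(system):
    ftl = FixedTupleList(system.storage)
    ftl.addTuple([1, 2, 3])  # group particles 1, 2 and 3 into one tuple
    return ftl.size()        # involves a global reduction over the workers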
|
BackupTheBerlios/espressopp
|
src/FixedTupleList.py
|
Python
|
gpl-3.0
| 1,980
|
[
"ESPResSo"
] |
8ca213500c860539122f2aa1815d7c2d3ecf111665e71d72ac20ec4009d2ad90
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# Li Li <aiki.nogard@gmail.com>
# Giuseppe Vettigli <vettigli@gmail.com>
# License: BSD 3 clause
from io import StringIO
from numbers import Integral
import numpy as np
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ..base import is_classifier
from . import _criterion
from . import _tree
from ._reingold_tilford import buchheim, Tree
from . import DecisionTreeClassifier
import warnings
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
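# Editor's note: a small demo of _color_brew (illustration only). Hues are
# spaced 360/n degrees apart, so each class gets a clearly distinct color.
def _color_brew_demo():
    colors = _color_brew(3)
    assert len(colors) == 3
    assert all(0 <= channel <= 255 for rgb in colors for channel in rgb)
    return colors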
class Sentinel:
def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
@_deprecate_positional_args
def plot_tree(decision_tree, *, max_depth=None, feature_names=None,
class_names=None, label='all', filled=False,
impurity=True, node_ids=False,
proportion=False, rotate='deprecated', rounded=False,
precision=3, ax=None, fontsize=None):
"""Plot a decision tree.
The sample counts that are shown are weighted with any sample_weights that
might be present.
The visualization is fit automatically to the size of the axis.
Use the ``figsize`` or ``dpi`` arguments of ``plt.figure`` to control
the size of the rendering.
Read more in the :ref:`User Guide <tree>`.
.. versionadded:: 0.21
Parameters
----------
decision_tree : decision tree regressor or classifier
The decision tree to be plotted.
max_depth : int, default=None
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of str, default=None
Names of each of the features.
If None, generic names will be used ("X[0]", "X[1]", ...).
class_names : list of str or bool, default=None
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, default='all'
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, default=False
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
impurity : bool, default=True
When set to ``True``, show the impurity at each node.
node_ids : bool, default=False
When set to ``True``, show the ID number on each node.
proportion : bool, default=False
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, default=False
This parameter has no effect on the matplotlib tree visualisation and
it is kept here for backward compatibility.
.. deprecated:: 0.23
``rotate`` is deprecated in 0.23 and will be removed in 0.25.
rounded : bool, default=False
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
precision : int, default=3
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
ax : matplotlib axis, default=None
Axes to plot to. If None, use current axis. Any previous content
is cleared.
fontsize : int, default=None
Size of text font. If None, determined automatically to fit figure.
Returns
-------
annotations : list of artists
List containing the artists for the annotation boxes making up the
tree.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.plot_tree(clf) # doctest: +SKIP
[Text(251.5,345.217,'X[3] <= 0.8...
"""
check_is_fitted(decision_tree)
if rotate != 'deprecated':
warnings.warn(("'rotate' has no effect and is deprecated in 0.23. "
"It will be removed in 0.25."),
FutureWarning)
exporter = _MPLTreeExporter(
max_depth=max_depth, feature_names=feature_names,
class_names=class_names, label=label, filled=filled,
impurity=impurity, node_ids=node_ids,
proportion=proportion, rotate=rotate, rounded=rounded,
precision=precision, fontsize=fontsize)
return exporter.export(decision_tree, ax=ax)
class _BaseTreeExporter:
def __init__(self, max_depth=None, feature_names=None,
class_names=None, label='all', filled=False,
impurity=True, node_ids=False,
proportion=False, rotate=False, rounded=False,
precision=3, fontsize=None):
self.max_depth = max_depth
self.feature_names = feature_names
self.class_names = class_names
self.label = label
self.filled = filled
self.impurity = impurity
self.node_ids = node_ids
self.proportion = proportion
self.rotate = rotate
self.rounded = rounded
self.precision = precision
self.fontsize = fontsize
def get_color(self, value):
# Find the appropriate color & intensity for a node
if self.colors['bounds'] is None:
# Classification tree
color = list(self.colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = ((sorted_values[0] - sorted_values[1])
/ (1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(self.colors['rgb'][0])
alpha = ((value - self.colors['bounds'][0]) /
(self.colors['bounds'][1] - self.colors['bounds'][0]))
# unpack numpy scalars
alpha = float(alpha)
# compute the color as alpha against white
color = [int(round(alpha * c + (1 - alpha) * 255, 0)) for c in color]
# Return html color code in #RRGGBB format
return '#%02x%02x%02x' % tuple(color)
def get_fill_color(self, tree, node_id):
# Fetch appropriate color for node
if 'rgb' not in self.colors:
# Initialize colors and bounds if required
self.colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
self.colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif (tree.n_classes[0] == 1 and
len(np.unique(tree.value)) != 1):
# Find max and min values in leaf nodes for regression
self.colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
return self.get_color(node_val)
def node_to_str(self, tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (self.label == 'root' and node_id == 0) or self.label == 'all'
characters = self.characters
node_string = characters[-1]
# Write node ID
if self.node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if self.feature_names is not None:
feature = self.feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id],
self.precision),
characters[4])
# Write impurity
if self.impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, str):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], self.precision))
+ characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if self.proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if self.proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, self.precision)
elif self.proportion:
# Classification
value_text = np.around(value, self.precision)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, self.precision)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (self.class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if self.class_names is not True:
class_name = self.class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string.endswith(characters[4]):
node_string = node_string[:-len(characters[4])]
return node_string + characters[5]
class _DOTTreeExporter(_BaseTreeExporter):
def __init__(self, out_file=SENTINEL, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False, rounded=False,
special_characters=False, precision=3):
super().__init__(
max_depth=max_depth, feature_names=feature_names,
class_names=class_names, label=label, filled=filled,
impurity=impurity,
node_ids=node_ids, proportion=proportion, rotate=rotate,
rounded=rounded,
precision=precision)
self.leaves_parallel = leaves_parallel
self.out_file = out_file
self.special_characters = special_characters
# PostScript compatibility for special characters
if special_characters:
self.characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>',
'>', '<']
else:
self.characters = ['#', '[', ']', '<=', '\\n', '"', '"']
# validate
if isinstance(precision, Integral):
if precision < 0:
raise ValueError("'precision' should be greater or equal to 0."
" Got {} instead.".format(precision))
else:
raise ValueError("'precision' should be an integer. Got {}"
" instead.".format(type(precision)))
# The depth of each node for plotting with 'leaf' option
self.ranks = {'leaves': []}
# The colors to render each node with
self.colors = {'bounds': None}
def export(self, decision_tree):
# Check length of feature_names before getting into the tree node
# Raise error if length of feature_names does not match
# n_features_ in the decision_tree
if self.feature_names is not None:
if len(self.feature_names) != decision_tree.n_features_:
raise ValueError("Length of feature_names, %d "
"does not match number of features, %d"
% (len(self.feature_names),
decision_tree.n_features_))
# each part writes to out_file
self.head()
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
self.recurse(decision_tree, 0, criterion="impurity")
else:
self.recurse(decision_tree.tree_, 0,
criterion=decision_tree.criterion)
self.tail()
def tail(self):
# If required, draw leaf nodes at same depth as each other
if self.leaves_parallel:
for rank in sorted(self.ranks):
self.out_file.write(
"{rank=same ; " +
"; ".join(r for r in self.ranks[rank]) + "} ;\n")
self.out_file.write("}")
def head(self):
self.out_file.write('digraph Tree {\n')
# Specify node aesthetics
self.out_file.write('node [shape=box')
rounded_filled = []
if self.filled:
rounded_filled.append('filled')
if self.rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
self.out_file.write(
', style="%s", color="black"'
% ", ".join(rounded_filled))
if self.rounded:
self.out_file.write(', fontname=helvetica')
self.out_file.write('] ;\n')
# Specify graph & edge aesthetics
if self.leaves_parallel:
self.out_file.write(
'graph [ranksep=equally, splines=polyline] ;\n')
if self.rounded:
self.out_file.write('edge [fontname=helvetica] ;\n')
if self.rotate:
self.out_file.write('rankdir=LR ;\n')
def recurse(self, tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if self.max_depth is None or depth <= self.max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
self.ranks['leaves'].append(str(node_id))
elif str(depth) not in self.ranks:
self.ranks[str(depth)] = [str(node_id)]
else:
self.ranks[str(depth)].append(str(node_id))
self.out_file.write(
'%d [label=%s' % (node_id, self.node_to_str(tree, node_id,
criterion)))
if self.filled:
self.out_file.write(', fillcolor="%s"'
% self.get_fill_color(tree, node_id))
self.out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
self.out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((self.rotate - .5) * -2)
self.out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
self.out_file.write('%d, headlabel="True"]' %
angles[0])
else:
self.out_file.write('%d, headlabel="False"]' %
angles[1])
self.out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
self.recurse(tree, left_child, criterion=criterion,
parent=node_id, depth=depth + 1)
self.recurse(tree, right_child, criterion=criterion,
parent=node_id, depth=depth + 1)
else:
self.ranks['leaves'].append(str(node_id))
self.out_file.write('%d [label="(...)"' % node_id)
if self.filled:
# color cropped nodes grey
self.out_file.write(', fillcolor="#C0C0C0"')
self.out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
self.out_file.write('%d -> %d ;\n' % (parent, node_id))
class _MPLTreeExporter(_BaseTreeExporter):
def __init__(self, max_depth=None, feature_names=None,
class_names=None, label='all', filled=False,
impurity=True, node_ids=False,
proportion=False, rotate=False, rounded=False,
precision=3, fontsize=None):
super().__init__(
max_depth=max_depth, feature_names=feature_names,
class_names=class_names, label=label, filled=filled,
impurity=impurity, node_ids=node_ids, proportion=proportion,
rotate=rotate, rounded=rounded, precision=precision)
self.fontsize = fontsize
# validate
if isinstance(precision, Integral):
if precision < 0:
raise ValueError("'precision' should be greater or equal to 0."
" Got {} instead.".format(precision))
else:
raise ValueError("'precision' should be an integer. Got {}"
" instead.".format(type(precision)))
# The depth of each node for plotting with 'leaf' option
self.ranks = {'leaves': []}
# The colors to render each node with
self.colors = {'bounds': None}
self.characters = ['#', '[', ']', '<=', '\n', '', '']
self.bbox_args = dict()
if self.rounded:
self.bbox_args['boxstyle'] = "round"
self.arrow_args = dict(arrowstyle="<-")
def _make_tree(self, node_id, et, criterion, depth=0):
# traverses _tree.Tree recursively, builds intermediate
# "_reingold_tilford.Tree" object
name = self.node_to_str(et, node_id, criterion=criterion)
if (et.children_left[node_id] != _tree.TREE_LEAF
and (self.max_depth is None or depth <= self.max_depth)):
children = [self._make_tree(et.children_left[node_id], et,
criterion, depth=depth + 1),
self._make_tree(et.children_right[node_id], et,
criterion, depth=depth + 1)]
else:
return Tree(name, node_id)
return Tree(name, node_id, *children)
def export(self, decision_tree, ax=None):
import matplotlib.pyplot as plt
from matplotlib.text import Annotation
if ax is None:
ax = plt.gca()
ax.clear()
ax.set_axis_off()
my_tree = self._make_tree(0, decision_tree.tree_,
decision_tree.criterion)
draw_tree = buchheim(my_tree)
# important to make sure we're still
# inside the axis after drawing the box
# this makes sense because the width of a box
# is about the same as the distance between boxes
max_x, max_y = draw_tree.max_extents() + 1
ax_width = ax.get_window_extent().width
ax_height = ax.get_window_extent().height
scale_x = ax_width / max_x
scale_y = ax_height / max_y
self.recurse(draw_tree, decision_tree.tree_, ax,
scale_x, scale_y, ax_height)
anns = [ann for ann in ax.get_children()
if isinstance(ann, Annotation)]
# update sizes of all bboxes
renderer = ax.figure.canvas.get_renderer()
for ann in anns:
ann.update_bbox_position_size(renderer)
if self.fontsize is None:
# get figure to data transform
# adjust fontsize to avoid overlap
# get max box width and height
extents = [ann.get_bbox_patch().get_window_extent()
for ann in anns]
max_width = max([extent.width for extent in extents])
max_height = max([extent.height for extent in extents])
# width should be around scale_x in axis coordinates
size = anns[0].get_fontsize() * min(scale_x / max_width,
scale_y / max_height)
for ann in anns:
ann.set_fontsize(size)
return anns
def recurse(self, node, tree, ax, scale_x, scale_y, height, depth=0):
import matplotlib.pyplot as plt
kwargs = dict(bbox=self.bbox_args.copy(), ha='center', va='center',
zorder=100 - 10 * depth, xycoords='axes pixels',
arrowprops=self.arrow_args.copy())
kwargs['arrowprops']['edgecolor'] = plt.rcParams['text.color']
if self.fontsize is not None:
kwargs['fontsize'] = self.fontsize
# offset things by .5 to center them in plot
xy = ((node.x + .5) * scale_x, height - (node.y + .5) * scale_y)
if self.max_depth is None or depth <= self.max_depth:
if self.filled:
kwargs['bbox']['fc'] = self.get_fill_color(tree,
node.tree.node_id)
else:
kwargs['bbox']['fc'] = ax.get_facecolor()
if node.parent is None:
# root
ax.annotate(node.tree.label, xy, **kwargs)
else:
xy_parent = ((node.parent.x + .5) * scale_x,
height - (node.parent.y + .5) * scale_y)
ax.annotate(node.tree.label, xy_parent, xy, **kwargs)
for child in node.children:
self.recurse(child, tree, ax, scale_x, scale_y, height,
depth=depth + 1)
else:
xy_parent = ((node.parent.x + .5) * scale_x,
height - (node.parent.y + .5) * scale_y)
kwargs['bbox']['fc'] = 'grey'
ax.annotate("\n (...) \n", xy_parent, xy, **kwargs)
@_deprecate_positional_args
def export_graphviz(decision_tree, out_file=None, *, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False, precision=3):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : object or str, default=None
Handle or name of the output file. If ``None``, the result is
returned as a string.
.. versionchanged:: 0.20
Default of out_file changed from "tree.dot" to None.
max_depth : int, default=None
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of str, default=None
Names of each of the features.
If None, generic names will be used ("feature_0", "feature_1", ...).
class_names : list of str or bool, default=None
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, default='all'
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, default=False
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, default=False
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, default=True
When set to ``True``, show the impurity at each node.
node_ids : bool, default=False
When set to ``True``, show the ID number on each node.
proportion : bool, default=False
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, default=False
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, default=False
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, default=False
When set to ``False``, ignore special characters for PostScript
compatibility.
precision : int, default=3
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
Returns
-------
dot_data : string
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf)
'digraph Tree {...
"""
check_is_fitted(decision_tree)
own_file = False
return_string = False
try:
if isinstance(out_file, str):
out_file = open(out_file, "w", encoding="utf-8")
own_file = True
if out_file is None:
return_string = True
out_file = StringIO()
exporter = _DOTTreeExporter(
out_file=out_file, max_depth=max_depth,
feature_names=feature_names, class_names=class_names, label=label,
filled=filled, leaves_parallel=leaves_parallel, impurity=impurity,
node_ids=node_ids, proportion=proportion, rotate=rotate,
rounded=rounded, special_characters=special_characters,
precision=precision)
exporter.export(decision_tree)
if return_string:
return exporter.out_file.getvalue()
finally:
if own_file:
out_file.close()
def _compute_depth(tree, node):
"""
Returns the depth of the subtree rooted in node.
"""
def compute_depth_(current_node, current_depth,
children_left, children_right, depths):
depths += [current_depth]
left = children_left[current_node]
right = children_right[current_node]
if left != -1 and right != -1:
compute_depth_(left, current_depth+1,
children_left, children_right, depths)
compute_depth_(right, current_depth+1,
children_left, children_right, depths)
depths = []
compute_depth_(node, 1, tree.children_left, tree.children_right, depths)
return max(depths)
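# Editor's note: _compute_depth only reads children_left/children_right, so
# a minimal stand-in object (hypothetical, for illustration) exercises it.
def _compute_depth_demo():
    class _FakeTree:
        children_left = [1, -1, -1]   # node 0 splits; nodes 1 and 2 are leaves
        children_right = [2, -1, -1]
    return _compute_depth(_FakeTree(), 0)  # returns 2: root plus one level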
@_deprecate_positional_args
def export_text(decision_tree, *, feature_names=None, max_depth=10,
spacing=3, decimals=2, show_weights=False):
"""Build a text report showing the rules of a decision tree.
Note that backwards compatibility may not be supported.
Parameters
----------
decision_tree : object
The decision tree estimator to be exported.
It can be an instance of
DecisionTreeClassifier or DecisionTreeRegressor.
feature_names : list of str, default=None
A list of length n_features containing the feature names.
If None, generic names will be used ("feature_0", "feature_1", ...).
max_depth : int, default=10
Only the first max_depth levels of the tree are exported.
Truncated branches will be marked with "...".
spacing : int, default=3
Number of spaces between edges. The higher it is, the wider the result.
decimals : int, default=2
Number of decimal digits to display.
show_weights : bool, default=False
If true, the classification weights will be exported on each leaf.
The classification weights are the number of samples of each class.
Returns
-------
report : string
Text summary of all the rules in the decision tree.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.tree import DecisionTreeClassifier
>>> from sklearn.tree import export_text
>>> iris = load_iris()
>>> X = iris['data']
>>> y = iris['target']
>>> decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2)
>>> decision_tree = decision_tree.fit(X, y)
>>> r = export_text(decision_tree, feature_names=iris['feature_names'])
>>> print(r)
|--- petal width (cm) <= 0.80
| |--- class: 0
|--- petal width (cm) > 0.80
| |--- petal width (cm) <= 1.75
| | |--- class: 1
| |--- petal width (cm) > 1.75
| | |--- class: 2
"""
check_is_fitted(decision_tree)
tree_ = decision_tree.tree_
if is_classifier(decision_tree):
class_names = decision_tree.classes_
right_child_fmt = "{} {} <= {}\n"
left_child_fmt = "{} {} > {}\n"
truncation_fmt = "{} {}\n"
if max_depth < 0:
raise ValueError("max_depth bust be >= 0, given %d" % max_depth)
if (feature_names is not None and
len(feature_names) != tree_.n_features):
raise ValueError("feature_names must contain "
"%d elements, got %d" % (tree_.n_features,
len(feature_names)))
if spacing <= 0:
raise ValueError("spacing must be > 0, given %d" % spacing)
if decimals < 0:
raise ValueError("decimals must be >= 0, given %d" % decimals)
if isinstance(decision_tree, DecisionTreeClassifier):
value_fmt = "{}{} weights: {}\n"
if not show_weights:
value_fmt = "{}{}{}\n"
else:
value_fmt = "{}{} value: {}\n"
if feature_names:
feature_names_ = [feature_names[i] if i != _tree.TREE_UNDEFINED
else None for i in tree_.feature]
else:
feature_names_ = ["feature_{}".format(i) for i in tree_.feature]
export_text.report = ""
def _add_leaf(value, class_name, indent):
val = ''
is_classification = isinstance(decision_tree,
DecisionTreeClassifier)
if show_weights or not is_classification:
val = ["{1:.{0}f}, ".format(decimals, v) for v in value]
val = '['+''.join(val)[:-2]+']'
if is_classification:
val += ' class: ' + str(class_name)
export_text.report += value_fmt.format(indent, '', val)
def print_tree_recurse(node, depth):
indent = ("|" + (" " * spacing)) * depth
indent = indent[:-spacing] + "-" * spacing
value = None
if tree_.n_outputs == 1:
value = tree_.value[node][0]
else:
value = tree_.value[node].T[0]
class_name = np.argmax(value)
if (tree_.n_classes[0] != 1 and
tree_.n_outputs == 1):
class_name = class_names[class_name]
if depth <= max_depth+1:
info_fmt = ""
info_fmt_left = info_fmt
info_fmt_right = info_fmt
if tree_.feature[node] != _tree.TREE_UNDEFINED:
name = feature_names_[node]
threshold = tree_.threshold[node]
threshold = "{1:.{0}f}".format(decimals, threshold)
export_text.report += right_child_fmt.format(indent,
name,
threshold)
export_text.report += info_fmt_left
print_tree_recurse(tree_.children_left[node], depth+1)
export_text.report += left_child_fmt.format(indent,
name,
threshold)
export_text.report += info_fmt_right
print_tree_recurse(tree_.children_right[node], depth+1)
else: # leaf
_add_leaf(value, class_name, indent)
else:
subtree_depth = _compute_depth(tree_, node)
if subtree_depth == 1:
_add_leaf(value, class_name, indent)
else:
trunc_report = 'truncated branch of depth %d' % subtree_depth
export_text.report += truncation_fmt.format(indent,
trunc_report)
print_tree_recurse(0, 1)
return export_text.report
|
bnaul/scikit-learn
|
sklearn/tree/_export.py
|
Python
|
bsd-3-clause
| 37,048
|
[
"Brian"
] |
36e7b9292630f772d6171ee6a365d70ac2afca7180e260628ffd1128990129c2
|
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import fnmatch
import json
import operator
import os
import shutil
import stat
import sys
import tarfile
import tempfile
import threading
import time
import yaml
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
from hashlib import sha256
from io import BytesIO
from yaml.error import YAMLError
try:
import queue
except ImportError:
import Queue as queue # Python 2
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.galaxy import get_collections_galaxy_meta_info
from ansible.galaxy.api import CollectionVersionMetadata, GalaxyError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils import six
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
from ansible.utils.galaxy import scm_archive_collection
from ansible.utils.hashing import secure_hash, secure_hash_s
from ansible.utils.version import SemanticVersion
from ansible.module_utils.urls import open_url
urlparse = six.moves.urllib.parse.urlparse
urldefrag = six.moves.urllib.parse.urldefrag
urllib_error = six.moves.urllib.error
display = Display()
MANIFEST_FORMAT = 1
ModifiedContent = namedtuple('ModifiedContent', ['filename', 'expected', 'installed'])
class CollectionRequirement:
_FILE_MAPPING = [(b'MANIFEST.json', 'manifest_file'), (b'FILES.json', 'files_file')]
def __init__(self, namespace, name, b_path, api, versions, requirement, force, parent=None, metadata=None,
files=None, skip=False, allow_pre_releases=False):
"""Represents a collection requirement, the versions that are available to be installed as well as any
dependencies the collection has.
:param namespace: The collection namespace.
:param name: The collection name.
:param b_path: Byte str of the path to the collection tarball if it has already been downloaded.
:param api: The GalaxyAPI to use if the collection is from Galaxy.
:param versions: A list of versions of the collection that are available.
:param requirement: The version requirement string used to verify the list of versions fit the requirements.
:param force: Whether the force flag applied to the collection.
:param parent: The name of the parent the collection is a dependency of.
:param metadata: The galaxy.api.CollectionVersionMetadata that has already been retrieved from the Galaxy
server.
:param files: The files that exist inside the collection. This is based on the FILES.json file inside the
collection artifact.
:param skip: Whether to skip installing the collection. Should be set if the collection is already installed
and force is not set.
:param allow_pre_releases: Whether pre-release versions of the collection are allowed to be selected.
"""
self.namespace = namespace
self.name = name
self.b_path = b_path
self.api = api
self._versions = set(versions)
self.force = force
self.skip = skip
self.required_by = []
self.allow_pre_releases = allow_pre_releases
self._metadata = metadata
self._files = files
self.add_requirement(parent, requirement)
def __str__(self):
return to_native("%s.%s" % (self.namespace, self.name))
def __unicode__(self):
return u"%s.%s" % (self.namespace, self.name)
@property
def metadata(self):
self._get_metadata()
return self._metadata
@property
def versions(self):
if self.allow_pre_releases:
return self._versions
return set(v for v in self._versions if v == '*' or not SemanticVersion(v).is_prerelease)
@versions.setter
def versions(self, value):
self._versions = set(value)
@property
def pre_releases(self):
return set(v for v in self._versions if SemanticVersion(v).is_prerelease)
@property
def latest_version(self):
try:
return max([v for v in self.versions if v != '*'], key=SemanticVersion)
except ValueError: # ValueError: max() arg is an empty sequence
return '*'
@property
def dependencies(self):
if not self._metadata:
if len(self.versions) > 1:
return {}
self._get_metadata()
dependencies = self._metadata.dependencies
if dependencies is None:
return {}
return dependencies
@staticmethod
def artifact_info(b_path):
"""Load the manifest data from the MANIFEST.json and FILES.json. If the files exist, return a dict containing the keys 'files_file' and 'manifest_file'.
:param b_path: The directory of a collection.
"""
info = {}
for b_file_name, property_name in CollectionRequirement._FILE_MAPPING:
b_file_path = os.path.join(b_path, b_file_name)
if not os.path.exists(b_file_path):
continue
with open(b_file_path, 'rb') as file_obj:
try:
info[property_name] = json.loads(to_text(file_obj.read(), errors='surrogate_or_strict'))
except ValueError:
raise AnsibleError("Collection file at '%s' does not contain a valid json string." % to_native(b_file_path))
return info
@staticmethod
def galaxy_metadata(b_path):
"""Generate the manifest data from the galaxy.yml file.
If the galaxy.yml exists, return a dictionary containing the keys 'files_file' and 'manifest_file'.
:param b_path: The directory of a collection.
"""
b_galaxy_path = get_galaxy_metadata_path(b_path)
info = {}
if os.path.exists(b_galaxy_path):
collection_meta = _get_galaxy_yml(b_galaxy_path)
info['files_file'] = _build_files_manifest(b_path, collection_meta['namespace'], collection_meta['name'], collection_meta['build_ignore'])
info['manifest_file'] = _build_manifest(**collection_meta)
return info
@staticmethod
def collection_info(b_path, fallback_metadata=False):
info = CollectionRequirement.artifact_info(b_path)
if info or not fallback_metadata:
return info
return CollectionRequirement.galaxy_metadata(b_path)
def add_requirement(self, parent, requirement):
self.required_by.append((parent, requirement))
new_versions = set(v for v in self.versions if self._meets_requirements(v, requirement, parent))
if len(new_versions) == 0:
if self.skip:
force_flag = '--force-with-deps' if parent else '--force'
version = self.latest_version if self.latest_version != '*' else 'unknown'
msg = "Cannot meet requirement %s:%s as it is already installed at version '%s'. Use %s to overwrite" \
% (to_text(self), requirement, version, force_flag)
raise AnsibleError(msg)
elif parent is None:
msg = "Cannot meet requirement %s for dependency %s" % (requirement, to_text(self))
else:
msg = "Cannot meet dependency requirement '%s:%s' for collection %s" \
% (to_text(self), requirement, parent)
collection_source = to_text(self.b_path, nonstring='passthru') or self.api.api_server
req_by = "\n".join(
"\t%s - '%s:%s'" % (to_text(p) if p else 'base', to_text(self), r)
for p, r in self.required_by
)
versions = ", ".join(sorted(self.versions, key=SemanticVersion))
if not self.versions and self.pre_releases:
pre_release_msg = (
'\nThis collection only contains pre-releases. Utilize `--pre` to install pre-releases, or '
'explicitly provide the pre-release version.'
)
else:
pre_release_msg = ''
raise AnsibleError(
"%s from source '%s'. Available versions before last requirement added: %s\nRequirements from:\n%s%s"
% (msg, collection_source, versions, req_by, pre_release_msg)
)
self.versions = new_versions
def download(self, b_path):
download_url = self._metadata.download_url
artifact_hash = self._metadata.artifact_sha256
headers = {}
self.api._add_auth_token(headers, download_url, required=False)
b_collection_path = _download_file(download_url, b_path, artifact_hash, self.api.validate_certs,
headers=headers)
return to_text(b_collection_path, errors='surrogate_or_strict')
def install(self, path, b_temp_path):
if self.skip:
display.display("Skipping '%s' as it is already installed" % to_text(self))
return
# Install if it is not
collection_path = os.path.join(path, self.namespace, self.name)
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
display.display("Installing '%s:%s' to '%s'" % (to_text(self), self.latest_version, collection_path))
if self.b_path is None:
self.b_path = self.download(b_temp_path)
if os.path.exists(b_collection_path):
shutil.rmtree(b_collection_path)
if os.path.isfile(self.b_path):
self.install_artifact(b_collection_path, b_temp_path)
else:
self.install_scm(b_collection_path)
display.display("%s (%s) was installed successfully" % (to_text(self), self.latest_version))
def install_artifact(self, b_collection_path, b_temp_path):
try:
with tarfile.open(self.b_path, mode='r') as collection_tar:
files_member_obj = collection_tar.getmember('FILES.json')
with _tarfile_extract(collection_tar, files_member_obj) as (dummy, files_obj):
files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict'))
_extract_tar_file(collection_tar, 'MANIFEST.json', b_collection_path, b_temp_path)
_extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path)
for file_info in files['files']:
file_name = file_info['name']
if file_name == '.':
continue
if file_info['ftype'] == 'file':
_extract_tar_file(collection_tar, file_name, b_collection_path, b_temp_path,
expected_hash=file_info['chksum_sha256'])
else:
_extract_tar_dir(collection_tar, file_name, b_collection_path)
except Exception:
# Ensure we don't leave the dir behind in case of a failure.
shutil.rmtree(b_collection_path)
b_namespace_path = os.path.dirname(b_collection_path)
if not os.listdir(b_namespace_path):
os.rmdir(b_namespace_path)
raise
def install_scm(self, b_collection_output_path):
"""Install the collection from source control into given dir.
Generates the Ansible collection artifact data from a galaxy.yml and installs the artifact to a directory.
This should follow the same pattern as build_collection, but instead of creating an artifact, install it.
:param b_collection_output_path: The installation directory for the collection artifact.
:raises AnsibleError: If no collection metadata found.
"""
b_collection_path = self.b_path
b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
if not os.path.exists(b_galaxy_path):
raise AnsibleError("The collection galaxy.yml path '%s' does not exist." % to_native(b_galaxy_path))
info = CollectionRequirement.galaxy_metadata(b_collection_path)
collection_manifest = info['manifest_file']
collection_meta = collection_manifest['collection_info']
file_manifest = info['files_file']
_build_collection_dir(b_collection_path, b_collection_output_path, collection_manifest, file_manifest)
collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
collection_manifest['collection_info']['name'])
display.display('Created collection for %s at %s' % (collection_name, to_text(b_collection_output_path)))
def set_latest_version(self):
self.versions = set([self.latest_version])
self._get_metadata()
def verify(self, remote_collection, path, b_temp_tar_path):
if not self.skip:
display.display("'%s' has not been installed, nothing to verify" % (to_text(self)))
return
collection_path = os.path.join(path, self.namespace, self.name)
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
display.vvv("Verifying '%s:%s'." % (to_text(self), self.latest_version))
display.vvv("Installed collection found at '%s'" % collection_path)
display.vvv("Remote collection found at '%s'" % remote_collection.metadata.download_url)
# Compare installed version versus requirement version
if self.latest_version != remote_collection.latest_version:
err = "%s has the version '%s' but is being compared to '%s'" % (to_text(self), self.latest_version, remote_collection.latest_version)
display.display(err)
return
modified_content = []
# Verify the manifest hash matches before verifying the file manifest
expected_hash = _get_tar_file_hash(b_temp_tar_path, 'MANIFEST.json')
self._verify_file_hash(b_collection_path, 'MANIFEST.json', expected_hash, modified_content)
manifest = _get_json_from_tar_file(b_temp_tar_path, 'MANIFEST.json')
# Use the manifest to verify the file manifest checksum
file_manifest_data = manifest['file_manifest_file']
file_manifest_filename = file_manifest_data['name']
expected_hash = file_manifest_data['chksum_%s' % file_manifest_data['chksum_type']]
# Verify the file manifest before using it to verify individual files
self._verify_file_hash(b_collection_path, file_manifest_filename, expected_hash, modified_content)
file_manifest = _get_json_from_tar_file(b_temp_tar_path, file_manifest_filename)
# Use the file manifest to verify individual file checksums
for manifest_data in file_manifest['files']:
if manifest_data['ftype'] == 'file':
expected_hash = manifest_data['chksum_%s' % manifest_data['chksum_type']]
self._verify_file_hash(b_collection_path, manifest_data['name'], expected_hash, modified_content)
if modified_content:
display.display("Collection %s contains modified content in the following files:" % to_text(self))
display.display(to_text(self))
display.vvv(to_text(self.b_path))
for content_change in modified_content:
display.display(' %s' % content_change.filename)
display.vvv(" Expected: %s\n Found: %s" % (content_change.expected, content_change.installed))
else:
display.vvv("Successfully verified that checksums for '%s:%s' match the remote collection" % (to_text(self), self.latest_version))
def _verify_file_hash(self, b_path, filename, expected_hash, error_queue):
b_file_path = to_bytes(os.path.join(to_text(b_path), filename), errors='surrogate_or_strict')
if not os.path.isfile(b_file_path):
actual_hash = None
else:
with open(b_file_path, mode='rb') as file_object:
actual_hash = _consume_file(file_object)
if expected_hash != actual_hash:
error_queue.append(ModifiedContent(filename=filename, expected=expected_hash, installed=actual_hash))
def _get_metadata(self):
if self._metadata:
return
self._metadata = self.api.get_collection_version_metadata(self.namespace, self.name, self.latest_version)
def _meets_requirements(self, version, requirements, parent):
"""
Supports version identifiers can be '==', '!=', '>', '>=', '<', '<=', '*'. Each requirement is delimited by ','
"""
op_map = {
'!=': operator.ne,
'==': operator.eq,
'=': operator.eq,
'>=': operator.ge,
'>': operator.gt,
'<=': operator.le,
'<': operator.lt,
}
for req in list(requirements.split(',')):
op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
op = op_map.get(req[:op_pos])
requirement = req[op_pos:]
if not op:
requirement = req
op = operator.eq
# In the case we are checking a new requirement on a base requirement (parent != None) we can't accept
# version as '*' (unknown version) unless the requirement is also '*'.
if parent and version == '*' and requirement != '*':
display.warning("Failed to validate the collection requirement '%s:%s' for %s when the existing "
"install does not have a version set, the collection may not work."
% (to_text(self), req, parent))
continue
elif requirement == '*' or version == '*':
continue
if not op(SemanticVersion(version), SemanticVersion.from_loose_version(LooseVersion(requirement))):
break
else:
return True
# The loop was broken early, it does not meet all the requirements
return False
@staticmethod
def from_tar(b_path, force, parent=None):
if not tarfile.is_tarfile(b_path):
raise AnsibleError("Collection artifact at '%s' is not a valid tar file." % to_native(b_path))
info = {}
with tarfile.open(b_path, mode='r') as collection_tar:
for b_member_name, property_name in CollectionRequirement._FILE_MAPPING:
n_member_name = to_native(b_member_name)
try:
member = collection_tar.getmember(n_member_name)
except KeyError:
raise AnsibleError("Collection at '%s' does not contain the required file %s."
% (to_native(b_path), n_member_name))
with _tarfile_extract(collection_tar, member) as (dummy, member_obj):
try:
info[property_name] = json.loads(to_text(member_obj.read(), errors='surrogate_or_strict'))
except ValueError:
raise AnsibleError("Collection tar file member %s does not contain a valid json string."
% n_member_name)
meta = info['manifest_file']['collection_info']
files = info['files_file']['files']
namespace = meta['namespace']
name = meta['name']
version = meta['version']
meta = CollectionVersionMetadata(namespace, name, version, None, None, meta['dependencies'])
if SemanticVersion(version).is_prerelease:
allow_pre_release = True
else:
allow_pre_release = False
return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
metadata=meta, files=files, allow_pre_releases=allow_pre_release)
@staticmethod
def from_path(b_path, force, parent=None, fallback_metadata=False, skip=True):
info = CollectionRequirement.collection_info(b_path, fallback_metadata)
allow_pre_release = False
if 'manifest_file' in info:
manifest = info['manifest_file']['collection_info']
namespace = manifest['namespace']
name = manifest['name']
version = to_text(manifest['version'], errors='surrogate_or_strict')
try:
_v = SemanticVersion()
_v.parse(version)
if _v.is_prerelease:
allow_pre_release = True
except ValueError:
display.warning("Collection at '%s' does not have a valid version set, falling back to '*'. Found "
"version: '%s'" % (to_text(b_path), version))
version = '*'
dependencies = manifest['dependencies']
else:
if fallback_metadata:
warning = "Collection at '%s' does not have a galaxy.yml or a MANIFEST.json file, cannot detect version."
else:
warning = "Collection at '%s' does not have a MANIFEST.json file, cannot detect version."
display.warning(warning % to_text(b_path))
parent_dir, name = os.path.split(to_text(b_path, errors='surrogate_or_strict'))
namespace = os.path.split(parent_dir)[1]
version = '*'
dependencies = {}
meta = CollectionVersionMetadata(namespace, name, version, None, None, dependencies)
files = info.get('files_file', {}).get('files', {})
return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
metadata=meta, files=files, skip=skip, allow_pre_releases=allow_pre_release)
@staticmethod
def from_name(collection, apis, requirement, force, parent=None, allow_pre_release=False):
namespace, name = collection.split('.', 1)
galaxy_meta = None
for api in apis:
try:
if not (requirement == '*' or requirement.startswith('<') or requirement.startswith('>') or
requirement.startswith('!=')):
# Exact requirement
allow_pre_release = True
if requirement.startswith('='):
requirement = requirement.lstrip('=')
resp = api.get_collection_version_metadata(namespace, name, requirement)
galaxy_meta = resp
versions = [resp.version]
else:
versions = api.get_collection_versions(namespace, name)
except GalaxyError as err:
if err.http_code == 404:
display.vvv("Collection '%s' is not available from server %s %s"
% (collection, api.name, api.api_server))
continue
raise
display.vvv("Collection '%s' obtained from server %s %s" % (collection, api.name, api.api_server))
break
else:
raise AnsibleError("Failed to find collection %s:%s" % (collection, requirement))
req = CollectionRequirement(namespace, name, None, api, versions, requirement, force, parent=parent,
metadata=galaxy_meta, allow_pre_releases=allow_pre_release)
return req
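# Editor's note: a standalone sketch (not part of the original module) of how
# _meets_requirements splits the comparison operator from a requirement
# string before mapping it through op_map. Plain versions like '1.2.3' yield
# no known operator and fall back to '==' in the real method.
def _split_requirement_demo(req):
    op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
    return req[:op_pos], req[op_pos:]  # e.g. '>=1.2.3' -> ('>=', '1.2.3')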
def build_collection(collection_path, output_path, force):
"""Creates the Ansible collection artifact in a .tar.gz file.
:param collection_path: The path to the collection to build. This should be the directory that contains the
galaxy.yml file.
:param output_path: The path to create the collection build artifact. This should be a directory.
:param force: Whether to overwrite an existing collection build artifact or fail.
:return: The path to the collection build artifact.
"""
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
if not os.path.exists(b_galaxy_path):
raise AnsibleError("The collection galaxy.yml path '%s' does not exist." % to_native(b_galaxy_path))
info = CollectionRequirement.galaxy_metadata(b_collection_path)
collection_manifest = info['manifest_file']
collection_meta = collection_manifest['collection_info']
file_manifest = info['files_file']
collection_output = os.path.join(output_path, "%s-%s-%s.tar.gz" % (collection_meta['namespace'],
collection_meta['name'],
collection_meta['version']))
b_collection_output = to_bytes(collection_output, errors='surrogate_or_strict')
if os.path.exists(b_collection_output):
if os.path.isdir(b_collection_output):
raise AnsibleError("The output collection artifact '%s' already exists, "
"but is a directory - aborting" % to_native(collection_output))
elif not force:
raise AnsibleError("The file '%s' already exists. You can use --force to re-create "
"the collection artifact." % to_native(collection_output))
_build_collection_tar(b_collection_path, b_collection_output, collection_manifest, file_manifest)
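# Editor's note: hypothetical usage of build_collection; the paths below are
# made up, and a galaxy.yml must exist under the collection directory.
def _build_collection_demo():
    # would produce ./dist/my_namespace-my_name-<version>.tar.gz based on
    # the version declared in galaxy.yml
    build_collection('./my_namespace/my_name', './dist', force=False)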
def download_collections(collections, output_path, apis, validate_certs, no_deps, allow_pre_release):
"""Download Ansible collections as their tarball from a Galaxy server to the path specified and creates a requirements
file of the downloaded requirements to be used for an install.
:param collections: The collections to download, should be a list of tuples with (name, requirement, Galaxy Server).
:param output_path: The path to download the collections to.
:param apis: A list of GalaxyAPIs to query when searching for a collection.
:param validate_certs: Whether to validate the certificate if downloading a tarball from a non-Galaxy host.
:param no_deps: Ignore any collection dependencies and only download the base requirements.
:param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
"""
with _tempdir() as b_temp_path:
display.display("Process install dependency map")
with _display_progress():
dep_map = _build_dependency_map(collections, [], b_temp_path, apis, validate_certs, True, True, no_deps,
allow_pre_release=allow_pre_release)
requirements = []
display.display("Starting collection download process to '%s'" % output_path)
with _display_progress():
for name, requirement in dep_map.items():
collection_filename = "%s-%s-%s.tar.gz" % (requirement.namespace, requirement.name,
requirement.latest_version)
dest_path = os.path.join(output_path, collection_filename)
requirements.append({'name': collection_filename, 'version': requirement.latest_version})
display.display("Downloading collection '%s' to '%s'" % (name, dest_path))
b_temp_download_path = requirement.download(b_temp_path)
shutil.move(b_temp_download_path, to_bytes(dest_path, errors='surrogate_or_strict'))
display.display("%s (%s) was downloaded successfully" % (name, requirement.latest_version))
requirements_path = os.path.join(output_path, 'requirements.yml')
display.display("Writing requirements.yml file of downloaded collections to '%s'" % requirements_path)
with open(to_bytes(requirements_path, errors='surrogate_or_strict'), mode='wb') as req_fd:
req_fd.write(to_bytes(yaml.safe_dump({'collections': requirements}), errors='surrogate_or_strict'))
def publish_collection(collection_path, api, wait, timeout):
"""Publish an Ansible collection tarball into an Ansible Galaxy server.
:param collection_path: The path to the collection tarball to publish.
:param api: A GalaxyAPI to publish the collection to.
:param wait: Whether to wait until the import process is complete.
:param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite.
"""
import_uri = api.publish_collection(collection_path)
if wait:
# Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is
# always the task_id, though.
# v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"}
# v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"}
task_id = None
for path_segment in reversed(import_uri.split('/')):
if path_segment:
task_id = path_segment
break
if not task_id:
raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri)
display.display("Collection has been published to the Galaxy server %s %s" % (api.name, api.api_server))
with _display_progress():
api.wait_import_task(task_id, timeout)
display.display("Collection has been successfully published and imported to the Galaxy server %s %s"
% (api.name, api.api_server))
else:
display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has "
"completed due to --no-wait being set. Import task results can be found at %s"
% (api.name, api.api_server, import_uri))
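# Editor's note: standalone restatement (illustration only) of the task-id
# extraction above; the id is the last non-empty path segment, which covers
# both the v2 and v3 URI shapes quoted in publish_collection.
def _task_id_from_import_uri_demo(import_uri):
    for path_segment in reversed(import_uri.split('/')):
        if path_segment:
            return path_segment
    return None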
def install_collections(collections, output_path, apis, validate_certs, ignore_errors, no_deps, force, force_deps,
allow_pre_release=False):
"""Install Ansible collections to the path specified.
:param collections: The collections to install, should be a list of tuples with (name, requirement, Galaxy server).
:param output_path: The path to install the collections to.
:param apis: A list of GalaxyAPIs to query when searching for a collection.
:param validate_certs: Whether to validate the certificates if downloading a tarball.
:param ignore_errors: Whether to ignore any errors when installing the collection.
:param no_deps: Ignore any collection dependencies and only install the base requirements.
:param force: Re-install a collection if it has already been installed.
:param force_deps: Re-install a collection as well as its dependencies if they have already been installed.
"""
existing_collections = find_existing_collections(output_path, fallback_metadata=True)
with _tempdir() as b_temp_path:
display.display("Process install dependency map")
with _display_progress():
dependency_map = _build_dependency_map(collections, existing_collections, b_temp_path, apis,
validate_certs, force, force_deps, no_deps,
allow_pre_release=allow_pre_release)
display.display("Starting collection install process")
with _display_progress():
for collection in dependency_map.values():
try:
collection.install(output_path, b_temp_path)
except AnsibleError as err:
if ignore_errors:
display.warning("Failed to install collection %s but skipping due to --ignore-errors being set. "
"Error: %s" % (to_text(collection), to_text(err)))
else:
raise
def validate_collection_name(name):
"""Validates the collection name as an input from the user or a requirements file fit the requirements.
:param name: The input name with optional range specifier split by ':'.
:return: The input value, required for argparse validation.
"""
collection, dummy, dummy = name.partition(':')
if AnsibleCollectionRef.is_valid_collection_name(collection):
return name
raise AnsibleError("Invalid collection name '%s', "
"name must be in the format <namespace>.<collection>. \n"
"Please make sure namespace and collection name contains "
"characters from [a-zA-Z0-9_] only." % name)
def validate_collection_path(collection_path):
"""Ensure a given path ends with 'ansible_collections'
:param collection_path: The path that should end in 'ansible_collections'
:return: collection_path ending in 'ansible_collections' if it does not already.
"""
if os.path.split(collection_path)[1] != 'ansible_collections':
return os.path.join(collection_path, 'ansible_collections')
return collection_path
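# Editor's note: small demo (illustration only) showing validate_collection_path
# appends 'ansible_collections' once and is a no-op afterwards.
def _validate_collection_path_demo():
    p = validate_collection_path('/opt/collections')
    assert p.endswith('ansible_collections')
    return validate_collection_path(p) == p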
def verify_collections(collections, search_paths, apis, validate_certs, ignore_errors, allow_pre_release=False):
with _display_progress():
with _tempdir() as b_temp_path:
for collection in collections:
try:
local_collection = None
b_collection = to_bytes(collection[0], errors='surrogate_or_strict')
if os.path.isfile(b_collection) or urlparse(collection[0]).scheme.lower() in ['http', 'https'] or len(collection[0].split('.')) != 2:
raise AnsibleError(message="'%s' is not a valid collection name. The format namespace.name is expected." % collection[0])
collection_name = collection[0]
namespace, name = collection_name.split('.')
collection_version = collection[1]
# Verify local collection exists before downloading it from a galaxy server
for search_path in search_paths:
b_search_path = to_bytes(os.path.join(search_path, namespace, name), errors='surrogate_or_strict')
if os.path.isdir(b_search_path):
if not os.path.isfile(os.path.join(to_text(b_search_path, errors='surrogate_or_strict'), 'MANIFEST.json')):
raise AnsibleError(
message="Collection %s does not appear to have a MANIFEST.json. " % collection_name +
"A MANIFEST.json is expected if the collection has been built and installed via ansible-galaxy."
)
local_collection = CollectionRequirement.from_path(b_search_path, False)
break
if local_collection is None:
raise AnsibleError(message='Collection %s is not installed in any of the collection paths.' % collection_name)
                    # Download the collection from a Galaxy server for comparison
try:
remote_collection = CollectionRequirement.from_name(collection_name, apis, collection_version, False, parent=None,
allow_pre_release=allow_pre_release)
except AnsibleError as e:
if e.message == 'Failed to find collection %s:%s' % (collection[0], collection[1]):
raise AnsibleError('Failed to find remote collection %s:%s on any of the galaxy servers' % (collection[0], collection[1]))
raise
download_url = remote_collection.metadata.download_url
headers = {}
remote_collection.api._add_auth_token(headers, download_url, required=False)
b_temp_tar_path = _download_file(download_url, b_temp_path, None, validate_certs, headers=headers)
local_collection.verify(remote_collection, search_path, b_temp_tar_path)
except AnsibleError as err:
if ignore_errors:
display.warning("Failed to verify collection %s but skipping due to --ignore-errors being set. "
"Error: %s" % (collection[0], to_text(err)))
else:
raise
@contextmanager
def _tempdir():
b_temp_path = tempfile.mkdtemp(dir=to_bytes(C.DEFAULT_LOCAL_TMP, errors='surrogate_or_strict'))
yield b_temp_path
shutil.rmtree(b_temp_path)
@contextmanager
def _tarfile_extract(tar, member):
tar_obj = tar.extractfile(member)
yield member, tar_obj
tar_obj.close()
@contextmanager
def _display_progress():
config_display = C.GALAXY_DISPLAY_PROGRESS
display_wheel = sys.stdout.isatty() if config_display is None else config_display
if not display_wheel:
yield
return
def progress(display_queue, actual_display):
actual_display.debug("Starting display_progress display thread")
t = threading.current_thread()
while True:
for c in "|/-\\":
actual_display.display(c + "\b", newline=False)
time.sleep(0.1)
            # Drain and display any messages queued from the main thread
while True:
try:
method, args, kwargs = display_queue.get(block=False, timeout=0.1)
except queue.Empty:
break
else:
func = getattr(actual_display, method)
func(*args, **kwargs)
if getattr(t, "finish", False):
actual_display.debug("Received end signal for display_progress display thread")
return
class DisplayThread(object):
def __init__(self, display_queue):
self.display_queue = display_queue
def __getattr__(self, attr):
def call_display(*args, **kwargs):
self.display_queue.put((attr, args, kwargs))
return call_display
    # Temporarily override the global display class with our own, which adds the calls to a queue for the thread to process.
global display
old_display = display
try:
display_queue = queue.Queue()
display = DisplayThread(display_queue)
t = threading.Thread(target=progress, args=(display_queue, old_display))
t.daemon = True
t.start()
try:
yield
finally:
t.finish = True
t.join()
except Exception:
        # The exception is re-raised so we can be sure the thread is finished and no longer using the display
raise
finally:
display = old_display
def _get_galaxy_yml(b_galaxy_yml_path):
meta_info = get_collections_galaxy_meta_info()
mandatory_keys = set()
string_keys = set()
list_keys = set()
dict_keys = set()
for info in meta_info:
if info.get('required', False):
mandatory_keys.add(info['key'])
key_list_type = {
'str': string_keys,
'list': list_keys,
'dict': dict_keys,
}[info.get('type', 'str')]
key_list_type.add(info['key'])
all_keys = frozenset(list(mandatory_keys) + list(string_keys) + list(list_keys) + list(dict_keys))
try:
with open(b_galaxy_yml_path, 'rb') as g_yaml:
galaxy_yml = yaml.safe_load(g_yaml)
except YAMLError as err:
raise AnsibleError("Failed to parse the galaxy.yml at '%s' with the following error:\n%s"
% (to_native(b_galaxy_yml_path), to_native(err)))
set_keys = set(galaxy_yml.keys())
missing_keys = mandatory_keys.difference(set_keys)
if missing_keys:
raise AnsibleError("The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
% (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys))))
extra_keys = set_keys.difference(all_keys)
if len(extra_keys) > 0:
display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
% (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))
# Add the defaults if they have not been set
for optional_string in string_keys:
if optional_string not in galaxy_yml:
galaxy_yml[optional_string] = None
for optional_list in list_keys:
list_val = galaxy_yml.get(optional_list, None)
if list_val is None:
galaxy_yml[optional_list] = []
elif not isinstance(list_val, list):
galaxy_yml[optional_list] = [list_val]
for optional_dict in dict_keys:
if optional_dict not in galaxy_yml:
galaxy_yml[optional_dict] = {}
# license is a builtin var in Python, to avoid confusion we just rename it to license_ids
galaxy_yml['license_ids'] = galaxy_yml['license']
del galaxy_yml['license']
return galaxy_yml
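# Worked example of the normalization above (hypothetical input; the exact key
# types come from get_collections_galaxy_meta_info): assuming 'license' is
# declared as a list-typed key, a galaxy.yml containing
#   license: GPL-3.0-or-later
# comes back as galaxy_yml['license_ids'] == ['GPL-3.0-or-later'], and any
# unset optional string/list/dict keys default to None/[]/{} respectively.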
def _build_files_manifest(b_collection_path, namespace, name, ignore_patterns):
    # We always ignore .pyc and .retry files as well as some well-known version control directories. The ignore
    # patterns can be extended by the build_ignore key in galaxy.yml
b_ignore_patterns = [
b'galaxy.yml',
b'galaxy.yaml',
b'.git',
b'*.pyc',
b'*.retry',
b'tests/output', # Ignore ansible-test result output directory.
to_bytes('{0}-{1}-*.tar.gz'.format(namespace, name)), # Ignores previously built artifacts in the root dir.
]
b_ignore_patterns += [to_bytes(p) for p in ignore_patterns]
b_ignore_dirs = frozenset([b'CVS', b'.bzr', b'.hg', b'.git', b'.svn', b'__pycache__', b'.tox'])
entry_template = {
'name': None,
'ftype': None,
'chksum_type': None,
'chksum_sha256': None,
'format': MANIFEST_FORMAT
}
manifest = {
'files': [
{
'name': '.',
'ftype': 'dir',
'chksum_type': None,
'chksum_sha256': None,
'format': MANIFEST_FORMAT,
},
],
'format': MANIFEST_FORMAT,
}
def _walk(b_path, b_top_level_dir):
for b_item in os.listdir(b_path):
b_abs_path = os.path.join(b_path, b_item)
b_rel_base_dir = b'' if b_path == b_top_level_dir else b_path[len(b_top_level_dir) + 1:]
b_rel_path = os.path.join(b_rel_base_dir, b_item)
rel_path = to_text(b_rel_path, errors='surrogate_or_strict')
if os.path.isdir(b_abs_path):
if any(b_item == b_path for b_path in b_ignore_dirs) or \
any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
continue
if os.path.islink(b_abs_path):
b_link_target = os.path.realpath(b_abs_path)
if not _is_child_path(b_link_target, b_top_level_dir):
display.warning("Skipping '%s' as it is a symbolic link to a directory outside the collection"
% to_text(b_abs_path))
continue
manifest_entry = entry_template.copy()
manifest_entry['name'] = rel_path
manifest_entry['ftype'] = 'dir'
manifest['files'].append(manifest_entry)
if not os.path.islink(b_abs_path):
_walk(b_abs_path, b_top_level_dir)
else:
if any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
continue
                # Handling of file symlinks occurs in _build_collection_tar; the manifest entry for a symlink is the
                # same as for a normal file.
manifest_entry = entry_template.copy()
manifest_entry['name'] = rel_path
manifest_entry['ftype'] = 'file'
manifest_entry['chksum_type'] = 'sha256'
manifest_entry['chksum_sha256'] = secure_hash(b_abs_path, hash_func=sha256)
manifest['files'].append(manifest_entry)
_walk(b_collection_path, b_collection_path)
return manifest
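# The returned manifest mirrors the FILES.json layout, e.g. (a sketch with a
# hypothetical file entry):
#   {'files': [{'name': '.', 'ftype': 'dir', 'chksum_type': None,
#               'chksum_sha256': None, 'format': MANIFEST_FORMAT},
#              {'name': 'plugins/modules/foo.py', 'ftype': 'file',
#               'chksum_type': 'sha256', 'chksum_sha256': '<hex digest>',
#               'format': MANIFEST_FORMAT}],
#    'format': MANIFEST_FORMAT}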
def _build_manifest(namespace, name, version, authors, readme, tags, description, license_ids, license_file,
dependencies, repository, documentation, homepage, issues, **kwargs):
manifest = {
'collection_info': {
'namespace': namespace,
'name': name,
'version': version,
'authors': authors,
'readme': readme,
'tags': tags,
'description': description,
'license': license_ids,
'license_file': license_file if license_file else None, # Handle galaxy.yml having an empty string (None)
'dependencies': dependencies,
'repository': repository,
'documentation': documentation,
'homepage': homepage,
'issues': issues,
},
'file_manifest_file': {
'name': 'FILES.json',
'ftype': 'file',
'chksum_type': 'sha256',
'chksum_sha256': None, # Filled out in _build_collection_tar
'format': MANIFEST_FORMAT
},
'format': MANIFEST_FORMAT,
}
return manifest
def _build_collection_tar(b_collection_path, b_tar_path, collection_manifest, file_manifest):
"""Build a tar.gz collection artifact from the manifest data."""
files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
with _tempdir() as b_temp_path:
b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path))
with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file:
# Add the MANIFEST.json and FILES.json file to the archive
for name, b in [('MANIFEST.json', collection_manifest_json), ('FILES.json', files_manifest_json)]:
b_io = BytesIO(b)
tar_info = tarfile.TarInfo(name)
tar_info.size = len(b)
tar_info.mtime = time.time()
tar_info.mode = 0o0644
tar_file.addfile(tarinfo=tar_info, fileobj=b_io)
for file_info in file_manifest['files']:
if file_info['name'] == '.':
continue
# arcname expects a native string, cannot be bytes
filename = to_native(file_info['name'], errors='surrogate_or_strict')
b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict'))
def reset_stat(tarinfo):
if tarinfo.type != tarfile.SYMTYPE:
existing_is_exec = tarinfo.mode & stat.S_IXUSR
tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = ''
return tarinfo
if os.path.islink(b_src_path):
b_link_target = os.path.realpath(b_src_path)
if _is_child_path(b_link_target, b_collection_path):
b_rel_path = os.path.relpath(b_link_target, start=os.path.dirname(b_src_path))
tar_info = tarfile.TarInfo(filename)
tar_info.type = tarfile.SYMTYPE
tar_info.linkname = to_native(b_rel_path, errors='surrogate_or_strict')
tar_info = reset_stat(tar_info)
tar_file.addfile(tarinfo=tar_info)
continue
# Dealing with a normal file, just add it by name.
tar_file.add(os.path.realpath(b_src_path), arcname=filename, recursive=False, filter=reset_stat)
shutil.copy(b_tar_filepath, b_tar_path)
collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
collection_manifest['collection_info']['name'])
display.display('Created collection for %s at %s' % (collection_name, to_text(b_tar_path)))
def _build_collection_dir(b_collection_path, b_collection_output, collection_manifest, file_manifest):
"""Build a collection directory from the manifest data.
This should follow the same pattern as _build_collection_tar.
"""
os.makedirs(b_collection_output, mode=0o0755)
files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
# Write contents to the files
for name, b in [('MANIFEST.json', collection_manifest_json), ('FILES.json', files_manifest_json)]:
b_path = os.path.join(b_collection_output, to_bytes(name, errors='surrogate_or_strict'))
with open(b_path, 'wb') as file_obj, BytesIO(b) as b_io:
shutil.copyfileobj(b_io, file_obj)
os.chmod(b_path, 0o0644)
base_directories = []
for file_info in file_manifest['files']:
if file_info['name'] == '.':
continue
src_file = os.path.join(b_collection_path, to_bytes(file_info['name'], errors='surrogate_or_strict'))
dest_file = os.path.join(b_collection_output, to_bytes(file_info['name'], errors='surrogate_or_strict'))
if any(src_file.startswith(directory) for directory in base_directories):
continue
existing_is_exec = os.stat(src_file).st_mode & stat.S_IXUSR
mode = 0o0755 if existing_is_exec else 0o0644
if os.path.isdir(src_file):
mode = 0o0755
base_directories.append(src_file)
shutil.copytree(src_file, dest_file)
else:
shutil.copyfile(src_file, dest_file)
os.chmod(dest_file, mode)
def find_existing_collections(path, fallback_metadata=False):
collections = []
b_path = to_bytes(path, errors='surrogate_or_strict')
for b_namespace in os.listdir(b_path):
b_namespace_path = os.path.join(b_path, b_namespace)
if os.path.isfile(b_namespace_path):
continue
for b_collection in os.listdir(b_namespace_path):
b_collection_path = os.path.join(b_namespace_path, b_collection)
if os.path.isdir(b_collection_path):
req = CollectionRequirement.from_path(b_collection_path, False, fallback_metadata=fallback_metadata)
display.vvv("Found installed collection %s:%s at '%s'" % (to_text(req), req.latest_version,
to_text(b_collection_path)))
collections.append(req)
return collections
def _build_dependency_map(collections, existing_collections, b_temp_path, apis, validate_certs, force, force_deps,
no_deps, allow_pre_release=False):
dependency_map = {}
# First build the dependency map on the actual requirements
for name, version, source, req_type in collections:
_get_collection_info(dependency_map, existing_collections, name, version, source, b_temp_path, apis,
validate_certs, (force or force_deps), allow_pre_release=allow_pre_release, req_type=req_type)
checked_parents = set([to_text(c) for c in dependency_map.values() if c.skip])
while len(dependency_map) != len(checked_parents):
while not no_deps: # Only parse dependencies if no_deps was not set
parents_to_check = set(dependency_map.keys()).difference(checked_parents)
deps_exhausted = True
for parent in parents_to_check:
parent_info = dependency_map[parent]
if parent_info.dependencies:
deps_exhausted = False
for dep_name, dep_requirement in parent_info.dependencies.items():
_get_collection_info(dependency_map, existing_collections, dep_name, dep_requirement,
parent_info.api, b_temp_path, apis, validate_certs, force_deps,
parent=parent, allow_pre_release=allow_pre_release)
checked_parents.add(parent)
# No extra dependencies were resolved, exit loop
if deps_exhausted:
break
    # Now that the dependencies are resolved to the best extent possible, select the latest version for collections
    # with multiple versions found and go from there
deps_not_checked = set(dependency_map.keys()).difference(checked_parents)
for collection in deps_not_checked:
dependency_map[collection].set_latest_version()
if no_deps or len(dependency_map[collection].dependencies) == 0:
checked_parents.add(collection)
return dependency_map
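# The returned dependency_map is keyed by the collection's textual name
# ('namespace.name') and maps to the resolved CollectionRequirement, e.g.
# (illustrative): {'community.general': <CollectionRequirement ...>}.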
def _collections_from_scm(collection, requirement, b_temp_path, force, parent=None):
"""Returns a list of collections found in the repo. If there is a galaxy.yml in the collection then just return
the specific collection. Otherwise, check each top-level directory for a galaxy.yml.
:param collection: URI to a git repo
:param requirement: The version of the artifact
:param b_temp_path: The temporary path to the archive of a collection
:param force: Whether to overwrite an existing collection or fail
:param parent: The name of the parent collection
:raises AnsibleError: if nothing found
:return: List of CollectionRequirement objects
:rtype: list
"""
reqs = []
name, version, path, fragment = parse_scm(collection, requirement)
b_repo_root = to_bytes(name, errors='surrogate_or_strict')
b_collection_path = os.path.join(b_temp_path, b_repo_root)
if fragment:
b_fragment = to_bytes(fragment, errors='surrogate_or_strict')
b_collection_path = os.path.join(b_collection_path, b_fragment)
b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
err = ("%s appears to be an SCM collection source, but the required galaxy.yml was not found. "
"Append #path/to/collection/ to your URI (before the comma separated version, if one is specified) "
"to point to a directory containing the galaxy.yml or directories of collections" % collection)
display.vvvvv("Considering %s as a possible path to a collection's galaxy.yml" % b_galaxy_path)
if os.path.exists(b_galaxy_path):
return [CollectionRequirement.from_path(b_collection_path, force, parent, fallback_metadata=True, skip=False)]
if not os.path.isdir(b_collection_path) or not os.listdir(b_collection_path):
raise AnsibleError(err)
for b_possible_collection in os.listdir(b_collection_path):
b_collection = os.path.join(b_collection_path, b_possible_collection)
if not os.path.isdir(b_collection):
continue
b_galaxy = get_galaxy_metadata_path(b_collection)
display.vvvvv("Considering %s as a possible path to a collection's galaxy.yml" % b_galaxy)
if os.path.exists(b_galaxy):
reqs.append(CollectionRequirement.from_path(b_collection, force, parent, fallback_metadata=True, skip=False))
if not reqs:
raise AnsibleError(err)
return reqs
def _get_collection_info(dep_map, existing_collections, collection, requirement, source, b_temp_path, apis,
validate_certs, force, parent=None, allow_pre_release=False, req_type=None):
dep_msg = ""
if parent:
dep_msg = " - as dependency of %s" % parent
display.vvv("Processing requirement collection '%s'%s" % (to_text(collection), dep_msg))
b_tar_path = None
is_file = (
req_type == 'file' or
(not req_type and os.path.isfile(to_bytes(collection, errors='surrogate_or_strict')))
)
is_url = (
req_type == 'url' or
(not req_type and urlparse(collection).scheme.lower() in ['http', 'https'])
)
is_scm = (
req_type == 'git' or
(not req_type and not b_tar_path and collection.startswith(('git+', 'git@')))
)
if is_file:
display.vvvv("Collection requirement '%s' is a tar artifact" % to_text(collection))
b_tar_path = to_bytes(collection, errors='surrogate_or_strict')
elif is_url:
display.vvvv("Collection requirement '%s' is a URL to a tar artifact" % collection)
try:
b_tar_path = _download_file(collection, b_temp_path, None, validate_certs)
except urllib_error.URLError as err:
raise AnsibleError("Failed to download collection tar from '%s': %s"
% (to_native(collection), to_native(err)))
if is_scm:
if not collection.startswith('git'):
collection = 'git+' + collection
name, version, path, fragment = parse_scm(collection, requirement)
b_tar_path = scm_archive_collection(path, name=name, version=version)
with tarfile.open(b_tar_path, mode='r') as collection_tar:
collection_tar.extractall(path=to_text(b_temp_path))
# Ignore requirement if it is set (it must follow semantic versioning, unlike a git version, which is any tree-ish)
# If the requirement was the only place version was set, requirement == version at this point
if requirement not in {"*", ""} and requirement != version:
display.warning(
"The collection {0} appears to be a git repository and two versions were provided: '{1}', and '{2}'. "
"The version {2} is being disregarded.".format(collection, version, requirement)
)
requirement = "*"
reqs = _collections_from_scm(collection, requirement, b_temp_path, force, parent)
for req in reqs:
collection_info = get_collection_info_from_req(dep_map, req)
update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement)
else:
if b_tar_path:
req = CollectionRequirement.from_tar(b_tar_path, force, parent=parent)
collection_info = get_collection_info_from_req(dep_map, req)
else:
validate_collection_name(collection)
display.vvvv("Collection requirement '%s' is the name of a collection" % collection)
if collection in dep_map:
collection_info = dep_map[collection]
collection_info.add_requirement(parent, requirement)
else:
apis = [source] if source else apis
collection_info = CollectionRequirement.from_name(collection, apis, requirement, force, parent=parent,
allow_pre_release=allow_pre_release)
update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement)
def get_collection_info_from_req(dep_map, collection):
collection_name = to_text(collection)
if collection_name in dep_map:
collection_info = dep_map[collection_name]
collection_info.add_requirement(None, collection.latest_version)
else:
collection_info = collection
return collection_info
def update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement):
existing = [c for c in existing_collections if to_text(c) == to_text(collection_info)]
if existing and not collection_info.force:
# Test that the installed collection fits the requirement
existing[0].add_requirement(parent, requirement)
collection_info = existing[0]
dep_map[to_text(collection_info)] = collection_info
def parse_scm(collection, version):
if ',' in collection:
collection, version = collection.split(',', 1)
elif version == '*' or not version:
version = 'HEAD'
if collection.startswith('git+'):
path = collection[4:]
else:
path = collection
path, fragment = urldefrag(path)
fragment = fragment.strip(os.path.sep)
if path.endswith(os.path.sep + '.git'):
name = path.split(os.path.sep)[-2]
elif '://' not in path and '@' not in path:
name = path
else:
name = path.split('/')[-1]
if name.endswith('.git'):
name = name[:-4]
return name, version, path, fragment
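# A hedged walk-through of parse_scm (the example URI is hypothetical):
#   parse_scm('git+https://github.com/org/repo.git#collections/a,devel', '*')
# splits the trailing ',devel' off as the version, strips the 'git+' prefix,
# and urldefrag() removes '#collections/a', returning
#   ('repo', 'devel', 'https://github.com/org/repo.git', 'collections/a')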
def _download_file(url, b_path, expected_hash, validate_certs, headers=None):
urlsplit = os.path.splitext(to_text(url.rsplit('/', 1)[1]))
b_file_name = to_bytes(urlsplit[0], errors='surrogate_or_strict')
b_file_ext = to_bytes(urlsplit[1], errors='surrogate_or_strict')
b_file_path = tempfile.NamedTemporaryFile(dir=b_path, prefix=b_file_name, suffix=b_file_ext, delete=False).name
display.display("Downloading %s to %s" % (url, to_text(b_path)))
    # Galaxy redirects downloads to S3, which rejects the request if an Authorization header is attached, so don't forward that header on redirect
resp = open_url(to_native(url, errors='surrogate_or_strict'), validate_certs=validate_certs, headers=headers,
unredirected_headers=['Authorization'], http_agent=user_agent())
with open(b_file_path, 'wb') as download_file:
actual_hash = _consume_file(resp, download_file)
if expected_hash:
display.vvvv("Validating downloaded file hash %s with expected hash %s" % (actual_hash, expected_hash))
if expected_hash != actual_hash:
raise AnsibleError("Mismatch artifact hash with downloaded file")
return b_file_path
def _extract_tar_dir(tar, dirname, b_dest):
""" Extracts a directory from a collection tar. """
member_names = [to_native(dirname, errors='surrogate_or_strict')]
# Create list of members with and without trailing separator
if not member_names[-1].endswith(os.path.sep):
member_names.append(member_names[-1] + os.path.sep)
    # Try all of the member names and stop on the first one we are able to successfully get
for member in member_names:
try:
tar_member = tar.getmember(member)
except KeyError:
continue
break
else:
# If we still can't find the member, raise a nice error.
raise AnsibleError("Unable to extract '%s' from collection" % to_native(member, errors='surrogate_or_strict'))
b_dir_path = os.path.join(b_dest, to_bytes(dirname, errors='surrogate_or_strict'))
b_parent_path = os.path.dirname(b_dir_path)
try:
os.makedirs(b_parent_path, mode=0o0755)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if tar_member.type == tarfile.SYMTYPE:
b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
if not _is_child_path(b_link_path, b_dest, link_name=b_dir_path):
raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
"collection '%s'" % (to_native(dirname), b_link_path))
os.symlink(b_link_path, b_dir_path)
else:
if not os.path.isdir(b_dir_path):
os.mkdir(b_dir_path, 0o0755)
def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
""" Extracts a file from a collection tar. """
with _get_tar_file_member(tar, filename) as (tar_member, tar_obj):
if tar_member.type == tarfile.SYMTYPE:
actual_hash = _consume_file(tar_obj)
else:
with tempfile.NamedTemporaryFile(dir=b_temp_path, delete=False) as tmpfile_obj:
actual_hash = _consume_file(tar_obj, tmpfile_obj)
if expected_hash and actual_hash != expected_hash:
raise AnsibleError("Checksum mismatch for '%s' inside collection at '%s'"
% (to_native(filename, errors='surrogate_or_strict'), to_native(tar.name)))
b_dest_filepath = os.path.abspath(os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict')))
b_parent_dir = os.path.dirname(b_dest_filepath)
if not _is_child_path(b_parent_dir, b_dest):
raise AnsibleError("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
% to_native(filename, errors='surrogate_or_strict'))
if not os.path.exists(b_parent_dir):
# Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check
# makes sure we create the parent directory even if it wasn't set in the metadata.
os.makedirs(b_parent_dir, mode=0o0755)
if tar_member.type == tarfile.SYMTYPE:
b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
if not _is_child_path(b_link_path, b_dest, link_name=b_dest_filepath):
raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
"collection '%s'" % (to_native(filename), b_link_path))
os.symlink(b_link_path, b_dest_filepath)
else:
shutil.move(to_bytes(tmpfile_obj.name, errors='surrogate_or_strict'), b_dest_filepath)
# Default to rw-r--r-- and only add execute if the tar file has execute.
tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict'))
new_mode = 0o644
if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR:
new_mode |= 0o0111
os.chmod(b_dest_filepath, new_mode)
def _get_tar_file_member(tar, filename):
n_filename = to_native(filename, errors='surrogate_or_strict')
try:
member = tar.getmember(n_filename)
except KeyError:
raise AnsibleError("Collection tar at '%s' does not contain the expected file '%s'." % (
to_native(tar.name),
n_filename))
return _tarfile_extract(tar, member)
def _get_json_from_tar_file(b_path, filename):
file_contents = ''
with tarfile.open(b_path, mode='r') as collection_tar:
with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
bufsize = 65536
data = tar_obj.read(bufsize)
while data:
file_contents += to_text(data)
data = tar_obj.read(bufsize)
return json.loads(file_contents)
def _get_tar_file_hash(b_path, filename):
with tarfile.open(b_path, mode='r') as collection_tar:
with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
return _consume_file(tar_obj)
def _is_child_path(path, parent_path, link_name=None):
""" Checks that path is a path within the parent_path specified. """
b_path = to_bytes(path, errors='surrogate_or_strict')
if link_name and not os.path.isabs(b_path):
# If link_name is specified, path is the source of the link and we need to resolve the absolute path.
b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))
b_path = os.path.abspath(os.path.join(b_link_dir, b_path))
b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')
return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep))
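# Illustrative checks for _is_child_path (paths are made up):
#   _is_child_path('/tmp/col/roles/x', '/tmp/col')   -> True
#   _is_child_path('/tmp/col-evil', '/tmp/col')      -> False (the separator guard
#     prevents sibling directories with a shared prefix from matching)
#   _is_child_path('../../etc', '/tmp/col', link_name='/tmp/col/lnk') -> False,
#     since the relative link source resolves to /etc, outside the parent.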
def _consume_file(read_from, write_to=None):
bufsize = 65536
sha256_digest = sha256()
data = read_from.read(bufsize)
while data:
if write_to is not None:
write_to.write(data)
write_to.flush()
sha256_digest.update(data)
data = read_from.read(bufsize)
return sha256_digest.hexdigest()
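# Minimal usage sketch for _consume_file (hypothetical path): stream a file in
# 64 KiB chunks while computing its SHA-256, optionally teeing it to write_to:
#   with open(b'/tmp/artifact.tar.gz', 'rb') as src:
#       digest = _consume_file(src)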
def get_galaxy_metadata_path(b_path):
    b_default_path = os.path.join(b_path, b'galaxy.yml')
    candidate_names = [b'galaxy.yml', b'galaxy.yaml']
    for b_name in candidate_names:
        # Join against the original directory each time; reusing b_path here
        # would make the second candidate resolve under the first one.
        b_candidate_path = os.path.join(b_path, b_name)
        if os.path.exists(b_candidate_path):
            return b_candidate_path
    return b_default_path
|
dpassante/ansible
|
lib/ansible/galaxy/collection.py
|
Python
|
gpl-3.0
| 69,076
|
[
"Galaxy"
] |
36920e739f93a3c8bbb5e0497d8687b002124a49db5e7c04a2090d2046fa56d0
|
import vtk
from vtk.util.colors import blue, hot_pink
import numpy as np
""" Visualization Helpers """
def add_sensor_visualization(filter_depth_image, positions, vtk_renderer):
"""
Add visualization specific to the sensor
:param filter_depth_image: FilterDepthImage, needed to get camera info
:param positions: list of sensor positions
:param vtk_renderer: The vtkRenderer where the actors will be added.
"""
""" Frustum of the sensor """
cameraActor = vtk.vtkCameraActor()
cameraActor.SetCamera(filter_depth_image.get_vtk_camera())
cameraActor.SetWidthByHeightRatio(filter_depth_image.get_width_by_height_ratio())
cameraActor.GetProperty().SetColor(blue)
""" Path of the sensor """
npts = positions.shape[0]
points = vtk.vtkPoints()
points.SetNumberOfPoints(npts)
lines = vtk.vtkCellArray()
lines.InsertNextCell(npts)
for i, pos in enumerate(positions):
points.SetPoint(i, pos)
lines.InsertCellPoint(i)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines)
polymapper = vtk.vtkPolyDataMapper()
polymapper.SetInputData(polydata)
polymapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(polymapper)
actor.GetProperty().SetColor(blue)
actor.GetProperty().SetOpacity(0.5)
ball = vtk.vtkSphereSource()
ball.SetRadius(0.02)
ball.SetThetaResolution(12)
ball.SetPhiResolution(12)
balls = vtk.vtkGlyph3D()
balls.SetInputData(polydata)
balls.SetSourceConnection(ball.GetOutputPort())
mapBalls = vtk.vtkPolyDataMapper()
mapBalls.SetInputConnection(balls.GetOutputPort())
spcActor = vtk.vtkActor()
spcActor.SetMapper(mapBalls)
spcActor.GetProperty().SetColor(hot_pink)
spcActor.GetProperty().SetSpecularColor(1, 1, 1)
spcActor.GetProperty().SetSpecular(0.3)
spcActor.GetProperty().SetSpecularPower(20)
spcActor.GetProperty().SetAmbient(0.2)
spcActor.GetProperty().SetDiffuse(0.8)
""" Add to the given renderer """
vtk_renderer.AddActor(spcActor)
vtk_renderer.AddActor(cameraActor)
vtk_renderer.AddActor(actor)
""" Environment Calculations """
def find_bounds_and_observation_position_lookat(vtk_algorithm):
"""
Crude method to automatically calculate bounds of the environment
and estimate a good position for a camera to observe the environment.
:param vtk_algorithm: vtkAlgorithm containing environment mesh
    :return: bounds of the environment, a suggested camera position, and a look-at point
"""
bounds = []
if callable(vtk_algorithm.set_object_state):
vtk_algorithm.set_object_state(object_id='floor', state=False)
vtk_algorithm.Update()
bounds = vtk_algorithm.GetOutputDataObject(0).GetBounds()
vtk_algorithm.set_object_state(object_id='floor', state=True)
vtk_algorithm.Update()
else:
bounds = vtk_algorithm.GetOutputDataObject(0).GetBounds()
# all these values were found very empirically
padc = (3.0, 5.0, 8.0) # pad coefficient
position = (padc[0] * bounds[1] + 4.0,
padc[1] * bounds[3],
padc[2] * bounds[5] * 2.0 + 3.0)
lookat = (-padc[0] * bounds[1],
padc[1] * bounds[3] * -0.3,
-padc[2] * bounds[5])
return bounds, position, lookat
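# Worked example of the padding above (made-up bounds): with
# bounds = (-1.0, 1.0, 0.0, 1.0, -1.0, 1.0) the observer position becomes
# (3*1+4, 5*1, 8*1*2+3) = (7.0, 5.0, 19.0) and the look-at point
# (-3*1, 5*1*-0.3, -8*1) = (-3.0, -1.5, -8.0).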
""" Path Creation """
def create_sensor_path(name=None, nsteps=None, bounds=None):
"""
    Create specific sensor paths as specified by name. Path names
    ending in "_ub" use the given bounds when calculating the path.
    :param name: Name of specific path
    :param nsteps: Number of steps, although each path will define a default
    :param bounds: Bounds of thing to create a path around
    :return: position and lookat arrays, each of shape (nsteps, 3)
"""
# reasonable default
if not bounds:
bounds = (-2.0, 2.0, 0.0, 2.0, -2.0, 2.0)
b = bounds
if name == 'helix_table_ub':
if not nsteps: nsteps = 20
xd = b[1] - b[0]
zd = b[5] - b[4]
xd += 3.0
zd += 3.0
position = _create_path({'name': 'helix',
'nsteps': nsteps,
'helix_nspins': 2,
'helix_x_diameter': xd + 1.0,
'helix_z_diameter': zd + 0.5,
'helix_y_start_end': (0.75, 1.5)})
lookat = _create_path({'name': 'line',
'nsteps': nsteps,
'line_start': (0.0, 0.4, 0.0),
'line_end': (0.0, 0.6, 0.0)})
elif name == 'helix_survey_ub':
if not nsteps: nsteps = 20
xd = b[1] - b[0]
zd = b[5] - b[4]
xd += 3.0
zd += 3.0
position = _create_path({'name': 'helix',
'nsteps': nsteps,
'helix_nspins': 1,
'helix_x_diameter': xd + 1.5,
'helix_z_diameter': zd + 1.0,
'helix_y_start_end': (0.75, 1.5)})
lookat = _create_path({'name': 'line',
'nsteps': nsteps,
'line_start': (0.0, 0.4, 0.0),
'line_end': (0.0, 0.6, 0.0)})
elif name == 'helix_bunny_ub':
if not nsteps: nsteps = 20
xd = b[1] - b[0]
zd = b[5] - b[4]
xd += 3.0
zd += 3.0
position = _create_path({'name': 'helix',
'nsteps': nsteps,
'helix_nspins': 2,
'helix_x_diameter': xd + 1.0,
'helix_z_diameter': zd + 1.0,
'helix_y_start_end': (0.75, 1.00)})
lookat = _create_path({'name': 'line',
'nsteps': nsteps,
'line_start': (0.0, 0.4, 0.0),
'line_end': (0.0, 0.6, 0.0)})
elif name == 'helix_survey_bunny_ub':
if not nsteps: nsteps = 20
xd = b[1] - b[0]
zd = b[5] - b[4]
xd += 3.0
zd += 3.0
position = _create_path({'name': 'helix',
'nsteps': nsteps,
'helix_nspins': 1,
'helix_x_diameter': xd + 5.0,
'helix_z_diameter': zd + 5.5,
'helix_y_start_end': (0.75, 4.0)})
lookat = _create_path({'name': 'line',
'nsteps': nsteps,
'line_start': (0.0, 0.1, 0.0),
'line_end': (0.0, 0.3, 0.0)})
return position, lookat
def _create_path(path_param=None):
"""
    Create a path array of shape (nsteps, 3).
    :param path_param: dictionary of parameters that describe the desired path
    :return: path array of shape (nsteps, 3)
"""
path_param = {} if not path_param else path_param
path_param.setdefault('name', 'line')
path_param.setdefault('nsteps', 20)
path_param.setdefault('line_start', (-1.5, 1.0, 1.5))
path_param.setdefault('line_end', (1.5, 1.0, 1.5))
path_param.setdefault('helix_nspins', 1)
path_param.setdefault('helix_x_diameter', 6.0)
path_param.setdefault('helix_z_diameter', 6.0)
path_param.setdefault('helix_y_start_end', (0.5, 1.5))
t = np.linspace(0, 1, num=path_param['nsteps'])
if path_param['name'] == 'line':
p0 = path_param['line_start']
p1 = path_param['line_end']
path = np.vstack((p0[0] + (p1[0] - p0[0]) * t,
p0[1] + (p1[1] - p0[1]) * t,
p0[2] + (p1[2] - p0[2]) * t)).T
elif path_param['name'] == 'helix':
# http://mathworld.wolfram.com/Helix.html
x_radius = path_param['helix_x_diameter'] / 2
z_radius = path_param['helix_z_diameter'] / 2
nspins = path_param['helix_nspins']
yse = path_param['helix_y_start_end']
path = np.vstack((x_radius * np.sin(t * np.pi * 2 * nspins),
yse[0] + (yse[1] - yse[0]) * t,
z_radius * np.cos(t * np.pi * 2 * nspins))).T
return path
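# Usage sketch (values are illustrative): both helpers return (nsteps, 3)
# arrays suitable for add_sensor_visualization's `positions` argument:
#   position, lookat = create_sensor_path('helix_table_ub',
#                                         bounds=(-1.0, 1.0, 0.0, 1.0, -1.0, 1.0))
#   assert position.shape == lookat.shape == (20, 3)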
|
lucasplus/MABDI
|
mabdi/MabdiSimulateUtilities.py
|
Python
|
bsd-3-clause
| 8,203
|
[
"VTK"
] |
0f3b384ebcf34dee159e061491ee97fe79627dc4686f33466dfeea416b949b0a
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides model classes that assume the noise is Gaussian.
"""
import logging
import shlex
from abc import ABCMeta
from six import add_metaclass
import numpy
from pycbc import filter as pyfilter
from pycbc.waveform import (NoWaveformError, FailedWaveformError)
from pycbc.waveform import generator
from pycbc.types import Array, FrequencySeries
from pycbc.strain import gates_from_cli
from pycbc.strain.calibration import Recalibrate
from pycbc.inject import InjectionSet
from pycbc.io import FieldArray
from pycbc.types.optparse import MultiDetOptionAction
from .base import ModelStats
from .base_data import BaseDataModel
from .data_utils import (data_opts_from_config, data_from_cli,
fd_data_from_strain_dict, gate_overwhitened_data)
@add_metaclass(ABCMeta)
class BaseGaussianNoise(BaseDataModel):
r"""Model for analyzing GW data with assuming a wide-sense stationary
Gaussian noise model.
This model will load gravitational wave data and calculate the log noise
likelihood ``_lognl`` and normalization. It also implements the
``_loglikelihood`` function as the sum of the log likelihood ratio and the
``lognl``. It does not implement a log likelihood ratio function
``_loglr``, however, since that can differ depending on the signal model.
Models that analyze GW data assuming it is stationary Gaussian should
therefore inherit from this class and implement their own ``_loglr``
function.
For more details on the inner product used, the log likelihood of the
noise, and the normalization factor, see :py:class:`GaussianNoise`.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
data : dict
A dictionary of data, in which the keys are the detector names and the
values are the data (assumed to be unwhitened). All data must have the
same frequency resolution.
low_frequency_cutoff : dict
A dictionary of starting frequencies, in which the keys are the
detector names and the values are the starting frequencies for the
respective detectors to be used for computing inner products.
psds : dict, optional
A dictionary of FrequencySeries keyed by the detector names. The
dictionary must have a psd for each detector specified in the data
dictionary. If provided, the inner products in each detector will be
weighted by 1/psd of that detector.
high_frequency_cutoff : dict, optional
A dictionary of ending frequencies, in which the keys are the
detector names and the values are the ending frequencies for the
respective detectors to be used for computing inner products. If not
provided, the minimum of the largest frequency stored in the data
and a given waveform will be used.
normalize : bool, optional
        If True, the normalization factor :math:`\alpha` will be included in the
log likelihood. See :py:class:`GaussianNoise` for details. Default is
to not include it.
static_params : dict, optional
A dictionary of parameter names -> values to keep fixed.
ignore_failed_waveforms : bool, optional
If the waveform generator raises an error when it tries to generate,
treat the point as having zero likelihood. This allows the parameter
estimation to continue. Otherwise, an error will be raised, stopping
the run. Default is False.
\**kwargs :
All other keyword arguments are passed to ``BaseDataModel``.
Attributes
----------
data : dict
Dictionary of detectors -> frequency-domain data.
ignore_failed_waveforms : bool
If True, points in parameter space that cause waveform generation to
fail (i.e., they raise a ``FailedWaveformError``) will be treated as
points with zero likelihood. Otherwise, such points will cause the
model to raise a ``FailedWaveformError``.
low_frequency_cutoff
high_frequency_cutoff
kmin
kmax
psds
psd_segments
weight
whitened_data
normalize
lognorm
"""
def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
static_params=None, ignore_failed_waveforms=False,
**kwargs):
# set up the boiler-plate attributes
super(BaseGaussianNoise, self).__init__(variable_params, data,
static_params=static_params,
**kwargs)
self.ignore_failed_waveforms = ignore_failed_waveforms
# check if low frequency cutoff has been provided for every IFO with
# data
for ifo in self.data.keys():
if low_frequency_cutoff[ifo] is None:
raise ValueError(
"A low-frequency-cutoff must be provided for every "
"detector for which data has been provided. If "
"loading the model settings from "
"a config file, please provide "
"`{DETECTOR}:low-frequency-cutoff` options for "
"every detector in the `[model]` section, where "
                    "`{DETECTOR}` is the name of the detector, "
                    "or provide a single low-frequency-cutoff option "
"which will be used for all detectors")
# check that the data sets all have the same delta fs and delta ts
dts = numpy.array([d.delta_t for d in self.data.values()])
dfs = numpy.array([d.delta_f for d in self.data.values()])
if not all(dts == dts[0]):
raise ValueError("all data must have the same sample rate")
if not all(dfs == dfs[0]):
raise ValueError("all data must have the same segment length")
# store the number of samples in the time domain
self._N = int(1./(dts[0]*dfs[0]))
# Set low frequency cutoff
self.low_frequency_cutoff = self._f_lower = low_frequency_cutoff
# set upper frequency cutoff
self._f_upper = None
self.high_frequency_cutoff = high_frequency_cutoff
# Set the cutoff indices
self._kmin = {}
self._kmax = {}
for (det, d) in self._data.items():
kmin, kmax = pyfilter.get_cutoff_indices(self._f_lower[det],
self._f_upper[det],
d.delta_f, self._N)
self._kmin[det] = kmin
self._kmax[det] = kmax
# store the psd segments
self._psd_segments = {}
if psds is not None:
self.set_psd_segments(psds)
# store the psds and calculate the inner product weight
self._psds = {}
self._weight = {}
self._lognorm = {}
self._det_lognls = {}
self._whitened_data = {}
# set the normalization state
self._normalize = False
self.normalize = normalize
# store the psds and whiten the data
self.psds = psds
@property
def high_frequency_cutoff(self):
"""The high frequency cutoff of the inner product.
If a high frequency cutoff was not provided for a detector, it will
be ``None``.
"""
return self._f_upper
@high_frequency_cutoff.setter
def high_frequency_cutoff(self, high_frequency_cutoff):
"""Sets the high frequency cutoff.
Parameters
----------
high_frequency_cutoff : dict
Dictionary mapping detector names to frequencies. If a high
frequency cutoff is not provided for one or more detectors, the
Nyquist frequency will be used for those detectors.
"""
self._f_upper = {}
if high_frequency_cutoff is not None and bool(high_frequency_cutoff):
for det in self._data:
if det in high_frequency_cutoff:
self._f_upper[det] = high_frequency_cutoff[det]
else:
self._f_upper[det] = None
else:
for det in self._data.keys():
self._f_upper[det] = None
@property
def kmin(self):
"""Dictionary of starting indices for the inner product.
This is determined from the lower frequency cutoff and the ``delta_f``
of the data using
:py:func:`pycbc.filter.matchedfilter.get_cutoff_indices`.
"""
return self._kmin
@property
def kmax(self):
"""Dictionary of ending indices for the inner product.
This is determined from the high frequency cutoff and the ``delta_f``
of the data using
:py:func:`pycbc.filter.matchedfilter.get_cutoff_indices`. If no high
        frequency cutoff was provided, this will be the index corresponding to
the Nyquist frequency.
"""
return self._kmax
@property
def psds(self):
"""Dictionary of detectors -> PSD frequency series.
If no PSD was provided for a detector, this will just be a frequency
series of ones.
"""
return self._psds
@psds.setter
def psds(self, psds):
"""Sets the psds, and calculates the weight and norm from them.
The data and the low and high frequency cutoffs must be set first.
"""
# check that the data has been set
if self._data is None:
raise ValueError("No data set")
if self._f_lower is None:
raise ValueError("low frequency cutoff not set")
if self._f_upper is None:
raise ValueError("high frequency cutoff not set")
# make sure the relevant caches are cleared
self._psds.clear()
self._weight.clear()
self._lognorm.clear()
self._det_lognls.clear()
self._whitened_data.clear()
for det, d in self._data.items():
if psds is None:
# No psd means assume white PSD
p = FrequencySeries(numpy.ones(int(self._N/2+1)),
delta_f=d.delta_f)
else:
# copy for storage
p = psds[det].copy()
self._psds[det] = p
# we'll store the weight to apply to the inner product
w = Array(numpy.zeros(len(p)))
# only set weight in band we will analyze
kmin = self._kmin[det]
kmax = self._kmax[det]
w[kmin:kmax] = numpy.sqrt(4.*p.delta_f/p[kmin:kmax])
self._weight[det] = w
self._whitened_data[det] = d.copy()
self._whitened_data[det][kmin:kmax] *= w[kmin:kmax]
# set the lognl and lognorm; we'll get this by just calling lognl
_ = self.lognl
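    # Illustrative magnitudes for the whitening weight above (the numbers are
    # made up): with delta_f = 0.25 Hz and S_n[k] = 1e-46, the weight is
    # w[k] = sqrt(4 * 0.25 / 1e-46) = 1e23, so multiplying the data by w
    # yields the whitened series stored in _whitened_data.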
@property
def psd_segments(self):
"""Dictionary giving times used for PSD estimation for each detector.
If a detector's PSD was not estimated from data, or the segment wasn't
provided, that detector will not be in the dictionary.
"""
return self._psd_segments
def set_psd_segments(self, psds):
"""Sets the PSD segments from a dictionary of PSDs.
This attempts to get the PSD segment from a ``psd_segment`` attribute
of each detector's PSD frequency series. If that attribute isn't set,
then that detector is not added to the dictionary of PSD segments.
Parameters
----------
psds : dict
Dictionary of detector name -> PSD frequency series. The segment
used for each PSD will try to be retrieved from the PSD's
``.psd_segment`` attribute.
"""
for det, p in psds.items():
try:
self._psd_segments[det] = p.psd_segment
except AttributeError:
continue
@property
def weight(self):
r"""Dictionary of detectors -> frequency series of inner-product
weights.
The weights are :math:`\sqrt{4 \Delta f / S_n(f)}`. This is set when
the PSDs are set.
"""
return self._weight
@property
def whitened_data(self):
r"""Dictionary of detectors -> whitened data frequency series.
The whitened data is the data multiplied by the inner-product weight.
Note that this includes the :math:`\sqrt{4 \Delta f}` factor. This
is set when the PSDs are set.
"""
return self._whitened_data
def det_lognorm(self, det):
"""The log of the likelihood normalization in the given detector.
If ``self.normalize`` is False, will just return 0.
"""
if not self.normalize:
return 0.
try:
return self._lognorm[det]
except KeyError:
# hasn't been calculated yet
p = self._psds[det]
dt = self._whitened_data[det].delta_t
kmin = self._kmin[det]
kmax = self._kmax[det]
lognorm = -float(self._N*numpy.log(numpy.pi*self._N*dt)/2.
+ numpy.log(p[kmin:kmax]).sum())
self._lognorm[det] = lognorm
return self._lognorm[det]
@property
def normalize(self):
"""Determines if the loglikelihood includes the normalization term.
"""
return self._normalize
@normalize.setter
def normalize(self, normalize):
"""Clears the current stats if the normalization state is changed.
"""
if normalize != self._normalize:
self._current_stats = ModelStats()
self._lognorm.clear()
self._det_lognls.clear()
self._normalize = normalize
@property
def lognorm(self):
"""The log of the normalization of the log likelihood."""
return sum(self.det_lognorm(det) for det in self._data)
def det_lognl(self, det):
r"""Returns the log likelihood of the noise in the given detector:
.. math::
\log p(d_i|n_i) = \log \alpha_i -
\frac{1}{2} \left<d_i | d_i\right>.
Parameters
----------
det : str
The name of the detector.
Returns
-------
float :
The log likelihood of the noise in the requested detector.
"""
try:
return self._det_lognls[det]
except KeyError:
# hasn't been calculated yet; calculate & store
kmin = self._kmin[det]
kmax = self._kmax[det]
d = self._whitened_data[det]
lognorm = self.det_lognorm(det)
lognl = lognorm - 0.5 * d[kmin:kmax].inner(d[kmin:kmax]).real
self._det_lognls[det] = lognl
return self._det_lognls[det]
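    # Note (added commentary): because the 4*delta_f/S_n weighting is already
    # folded into _whitened_data when the PSDs are set, the <d|d> term above
    # reduces to the plain complex inner product
    # d[kmin:kmax].inner(d[kmin:kmax]).real over the analyzed band.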
def _lognl(self):
"""Computes the log likelihood assuming the data is noise.
Since this is a constant for Gaussian noise, this is only computed once
then stored.
"""
return sum(self.det_lognl(det) for det in self._data)
def _loglikelihood(self):
r"""Computes the log likelihood of the paramaters,
.. math::
\log p(d|\Theta, h) = \log \alpha -\frac{1}{2}\sum_i
\left<d_i - h_i(\Theta) | d_i - h_i(\Theta)\right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood evaluated at the given point.
"""
# since the loglr has fewer terms, we'll call that, then just add
# back the noise term that canceled in the log likelihood ratio
return self.loglr + self.lognl
def write_metadata(self, fp):
"""Adds writing the psds and lognl, since it's a constant.
The lognl is written to the sample group's ``attrs``.
The analyzed detectors, their analysis segments, and the segments
used for psd estimation are written to the file's ``attrs``, as
``analyzed_detectors``, ``{{detector}}_analysis_segment``, and
``{{detector}}_psd_segment``, respectively.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
"""
super(BaseGaussianNoise, self).write_metadata(fp)
# write the analyzed detectors and times
fp.attrs['analyzed_detectors'] = self.detectors
for det, data in self.data.items():
key = '{}_analysis_segment'.format(det)
fp.attrs[key] = [float(data.start_time), float(data.end_time)]
if self._psds is not None:
fp.write_psd(self._psds)
# write the times used for psd estimation (if they were provided)
for det in self.psd_segments:
key = '{}_psd_segment'.format(det)
fp.attrs[key] = list(map(float, self.psd_segments[det]))
try:
attrs = fp[fp.samples_group].attrs
except KeyError:
# group doesn't exist, create it
fp.create_group(fp.samples_group)
attrs = fp[fp.samples_group].attrs
attrs['lognl'] = self.lognl
for det in self.detectors:
# Save lognl for each IFO as attributes in the samples group
attrs['{}_lognl'.format(det)] = self.det_lognl(det)
# Save each IFO's low frequency cutoff used in the likelihood
# computation as an attribute
fp.attrs['{}_likelihood_low_freq'.format(det)] = self._f_lower[det]
# Save the IFO's high frequency cutoff used in the likelihood
            # computation as an attribute if one was provided by the user
if self._f_upper[det] is not None:
fp.attrs['{}_likelihood_high_freq'.format(det)] = \
self._f_upper[det]
@classmethod
def from_config(cls, cp, data_section='data', **kwargs):
r"""Initializes an instance of this class from the given config file.
In addition to ``[model]``, a ``data_section`` (default ``[data]``)
must be in the configuration file. The data section specifies settings
for loading data and estimating PSDs. See the `online documentation
<http://pycbc.org/pycbc/latest/html/inference.html#setting-data>`_ for
more details.
The following options are read from the ``[model]`` section, in
addition to ``name`` (which must be set):
* ``{{DET}}-low-frequency-cutoff = FLOAT`` :
The low frequency cutoff to use for each detector {{DET}}. A cutoff
must be provided for every detector that may be analyzed (any
additional detectors are ignored).
* ``{{DET}}-high-frequency-cutoff = FLOAT`` :
(Optional) A high frequency cutoff for each detector. If not
provided, the Nyquist frequency is used.
* ``check-for-valid-times =`` :
(Optional) If provided, will check that there are no data quality
flags on during the analysis segment and the segment used for PSD
estimation in each detector. To check for flags,
:py:func:`pycbc.dq.query_flag` is used, with settings pulled from the
``dq-*`` options in the ``[data]`` section. If a detector has bad
data quality during either the analysis segment or PSD segment, it
will be removed from the analysis.
* ``shift-psd-times-to-valid =`` :
(Optional) If provided, the segment used for PSD estimation will
          automatically be shifted left or right until a continuous block of
data with no data quality issues can be found. If no block can be
found with a maximum shift of +/- the requested psd segment length,
the detector will not be analyzed.
* ``err-on-missing-detectors =`` :
Raises an error if any detector is removed from the analysis because
a valid time could not be found. Otherwise, a warning is printed
to screen and the detector is removed from the analysis.
* ``normalize =`` :
(Optional) Turn on the normalization factor.
* ``ignore-failed-waveforms =`` :
Sets the ``ignore_failed_waveforms`` attribute.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
data_section : str, optional
The name of the section to load data options from.
\**kwargs :
All additional keyword arguments are passed to the class. Any
            provided keyword will override what is in the config file.
"""
# get the injection file, to replace any FROM_INJECTION settings
if 'injection-file' in cp.options('data'):
injection_file = cp.get('data', 'injection-file')
else:
injection_file = None
# update any values that are to be retrieved from the injection
        # Note: this does nothing if there are no FROM_INJECTION values
get_values_from_injection(cp, injection_file, update_cp=True)
args = cls._init_args_from_config(cp)
# add the injection file
args['injection_file'] = injection_file
# check if normalize is set
if cp.has_option('model', 'normalize'):
args['normalize'] = True
if cp.has_option('model', 'ignore-failed-waveforms'):
args['ignore_failed_waveforms'] = True
# get any other keyword arguments provided in the model section
ignore_args = ['name', 'normalize', 'ignore-failed-waveforms']
for option in cp.options("model"):
if option in ("low-frequency-cutoff", "high-frequency-cutoff"):
ignore_args.append(option)
name = option.replace('-', '_')
args[name] = cp.get_cli_option('model', name,
nargs='+', type=float,
action=MultiDetOptionAction)
if 'low_frequency_cutoff' not in args:
raise ValueError("low-frequency-cutoff must be provided in the"
" model section, but is not found!")
# data args
bool_args = ['check-for-valid-times', 'shift-psd-times-to-valid',
'err-on-missing-detectors']
data_args = {arg.replace('-', '_'): True for arg in bool_args
if cp.has_option('model', arg)}
ignore_args += bool_args
# load the data
opts = data_opts_from_config(cp, data_section,
args['low_frequency_cutoff'])
strain_dict, psd_strain_dict = data_from_cli(opts, **data_args)
# convert to frequency domain and get psds
stilde_dict, psds = fd_data_from_strain_dict(opts, strain_dict,
psd_strain_dict)
# save the psd data segments if the psd was estimated from data
if opts.psd_estimation is not None:
_tdict = psd_strain_dict or strain_dict
for det in psds:
psds[det].psd_segment = (_tdict[det].start_time,
_tdict[det].end_time)
# gate overwhitened if desired
if opts.gate_overwhitened and opts.gate is not None:
stilde_dict = gate_overwhitened_data(stilde_dict, psds, opts.gate)
args.update({'data': stilde_dict, 'psds': psds})
# any extra args
args.update(cls.extra_args_from_config(cp, "model",
skip_args=ignore_args))
# get ifo-specific instances of calibration model
if cp.has_section('calibration'):
logging.info("Initializing calibration model")
recalib = {
ifo: Recalibrate.from_config(cp, ifo, section='calibration')
for ifo in opts.instruments}
args['recalibration'] = recalib
# get gates for templates
gates = gates_from_cli(opts)
if gates:
args['gates'] = gates
return cls(**args)
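# A minimal configuration sketch for from_config (a hedged illustration: only
# the [model] low-frequency-cutoff requirement is documented above; the model
# name and the [data] option shown here are assumptions, not taken from this
# file):
#
#   [model]
#   name = gaussian_noise
#   low-frequency-cutoff = 20.0
#
#   [data]
#   instruments = H1 L1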
class GaussianNoise(BaseGaussianNoise):
r"""Model that assumes data is stationary Gaussian noise.
With Gaussian noise the log likelihood functions for signal
:math:`\log p(d|\Theta, h)` and for noise :math:`\log p(d|n)` are given by:
.. math::
\log p(d|\Theta, h) &= \log\alpha -\frac{1}{2} \sum_i
\left< d_i - h_i(\Theta) | d_i - h_i(\Theta) \right> \\
\log p(d|n) &= \log\alpha -\frac{1}{2} \sum_i \left<d_i | d_i\right>
where the sum is over the number of detectors, :math:`d_i` is the data in
each detector, and :math:`h_i(\Theta)` is the model signal in each
detector. The (discrete) inner product is given by:
.. math::
\left<a_i | b_i\right> = 4\Re \Delta f
\sum_{k=k_{\mathrm{min}}}^{k_{\mathrm{max}}}
\frac{\tilde{a}_i^{*}[k] \tilde{b}_i[k]}{S^{(i)}_n[k]},
where :math:`\Delta f` is the frequency resolution (given by 1 / the
observation time :math:`T`), :math:`k` is an index over the discretely
    sampled frequencies :math:`f = k \Delta f`, and :math:`S^{(i)}_n[k]` is the
PSD in the given detector. The upper cutoff on the inner product
:math:`k_{\max}` is by default the Nyquist frequency
:math:`k_{\max} = N/2+1`, where :math:`N = \lfloor T/\Delta t \rfloor`
is the number of samples in the time domain, but this can be set manually
to a smaller value.
The normalization factor :math:`\alpha` is:
.. math::
\alpha = \prod_{i} \frac{1}{\left(\pi T\right)^{N/2}
\prod_{k=k_\mathrm{min}}^{k_{\mathrm{max}}} S^{(i)}_n[k]},
where the product is over the number of detectors. By default, the
normalization constant is not included in the log likelihood, but it can
be turned on using the ``normalize`` keyword argument.
Note that the log likelihood ratio has fewer terms than the log likelihood,
since the normalization and :math:`\left<d_i|d_i\right>` terms cancel:
.. math::
\log \mathcal{L}(\Theta) = \sum_i \left[
\left<h_i(\Theta)|d_i\right> -
\frac{1}{2} \left<h_i(\Theta)|h_i(\Theta)\right> \right]
Upon initialization, the data is whitened using the given PSDs. If no PSDs
are given the data and waveforms returned by the waveform generator are
assumed to be whitened.
For more details on initialization parameters and definition of terms, see
:py:class:`models.BaseDataModel`.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
data : dict
A dictionary of data, in which the keys are the detector names and the
values are the data (assumed to be unwhitened). The list of keys must
match the waveform generator's detectors keys, and the epoch of every
data set must be the same as the waveform generator's epoch.
low_frequency_cutoff : dict
A dictionary of starting frequencies, in which the keys are the
detector names and the values are the starting frequencies for the
respective detectors to be used for computing inner products.
psds : dict, optional
A dictionary of FrequencySeries keyed by the detector names. The
dictionary must have a psd for each detector specified in the data
dictionary. If provided, the inner products in each detector will be
weighted by 1/psd of that detector.
high_frequency_cutoff : dict, optional
A dictionary of ending frequencies, in which the keys are the
detector names and the values are the ending frequencies for the
respective detectors to be used for computing inner products. If not
provided, the minimum of the largest frequency stored in the data
and a given waveform will be used.
normalize : bool, optional
If True, the normalization factor :math:`\alpha` will be included in the
log likelihood. Default is to not include it.
static_params : dict, optional
A dictionary of parameter names -> values to keep fixed.
\**kwargs :
All other keyword arguments are passed to ``BaseDataModel``.
Examples
--------
Create a signal, and set up the model using that signal:
>>> from pycbc import psd as pypsd
>>> from pycbc.inference.models import GaussianNoise
>>> from pycbc.waveform.generator import (FDomainDetFrameGenerator,
... FDomainCBCGenerator)
>>> seglen = 4
>>> sample_rate = 2048
>>> N = int(seglen*sample_rate/2 + 1)
>>> fmin = 30.
>>> static_params = {'approximant': 'IMRPhenomD', 'f_lower': fmin,
... 'mass1': 38.6, 'mass2': 29.3,
... 'spin1z': 0., 'spin2z': 0., 'ra': 1.37, 'dec': -1.26,
... 'polarization': 2.76, 'distance': 3*500.}
>>> variable_params = ['tc']
>>> tsig = 3.1
>>> generator = FDomainDetFrameGenerator(
... FDomainCBCGenerator, 0., detectors=['H1', 'L1'],
... variable_args=variable_params,
... delta_f=1./seglen, **static_params)
>>> signal = generator.generate(tc=tsig)
>>> psd = pypsd.aLIGOZeroDetHighPower(N, 1./seglen, 20.)
>>> psds = {'H1': psd, 'L1': psd}
>>> low_frequency_cutoff = {'H1': fmin, 'L1': fmin}
>>> model = GaussianNoise(variable_params, signal, low_frequency_cutoff,
... psds=psds, static_params=static_params)
Set the current position to the coalescence time of the signal:
>>> model.update(tc=tsig)
Now compute the log likelihood ratio and prior-weighted likelihood ratio;
since we have not provided a prior, these should be equal to each other:
>>> print('{:.2f}'.format(model.loglr))
282.43
>>> print('{:.2f}'.format(model.logplr))
282.43
Print all of the default_stats:
>>> print(',\n'.join(['{}: {:.2f}'.format(s, v)
... for (s, v) in sorted(model.current_stats.items())]))
H1_cplx_loglr: 177.76+0.00j,
H1_optimal_snrsq: 355.52,
L1_cplx_loglr: 104.67+0.00j,
L1_optimal_snrsq: 209.35,
logjacobian: 0.00,
loglikelihood: 0.00,
loglr: 282.43,
logprior: 0.00
Compute the SNR; for this system and PSD, this should be approximately 24:
>>> from pycbc.conversions import snr_from_loglr
>>> x = snr_from_loglr(model.loglr)
>>> print('{:.2f}'.format(x))
23.77
Since there is no noise, the SNR should be the same as the quadrature sum
of the optimal SNRs in each detector:
>>> x = (model.det_optimal_snrsq('H1') +
... model.det_optimal_snrsq('L1'))**0.5
>>> print('{:.2f}'.format(x))
23.77
Toggle on the normalization constant:
>>> model.normalize = True
>>> model.loglikelihood
835397.8757405131
Using the same model, evaluate the log likelihood ratio at several points
in time and check that the max is at tsig:
>>> import numpy
>>> times = numpy.linspace(tsig-1, tsig+1, num=101)
>>> loglrs = numpy.zeros(len(times))
>>> for (ii, t) in enumerate(times):
... model.update(tc=t)
... loglrs[ii] = model.loglr
>>> print('tsig: {:.2f}, time of max loglr: {:.2f}'.format(
... tsig, times[loglrs.argmax()]))
tsig: 3.10, time of max loglr: 3.10
Create a prior and use it (see distributions module for more details):
>>> from pycbc import distributions
>>> uniform_prior = distributions.Uniform(tc=(tsig-0.2,tsig+0.2))
>>> prior = distributions.JointDistribution(variable_params, uniform_prior)
>>> model = GaussianNoise(variable_params,
... signal, low_frequency_cutoff, psds=psds, prior=prior,
... static_params=static_params)
>>> model.update(tc=tsig)
>>> print('{:.2f}'.format(model.logplr))
283.35
>>> print(',\n'.join(['{}: {:.2f}'.format(s, v)
... for (s, v) in sorted(model.current_stats.items())]))
H1_cplx_loglr: 177.76+0.00j,
H1_optimal_snrsq: 355.52,
L1_cplx_loglr: 104.67+0.00j,
L1_optimal_snrsq: 209.35,
logjacobian: 0.00,
loglikelihood: 0.00,
loglr: 282.43,
logprior: 0.92
"""
name = 'gaussian_noise'
def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
static_params=None, **kwargs):
# set up the boiler-plate attributes
super(GaussianNoise, self).__init__(
variable_params, data, low_frequency_cutoff, psds=psds,
high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
static_params=static_params, **kwargs)
# create the waveform generator
self.waveform_generator = create_waveform_generator(
self.variable_params, self.data,
waveform_transforms=self.waveform_transforms,
recalibration=self.recalibration,
gates=self.gates, **self.static_params)
@property
def _extra_stats(self):
"""Adds ``loglr``, plus ``cplx_loglr`` and ``optimal_snrsq`` in each
detector."""
return ['loglr'] + \
['{}_cplx_loglr'.format(det) for det in self._data] + \
['{}_optimal_snrsq'.format(det) for det in self._data]
def _nowaveform_loglr(self):
"""Convenience function to set loglr values if no waveform generated.
"""
# the loglikelihood is the same for every detector, so set it once
setattr(self._current_stats, 'loglikelihood', -numpy.inf)
for det in self._data:
setattr(self._current_stats, '{}_cplx_loglr'.format(det),
-numpy.inf)
# snr can't be < 0 by definition, so return 0
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.)
return -numpy.inf
def _loglr(self):
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) = \sum_i
\left<h_i(\Theta)|d_i\right> -
\frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio.
"""
params = self.current_params
try:
wfs = self.waveform_generator.generate(**params)
except NoWaveformError:
return self._nowaveform_loglr()
except FailedWaveformError as e:
if self.ignore_failed_waveforms:
return self._nowaveform_loglr()
else:
raise e
lr = 0.
for det, h in wfs.items():
# the kmax of the waveforms may be different than internal kmax
kmax = min(len(h), self._kmax[det])
if self._kmin[det] >= kmax:
# if the waveform terminates before the filtering low frequency
# cutoff, then the loglr is just 0 for this detector
cplx_hd = 0j
hh = 0.
else:
slc = slice(self._kmin[det], kmax)
# whiten the waveform
h[self._kmin[det]:kmax] *= self._weight[det][slc]
# the inner products
cplx_hd = self._whitened_data[det][slc].inner(h[slc]) # <h, d>
hh = h[slc].inner(h[slc]).real # < h, h>
cplx_loglr = cplx_hd - 0.5*hh
# store
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh)
setattr(self._current_stats, '{}_cplx_loglr'.format(det),
cplx_loglr)
lr += cplx_loglr.real
# also store the loglikelihood, to ensure it is populated in the
# current stats even if loglikelihood is never called
self._current_stats.loglikelihood = lr + self.lognl
return float(lr)
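# Explanatory note (not in the original source): the data and waveform are
# both multiplied by the whitening weights, which absorb the factor
# sqrt(4*df/psd), so the weighted inner product
# <a|b> = 4*df*Re sum_k conj(a[k])*b[k]/psd[k] reduces to the plain complex
# dot products that the ``.inner`` calls above compute.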
def det_cplx_loglr(self, det):
"""Returns the complex log likelihood ratio in the given detector.
Parameters
----------
det : str
The name of the detector.
Returns
-------
complex float :
The complex log likelihood ratio.
"""
# try to get it from current stats
try:
return getattr(self._current_stats, '{}_cplx_loglr'.format(det))
except AttributeError:
# hasn't been calculated yet; call loglr to do so
self._loglr()
# now try returning again
return getattr(self._current_stats, '{}_cplx_loglr'.format(det))
def det_optimal_snrsq(self, det):
"""Returns the opitmal SNR squared in the given detector.
Parameters
----------
det : str
The name of the detector.
Returns
-------
float :
The optimal SNR squared.
"""
# try to get it from current stats
try:
return getattr(self._current_stats, '{}_optimal_snrsq'.format(det))
except AttributeError:
# hasn't been calculated yet; call loglr to do so
self._loglr()
# now try returning again
return getattr(self._current_stats, '{}_optimal_snrsq'.format(det))
#
# =============================================================================
#
# Support functions
#
# =============================================================================
#
def get_values_from_injection(cp, injection_file, update_cp=True):
"""Replaces all FROM_INJECTION values in a config file with the
corresponding value from the injection.
This looks for any options that start with ``FROM_INJECTION[:ARG]`` in
a config file. It then replaces that value with the corresponding value
from the injection file. An argument may optionally be provided, in which
case that argument will be retrieved from the injection file. Functions of
parameters in the injection file may be used; the syntax and functions
available are the same as for the ``--parameters`` argument in executables
such as ``pycbc_inference_extract_samples``. If no ``ARG`` is provided,
the option name itself is used to retrieve the value from the injection.
For example,
.. code-block:: ini
mass1 = FROM_INJECTION
will cause ``mass1`` to be retrieved from the injection file, while:
.. code-block:: ini
mass1 = FROM_INJECTION:'primary_mass(mass1, mass2)'
will cause the larger of mass1 and mass2 to be retrieved from the injection
file. Note that if spaces are in the argument, it must be encased in
single quotes.
The injection file must contain exactly one injection; otherwise, a
ValueError will be raised.
Parameters
----------
cp : ConfigParser
The config file within which to replace values.
injection_file : str or None
The injection file to get values from. A ValueError will be raised
if there are any ``FROM_INJECTION`` values in the config file, and
injection file is None, or if there is more than one injection.
update_cp : bool, optional
Update the config parser with the replaced parameters. If False,
will just retrieve the parameter values to update, without updating
the config file. Default is True.
Returns
-------
list
The parameters that were replaced, as a tuple of section name, option,
value.
"""
lookfor = 'FROM_INJECTION'
# figure out what parameters need to be set
replace_params = []
for sec in cp.sections():
for opt in cp.options(sec):
val = cp.get(sec, opt)
splitvals = shlex.split(val)
replace_this = []
for ii, subval in enumerate(splitvals):
if subval.startswith(lookfor):
# determine what we should retrieve from the injection
subval = subval.split(':', 1)
if len(subval) == 1:
subval = opt
else:
subval = subval[1]
replace_this.append((ii, subval))
if replace_this:
replace_params.append((sec, opt, splitvals, replace_this))
if replace_params:
# check that we have an injection file
if injection_file is None:
raise ValueError("One or values are set to {}, but no injection "
"file provided".format(lookfor))
# load the injection file
inj = InjectionSet(injection_file).table.view(type=FieldArray)
# make sure there's only one injection provided
if inj.size > 1:
raise ValueError("One or more values are set to {}, but more than "
"one injection exists in the injection file."
.format(lookfor))
# get the injection values to replace
for ii, (sec, opt, splitvals, replace_this) in enumerate(replace_params):
# replace the value in the shlex-splitted string with the value
# from the injection
for jj, arg in replace_this:
splitvals[jj] = str(inj[arg][0])
# now rejoin the string...
# shlex will strip quotes around arguments; this can be problematic
# when rejoining if the argument had a space in it. In python 3.8
# there is a shlex.join function which properly rejoins things taking
# that into account. Since we need to continue to support earlier
# versions of python, the following kludge tries to account for that.
# If/when we drop support for all earlier versions of python, then the
# following can just be replaced by:
# replace_val = shlex.join(splitvals)
for jj, arg in enumerate(splitvals):
if ' ' in arg:
arg = "'" + arg + "'"
splitvals[jj] = arg
replace_val = ' '.join(splitvals)
replace_params[ii] = (sec, opt, replace_val)
# replace in the config file
if update_cp:
for (sec, opt, replace_val) in replace_params:
cp.set(sec, opt, replace_val)
return replace_params
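# Illustrative sketch (hypothetical file and option names): given a config
# section containing
#   [static_params]
#   mass1 = FROM_INJECTION:'primary_mass(mass1, mass2)'
#   mass2 = FROM_INJECTION:'secondary_mass(mass1, mass2)'
# and an injection file with exactly one injection, calling
# get_values_from_injection(cp, 'injection.hdf') replaces both options in
# place with the injected values and returns the list of
# (section, option, value) tuples that were replaced.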
def create_waveform_generator(variable_params, data, waveform_transforms=None,
recalibration=None, gates=None,
**static_params):
"""Creates a waveform generator for use with a model.
Parameters
----------
variable_params : list of str
The names of the parameters varied.
data : dict
Dictionary mapping detector names to either a
:py:class:`~pycbc.types.TimeSeries` or
:py:class:`~pycbc.types.FrequencySeries`.
waveform_transforms : list, optional
The list of transforms applied to convert variable parameters into
parameters that will be understood by the waveform generator.
recalibration : dict, optional
Dictionary mapping detector names to
:py:class:`~pycbc.calibration.Recalibrate` instances for
recalibrating data.
gates : dict of tuples, optional
Dictionary mapping detector names to tuples specifying gate times, of
the sort returned by :py:func:`pycbc.gate.gates_from_cli`.
Returns
-------
pycbc.waveform.FDomainDetFrameGenerator
A waveform generator for frequency domain generation.
"""
# the waveform generator will get the variable_params + the output
# of the waveform transforms, so we'll add them to the list of
# parameters
if waveform_transforms is not None:
wfoutputs = set.union(*[t.outputs
for t in waveform_transforms])
else:
wfoutputs = set()
variable_params = list(variable_params) + list(wfoutputs)
# figure out what generator to use based on the approximant
try:
approximant = static_params['approximant']
except KeyError:
raise ValueError("no approximant provided in the static args")
generator_function = generator.select_waveform_generator(approximant)
# get data parameters; we'll just use one of the data to get the
# values, then check that all the others are the same
delta_f = None
for d in data.values():
if delta_f is None:
delta_f = d.delta_f
delta_t = d.delta_t
start_time = d.start_time
else:
if not all([d.delta_f == delta_f, d.delta_t == delta_t,
d.start_time == start_time]):
raise ValueError("data must all have the same delta_t, "
"delta_f, and start_time")
waveform_generator = generator.FDomainDetFrameGenerator(
generator_function, epoch=start_time,
variable_args=variable_params, detectors=list(data.keys()),
delta_f=delta_f, delta_t=delta_t,
recalib=recalibration, gates=gates,
**static_params)
return waveform_generator
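# Minimal usage sketch (hypothetical ``data`` dict): build a generator for
# two detectors from frequency-domain data with matching delta_f, delta_t
# and start_time, then generate waveforms at a point in parameter space.
#
# gen = create_waveform_generator(
#     ['tc'], data,  # data = {'H1': FrequencySeries, 'L1': FrequencySeries}
#     approximant='IMRPhenomD', f_lower=20., mass1=38.6, mass2=29.3)
# wfs = gen.generate(tc=3.1)  # dict mapping detector -> FrequencySeries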
|
ahnitz/pycbc
|
pycbc/inference/models/gaussian_noise.py
|
Python
|
gpl-3.0
| 45,946
|
[
"Gaussian"
] |
1fcfe25700dea248462fd05e9cabb2bfea00308ff2fb4eb285c697e40e2e5a1c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script updates the 'fusioncatcher/etc/configuration.cfg' file with
the tool paths that it finds in the PATH environment variable.
It only needs to have pre-installed:
- Python version >=2.6.0 and < 3.0.
Author: Daniel Nicorici, Daniel.Nicorici@gmail.com
Copyright (c) 2009-2021 Daniel Nicorici
This file is part of FusionCatcher.
FusionCatcher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FusionCatcher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FusionCatcher (see file 'COPYING.txt'). If not, see
<http://www.gnu.org/licenses/>.
By default, FusionCatcher is running BLAT aligner
<http://users.soe.ucsc.edu/~kent/src/> but it offers also the option to disable
all its scripts which make use of BLAT aligner if you choose explicitly to do so.
BLAT's license does not allow to be used for commercial activities. If BLAT
license does not allow to be used in your case then you may still use
FusionCatcher by forcing not use the BLAT aligner by specifying the option
'--skip-blat'. Fore more information regarding BLAT please see its license.
Please, note that FusionCatcher does not require BLAT in order to find
candidate fusion genes!
This file is NOT running/executing/using BLAT.
"""
import os
import sys
import optparse
import shutil
import subprocess
import time
import tempfile
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
#############################################
# expand path
#############################################
def expand(*p):
return os.path.abspath(os.path.expanduser(os.path.join(*p)))
#############################################
# Find the path
#############################################
def findpath(exe,title=None,d=""):
if not title:
title = exe
print >>sys.stderr,"Finding",title,"..."
p = which(exe)
if p:
p = os.path.dirname(expand(p))
print >>sys.stderr," * Ok! '%s' found!" % (p,)
else:
p = d
print >>sys.stderr," * WARNING: '%s' NOT found!" % (exe,)
if p:
p = p.rstrip("/")+"/"
return p
#############################################
# test Python modules
#############################################
def findmodule(module, title = "",d="", verbose = True):
""" Test is a given module is installed
Example:
module = 'Bio'
title = 'BioPython'
"""
if not title:
title = module
if verbose:
print >>sys.stderr,"Checking if the Python library named '%s' is installed..." % (title,)
flag = True
try:
__import__(module)
except:
flag = False
if verbose:
print >>sys.stderr, " * WARNING: The Python library '%s' is not installed!\n" % (title,)
module_path = None
try:
module_path = getattr(__import__(module),'__path__')[0]
except:
if verbose:
print >>sys.stderr, " * WARNING: Cannot find the path of the Python library '%s'!" % (title,)
if verbose:
if flag:
if module_path:
print " * Ok! Python library '%s' found at '%s'!" % (title,module_path)
else:
print " * Ok! Python library '%s' found!" % (title,)
else:
print >>sys.stderr," * WARNING! Python library '%s' not found!" % (title,)
if flag == False or (not module_path):
module_path = d
if module_path:
module_path = module_path.rstrip("/")+"/"
return module_path
#############################################
# simulates which
#############################################
def which(program, cwd = True):
"""
Simulates which from Linux
Usage example:
p = which(exe, cwd = False)
if p:
p = expand(p)
"""
if os.path.dirname(program):
if os.access(program,os.X_OK) and os.path.isfile(program):
return program
else:
paths = os.environ["PATH"].split(os.pathsep)
if cwd:
paths.append(os.getcwd())
for path in paths:
if path:
p = os.path.join(path.strip('"'),program)
if os.access(p,os.X_OK) and os.path.isfile(p):
return p
return None
################################################################################
# MAIN
################################################################################
if __name__ == '__main__':
#command line parsing
usage = "%prog [options]"
description = ("This script just updates the 'fusioncatcher/etc/configuration.cfg'\n"+
"file with the paths that it finds when running in the PATH variable.")
version = "%prog 0.99.7c beta"
parser = optparse.OptionParser(usage = usage,
description = description,
version = version)
parser.add_option("-w","--write-changes",
action = "store_true",
default = False,
dest = "write_changes",
help = """If specified than the updates/changes will be written to '%s'.""" % ( os.path.abspath(os.path.join(os.path.dirname(expand(__file__)),"..","etc","configuration.cfg")),))
(options, args) = parser.parse_args()
################################################################################
################################################################################
################################################################################
os.system("set +e") # make sure that the shell scripts are still executed if there are errors
PATH = dict()
############################################################################
# Absolute path to the Python executable
############################################################################
print >>sys.stderr,"Obtaining the absolute path of the Python executable..."
PATH["python"] = expand(sys.executable)
print >>sys.stderr," * Ok! '%s' found!" % (PATH["python"],)
############################################################################
# Python version
############################################################################
print >>sys.stderr,"Checking Python version..."
version = sys.version_info
if version >= (2,6) and version < (3,0):
print >>sys.stderr," * Ok! Found Python version: %s.%s" % (version[0],version[1])
else:
print >>sys.stderr, " * ERROR: Found Python version: %s.%s !\n" % (version[0],version[1])
print >>sys.stderr, " The Python version should be >=2.6.0 and < 3.0 . If there is another"
print >>sys.stderr, " Python version installed you could run again this script using that"
sys.exit(1)
############################################################################
# Test 64-bit environment
############################################################################
print >>sys.stderr,"Checking if this environment is a 64-bit environment..."
import struct
if struct.calcsize("P") * 8 >= 64:
print >>sys.stderr," * Ok! 64-bit environment found."
else:
print >>sys.stderr, " * WARNING: Not a 64-bit environment! 64-bit environment is needed!"
############################################################################
# FUSIONCATCHER
############################################################################
print >>sys.stderr,"Finding FusionCatcher's path..."
PATH["scripts"] = os.path.dirname(expand(__file__))
print >>sys.stderr," * Ok! '%s' found!" % (PATH["scripts"],)
PATH["data"] = os.path.abspath(os.path.join(os.path.dirname(expand(__file__)),"..","data","current"))
############################################################################
# BIOPYTHON
############################################################################
PATH["biopython"] = findmodule("Bio","BioPython")
############################################################################
# Python module: XLRD
############################################################################
PATH["xlrd"] = findmodule("xlrd","Xlrd")
############################################################################
# Python module: OPENPYXL
############################################################################
PATH["openpyxl"] = findmodule("openpyxl","OpenPyXL")
############################################################################
# BOWTIE
############################################################################
PATH["bowtie"] = findpath("bowtie")
############################################################################
# BOWTIE2
############################################################################
PATH["bowtie2"] = findpath("bowtie2")
############################################################################
# SRATOOLKIT
############################################################################
PATH["sra"] = findpath("fastq-dump")
############################################################################
# LIFTOVER
############################################################################
PATH["liftover"] = findpath("liftOver")
############################################################################
# BLAT
############################################################################
PATH["blat"] = findpath("blat")
############################################################################
# FATOTWOBIT
############################################################################
PATH["fatotwobit"] = findpath("faToTwoBit")
############################################################################
# SEQTK
############################################################################
PATH["seqtk"] = findpath("seqtk")
############################################################################
# STAR
############################################################################
PATH["star"] = findpath("STAR")
############################################################################
# PIGZ
############################################################################
PATH["pigz"] = findpath("pigz")
############################################################################
# EXTRA (SORT and LZOP)
############################################################################
PATH["bwa"] = findpath("bwa")
############################################################################
# SAMTOOLS
############################################################################
PATH["samtools"] = findpath("samtools")
############################################################################
# VELVET
############################################################################
PATH["velvet"] = findpath("velveth")
############################################################################
# PARALLEL
############################################################################
PATH["parallel"] = findpath("parallel")
############################################################################
# JAVA
############################################################################
PATH["java"] = findpath("java")
# ############################################################################
# # PICARD
# ############################################################################
# PICARD_PATH = findpath("picard")
# ############################################################################
# # LZO
# ############################################################################
# LZO_PATH = r
# LZOP_PATH = r
# ############################################################################
# # COREUTILS
# ############################################################################
# COREUTILS_PATH = r
# ############################################################################
# # PXZ
# ############################################################################
# PXZ_PATH = r
if options.write_changes:
c = os.path.abspath(os.path.join(os.path.dirname(expand(__file__)),"..","etc","configuration.cfg"))
n = os.path.abspath(os.path.join(os.path.dirname(expand(__file__)),"..","etc","configuration.cfg.bak"))
print >>sys.stderr, "\n\nWARNING: Writing updates/changes to the configuration file '%s'!\n\n" %(c,)
d = [line for line in file(c,"r").readlines()]
# save the original into BAK file
file(n,"w").writelines(d)
r = []
for line in d:
t = line
if line and line.rstrip("\r\n") and (not line.startswith("#")) and (not line.startswith("[")):
#
x = line.split("=")
k = x[0].strip()
v = PATH.get(k,None)
if v:
t = "%s = %s\n" % (k,v)
print >>sys.stderr," * Changed: %s" % (t.strip(),)
r.append(t)
# write the changes
file(c,"w").writelines(r)
else:
print >>sys.stderr, "\n\nWARNING: No changes have been done to the configuration file!\n\n"
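# Illustrative effect (assumed configuration.cfg format): a non-comment,
# non-section line such as
#   bowtie = /old/path/
# whose key matches one of the tools detected above is rewritten with the
# newly found location, e.g.
#   bowtie = /usr/local/bin/
# while comments, section headers and unrecognized keys are left untouched.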
|
ndaniel/fusioncatcher
|
bin/update-config.py
|
Python
|
gpl-3.0
| 14,402
|
[
"BWA",
"Biopython",
"Bowtie"
] |
ca22ed2a4019cf880d5951ade1dd36324a0a40c30cbee5c7d9afb4a86f3a8137
|
# Created by DrLecter, based on DraX' scripts
# This script is part of the L2J Datapack Project
# Visit us at http://www.l2jdp.com/
# See readme-dp.txt and gpl.txt for license and distribution details
# Let us know if you did not receive a copy of such files.
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "elven_human_fighters_2"
#Quest items
MARK_OF_CHALLENGER = 2627
MARK_OF_DUTY = 2633
MARK_OF_SEEKER = 2673
MARK_OF_TRUST = 2734
MARK_OF_DUELIST = 2762
MARK_OF_SEARCHER = 2809
MARK_OF_HEALER = 2820
MARK_OF_LIFE = 3140
MARK_OF_CHAMPION = 3276
MARK_OF_SAGITTARIUS = 3293
MARK_OF_WITCHCRAFT = 3307
#HANNAVALT,BLACKBIRD,SIRIA,SEDRICK,MARCUS,HECTOR,SCHULE
NPCS=[30109,30187,30689,30849,30900,31965,32094]
#event:[newclass,req_class,req_race,low_ni,low_i,ok_ni,ok_i,req_item]
#low_ni: level too low, and you don't have the quest items
#low_i: level too low, even though you have the items
#ok_ni: level ok, but you don't have the quest items
#ok_i: level ok, you have the items, so the class change takes place
CLASSES = {
"TK":[20,19,1,"36","37","38","39",[MARK_OF_DUTY,MARK_OF_LIFE,MARK_OF_HEALER]],
"SS":[21,19,1,"40","41","42","43",[MARK_OF_CHALLENGER,MARK_OF_LIFE,MARK_OF_DUELIST]],
"PL":[ 5, 4,0,"44","45","46","47",[MARK_OF_DUTY,MARK_OF_TRUST,MARK_OF_HEALER]],
"DA":[ 6, 4,0,"48","49","50","51",[MARK_OF_DUTY,MARK_OF_TRUST,MARK_OF_WITCHCRAFT]],
"TH":[ 8, 7,0,"52","53","54","55",[MARK_OF_SEEKER,MARK_OF_TRUST,MARK_OF_SEARCHER]],
"HE":[ 9, 7,0,"56","57","58","59",[MARK_OF_SEEKER,MARK_OF_TRUST,MARK_OF_SAGITTARIUS]],
"PW":[23,22,1,"60","61","62","63",[MARK_OF_SEEKER,MARK_OF_LIFE,MARK_OF_SEARCHER]],
"SR":[24,22,1,"64","65","66","67",[MARK_OF_SEEKER,MARK_OF_LIFE,MARK_OF_SAGITTARIUS]],
"GL":[ 2, 1,0,"68","69","70","71",[MARK_OF_CHALLENGER,MARK_OF_TRUST,MARK_OF_DUELIST]],
"WL":[ 3, 1,0,"72","73","74","75",[MARK_OF_CHALLENGER,MARK_OF_TRUST,MARK_OF_CHAMPION]]
}
#Messages
default = "No Quest"
def change(st,player,newclass,items) :
for item in items :
st.takeItems(item,1)
st.playSound("ItemSound.quest_fanfare_2")
player.setClassId(newclass)
player.setBaseClass(newclass)
player.broadcastUserInfo()
return
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onAdvEvent (self,event,npc,player) :
npcId = npc.getNpcId()
htmltext = default
suffix = ''
st = player.getQuestState(qn)
if not st : return
race = player.getRace().ordinal()
classid = player.getClassId().getId()
level = player.getLevel()
if npcId not in NPCS : return
if not event in CLASSES.keys() :
return event
else :
newclass,req_class,req_race,low_ni,low_i,ok_ni,ok_i,req_item=CLASSES[event]
if race == req_race and classid == req_class :
item = True
for i in req_item :
if not st.getQuestItemsCount(i):
item = False
if level < 40 :
suffix = low_i
if not item :
suffix = low_ni
else :
if not item :
suffix = ok_ni
else :
suffix = ok_i
change(st,player,newclass,req_item)
st.exitQuest(1)
htmltext = "30109-"+suffix+".htm"
return htmltext
def onTalk (self,npc,player):
st = player.getQuestState(qn)
npcId = npc.getNpcId()
race = player.getRace().ordinal()
classId = player.getClassId()
id = classId.getId()
htmltext = default
if player.isSubClassActive() :
st.exitQuest(1)
return htmltext
if npcId in NPCS :
htmltext = "30109"
if race in [0,1] : # Human and Elves only
if id == 19 : # elven knight
return htmltext+"-01.htm"
elif id == 4 : # human knight
return htmltext+"-08.htm"
elif id == 7 : # rogue
return htmltext+"-15.htm"
elif id == 22 : # elven scout
return htmltext+"-22.htm"
elif id == 1 : # human warrior
return htmltext+"-29.htm"
elif classId.level() == 0 : # first occupation change not made yet
htmltext += "-76.htm"
elif classId.level() >= 2 : # second/third occupation change already made
htmltext += "-77.htm"
else :
htmltext += "-78.htm" # other conditions
else :
htmltext += "-78.htm" # other races
st.exitQuest(1)
return htmltext
QUEST = Quest(99991,qn,"village_master")
CREATED = State('Start', QUEST)
QUEST.setInitialState(CREATED)
for npc in NPCS:
QUEST.addStartNpc(npc)
QUEST.addTalkId(npc)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/village_master/elven_human_fighters_2/__init__.py
|
Python
|
gpl-3.0
| 4,821
|
[
"VisIt"
] |
ac258f20f8efb2c60ad72db5a720832ac128fe7143bf7ea37d98872bee011c00
|
import numpy as np
import matplotlib.pyplot as plt
import mayavi.mlab as mv
import gto
MAX_HF_ITERS = 10
class Atom:
def __init__(self, position, charge):
self.position = position
self.charge = charge
def make_initial_guess(atoms):
return []
def build_fock_matrix(atoms, current_wavefunctions):
return []
def get_fock_eigenfunctions(matrix):
return []
def hf_converged(w1, w2):
return True
def hf_optimize(atoms):
new_wavefunctions = make_initial_guess(atoms)
for i in range(MAX_HF_ITERS):
current_wavefunctions = new_wavefunctions
fock_matrix = build_fock_matrix(atoms, current_wavefunctions)
new_wavefunctions = get_fock_eigenfunctions(fock_matrix)
if hf_converged(current_wavefunctions, new_wavefunctions):
break
def gto_val(alpha, center, momentum, pos):
delta = pos - center
return (delta[0]**momentum[0])*(delta[1]**momentum[1])*(delta[2]**momentum[2])*np.exp(-alpha*np.dot(delta, delta))
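# Note (derived from the code above): gto_val evaluates a primitive
# Cartesian Gaussian-type orbital,
#   (x-X)^lx * (y-Y)^ly * (z-Z)^lz * exp(-alpha*|r-R|^2),
# so a p_x-like function (momentum = [1, 0, 0]) centered at the origin is
# zero at r = 0 and changes sign under x -> -x.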
X, Y, Z = np.mgrid[-2:2:100j, -2:2:20j, -2:2:20j]
c0 = np.array([0., 0., 0.])
m0 = np.array([1., 0., 0.])
m1 = np.array([0., 1., 0.])
cfunc = lambda x, y, z: (gto_val(1, c0, m0, np.array([x, y, z])) + gto_val(1, c0, m1, np.array([x, y, z])))
mv.clf()
mv.contour3d(X, Y, Z, np.vectorize(cfunc), contours=4)
|
DmitriyNE/QmDemos
|
HfDemo/HfOptimize.py
|
Python
|
bsd-2-clause
| 1,358
|
[
"Mayavi"
] |
4f79279e7c6e1407ac47484db5b132a849a92fba40daf391619ceffd45022391
|
#! /usr/bin/env python
# coding:utf-8
# Author: bingwang
# Email: toaya.kase@gmail.com
# Copylight 2012-2012 Bing Wang
# LICENCES: GPL v3.0
__docformat__ = "epytext en"
# dependents: YGOB/
# biopython
# bing
from Bio import SeqIO
YGOB_path="/Users/bingwang/zen/yeast_anno_pipe/YGOB_dataset/"
out_path="/Users/bingwang/zen/yeast_anno_pipe/output/"
class Yeast_Gene:
#self.length
def __init__(self,gene_name,chr_id=None):
self.name = gene_name
self.chr_id = chr_id
def get_pos(self,gene_pos):
self.pos_str = gene_pos
self.strand = "-" if "complement" in gene_pos else "+"
self.intron = True if gene_pos.count("..") > 1 else False
pos_string = gene_pos[gene_pos.find("(")+1:gene_pos.rfind(")")]
if self.intron:
self.pos = []
pairs = [a for a in pos_string.split(",")]
for pair in pairs:
self.pos.append((int(pair.split("..")[0]),int(pair.split("..")[1])))
else:
a,b = pos_string.split("..")
self.pos = [int(a),int(b)]
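# Illustrative parse (derived from get_pos above): a position string such as
# "complement(100..200,300..400)" yields strand = "-", intron = True and
# pos = [(100, 200), (300, 400)].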
def intron_pillar():
gene_list = []
for line in open(out_path+"YGOB_intron.fsa"):
if line.startswith(">"):
gene_list.append(line.split("\t")[0][1:])
f = open(out_path+"intron_pillar.tab", "w")
for line in open(YGOB_path+"Pillars.tab"):
flag = False
names = line.split("\t")
for name in names:
if name != "---" and name in gene_list:
flag = True
break
if flag:
record = []
for name in names:
if name == "---" or name in gene_list:
record.append(name)
else:
record.append("+++")
f.write("\t".join(record)+"\n")
name2sp = {}
seq_dict = {}
for sp_name in ["Vpolyspora","Tphaffii","Tblattae","Ndairenensis",\
"Ncastellii","Knaganishii","Kafricana","Cglabrata","Suvarum",\
"Skudriavzevii","Smikatae","Scerevisiae","Zrouxii",\
"Tdelbrueckii","Klactis","Egossypii","Ecymbalariae","Lkluyveri",\
"Lthermotolerans","Lwaltii"]:
for record in SeqIO.parse(YGOB_path + sp_name + "_sequence.fsa","fasta"):
seq_dict[sp_name+"_"+record.id[record.id.find("_"):].replace("_","")] = record.seq
for line in open(YGOB_path + sp_name + "_genome.tab"):
gene_name = line.split("\t")[0]
name2sp[gene_name] = sp_name
Gene_dict = {}
for record in SeqIO.parse(YGOB_path+"AA.fsa","fasta"):
gene_name,chr_name,gene_pos,length = record.description.split(" ")[:4]
if gene_pos.count("..") > 1:
sp = name2sp[gene_name]
Gene_dict[gene_name] = Yeast_Gene(gene_name,chr_name)
Gene_dict[gene_name].get_pos(gene_pos)
Gene_dict[gene_name].aa = record.seq
Gene_dict[gene_name].sp = sp
try:
pos_start = Gene_dict[gene_name].pos[0][0]
except:
raise ValueError("unexpected position format for gene '%s'" % gene_name)
pos_end = Gene_dict[gene_name].pos[-1][1]
if Gene_dict[gene_name].strand == "+":
Gene_dict[gene_name].full_nt = seq_dict[sp+"_"+chr_name][pos_start-1:pos_end]
else:
Gene_dict[gene_name].full_nt = \
seq_dict[sp+"_"+chr_name][pos_start-1:pos_end].reverse_complement()
f = open(out_path+"YGOB_intron.fsa","w")
for gene_name in Gene_dict:
sp = Gene_dict[gene_name].sp
chr_id = Gene_dict[gene_name].chr_id
strand = Gene_dict[gene_name].strand
pos = Gene_dict[gene_name].pos_str
aa = str(Gene_dict[gene_name].aa)
nt_seq = str(Gene_dict[gene_name].full_nt)
f.write(">%s\t%s\t%s\t%s\t%s\t%s\n" % (gene_name,sp,chr_id,starnd,pos,aa))
f.write("%s\n" % nt_seq)
|
BingW/yeast_anno_pipe
|
src/get_ygob_intron.py
|
Python
|
gpl-3.0
| 3,751
|
[
"Biopython"
] |
adf107d81ac5ada04948553165b27f80ff1deed6e19ef95c0726576fc5fb76a7
|
from ..core import mutinf, nmutinf
from .base import (AlphaAngleBaseMetric, ContactBaseMetric, DihedralBaseMetric,
BaseMetric)
import numpy as np
from itertools import combinations_with_replacement as combinations
from multiprocessing import Pool
from contextlib import closing
__all__ = ['AlphaAngleMutualInformation', 'ContactMutualInformation',
'DihedralMutualInformation']
class MutualInformationBase(BaseMetric):
"""Base mutual information object"""
def _partial_mutinf(self, p):
i, j = p
return self._est(self.n_bins,
self.data[i].values,
self.shuffled_data[j].values,
rng=self.rng,
method=self.method)
def _exec(self):
M = np.zeros((self.labels.size, self.labels.size))
with closing(Pool(processes=self.n_threads)) as pool:
values = pool.map(self._partial_mutinf,
combinations(self.labels, 2))
pool.terminate()
idx = np.triu_indices_from(M)
M[idx] = values
return M + M.T - np.diag(M.diagonal())
def __init__(self, normed=True, **kwargs):
self._est = nmutinf if normed else mutinf
self.partial_transform.__func__.__doc__ = """
Partial transform a mdtraj.Trajectory into an n_residue by n_residue
matrix of mutual information scores.
Parameters
----------
traj : mdtraj.Trajectory
Trajectory to transform
shuffle : int
Number of shuffle iterations (default: 0)
verbose : bool
Whether to display performance
Returns
-------
result : np.ndarray, shape = (n_residue, n_residue)
Mutual information matrix
"""
super(MutualInformationBase, self).__init__(**kwargs)
class AlphaAngleMutualInformation(AlphaAngleBaseMetric, MutualInformationBase):
"""Mutual information calculations for alpha angles"""
class ContactMutualInformation(ContactBaseMetric, MutualInformationBase):
"""Mutual information calculations for contacts"""
class DihedralMutualInformation(DihedralBaseMetric, MutualInformationBase):
"""Mutual information calculations for dihedral angles"""
|
msmbuilder/mdentropy
|
mdentropy/metrics/mutinf.py
|
Python
|
mit
| 2,364
|
[
"MDTraj"
] |
67eaafc706fc77891c8a38b2989a9cf2067e3030c24dec47edefea1667634a00
|
#!/usr/bin/env python
import glob
import os
import subprocess
PULSAR_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
GALAXY_DIR = os.path.normpath(os.path.join(PULSAR_ROOT, "..", "galaxy"))
subprocess.run(["python", "-m", "pip", "install", "build"], check=True, stdout=subprocess.PIPE)
env = os.environ.copy()  # copy so the parent environment is not mutated
env["PULSAR_GALAXY_LIB"] = "1"
subprocess.run(["python", "-m", "build", "--wheel"], env=env, check=True, stdout=subprocess.PIPE)
lib_wheel_path = glob.glob(f'{PULSAR_ROOT}/dist/pulsar_galaxy_lib-*-none-any.whl')[0]
print(f"Replacing Galaxy pulsar-galaxy-lib requirements in {GALAXY_DIR} with {lib_wheel_path}")
for req in ["lib/galaxy/dependencies/pinned-requirements.txt", "lib/galaxy/dependencies/dev-requirements.txt"]:
req_abs_path = os.path.join(GALAXY_DIR, req)
with open(req_abs_path) as f:
lines = f.read()
new_lines = []
for line in lines.splitlines():
if line.startswith("pulsar-galaxy-lib"):
line = lib_wheel_path
new_lines.append(line)
with open(req_abs_path, "w") as f:
f.write("\n".join(new_lines))
|
galaxyproject/pulsar
|
tools/replace_galaxy_requirements_for_ci.py
|
Python
|
apache-2.0
| 1,122
|
[
"Galaxy"
] |
1196a151f67b41465a859e4ce0536dc57dda2da8de85c3cf0c8c506a8157607e
|
"""
Metadata for all sources.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2008, Prabhu Ramachandran Enthought, Inc.
# License: BSD Style.
# Local imports.
from mayavi.core.metadata import SourceMetadata
from mayavi.core.pipeline_info import PipelineInfo
BASE = 'mayavi.sources'
open_3ds = SourceMetadata(
id = "3DSFile",
class_name = BASE + ".three_ds_importer.ThreeDSImporter",
tooltip = "Import a 3D Studio file",
desc = "Import a 3D Studio file",
help = "Import a 3D Studio file",
menu_name = "&3D Studio file",
extensions = ['3ds'],
wildcard = '3D Studio files (*.3ds)|*.3ds',
output_info = PipelineInfo(datasets=['none'],
attribute_types=['any'],
attributes=['any'])
)
open_image = SourceMetadata(
id = "ImageFile",
class_name = BASE + ".image_reader.ImageReader",
menu_name = "&Image file (PNG/JPG/BMP/PNM/TIFF/DEM/DCM/XIMG/MHA/MHD/MINC)",
tooltip = "Import a PNG/JPG/BMP/PNM/TIFF/DCM/DEM/XIMG/MHA/MHD/MINC image",
desc = "Import a PNG/JPG/BMP/PNM/TIFF/DCM/DEM/XIMG/MHA/MHD/MINC image",
extensions = ['png', 'jpg', 'jpeg', 'bmp', 'pnm', 'tiff', 'dcm', 'dem',
'ximg', 'mha', 'mhd', 'mnc'],
wildcard = 'PNG files (*.png)|*.png|'\
'JPEG files (*.jpg)|*.jpg|'\
'JPEG files (*.jpeg)|*.jpeg|'\
'BMP files (*.bmp)|*.bmp|'\
'PNM files (*.pnm)|*.pnm|'\
'DCM files (*.dcm)|*.dcm|'\
'DEM files (*.dem)|*.dem|'\
'Meta mha files (*.mha)|*.mha|'\
'Meta mhd files (*.mhd)|*.mhd|'\
'MINC files (*.mnc)|*.mnc|'\
'XIMG files (*.ximg)|*.ximg|'\
'TIFF files (*.tiff)|*.tiff',
output_info = PipelineInfo(datasets=['image_data'],
attribute_types=['any'],
attributes=['any'])
)
open_poly_data = SourceMetadata(
id = "PolyDataFile",
class_name = BASE + ".poly_data_reader.PolyDataReader",
menu_name = "&PolyData file (STL/STLA/STLB/TXT/RAW/PLY/PDB/SLC/FACET\
/OBJ/BYU/XYZ/CUBE)",
tooltip = "Import a STL/STLA/STLB/TXT/RAW/PLY/PDB/SLC/FACET/OBJ/\
BYU/XYZ/CUBE Poly Data",
desc = "Import a STL/STLA/STLB/TXT/RAWPLY/PDB/SLC/FACET/OBJ/BYU/XYZ/\
CUBE Poly Data",
extensions = ['stl', 'stla', 'stlb', 'txt', 'raw', 'ply', 'pdb', 'slc',
'facet', 'xyz', 'cube', 'obj', 'g'],
wildcard = 'STL files (*.stl)|*.stl|'\
'STLA files (*.stla)|*.stla|'\
'STLB files (*.stlb)|*.stlb|'\
'BYU files (*.g)|*.g|'\
'TXT files (*.txt)|*.txt|'\
'RAW files (*.raw)|*.raw|'\
'PLY files (*.ply)|*.ply|'\
'PDB files (*.pdb)|*.pdb|'\
'SLC files (*.slc)|*.slc|'\
'XYZ files (*.xyz)|*.xyz|'\
'CUBE files (*.cube)|*.cube|'\
'FACET files (*.facet)|*.facet|'\
'OBJ files (*.obj)|*.obj',
can_read_test = 'mayavi.sources.poly_data_reader:PolyDataReader.can_read',
output_info = PipelineInfo(datasets=['poly_data'],
attribute_types=['any'],
attributes=['any'])
)
open_ugrid_data = SourceMetadata(
id = "VTKUnstructuredFile",
class_name = BASE + ".unstructured_grid_reader.UnstructuredGridReader",
menu_name = "&Unstrucured Grid fil (INP/NEU/EXII)",
tooltip = "Open a Unstrucured Grid file",
desc = "Open a Unstrucured Grid file",
help = "Open a Unstrucured Grid file",
extensions = ['inp', 'neu', 'exii'],
wildcard = 'AVSUCD INP files (*.inp)|*.inp|'\
'GAMBIT NEU (*.neu)|*.neu|'\
'EXODUS EXII (*.exii)|*.exii',
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
)
open_plot3d = SourceMetadata(
id = "PLOT3DFile",
class_name = BASE + ".plot3d_reader.PLOT3DReader",
menu_name = "&PLOT3D file",
tooltip = "Open a PLOT3D data data",
desc = "Open a PLOT3D data data",
help = "Open a PLOT3D data data",
extensions = ['xyz'],
wildcard = 'PLOT3D files (*.xyz)|*.xyz',
output_info = PipelineInfo(datasets=['structured_grid'],
attribute_types=['any'],
attributes=['any'])
)
open_vrml = SourceMetadata(
id = "VRMLFile",
class_name = BASE + ".vrml_importer.VRMLImporter",
menu_name = "V&RML2 file",
tooltip = "Import a VRML2 data file",
desc = "Import a VRML2 data file",
help = "Import a VRML2 data file",
extensions = ['wrl'],
wildcard = 'VRML2 files (*.wrl)|*.wrl',
output_info = PipelineInfo(datasets=['none'],
attribute_types=['any'],
attributes=['any'])
)
open_vtk = SourceMetadata(
id = "VTKFile",
class_name = BASE + ".vtk_file_reader.VTKFileReader",
menu_name = "&VTK file",
tooltip = "Open a VTK data file",
desc = "Open a VTK data file",
help = "Open a VTK data file",
extensions = ['vtk'],
wildcard = 'VTK files (*.vtk)|*.vtk',
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
)
open_vtk_xml = SourceMetadata(
id = "VTKXMLFile",
class_name = BASE + ".vtk_xml_file_reader.VTKXMLFileReader",
menu_name = "VTK &XML file",
tooltip = "Open a VTK XML data file",
desc = "Open a VTK XML data file",
help = "Open a VTK XML data file",
extensions = ['xml', 'vti', 'vtp', 'vtr', 'vts', 'vtu',
'pvti', 'pvtp', 'pvtr', 'pvts', 'pvtu'],
wildcard = 'VTK XML files (*.xml)|*.xml|'\
'Image Data (*.vti)|*.vti|'\
'Poly Data (*.vtp)|*.vtp|'\
'Rectilinear Grid (*.vtr)|*.vtr|'\
'Structured Grid (*.vts)|*.vts|'\
'Unstructured Grid (*.vtu)|*.vtu|'\
'Parallel Image Data (*.pvti)|*.pvti|'\
'Parallel Poly Data (*.pvtp)|*.pvtp|'\
'Parallel Rectilinear Grid (*.pvtr)|*.pvtr|'\
'Parallel Structured Grid (*.pvts)|*.pvts|'\
'Parallel Unstructured Grid (*.pvtu)|*.pvtu',
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
)
parametric_surface = SourceMetadata(
id = "ParametricSurfaceSource",
class_name = BASE + ".parametric_surface.ParametricSurface",
menu_name = "&Create Parametric surface source",
tooltip = "Create a parametric surface source",
desc = "Create a parametric surface source",
help = "Create a parametric surface source",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['poly_data'],
attribute_types=['any'],
attributes=['any'])
)
point_load = SourceMetadata(
id = "PointLoadSource",
class_name = BASE + ".point_load.PointLoad",
menu_name = "Create Point &load source",
tooltip = "Simulates a point load on a cube of data (for tensors)",
desc = "Simulates a point load on a cube of data (for tensors)",
help = "Simulates a point load on a cube of data (for tensors)",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['image_data'],
attribute_types=['any'],
attributes=['any'])
)
builtin_surface = SourceMetadata(
id = "BuiltinSurfaceSource",
class_name = BASE + ".builtin_surface.BuiltinSurface",
menu_name = "Create built-in &surface",
tooltip = "Create a vtk poly data source",
desc = "Create a vtk poly data source",
help = "Create a vtk poly data source",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['poly_data'],
attribute_types=['any'],
attributes=['any'])
)
builtin_image = SourceMetadata(
id = "BuiltinImageSource",
class_name = BASE + ".builtin_image.BuiltinImage",
menu_name = "Create built-in &image",
tooltip = "Create a vtk image data source",
desc = "Create a vtk image data source",
help = "Create a vtk image data source",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['image_data'],
attribute_types=['any'],
attributes=['any'])
)
open_volume = SourceMetadata(
id = "VolumeFile",
class_name = BASE + ".volume_reader.VolumeReader",
menu_name = "&Volume file",
tooltip = "Open a Volume file",
desc = "Open a Volume file",
help = "Open a Volume file",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['image_data'],
attribute_types=['any'],
attributes=['any'])
)
open_chaco = SourceMetadata(
id = "ChacoFile",
class_name = BASE + ".chaco_reader.ChacoReader",
menu_name = "&Chaco file",
tooltip = "Open a Chaco file",
desc = "Open a Chaco file",
help = "Open a Chaco file",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['unstructured_grid'],
attribute_types=['any'],
attributes=['any'])
)
# Now collect all the sources for the mayavi registry.
sources = [open_3ds,
open_image,
open_plot3d,
open_vrml,
open_vtk,
open_vtk_xml,
parametric_surface,
point_load,
builtin_surface,
builtin_image,
open_poly_data,
open_ugrid_data,
open_volume,
open_chaco,
]
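# Illustrative consumption of the list above (not part of this module): the
# metadata alone is enough to build menus and file-dialog filters, e.g.
#
# for src in sources:
#     if src.extensions:
#         print(src.menu_name, '->', ', '.join(src.extensions))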
|
dmsurti/mayavi
|
mayavi/sources/metadata.py
|
Python
|
bsd-3-clause
| 10,449
|
[
"Mayavi",
"VTK"
] |
94f69a6947682dbcab6f802b9c5b553285957152db9330d15a4ba8338a131562
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os.path
from commoncode.testcase import FileBasedTesting
import cluecode.copyrights
class TestCopyrightDetector(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_copyright_detect2_basic(self):
location = self.get_test_loc('copyrights/copyright_essential_smoke-ibm_c.c')
expected = [
([u'Copyright IBM and others (c) 2008'], [], [u'2008'], [u'IBM and others'], 6, 6),
([u'Copyright Eclipse, IBM and others (c) 2008'], [], [u'2008'], [u'Eclipse, IBM and others'], 8, 8)
]
results = list(cluecode.copyrights.detect_copyrights(location))
assert expected == results
def check_detection(expected, test_file,
what='copyrights', with_line_num=True):
"""
Run detection of copyright on the test_file, checking the results
match the expected list of values.
If expected_in_results and results_in_expected are True (the default),
then expected and test results are tested for equality. To accommodate
for some level of approximate testing, the check can test only if an
expected result in a test result, or the opposite. If
expected_in_results and results_in_expected are both False an
exception is raised as this is not a case that make sense.
"""
all_results = []
for detection in cluecode.copyrights.detect_copyrights(test_file):
copyrights, authors, years, holders, start_line, end_line = detection
what_is_detected = locals().get(what)
if not what_is_detected:
continue
if with_line_num:
results = (what_is_detected, start_line, end_line)
all_results.append(results)
else:
results = what_is_detected
all_results.extend(results)
assert expected == all_results
class TestCopyrightLinesDetection(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_company_name_in_java(self):
test_file = self.get_test_loc('copyrights/company_name_in_java-9_java.java')
expected = [
([u'Copyright (c) 2008-2011 Company Name Incorporated'], 2, 3)
]
check_detection(expected, test_file)
def test_copyright_03e16f6c_0(self):
test_file = self.get_test_loc('copyrights/copyright_03e16f6c_0-e_f_c.0')
expected = [
(
[u'Copyright (c) 1997 Microsoft Corp.',
u'Copyright (c) 1997 Microsoft Corp.',
u'(c) 1997 Microsoft'
],
28, 78
)
]
check_detection(expected, test_file)
def test_copyright_3a3b02ce_0(self):
# this is a certificate and the actual copyright holder is not clear:
# could be either Wisekey or OISTE Foundation.
test_file = self.get_test_loc('copyrights/copyright_3a3b02ce_0-a_b_ce.0')
expected = [([u'Copyright (c) 2005', u'Copyright (c) 2005'], 28, 66)]
check_detection(expected, test_file)
def test_copyright_ABC_cpp(self):
test_file = self.get_test_loc('copyrights/copyright_ABC_cpp-Case_cpp.cpp')
expected = [([u'Copyright (c) ABC Company'], 12, 12)]
check_detection(expected, test_file)
def test_copyright_ABC_file_cpp(self):
test_file = self.get_test_loc('copyrights/copyright_ABC_file_cpp-File_cpp.cpp')
expected = [([u'Copyright (c) ABC Company'], 12, 12)]
check_detection(expected, test_file)
def test_copyright_heunrich_c(self):
test_file = self.get_test_loc('copyrights/copyright_heunrich_c-c.c')
expected = [([u'Copyright (c) 2000 HEUNRICH HERTZ INSTITUTE'], 5, 5)]
check_detection(expected, test_file)
def test_copyright_isc(self):
test_file = self.get_test_loc('copyrights/copyright_isc-c.c')
expected = [([u'Copyright (c) 1998-2000 The Internet Software Consortium.'], 1, 3)]
check_detection(expected, test_file)
def test_copyright_sample_py(self):
test_file = self.get_test_loc('copyrights/copyright_sample_py-py.py')
expected = [([u'COPYRIGHT 2006'], 6, 7)]
check_detection(expected, test_file)
def test_copyright_abc(self):
test_file = self.get_test_loc('copyrights/copyright_abc')
expected = [([u'Copyright (c) 2006 abc.org'], 1, 2)]
check_detection(expected, test_file)
def test_copyright_abc_loss_of_holder_c(self):
test_file = self.get_test_loc('copyrights/copyright_abc_loss_of_holder_c-c.c')
expected = [([u'copyright abc 2001'], 1, 2)]
check_detection(expected, test_file)
def test_copyright_abiword_common_copyright(self):
test_file = self.get_test_loc('copyrights/copyright_abiword_common_copyright-abiword_common_copyright.copyright')
expected = [
([u'Copyright (c) 1998- AbiSource, Inc.'], 17, 17),
([u'Copyright (c) 2009 Masayuki Hatta',
u'Copyright (c) 2009 Patrik Fimml <patrik@fimml.at>'],
41, 42),
]
check_detection(expected, test_file)
def test_copyright_acme_c(self):
test_file = self.get_test_loc('copyrights/copyright_acme_c-c.c')
expected = [([u'Copyright (c) 2000 ACME, Inc.'], 1, 1)]
check_detection(expected, test_file)
def test_copyright_activefieldattribute_cs(self):
test_file = self.get_test_loc('copyrights/copyright_activefieldattribute_cs-ActiveFieldAttribute_cs.cs')
expected = [([u'Web Applications Copyright 2009 - Thomas Hansen thomas@ra-ajax.org.'], 2, 5)]
check_detection(expected, test_file)
def test_copyright_addr_c(self):
test_file = self.get_test_loc('copyrights/copyright_addr_c-addr_c.c')
expected = [
([u'Copyright 1999 Cornell University.'], 2, 4),
([u'Copyright 2000 Jon Doe.'], 5, 5)
]
check_detection(expected, test_file)
def test_copyright_adler_inflate_c(self):
test_file = self.get_test_loc('copyrights/copyright_adler_inflate_c-inflate_c.c')
expected = [([u'Not copyrighted 1992 by Mark Adler'], 1, 2)]
check_detection(expected, test_file)
def test_copyright_aleal(self):
test_file = self.get_test_loc('copyrights/copyright_aleal-c.c')
expected = [([u'copyright (c) 2006 by aleal'], 2, 2)]
check_detection(expected, test_file)
def test_copyright_andre_darcy(self):
test_file = self.get_test_loc('copyrights/copyright_andre_darcy-c.c')
expected = [
([u'Copyright (c) 1995, Pascal Andre (andre@via.ecp.fr).'], 2, 6),
([u"copyright 1997, 1998, 1999 by D'Arcy J.M. Cain (darcy@druid.net)"], 25, 26)
]
check_detection(expected, test_file)
def test_copyright_android_c(self):
test_file = self.get_test_loc('copyrights/copyright_android_c-c.c')
expected = [
([u'Copyright (c) 2009 The Android Open Source Project'], 2, 2),
([u'Copyright 2003-2005 Colin Percival'], 23, 24)
]
check_detection(expected, test_file)
def test_copyright_apache_notice(self):
test_file = self.get_test_loc('copyrights/copyright_apache_notice-NOTICE')
expected = [
([u'Copyright 1999-2006 The Apache Software Foundation'], 6, 7),
([u'Copyright 1999-2006 The Apache Software Foundation'], 16, 17),
([u'Copyright 2001-2003,2006 The Apache Software Foundation.'], 27, 28),
([u'copyright (c) 2000 World Wide Web Consortium'], 33, 34)
]
check_detection(expected, test_file)
def test_copyright_aptitude_copyright_label(self):
test_file = self.get_test_loc('copyrights/copyright_aptitude_copyright_label-aptitude_copyright_label.label')
expected = [([u'Copyright 1999-2005 Daniel Burrows <dburrows@debian.org>'], 1, 1)]
check_detection(expected, test_file)
def test_copyright_atheros_spanning_lines(self):
test_file = self.get_test_loc('copyrights/copyright_atheros_spanning_lines-py.py')
expected = [
([u'Copyright (c) 2000 Atheros Communications, Inc.'], 2, 2),
([u'Copyright (c) 2001 Atheros Communications, Inc.'], 3, 3),
([u'Copyright (c) 1994-1997 by Intel Corporation.'], 8, 11)
]
check_detection(expected, test_file)
def test_copyright_att_in_c(self):
test_file = self.get_test_loc('copyrights/copyright_att_in_c-9_c.c')
expected = [([u'Copyright (c) 1991 by AT&T.'], 5, 5)]
check_detection(expected, test_file)
def test_copyright_audio_c(self):
test_file = self.get_test_loc('copyrights/copyright_audio_c-c.c')
expected = [([u'copyright (c) 1995, AudioCodes, DSP Group, France Telecom, Universite de Sherbrooke.'], 2, 4)]
check_detection(expected, test_file)
def test_copyright_babkin_txt(self):
test_file = self.get_test_loc('copyrights/copyright_babkin_txt.txt')
expected = [
([u'Copyright (c) North',
u'Copyright (c) South',
u'Copyright (c) 2001 by the TTF2PT1 project',
u'Copyright (c) 2001 by Sergey Babkin'
], 1, 5)
]
check_detection(expected, test_file)
def test_copyright_blender_debian(self):
test_file = self.get_test_loc('copyrights/copyright_blender_debian-blender_copyright.copyright')
expected = [
([u'Copyright (c) 2002-2008 Blender Foundation'], 8, 11),
([u'Copyright (c) 2004-2005 Masayuki Hatta <mhatta@debian.org>',
u'(c) 2005-2007 Florian Ernst <florian@debian.org>',
u'(c) 2007-2008 Cyril Brulebois <kibi@debian.org>'],
30, 35)
]
check_detection(expected, test_file)
|
retrography/scancode-toolkit
|
tests/cluecode/test_copyrights_lines.py
|
Python
|
apache-2.0
| 11,241
|
[
"VisIt"
] |
f11883c120eb894d3fb2ece512dca81c02215fc66c344e515db42fa16e07910c
|
r"""Efficient implementation of the Gaussian divergence-free kernel."""
from time import time
from pympler.asizeof import asizeof
from numpy.random import rand, seed
from numpy.linalg import norm
from numpy import dot, zeros, logspace, log10, matrix, eye  # builtin int/float are used below; the numpy aliases are deprecated
from scipy.sparse.linalg import LinearOperator
from sklearn.kernel_approximation import RBFSampler
from matplotlib.pyplot import savefig, subplots, tight_layout
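# _rebase (below) lifts the scalar RBF random features phiX to matrix-valued,
# divergence-free features: for each random weight w (columns of W, with norms
# Wn) it forms the block |w| * I - w w^T / |w| and flattens the result to 2-D
# (shape (n_samples * d, d * D), reading off the final reshape).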
def _rebase(phiX, W, Wn):
return (phiX.reshape((phiX.shape[0], 1, 1, phiX.shape[1])) *
(eye(W.shape[1]).reshape(1, W.shape[1], W.shape[1], 1) * Wn -
W * W.reshape(1, 1, W.shape[1], phiX.shape[1]) / Wn)).reshape(
(-1, W.shape[1] * Wn.shape[3]))
def NaiveDivergenceFreeGaussianORFF(X, gamma=1.,
D=100, eps=1e-5, random_state=0):
r"""Return the Naive ORFF map associated with the data X.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
Samples.
gamma : {float},
Gamma parameter of the RBF kernel.
D : {integer},
Number of random features.
eps : {float},
Cutoff threshold for the singular values of A.
random_state : {integer},
Seed of the generator.
Returns
-------
\tilde{\Phi}(X) : array
"""
phi_s = RBFSampler(gamma=gamma, n_components=D,
random_state=random_state)
phiX = _rebase(phi_s.fit_transform(X),
phi_s.random_weights_.reshape((1, -1, 1, D)),
norm(phi_s.random_weights_, axis=0).reshape((1, 1, 1, -1)))
return matrix(phiX)
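# Minimal usage sketch (assumption: only that numpy and scikit-learn are
# installed; this is not part of the original benchmark):
#
#   X = rand(10, 3)
#   phiX = NaiveDivergenceFreeGaussianORFF(X, gamma=1., D=50)
#   theta = rand(phiX.shape[1], 1)
#   y = phiX * theta  # dense matrix-vector product with the explicit ORFF map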
def EfficientDivergenceFreeGaussianORFF(X, gamma=1.,
D=100, eps=1e-5, random_state=0):
r"""Return the Efficient ORFF map associated with the data X.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
Samples.
gamma : {float},
Gamma parameter of the RBF kernel.
D : {integer},
Number of random features.
eps : {float},
Cutoff threshold for the singular values of A.
random_state : {integer},
Seed of the generator.
Returns
-------
    \tilde{\Phi}(X) : LinearOperator
"""
phi_s = RBFSampler(gamma=gamma, n_components=D,
random_state=random_state)
phiX = phi_s.fit_transform(X)
W = phi_s.random_weights_.reshape((1, -1, 1, phiX.shape[1]))
Wn = norm(phi_s.random_weights_, axis=0).reshape((1, 1, 1, -1))
return LinearOperator((phiX.shape[0] * X.shape[1],
phiX.shape[1] * X.shape[1]),
matvec=lambda b: dot(_rebase(phiX, W, Wn), b),
rmatvec=lambda r: dot(_rebase(phiX, W, Wn).T, r),
dtype=float)
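# Note: unlike the naive variant, this returns a scipy LinearOperator, so the
# ORFF matrix is never materialised; matvec/rmatvec re-run _rebase on demand,
# trading computation for memory (the trade-off that main() measures).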
def main():
r"""Plot figure: Efficient decomposable gaussian ORFF."""
N = 100 # Number of points
dmax = 100 # Input dimension
D = 100 # Number of random features
seed(0)
R, T = 10, 10
time_Efficient, mem_Efficient = zeros((R, T, 2)), zeros((R, T))
time_naive, mem_naive = zeros((R, T, 2)), zeros((R, T))
for i, d in enumerate(logspace(0, log10(dmax), T)):
X = rand(N, int(d))
# Perform \Phi(X)^T \theta with Efficient implementation
for j in range(R):
start = time()
            phiX1 = EfficientDivergenceFreeGaussianORFF(X, D=D)  # keyword: positionally D would bind to gamma
time_Efficient[j, i, 0] = time() - start
theta = rand(phiX1.shape[1], 1)
start = time()
phiX1 * theta
time_Efficient[j, i, 1] = time() - start
mem_Efficient[j, i] = asizeof(phiX1, code=True)
# Perform \Phi(X)^T \theta with naive implementation
for j in range(R):
start = time()
            phiX2 = NaiveDivergenceFreeGaussianORFF(X, D=D)  # keyword: positionally D would bind to gamma
time_naive[j, i, 0] = time() - start
theta = rand(phiX2.shape[1], 1)
start = time()
phiX2 * theta
time_naive[j, i, 1] = time() - start
mem_naive[j, i] = asizeof(phiX2, code=True)
# Plot
f, axes = subplots(1, 3, figsize=(10, 4), sharex=True, sharey=False)
axes[0].errorbar(logspace(0, log10(dmax), T).astype(int),
time_Efficient[:, :, 0].mean(axis=0),
time_Efficient[:, :, 0].std(axis=0),
label='Efficient decomposable ORFF')
axes[0].errorbar(logspace(0, log10(dmax), T).astype(int),
time_naive[:, :, 0].mean(axis=0),
time_naive[:, :, 0].std(axis=0),
label='Naive decomposable ORFF')
axes[1].errorbar(logspace(0, log10(dmax), T).astype(int),
time_Efficient[:, :, 1].mean(axis=0),
time_Efficient[:, :, 1].std(axis=0),
label='Efficient decomposable ORFF')
axes[1].errorbar(logspace(0, log10(dmax), T).astype(int),
time_naive[:, :, 1].mean(axis=0),
time_naive[:, :, 1].std(axis=0),
label='Naive decomposable ORFF')
axes[2].errorbar(logspace(0, log10(dmax), T).astype(int),
mem_Efficient[:, :].mean(axis=0),
mem_Efficient[:, :].std(axis=0),
label='Efficient decomposable ORFF')
axes[2].errorbar(logspace(0, log10(dmax), T).astype(int),
mem_naive[:, :].mean(axis=0),
mem_naive[:, :].std(axis=0),
label='Naive decomposable ORFF')
axes[0].set_xscale('log')
axes[0].set_yscale('log')
axes[1].set_xscale('log')
axes[1].set_yscale('log')
axes[2].set_xscale('log')
axes[2].set_yscale('log')
axes[0].set_xlabel(r'$p=\dim(\mathcal{Y})$')
axes[1].set_xlabel(r'$p=\dim(\mathcal{Y})$')
axes[2].set_xlabel(r'$p=\dim(\mathcal{Y})$')
axes[0].set_ylabel(r'time (s)')
axes[2].set_ylabel(r'memory (bytes)')
axes[0].set_title(r'Preprocessing time')
axes[1].set_title(r'$\widetilde{\Phi}(X)^T \theta$ computation time')
axes[2].set_title(r'$\widetilde{\Phi}(X)^T$ required memory')
axes[0].legend(loc=2)
tight_layout()
savefig('efficient_divfree_gaussian.pgf', bbox_inches='tight')
if __name__ == "__main__":
main()
|
RomainBrault/JMLR-ORFF
|
src/efficient_divfree_gaussian.py
|
Python
|
unlicense
| 6,300
|
[
"Gaussian"
] |
1be3c459f582f7ae042b6948f095a0909078986e4eb0ad0b2e62053599721ac0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-31 11:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtail_personalisation', '0009_auto_20170531_0428'),
]
operations = [
migrations.AlterModelOptions(
name='dayrule',
options={'verbose_name': 'Day Rule'},
),
migrations.AlterModelOptions(
name='devicerule',
options={'verbose_name': 'Device Rule'},
),
migrations.AlterModelOptions(
name='queryrule',
options={'verbose_name': 'Query Rule'},
),
migrations.AlterModelOptions(
name='referralrule',
options={'verbose_name': 'Referral Rule'},
),
migrations.AlterModelOptions(
name='timerule',
options={'verbose_name': 'Time Rule'},
),
migrations.AlterModelOptions(
name='userisloggedinrule',
options={'verbose_name': 'Logged in Rule'},
),
migrations.AlterModelOptions(
name='visitcountrule',
options={'verbose_name': 'Visit count Rule'},
),
migrations.AlterField(
model_name='referralrule',
name='regex_string',
field=models.TextField(verbose_name='Regular expression to match the referrer'),
),
]
|
LabD/wagtail-personalisation
|
src/wagtail_personalisation/migrations/0010_auto_20170531_1101.py
|
Python
|
mit
| 1,464
|
[
"VisIt"
] |
62e7fbf472bf0009e9c86571ca56cef78023c63fc0c0edda454c3ce0d3e64b64
|
#!/usr/bin/env python
import vtk
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer = vtk.vtkRenderer()
renWin.AddRenderer(renderer)
src1 = vtk.vtkSphereSource()
src1.SetRadius(5)
src1.SetPhiResolution(20)
src1.SetThetaResolution(20)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(src1.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Load the material. Here, we are loading a material
# defined in the Vtk Library. One can also specify
# a filename to a material description xml.
actor.GetProperty().LoadMaterial("GLSLTwisted")
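# A hypothetical alternative, loading a material description from disk
# (the path below is illustrative, not shipped with VTK):
# actor.GetProperty().LoadMaterial("materials/MyTwistedMaterial.xml")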
# Turn shading on. Otherwise, shaders are not used.
actor.GetProperty().ShadingOn()
# Pass a shader variable needed by GLSLTwisted.
actor.GetProperty().AddShaderVariable("Rate",1.0)
renderer.AddActor(actor)
renWin.Render()
renderer.GetActiveCamera().Azimuth(-50)
renderer.GetActiveCamera().Roll(70)
renWin.Render()
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Rendering/Core/Testing/Python/TestGLSLShader.py
|
Python
|
bsd-3-clause
| 970
|
[
"VTK"
] |
4cbb33b3f236b08233e17c68bde9fe54bc4dad4b15572d1ab6ebfee10cb060bf
|
import pytest
import numpy as np
import numpy.testing as npt
import scipy.sparse as spp
from itertools import product
import indigo
from indigo.backends import available_backends
BACKENDS = available_backends()
@pytest.mark.parametrize("backend,L,M,N,K,density",
list(product( BACKENDS, [3,4], [5,6], [7,8], [1,8,9,17], [1,0.01,0.1,0.5] ))
)
def test_Realize_Product(backend, L, M, N, K, density):
b = backend()
A0_h = indigo.util.randM(L, M, density)
A1_h = indigo.util.randM(M, N, density)
A0 = b.SpMatrix(A0_h, name='A0')
A1 = b.SpMatrix(A1_h, name='A1')
A = A0 * A1
A = A.realize()
# forward
x = b.rand_array((N,K))
y = b.rand_array((L,K))
A.eval(y, x)
x_h = x.to_host()
y_act = y.to_host()
y_exp = A0_h @ (A1_h @ x_h)
npt.assert_allclose(y_act, y_exp, rtol=1e-3)
# adjoint
x = b.rand_array((L,K))
y = b.rand_array((N,K))
A.H.eval(y, x)
x_h = x.to_host()
y_act = y.to_host()
y_exp = A1_h.H @ (A0_h.H @ x_h)
npt.assert_allclose(y_act, y_exp, rtol=1e-3)
# shape
assert A.shape == (L,N)
assert A.H.shape == (N,L)
# dtype
assert A.dtype == np.dtype('complex64')
@pytest.mark.parametrize("backend", BACKENDS )
def test_Realize_HStack(backend):
from indigo.operators import SpMatrix
b = backend()
x = b.Eye(4)
y = b.Eye(4)
z = b.HStack((x,y))
zr = z.realize()
assert isinstance(zr, SpMatrix)
@pytest.mark.parametrize("backend", BACKENDS )
def test_Realize_BlockDiag(backend):
from indigo.operators import SpMatrix
b = backend()
x = b.Eye(4)
y = b.Eye(4)
z = b.BlockDiag((x,y))
zr = z.realize()
assert isinstance(zr, SpMatrix)
@pytest.mark.parametrize("backend", BACKENDS )
def test_DistributeKroniOverProd(backend):
from indigo.operators import Product, Kron
from indigo.transforms import DistributeKroniOverProd
b = backend()
x = b.Eye(4)
y = b.Eye(4)
z = b.KronI(2, x*y)
z2 = DistributeKroniOverProd().visit(z)
assert isinstance(z2, Product)
assert isinstance(z2.left, Kron)
assert isinstance(z2.right, Kron)
@pytest.mark.parametrize("backend", BACKENDS )
def test_DistributeAdjointOverProd(backend):
from indigo.operators import Product, Adjoint
from indigo.transforms import DistributeAdjointOverProd
b = backend()
x = b.Eye(4)
y = b.Eye(4)
z = b.Adjoint(x*y)
z2 = DistributeAdjointOverProd().visit(z)
assert isinstance(z2, Product)
assert isinstance(z2.left, Adjoint)
assert isinstance(z2.right, Adjoint)
@pytest.mark.parametrize("backend", BACKENDS )
def test_LiftUnscaledFFTs(backend):
from indigo.operators import Product, Adjoint
from indigo.transforms import LiftUnscaledFFTs
b = backend()
s = b.Eye(4)
f = b.UnscaledFFT((2,2), dtype=s.dtype)
z = (f*s)*(s*f)
z2 = LiftUnscaledFFTs().visit(z)
@pytest.mark.parametrize("backend,M",
list(product( BACKENDS, [3,4]))
)
def test_Realize_Eye(backend, M):
from indigo.operators import SpMatrix
b = backend()
A = b.Eye(M, dtype=np.complex64)
A = A.realize()
assert isinstance(A, SpMatrix)
@pytest.mark.parametrize("backend,M",
list(product( BACKENDS, [3,4]))
)
def test_Realize_Scale(backend, M):
from indigo.operators import SpMatrix
b = backend()
A = 3 * b.Eye(M, dtype=np.complex64)
A = A.realize()
assert isinstance(A, SpMatrix)
@pytest.mark.parametrize("backend,M,N",
list(product( BACKENDS, [3,4], [5,6]))
)
def test_Realize_One(backend, M, N):
from indigo.operators import SpMatrix
b = backend()
A = b.One((M,N))
A = A.realize()
assert isinstance(A, SpMatrix)
assert np.all(A._matrix.data == 1)
@pytest.mark.parametrize("backend,M,N",
list(product( BACKENDS, [3,4], [5,6]))
)
def test_SpyOut(backend, M, N):
matplotlib = pytest.importorskip('matplotlib')
matplotlib.use('Agg')
from indigo.operators import SpMatrix
from indigo.transforms import SpyOut
b = backend()
A = b.One((M,N)).realize()
SpyOut().visit(A)
@pytest.mark.parametrize("backend,M,N",
list(product( BACKENDS, [3,4], [5,6]))
)
def test_GroupRLP(backend, M, N):
from indigo.operators import SpMatrix
from indigo.transforms import GroupRightLeaningProducts
b = backend()
A = b.One((M,N)).realize()
B = A * A.H * A
GroupRightLeaningProducts().visit(B)
@pytest.mark.parametrize("backend,M,N",
list(product( BACKENDS, [3,4], [5,6]))
)
def test_MakeRL(backend, M, N):
from indigo.operators import SpMatrix
from indigo.transforms import MakeRightLeaning
b = backend()
A = b.One((M,N)).realize()
B = A * A.H * A
MakeRightLeaning().visit(B)
@pytest.mark.parametrize("backend,M,N",
list(product( BACKENDS, [3,4], [5,6]))
)
def test_LiftUFFTS(backend, M, N):
from indigo.transforms import LiftUnscaledFFTs
b = backend()
A = b.One((M,N)).realize()
B = b.KronI(3, A)
LiftUnscaledFFTs().visit(B)
@pytest.mark.parametrize("backend,M,N",
list(product( BACKENDS, [3,4], [5,6]))
)
def test_LiftUFFTS2(backend, M, N):
from indigo.operators import SpMatrix
from indigo.transforms import LiftUnscaledFFTs
b = backend()
A = b.UnscaledFFT((M,N), dtype=np.complex64).realize().H
LiftUnscaledFFTs().visit(A)
|
mbdriscoll/indigo
|
indigo/test_transforms.py
|
Python
|
bsd-3-clause
| 5,323
|
[
"VisIt"
] |
ee51300fcd0fffae1bf41ba72d0badb4077e4f9d885c6a0fd3662ec8a93a9726
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Qgis(CMakePackage):
"""QGIS is a free and open-source cross-platform desktop geographic
information system application that supports viewing, editing, and
analysis of geospatial data.
"""
homepage = "https://qgis.org"
url = "https://qgis.org/downloads/qgis-3.8.1.tar.bz2"
maintainers = ['adamjstewart', 'Sinan81']
version('3.14.16', sha256='c9915c2e577f1812a2b35b678b123c58407e07824d73e5ec0dda13db7ca75c04')
version('3.14.0', sha256='1b76c5278def0c447c3d354149a2afe2562ac26cf0bcbe69b9e0528356d407b8')
version('3.12.3', sha256='c2b53815f9b994e1662995d1f25f90628156b996758f5471bffb74ab29a95220')
version('3.12.2', sha256='501f81715672205afd2c1a289ffc765aff96eaa8ecb49d079a58ef4d907467b8')
version('3.12.1', sha256='a7dc7af768b8960c08ce72a06c1f4ca4664f4197ce29c7fe238429e48b2881a8')
version('3.12.0', sha256='19e9c185dfe88cad7ee6e0dcf5ab7b0bbfe1672307868a53bf771e0c8f9d5e9c')
# Prefer latest long term release
version('3.10.10', sha256='e21a778139823fb6cf12e4a38f00984fcc060f41abcd4f0af83642d566883839', preferred=True)
version('3.10.7', sha256='f6c02489e065bae355d2f4374b84a1624379634c34a770b6d65bf38eb7e71564')
version('3.10.6', sha256='a96791bf6615e4f8ecdbbb9a90a8ef14a12459d8c5c374ab22eb5f776f864bb5')
version('3.10.5', sha256='f3e1cc362941ec69cc21062eeaea160354ef71382b21dc4b3191c315447b4ce1')
version('3.10.4', sha256='a032e2b8144c2fd825bc26766f586cfb1bd8574bc72efd1aa8ce18dfff8b6c9f')
version('3.10.3', sha256='0869704df9120dd642996ff1ed50213ac8247650aa0640b62f8c9c581c05d7a7')
version('3.10.2', sha256='381cb01a8ac2f5379a915b124e9c830d727d2c67775ec49609c7153fe765a6f7')
version('3.10.1', sha256='466ac9fad91f266cf3b9d148f58e2adebd5b9fcfc03e6730eb72251e6c34c8ab')
version('3.10.0', sha256='25eb1c41d9fb922ffa337a720dfdceee43cf2d38409923f087c2010c9742f012')
version('3.8.3', sha256='3cca3e8483bc158cb8e972eb819a55a5734ba70f2c7da28ebc485864aafb17bd')
version('3.8.2', sha256='4d682f7625465a5b3596b3f7e83eddad86a60384fead9c81a6870704baffaddd')
version('3.8.1', sha256='d65c8e1c7471bba46f5017f261ebbef81dffb5843a24f0e7713a00f70785ea99')
version('3.4.15', sha256='81c93b72adbea41bd765294c0cdb09476a632d8b3f90101abc409ca9ea7fb04d')
version('3.4.14', sha256='e138716c7ea84011d3b28fb9c75e6a79322fb66f532246393571906a595d7261')
variant('3d', default=False, description='Build QGIS 3D library')
variant('analysis', default=True, description='Build QGIS analysis library')
variant('apidoc', default=False, description='Build QGIS API doxygen documentation')
variant('astyle', default=False, description='Contribute QGIS with astyle')
variant('bindings', default=True, description='Build Python bindings')
variant('clang_tidy', default=False, description='Use Clang tidy')
variant('core', default=True, description='Build QGIS Core')
variant('custom_widgets', default=False, description='Build QGIS custom widgets for Qt Designer')
variant('desktop', default=True, description='Build QGIS desktop')
variant('georeferencer', default=True, description='Build GeoReferencer plugin')
variant('globe', default=False, description='Build Globe plugin')
variant('grass7', default=False, description='Build with GRASS providers and plugin')
variant('gui', default=True, description='Build QGIS GUI library and everything built on top of it')
    variant('internal_mdal', default=True, description='Build with MDAL support')
variant('internal_o2', default=True, description='Download and locally include source of o2 library')
variant('oauth2_plugin', default=True, description='Build OAuth2 authentication method plugin')
variant('oracle', default=False, description='Build with Oracle support')
    variant('postgresql', default=True, description='Build with PostgreSQL support')
variant('py_compile', default=False, description='Byte compile Python modules in staged or installed locations')
variant('qsciapi', default=True, description='Generate PyQGIS QScintilla2 API')
variant('qspatialite', default=False, description='Build QSpatialite sql driver')
variant('qt5serialport', default=True, description='Try Qt5SerialPort for GPS positioning')
variant('qtmobility', default=False, description='Build QtMobility related code')
variant('qtwebkit', default=False, description='Enable QtWebkit Support')
variant('quick', default=False, description='Build QGIS Quick library')
variant('qwtpolar', default=False, description='Build QwtPolar')
variant('server', default=False, description='Build QGIS server')
variant('staged_plugins', default=True, description='Stage-install core Python plugins to run from build directory')
variant('thread_local', default=True, description='Use std::thread_local')
variant('txt2tags', default=False, description='Generate PDF for txt2tags documentation')
# Ref. for dependencies:
# http://htmlpreview.github.io/?https://raw.github.com/qgis/QGIS/master/doc/INSTALL.html
# https://github.com/qgis/QGIS/blob/master/INSTALL
depends_on('exiv2')
depends_on('expat@1.95:')
depends_on('gdal@2.1.0: +python', type=('build', 'link', 'run'))
depends_on('geos@3.4.0:')
depends_on('libspatialindex')
depends_on('libspatialite@4.2.0:')
depends_on('libzip')
depends_on('libtasn1')
depends_on('proj@4.4.0:')
depends_on('py-psycopg2', type=('build', 'run')) # TODO: is build dependency necessary?
depends_on('py-pyqt4', when='@2')
depends_on('py-pyqt5@5.3:', when='@3')
depends_on('py-requests', type=('build', 'run')) # TODO: is build dependency necessary?
depends_on('python@2.7:2.8', type=('build', 'run'), when='@2')
depends_on('python@3.0.0:', type=('build', 'run'), when='@3')
depends_on('qca@2.2.1')
depends_on('qjson')
depends_on('qscintilla +python')
depends_on('qt+dbus')
depends_on('qtkeychain@0.5:', when='@3:')
depends_on('qwt@5:')
depends_on('qwtpolar')
depends_on('sqlite@3.0.0: +column_metadata')
# Runtime python dependencies, not mentioned in install instructions
depends_on('py-pyyaml', type='run')
depends_on('py-owslib', type='run')
depends_on('py-jinja2', type='run')
depends_on('py-pygments', type='run')
# optionals
depends_on('postgresql@8:', when='+postgresql') # for PostGIS support
depends_on('gsl', when='+georeferencer') # for georeferencer
# grass@7.8.1 is the first version that supports proj@6
depends_on('grass@7:', type=('build', 'link', 'run'), when='+grass7') # for georeferencer
# The below dependencies are shown in cmake config
# hdf5 and netcdf-c together run afoul of a concretizer bug.
# netcdf-c already depends on hdf5
# depends_on('hdf5').
depends_on('netcdf-c')
# build
depends_on('cmake@3.0.0:', type='build')
depends_on('flex@2.5.6:', type='build')
depends_on('bison@2.4:', type='build')
depends_on('pkgconfig', type='build')
# Take care of conflicts using depends_on
depends_on('qt@5.9.0:5.12.99', when='@3.8')
depends_on('qt@5.9.0:', when='@3.10.0:')
depends_on('qtkeychain@:1.5.99', when='^qt@4')
depends_on('qt@:4', when='@2')
patch('pyqt5.patch', when='^qt@5')
def cmake_args(self):
spec = self.spec
args = []
# qtwebkit module was removed from qt as of version 5.6
# needs to be compiled as a separate package
args.extend([
'-DUSE_OPENCL=OFF',
# cmake couldn't determine the following paths
'-DEXPAT_LIBRARY={0}'.format(self.spec['expat'].libs),
'-DPOSTGRESQL_PREFIX={0}'.format(
self.spec['postgresql'].prefix),
'-DQSCINTILLA_INCLUDE_DIR=' +
self.spec['qscintilla'].prefix.include,
'-DQSCINTILLA_LIBRARY=' + self.spec['qscintilla'].prefix +
'/lib/libqscintilla2_qt5.so',
'-DLIBZIP_INCLUDE_DIR=' +
self.spec['libzip'].prefix.include,
'-DLIBZIP_CONF_INCLUDE_DIR=' +
self.spec['libzip'].prefix.lib.libzip.include,
'-DGDAL_CONFIG_PREFER_PATH=' +
self.spec['gdal'].prefix.bin,
'-DGEOS_CONFIG_PREFER_PATH=' +
self.spec['geos'].prefix.bin,
'-DGSL_CONFIG_PREFER_PATH=' + self.spec['gsl'].prefix.bin,
'-DPOSTGRES_CONFIG_PREFER_PATH=' +
self.spec['postgresql'].prefix.bin
])
args.extend([
'-DWITH_3D={0}'.format(
'TRUE' if '+3d' in spec else 'FALSE'),
'-DWITH_ANALYSIS={0}'.format(
'TRUE' if '+analysis' in spec else 'FALSE'),
'-DWITH_APIDOC={0}'.format(
'TRUE' if '+apidoc' in spec else 'FALSE'),
'-DWITH_ASTYLE={0}'.format(
'TRUE' if '+astyle' in spec else 'FALSE'),
'-DWITH_BINDINGS={0}'.format(
'TRUE' if '+bindings' in spec else 'FALSE'),
'-DWITH_CLANG_TIDY={0}'.format(
'TRUE' if '+clang_tidy' in spec else 'FALSE'),
'-DWITH_CORE={0}'.format(
'TRUE' if '+core' in spec else 'FALSE'),
'-DWITH_CUSTOM_WIDGETS={0}'.format(
'TRUE' if '+custom_widgets' in spec else 'FALSE'),
'-DWITH_DESKTOP={0}'.format(
'TRUE' if '+desktop' in spec else 'FALSE'),
'-DWITH_GEOREFERENCER={0}'.format(
'TRUE' if '+georeferencer' in spec else 'FALSE'),
'-DWITH_GLOBE={0}'.format(
'TRUE' if '+globe' in spec else 'FALSE'),
'-DWITH_GUI={0}'.format(
'TRUE' if '+gui' in spec else 'FALSE'),
'-DWITH_INTERNAL_MDAL={0}'.format(
'TRUE' if '+internal_mdal' in spec else 'FALSE'),
'-DWITH_INTERNAL_O2={0}'.format(
'ON' if '+internal_o2' in spec else 'OFF'),
'-DWITH_OAUTH2_PLUGIN={0}'.format(
'TRUE' if '+oauth2_plugin' in spec else 'FALSE'),
'-DWITH_ORACLE={0}'.format(
'TRUE' if '+oracle' in spec else 'FALSE'),
'-DWITH_POSTGRESQL={0}'.format(
'TRUE' if '+postgresql' in spec else 'FALSE'),
'-DWITH_PY_COMPILE={0}'.format(
'TRUE' if '+py_compile' in spec else 'FALSE'),
'-DWITH_QSCIAPI={0}'.format(
'TRUE' if '+qsciapi' in spec else 'FALSE'),
'-DWITH_QSPATIALITE={0}'.format(
'ON' if '+qspatialite' in spec else 'OFF'),
'-DWITH_QT5SERIALPORT={0}'.format(
'TRUE' if '+qt5serialport' in spec else 'FALSE'),
'-DWITH_QTMOBILITY={0}'.format(
'TRUE' if '+qtmobility' in spec else 'FALSE'),
'-DWITH_QTWEBKIT={0}'.format(
'ON' if '+qtwebkit' in spec else 'OFF'),
'-DWITH_QUICK={0}'.format(
'TRUE' if '+quick' in spec else 'FALSE'),
'-DWITH_QWTPOLAR={0}'.format(
'TRUE' if '+qwtpolar' in spec else 'FALSE'),
'-DWITH_SERVER={0}'.format(
'TRUE' if '+server' in spec else 'FALSE'),
'-DWITH_STAGED_PLUGINS={0}'.format(
'TRUE' if '+staged_plugins' in spec else 'FALSE'),
'-DWITH_THREAD_LOCAL={0}'.format(
'TRUE' if '+thread_local' in spec else 'FALSE'),
'-DWITH_TXT2TAGS_PDF={0}'.format(
            'TRUE' if '+txt2tags' in spec else 'FALSE'),  # matches the 'txt2tags' variant above
])
if '+grass7' in self.spec:
args.extend([
'-DWITH_GRASS7=ON',
'-DGRASS_PREFIX7={0}'.format(self.spec['grass'].prefix),
'-DGRASS_INCLUDE_DIR7={0}'.format(
self.spec['grass'].prefix.include)
])
else:
args.append('-DWITH_GRASS7=OFF')
return args
|
iulian787/spack
|
var/spack/repos/builtin/packages/qgis/package.py
|
Python
|
lgpl-2.1
| 12,602
|
[
"NetCDF"
] |
64ae9268dd3edd6d63ea8df201fa31ca918307e13b6dda6d5daf29eca523467a
|
#!/usr/bin/python -u
import os, sys, fnmatch
try:
import autotest.common as common
except ImportError:
import common
# do a basic check to see if pylint is even installed
try:
import pylint
from pylint.__pkginfo__ import version as pylint_version
except ImportError:
print "Unable to import pylint, it may need to be installed"
sys.exit(1)
major, minor, release = pylint_version.split('.')
pylint_version = float("%s.%s" % (major, minor))
pylintrc_path = os.path.expanduser('~/.pylintrc')
if not os.path.exists(pylintrc_path):
open(pylintrc_path, 'w').close()
# patch up the logilab module lookup tools to understand autotest_lib.* trash
import logilab.common.modutils
_ffm = logilab.common.modutils.file_from_modpath
def file_from_modpath(modpath, path=None, context_file=None):
if modpath[0] == "autotest_lib":
return _ffm(modpath[1:], path, context_file)
else:
return _ffm(modpath, path, context_file)
logilab.common.modutils.file_from_modpath = file_from_modpath
import pylint.lint
from pylint.checkers import imports
ROOT_MODULE = 'autotest_lib.'
# need to put autotest root dir on sys.path so pylint will be happy
autotest_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, autotest_root)
# patch up pylint import checker to handle our importing magic
RealImportsChecker = imports.ImportsChecker
class CustomImportsChecker(imports.ImportsChecker):
def visit_from(self, node):
if node.modname.startswith(ROOT_MODULE):
node.modname = node.modname[len(ROOT_MODULE):]
return RealImportsChecker.visit_from(self, node)
imports.ImportsChecker = CustomImportsChecker
# some files make pylint blow up, so make sure we ignore them
blacklist = ['/contrib/*', '/frontend/afe/management.py']
# only show errors
# there are two major sources of E1101/E1103/E1120 false positives:
# * common_lib.enum.Enum objects
# * DB model objects (scheduler models are the worst, but Django models also
# generate some errors)
if pylint_version >= 0.21:
pylint_base_opts = ['--disable=W,R,C,E1101,E1103,E1120,F0401']
else:
pylint_base_opts = ['--disable-msg-cat=warning,refactor,convention',
'--disable-msg=E1101,E1103,E1120,F0401']
pylint_base_opts += ['--reports=no',
'--include-ids=y']
file_list = sys.argv[1:]
if '--' in file_list:
index = file_list.index('--')
pylint_base_opts.extend(file_list[index+1:])
file_list = file_list[:index]
def check_file(file_path):
if not file_path.endswith('.py'):
return
for blacklist_pattern in blacklist:
if fnmatch.fnmatch(os.path.abspath(file_path),
'*' + blacklist_pattern):
return
pylint.lint.Run(pylint_base_opts + [file_path])
def visit(arg, dirname, filenames):
for filename in filenames:
check_file(os.path.join(dirname, filename))
def check_dir(dir_path):
os.path.walk(dir_path, visit, None)
if len(file_list) > 0:
for path in file_list:
if os.path.isdir(path):
check_dir(path)
else:
check_file(path)
else:
check_dir('.')
|
libvirt/autotest
|
utils/run_pylint.py
|
Python
|
gpl-2.0
| 3,195
|
[
"VisIt"
] |
18b4a1e14cce58ceb23d6043a4c1fbfdce4ccb114ded1b38e99b5777a3d9ae18
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Copyright (c) 2012 Aurélien Ginolhac, Mikkel Schubert, Hákon Jónsson
and Ludovic Orlando
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
plot and quantify damage patterns from a SAM/BAM file
:Authors: Aurélien Ginolhac, Mikkel Schubert, Hákon Jónsson, Ludovic Orlando
:Date: November 2012
:Type: tool
:Input: SAM/BAM
:Output: tabulated tables, pdf
"""
import logging
import time
import sys
import coloredlogs
import pysam
import mapdamage
import mapdamage.config
import mapdamage.statistics
import mapdamage.reader
# Log format for terminal and log-file output
_LOG_FORMAT = "%(asctime)s %(name)s %(levelname)s %(message)s"
# Shorter 'asctime' format for terminal output; log-file uses default dates and times
_TIMESTAMP_FORMAT = "%H:%M:%S"
def main(argv):
start_time = time.time()
coloredlogs.install(fmt=_LOG_FORMAT, datefmt=_TIMESTAMP_FORMAT)
logger = logging.getLogger(__name__)
try:
options = mapdamage.config.parse_args(argv)
except mapdamage.config.ArgumentError as error:
if error.message:
if error.argument_name:
logging.error("%s %s", error.argument_name, error.message)
elif error:
logging.error("%s", error.message)
logging.error("See 'mapDamage --help' for more information")
return 1
handler = logging.FileHandler(options.folder / "Runtime_log.txt")
formatter = logging.Formatter(_LOG_FORMAT)
handler.setFormatter(formatter)
handler.setLevel(options.log_level)
logging.getLogger().addHandler(handler)
logger.info("Started with the command: " + " ".join(sys.argv))
# plot using R if results folder already done
if options.plot_only:
if options.no_r:
logger.error("Cannot use plot damage patterns if R is missing, terminating")
return 1
else:
if not mapdamage.rscript.misincorporation_plot(options):
return 1
if not mapdamage.rscript.length_distribution_plot(options):
return 1
return 0
# run the Bayesian estimation if the matrix construction is done
if options.stats_only:
# does not work for very low damage levels
if mapdamage.statistics.check_table_and_warn_if_dmg_freq_is_low(options.folder):
# before running the Bayesian estimation get the base composition
path_to_basecomp = options.folder / "dnacomp_genome.csv"
if path_to_basecomp.is_file():
# Try to read the base composition file
mapdamage.composition.read_base_comp(path_to_basecomp)
else:
# Construct the base composition file
mapdamage.composition.write_base_comp(options.ref, path_to_basecomp)
if not mapdamage.rscript.perform_bayesian_estimates(options):
return 1
return 0
else:
logger.error("Cannot use the Bayesian estimation, terminating the program")
return 1
# fetch all references and associated lengths in nucleotides
try:
ref = pysam.FastaFile(options.ref)
except IOError as error:
logger.error("Could not open the reference file '%s': %e", options.ref, error)
raise
# rescale the qualities
if options.rescale_only:
logger.info("Starting rescaling...")
return mapdamage.rescale.rescale_qual(ref, options)
# open SAM/BAM file
reader = mapdamage.reader.BAMReader(
filepath=options.filename,
downsample_to=options.downsample,
downsample_seed=options.downsample_seed,
merge_libraries=options.merge_libraries,
)
if reader.is_stream and options.rescale:
        # rescaling is not possible on a stream, since we need to read it twice
logger.error("Cannot build model and rescale in one run when input is a pipe")
return 1
reflengths = reader.get_references()
# check if references in SAM/BAM are the same in the fasta reference file
fai_lengths = mapdamage.seq.read_fasta_index(str(options.ref) + ".fai")
if not fai_lengths:
return 1
elif not mapdamage.seq.compare_sequence_dicts(fai_lengths, reflengths):
return 1
# for misincorporation patterns, record mismatches
misincorp = mapdamage.statistics.MisincorporationRates(
libraries=reader.get_libraries(), length=options.length
)
# for fragmentation patterns, record base compositions
dnacomp = mapdamage.statistics.DNAComposition(
libraries=reader.get_libraries(), around=options.around, length=options.length
)
# for length distributions
lgdistrib = mapdamage.statistics.FragmentLengths(libraries=reader.get_libraries())
logger.info("Reading from '%s'", options.filename)
if options.minqual != 0:
logger.info("Filtering out bases with a Phred score < %d", options.minqual)
logger.info("Writing results to '%s/'", options.folder)
# main loop
counter = 0
warned_about_quals = False
for read in reader:
counter += 1
library = reader.get_sample_and_library(read)
# external coordinates 5' and 3' , 3' is 1-based offset
coordinate = mapdamage.align.get_coordinates(read)
# record aligned length for single-end reads
lgdistrib.update(read, library)
# fetch reference name, chromosome or contig names
chrom = reader.handle.getrname(read.tid)
(before, after) = mapdamage.align.get_around(
coordinate, chrom, reflengths, options.around, ref
)
refseq = ref.fetch(chrom, min(coordinate), max(coordinate)).upper()
# read.query contains aligned sequences while read.seq is the read itself
seq = read.query
# add gaps according to the cigar string, do it for qualities if filtering options is on
if not (options.minqual and read.qual):
if options.minqual and not warned_about_quals:
logger.warning(
"Reads without PHRED scores found; cannot filter by --min-basequal"
)
warned_about_quals = True
(seq, refseq) = mapdamage.align.align(read.cigar, seq, refseq)
else:
# add gaps to qualities and mask read and reference nucleotides if below desired threshold
(seq, _, refseq) = mapdamage.align.align_with_qual(
read.cigar, seq, read.qqual, options.minqual, refseq
)
# reverse complement read and reference when mapped reverse strand
if read.is_reverse:
refseq = mapdamage.seq.revcomp(refseq)
seq = mapdamage.seq.revcomp(seq)
beforerev = mapdamage.seq.revcomp(after)
after = mapdamage.seq.revcomp(before)
before = beforerev
# record soft clipping when present
misincorp.update_soft_clipping(read, library)
        # count misincorporations by comparing read and reference base by base
misincorp.update(read, seq, refseq, "5p", library)
# do the same with sequences align to 3'-ends
misincorp.update(read, reversed(seq), reversed(refseq), "3p", library)
# compute base composition for reads
dnacomp.update_read(read, options.length, library)
# compute base composition for genomic regions
dnacomp.update_reference(read, before, after, library)
if counter % 50000 == 0:
logger.debug("%10d filtered alignments processed", counter)
logger.debug("Done. %d filtered alignments processed", counter)
logger.debug("BAM read in %f seconds", time.time() - start_time)
# close file handles
reader.close()
# output results, write summary tables to disk
misincorp.write(options.folder / "misincorporation.txt")
dnacomp.write(options.folder / "dnacomp.txt")
lgdistrib.write(options.folder / "lgdistribution.txt")
# plot using R
if not options.no_r:
if not mapdamage.rscript.misincorporation_plot(options):
return 1
if not mapdamage.rscript.length_distribution_plot(options):
return 1
# raises a warning for very low damage levels
if not mapdamage.statistics.check_table_and_warn_if_dmg_freq_is_low(options.folder):
options.no_stats = True
# run the Bayesian estimation
if not options.no_stats:
# before running the Bayesian estimation get the base composition
mapdamage.composition.write_base_comp(
options.ref, options.folder / "dnacomp_genome.csv"
)
if not mapdamage.rscript.perform_bayesian_estimates(options):
return 1
# rescale the qualities
if options.rescale:
return mapdamage.rescale.rescale_qual(ref, options)
# need the fasta reference still open for rescaling
ref.close()
# log the time it took
logger.info("Successful run")
logger.debug("Run completed in %f seconds", time.time() - start_time)
return 0
def entry_point():
return main(sys.argv[1:])
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
ginolhac/mapDamage
|
mapdamage/main.py
|
Python
|
mit
| 10,167
|
[
"pysam"
] |
66bb266786fbe4c6eeaee48e2657dbf0497ce7ec63bb7bab2e5e0541fb6d3549
|
'''
This script quantifies the read counts per sample using HTSeq (htseq-count). To be run on osiris.
'''
import sys,os
import multiprocessing,multiprocessing.pool
def htseqCounter(sample):
'''
    This function builds and runs the htseq-count command for one sample.
'''
htseqExecutable='htseq-count'
flag1='-m union'
flag2='-f bam'
flag3='-t mRNA'
flag5='-i ID'
#flag4='-s yes'
flag4='-s no'
inputFile=bamFilesDir+sample+'/Aligned.sortedByCoord.out.bam'
outputDirection='> {}{}.txt'.format(countsDir,sample)
cmd=' '.join(['time ',htseqExecutable,flag1,flag2,flag3,flag4,flag5,inputFile,genomeAnnotationFile,outputDirection])
print()
print(cmd)
print()
os.system(cmd)
return None
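# For illustration only: for a hypothetical sample 'wt_1' the assembled command
# would look like (directories come from the user variables defined below):
#
#   time  htseq-count -m union -f bam -t mRNA -s no -i ID \
#       <bamFilesDir>/wt_1/Aligned.sortedByCoord.out.bam <genomeAnnotationFile> \
#       > <countsDir>/wt_1.txt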
###
### README
###
# 0. defining user variables
bamFilesDir='/Volumes/omics4tb2/alomana/projects/TLR/data/ecoli/bam/'
countsDir='/Volumes/omics4tb2/alomana/projects/TLR/data/ecoli/counts/'
genomeAnnotationFile='/Volumes/omics4tb2/alomana/projects/TLR/data/ecoli/genome/Escherichia_coli_str_k_12_substr_mg1655.ASM584v2.37.gff3'
numberOfThreads=6
# 1. defining the BAM files
samples=os.listdir(bamFilesDir)
print(samples)
# 2. calling HTSeq in a parallel environment
#for sample in samples:
# htseqCounter(sample)
# sys.exit()
hydra=multiprocessing.pool.Pool(numberOfThreads)
hydra.map(htseqCounter,samples)
|
adelomana/30sols
|
SI/extra/other_species/eco_27924019/star/readCounter.py
|
Python
|
gpl-3.0
| 1,333
|
[
"HTSeq"
] |
a3a1bd671be5e961f35cf6c9a2bf730ee69887065eca05e423d9f16ad5f24950
|
#!/bin/python
import moose
import math
context = moose.PyMooseBase.getContext()
def calc_esl(form, v, A, B, V0):
if ( form == 1 ):
return A * math.exp((v-V0)/B)
if ( form == 2 ):
return A / ( 1.0 + math.exp(( v - V0 ) / B ))
if ( form == 3):
if ( math.fabs( v - V0 ) < 1e-6 ):
v = v + 1e-6
return A*(v-V0)/(math.exp((v-V0)/B) - 1.0 )
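# The three forms correspond to the standard Hodgkin-Huxley rate shapes:
#   form 1: exponential  A * exp((v - V0) / B)
#   form 2: sigmoid      A / (1 + exp((v - V0) / B))
#   form 3: linoid       A * (v - V0) / (exp((v - V0) / B) - 1)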
def calc_Na_m_alpha(v):
form = 3
A = - 1.0e5
B = -0.010
V0 = -0.040
return calc_esl( form, v, A, B, V0)
def calc_Na_m_beta(v):
form = 1
A = 4.0e3
B = -0.018
V0 = - 0.065
return calc_esl(form, v, A, B, V0)
def calc_Na_h_alpha(v):
form = 1
A = 70.0
B = -0.020
V0 = - 0.035
return calc_esl(form, v, A, B, V0)
def calc_Na_h_beta(v):
form = 2
A = 1.0e3
B = -0.010
V0 = -0.035
return calc_esl(form, v, A, B, V0)
def calc_K_n_alpha(v):
form = 3
A = -1.0e4
B = -0.010
V0 = -0.055
return calc_esl(form, v, A, B, V0)
def calc_K_n_beta(v):
    form = 1
    # assumption: A was missing here, but calc_esl form 1 requires it; 125.0 is
    # the standard Hodgkin-Huxley beta_n rate (0.125 ms^-1 in the SI-style
    # units used by the other rate constants in this file)
    A = 125.0
    B = -0.080
    V0 = -0.065
    return calc_esl(form, v, A, B, V0)
def make_compartment(path, RA, RM, CM, EM, inject, diameter, length):
PI_D_L = math.pi*diameter*length
    Ra = 4.0 * RA * length / (math.pi * diameter * diameter)  # assumed: standard axial resistance 4*RA*l/(pi*d^2)
Rm = RM / PI_D_L
Cm = CM * PI_D_L
comp = moose.Compartment(path)
comp.Ra = Ra
comp.Rm = Rm
comp.Cm = Cm
comp.Em = EM
comp.inject = inject
comp.diameter = diameter
comp.length = length
comp.initVm = EM
ENa = 0.050
GNa = 1200
EK = - 0.077
GK = 360
Gbar_Na = GNa * PI_D_L
Gbar_K = GK * PI_D_L
NaChan = moose.HHChannel(path+'/Na')
NaChan.Ek = ENa
NaChan.Gbar = Gbar_Na
NaChan.Xpower = 3
NaChan.Ypower = 1
NaChan.X = 0.05293250
NaChan.Y = 0.59612067
KChan = moose.HHChannel(path+'/K')
KChan.Ek = EK
KChan.Gbar = Gbar_K
KChan.Xpower = 4
KChan.X = 0.31767695
parent = NaChan.parent
context.connect(parent, 'channel', NaChan, 'channel')
context.connect(parent, 'channel', KChan, 'channel')
VMIN = - 0.100
VMAX = 0.05
NDIVS = 150
Na_xGate_A = moose.Table(NaChan.path+'/xGate/A')
Na_xGate_A.xmin = VMIN
Na_xGate_A.xmax = VMAX
Na_xGate_A.xdivs = NDIVS
Na_xGate_B = moose.Table(NaChan.path+'/xGate/B')
Na_xGate_B.xmin = VMIN
Na_xGate_B.xmax = VMAX
Na_xGate_B.xdivs = NDIVS
Na_yGate_A = moose.Table(NaChan.path+'/yGate/A')
Na_yGate_A.xmin = VMIN
Na_yGate_A.xmax = VMAX
Na_yGate_A.xdivs = NDIVS
Na_yGate_B = moose.Table(NaChan.path+'/yGate/B')
Na_yGate_B.xmin = VMIN
Na_yGate_B.xmax = VMAX
Na_yGate_B.xdivs = NDIVS
K_xGate_A = moose.Table(KChan.path+'/xGate/A')
K_xGate_A.xmin = VMIN
K_xGate_A.xmax = VMAX
K_xGate_A.xdivs = NDIVS
K_xGate_B = moose.Table(KChan.path+'/xGate/B')
K_xGate_B.xmin = VMIN
K_xGate_B.xmax = VMAX
K_xGate_B.xdivs = NDIVS
v = VMIN
dv = (VMAX-VMIN)/NDIVS
for i in range(NDIVS+1):
Na_xGate_A[i] = calc_Na_m_alpha(v)
Na_xGate_B[i] = calc_Na_m_alpha(v) + calc_Na_m_beta(v)
Na_yGate_A[i] = calc_Na_h_alpha(v)
Na_yGate_B[i] = calc_Na_h_alpha(v) + calc_Na_h_beta(v)
K_xGate_A[i] = calc_K_n_alpha(v)
K_xGate_B[i] = calc_K_n_alpha(v)+calc_K_n_beta(v)
v = v + dv
def link_compartment(path1, path2):
    context.connect(moose.Id(path1), 'raxial', moose.Id(path2), 'axial')  # assumption: Id is the moose.Id class
|
BhallaLab/moose-thalamocortical
|
DEMOS/pymoose/rallpack/rall.3/util.py
|
Python
|
lgpl-2.1
| 3,536
|
[
"MOOSE"
] |
d2da738c582e5731db0a21345021588c3671db94686f628660646ab8a4e13580
|
"""Unit tests for the Jobbing mail module."""
import unittest
from unittest import mock
from bob_emploi.frontend.api import user_pb2
from bob_emploi.frontend.server import scoring
from bob_emploi.frontend.server.mail.test import campaign_helper
_SCORING_MODELS_EXCEPT_JOBBING = dict(scoring.SCORING_MODELS)
del _SCORING_MODELS_EXCEPT_JOBBING['advice-reorient-jobbing']
class JobbingTest(campaign_helper.CampaignTestBase):
"""Unit tests for the _get_jobbing_vars method."""
campaign_id = 'jobbing'
def setUp(self) -> None:
super().setUp()
self.user.profile.frustrations.append(user_pb2.RESUME)
self.user.profile.gender = user_pb2.MASCULINE
self.user.profile.name = 'Patrick'
self.user.profile.coaching_email_frequency = user_pb2.EMAIL_ONCE_A_MONTH
self.project.opened_strategies.add(strategy_id='diploma-free-job')
self.project.city.departement_id = '31'
self.project.city.ClearField('departement_name')
self.project.city.ClearField('departement_prefix')
self.project.target_job.masculine_name = 'Steward'
self.project.target_job.job_group.rome_id = 'M1601'
self.database.departements.insert_one({
'_id': '31',
'name': 'Haute-Garonne',
'prefix': 'en ',
})
self.database.local_diagnosis.insert_one({
'_id': '31:M1601',
'imt': {'yearlyAvgOffersPer10Candidates': 1},
})
self.database.reorient_jobbing.insert_one({
'_id': '31',
'departementJobStats': {
'jobs': [
{
'romeId': 'A1413',
'masculineName': 'Aide caviste',
'feminineName': 'Aide caviste',
'name': 'Aide caviste',
'marketScore': 10,
},
{
'romeId': 'A1401',
'feminineName': 'Aide arboricole',
'masculineName': 'Aide arboricole',
'name': 'Aide arboricole',
'marketScore': 3,
},
],
},
})
def test_basic_mail_blast(self) -> None:
"""Basic usage of a mail blast."""
self._assert_user_receives_campaign()
def test_not_started_strategy(self) -> None:
"""User did not started the diploma-free-job strategy."""
del self.project.opened_strategies[:]
self._assert_user_receives_focus(should_be_sent=False)
def test_basic_focus(self) -> None:
"""Basic usage."""
self._assert_user_receives_focus()
self._assert_has_default_vars()
self._assert_has_unsubscribe_url('changeEmailSettingsUrl', **{
'coachingEmailFrequency': 'EMAIL_ONCE_A_MONTH',
})
self._assert_has_status_update_link('statusUpdateUrl')
self._assert_has_logged_url('loginUrl', path='/projet/0')
self._assert_remaining_variables({
'inDepartement': 'en Haute-Garonne',
'ofJobName': 'de steward',
'jobs': [{'name': 'Aide caviste'}, {'name': 'Aide arboricole'}],
})
def test_undefined_project(self) -> None:
"""Undefined project."""
self.project.ClearField('target_job')
self._assert_user_receives_focus()
self.assertEqual('de definir votre projet professionnel', self._variables['ofJobName'])
@mock.patch.dict(scoring.SCORING_MODELS, _SCORING_MODELS_EXCEPT_JOBBING, clear=True)
def test_missing_scoring_model(self) -> None:
"""Our scoring model is missing."""
self._assert_user_receives_focus(should_be_sent=False)
def test_no_better_market(self) -> None:
"""User is looking for a job where there's no better other jobs."""
self.project.target_job.job_group.rome_id = 'M1602'
self.database.local_diagnosis.insert_one({
'_id': '31:M1602',
'imt': {'yearlyAvgOffersPer10Candidates': 12},
})
self._assert_user_receives_focus(should_be_sent=False)
if __name__ == '__main__':
unittest.main()
|
bayesimpact/bob-emploi
|
frontend/server/mail/test/jobbing_test.py
|
Python
|
gpl-3.0
| 4,215
|
[
"BLAST"
] |
c8739c085e1888280fc3c2df7ecb25ff4087756491eb5e0e8f720fc976874bcd
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAnnotationforge(RPackage):
"""Tools for building SQLite-based annotation data packages.
Provides code for generating Annotation packages and their databases.
Packages produced are intended to be used with AnnotationDbi."""
homepage = "https://bioconductor.org/packages/AnnotationForge"
git = "https://git.bioconductor.org/packages/AnnotationForge.git"
version('1.26.0', commit='5d181f32df1fff6446af64a2538a7d25c23fe46e')
version('1.24.0', commit='3e1fe863573e5b0f69f35a9ad6aebce11ef83d0d')
version('1.22.2', commit='8eafb1690c1c02f6291ccbb38ac633d54b8217f8')
version('1.20.0', commit='7b440f1570cb90acce8fe2fa8d3b5ac34f638882')
version('1.18.2', commit='44ca3d4ef9e9825c14725ffdbbaa57ea059532e1')
depends_on('r@2.7.0:', type=('build', 'run'))
depends_on('r-biocgenerics@0.15.10:', type=('build', 'run'))
depends_on('r-biobase@1.17.0:', type=('build', 'run'))
depends_on('r-annotationdbi@1.33.14:', type=('build', 'run'))
depends_on('r-dbi', type=('build', 'run'))
depends_on('r-rsqlite', type=('build', 'run'))
depends_on('r-xml', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-rcurl', type=('build', 'run'))
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-annotationforge/package.py
|
Python
|
lgpl-2.1
| 1,466
|
[
"Bioconductor"
] |
7f0e5cba409824d1385636ff8b8b5f807c7674b6b9deba0ffb8852e5e871791d
|
"""
== Region description ==
Regions serve for the selection of certain parts of the computational domain (i.e. of nodes and elements of a FE mesh). They are used to define boundary conditions, the domains of terms, materials, etc.
"""
from pyparsing import Literal, CaselessLiteral, Word, delimitedList,\
Group, Optional, ZeroOrMore, nums, alphas, alphanums,\
Combine, StringStart, StringEnd, Forward, oneOf, ParseException
op_codes = ['OA_SubN', 'OA_SubE', 'OA_AddN', 'OA_AddE',
'OA_IntersectN', 'OA_IntersectE']
eval_codes = ['E_NIR', 'E_NOS', 'E_NBF', 'E_EBF', 'E_EOG', 'E_NOG',
'E_ONIR', 'E_NI', 'E_EI1', 'E_EI2']
kw_codes = ['KW_All', 'KW_Region']
##
# 11.05.2006, c
def to_stack( stack ):
def push_first( str, loc, toks ):
if toks:
# print stack
stack.append( toks[0] )
# print toks, '->', stack
# print ''
return toks
return push_first
##
# 14.06.2006, c
# 15.06.2006
# 02.05.2007
def replace( what, keep = False ):
def _replace( str, loc, toks ):
ret = {'token' : what, 'orig' : []}
if keep:
ret['orig'] = list( toks[0] )
return ret
return _replace
##
# 02.05.2007, c
def replace_with_region( what, r_index ):
def _replace( str, loc, toks ):
ret = {'token' : what, 'orig' : []}
orig = toks[0]
r_orig = orig[r_index]
if isinstance( r_orig, dict ) and (r_orig['token'] == 'KW_Region'):
orig = list( orig[:r_index] ) + r_orig['orig']
ret['orig'] = orig
return ret
return _replace
##
# 14.06.2006, c
def join_tokens( str, loc, toks ):
# print toks
return [" ".join( toks[0] )]
##
# 14.06.2006, c
def visit_stack( stack, op_visitor, leaf_visitor ):
def visit( stack, level ):
op = stack.pop()
token = op['token']
if token in op_codes:
res2 = visit( stack, level + 1 )
res1 = visit( stack, level + 1 )
return op_visitor( level, op, res1, res2 )
elif token in eval_codes:
return leaf_visitor( level, op )
elif token in kw_codes:
return leaf_visitor( level, op )
else:
raise ValueError, token
return visit( stack, 0 )
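# visit_stack consumes the stack built by the parser in postfix fashion:
# binary region operators (op_codes) recurse into their two operand subtrees,
# while leaf selectors (eval_codes) and keywords (kw_codes) stop the recursion;
# print_op/print_leaf below are example visitors.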
##
# 14.06.2006, c
def print_op( level, op, item1, item2 ):
print level * ' ' + (': %s' % op)
##
# 14.06.2006, c
def print_leaf( level, op ):
print level * ' ' + ('< %s' % op)
##
# 14.06.2006, c
def print_stack( stack ):
visit_stack( stack, print_op, print_leaf )
##
# c: 13.06.2006, r: 14.07.2008
def create_bnf( stack ):
point = Literal( "." )
comma = Literal( "," )
e = CaselessLiteral( "E" )
inumber = Word( nums )
fnumber = Combine( Word( "+-"+nums, nums ) +
Optional( point + Optional( Word( nums ) ) ) +
Optional( e + Word( "+-"+nums, nums ) ) )
_of = Literal( 'of' )
_in = Literal( 'in' )
_by = Literal( 'by' )
_copy = Literal( 'copy' )
_mn = Literal( '-n' ).setParseAction( replace( 'OA_SubN' ) )
_me = Literal( '-e' ).setParseAction( replace( 'OA_SubE' ) )
_pn = Literal( '+n' ).setParseAction( replace( 'OA_AddN' ) )
_pe = Literal( '+e' ).setParseAction( replace( 'OA_AddE' ) )
_inn = Literal( '*n' ).setParseAction( replace( 'OA_IntersectN' ) )
_ine = Literal( '*e' ).setParseAction( replace( 'OA_IntersectE' ) )
regop = (_mn | _me | _pn | _pe | _inn | _ine)
lpar = Literal( "(" ).suppress()
rpar = Literal( ")" ).suppress()
_all = Literal( 'all' ).setParseAction( replace( 'KW_All' ) )
node = Literal( 'node' )
nodes = Literal( 'nodes' )
element = Literal( 'element' )
elements = Literal( 'elements' )
group = Literal( 'group' )
surface = Literal( 'surface' )
variable = Word( 'xyz', max = 1 ) | Literal( 'domain' )
any_var = Word( alphas + '_', alphanums + '_' ) | fnumber
function = Word( alphas, alphanums + '_' )
function = Group( function ).setParseAction( join_tokens )
region = Combine( Literal( 'r.' ) + Word( alphas, '_' + alphas + nums ) )
region = Group( Optional( _copy, default = 'nocopy' ) + region )
region.setParseAction( replace( 'KW_Region', keep = True ) )
coor = oneOf( 'x y z' )
boolop = oneOf( '& |' )
relop = oneOf( '< > <= >= != ==' )
bool_term = ZeroOrMore( '(' ) + (coor | fnumber ) + relop + (coor | fnumber)\
+ ZeroOrMore( ')' )
relation = Forward()
relation << ZeroOrMore( '(' )\
+ bool_term + ZeroOrMore( boolop + relation )\
+ ZeroOrMore( ')' )
relation = Group( relation ).setParseAction( join_tokens )
nos = Group( nodes + _of + surface ).setParseAction( replace( 'E_NOS' ) )
nir = Group( nodes + _in + relation ).setParseAction( \
replace( 'E_NIR', keep = True ) )
nbf = Group( nodes + _by + function ).setParseAction( \
replace( 'E_NBF', keep = True ) )
ebf = Group( elements + _by + function ).setParseAction( \
replace( 'E_EBF', keep = True ) )
eog = Group( elements + _of + group + Word( nums ) ).setParseAction( \
replace( 'E_EOG', keep = True ) )
nog = Group( nodes + _of + group + Word( nums ) ).setParseAction( \
replace( 'E_NOG', keep = True ) )
onir = Group( node + _in + region ).setParseAction( \
replace_with_region( 'E_ONIR', 2 ) )
ni = Group( node + delimitedList( inumber ) ).setParseAction( \
replace( 'E_NI', keep = True ) )
ei1 = Group( element + delimitedList( inumber ) ).setParseAction( \
replace( 'E_EI1', keep = True ) )
etuple = lpar.suppress() + inumber + comma.suppress() \
+ inumber + rpar.suppress()
ei2 = Group( element + delimitedList( etuple ) ).setParseAction( \
replace( 'E_EI2', keep = True ) )
region_expression = Forward()
atom1 = (_all | region | ni | onir | nos | nir | nbf
| ei1 | ei2 | ebf | eog | nog)
atom1.setParseAction( to_stack( stack ) )
atom2 = (lpar + region_expression.suppress() + rpar)
atom = (atom1 | atom2)
aux = (regop + region_expression)
aux.setParseAction( to_stack( stack ) )
region_expression << atom + ZeroOrMore( aux )
region_expression = StringStart() + region_expression + StringEnd()
# region.set_debug()
# relation.set_debug()
# region_expression.set_debug()
return region_expression
_test_strs = ['nodes of surface -n r.egion_1',
'r.egion_2 +n copy r.egion_1',
'nodes in (y <= 0.00001) & (x < 0.11)',
'nodes in ((y <= 0.00001) & (x < 0.11))',
'nodes in (((y <= 0.00001) & (x < 0.11)))',
'nodes in (((0.00001 < y) & (x < 0.11)))',
'nodes in (y < 1.0)',
'all -n nodes in (y == 0.00001)',
'all -n nodes of surface',
'all -e r.egion_100',
'r.egion_1 -n nodes of surface *e r.egion_8 *n nodes in (y > 0)',
'nodes of surface +n nodes by pokus',
'elements of group 6 +e nodes by fn2_3c',
"""r.egion_1 *n (r.egion_2 +e (nodes in (y > 0) *n r.egion_32))
-n nodes of surface -e r.egion_5""",
'node in r.region_3',
'node 10',
'elements by afun']
if __name__ == "__main__":
test_strs = _test_strs
stack = []
bnf = create_bnf( stack )
n_fail = 0
for test_str in test_strs:
print test_str
stack[:] = []
try:
out = bnf.parseString( test_str )
# print out
# print stack
except:
print '...failed!'
n_fail += 1
continue
print_stack( stack )
print 'failed: %d' % n_fail
|
olivierverdier/sfepy
|
sfepy/fem/parseReg.py
|
Python
|
bsd-3-clause
| 7,800
|
[
"VisIt"
] |
ed70032af08ccdca872707e20658270f4098b8b3b9af1074b57ad5b4795915e7
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
Provide plugin management
"""
import os
import sys
import logging
import imp
from openlp.core.lib import Plugin, PluginStatus, Registry
from openlp.core.utils import AppLocation
log = logging.getLogger(__name__)
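# Overview: PluginManager walks the plugins directory, imports every module it
# finds, instantiates all Plugin subclasses sorted by weight, and then exposes
# hook_* methods that wire settings tabs, media manager items and menu entries
# for every plugin that is not disabled.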
class PluginManager(object):
"""
This is the Plugin manager, which loads all the plugins,
and executes all the hooks, as and when necessary.
"""
log.info('Plugin manager loaded')
def __init__(self):
"""
The constructor for the plugin manager. Passes the controllers on to
the plugins for them to interact with via their ServiceItems.
"""
log.info('Plugin manager Initialising')
Registry().register('plugin_manager', self)
Registry().register_function('bootstrap_initialise', self.bootstrap_initialise)
self.base_path = os.path.abspath(AppLocation.get_directory(AppLocation.PluginsDir))
log.debug('Base path %s ', self.base_path)
self.plugins = []
log.info('Plugin manager Initialised')
def bootstrap_initialise(self):
"""
Bootstrap all the plugin manager functions
"""
log.info('bootstrap_initialise')
self.find_plugins()
        # Hook methods have to run after find_plugins(): the plugins need the
        # controllers, hence these hooks moved from setupUI() to here.
# Find and insert settings tabs
log.info('hook settings')
self.hook_settings_tabs()
# Find and insert media manager items
log.info('hook media')
self.hook_media_manager()
# Call the hook method to pull in import menus.
log.info('hook menus')
self.hook_import_menu()
# Call the hook method to pull in export menus.
self.hook_export_menu()
# Call the hook method to pull in tools menus.
self.hook_tools_menu()
# Call the initialise method to setup plugins.
log.info('initialise plugins')
self.initialise_plugins()
def find_plugins(self):
"""
Scan a directory for objects inheriting from the ``Plugin`` class.
"""
log.info('Finding plugins')
start_depth = len(os.path.abspath(self.base_path).split(os.sep))
present_plugin_dir = os.path.join(self.base_path, 'presentations')
log.debug('finding plugins in %s at depth %d', str(self.base_path), start_depth)
for root, dirs, files in os.walk(self.base_path):
if sys.platform == 'darwin' and root.startswith(present_plugin_dir):
# TODO Presentation plugin is not yet working on Mac OS X.
# For now just ignore it. The following code will ignore files from the presentation plugin directory
# and thereby never import the plugin.
continue
for name in files:
if name.endswith('.py') and not name.startswith('__'):
path = os.path.abspath(os.path.join(root, name))
this_depth = len(path.split(os.sep))
if this_depth - start_depth > 2:
# skip anything lower down
break
module_name = name[:-3]
# import the modules
log.debug('Importing %s from %s. Depth %d', module_name, root, this_depth)
                    fp = None
                    try:
                        # Use the "imp" library to try to get around a problem with the PyUNO library, which
                        # monkey-patches the __import__ function to do some magic. This causes issues with our tests.
                        # First, try to find the module we want to import, searching the directory in root.
                        fp, path_name, description = imp.find_module(module_name, [root])
                        # Then load the module (do the actual import) using the details from find_module().
                        imp.load_module(module_name, fp, path_name, description)
                    except ImportError as e:
                        log.exception('Failed to import module %s on path %s: %s', module_name, path, e.args[0])
                    finally:
                        # find_module() returns an open file handle; make sure it gets closed.
                        if fp:
                            fp.close()
plugin_classes = Plugin.__subclasses__()
plugin_objects = []
for p in plugin_classes:
try:
plugin = p()
log.debug('Loaded plugin %s', str(p))
plugin_objects.append(plugin)
except TypeError:
log.exception('Failed to load plugin %s', str(p))
plugins_list = sorted(plugin_objects, key=lambda plugin: plugin.weight)
for plugin in plugins_list:
if plugin.check_pre_conditions():
log.debug('Plugin %s active', str(plugin.name))
plugin.set_status()
else:
plugin.status = PluginStatus.Disabled
self.plugins.append(plugin)
def hook_media_manager(self):
"""
Create the plugins' media manager items.
"""
for plugin in self.plugins:
if plugin.status is not PluginStatus.Disabled:
plugin.create_media_manager_item()
def hook_settings_tabs(self):
"""
        Loop through all the plugins. If a plugin has a valid settings tab
        item, add it to the settings form.
        Tabs are created for all plugins that are not disabled, not just active ones.
"""
for plugin in self.plugins:
if plugin.status is not PluginStatus.Disabled:
plugin.create_settings_tab(self.settings_form)
def hook_import_menu(self):
"""
Loop through all the plugins and give them an opportunity to add an
item to the import menu.
"""
for plugin in self.plugins:
if plugin.status is not PluginStatus.Disabled:
plugin.add_import_menu_item(self.main_window.file_import_menu)
def hook_export_menu(self):
"""
Loop through all the plugins and give them an opportunity to add an
item to the export menu.
"""
for plugin in self.plugins:
if plugin.status is not PluginStatus.Disabled:
                plugin.add_export_menu_item(self.main_window.file_export_menu)
def hook_tools_menu(self):
"""
Loop through all the plugins and give them an opportunity to add an
item to the tools menu.
"""
for plugin in self.plugins:
if plugin.status is not PluginStatus.Disabled:
plugin.add_tools_menu_item(self.main_window.tools_menu)
def hook_upgrade_plugin_settings(self, settings):
"""
Loop through all the plugins and give them an opportunity to upgrade their settings.
``settings``
The Settings object containing the old settings.
"""
for plugin in self.plugins:
if plugin.status is not PluginStatus.Disabled:
plugin.upgrade_settings(settings)
def initialise_plugins(self):
"""
Loop through all the plugins and give them an opportunity to
initialise themselves.
"""
log.info('Initialise Plugins - Started')
for plugin in self.plugins:
            log.info('Initialising plugin %s (active: %s)', plugin.name, plugin.is_active())
            if plugin.is_active():
                plugin.initialise()
                log.info('Initialisation complete for %s', plugin.name)
log.info('Initialise Plugins - Finished')
def finalise_plugins(self):
"""
Loop through all the plugins and give them an opportunity to
clean themselves up
"""
log.info('finalising plugins')
for plugin in self.plugins:
if plugin.is_active():
plugin.finalise()
                log.info('Finalisation complete for %s', plugin.name)
def get_plugin_by_name(self, name):
"""
Return the plugin which has a name with value ``name``.
"""
for plugin in self.plugins:
if plugin.name == name:
return plugin
return None
def new_service_created(self):
"""
Loop through all the plugins and give them an opportunity to handle a new service
"""
log.info('plugins - new service created')
for plugin in self.plugins:
if plugin.is_active():
plugin.new_service_created()
def _get_settings_form(self):
"""
        Adds the settings form to the class dynamically
"""
if not hasattr(self, '_settings_form'):
self._settings_form = Registry().get('settings_form')
return self._settings_form
settings_form = property(_get_settings_form)
def _get_main_window(self):
"""
Adds the main window to the class dynamically
"""
if not hasattr(self, '_main_window'):
self._main_window = Registry().get('main_window')
return self._main_window
main_window = property(_get_main_window)
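

# A minimal sketch of how this plugin manager is driven, assuming the
# application has already created the Registry and registered the
# 'settings_form' and 'main_window' services used by the hooks above;
# 'songs' is a hypothetical plugin name used purely for illustration.
if __name__ == '__main__':
    plugin_manager = PluginManager()       # registers itself as 'plugin_manager'
    plugin_manager.bootstrap_initialise()  # find plugins, run the hooks, initialise
    plugin = plugin_manager.get_plugin_by_name('songs')
    if plugin is not None:
        log.info('Plugin %s has status %s', plugin.name, plugin.status)
    plugin_manager.finalise_plugins()      # let active plugins clean up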
|
marmyshev/item_title
|
openlp/core/lib/pluginmanager.py
|
Python
|
gpl-2.0
| 11,121
|
[
"Brian"
] |
79fef329c3209a197738585199c0da07042f71319e9db2a121f4b4341e432d9c
|
from __future__ import print_function
import sys
sys.path.append('..')
import stile
import dummy
def main():
    # setup
dh = dummy.DummyDataHandler()
bin_list = [stile.BinStep('ra',low=-1,high=1,step=1),
stile.BinStep('dec',low=-1,high=1,step=1)]
sys_test = stile.CorrelationFunctionSysTest(type='GalaxyShear')
stile_args = {'ra_units': 'degrees', 'dec_units': 'degrees',
'min_sep': 0.05, 'max_sep': 1, 'sep_units': 'degrees', 'nbins': 20}
    data_ids = dh.listData(object_types=['galaxy lens','galaxy'], epoch='single',
extent='field', data_format='table')
# do a test without binning
data = dh.getData(data_ids[0],'galaxy lens','single','field','table')
data2 = dh.getData(data_ids[1],'galaxy','single','field','table')
# run the test
results = sys_test(data, data2=data2, config=stile_args)
fig = sys_test.plot(results)
fig.savefig(sys_test.short_name+'.png')
stile.WriteASCIITable('realshear.dat',results)
print "Done with unbinned systematics test"
# do with binning
data = dh.getData(data_ids[0],'galaxy lens','single','field','table')
# turns a list of binning schemes into a pseudo-nested list of single bins
expanded_bin_list = stile.ExpandBinList(bin_list)
handles_list = []
deletes_list = []
# for each set of bins, do the systematics test as above
    # use a distinct loop variable to avoid shadowing the outer bin_list
    for single_bins in expanded_bin_list:
        bins_name = '-'.join([bl.short_name for bl in single_bins])
        data2 = dh.getData(data_ids[1],'galaxy','single','field','table',bin_list=single_bins)
        results = sys_test(data, data2=data2, config=stile_args)
        stile.WriteASCIITable('realshear-'+bins_name+'.dat',results)
        fig = sys_test.plot(results)
        fig.savefig(sys_test.short_name+bins_name+'.png')
        print("Done with binned systematics test", bins_name)
if __name__=='__main__':
main()
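# A minimal, self-contained analogue of what stile.ExpandBinList appears to do
# above: given a list of binning schemes, each of which splits into single
# bins, yield every combination of one bin per scheme (a Cartesian product).
# This only illustrates the shape of the expansion; it is not stile's API.
import itertools

def expand_bin_list_sketch(schemes):
    """Return every combination of one single bin per binning scheme."""
    return list(itertools.product(*schemes))

# Two schemes of two bins each expand to four combinations, matching the
# nested structure iterated over in main() above.
assert expand_bin_list_sketch([['ra_0', 'ra_1'], ['dec_0', 'dec_1']]) == [
    ('ra_0', 'dec_0'), ('ra_0', 'dec_1'), ('ra_1', 'dec_0'), ('ra_1', 'dec_1')]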
|
msimet/Stile
|
examples/example_run.py
|
Python
|
bsd-3-clause
| 1,915
|
[
"Galaxy"
] |
8f251ce8342009b03da2b4bc36ad45843c5d60169cf0ffbeec0b2c2639de9105
|
"""
The following contains the S22 and s26 databases of weakly interacting dimers and complexes
S22 geometry data are from
P. Jurecka, J. Sponer, J. Cerny, P. Hobza; Phys Chem Chem Phys 2006, 8 (17), 1985-1993.
See http://www.begdb.com/index.php?action=106a6c241b8797f52e1e77317b96a201 for the original files.
All geometries are optimized at either the CCSD(T) or MP2 level except for the methyl amide dimers
where only the hydrogen position is optimized at the DFT level (the precise optimization is written as a comment).
The S22 interaction energies are all calculated using both CCSD(T)/CBS counter poised corrected (CP) and MP2 /CBS CP.
The original S22 interaction energies are listed in the above references.
The S22 energies used here are from
Takatani, T. et al., J. Chem. Phys., 132, 144104 (2010)
where a large and more complete basis set has been used for all database members.
The original S22 set has been expanded with an extra 4 single hydrogen bonded complexes.
The expanded set is called S26. Data for the 4 extra dimers are from
Riley, K.E., Hobza, P., J. Chem. Phys. A, 111(33), 8257-8263 (2007).
Geometry optimizations: MP2/cc-pVTZ CP or DFT TPSS/TZVP noCP
Interaction energies: CCSD(T)/CBS CP or MP2/cc-pVDZ CP
The original s22 has also been expanded with 4 non-equilibrium structures for each system.
This defines the s22x5 database containing one shortened and three elongated structures:
0.9, 1.0, 1.2, 1.5 and 2.0 times the original intermolecular distance.
CCSD(T)/CBS interaction energies are consistent with the original s22 work.
Reference: L. Grafova, M. Pitonak, P. Hobza, J. Chem. Theo. Comput., 2010, ASAP article.
"""
from ase.atoms import Atoms
s22 = ['Ammonia_dimer','Water_dimer','Formic_acid_dimer','Formamide_dimer',
'Uracil_dimer_h-bonded','2-pyridoxine_2-aminopyridine_complex',
'Adenine-thymine_Watson-Crick_complex','Methane_dimer','Ethene_dimer',
'Benzene-methane_complex','Benzene_dimer_parallel_displaced','Pyrazine_dimer',
'Uracil_dimer_stack','Indole-benzene_complex_stack',
'Adenine-thymine_complex_stack','Ethene-ethyne_complex','Benzene-water_complex',
'Benzene-ammonia_complex','Benzene-HCN_complex','Benzene_dimer_T-shaped',
'Indole-benzene_T-shape_complex','Phenol_dimer']
s26 = s22 + ['Methanol_dimer','Methanol-formaldehyde_complex',
'Methyl_amide_dimer_alpha','Methyl_amide_dimer_beta']
s22x5 = ['Ammonia_dimer_0.9','Ammonia_dimer_1.0','Ammonia_dimer_1.2','Ammonia_dimer_1.5','Ammonia_dimer_2.0',
'Water_dimer_0.9','Water_dimer_1.0','Water_dimer_1.2','Water_dimer_1.5','Water_dimer_2.0',
'Formic_acid_dimer_0.9','Formic_acid_dimer_1.0','Formic_acid_dimer_1.2','Formic_acid_dimer_1.5','Formic_acid_dimer_2.0',
'Formamide_dimer_0.9','Formamide_dimer_1.0','Formamide_dimer_1.2','Formamide_dimer_1.5','Formamide_dimer_2.0',
'Uracil_dimer_h-bonded_0.9','Uracil_dimer_h-bonded_1.0','Uracil_dimer_h-bonded_1.2','Uracil_dimer_h-bonded_1.5','Uracil_dimer_h-bonded_2.0',
'2-pyridoxine_2-aminopyridine_complex_0.9','2-pyridoxine_2-aminopyridine_complex_1.0',
'2-pyridoxine_2-aminopyridine_complex_1.2','2-pyridoxine_2-aminopyridine_complex_1.5','2-pyridoxine_2-aminopyridine_complex_2.0',
'Adenine-thymine_Watson-Crick_complex_0.9','Adenine-thymine_Watson-Crick_complex_1.0',
'Adenine-thymine_Watson-Crick_complex_1.2','Adenine-thymine_Watson-Crick_complex_1.5','Adenine-thymine_Watson-Crick_complex_2.0',
'Methane_dimer_0.9','Methane_dimer_1.0','Methane_dimer_1.2','Methane_dimer_1.5','Methane_dimer_2.0',
'Ethene_dimer_0.9','Ethene_dimer_1.0','Ethene_dimer_1.2','Ethene_dimer_1.5','Ethene_dimer_2.0',
'Benzene-methane_complex_0.9','Benzene-methane_complex_1.0','Benzene-methane_complex_1.2','Benzene-methane_complex_1.5','Benzene-methane_complex_2.0',
'Benzene_dimer_parallel_displaced_0.9','Benzene_dimer_parallel_displaced_1.0',
'Benzene_dimer_parallel_displaced_1.2','Benzene_dimer_parallel_displaced_1.5','Benzene_dimer_parallel_displaced_2.0',
'Pyrazine_dimer_0.9','Pyrazine_dimer_1.0','Pyrazine_dimer_1.2','Pyrazine_dimer_1.5','Pyrazine_dimer_2.0',
'Uracil_dimer_stack_0.9','Uracil_dimer_stack_1.0','Uracil_dimer_stack_1.2','Uracil_dimer_stack_1.5','Uracil_dimer_stack_2.0',
'Indole-benzene_complex_stack_0.9','Indole-benzene_complex_stack_1.0',
'Indole-benzene_complex_stack_1.2','Indole-benzene_complex_stack_1.5','Indole-benzene_complex_stack_2.0',
'Adenine-thymine_complex_stack_0.9','Adenine-thymine_complex_stack_1.0',
'Adenine-thymine_complex_stack_1.2','Adenine-thymine_complex_stack_1.5','Adenine-thymine_complex_stack_2.0',
'Ethene-ethyne_complex_0.9','Ethene-ethyne_complex_1.0','Ethene-ethyne_complex_1.2','Ethene-ethyne_complex_1.5','Ethene-ethyne_complex_2.0',
'Benzene-water_complex_0.9','Benzene-water_complex_1.0','Benzene-water_complex_1.2','Benzene-water_complex_1.5','Benzene-water_complex_2.0',
'Benzene-ammonia_complex_0.9','Benzene-ammonia_complex_1.0','Benzene-ammonia_complex_1.2','Benzene-ammonia_complex_1.5','Benzene-ammonia_complex_2.0',
'Benzene-HCN_complex_0.9','Benzene-HCN_complex_1.0','Benzene-HCN_complex_1.2','Benzene-HCN_complex_1.5','Benzene-HCN_complex_2.0',
'Benzene_dimer_T-shaped_0.9','Benzene_dimer_T-shaped_1.0','Benzene_dimer_T-shaped_1.2','Benzene_dimer_T-shaped_1.5','Benzene_dimer_T-shaped_2.0',
'Indole-benzene_T-shape_complex_0.9','Indole-benzene_T-shape_complex_1.0',
'Indole-benzene_T-shape_complex_1.2','Indole-benzene_T-shape_complex_1.5','Indole-benzene_T-shape_complex_2.0',
'Phenol_dimer_0.9','Phenol_dimer_1.0','Phenol_dimer_1.2','Phenol_dimer_1.5','Phenol_dimer_2.0']
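# A minimal sketch of how an entry of the ``data`` dictionary below can be
# turned into an ase Atoms object. The helper name and its ``dist`` keyword
# are illustrative only, not necessarily this module's public API; ``data``
# is looked up at call time, after the dictionary below has been defined.
def _create_system_sketch(name, dist=None):
    """Return an Atoms object for ``name``; ``dist`` (e.g. '1.2') selects one
    of the s22x5 geometries, while None selects the original geometry."""
    entry = data[name]
    key = 'positions' if dist is None else 'positions ' + dist
    return Atoms(symbols=entry['symbols'], positions=entry[key])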
data = {
# --- s22 and s22x5 ---#
'2-pyridoxine_2-aminopyridine_complex': {
'description': "Complex, S22, S26, 2 h-bond, double h-bond, nucleic base model",
'name': "2-pyridoxine_2-aminopyridine_complex",
's26_number': "06",
'interaction energy CC': -0.7372,
'interaction energies s22x5': [-0.6561,-0.7242,-0.6041,-0.3547,-0.1414],
'offset': 0.0130,
'symbols': 'ONCCCCCHHHHHNCCCCCHHHHNHH',
'magmoms': None,
'dimer atoms': [12,13],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ -1.3976213, -1.8858368, -0.3673061],
[ -1.4642550, 0.3641828, 0.0192301],
[ -4.1857398, 0.3696669, 0.0360960],
[ -3.4832598, 1.5783111, 0.2500752],
[ -2.1179502, 1.5307048, 0.2338383],
[ -2.0773833, -0.8637492, -0.1899414],
[ -3.5156032, -0.8051950, -0.1757585],
[ -5.2678045, 0.3707428, 0.0411419],
[ -3.9920334, 2.5127560, 0.4214414],
[ -1.4929196, 2.3984096, 0.3885018],
[ -4.0401226, -1.7348452, -0.3379269],
[ -0.4265266, 0.3612127, 0.0073538],
[ 1.4327616, 0.3639703, -0.0159508],
[ 2.1154200, -0.7803450, 0.1681099],
[ 3.5237586, -0.8016096, 0.1545027],
[ 4.2185897, 0.3735783, -0.0525929],
[ 3.5099708, 1.5615014, -0.2449763],
[ 2.1280138, 1.4953324, -0.2175374],
[ 4.0459206, -1.7361356, 0.3076883],
[ 5.2999426, 0.3666009, -0.0663349],
[ 4.0110923, 2.5024313, -0.4130052],
[ 1.5339878, 2.3893837, -0.3670565],
[ 1.3883123, -1.9083038, 0.4198149],
[ 1.8694714, -2.7812773, 0.2940385],
[ 0.4089067, -1.9079942, 0.1300860]],
'positions 0.9':[[ -0.969652624 , -2.245611164 , -0.386822525 ],
[ -1.037789793 , 0.004508753 , -0.001131127 ],
[ -3.759261297 , 0.014028068 , -0.018375760 ],
[ -3.057727058 , 1.221631156 , 0.204402100 ],
[ -1.692392879 , 1.172000703 , 0.205277859 ],
[ -1.650068007 , -1.222514751 , -0.217981663 ],
[ -3.088264390 , -1.161828225 , -0.221825966 ],
[ -4.841300764 , 0.016708498 , -0.026892047 ],
[ -3.567221821 , 2.156831083 , 0.369386687 ],
[ -1.068064568 , 2.038779450 , 0.367771502 ],
[ -3.612088503 , -2.090701001 , -0.390563867 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 1.673493386 , 0.000000000 , 0.000000000 ],
[ 2.352093429 , -1.145324213 , 0.192591910 ],
[ 3.760459273 , -1.168677470 , 0.196637005 ],
[ 4.459573002 , 0.005477083 , -0.001723239 ],
[ 3.755182987 , 1.194447664 , -0.202961469 ],
[ 2.372894041 , 1.130328028 , -0.192845808 ],
[ 4.279274134 , -2.103975233 , 0.356345736 ],
[ 5.541001766 , -0.003103367 , -0.001911235 ],
[ 4.259765167 , 2.134632052 , -0.364687797 ],
[ 1.782114958 , 2.025258423 , -0.349790900 ],
[ 1.620216197 , -2.272201547 , 0.435153550 ],
[ 2.101618920 , -3.145888174 , 0.315408858 ],
[ 0.644520940 , -2.270442069 , 0.133172072 ]],
'positions 1.0':[[ -0.969652624000000 , -2.245611164000000 , -0.386822525000000 ],
[ -1.037789793000000 , 0.004508753000000 , -0.001131127000000 ],
[ -3.759261297000000 , 0.014028068000000 , -0.018375760000000 ],
[ -3.057727058000000 , 1.221631156000000 , 0.204402100000000 ],
[ -1.692392879000000 , 1.172000703000000 , 0.205277859000000 ],
[ -1.650068007000000 , -1.222514751000000 , -0.217981663000000 ],
[ -3.088264390000000 , -1.161828225000000 , -0.221825966000000 ],
[ -4.841300764000000 , 0.016708498000000 , -0.026892047000000 ],
[ -3.567221821000000 , 2.156831083000000 , 0.369386687000000 ],
[ -1.068064568000000 , 2.038779450000000 , 0.367771502000000 ],
[ -3.612088503000000 , -2.090701001000000 , -0.390563867000000 ],
[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ 1.859437095454546 , 0.000000000000000 , 0.000000000000000 ],
[ 2.538037138454545 , -1.145324213000000 , 0.192591910000000 ],
[ 3.946402982454545 , -1.168677470000000 , 0.196637005000000 ],
[ 4.645516711454546 , 0.005477083000000 , -0.001723239000000 ],
[ 3.941126696454545 , 1.194447664000000 , -0.202961469000000 ],
[ 2.558837750454545 , 1.130328028000000 , -0.192845808000000 ],
[ 4.465217843454545 , -2.103975233000000 , 0.356345736000000 ],
[ 5.726945475454546 , -0.003103367000000 , -0.001911235000000 ],
[ 4.445708876454546 , 2.134632052000000 , -0.364687797000000 ],
[ 1.968058667454545 , 2.025258423000000 , -0.349790900000000 ],
[ 1.806159906454545 , -2.272201547000000 , 0.435153550000000 ],
[ 2.287562629454545 , -3.145888174000000 , 0.315408858000000 ],
[ 0.830464649454546 , -2.270442069000000 , 0.133172072000000 ]],
'positions 1.2':[[ -0.969652624 , -2.245611164 , -0.386822525 ],
[ -1.037789793 , 0.004508753 , -0.001131127 ],
[ -3.759261297 , 0.014028068 , -0.018375760 ],
[ -3.057727058 , 1.221631156 , 0.204402100 ],
[ -1.692392879 , 1.172000703 , 0.205277859 ],
[ -1.650068007 , -1.222514751 , -0.217981663 ],
[ -3.088264390 , -1.161828225 , -0.221825966 ],
[ -4.841300764 , 0.016708498 , -0.026892047 ],
[ -3.567221821 , 2.156831083 , 0.369386687 ],
[ -1.068064568 , 2.038779450 , 0.367771502 ],
[ -3.612088503 , -2.090701001 , -0.390563867 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 2.231324514 , 0.000000000 , 0.000000000 ],
[ 2.909924557 , -1.145324213 , 0.192591910 ],
[ 4.318290401 , -1.168677470 , 0.196637005 ],
[ 5.017404130 , 0.005477083 , -0.001723239 ],
[ 4.313014115 , 1.194447664 , -0.202961469 ],
[ 2.930725169 , 1.130328028 , -0.192845808 ],
[ 4.837105262 , -2.103975233 , 0.356345736 ],
[ 6.098832894 , -0.003103367 , -0.001911235 ],
[ 4.817596295 , 2.134632052 , -0.364687797 ],
[ 2.339946086 , 2.025258423 , -0.349790900 ],
[ 2.178047325 , -2.272201547 , 0.435153550 ],
[ 2.659450048 , -3.145888174 , 0.315408858 ],
[ 1.202352068 , -2.270442069 , 0.133172072 ]],
'positions 1.5':[[ -0.969652624 , -2.245611164 , -0.386822525 ],
[ -1.037789793 , 0.004508753 , -0.001131127 ],
[ -3.759261297 , 0.014028068 , -0.018375760 ],
[ -3.057727058 , 1.221631156 , 0.204402100 ],
[ -1.692392879 , 1.172000703 , 0.205277859 ],
[ -1.650068007 , -1.222514751 , -0.217981663 ],
[ -3.088264390 , -1.161828225 , -0.221825966 ],
[ -4.841300764 , 0.016708498 , -0.026892047 ],
[ -3.567221821 , 2.156831083 , 0.369386687 ],
[ -1.068064568 , 2.038779450 , 0.367771502 ],
[ -3.612088503 , -2.090701001 , -0.390563867 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 2.789155642 , 0.000000000 , 0.000000000 ],
[ 3.467755685 , -1.145324213 , 0.192591910 ],
[ 4.876121529 , -1.168677470 , 0.196637005 ],
[ 5.575235258 , 0.005477083 , -0.001723239 ],
[ 4.870845243 , 1.194447664 , -0.202961469 ],
[ 3.488556297 , 1.130328028 , -0.192845808 ],
[ 5.394936390 , -2.103975233 , 0.356345736 ],
[ 6.656664022 , -0.003103367 , -0.001911235 ],
[ 5.375427423 , 2.134632052 , -0.364687797 ],
[ 2.897777214 , 2.025258423 , -0.349790900 ],
[ 2.735878453 , -2.272201547 , 0.435153550 ],
[ 3.217281176 , -3.145888174 , 0.315408858 ],
[ 1.760183196 , -2.270442069 , 0.133172072 ]],
'positions 2.0':[[ -0.969652624 , -2.245611164 , -0.386822525 ],
[ -1.037789793 , 0.004508753 , -0.001131127 ],
[ -3.759261297 , 0.014028068 , -0.018375760 ],
[ -3.057727058 , 1.221631156 , 0.204402100 ],
[ -1.692392879 , 1.172000703 , 0.205277859 ],
[ -1.650068007 , -1.222514751 , -0.217981663 ],
[ -3.088264390 , -1.161828225 , -0.221825966 ],
[ -4.841300764 , 0.016708498 , -0.026892047 ],
[ -3.567221821 , 2.156831083 , 0.369386687 ],
[ -1.068064568 , 2.038779450 , 0.367771502 ],
[ -3.612088503 , -2.090701001 , -0.390563867 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 3.718874190 , 0.000000000 , 0.000000000 ],
[ 4.397474233 , -1.145324213 , 0.192591910 ],
[ 5.805840077 , -1.168677470 , 0.196637005 ],
[ 6.504953806 , 0.005477083 , -0.001723239 ],
[ 5.800563791 , 1.194447664 , -0.202961469 ],
[ 4.418274845 , 1.130328028 , -0.192845808 ],
[ 6.324654938 , -2.103975233 , 0.356345736 ],
[ 7.586382570 , -0.003103367 , -0.001911235 ],
[ 6.305145971 , 2.134632052 , -0.364687797 ],
[ 3.827495762 , 2.025258423 , -0.349790900 ],
[ 3.665597001 , -2.272201547 , 0.435153550 ],
[ 4.146999724 , -3.145888174 , 0.315408858 ],
[ 2.689901744 , -2.270442069 , 0.133172072 ]]},
'Adenine-thymine_complex_stack': {
'description': "Complex, S22, S26, stack, dispersion bonded, nucleic base",
'name': "Adenine-thymine_complex_stack",
's26_number': "15",
'interaction energy CC': -0.5056,
'interaction energies s22x5':[-0.3465,-0.5299,-0.3569,-0.1409,-0.0399],
'offset': -0.0243,
'symbols': 'NCHNCCNHHNCHNCHNCHCCHHHCONHCOH',
'magmoms': None,
'dimer atoms': [15,15],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ 0.2793014, 2.4068393, -0.6057517],
[ -1.0848570, 2.4457461, -0.5511608],
[ -1.6594403, 3.0230294, -1.2560905],
[ -1.5977117, 1.7179877, 0.4287543],
[ -0.4897255, 1.1714358, 1.0301910],
[ -0.3461366, 0.2914710, 2.1172343],
[ -1.4187090, -0.1677767, 2.8101441],
[ -1.2388750, -0.9594802, 3.4047578],
[ -2.2918734, -0.1788223, 2.3073619],
[ 0.8857630, -0.0700763, 2.4919494],
[ 1.9352348, 0.4072878, 1.7968022],
[ 2.9060330, 0.0788414, 2.1458181],
[ 1.9409775, 1.2242019, 0.7402202],
[ 0.6952186, 1.5779858, 0.4063984],
[ 0.8610073, 2.8298045, -1.3104502],
[ 1.2754606, -0.6478993, -1.9779104],
[ 1.4130533, -1.5536850, -0.9550667],
[ 2.4258769, -1.8670780, -0.7468778],
[ 0.3575976, -2.0239499, -0.2530575],
[ 0.4821292, -3.0179494, 0.8521221],
[ 0.1757705, -2.5756065, 1.7986281],
[ -0.1601691, -3.8770412, 0.6639498],
[ 1.5112443, -3.3572767, 0.9513659],
[ -0.9684711, -1.5298112, -0.5939792],
[ -2.0029280, -1.8396957, -0.0199453],
[ -0.9956916, -0.6383870, -1.6720420],
[ -1.9014057, -0.2501720, -1.8985760],
[ 0.0684702, -0.1191762, -2.3763759],
[ -0.0397875, 0.7227006, -3.2531083],
[ 2.0853289, -0.2760176, -2.4454577]],
'positions 0.9':[[ 0.067390759 , 1.213806097 , -1.171192513 ],
[ -0.034440687 , 0.160916029 , -2.035179690 ],
[ -0.037909102 , 0.307694674 , -3.102311444 ],
[ -0.122286497 , -1.014214485 , -1.431659388 ],
[ -0.061278153 , -0.690156063 , -0.097738525 ],
[ -0.083866474 , -1.480006435 , 1.065121981 ],
[ -0.207551291 , -2.830167865 , 1.008466281 ],
[ 0.020236002 , -3.318294510 , 1.858492777 ],
[ 0.100823981 , -3.261839820 , 0.151791829 ],
[ -0.015107287 , -0.872886238 , 2.254820437 ],
[ 0.095534438 , 0.468473589 , 2.286592142 ],
[ 0.148443656 , 0.902433537 , 3.277055537 ],
[ 0.150791629 , 1.330817541 , 1.268232413 ],
[ 0.061278153 , 0.690156063 , 0.097738525 ],
[ 0.213123816 , 2.178532043 , -1.420082564 ],
[ 2.995457244 , 1.318912569 , 0.115169333 ],
[ 3.033773997 , 0.544134785 , 1.248235461 ],
[ 3.166936649 , 1.084216460 , 2.174491246 ],
[ 2.913123372 , -0.802036026 , 1.213306349 ],
[ 2.965573998 , -1.664227788 , 2.429380731 ],
[ 2.009790775 , -2.161867438 , 2.585037720 ],
[ 3.726416066 , -2.435033978 , 2.315487569 ],
[ 3.189128467 , -1.070628980 , 3.313538183 ],
[ 2.718644614 , -1.440326451 , -0.080379664 ],
[ 2.558245305 , -2.640081851 , -0.255033817 ],
[ 2.729839539 , -0.560837886 , -1.168484485 ],
[ 2.554150647 , -0.977998743 , -2.072617562 ],
[ 2.814781928 , 0.814169728 , -1.152798148 ],
[ 2.732113465 , 1.513854058 , -2.149163262 ],
[ 3.033823338 , 2.322516737 , 0.179118562 ]],
'positions 1.0':[[ 0.067390759000000 , 1.213806097000000 , -1.171192513000000 ],
[ -0.034440687000000 , 0.160916029000000 , -2.035179690000000 ],
[ -0.037909102000000 , 0.307694674000000 , -3.102311444000000 ],
[ -0.122286497000000 , -1.014214485000000 , -1.431659388000000 ],
[ -0.061278153000000 , -0.690156063000000 , -0.097738525000000 ],
[ -0.083866474000000 , -1.480006435000000 , 1.065121981000000 ],
[ -0.207551291000000 , -2.830167865000000 , 1.008466281000000 ],
[ 0.020236002000000 , -3.318294510000000 , 1.858492777000000 ],
[ 0.100823981000000 , -3.261839820000000 , 0.151791829000000 ],
[ -0.015107287000000 , -0.872886238000000 , 2.254820437000000 ],
[ 0.095534438000000 , 0.468473589000000 , 2.286592142000000 ],
[ 0.148443656000000 , 0.902433537000000 , 3.277055537000000 ],
[ 0.150791629000000 , 1.330817541000000 , 1.268232413000000 ],
[ 0.061278153000000 , 0.690156063000000 , 0.097738525000000 ],
[ 0.213123816000000 , 2.178532043000000 , -1.420082564000000 ],
[ 3.314050951181818 , 1.318912569000000 , 0.115169333000000 ],
[ 3.352367704181818 , 0.544134785000000 , 1.248235461000000 ],
[ 3.485530356181818 , 1.084216460000000 , 2.174491246000000 ],
[ 3.231717079181818 , -0.802036026000000 , 1.213306349000000 ],
[ 3.284167705181818 , -1.664227788000000 , 2.429380731000000 ],
[ 2.328384482181818 , -2.161867438000000 , 2.585037720000000 ],
[ 4.045009773181818 , -2.435033978000000 , 2.315487569000000 ],
[ 3.507722174181819 , -1.070628980000000 , 3.313538183000000 ],
[ 3.037238321181818 , -1.440326451000000 , -0.080379664000000 ],
[ 2.876839012181818 , -2.640081851000000 , -0.255033817000000 ],
[ 3.048433246181818 , -0.560837886000000 , -1.168484485000000 ],
[ 2.872744354181818 , -0.977998743000000 , -2.072617562000000 ],
[ 3.133375635181818 , 0.814169728000000 , -1.152798148000000 ],
[ 3.050707172181818 , 1.513854058000000 , -2.149163262000000 ],
[ 3.352417045181818 , 2.322516737000000 , 0.179118562000000 ]],
'positions 1.2':[[ 0.067390759 , 1.213806097 , -1.171192513 ],
[ -0.034440687 , 0.160916029 , -2.035179690 ],
[ -0.037909102 , 0.307694674 , -3.102311444 ],
[ -0.122286497 , -1.014214485 , -1.431659388 ],
[ -0.061278153 , -0.690156063 , -0.097738525 ],
[ -0.083866474 , -1.480006435 , 1.065121981 ],
[ -0.207551291 , -2.830167865 , 1.008466281 ],
[ 0.020236002 , -3.318294510 , 1.858492777 ],
[ 0.100823981 , -3.261839820 , 0.151791829 ],
[ -0.015107287 , -0.872886238 , 2.254820437 ],
[ 0.095534438 , 0.468473589 , 2.286592142 ],
[ 0.148443656 , 0.902433537 , 3.277055537 ],
[ 0.150791629 , 1.330817541 , 1.268232413 ],
[ 0.061278153 , 0.690156063 , 0.097738525 ],
[ 0.213123816 , 2.178532043 , -1.420082564 ],
[ 3.951238365 , 1.318912569 , 0.115169333 ],
[ 3.989555118 , 0.544134785 , 1.248235461 ],
[ 4.122717770 , 1.084216460 , 2.174491246 ],
[ 3.868904493 , -0.802036026 , 1.213306349 ],
[ 3.921355119 , -1.664227788 , 2.429380731 ],
[ 2.965571896 , -2.161867438 , 2.585037720 ],
[ 4.682197187 , -2.435033978 , 2.315487569 ],
[ 4.144909588 , -1.070628980 , 3.313538183 ],
[ 3.674425735 , -1.440326451 , -0.080379664 ],
[ 3.514026426 , -2.640081851 , -0.255033817 ],
[ 3.685620660 , -0.560837886 , -1.168484485 ],
[ 3.509931768 , -0.977998743 , -2.072617562 ],
[ 3.770563049 , 0.814169728 , -1.152798148 ],
[ 3.687894586 , 1.513854058 , -2.149163262 ],
[ 3.989604459 , 2.322516737 , 0.179118562 ]],
'positions 1.5':[[ 0.067390759 , 1.213806097 , -1.171192513 ],
[ -0.034440687 , 0.160916029 , -2.035179690 ],
[ -0.037909102 , 0.307694674 , -3.102311444 ],
[ -0.122286497 , -1.014214485 , -1.431659388 ],
[ -0.061278153 , -0.690156063 , -0.097738525 ],
[ -0.083866474 , -1.480006435 , 1.065121981 ],
[ -0.207551291 , -2.830167865 , 1.008466281 ],
[ 0.020236002 , -3.318294510 , 1.858492777 ],
[ 0.100823981 , -3.261839820 , 0.151791829 ],
[ -0.015107287 , -0.872886238 , 2.254820437 ],
[ 0.095534438 , 0.468473589 , 2.286592142 ],
[ 0.148443656 , 0.902433537 , 3.277055537 ],
[ 0.150791629 , 1.330817541 , 1.268232413 ],
[ 0.061278153 , 0.690156063 , 0.097738525 ],
[ 0.213123816 , 2.178532043 , -1.420082564 ],
[ 4.907019487 , 1.318912569 , 0.115169333 ],
[ 4.945336240 , 0.544134785 , 1.248235461 ],
[ 5.078498892 , 1.084216460 , 2.174491246 ],
[ 4.824685615 , -0.802036026 , 1.213306349 ],
[ 4.877136241 , -1.664227788 , 2.429380731 ],
[ 3.921353018 , -2.161867438 , 2.585037720 ],
[ 5.637978309 , -2.435033978 , 2.315487569 ],
[ 5.100690710 , -1.070628980 , 3.313538183 ],
[ 4.630206857 , -1.440326451 , -0.080379664 ],
[ 4.469807548 , -2.640081851 , -0.255033817 ],
[ 4.641401782 , -0.560837886 , -1.168484485 ],
[ 4.465712890 , -0.977998743 , -2.072617562 ],
[ 4.726344171 , 0.814169728 , -1.152798148 ],
[ 4.643675708 , 1.513854058 , -2.149163262 ],
[ 4.945385581 , 2.322516737 , 0.179118562 ]],
'positions 2.0':[[ 0.067390759 , 1.213806097 , -1.171192513 ],
[ -0.034440687 , 0.160916029 , -2.035179690 ],
[ -0.037909102 , 0.307694674 , -3.102311444 ],
[ -0.122286497 , -1.014214485 , -1.431659388 ],
[ -0.061278153 , -0.690156063 , -0.097738525 ],
[ -0.083866474 , -1.480006435 , 1.065121981 ],
[ -0.207551291 , -2.830167865 , 1.008466281 ],
[ 0.020236002 , -3.318294510 , 1.858492777 ],
[ 0.100823981 , -3.261839820 , 0.151791829 ],
[ -0.015107287 , -0.872886238 , 2.254820437 ],
[ 0.095534438 , 0.468473589 , 2.286592142 ],
[ 0.148443656 , 0.902433537 , 3.277055537 ],
[ 0.150791629 , 1.330817541 , 1.268232413 ],
[ 0.061278153 , 0.690156063 , 0.097738525 ],
[ 0.213123816 , 2.178532043 , -1.420082564 ],
[ 6.499988023 , 1.318912569 , 0.115169333 ],
[ 6.538304776 , 0.544134785 , 1.248235461 ],
[ 6.671467428 , 1.084216460 , 2.174491246 ],
[ 6.417654151 , -0.802036026 , 1.213306349 ],
[ 6.470104777 , -1.664227788 , 2.429380731 ],
[ 5.514321554 , -2.161867438 , 2.585037720 ],
[ 7.230946845 , -2.435033978 , 2.315487569 ],
[ 6.693659246 , -1.070628980 , 3.313538183 ],
[ 6.223175393 , -1.440326451 , -0.080379664 ],
[ 6.062776084 , -2.640081851 , -0.255033817 ],
[ 6.234370318 , -0.560837886 , -1.168484485 ],
[ 6.058681426 , -0.977998743 , -2.072617562 ],
[ 6.319312707 , 0.814169728 , -1.152798148 ],
[ 6.236644244 , 1.513854058 , -2.149163262 ],
[ 6.538354117 , 2.322516737 , 0.179118562 ]]},
'Adenine-thymine_Watson-Crick_complex': {
'description': "Complex, S22, S26, 2 h-bonds, double h-bond, nucleic base",
'name': "Adenine-thymine_Watson-Crick_complex",
's26_number': "07",
'interaction energy CC':-0.7259,
'interaction energies s22x5':[-0.6513,-0.7099,-0.5767,-0.3222,-0.1123],
'offset': 0.0160,
'symbols': 'NCCCNCNCNNHHHHHNCCCNCCOOHHHHHH',
'magmoms': None,
'dimer atoms': [15,15],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ 0.9350155, -0.0279801, -0.3788916],
[ 1.6739638, -0.0357766, 0.7424316],
[ 3.0747955, -0.0094480, 0.5994562],
[ 3.5646109, 0.0195446, -0.7059872],
[ 2.8531510, 0.0258031, -1.8409596],
[ 1.5490760, 0.0012569, -1.5808009],
[ 4.0885824, -0.0054429, 1.5289786],
[ 5.1829921, 0.0253971, 0.7872176],
[ 4.9294871, 0.0412404, -0.5567274],
[ 1.0716177, -0.0765366, 1.9391390],
[ 0.8794435, 0.0050260, -2.4315709],
[ 6.1882591, 0.0375542, 1.1738824],
[ 5.6035368, 0.0648755, -1.3036811],
[ 0.0586915, -0.0423765, 2.0039181],
[ 1.6443796, -0.0347395, 2.7619159],
[ -3.9211729, -0.0009646, -1.5163659],
[ -4.6136833, 0.0169051, -0.3336520],
[ -3.9917387, 0.0219348, 0.8663338],
[ -2.5361367, 0.0074651, 0.8766724],
[ -1.9256484, -0.0110593, -0.3638948],
[ -2.5395897, -0.0149474, -1.5962357],
[ -4.7106131, 0.0413373, 2.1738637],
[ -1.8674730, 0.0112093, 1.9120833],
[ -1.9416783, -0.0291878, -2.6573783],
[ -4.4017172, -0.0036078, -2.4004924],
[ -0.8838255, -0.0216168, -0.3784269],
[ -5.6909220, 0.0269347, -0.4227183],
[ -4.4439282, -0.8302573, 2.7695655],
[ -4.4267056, 0.9186178, 2.7530256],
[ -5.7883971, 0.0505530, 2.0247280]],
'positions 0.9':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.738685058 , -0.157889771 , 1.110355410 ],
[ -2.139452884 , -0.168053559 , 0.964712563 ],
[ -2.629497187 , -0.008665792 , -0.331201352 ],
[ -1.918309833 , 0.152634753 , -1.454844039 ],
[ -0.614262216 , 0.143659867 , -1.193547121 ],
[ -3.152980999 , -0.310697201 , 1.883518666 ],
[ -4.247466012 , -0.237200328 , 1.144874976 ],
[ -3.994250734 , -0.056604504 , -0.187030096 ],
[ -0.136179412 , -0.289433845 , 2.300428025 ],
[ 0.055161346 , 0.265959015 , -2.035655088 ],
[ -5.252585445 , -0.308958331 , 1.525406574 ],
[ -4.668404863 , 0.026245320 , -0.929656824 ],
[ 0.876876426 , -0.329105732 , 2.359811410 ],
[ -0.708581316 , -0.452407073 , 3.108240602 ],
[ 4.674076612 , 0.155627547 , -1.128075158 ],
[ 5.366947235 , -0.031573530 , 0.039652507 ],
[ 4.745331442 , -0.213180550 , 1.225999310 ],
[ 3.289690418 , -0.205459536 , 1.237959001 ],
[ 2.678823212 , -0.008913767 , 0.013109028 ],
[ 3.292432779 , 0.176239188 , -1.205417098 ],
[ 5.464603172 , -0.419950938 , 2.517000917 ],
[ 2.621308338 , -0.362031655 , 2.261654302 ],
[ 2.694203350 , 0.342506569 , -2.253367774 ],
[ 5.154382378 , 0.288458351 , -2.002300903 ],
[ 1.636966971 , 0.000000000 , 0.000000000 ],
[ 6.444191927 , -0.024779868 , -0.049650000 ],
[ 5.195022957 , 0.354841198 , 3.233018736 ],
[ 5.183915029 , -1.373098243 , 2.962397530 ],
[ 6.542374655 , -0.403617008 , 2.368385087 ]],
'positions 1.0':[[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ -0.738685058000000 , -0.157889771000000 , 1.110355410000000 ],
[ -2.139452884000000 , -0.168053559000000 , 0.964712563000000 ],
[ -2.629497187000000 , -0.008665792000000 , -0.331201352000000 ],
[ -1.918309833000000 , 0.152634753000000 , -1.454844039000000 ],
[ -0.614262216000000 , 0.143659867000000 , -1.193547121000000 ],
[ -3.152980999000000 , -0.310697201000000 , 1.883518666000000 ],
[ -4.247466012000000 , -0.237200328000000 , 1.144874976000000 ],
[ -3.994250734000000 , -0.056604504000000 , -0.187030096000000 ],
[ -0.136179412000000 , -0.289433845000000 , 2.300428025000000 ],
[ 0.055161346000000 , 0.265959015000000 , -2.035655088000000 ],
[ -5.252585445000000 , -0.308958331000000 , 1.525406574000000 ],
[ -4.668404863000000 , 0.026245320000000 , -0.929656824000000 ],
[ 0.876876426000000 , -0.329105732000000 , 2.359811410000000 ],
[ -0.708581316000000 , -0.452407073000000 , 3.108240602000000 ],
[ 4.855961831000000 , 0.155627547000000 , -1.128075158000000 ],
[ 5.548832453999999 , -0.031573530000000 , 0.039652507000000 ],
[ 4.927216661000000 , -0.213180550000000 , 1.225999310000000 ],
[ 3.471575637000000 , -0.205459536000000 , 1.237959001000000 ],
[ 2.860708431000000 , -0.008913767000000 , 0.013109028000000 ],
[ 3.474317998000000 , 0.176239188000000 , -1.205417098000000 ],
[ 5.646488391000000 , -0.419950938000000 , 2.517000917000000 ],
[ 2.803193557000000 , -0.362031655000000 , 2.261654302000000 ],
[ 2.876088569000000 , 0.342506569000000 , -2.253367774000000 ],
[ 5.336267597000000 , 0.288458351000000 , -2.002300903000000 ],
[ 1.818852190000000 , 0.000000000000000 , 0.000000000000000 ],
[ 6.626077146000000 , -0.024779868000000 , -0.049650000000000 ],
[ 5.376908176000000 , 0.354841198000000 , 3.233018736000000 ],
[ 5.365800247999999 , -1.373098243000000 , 2.962397530000000 ],
[ 6.724259873999999 , -0.403617008000000 , 2.368385087000000 ]],
'positions 1.2':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.738685058 , -0.157889771 , 1.110355410 ],
[ -2.139452884 , -0.168053559 , 0.964712563 ],
[ -2.629497187 , -0.008665792 , -0.331201352 ],
[ -1.918309833 , 0.152634753 , -1.454844039 ],
[ -0.614262216 , 0.143659867 , -1.193547121 ],
[ -3.152980999 , -0.310697201 , 1.883518666 ],
[ -4.247466012 , -0.237200328 , 1.144874976 ],
[ -3.994250734 , -0.056604504 , -0.187030096 ],
[ -0.136179412 , -0.289433845 , 2.300428025 ],
[ 0.055161346 , 0.265959015 , -2.035655088 ],
[ -5.252585445 , -0.308958331 , 1.525406574 ],
[ -4.668404863 , 0.026245320 , -0.929656824 ],
[ 0.876876426 , -0.329105732 , 2.359811410 ],
[ -0.708581316 , -0.452407073 , 3.108240602 ],
[ 5.219732269 , 0.155627547 , -1.128075158 ],
[ 5.912602892 , -0.031573530 , 0.039652507 ],
[ 5.290987099 , -0.213180550 , 1.225999310 ],
[ 3.835346075 , -0.205459536 , 1.237959001 ],
[ 3.224478869 , -0.008913767 , 0.013109028 ],
[ 3.838088436 , 0.176239188 , -1.205417098 ],
[ 6.010258829 , -0.419950938 , 2.517000917 ],
[ 3.166963995 , -0.362031655 , 2.261654302 ],
[ 3.239859007 , 0.342506569 , -2.253367774 ],
[ 5.700038035 , 0.288458351 , -2.002300903 ],
[ 2.182622628 , 0.000000000 , 0.000000000 ],
[ 6.989847584 , -0.024779868 , -0.049650000 ],
[ 5.740678614 , 0.354841198 , 3.233018736 ],
[ 5.729570686 , -1.373098243 , 2.962397530 ],
[ 7.088030312 , -0.403617008 , 2.368385087 ]],
'positions 1.5':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.738685058 , -0.157889771 , 1.110355410 ],
[ -2.139452884 , -0.168053559 , 0.964712563 ],
[ -2.629497187 , -0.008665792 , -0.331201352 ],
[ -1.918309833 , 0.152634753 , -1.454844039 ],
[ -0.614262216 , 0.143659867 , -1.193547121 ],
[ -3.152980999 , -0.310697201 , 1.883518666 ],
[ -4.247466012 , -0.237200328 , 1.144874976 ],
[ -3.994250734 , -0.056604504 , -0.187030096 ],
[ -0.136179412 , -0.289433845 , 2.300428025 ],
[ 0.055161346 , 0.265959015 , -2.035655088 ],
[ -5.252585445 , -0.308958331 , 1.525406574 ],
[ -4.668404863 , 0.026245320 , -0.929656824 ],
[ 0.876876426 , -0.329105732 , 2.359811410 ],
[ -0.708581316 , -0.452407073 , 3.108240602 ],
[ 5.765387926 , 0.155627547 , -1.128075158 ],
[ 6.458258549 , -0.031573530 , 0.039652507 ],
[ 5.836642756 , -0.213180550 , 1.225999310 ],
[ 4.381001732 , -0.205459536 , 1.237959001 ],
[ 3.770134526 , -0.008913767 , 0.013109028 ],
[ 4.383744093 , 0.176239188 , -1.205417098 ],
[ 6.555914486 , -0.419950938 , 2.517000917 ],
[ 3.712619652 , -0.362031655 , 2.261654302 ],
[ 3.785514664 , 0.342506569 , -2.253367774 ],
[ 6.245693692 , 0.288458351 , -2.002300903 ],
[ 2.728278285 , 0.000000000 , 0.000000000 ],
[ 7.535503241 , -0.024779868 , -0.049650000 ],
[ 6.286334271 , 0.354841198 , 3.233018736 ],
[ 6.275226343 , -1.373098243 , 2.962397530 ],
[ 7.633685969 , -0.403617008 , 2.368385087 ]],
'positions 2.0':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.738685058 , -0.157889771 , 1.110355410 ],
[ -2.139452884 , -0.168053559 , 0.964712563 ],
[ -2.629497187 , -0.008665792 , -0.331201352 ],
[ -1.918309833 , 0.152634753 , -1.454844039 ],
[ -0.614262216 , 0.143659867 , -1.193547121 ],
[ -3.152980999 , -0.310697201 , 1.883518666 ],
[ -4.247466012 , -0.237200328 , 1.144874976 ],
[ -3.994250734 , -0.056604504 , -0.187030096 ],
[ -0.136179412 , -0.289433845 , 2.300428025 ],
[ 0.055161346 , 0.265959015 , -2.035655088 ],
[ -5.252585445 , -0.308958331 , 1.525406574 ],
[ -4.668404863 , 0.026245320 , -0.929656824 ],
[ 0.876876426 , -0.329105732 , 2.359811410 ],
[ -0.708581316 , -0.452407073 , 3.108240602 ],
[ 6.674814021 , 0.155627547 , -1.128075158 ],
[ 7.367684644 , -0.031573530 , 0.039652507 ],
[ 6.746068851 , -0.213180550 , 1.225999310 ],
[ 5.290427827 , -0.205459536 , 1.237959001 ],
[ 4.679560621 , -0.008913767 , 0.013109028 ],
[ 5.293170188 , 0.176239188 , -1.205417098 ],
[ 7.465340581 , -0.419950938 , 2.517000917 ],
[ 4.622045747 , -0.362031655 , 2.261654302 ],
[ 4.694940759 , 0.342506569 , -2.253367774 ],
[ 7.155119787 , 0.288458351 , -2.002300903 ],
[ 3.637704380 , 0.000000000 , 0.000000000 ],
[ 8.444929336 , -0.024779868 , -0.049650000 ],
[ 7.195760366 , 0.354841198 , 3.233018736 ],
[ 7.184652438 , -1.373098243 , 2.962397530 ],
[ 8.543112064 , -0.403617008 , 2.368385087 ]]},
'Ammonia_dimer': {
'description': "Complex, S22, S26, 2 h-bonds",
'name': "Ammonia_dimer",
's26_number': "01",
'interaction energy CC':-0.1375,
'interaction energies s22x5':[-0.1045,-0.1362,-0.1023,-0.0481,-0.0156],
'offset': 0.0013,
'symbols': 'NHHHNHHH',
'magmoms': None,
'dimer atoms': [4,4],
# Optimisation level: CCSD(T)/cc-pVQZ
'positions':[[ -1.578718, -0.046611, 0.000000],
[ -2.158621, 0.136396, -0.809565],
[ -2.158621, 0.136396, 0.809565],
[ -0.849471, 0.658193, 0.000000],
[ 1.578718, 0.046611, 0.000000],
[ 2.158621, -0.136396, -0.809565],
[ 0.849471, -0.658193, 0.000000],
[ 2.158621, -0.136396, 0.809565]],
'positions 0.9':[[ -0.535020551 , -0.861570006 , 0.000000000 ],
[ -1.142058700 , -0.825740733 , -0.809565000 ],
[ -1.142058700 , -0.825740733 , 0.809565000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 2.253621272 , 0.000000000 , 0.000000000 ],
[ 2.860659421 , -0.035829274 , -0.809565000 ],
[ 1.718600721 , -0.861570006 , 0.000000000 ],
[ 2.860659421 , -0.035829274 , 0.809565000 ]],
'positions 1.0':[[ -0.535020551000000 , -0.861570006000000 , 0.000000000000000 ],
[ -1.142058700000000 , -0.825740733000000 , -0.809565000000000 ],
[ -1.142058700000000 , -0.825740733000000 , 0.809565000000000 ],
[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ 2.504023635454546 , 0.000000000000000 , 0.000000000000000 ],
[ 3.111061784454545 , -0.035829274000000 , -0.809565000000000 ],
[ 1.969003084454545 , -0.861570006000000 , 0.000000000000000 ],
[ 3.111061784454545 , -0.035829274000000 , 0.809565000000000 ]],
'positions 1.2':[[ -0.535020551 , -0.861570006 , 0.000000000 ],
[ -1.142058700 , -0.825740733 , -0.809565000 ],
[ -1.142058700 , -0.825740733 , 0.809565000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 3.004828362 , 0.000000000 , 0.000000000 ],
[ 3.611866511 , -0.035829274 , -0.809565000 ],
[ 2.469807811 , -0.861570006 , 0.000000000 ],
[ 3.611866511 , -0.035829274 , 0.809565000 ]],
'positions 1.5':[[ -0.535020551 , -0.861570006 , 0.000000000 ],
[ -1.142058700 , -0.825740733 , -0.809565000 ],
[ -1.142058700 , -0.825740733 , 0.809565000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 3.756035452 , 0.000000000 , 0.000000000 ],
[ 4.363073601 , -0.035829274 , -0.809565000 ],
[ 3.221014901 , -0.861570006 , 0.000000000 ],
[ 4.363073601 , -0.035829274 , 0.809565000 ]],
'positions 2.0':[[ -0.535020551 , -0.861570006 , 0.000000000 ],
[ -1.142058700 , -0.825740733 , -0.809565000 ],
[ -1.142058700 , -0.825740733 , 0.809565000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 5.008047270 , 0.000000000 , 0.000000000 ],
[ 5.615085419 , -0.035829274 , -0.809565000 ],
[ 4.473026719 , -0.861570006 , 0.000000000 ],
[ 5.615085419 , -0.035829274 , 0.809565000 ]]},
'Benzene-methane_complex': {
'description': "Complex, S22, S26, stack, dispersion bonded",
'name': "Benzene-methane_complex",
's26_number': "10",
'interaction energy CC':-0.0629,
'interaction energies s22x5':[-0.0473,-0.0650,-0.0490,-0.0208,-0.0052],
'offset': -0.0021,
'symbols': 'CCCCCCHHHHHHCHHHH',
'magmoms': None,
'dimer atoms': [12,5],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ 1.3932178, 0.0362913, -0.6332803],
[ 0.7280364, -1.1884015, -0.6333017],
[ -0.6651797, -1.2247077, -0.6332803],
[ -1.3932041, -0.0362972, -0.6333017],
[ -0.7280381, 1.1884163, -0.6332803],
[ 0.6651677, 1.2246987, -0.6333017],
[ 2.4742737, 0.0644484, -0.6317240],
[ 1.2929588, -2.1105409, -0.6317401],
[ -1.1813229, -2.1750081, -0.6317240],
[ -2.4742614, -0.0644647, -0.6317401],
[ -1.2929508, 2.1105596, -0.6317240],
[ 1.1813026, 2.1750056, -0.6317401],
[ 0.0000000, 0.0000000, 3.0826195],
[ 0.5868776, 0.8381742, 3.4463772],
[ -1.0193189, 0.0891638, 3.4463772],
[ 0.0000000, 0.0000000, 1.9966697],
                 [ 0.4324413, -0.9273380, 3.4463772]],
'positions 0.9':[[ 0.000011002 , 0.036291078 , -1.393218002 ],
[ -0.000011075 , -1.188401879 , -0.728035925 ],
[ 0.000010922 , -1.224707791 , 0.665180078 ],
[ -0.000011002 , -0.036296745 , 1.393204002 ],
[ 0.000011075 , 1.188416213 , 0.728037925 ],
[ -0.000010922 , 1.224699125 , -0.665168078 ],
[ 0.001567004 , 0.064448010 , -2.474274004 ],
[ 0.001550866 , -2.110540915 , -1.292958866 ],
[ 0.001566862 , -2.175007759 , 1.181323138 ],
[ 0.001550996 , -0.064464677 , 2.474261004 ],
[ 0.001567134 , 2.110560249 , 1.292950866 ],
[ 0.001551138 , 2.175006092 , -1.181303138 ],
[ 3.452913900 , -0.000000069 , 0.000000000 ],
[ 3.816671953 , 0.838173871 , -0.586878053 ],
[ 3.816671906 , 0.089163973 , 1.019318994 ],
[ 2.366964900 , 0.000000000 , 0.000000000 ],
[ 3.816671841 , -0.927338119 , -0.432440941 ]],
'positions 1.0':[[ 0.000011002000000 , 0.036291078000000 , -1.393218002000000 ],
[ -0.000011075000000 , -1.188401879000000 , -0.728035925000000 ],
[ 0.000010922000000 , -1.224707791000000 , 0.665180078000000 ],
[ -0.000011002000000 , -0.036296745000000 , 1.393204002000000 ],
[ 0.000011075000000 , 1.188416213000000 , 0.728037925000000 ],
[ -0.000010922000000 , 1.224699125000000 , -0.665168078000000 ],
[ 0.001567004000000 , 0.064448010000000 , -2.474274004000000 ],
[ 0.001550866000000 , -2.110540915000000 , -1.292958866000000 ],
[ 0.001566862000000 , -2.175007759000000 , 1.181323138000000 ],
[ 0.001550996000000 , -0.064464677000000 , 2.474261004000000 ],
[ 0.001567134000000 , 2.110560249000000 , 1.292950866000000 ],
[ 0.001551138000000 , 2.175006092000000 , -1.181303138000000 ],
[ 3.715910000000000 , -0.000000069000000 , 0.000000000000000 ],
[ 4.079668053000000 , 0.838173871000000 , -0.586878053000000 ],
[ 4.079668005999999 , 0.089163973000000 , 1.019318994000000 ],
[ 2.629961000000000 , 0.000000000000000 , 0.000000000000000 ],
[ 4.079667940999999 , -0.927338119000000 , -0.432440941000000 ]],
'positions 1.2':[[ 0.000011002 , 0.036291078 , -1.393218002 ],
[ -0.000011075 , -1.188401879 , -0.728035925 ],
[ 0.000010922 , -1.224707791 , 0.665180078 ],
[ -0.000011002 , -0.036296745 , 1.393204002 ],
[ 0.000011075 , 1.188416213 , 0.728037925 ],
[ -0.000010922 , 1.224699125 , -0.665168078 ],
[ 0.001567004 , 0.064448010 , -2.474274004 ],
[ 0.001550866 , -2.110540915 , -1.292958866 ],
[ 0.001566862 , -2.175007759 , 1.181323138 ],
[ 0.001550996 , -0.064464677 , 2.474261004 ],
[ 0.001567134 , 2.110560249 , 1.292950866 ],
[ 0.001551138 , 2.175006092 , -1.181303138 ],
[ 4.241902200 , -0.000000069 , 0.000000000 ],
[ 4.605660253 , 0.838173871 , -0.586878053 ],
[ 4.605660206 , 0.089163973 , 1.019318994 ],
[ 3.155953200 , 0.000000000 , 0.000000000 ],
[ 4.605660141 , -0.927338119 , -0.432440941 ]],
'positions 1.5':[[ 0.000011002 , 0.036291078 , -1.393218002 ],
[ -0.000011075 , -1.188401879 , -0.728035925 ],
[ 0.000010922 , -1.224707791 , 0.665180078 ],
[ -0.000011002 , -0.036296745 , 1.393204002 ],
[ 0.000011075 , 1.188416213 , 0.728037925 ],
[ -0.000010922 , 1.224699125 , -0.665168078 ],
[ 0.001567004 , 0.064448010 , -2.474274004 ],
[ 0.001550866 , -2.110540915 , -1.292958866 ],
[ 0.001566862 , -2.175007759 , 1.181323138 ],
[ 0.001550996 , -0.064464677 , 2.474261004 ],
[ 0.001567134 , 2.110560249 , 1.292950866 ],
[ 0.001551138 , 2.175006092 , -1.181303138 ],
[ 5.030890500 , -0.000000069 , 0.000000000 ],
[ 5.394648553 , 0.838173871 , -0.586878053 ],
[ 5.394648506 , 0.089163973 , 1.019318994 ],
[ 3.944941500 , 0.000000000 , 0.000000000 ],
[ 5.394648441 , -0.927338119 , -0.432440941 ]],
'positions 2.0':[[ 0.000011002 , 0.036291078 , -1.393218002 ],
[ -0.000011075 , -1.188401879 , -0.728035925 ],
[ 0.000010922 , -1.224707791 , 0.665180078 ],
[ -0.000011002 , -0.036296745 , 1.393204002 ],
[ 0.000011075 , 1.188416213 , 0.728037925 ],
[ -0.000010922 , 1.224699125 , -0.665168078 ],
[ 0.001567004 , 0.064448010 , -2.474274004 ],
[ 0.001550866 , -2.110540915 , -1.292958866 ],
[ 0.001566862 , -2.175007759 , 1.181323138 ],
[ 0.001550996 , -0.064464677 , 2.474261004 ],
[ 0.001567134 , 2.110560249 , 1.292950866 ],
[ 0.001551138 , 2.175006092 , -1.181303138 ],
[ 6.345871000 , -0.000000069 , 0.000000000 ],
[ 6.709629053 , 0.838173871 , -0.586878053 ],
[ 6.709629006 , 0.089163973 , 1.019318994 ],
[ 5.259922000 , 0.000000000 , 0.000000000 ],
[ 6.709628941 , -0.927338119 , -0.432440941 ]]},
'Benzene-ammonia_complex': {
'description': "Complex, S22, S26",
'name': "Benzene-ammonia_complex",
's26_number': "18",
'interaction energy CC':-0.1006,
'interaction energies s22x5':[-0.0885,-0.1019,-0.0759,-0.0369,-0.0121],
'offset': -0.0013,
'symbols': 'CCCCCCHHHHHHNHHH',
'magmoms': None,
'dimer atoms': [12,4],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ -0.7392810, 0.5158785, -1.2071079],
[ -1.4261442, 0.3965455, 0.0000000],
[ -0.7392810, 0.5158785, 1.2071079],
[ 0.6342269, 0.7546398, 1.2070735],
[ 1.3210434, 0.8737566, 0.0000000],
[ 0.6342269, 0.7546398, -1.2070735],
[ -1.2719495, 0.4206316, -2.1432894],
[ -2.4902205, 0.2052381, 0.0000000],
[ -1.2719495, 0.4206316, 2.1432894],
[ 1.1668005, 0.8474885, 2.1436950],
[ 2.3863585, 1.0596312, 0.0000000],
[ 1.1668005, 0.8474885, -2.1436950],
[ 0.1803930, -2.9491231, 0.0000000],
[ 0.7595495, -3.1459477, -0.8060729],
[ 0.7595495, -3.1459477, 0.8060729],
[ 0.0444167, -1.9449399, 0.0000000]],
'positions 0.9':[[ 0.000000000 , 0.000000000 , -1.207108000 ],
[ -0.094723910 , -0.690687169 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 1.207108000 ],
[ 0.189293052 , 1.381194838 , 1.207073000 ],
[ 0.284209467 , 2.071771374 , 0.000000000 ],
[ 0.189293052 , 1.381194838 , -1.207073000 ],
[ -0.070884435 , -0.536454706 , -2.143289000 ],
[ -0.235335157 , -1.762640796 , 0.000000000 ],
[ -0.070884435 , -0.536454706 , 2.143289000 ],
[ 0.262434233 , 1.916830087 , 2.143695000 ],
[ 0.430373810 , 3.143257869 , 0.000000000 ],
[ 0.262434233 , 1.916830087 , -2.143695000 ],
[ 3.322432676 , -0.175158455 , 0.000000000 ],
[ 3.685723470 , 0.316960994 , -0.806073000 ],
[ 3.685723470 , 0.316960994 , 0.806073000 ],
[ 2.324338249 , 0.000000000 , 0.000000000 ]],
'positions 1.0':[[ 0.000000000000000 , 0.000000000000000 , -1.207108000000000 ],
[ -0.094723910000000 , -0.690687169000000 , 0.000000000000000 ],
[ 0.000000000000000 , 0.000000000000000 , 1.207108000000000 ],
[ 0.189293052000000 , 1.381194838000000 , 1.207073000000000 ],
[ 0.284209467000000 , 2.071771374000000 , 0.000000000000000 ],
[ 0.189293052000000 , 1.381194838000000 , -1.207073000000000 ],
[ -0.070884435000000 , -0.536454706000000 , -2.143289000000000 ],
[ -0.235335157000000 , -1.762640796000000 , 0.000000000000000 ],
[ -0.070884435000000 , -0.536454706000000 , 2.143289000000000 ],
[ 0.262434233000000 , 1.916830087000000 , 2.143695000000000 ],
[ 0.430373810000000 , 3.143257869000000 , 0.000000000000000 ],
[ 0.262434233000000 , 1.916830087000000 , -2.143695000000000 ],
[ 3.580692481363636 , -0.175158455000000 , 0.000000000000000 ],
[ 3.943983275363637 , 0.316960994000000 , -0.806073000000000 ],
[ 3.943983275363637 , 0.316960994000000 , 0.806073000000000 ],
[ 2.582598054363637 , 0.000000000000000 , 0.000000000000000 ]],
'positions 1.2':[[ 0.000000000 , 0.000000000 , -1.207108000 ],
[ -0.094723910 , -0.690687169 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 1.207108000 ],
[ 0.189293052 , 1.381194838 , 1.207073000 ],
[ 0.284209467 , 2.071771374 , 0.000000000 ],
[ 0.189293052 , 1.381194838 , -1.207073000 ],
[ -0.070884435 , -0.536454706 , -2.143289000 ],
[ -0.235335157 , -1.762640796 , 0.000000000 ],
[ -0.070884435 , -0.536454706 , 2.143289000 ],
[ 0.262434233 , 1.916830087 , 2.143695000 ],
[ 0.430373810 , 3.143257869 , 0.000000000 ],
[ 0.262434233 , 1.916830087 , -2.143695000 ],
[ 4.097212092 , -0.175158455 , 0.000000000 ],
[ 4.460502886 , 0.316960994 , -0.806073000 ],
[ 4.460502886 , 0.316960994 , 0.806073000 ],
[ 3.099117665 , 0.000000000 , 0.000000000 ]],
'positions 1.5':[[ 0.000000000 , 0.000000000 , -1.207108000 ],
[ -0.094723910 , -0.690687169 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 1.207108000 ],
[ 0.189293052 , 1.381194838 , 1.207073000 ],
[ 0.284209467 , 2.071771374 , 0.000000000 ],
[ 0.189293052 , 1.381194838 , -1.207073000 ],
[ -0.070884435 , -0.536454706 , -2.143289000 ],
[ -0.235335157 , -1.762640796 , 0.000000000 ],
[ -0.070884435 , -0.536454706 , 2.143289000 ],
[ 0.262434233 , 1.916830087 , 2.143695000 ],
[ 0.430373810 , 3.143257869 , 0.000000000 ],
[ 0.262434233 , 1.916830087 , -2.143695000 ],
[ 4.871991508 , -0.175158455 , 0.000000000 ],
[ 5.235282302 , 0.316960994 , -0.806073000 ],
[ 5.235282302 , 0.316960994 , 0.806073000 ],
[ 3.873897081 , 0.000000000 , 0.000000000 ]],
'positions 2.0':[[ 0.000000000 , 0.000000000 , -1.207108000 ],
[ -0.094723910 , -0.690687169 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 1.207108000 ],
[ 0.189293052 , 1.381194838 , 1.207073000 ],
[ 0.284209467 , 2.071771374 , 0.000000000 ],
[ 0.189293052 , 1.381194838 , -1.207073000 ],
[ -0.070884435 , -0.536454706 , -2.143289000 ],
[ -0.235335157 , -1.762640796 , 0.000000000 ],
[ -0.070884435 , -0.536454706 , 2.143289000 ],
[ 0.262434233 , 1.916830087 , 2.143695000 ],
[ 0.430373810 , 3.143257869 , 0.000000000 ],
[ 0.262434233 , 1.916830087 , -2.143695000 ],
[ 6.163290535 , -0.175158455 , 0.000000000 ],
[ 6.526581329 , 0.316960994 , -0.806073000 ],
[ 6.526581329 , 0.316960994 , 0.806073000 ],
[ 5.165196108 , 0.000000000 , 0.000000000 ]]},
'Benzene_dimer_parallel_displaced': {
'description': "Complex, S22, S26, stack, dispersion bonded",
'name': "Benzene_dimer_parallel_displaced",
's26_number': "11",
'interaction energy CC':-0.1136,
'interaction energies s22x5':[-0.0065,-0.1219,-0.0833,-0.0230,-0.0030],
'offset': -0.0083,
'symbols': 'CCCCCCHHHHHHCCCCCCHHHHHH',
'magmoms': None,
'dimer atoms': [12,12],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ -1.0478252, -1.4216736, 0.0000000],
[ -1.4545034, -0.8554459, 1.2062048],
[ -1.4545034, -0.8554459, -1.2062048],
[ -2.2667970, 0.2771610, 1.2069539],
[ -2.6714781, 0.8450211, 0.0000000],
[ -2.2667970, 0.2771610, -1.2069539],
[ -1.1338534, -1.2920593, -2.1423150],
[ -2.5824943, 0.7163066, -2.1437977],
[ -3.3030422, 1.7232700, 0.0000000],
[ -2.5824943, 0.7163066, 2.1437977],
[ -1.1338534, -1.2920593, 2.1423150],
[ -0.4060253, -2.2919049, 0.0000000],
[ 1.0478252, 1.4216736, 0.0000000],
[ 1.4545034, 0.8554459, -1.2062048],
[ 1.4545034, 0.8554459, 1.2062048],
[ 2.2667970, -0.2771610, -1.2069539],
[ 2.6714781, -0.8450211, 0.0000000],
[ 2.2667970, -0.2771610, 1.2069539],
[ 0.4060253, 2.2919049, 0.0000000],
[ 1.1338534, 1.2920593, 2.1423150],
[ 2.5824943, -0.7163066, 2.1437977],
[ 3.3030422, -1.7232700, 0.0000000],
[ 2.5824943, -0.7163066, -2.1437977],
[ 1.1338534, 1.2920593, -2.1423150]],
'positions 0.9':[[ 0.629051507 , -1.244058476 , 0.000000000 ],
[ 0.314072291 , -0.622134657 , 1.206205000 ],
[ 0.314072291 , -0.622134657 , -1.206205000 ],
[ -0.314813547 , 0.621699240 , 1.206954000 ],
[ -0.627568995 , 1.244929310 , 0.000000000 ],
[ -0.314813547 , 0.621699240 , -1.206954000 ],
[ 0.563930576 , -1.102778154 , -2.142315000 ],
[ -0.559388819 , 1.104085746 , -2.143798000 ],
[ -1.116894124 , 2.209685917 , 0.000000000 ],
[ -0.559388819 , 1.104085746 , 2.143798000 ],
[ 0.563930576 , -1.102778154 , 2.142315000 ],
[ 1.129721711 , -2.202462660 , 0.000000000 ],
[ 2.759649224 , 1.244058476 , 0.000000000 ],
[ 3.074628440 , 0.622134657 , -1.206205000 ],
[ 3.074628440 , 0.622134657 , 1.206205000 ],
[ 3.703514278 , -0.621699240 , -1.206954000 ],
[ 4.016269727 , -1.244929310 , 0.000000000 ],
[ 3.703514278 , -0.621699240 , 1.206954000 ],
[ 2.258979020 , 2.202462660 , 0.000000000 ],
[ 2.824770156 , 1.102778154 , 2.142315000 ],
[ 3.948089550 , -1.104085746 , 2.143798000 ],
[ 4.505594855 , -2.209685917 , 0.000000000 ],
[ 3.948089550 , -1.104085746 , -2.143798000 ],
[ 2.824770156 , 1.102778154 , -2.142315000 ]],
'positions 1.0':[[ 0.629051507000000 , -1.244058476000000 , 0.000000000000000 ],
[ 0.314072291000000 , -0.622134657000000 , 1.206205000000000 ],
[ 0.314072291000000 , -0.622134657000000 , -1.206205000000000 ],
[ -0.314813547000000 , 0.621699240000000 , 1.206954000000000 ],
[ -0.627568995000000 , 1.244929310000000 , 0.000000000000000 ],
[ -0.314813547000000 , 0.621699240000000 , -1.206954000000000 ],
[ 0.563930576000000 , -1.102778154000000 , -2.142315000000000 ],
[ -0.559388819000000 , 1.104085746000000 , -2.143798000000000 ],
[ -1.116894124000000 , 2.209685917000000 , 0.000000000000000 ],
[ -0.559388819000000 , 1.104085746000000 , 2.143798000000000 ],
[ 0.563930576000000 , -1.102778154000000 , 2.142315000000000 ],
[ 1.129721711000000 , -2.202462660000000 , 0.000000000000000 ],
[ 3.136171527545454 , 1.244058476000000 , 0.000000000000000 ],
[ 3.451150743545455 , 0.622134657000000 , -1.206205000000000 ],
[ 3.451150743545455 , 0.622134657000000 , 1.206205000000000 ],
[ 4.080036581545454 , -0.621699240000000 , -1.206954000000000 ],
[ 4.392792030545455 , -1.244929310000000 , 0.000000000000000 ],
[ 4.080036581545454 , -0.621699240000000 , 1.206954000000000 ],
[ 2.635501323545455 , 2.202462660000000 , 0.000000000000000 ],
[ 3.201292459545455 , 1.102778154000000 , 2.142315000000000 ],
[ 4.324611853545455 , -1.104085746000000 , 2.143798000000000 ],
[ 4.882117158545454 , -2.209685917000000 , 0.000000000000000 ],
[ 4.324611853545455 , -1.104085746000000 , -2.143798000000000 ],
[ 3.201292459545455 , 1.102778154000000 , -2.142315000000000 ]],
'positions 1.2':[[ 0.629051507 , -1.244058476 , 0.000000000 ],
[ 0.314072291 , -0.622134657 , 1.206205000 ],
[ 0.314072291 , -0.622134657 , -1.206205000 ],
[ -0.314813547 , 0.621699240 , 1.206954000 ],
[ -0.627568995 , 1.244929310 , 0.000000000 ],
[ -0.314813547 , 0.621699240 , -1.206954000 ],
[ 0.563930576 , -1.102778154 , -2.142315000 ],
[ -0.559388819 , 1.104085746 , -2.143798000 ],
[ -1.116894124 , 2.209685917 , 0.000000000 ],
[ -0.559388819 , 1.104085746 , 2.143798000 ],
[ 0.563930576 , -1.102778154 , 2.142315000 ],
[ 1.129721711 , -2.202462660 , 0.000000000 ],
[ 3.889216135 , 1.244058476 , 0.000000000 ],
[ 4.204195351 , 0.622134657 , -1.206205000 ],
[ 4.204195351 , 0.622134657 , 1.206205000 ],
[ 4.833081189 , -0.621699240 , -1.206954000 ],
[ 5.145836638 , -1.244929310 , 0.000000000 ],
[ 4.833081189 , -0.621699240 , 1.206954000 ],
[ 3.388545931 , 2.202462660 , 0.000000000 ],
[ 3.954337067 , 1.102778154 , 2.142315000 ],
[ 5.077656461 , -1.104085746 , 2.143798000 ],
[ 5.635161766 , -2.209685917 , 0.000000000 ],
[ 5.077656461 , -1.104085746 , -2.143798000 ],
[ 3.954337067 , 1.102778154 , -2.142315000 ]],
'positions 1.5':[[ 0.629051507 , -1.244058476 , 0.000000000 ],
[ 0.314072291 , -0.622134657 , 1.206205000 ],
[ 0.314072291 , -0.622134657 , -1.206205000 ],
[ -0.314813547 , 0.621699240 , 1.206954000 ],
[ -0.627568995 , 1.244929310 , 0.000000000 ],
[ -0.314813547 , 0.621699240 , -1.206954000 ],
[ 0.563930576 , -1.102778154 , -2.142315000 ],
[ -0.559388819 , 1.104085746 , -2.143798000 ],
[ -1.116894124 , 2.209685917 , 0.000000000 ],
[ -0.559388819 , 1.104085746 , 2.143798000 ],
[ 0.563930576 , -1.102778154 , 2.142315000 ],
[ 1.129721711 , -2.202462660 , 0.000000000 ],
[ 5.018783046 , 1.244058476 , 0.000000000 ],
[ 5.333762262 , 0.622134657 , -1.206205000 ],
[ 5.333762262 , 0.622134657 , 1.206205000 ],
[ 5.962648100 , -0.621699240 , -1.206954000 ],
[ 6.275403549 , -1.244929310 , 0.000000000 ],
[ 5.962648100 , -0.621699240 , 1.206954000 ],
[ 4.518112842 , 2.202462660 , 0.000000000 ],
[ 5.083903978 , 1.102778154 , 2.142315000 ],
[ 6.207223372 , -1.104085746 , 2.143798000 ],
[ 6.764728677 , -2.209685917 , 0.000000000 ],
[ 6.207223372 , -1.104085746 , -2.143798000 ],
[ 5.083903978 , 1.102778154 , -2.142315000 ]],
'positions 2.0':[[ 0.629051507 , -1.244058476 , 0.000000000 ],
[ 0.314072291 , -0.622134657 , 1.206205000 ],
[ 0.314072291 , -0.622134657 , -1.206205000 ],
[ -0.314813547 , 0.621699240 , 1.206954000 ],
[ -0.627568995 , 1.244929310 , 0.000000000 ],
[ -0.314813547 , 0.621699240 , -1.206954000 ],
[ 0.563930576 , -1.102778154 , -2.142315000 ],
[ -0.559388819 , 1.104085746 , -2.143798000 ],
[ -1.116894124 , 2.209685917 , 0.000000000 ],
[ -0.559388819 , 1.104085746 , 2.143798000 ],
[ 0.563930576 , -1.102778154 , 2.142315000 ],
[ 1.129721711 , -2.202462660 , 0.000000000 ],
[ 6.901394563 , 1.244058476 , 0.000000000 ],
[ 7.216373779 , 0.622134657 , -1.206205000 ],
[ 7.216373779 , 0.622134657 , 1.206205000 ],
[ 7.845259617 , -0.621699240 , -1.206954000 ],
[ 8.158015066 , -1.244929310 , 0.000000000 ],
[ 7.845259617 , -0.621699240 , 1.206954000 ],
[ 6.400724359 , 2.202462660 , 0.000000000 ],
[ 6.966515495 , 1.102778154 , 2.142315000 ],
[ 8.089834889 , -1.104085746 , 2.143798000 ],
[ 8.647340194 , -2.209685917 , 0.000000000 ],
[ 8.089834889 , -1.104085746 , -2.143798000 ],
[ 6.966515495 , 1.102778154 , -2.142315000 ]]},
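# Note on the layout of each entry (an inference from the data itself, not a
# statement from the original authors): 'positions' holds the optimised
# equilibrium geometry, while 'positions 0.9' through 'positions 2.0' hold
# geometries in which the intermonomer separation is scaled by the given
# factor. The five values in 'interaction energies s22x5' appear to
# correspond, in order, to the scalings 0.9, 1.0, 1.2, 1.5 and 2.0.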
'Benzene_dimer_T-shaped': {
'description': "Complex, S22, S26",
'name': "Benzene_dimer_T-shaped",
's26_number': "20",
'interaction energy CC':-0.1175,
'interaction energies s22x5':[-0.0954,-0.1214,-0.0976,-0.0486,-0.0152],
'offset': -0.0039,
'symbols': 'CCCCCCHHHHHHCCCCCCHHHHHH',
'magmoms': None,
'dimer atoms': [12,12],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ 0.0000000, 0.0000000, 1.0590353],
[ 0.0000000, -1.2060084, 1.7576742],
[ 0.0000000, -1.2071767, 3.1515905],
[ 0.0000000, 0.0000000, 3.8485751],
[ 0.0000000, 1.2071767, 3.1515905],
[ 0.0000000, 1.2060084, 1.7576742],
[ 0.0000000, 0.0000000, -0.0215805],
[ 0.0000000, -2.1416387, 1.2144217],
[ 0.0000000, -2.1435657, 3.6929953],
[ 0.0000000, 0.0000000, 4.9301499],
[ 0.0000000, 2.1435657, 3.6929953],
[ 0.0000000, 2.1416387, 1.2144217],
[ -1.3940633, 0.0000000, -2.4541524],
[ -0.6970468, 1.2072378, -2.4546277],
[ 0.6970468, 1.2072378, -2.4546277],
[ 1.3940633, 0.0000000, -2.4541524],
[ 0.6970468, -1.2072378, -2.4546277],
[ -0.6970468, -1.2072378, -2.4546277],
[ -2.4753995, 0.0000000, -2.4503221],
[ -1.2382321, 2.1435655, -2.4536764],
[ 1.2382321, 2.1435655, -2.4536764],
[ 2.4753995, 0.0000000, -2.4503221],
[ 1.2382321, -2.1435655, -2.4536764],
[ -1.2382321, -2.1435655, -2.4536764]],
'positions 0.9':[[ -1.080615000 , 0.000000000 , 0.000000000 ],
[ -1.779254000 , -1.206008000 , 0.000000000 ],
[ -3.173171000 , -1.207177000 , 0.000000000 ],
[ -3.870155000 , 0.000000000 , 0.000000000 ],
[ -3.173171000 , 1.207177000 , 0.000000000 ],
[ -1.779254000 , 1.206008000 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -1.236002000 , -2.141639000 , 0.000000000 ],
[ -3.714575000 , -2.143566000 , 0.000000000 ],
[ -4.951730000 , 0.000000000 , 0.000000000 ],
[ -3.714575000 , 2.143566000 , 0.000000000 ],
[ -1.236002000 , 2.141639000 , 0.000000000 ],
[ 2.189283067 , 0.000000000 , -1.394063000 ],
[ 2.189759067 , 1.207238000 , -0.697047000 ],
[ 2.189759067 , 1.207238000 , 0.697047000 ],
[ 2.189283067 , 0.000000000 , 1.394063000 ],
[ 2.189759067 , -1.207238000 , 0.697047000 ],
[ 2.189759067 , -1.207238000 , -0.697047000 ],
[ 2.185453067 , 0.000000000 , -2.475399000 ],
[ 2.188807067 , 2.143565000 , -1.238232000 ],
[ 2.188807067 , 2.143565000 , 1.238232000 ],
[ 2.185453067 , 0.000000000 , 2.475399000 ],
[ 2.188807067 , -2.143565000 , 1.238232000 ],
[ 2.188807067 , -2.143565000 , -1.238232000 ]],
'positions 1.0':[[ -1.080615000000000 , 0.000000000000000 , 0.000000000000000 ],
[ -1.779254000000000 , -1.206008000000000 , 0.000000000000000 ],
[ -3.173171000000000 , -1.207177000000000 , 0.000000000000000 ],
[ -3.870155000000000 , 0.000000000000000 , 0.000000000000000 ],
[ -3.173171000000000 , 1.207177000000000 , 0.000000000000000 ],
[ -1.779254000000000 , 1.206008000000000 , 0.000000000000000 ],
[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ -1.236002000000000 , -2.141639000000000 , 0.000000000000000 ],
[ -3.714575000000000 , -2.143566000000000 , 0.000000000000000 ],
[ -4.951730000000000 , 0.000000000000000 , 0.000000000000000 ],
[ -3.714575000000000 , 2.143566000000000 , 0.000000000000000 ],
[ -1.236002000000000 , 2.141639000000000 , 0.000000000000000 ],
[ 2.432572000272727 , 0.000000000000000 , -1.394063000000000 ],
[ 2.433048000272727 , 1.207238000000000 , -0.697047000000000 ],
[ 2.433048000272727 , 1.207238000000000 , 0.697047000000000 ],
[ 2.432572000272727 , 0.000000000000000 , 1.394063000000000 ],
[ 2.433048000272727 , -1.207238000000000 , 0.697047000000000 ],
[ 2.433048000272727 , -1.207238000000000 , -0.697047000000000 ],
[ 2.428742000272727 , 0.000000000000000 , -2.475399000000000 ],
[ 2.432096000272727 , 2.143565000000000 , -1.238232000000000 ],
[ 2.432096000272727 , 2.143565000000000 , 1.238232000000000 ],
[ 2.428742000272727 , 0.000000000000000 , 2.475399000000000 ],
[ 2.432096000272727 , -2.143565000000000 , 1.238232000000000 ],
[ 2.432096000272727 , -2.143565000000000 , -1.238232000000000 ]],
'positions 1.2':[[ -1.080615000 , 0.000000000 , 0.000000000 ],
[ -1.779254000 , -1.206008000 , 0.000000000 ],
[ -3.173171000 , -1.207177000 , 0.000000000 ],
[ -3.870155000 , 0.000000000 , 0.000000000 ],
[ -3.173171000 , 1.207177000 , 0.000000000 ],
[ -1.779254000 , 1.206008000 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -1.236002000 , -2.141639000 , 0.000000000 ],
[ -3.714575000 , -2.143566000 , 0.000000000 ],
[ -4.951730000 , 0.000000000 , 0.000000000 ],
[ -3.714575000 , 2.143566000 , 0.000000000 ],
[ -1.236002000 , 2.141639000 , 0.000000000 ],
[ 2.919149867 , 0.000000000 , -1.394063000 ],
[ 2.919625867 , 1.207238000 , -0.697047000 ],
[ 2.919625867 , 1.207238000 , 0.697047000 ],
[ 2.919149867 , 0.000000000 , 1.394063000 ],
[ 2.919625867 , -1.207238000 , 0.697047000 ],
[ 2.919625867 , -1.207238000 , -0.697047000 ],
[ 2.915319867 , 0.000000000 , -2.475399000 ],
[ 2.918673867 , 2.143565000 , -1.238232000 ],
[ 2.918673867 , 2.143565000 , 1.238232000 ],
[ 2.915319867 , 0.000000000 , 2.475399000 ],
[ 2.918673867 , -2.143565000 , 1.238232000 ],
[ 2.918673867 , -2.143565000 , -1.238232000 ]],
'positions 1.5':[[ -1.080615000 , 0.000000000 , 0.000000000 ],
[ -1.779254000 , -1.206008000 , 0.000000000 ],
[ -3.173171000 , -1.207177000 , 0.000000000 ],
[ -3.870155000 , 0.000000000 , 0.000000000 ],
[ -3.173171000 , 1.207177000 , 0.000000000 ],
[ -1.779254000 , 1.206008000 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -1.236002000 , -2.141639000 , 0.000000000 ],
[ -3.714575000 , -2.143566000 , 0.000000000 ],
[ -4.951730000 , 0.000000000 , 0.000000000 ],
[ -3.714575000 , 2.143566000 , 0.000000000 ],
[ -1.236002000 , 2.141639000 , 0.000000000 ],
[ 3.649016667 , 0.000000000 , -1.394063000 ],
[ 3.649492667 , 1.207238000 , -0.697047000 ],
[ 3.649492667 , 1.207238000 , 0.697047000 ],
[ 3.649016667 , 0.000000000 , 1.394063000 ],
[ 3.649492667 , -1.207238000 , 0.697047000 ],
[ 3.649492667 , -1.207238000 , -0.697047000 ],
[ 3.645186667 , 0.000000000 , -2.475399000 ],
[ 3.648540667 , 2.143565000 , -1.238232000 ],
[ 3.648540667 , 2.143565000 , 1.238232000 ],
[ 3.645186667 , 0.000000000 , 2.475399000 ],
[ 3.648540667 , -2.143565000 , 1.238232000 ],
[ 3.648540667 , -2.143565000 , -1.238232000 ]],
'positions 2.0':[[ -1.080615000 , 0.000000000 , 0.000000000 ],
[ -1.779254000 , -1.206008000 , 0.000000000 ],
[ -3.173171000 , -1.207177000 , 0.000000000 ],
[ -3.870155000 , 0.000000000 , 0.000000000 ],
[ -3.173171000 , 1.207177000 , 0.000000000 ],
[ -1.779254000 , 1.206008000 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -1.236002000 , -2.141639000 , 0.000000000 ],
[ -3.714575000 , -2.143566000 , 0.000000000 ],
[ -4.951730000 , 0.000000000 , 0.000000000 ],
[ -3.714575000 , 2.143566000 , 0.000000000 ],
[ -1.236002000 , 2.141639000 , 0.000000000 ],
[ 4.865461333 , 0.000000000 , -1.394063000 ],
[ 4.865937333 , 1.207238000 , -0.697047000 ],
[ 4.865937333 , 1.207238000 , 0.697047000 ],
[ 4.865461333 , 0.000000000 , 1.394063000 ],
[ 4.865937333 , -1.207238000 , 0.697047000 ],
[ 4.865937333 , -1.207238000 , -0.697047000 ],
[ 4.861631333 , 0.000000000 , -2.475399000 ],
[ 4.864985333 , 2.143565000 , -1.238232000 ],
[ 4.864985333 , 2.143565000 , 1.238232000 ],
[ 4.861631333 , 0.000000000 , 2.475399000 ],
[ 4.864985333 , -2.143565000 , 1.238232000 ],
[ 4.864985333 , -2.143565000 , -1.238232000 ]]},
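# A minimal usage sketch (commented out so this data module stays import-clean).
# It assumes this dictionary is bound to a name such as `data` and that ASE is
# installed; both the name and the ASE dependency are assumptions, not part of
# this file.
#
#   from ase import Atoms
#
#   entry = data['Benzene_dimer_T-shaped']
#   dimer = Atoms(entry['symbols'], positions=entry['positions'])
#   n_a, n_b = entry['dimer atoms']     # sizes of the two monomers
#   monomer_a = dimer[:n_a]             # first monomer (here: one benzene)
#   monomer_b = dimer[n_a:]             # second monomer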
'Benzene-HCN_complex': {
'description': "Complex, S22, S26",
'name': "Benzene-HCN_complex",
's26_number': "19",
'interaction energy CC':-0.1973,
'interaction energies s22x5':[-0.1743,-0.1960,-0.1596,-0.0906,-0.0369],
'offset': 0.0013,
'symbols': 'CCCCCCHHHHHHNCH',
'magmoms': None,
'dimer atoms': [12,3],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ -0.7097741, -0.9904230, 1.2077018],
[ -1.4065340, -0.9653529, 0.0000000],
[ -0.7097741, -0.9904230, -1.2077018],
[ 0.6839651, -1.0405105, -1.2078652],
[ 1.3809779, -1.0655522, 0.0000000],
[ 0.6839651, -1.0405105, 1.2078652],
[ -1.2499482, -0.9686280, 2.1440507],
[ -2.4869197, -0.9237060, 0.0000000],
[ -1.2499482, -0.9686280, -2.1440507],
[ 1.2242882, -1.0580753, -2.1442563],
[ 2.4615886, -1.1029818, 0.0000000],
[ 1.2242882, -1.0580753, 2.1442563],
[ -0.0034118, 3.5353926, 0.0000000],
[ 0.0751963, 2.3707040, 0.0000000],
[ 0.1476295, 1.3052847, 0.0000000]],
'positions 0.9':[[ -0.023100946 , 0.696978594 , 1.207702000 ],
[ -0.046160335 , 1.393808033 , 0.000000000 ],
[ -0.023100946 , 0.696978594 , -1.207702000 ],
[ 0.023085816 , -0.696895106 , -1.207865000 ],
[ 0.046190594 , -1.393975010 , 0.000000000 ],
[ 0.023085816 , -0.696895106 , 1.207865000 ],
[ -0.038624622 , 1.237369182 , 2.144051000 ],
[ -0.079148681 , 2.474493071 , 0.000000000 ],
[ -0.038624622 , 1.237369182 , -2.144051000 ],
[ 0.042839694 , -1.237142510 , -2.144256000 ],
[ 0.083401415 , -2.474593580 , 0.000000000 ],
[ 0.042839694 , -1.237142510 , 2.144256000 ],
[ 4.308034683 , 0.304536859 , 0.000000000 ],
[ 3.151543935 , 0.145763954 , 0.000000000 ],
[ 2.093660645 , 0.000000000 , 0.000000000 ]],
'positions 1.0':[[ -0.023100946000000 , 0.696978594000000 , 1.207702000000000 ],
[ -0.046160335000000 , 1.393808033000000 , 0.000000000000000 ],
[ -0.023100946000000 , 0.696978594000000 , -1.207702000000000 ],
[ 0.023085816000000 , -0.696895106000000 , -1.207865000000000 ],
[ 0.046190594000000 , -1.393975010000000 , 0.000000000000000 ],
[ 0.023085816000000 , -0.696895106000000 , 1.207865000000000 ],
[ -0.038624622000000 , 1.237369182000000 , 2.144051000000000 ],
[ -0.079148681000000 , 2.474493071000000 , 0.000000000000000 ],
[ -0.038624622000000 , 1.237369182000000 , -2.144051000000000 ],
[ 0.042839694000000 , -1.237142510000000 , -2.144256000000000 ],
[ 0.083401415000000 , -2.474593580000000 , 0.000000000000000 ],
[ 0.042839694000000 , -1.237142510000000 , 2.144256000000000 ],
[ 4.540663643636363 , 0.304536859000000 , 0.000000000000000 ],
[ 3.384172895636364 , 0.145763954000000 , 0.000000000000000 ],
[ 2.326289605636364 , 0.000000000000000 , 0.000000000000000 ]],
'positions 1.2':[[ -0.023100946 , 0.696978594 , 1.207702000 ],
[ -0.046160335 , 1.393808033 , 0.000000000 ],
[ -0.023100946 , 0.696978594 , -1.207702000 ],
[ 0.023085816 , -0.696895106 , -1.207865000 ],
[ 0.046190594 , -1.393975010 , 0.000000000 ],
[ 0.023085816 , -0.696895106 , 1.207865000 ],
[ -0.038624622 , 1.237369182 , 2.144051000 ],
[ -0.079148681 , 2.474493071 , 0.000000000 ],
[ -0.038624622 , 1.237369182 , -2.144051000 ],
[ 0.042839694 , -1.237142510 , -2.144256000 ],
[ 0.083401415 , -2.474593580 , 0.000000000 ],
[ 0.042839694 , -1.237142510 , 2.144256000 ],
[ 5.005921565 , 0.304536859 , 0.000000000 ],
[ 3.849430817 , 0.145763954 , 0.000000000 ],
[ 2.791547527 , 0.000000000 , 0.000000000 ]],
'positions 1.5':[[ -0.023100946 , 0.696978594 , 1.207702000 ],
[ -0.046160335 , 1.393808033 , 0.000000000 ],
[ -0.023100946 , 0.696978594 , -1.207702000 ],
[ 0.023085816 , -0.696895106 , -1.207865000 ],
[ 0.046190594 , -1.393975010 , 0.000000000 ],
[ 0.023085816 , -0.696895106 , 1.207865000 ],
[ -0.038624622 , 1.237369182 , 2.144051000 ],
[ -0.079148681 , 2.474493071 , 0.000000000 ],
[ -0.038624622 , 1.237369182 , -2.144051000 ],
[ 0.042839694 , -1.237142510 , -2.144256000 ],
[ 0.083401415 , -2.474593580 , 0.000000000 ],
[ 0.042839694 , -1.237142510 , 2.144256000 ],
[ 5.703808447 , 0.304536859 , 0.000000000 ],
[ 4.547317699 , 0.145763954 , 0.000000000 ],
[ 3.489434409 , 0.000000000 , 0.000000000 ]],
'positions 2.0':[[ -0.023100946 , 0.696978594 , 1.207702000 ],
[ -0.046160335 , 1.393808033 , 0.000000000 ],
[ -0.023100946 , 0.696978594 , -1.207702000 ],
[ 0.023085816 , -0.696895106 , -1.207865000 ],
[ 0.046190594 , -1.393975010 , 0.000000000 ],
[ 0.023085816 , -0.696895106 , 1.207865000 ],
[ -0.038624622 , 1.237369182 , 2.144051000 ],
[ -0.079148681 , 2.474493071 , 0.000000000 ],
[ -0.038624622 , 1.237369182 , -2.144051000 ],
[ 0.042839694 , -1.237142510 , -2.144256000 ],
[ 0.083401415 , -2.474593580 , 0.000000000 ],
[ 0.042839694 , -1.237142510 , 2.144256000 ],
[ 6.866953250 , 0.304536859 , 0.000000000 ],
[ 5.710462502 , 0.145763954 , 0.000000000 ],
[ 4.652579212 , 0.000000000 , 0.000000000 ]]},
'Benzene-water_complex': {
'description': "Complex, S22, S26",
'name': "Benzene-water_complex",
's26_number': "17",
'interaction energy CC':-0.1427,
'interaction energies s22x5':[-0.1305,-0.1418,-0.1071,-0.0564,-0.0212],
'offset': 0.0009,
'symbols': 'CCCCCCHHHHHHOHH',
'magmoms': None,
'dimer atoms': [12,3],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ 0.7806117, -0.6098875, -1.2075426],
[ 0.4784039, 0.7510406, -1.2079040],
[ 0.3276592, 1.4318573, 0.0000000],
[ 0.4784039, 0.7510406, 1.2079040],
[ 0.7806117, -0.6098875, 1.2075426],
[ 0.9321510, -1.2899614, 0.0000000],
[ 0.8966688, -1.1376051, -2.1441482],
[ 0.3573895, 1.2782091, -2.1440546],
[ 0.0918593, 2.4871407, 0.0000000],
[ 0.3573895, 1.2782091, 2.1440546],
[ 0.8966688, -1.1376051, 2.1441482],
[ 1.1690064, -2.3451668, 0.0000000],
[ -2.7885270, -0.2744854, 0.0000000],
[ -2.6229114, -1.2190831, 0.0000000],
[ -1.9015103, 0.0979110, 0.0000000]],
'positions 0.9':[[ 0.068736158 , 1.392383840 , -1.207543000 ],
[ 0.000000000 , 0.000000000 , -1.207904000 ],
[ -0.034807303 , -0.696435878 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 1.207904000 ],
[ 0.068736158 , 1.392383840 , 1.207543000 ],
[ 0.102581137 , 2.088313342 , 0.000000000 ],
[ 0.096477114 , 1.931999350 , -2.144148000 ],
[ -0.022815407 , -0.540397951 , -2.144055000 ],
[ -0.086694943 , -1.776497744 , 0.000000000 ],
[ -0.022815407 , -0.540397951 , 2.144055000 ],
[ 0.096477114 , 1.931999350 , 2.144148000 ],
[ 0.153430751 , 3.168579194 , 0.000000000 ],
[ 3.175061618 , 0.124369730 , 0.000000000 ],
[ 3.265337861 , 1.079117991 , 0.000000000 ],
[ 2.221117117 , 0.000000000 , 0.000000000 ]],
'positions 1.0':[[ 0.068736158000000 , 1.392383840000000 , -1.207543000000000 ],
[ 0.000000000000000 , 0.000000000000000 , -1.207904000000000 ],
[ -0.034807303000000 , -0.696435878000000 , 0.000000000000000 ],
[ 0.000000000000000 , 0.000000000000000 , 1.207904000000000 ],
[ 0.068736158000000 , 1.392383840000000 , 1.207543000000000 ],
[ 0.102581137000000 , 2.088313342000000 , 0.000000000000000 ],
[ 0.096477114000000 , 1.931999350000000 , -2.144148000000000 ],
[ -0.022815407000000 , -0.540397951000000 , -2.144055000000000 ],
[ -0.086694943000000 , -1.776497744000000 , 0.000000000000000 ],
[ -0.022815407000000 , -0.540397951000000 , 2.144055000000000 ],
[ 0.096477114000000 , 1.931999350000000 , 2.144148000000000 ],
[ 0.153430751000000 , 3.168579194000000 , 0.000000000000000 ],
[ 3.421852408818182 , 0.124369730000000 , 0.000000000000000 ],
[ 3.512128651818182 , 1.079117991000000 , 0.000000000000000 ],
[ 2.467907907818182 , 0.000000000000000 , 0.000000000000000 ]],
'positions 1.2':[[ 0.068736158 , 1.392383840 , -1.207543000 ],
[ 0.000000000 , 0.000000000 , -1.207904000 ],
[ -0.034807303 , -0.696435878 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 1.207904000 ],
[ 0.068736158 , 1.392383840 , 1.207543000 ],
[ 0.102581137 , 2.088313342 , 0.000000000 ],
[ 0.096477114 , 1.931999350 , -2.144148000 ],
[ -0.022815407 , -0.540397951 , -2.144055000 ],
[ -0.086694943 , -1.776497744 , 0.000000000 ],
[ -0.022815407 , -0.540397951 , 2.144055000 ],
[ 0.096477114 , 1.931999350 , 2.144148000 ],
[ 0.153430751 , 3.168579194 , 0.000000000 ],
[ 3.915433991 , 0.124369730 , 0.000000000 ],
[ 4.005710234 , 1.079117991 , 0.000000000 ],
[ 2.961489490 , 0.000000000 , 0.000000000 ]],
'positions 1.5':[[ 0.068736158 , 1.392383840 , -1.207543000 ],
[ 0.000000000 , 0.000000000 , -1.207904000 ],
[ -0.034807303 , -0.696435878 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 1.207904000 ],
[ 0.068736158 , 1.392383840 , 1.207543000 ],
[ 0.102581137 , 2.088313342 , 0.000000000 ],
[ 0.096477114 , 1.931999350 , -2.144148000 ],
[ -0.022815407 , -0.540397951 , -2.144055000 ],
[ -0.086694943 , -1.776497744 , 0.000000000 ],
[ -0.022815407 , -0.540397951 , 2.144055000 ],
[ 0.096477114 , 1.931999350 , 2.144148000 ],
[ 0.153430751 , 3.168579194 , 0.000000000 ],
[ 4.655806363 , 0.124369730 , 0.000000000 ],
[ 4.746082606 , 1.079117991 , 0.000000000 ],
[ 3.701861862 , 0.000000000 , 0.000000000 ]],
'positions 2.0':[[ 0.068736158 , 1.392383840 , -1.207543000 ],
[ 0.000000000 , 0.000000000 , -1.207904000 ],
[ -0.034807303 , -0.696435878 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 1.207904000 ],
[ 0.068736158 , 1.392383840 , 1.207543000 ],
[ 0.102581137 , 2.088313342 , 0.000000000 ],
[ 0.096477114 , 1.931999350 , -2.144148000 ],
[ -0.022815407 , -0.540397951 , -2.144055000 ],
[ -0.086694943 , -1.776497744 , 0.000000000 ],
[ -0.022815407 , -0.540397951 , 2.144055000 ],
[ 0.096477114 , 1.931999350 , 2.144148000 ],
[ 0.153430751 , 3.168579194 , 0.000000000 ],
[ 5.889760317 , 0.124369730 , 0.000000000 ],
[ 5.980036560 , 1.079117991 , 0.000000000 ],
[ 4.935815816 , 0.000000000 , 0.000000000 ]]},
'Ethene_dimer': {
'description': "Complex, S22, S26, stack, dispersion bonded",
'name': "Ethene_dimer",
's26_number': "09",
'interaction energy CC':-0.0650,
'interaction energies s22x5':[-0.0295,-0.0642,-0.0351,-0.0087,-0.0013],
'offset': 0.0008,
'symbols': 'CCHHHHCCHHHH',
'magmoms': None,
'dimer atoms': [6,6],
# Optimisation level: CCSD(T)/cc-pVQZ
'positions':[[ -0.471925, -0.471925, -1.859111],
[ 0.471925, 0.471925, -1.859111],
[ -0.872422, -0.872422, -0.936125],
[ 0.872422, 0.872422, -0.936125],
[ -0.870464, -0.870464, -2.783308],
[ 0.870464, 0.870464, -2.783308],
[ -0.471925, 0.471925, 1.859111],
[ 0.471925, -0.471925, 1.859111],
[ -0.872422, 0.872422, 0.936125],
[ 0.872422, -0.872422, 0.936125],
[ -0.870464, 0.870464, 2.783308],
[ 0.870464, -0.870464, 2.783308]],
'positions 0.9':[[ 0.000000000 , -0.471925000 , 0.471925000 ],
[ 0.000000000 , 0.471925000 , -0.471925000 ],
[ 0.922986000 , -0.872422000 , 0.872422000 ],
[ 0.922986000 , 0.872422000 , -0.872422000 ],
[ -0.924197000 , -0.870464000 , 0.870464000 ],
[ -0.924197000 , 0.870464000 , -0.870464000 ],
[ 3.346399800 , 0.471925000 , 0.471925000 ],
[ 3.346399800 , -0.471925000 , -0.471925000 ],
[ 2.423413800 , 0.872422000 , 0.872422000 ],
[ 2.423413800 , -0.872422000 , -0.872422000 ],
[ 4.270596800 , 0.870464000 , 0.870464000 ],
[ 4.270596800 , -0.870464000 , -0.870464000 ]],
'positions 1.0':[[ 0.000000000000000 , -0.471925000000000 , 0.471925000000000 ],
[ 0.000000000000000 , 0.471925000000000 , -0.471925000000000 ],
[ 0.922986000000000 , -0.872422000000000 , 0.872422000000000 ],
[ 0.922986000000000 , 0.872422000000000 , -0.872422000000000 ],
[ -0.924197000000000 , -0.870464000000000 , 0.870464000000000 ],
[ -0.924197000000000 , 0.870464000000000 , -0.870464000000000 ],
[ 3.718222000000000 , 0.471925000000000 , 0.471925000000000 ],
[ 3.718222000000000 , -0.471925000000000 , -0.471925000000000 ],
[ 2.795236000000000 , 0.872422000000000 , 0.872422000000000 ],
[ 2.795236000000000 , -0.872422000000000 , -0.872422000000000 ],
[ 4.642418999999999 , 0.870464000000000 , 0.870464000000000 ],
[ 4.642418999999999 , -0.870464000000000 , -0.870464000000000 ]],
'positions 1.2':[[ 0.000000000 , -0.471925000 , 0.471925000 ],
[ 0.000000000 , 0.471925000 , -0.471925000 ],
[ 0.922986000 , -0.872422000 , 0.872422000 ],
[ 0.922986000 , 0.872422000 , -0.872422000 ],
[ -0.924197000 , -0.870464000 , 0.870464000 ],
[ -0.924197000 , 0.870464000 , -0.870464000 ],
[ 4.461866400 , 0.471925000 , 0.471925000 ],
[ 4.461866400 , -0.471925000 , -0.471925000 ],
[ 3.538880400 , 0.872422000 , 0.872422000 ],
[ 3.538880400 , -0.872422000 , -0.872422000 ],
[ 5.386063400 , 0.870464000 , 0.870464000 ],
[ 5.386063400 , -0.870464000 , -0.870464000 ]],
'positions 1.5':[[ 0.000000000 , -0.471925000 , 0.471925000 ],
[ 0.000000000 , 0.471925000 , -0.471925000 ],
[ 0.922986000 , -0.872422000 , 0.872422000 ],
[ 0.922986000 , 0.872422000 , -0.872422000 ],
[ -0.924197000 , -0.870464000 , 0.870464000 ],
[ -0.924197000 , 0.870464000 , -0.870464000 ],
[ 5.577333000 , 0.471925000 , 0.471925000 ],
[ 5.577333000 , -0.471925000 , -0.471925000 ],
[ 4.654347000 , 0.872422000 , 0.872422000 ],
[ 4.654347000 , -0.872422000 , -0.872422000 ],
[ 6.501530000 , 0.870464000 , 0.870464000 ],
[ 6.501530000 , -0.870464000 , -0.870464000 ]],
'positions 2.0':[[ 0.000000000 , -0.471925000 , 0.471925000 ],
[ 0.000000000 , 0.471925000 , -0.471925000 ],
[ 0.922986000 , -0.872422000 , 0.872422000 ],
[ 0.922986000 , 0.872422000 , -0.872422000 ],
[ -0.924197000 , -0.870464000 , 0.870464000 ],
[ -0.924197000 , 0.870464000 , -0.870464000 ],
[ 7.436444000 , 0.471925000 , 0.471925000 ],
[ 7.436444000 , -0.471925000 , -0.471925000 ],
[ 6.513458000 , 0.872422000 , 0.872422000 ],
[ 6.513458000 , -0.872422000 , -0.872422000 ],
[ 8.360641000 , 0.870464000 , 0.870464000 ],
[ 8.360641000 , -0.870464000 , -0.870464000 ]]},
'Ethene-ethyne_complex': {
'description': "Complex, S22, S26",
'name': "Ethene-ethyne_complex",
's26_number': "16",
'interaction energy CC':-0.0655,
'interaction energies s22x5':[-0.0507,-0.0646,-0.0468,-0.0212,-0.0065],
'offset': 0.0009,
'symbols': 'CCHHHHCCHH',
'magmoms': None,
'dimer atoms': [6,4],
# Optimisation level: CCSD(T)/cc-pVQZ
'positions':[[ 0.000000, -0.667578, -2.124659],
[ 0.000000, 0.667578, -2.124659],
[ 0.923621, -1.232253, -2.126185],
[ -0.923621, -1.232253, -2.126185],
[ -0.923621, 1.232253, -2.126185],
[ 0.923621, 1.232253, -2.126185],
[ 0.000000, 0.000000, 2.900503],
[ 0.000000, 0.000000, 1.693240],
[ 0.000000, 0.000000, 0.627352],
[ 0.000000, 0.000000, 3.963929]],
'positions 0.9':[[ 0.000000000 , -0.667578000 , 0.000000000 ],
[ 0.000000000 , 0.667578000 , 0.000000000 ],
[ -0.001526000 , -1.232253000 , -0.923621000 ],
[ -0.001526000 , -1.232253000 , 0.923621000 ],
[ -0.001526000 , 1.232253000 , 0.923621000 ],
[ -0.001526000 , 1.232253000 , -0.923621000 ],
[ 4.749960900 , 0.000000000 , 0.000000000 ],
[ 3.542697900 , 0.000000000 , 0.000000000 ],
[ 2.476809900 , 0.000000000 , 0.000000000 ],
[ 5.813386900 , 0.000000000 , 0.000000000 ]],
'positions 1.0':[[ 0.000000000000000 , -0.667578000000000 , 0.000000000000000 ],
[ 0.000000000000000 , 0.667578000000000 , 0.000000000000000 ],
[ -0.001526000000000 , -1.232253000000000 , -0.923621000000000 ],
[ -0.001526000000000 , -1.232253000000000 , 0.923621000000000 ],
[ -0.001526000000000 , 1.232253000000000 , 0.923621000000000 ],
[ -0.001526000000000 , 1.232253000000000 , -0.923621000000000 ],
[ 5.025162000000000 , 0.000000000000000 , 0.000000000000000 ],
[ 3.817899000000000 , 0.000000000000000 , 0.000000000000000 ],
[ 2.752011000000000 , 0.000000000000000 , 0.000000000000000 ],
[ 6.088588000000001 , 0.000000000000000 , 0.000000000000000 ]],
'positions 1.2':[[ 0.000000000 , -0.667578000 , 0.000000000 ],
[ 0.000000000 , 0.667578000 , 0.000000000 ],
[ -0.001526000 , -1.232253000 , -0.923621000 ],
[ -0.001526000 , -1.232253000 , 0.923621000 ],
[ -0.001526000 , 1.232253000 , 0.923621000 ],
[ -0.001526000 , 1.232253000 , -0.923621000 ],
[ 5.575564200 , 0.000000000 , 0.000000000 ],
[ 4.368301200 , 0.000000000 , 0.000000000 ],
[ 3.302413200 , 0.000000000 , 0.000000000 ],
[ 6.638990200 , 0.000000000 , 0.000000000 ]],
'positions 1.5':[[ 0.000000000 , -0.667578000 , 0.000000000 ],
[ 0.000000000 , 0.667578000 , 0.000000000 ],
[ -0.001526000 , -1.232253000 , -0.923621000 ],
[ -0.001526000 , -1.232253000 , 0.923621000 ],
[ -0.001526000 , 1.232253000 , 0.923621000 ],
[ -0.001526000 , 1.232253000 , -0.923621000 ],
[ 6.401167500 , 0.000000000 , 0.000000000 ],
[ 5.193904500 , 0.000000000 , 0.000000000 ],
[ 4.128016500 , 0.000000000 , 0.000000000 ],
[ 7.464593500 , 0.000000000 , 0.000000000 ]],
'positions 2.0':[[ 0.000000000 , -0.667578000 , 0.000000000 ],
[ 0.000000000 , 0.667578000 , 0.000000000 ],
[ -0.001526000 , -1.232253000 , -0.923621000 ],
[ -0.001526000 , -1.232253000 , 0.923621000 ],
[ -0.001526000 , 1.232253000 , 0.923621000 ],
[ -0.001526000 , 1.232253000 , -0.923621000 ],
[ 7.777173000 , 0.000000000 , 0.000000000 ],
[ 6.569910000 , 0.000000000 , 0.000000000 ],
[ 5.504022000 , 0.000000000 , 0.000000000 ],
[ 8.840599000 , 0.000000000 , 0.000000000 ]]},
'Formamide_dimer': {
'description': "Complex, S22, S26, 2 h-bonds, double h-bond",
'name': "Formamide_dimer",
's26_number': "04",
'interaction energy CC':-0.6990,
'interaction energies s22x5':[-0.6132,-0.6917,-0.5811,-0.3512,-0.1522],
'offset': 0.0073,
'symbols': 'CONHHHCONHHH',
'magmoms': None,
'dimer atoms': [6,6],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ -2.018649, 0.052883, 0.000000],
[ -1.452200, 1.143634, 0.000000],
[ -1.407770, -1.142484, 0.000000],
[ -1.964596, -1.977036, 0.000000],
[ -0.387244, -1.207782, 0.000000],
[ -3.117061, -0.013701, 0.000000],
[ 2.018649, -0.052883, 0.000000],
[ 1.452200, -1.143634, 0.000000],
[ 1.407770, 1.142484, 0.000000],
[ 1.964596, 1.977036, 0.000000],
[ 0.387244, 1.207782, 0.000000],
[ 3.117061, 0.013701, 0.000000]],
'positions 0.9':[[ -0.604120150 , -1.070346233 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.035273679 , -2.286277608 , 0.000000000 ],
[ -0.620847527 , -3.100915874 , 0.000000000 ],
[ 0.982356530 , -2.387103713 , 0.000000000 ],
[ -1.704185444 , -1.098607493 , 0.000000000 ],
[ 3.242982655 , -1.316757480 , 0.000000000 ],
[ 2.638862505 , -2.387103713 , 0.000000000 ],
[ 2.674136184 , -0.100826104 , 0.000000000 ],
[ 3.259710032 , 0.713812161 , 0.000000000 ],
[ 1.656505975 , 0.000000000 , 0.000000000 ],
[ 4.343047949 , -1.288496220 , 0.000000000 ]],
'positions 1.0':[[ -0.604120150000000 , -1.070346233000000 , 0.000000000000000 ],
[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ -0.035273679000000 , -2.286277608000000 , 0.000000000000000 ],
[ -0.620847527000000 , -3.100915874000000 , 0.000000000000000 ],
[ 0.982356530000000 , -2.387103713000000 , 0.000000000000000 ],
[ -1.704185444000000 , -1.098607493000000 , 0.000000000000000 ],
[ 3.427038874545455 , -1.316757480000000 , 0.000000000000000 ],
[ 2.822918724545455 , -2.387103713000000 , 0.000000000000000 ],
[ 2.858192403545455 , -0.100826104000000 , 0.000000000000000 ],
[ 3.443766251545455 , 0.713812161000000 , 0.000000000000000 ],
[ 1.840562194545454 , 0.000000000000000 , 0.000000000000000 ],
[ 4.527104168545454 , -1.288496220000000 , 0.000000000000000 ]],
'positions 1.2':[[ -0.604120150 , -1.070346233 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.035273679 , -2.286277608 , 0.000000000 ],
[ -0.620847527 , -3.100915874 , 0.000000000 ],
[ 0.982356530 , -2.387103713 , 0.000000000 ],
[ -1.704185444 , -1.098607493 , 0.000000000 ],
[ 3.795151314 , -1.316757480 , 0.000000000 ],
[ 3.191031164 , -2.387103713 , 0.000000000 ],
[ 3.226304843 , -0.100826104 , 0.000000000 ],
[ 3.811878691 , 0.713812161 , 0.000000000 ],
[ 2.208674634 , 0.000000000 , 0.000000000 ],
[ 4.895216608 , -1.288496220 , 0.000000000 ]],
'positions 1.5':[[ -0.604120150 , -1.070346233 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.035273679 , -2.286277608 , 0.000000000 ],
[ -0.620847527 , -3.100915874 , 0.000000000 ],
[ 0.982356530 , -2.387103713 , 0.000000000 ],
[ -1.704185444 , -1.098607493 , 0.000000000 ],
[ 4.347319973 , -1.316757480 , 0.000000000 ],
[ 3.743199823 , -2.387103713 , 0.000000000 ],
[ 3.778473502 , -0.100826104 , 0.000000000 ],
[ 4.364047350 , 0.713812161 , 0.000000000 ],
[ 2.760843293 , 0.000000000 , 0.000000000 ],
[ 5.447385267 , -1.288496220 , 0.000000000 ]],
'positions 2.0':[[ -0.604120150 , -1.070346233 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.035273679 , -2.286277608 , 0.000000000 ],
[ -0.620847527 , -3.100915874 , 0.000000000 ],
[ 0.982356530 , -2.387103713 , 0.000000000 ],
[ -1.704185444 , -1.098607493 , 0.000000000 ],
[ 5.267601070 , -1.316757480 , 0.000000000 ],
[ 4.663480920 , -2.387103713 , 0.000000000 ],
[ 4.698754599 , -0.100826104 , 0.000000000 ],
[ 5.284328447 , 0.713812161 , 0.000000000 ],
[ 3.681124390 , 0.000000000 , 0.000000000 ],
[ 6.367666364 , -1.288496220 , 0.000000000 ]]},
'Formic_acid_dimer': {
'description': "Complex, S22, S26, 2 h-bonds, double h-bond",
'name': "Formic_acid_dimer",
's26_number': "03",
'interaction energy CC':-0.8152,
'interaction energies s22x5':[-0.7086,-0.8061,-0.6773,-0.4007,-0.1574],
'offset': 0.0091,
'symbols': 'COOHHCOOHH',
'magmoms': None,
'dimer atoms': [5,5],
# Optimisation level: CCSD(T)/cc-pVTZ
'positions':[[ -1.888896, -0.179692, 0.000000],
[ -1.493280, 1.073689, 0.000000],
[ -1.170435, -1.166590, 0.000000],
[ -2.979488, -0.258829, 0.000000],
[ -0.498833, 1.107195, 0.000000],
[ 1.888896, 0.179692, 0.000000],
[ 1.493280, -1.073689, 0.000000],
[ 1.170435, 1.166590, 0.000000],
[ 2.979488, 0.258829, 0.000000],
[ 0.498833, -1.107195, 0.000000]],
'positions 0.9':[[ -1.434944263 , -1.236643950 , 0.000000000 ],
[ -0.995009531 , 0.001876693 , 0.000000000 ],
[ -0.752030700 , -2.248465543 , 0.000000000 ],
[ -2.527660580 , -1.276950582 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 2.186205474 , -1.011821594 , 0.000000000 ],
[ 1.746270742 , -2.250342236 , 0.000000000 ],
[ 1.503291911 , 0.000000000 , 0.000000000 ],
[ 3.278921791 , -0.971514961 , 0.000000000 ],
[ 0.751261211 , -2.248465543 , 0.000000000 ]],
'positions 1.0':[[ -1.434944263000000 , -1.236643950000000 , 0.000000000000000 ],
[ -0.995009531000000 , 0.001876693000000 , 0.000000000000000 ],
[ -0.752030700000000 , -2.248465543000000 , 0.000000000000000 ],
[ -2.527660580000000 , -1.276950582000000 , 0.000000000000000 ],
[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ 2.353237908636364 , -1.011821594000000 , 0.000000000000000 ],
[ 1.913303176636364 , -2.250342236000000 , 0.000000000000000 ],
[ 1.670324345636364 , 0.000000000000000 , 0.000000000000000 ],
[ 3.445954225636364 , -0.971514961000000 , 0.000000000000000 ],
[ 0.918293645636364 , -2.248465543000000 , 0.000000000000000 ]],
'positions 1.2':[[ -1.434944263 , -1.236643950 , 0.000000000 ],
[ -0.995009531 , 0.001876693 , 0.000000000 ],
[ -0.752030700 , -2.248465543 , 0.000000000 ],
[ -2.527660580 , -1.276950582 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 2.687302778 , -1.011821594 , 0.000000000 ],
[ 2.247368046 , -2.250342236 , 0.000000000 ],
[ 2.004389215 , 0.000000000 , 0.000000000 ],
[ 3.780019095 , -0.971514961 , 0.000000000 ],
[ 1.252358515 , -2.248465543 , 0.000000000 ]],
'positions 1.5':[[ -1.434944263 , -1.236643950 , 0.000000000 ],
[ -0.995009531 , 0.001876693 , 0.000000000 ],
[ -0.752030700 , -2.248465543 , 0.000000000 ],
[ -2.527660580 , -1.276950582 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 3.188400082 , -1.011821594 , 0.000000000 ],
[ 2.748465350 , -2.250342236 , 0.000000000 ],
[ 2.505486519 , 0.000000000 , 0.000000000 ],
[ 4.281116399 , -0.971514961 , 0.000000000 ],
[ 1.753455819 , -2.248465543 , 0.000000000 ]],
'positions 2.0':[[ -1.434944263 , -1.236643950 , 0.000000000 ],
[ -0.995009531 , 0.001876693 , 0.000000000 ],
[ -0.752030700 , -2.248465543 , 0.000000000 ],
[ -2.527660580 , -1.276950582 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 4.023562255 , -1.011821594 , 0.000000000 ],
[ 3.583627523 , -2.250342236 , 0.000000000 ],
[ 3.340648692 , 0.000000000 , 0.000000000 ],
[ 5.116278572 , -0.971514961 , 0.000000000 ],
[ 2.588617992 , -2.248465543 , 0.000000000 ]]},
'Indole-benzene_complex_stack': {
'description': "Complex, S22, S26, stack, dispersion bonded",
'name': "Indole-benzene_complex_stack",
's26_number': "14",
'interaction energy CC':-0.1990,
'interaction energies s22x5':[-0.0924,-0.2246,-0.1565,-0.0468,-0.0043],
'offset': -0.0256,
'symbols': 'CCCCCCHHHHHHHCCHCCHCNCCHCHHH',
'magmoms': None,
'dimer atoms': [12,16],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ -0.0210742, 1.5318615, -1.3639345],
[ -1.2746794, 0.9741030, -1.6074097],
[ -1.3783055, -0.2256981, -2.3084154],
[ -0.2289426, -0.8664053, -2.7687944],
[ 1.0247882, -0.3035171, -2.5312410],
[ 1.1289996, 0.8966787, -1.8299830],
[ 0.0600740, 2.4565627, -0.8093957],
[ -2.1651002, 1.4654521, -1.2405676],
[ -2.3509735, -0.6616122, -2.4926698],
[ -0.3103419, -1.7955762, -3.3172704],
[ 1.9165847, -0.7940845, -2.8993942],
[ 2.1000347, 1.3326757, -1.6400420],
[ -2.9417647, 0.8953834, 2.2239054],
[ -2.0220674, 0.4258540, 1.9013549],
[ -0.8149418, 1.0740453, 2.1066982],
[ -0.7851529, 2.0443812, 2.5856086],
[ 0.3704286, 0.4492852, 1.6847458],
[ 1.7508619, 0.8038935, 1.7194004],
[ 2.1870108, 1.6998281, 2.1275903],
[ 2.4451359, -0.2310742, 1.1353313],
[ 1.5646462, -1.2137812, 0.7555384],
[ 0.2861214, -0.8269486, 1.0618752],
[ -0.9284667, -1.4853121, 0.8606937],
[ -0.9729200, -2.4554847, 0.3834013],
[ -2.0792848, -0.8417668, 1.2876443],
[ -3.0389974, -1.3203846, 1.1468400],
[ 1.8075741, -2.0366963, 0.2333038],
[ 3.5028794, -0.3485344, 0.969523]],
'positions 0.9':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.044485647 , -1.177978626 , 0.743160105 ],
[ -0.010824638 , -2.411208517 , 0.095333145 ],
[ 0.064150773 , -2.466933785 , -1.295623602 ],
[ 0.100950904 , -1.287437054 , -2.038959973 ],
[ 0.067356799 , -0.053500209 , -1.391376263 ],
[ -0.013797739 , 0.956881587 , 0.503348328 ],
[ -0.091346970 , -1.134458005 , 1.822398921 ],
[ -0.039754009 , -3.325680275 , 0.672358669 ],
[ 0.085389531 , -3.424849020 , -1.798373823 ],
[ 0.146442780 , -1.330172544 , -3.119514770 ],
[ 0.100852832 , 0.862456237 , -1.964945566 ],
[ 2.717766027 , -0.578056849 , 3.494904751 ],
[ 2.793508398 , -0.571969873 , 2.415753956 ],
[ 2.753054336 , 0.633650134 , 1.734349558 ],
[ 2.645935858 , 1.567038531 , 2.272036098 ],
[ 2.855804852 , 0.624347564 , 0.333339655 ],
[ 2.845637545 , 1.633662034 , -0.673499279 ],
[ 2.762013625 , 2.698030593 , -0.533251753 ],
[ 2.976224608 , 0.992808148 , -1.884517470 ],
[ 3.081930238 , -0.360086596 , -1.675422891 ],
[ 2.997750328 , -0.624347564 , -0.333339655 ],
[ 3.046288127 , -1.839842986 , 0.351754941 ],
[ 3.153106953 , -2.780217935 , -0.172940228 ],
[ 2.941516868 , -1.796211682 , 1.733036170 ],
[ 2.973148444 , -2.718261443 , 2.297634930 ],
[ 3.103876306 , -1.056446212 , -2.398978775 ],
[ 3.012441631 , 1.398036276 , -2.881807744 ]],
'positions 1.0':[[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ -0.044485647000000 , -1.177978626000000 , 0.743160105000000 ],
[ -0.010824638000000 , -2.411208517000000 , 0.095333145000000 ],
[ 0.064150773000000 , -2.466933785000000 , -1.295623602000000 ],
[ 0.100950904000000 , -1.287437054000000 , -2.038959973000000 ],
[ 0.067356799000000 , -0.053500209000000 , -1.391376263000000 ],
[ -0.013797739000000 , 0.956881587000000 , 0.503348328000000 ],
[ -0.091346970000000 , -1.134458005000000 , 1.822398921000000 ],
[ -0.039754009000000 , -3.325680275000000 , 0.672358669000000 ],
[ 0.085389531000000 , -3.424849020000000 , -1.798373823000000 ],
[ 0.146442780000000 , -1.330172544000000 , -3.119514770000000 ],
[ 0.100852832000000 , 0.862456237000000 , -1.964945566000000 ],
[ 3.042963537000000 , -0.578056849000000 , 3.494904751000000 ],
[ 3.118705908000000 , -0.571969873000000 , 2.415753956000000 ],
[ 3.078251846000000 , 0.633650134000000 , 1.734349558000000 ],
[ 2.971133368000000 , 1.567038531000000 , 2.272036098000000 ],
[ 3.181002362000000 , 0.624347564000000 , 0.333339655000000 ],
[ 3.170835055000000 , 1.633662034000000 , -0.673499279000000 ],
[ 3.087211135000000 , 2.698030593000000 , -0.533251753000000 ],
[ 3.301422118000000 , 0.992808148000000 , -1.884517470000000 ],
[ 3.407127748000000 , -0.360086596000000 , -1.675422891000000 ],
[ 3.322947838000000 , -0.624347564000000 , -0.333339655000000 ],
[ 3.371485637000000 , -1.839842986000000 , 0.351754941000000 ],
[ 3.478304463000000 , -2.780217935000000 , -0.172940228000000 ],
[ 3.266714378000000 , -1.796211682000000 , 1.733036170000000 ],
[ 3.298345954000000 , -2.718261443000000 , 2.297634930000000 ],
[ 3.429073816000000 , -1.056446212000000 , -2.398978775000000 ],
[ 3.337639141000000 , 1.398036276000000 , -2.881807744000000 ]],
'positions 1.2':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.044485647 , -1.177978626 , 0.743160105 ],
[ -0.010824638 , -2.411208517 , 0.095333145 ],
[ 0.064150773 , -2.466933785 , -1.295623602 ],
[ 0.100950904 , -1.287437054 , -2.038959973 ],
[ 0.067356799 , -0.053500209 , -1.391376263 ],
[ -0.013797739 , 0.956881587 , 0.503348328 ],
[ -0.091346970 , -1.134458005 , 1.822398921 ],
[ -0.039754009 , -3.325680275 , 0.672358669 ],
[ 0.085389531 , -3.424849020 , -1.798373823 ],
[ 0.146442780 , -1.330172544 , -3.119514770 ],
[ 0.100852832 , 0.862456237 , -1.964945566 ],
[ 3.693358557 , -0.578056849 , 3.494904751 ],
[ 3.769100928 , -0.571969873 , 2.415753956 ],
[ 3.728646866 , 0.633650134 , 1.734349558 ],
[ 3.621528388 , 1.567038531 , 2.272036098 ],
[ 3.831397382 , 0.624347564 , 0.333339655 ],
[ 3.821230075 , 1.633662034 , -0.673499279 ],
[ 3.737606155 , 2.698030593 , -0.533251753 ],
[ 3.951817138 , 0.992808148 , -1.884517470 ],
[ 4.057522768 , -0.360086596 , -1.675422891 ],
[ 3.973342858 , -0.624347564 , -0.333339655 ],
[ 4.021880657 , -1.839842986 , 0.351754941 ],
[ 4.128699483 , -2.780217935 , -0.172940228 ],
[ 3.917109398 , -1.796211682 , 1.733036170 ],
[ 3.948740974 , -2.718261443 , 2.297634930 ],
[ 4.079468836 , -1.056446212 , -2.398978775 ],
[ 3.988034161 , 1.398036276 , -2.881807744 ]],
'positions 1.5':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.044485647 , -1.177978626 , 0.743160105 ],
[ -0.010824638 , -2.411208517 , 0.095333145 ],
[ 0.064150773 , -2.466933785 , -1.295623602 ],
[ 0.100950904 , -1.287437054 , -2.038959973 ],
[ 0.067356799 , -0.053500209 , -1.391376263 ],
[ -0.013797739 , 0.956881587 , 0.503348328 ],
[ -0.091346970 , -1.134458005 , 1.822398921 ],
[ -0.039754009 , -3.325680275 , 0.672358669 ],
[ 0.085389531 , -3.424849020 , -1.798373823 ],
[ 0.146442780 , -1.330172544 , -3.119514770 ],
[ 0.100852832 , 0.862456237 , -1.964945566 ],
[ 4.668951087 , -0.578056849 , 3.494904751 ],
[ 4.744693458 , -0.571969873 , 2.415753956 ],
[ 4.704239396 , 0.633650134 , 1.734349558 ],
[ 4.597120918 , 1.567038531 , 2.272036098 ],
[ 4.806989912 , 0.624347564 , 0.333339655 ],
[ 4.796822605 , 1.633662034 , -0.673499279 ],
[ 4.713198685 , 2.698030593 , -0.533251753 ],
[ 4.927409668 , 0.992808148 , -1.884517470 ],
[ 5.033115298 , -0.360086596 , -1.675422891 ],
[ 4.948935388 , -0.624347564 , -0.333339655 ],
[ 4.997473187 , -1.839842986 , 0.351754941 ],
[ 5.104292013 , -2.780217935 , -0.172940228 ],
[ 4.892701928 , -1.796211682 , 1.733036170 ],
[ 4.924333504 , -2.718261443 , 2.297634930 ],
[ 5.055061366 , -1.056446212 , -2.398978775 ],
[ 4.963626691 , 1.398036276 , -2.881807744 ]],
'positions 2.0':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.044485647 , -1.177978626 , 0.743160105 ],
[ -0.010824638 , -2.411208517 , 0.095333145 ],
[ 0.064150773 , -2.466933785 , -1.295623602 ],
[ 0.100950904 , -1.287437054 , -2.038959973 ],
[ 0.067356799 , -0.053500209 , -1.391376263 ],
[ -0.013797739 , 0.956881587 , 0.503348328 ],
[ -0.091346970 , -1.134458005 , 1.822398921 ],
[ -0.039754009 , -3.325680275 , 0.672358669 ],
[ 0.085389531 , -3.424849020 , -1.798373823 ],
[ 0.146442780 , -1.330172544 , -3.119514770 ],
[ 0.100852832 , 0.862456237 , -1.964945566 ],
[ 6.294938637 , -0.578056849 , 3.494904751 ],
[ 6.370681008 , -0.571969873 , 2.415753956 ],
[ 6.330226946 , 0.633650134 , 1.734349558 ],
[ 6.223108468 , 1.567038531 , 2.272036098 ],
[ 6.432977462 , 0.624347564 , 0.333339655 ],
[ 6.422810155 , 1.633662034 , -0.673499279 ],
[ 6.339186235 , 2.698030593 , -0.533251753 ],
[ 6.553397218 , 0.992808148 , -1.884517470 ],
[ 6.659102848 , -0.360086596 , -1.675422891 ],
[ 6.574922938 , -0.624347564 , -0.333339655 ],
[ 6.623460737 , -1.839842986 , 0.351754941 ],
[ 6.730279563 , -2.780217935 , -0.172940228 ],
[ 6.518689478 , -1.796211682 , 1.733036170 ],
[ 6.550321054 , -2.718261443 , 2.297634930 ],
[ 6.681048916 , -1.056446212 , -2.398978775 ],
[ 6.589614241 , 1.398036276 , -2.881807744 ]]},
'Indole-benzene_T-shape_complex': {
'description': "Complex, S22, S26",
'name': "Indole-benzene_T-shape_complex",
's26_number': "21",
'interaction energy CC':-0.2437,
'interaction energies s22x5':[-0.2164,-0.2489,-0.2116,-0.1214,-0.0477],
'offset': -0.0052,
'symbols': 'CCCCCCHHHHHHHNCCCCCCCCHHHHHH',
'magmoms': None,
'dimer atoms': [12,16],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ 2.5118997, 1.6250148, 0.0000000],
[ 2.7130094, 0.9578537, -1.2082918],
[ 3.1177821, -0.3767436, -1.2083647],
[ 3.3213848, -1.0437307, 0.0000000],
[ 3.1177821, -0.3767436, 1.2083647],
[ 2.7130094, 0.9578537, 1.2082918],
[ 2.2024038, 2.6611358, 0.0000000],
[ 2.5511760, 1.4736908, -2.1445900],
[ 3.2702999, -0.8951406, -2.1448379],
[ 3.6368139, -2.0781521, 0.0000000],
[ 3.2702999, -0.8951406, 2.1448379],
[ 2.5511760, 1.4736908, 2.1445900],
[ 0.8065245, -0.4358866, 0.0000000],
[ -0.1442408, -0.7686927, 0.0000000],
[ -0.5161122, -2.0893220, 0.0000000],
[ -1.8898755, -2.1814495, 0.0000000],
[ -2.3932317, -0.8470830, 0.0000000],
[ -1.2640653, 0.0195887, 0.0000000],
[ -1.3896004, 1.4117668, 0.0000000],
[ -2.6726501, 1.9366450, 0.0000000],
[ -3.8054511, 1.0974790, 0.0000000],
[ -3.6798167, -0.2817209, 0.0000000],
[ 0.2310024, -2.8653173, 0.0000000],
[ -2.4585759, -3.0956052, 0.0000000],
[ -0.5188733, 2.0539520, 0.0000000],
[ -2.8077570, 3.0097859, 0.0000000],
[ -4.7905991, 1.5439372, 0.0000000],
[ -4.5580187, -0.9142916, 0.0000000]],
'positions 0.9':[[ -0.052652077 , -1.393225783 , 0.000000000 ],
[ -0.025543347 , -0.696940104 , -1.208292000 ],
[ 0.026348254 , 0.696724226 , -1.208365000 ],
[ 0.051042263 , 1.393657541 , 0.000000000 ],
[ 0.026348254 , 0.696724226 , 1.208365000 ],
[ -0.025543347 , -0.696940104 , 1.208292000 ],
[ -0.097430661 , -2.473655966 , 0.000000000 ],
[ -0.040509756 , -1.237360068 , -2.144590000 ],
[ 0.050955575 , 1.236531293 , -2.144838000 ],
[ 0.089657645 , 2.474412421 , 0.000000000 ],
[ 0.050955575 , 1.236531293 , 2.144838000 ],
[ -0.040509756 , -1.237360068 , 2.144590000 ],
[ 2.007797424 , 0.000000000 , 0.000000000 ],
[ 3.015114828 , 0.005056388 , 0.000000000 ],
[ 3.796769012 , 1.132604937 , 0.000000000 ],
[ 5.125653739 , 0.772354616 , 0.000000000 ],
[ 5.167047225 , -0.653193161 , 0.000000000 ],
[ 3.817202589 , -1.104920876 , 0.000000000 ],
[ 3.482542920 , -2.462094972 , 0.000000000 ],
[ 4.524735226 , -3.376178892 , 0.000000000 ],
[ 5.869058665 , -2.951641292 , 0.000000000 ],
[ 6.199398544 , -1.606705567 , 0.000000000 ],
[ 3.343074787 , 2.109594763 , 0.000000000 ],
[ 5.961043541 , 1.451489921 , 0.000000000 ],
[ 2.450153978 , -2.785730808 , 0.000000000 ],
[ 4.303017780 , -4.434822780 , 0.000000000 ],
[ 6.655123584 , -3.694570139 , 0.000000000 ],
[ 7.235724321 , -1.294593877 , 0.000000000 ]],
'positions 1.0':[[ -0.052652077000000 , -1.393225783000000 , 0.000000000000000 ],
[ -0.025543347000000 , -0.696940104000000 , -1.208292000000000 ],
[ 0.026348254000000 , 0.696724226000000 , -1.208365000000000 ],
[ 0.051042263000000 , 1.393657541000000 , 0.000000000000000 ],
[ 0.026348254000000 , 0.696724226000000 , 1.208365000000000 ],
[ -0.025543347000000 , -0.696940104000000 , 1.208292000000000 ],
[ -0.097430661000000 , -2.473655966000000 , 0.000000000000000 ],
[ -0.040509756000000 , -1.237360068000000 , -2.144590000000000 ],
[ 0.050955575000000 , 1.236531293000000 , -2.144838000000000 ],
[ 0.089657645000000 , 2.474412421000000 , 0.000000000000000 ],
[ 0.050955575000000 , 1.236531293000000 , 2.144838000000000 ],
[ -0.040509756000000 , -1.237360068000000 , 2.144590000000000 ],
[ 2.230886026727273 , 0.000000000000000 , 0.000000000000000 ],
[ 3.238203430727273 , 0.005056388000000 , 0.000000000000000 ],
[ 4.019857614727273 , 1.132604937000000 , 0.000000000000000 ],
[ 5.348742341727273 , 0.772354616000000 , 0.000000000000000 ],
[ 5.390135827727273 , -0.653193161000000 , 0.000000000000000 ],
[ 4.040291191727273 , -1.104920876000000 , 0.000000000000000 ],
[ 3.705631522727273 , -2.462094972000000 , 0.000000000000000 ],
[ 4.747823828727273 , -3.376178892000000 , 0.000000000000000 ],
[ 6.092147267727273 , -2.951641292000000 , 0.000000000000000 ],
[ 6.422487146727273 , -1.606705567000000 , 0.000000000000000 ],
[ 3.566163389727273 , 2.109594763000000 , 0.000000000000000 ],
[ 6.184132143727273 , 1.451489921000000 , 0.000000000000000 ],
[ 2.673242580727273 , -2.785730808000000 , 0.000000000000000 ],
[ 4.526106382727273 , -4.434822780000000 , 0.000000000000000 ],
[ 6.878212186727272 , -3.694570139000000 , 0.000000000000000 ],
[ 7.458812923727273 , -1.294593877000000 , 0.000000000000000 ]],
'positions 1.2':[[ -0.052652077 , -1.393225783 , 0.000000000 ],
[ -0.025543347 , -0.696940104 , -1.208292000 ],
[ 0.026348254 , 0.696724226 , -1.208365000 ],
[ 0.051042263 , 1.393657541 , 0.000000000 ],
[ 0.026348254 , 0.696724226 , 1.208365000 ],
[ -0.025543347 , -0.696940104 , 1.208292000 ],
[ -0.097430661 , -2.473655966 , 0.000000000 ],
[ -0.040509756 , -1.237360068 , -2.144590000 ],
[ 0.050955575 , 1.236531293 , -2.144838000 ],
[ 0.089657645 , 2.474412421 , 0.000000000 ],
[ 0.050955575 , 1.236531293 , 2.144838000 ],
[ -0.040509756 , -1.237360068 , 2.144590000 ],
[ 2.677063232 , 0.000000000 , 0.000000000 ],
[ 3.684380636 , 0.005056388 , 0.000000000 ],
[ 4.466034820 , 1.132604937 , 0.000000000 ],
[ 5.794919547 , 0.772354616 , 0.000000000 ],
[ 5.836313033 , -0.653193161 , 0.000000000 ],
[ 4.486468397 , -1.104920876 , 0.000000000 ],
[ 4.151808728 , -2.462094972 , 0.000000000 ],
[ 5.194001034 , -3.376178892 , 0.000000000 ],
[ 6.538324473 , -2.951641292 , 0.000000000 ],
[ 6.868664352 , -1.606705567 , 0.000000000 ],
[ 4.012340595 , 2.109594763 , 0.000000000 ],
[ 6.630309349 , 1.451489921 , 0.000000000 ],
[ 3.119419786 , -2.785730808 , 0.000000000 ],
[ 4.972283588 , -4.434822780 , 0.000000000 ],
[ 7.324389392 , -3.694570139 , 0.000000000 ],
[ 7.904990129 , -1.294593877 , 0.000000000 ]],
'positions 1.5':[[ -0.052652077 , -1.393225783 , 0.000000000 ],
[ -0.025543347 , -0.696940104 , -1.208292000 ],
[ 0.026348254 , 0.696724226 , -1.208365000 ],
[ 0.051042263 , 1.393657541 , 0.000000000 ],
[ 0.026348254 , 0.696724226 , 1.208365000 ],
[ -0.025543347 , -0.696940104 , 1.208292000 ],
[ -0.097430661 , -2.473655966 , 0.000000000 ],
[ -0.040509756 , -1.237360068 , -2.144590000 ],
[ 0.050955575 , 1.236531293 , -2.144838000 ],
[ 0.089657645 , 2.474412421 , 0.000000000 ],
[ 0.050955575 , 1.236531293 , 2.144838000 ],
[ -0.040509756 , -1.237360068 , 2.144590000 ],
[ 3.346329040 , 0.000000000 , 0.000000000 ],
[ 4.353646444 , 0.005056388 , 0.000000000 ],
[ 5.135300628 , 1.132604937 , 0.000000000 ],
[ 6.464185355 , 0.772354616 , 0.000000000 ],
[ 6.505578841 , -0.653193161 , 0.000000000 ],
[ 5.155734205 , -1.104920876 , 0.000000000 ],
[ 4.821074536 , -2.462094972 , 0.000000000 ],
[ 5.863266842 , -3.376178892 , 0.000000000 ],
[ 7.207590281 , -2.951641292 , 0.000000000 ],
[ 7.537930160 , -1.606705567 , 0.000000000 ],
[ 4.681606403 , 2.109594763 , 0.000000000 ],
[ 7.299575157 , 1.451489921 , 0.000000000 ],
[ 3.788685594 , -2.785730808 , 0.000000000 ],
[ 5.641549396 , -4.434822780 , 0.000000000 ],
[ 7.993655200 , -3.694570139 , 0.000000000 ],
[ 8.574255937 , -1.294593877 , 0.000000000 ]],
'positions 2.0':[[ -0.052652077 , -1.393225783 , 0.000000000 ],
[ -0.025543347 , -0.696940104 , -1.208292000 ],
[ 0.026348254 , 0.696724226 , -1.208365000 ],
[ 0.051042263 , 1.393657541 , 0.000000000 ],
[ 0.026348254 , 0.696724226 , 1.208365000 ],
[ -0.025543347 , -0.696940104 , 1.208292000 ],
[ -0.097430661 , -2.473655966 , 0.000000000 ],
[ -0.040509756 , -1.237360068 , -2.144590000 ],
[ 0.050955575 , 1.236531293 , -2.144838000 ],
[ 0.089657645 , 2.474412421 , 0.000000000 ],
[ 0.050955575 , 1.236531293 , 2.144838000 ],
[ -0.040509756 , -1.237360068 , 2.144590000 ],
[ 4.461772054 , 0.000000000 , 0.000000000 ],
[ 5.469089458 , 0.005056388 , 0.000000000 ],
[ 6.250743642 , 1.132604937 , 0.000000000 ],
[ 7.579628369 , 0.772354616 , 0.000000000 ],
[ 7.621021855 , -0.653193161 , 0.000000000 ],
[ 6.271177219 , -1.104920876 , 0.000000000 ],
[ 5.936517550 , -2.462094972 , 0.000000000 ],
[ 6.978709856 , -3.376178892 , 0.000000000 ],
[ 8.323033295 , -2.951641292 , 0.000000000 ],
[ 8.653373174 , -1.606705567 , 0.000000000 ],
[ 5.797049417 , 2.109594763 , 0.000000000 ],
[ 8.415018171 , 1.451489921 , 0.000000000 ],
[ 4.904128608 , -2.785730808 , 0.000000000 ],
[ 6.756992410 , -4.434822780 , 0.000000000 ],
[ 9.109098214 , -3.694570139 , 0.000000000 ],
[ 9.689698951 , -1.294593877 , 0.000000000 ]]},
'Methane_dimer': {
'description': "Complex, S22, S26, dispersion bonded",
'name': "Methane_dimer",
's26_number': "08",
'interaction energy CC':-0.0230,
'interaction energies s22x5':[-0.0147,-0.0230,-0.0108,-0.0026,-0.0004],
'offset': 0.0000,
'symbols': 'CHHHHCHHHH',
'magmoms': None,
'dimer atoms': [5,5],
# Optimisation level: CCSD(T)/cc-pVTZ
'positions':[[ 0.000000, -0.000140, 1.859161],
[ -0.888551, 0.513060, 1.494685],
[ 0.888551, 0.513060, 1.494685],
[ 0.000000, -1.026339, 1.494868],
[ 0.000000, 0.000089, 2.948284],
[ 0.000000, 0.000140, -1.859161],
[ 0.000000, -0.000089, -2.948284],
[ -0.888551, -0.513060, -1.494685],
[ 0.888551, -0.513060, -1.494685],
[ 0.000000, 1.026339, -1.494868]],
'positions 0.9':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 0.364514644 , 0.513239461 , -0.888512354 ],
[ 0.364514644 , 0.513105641 , 0.888589641 ],
[ 0.364215723 , -1.026226426 , -0.000077278 ],
[ -1.089122980 , 0.000311014 , 0.000000023 ],
[ 3.346489810 , 0.000000000 , 0.000000000 ],
[ 4.435612789 , -0.000311014 , -0.000000023 ],
[ 2.981975165 , -0.513105641 , -0.888589641 ],
[ 2.981975165 , -0.513239461 , 0.888512354 ],
[ 2.982274086 , 1.026226426 , 0.000077278 ]],
'positions 1.0':[[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ 0.364514644000000 , 0.513239461000000 , -0.888512354000000 ],
[ 0.364514644000000 , 0.513105641000000 , 0.888589641000000 ],
[ 0.364215723000000 , -1.026226426000000 , -0.000077278000000 ],
[ -1.089122980000000 , 0.000311014000000 , 0.000000023000000 ],
[ 3.718322011090909 , 0.000000000000000 , 0.000000000000000 ],
[ 4.807444990090909 , -0.000311014000000 , -0.000000023000000 ],
[ 3.353807366090909 , -0.513105641000000 , -0.888589641000000 ],
[ 3.353807366090909 , -0.513239461000000 , 0.888512354000000 ],
[ 3.354106287090909 , 1.026226426000000 , 0.000077278000000 ]],
'positions 1.2':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 0.364514644 , 0.513239461 , -0.888512354 ],
[ 0.364514644 , 0.513105641 , 0.888589641 ],
[ 0.364215723 , -1.026226426 , -0.000077278 ],
[ -1.089122980 , 0.000311014 , 0.000000023 ],
[ 4.461986413 , 0.000000000 , 0.000000000 ],
[ 5.551109392 , -0.000311014 , -0.000000023 ],
[ 4.097471768 , -0.513105641 , -0.888589641 ],
[ 4.097471768 , -0.513239461 , 0.888512354 ],
[ 4.097770689 , 1.026226426 , 0.000077278 ]],
'positions 1.5':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 0.364514644 , 0.513239461 , -0.888512354 ],
[ 0.364514644 , 0.513105641 , 0.888589641 ],
[ 0.364215723 , -1.026226426 , -0.000077278 ],
[ -1.089122980 , 0.000311014 , 0.000000023 ],
[ 5.577483016 , 0.000000000 , 0.000000000 ],
[ 6.666605995 , -0.000311014 , -0.000000023 ],
[ 5.212968371 , -0.513105641 , -0.888589641 ],
[ 5.212968371 , -0.513239461 , 0.888512354 ],
[ 5.213267292 , 1.026226426 , 0.000077278 ]],
'positions 2.0':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 0.364514644 , 0.513239461 , -0.888512354 ],
[ 0.364514644 , 0.513105641 , 0.888589641 ],
[ 0.364215723 , -1.026226426 , -0.000077278 ],
[ -1.089122980 , 0.000311014 , 0.000000023 ],
[ 7.436644022 , 0.000000000 , 0.000000000 ],
[ 8.525767001 , -0.000311014 , -0.000000023 ],
[ 7.072129377 , -0.513105641 , -0.888589641 ],
[ 7.072129377 , -0.513239461 , 0.888512354 ],
[ 7.072428298 , 1.026226426 , 0.000077278 ]]},
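# Sketch of accessing a displaced geometry (same assumptions as above: the
# dictionary name `data`, the ASE dependency, and the pairing of the s22x5
# energies with the scalings 0.9/1.0/1.2/1.5/2.0 are all assumptions):
#
#   from ase import Atoms
#
#   entry = data['Methane_dimer']
#   atoms_09 = Atoms(entry['symbols'], positions=entry['positions 0.9'])
#   e_09 = entry['interaction energies s22x5'][0]   # energy presumed to belong
#                                                   # to the 0.9-scaled geometry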
'Phenol_dimer': {
'description': "Complex, S22, S26",
'name': "Phenol_dimer",
's26_number': "22",
'interaction energy CC':-0.3075,
'interaction energies s22x5':[-0.2784,-0.3057,-0.2511,-0.1479,-0.0598],
'offset': 0.0018,
'symbols': 'COHCCCCCHHHHHOCHCCCCCHHHHH',
'magmoms': None,
'dimer atoms': [13,13],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ -2.0071056, 0.7638459, -0.1083509],
[ -1.3885044, 1.9298523, -0.4431206],
[ -0.5238121, 1.9646519, -0.0064609],
[ -1.4630807, -0.1519120, 0.7949930],
[ -2.1475789, -1.3295094, 1.0883677],
[ -3.3743208, -1.6031427, 0.4895864],
[ -3.9143727, -0.6838545, -0.4091028],
[ -3.2370496, 0.4929609, -0.7096126],
[ -0.5106510, 0.0566569, 1.2642563],
[ -1.7151135, -2.0321452, 1.7878417],
[ -3.9024664, -2.5173865, 0.7197947],
[ -4.8670730, -0.8822939, -0.8811319],
[ -3.6431662, 1.2134345, -1.4057590],
[ 1.3531168, 1.9382724, 0.4723133],
[ 2.0369747, 0.7865043, 0.1495491],
[ 1.7842846, 2.3487495, 1.2297110],
[ 1.5904026, 0.0696860, -0.9574153],
[ 2.2417367, -1.1069765, -1.3128110],
[ 3.3315674, -1.5665603, -0.5748636],
[ 3.7696838, -0.8396901, 0.5286439],
[ 3.1224836, 0.3383498, 0.8960491],
[ 0.7445512, 0.4367983, -1.5218583],
[ 1.8921463, -1.6649726, -2.1701843],
[ 3.8330227, -2.4811537, -0.8566666],
[ 4.6137632, -1.1850101, 1.1092635],
[ 3.4598854, 0.9030376, 1.7569489]],
'positions 0.9':[[ -1.445967355 , -1.221065858 , 0.265808750 ],
[ -0.945229913 , -0.047318091 , -0.209467563 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.683142700 , -2.127785201 , 1.005109011 ],
[ -1.257798399 , -3.314090975 , 1.456540663 ],
[ -2.590627730 , -3.605427919 , 1.179051667 ],
[ -3.348500619 , -2.695116849 , 0.443286115 ],
[ -2.782549405 , -1.509701903 , -0.013287247 ],
[ 0.352786431 , -1.905463972 , 1.224781047 ],
[ -0.656349187 , -4.009576034 , 2.026231320 ],
[ -3.032993188 , -4.526384329 , 1.531085059 ],
[ -4.385512900 , -2.907317436 , 0.221017935 ],
[ -3.357888956 , -0.796017014 , -0.586234960 ],
[ 1.743489077 , 0.000000000 , 0.000000000 ],
[ 2.341981491 , -1.142898789 , -0.483732445 ],
[ 2.342838533 , 0.417604441 , 0.628041164 ],
[ 1.645485086 , -1.867622674 , -1.447211527 ],
[ 2.204739700 , -3.035912794 , -1.954567993 ],
[ 3.449296078 , -3.479350313 , -1.509647408 ],
[ 4.136609561 , -2.744696418 , -0.547410307 ],
[ 3.584309534 , -1.574952605 , -0.029436748 ],
[ 0.681454799 , -1.513028491 , -1.784467064 ],
[ 1.661729182 , -3.600082357 , -2.699896207 ],
[ 3.877956013 , -4.387511286 , -1.908204233 ],
[ 5.102623102 , -3.077497147 , -0.194005162 ],
[ 4.116289930 , -1.004251641 , 0.722333197 ]],
'positions 1.0':[[ -1.445967355000000 , -1.221065858000000 , 0.265808750000000 ],
[ -0.945229913000000 , -0.047318091000000 , -0.209467563000000 ],
[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ -0.683142700000000 , -2.127785201000000 , 1.005109011000000 ],
[ -1.257798399000000 , -3.314090975000000 , 1.456540663000000 ],
[ -2.590627730000000 , -3.605427919000000 , 1.179051667000000 ],
[ -3.348500619000000 , -2.695116849000000 , 0.443286115000000 ],
[ -2.782549405000000 , -1.509701903000000 , -0.013287247000000 ],
[ 0.352786431000000 , -1.905463972000000 , 1.224781047000000 ],
[ -0.656349187000000 , -4.009576034000000 , 2.026231320000000 ],
[ -3.032993188000000 , -4.526384329000000 , 1.531085059000000 ],
[ -4.385512900000000 , -2.907317436000000 , 0.221017935000000 ],
[ -3.357888956000000 , -0.796017014000000 , -0.586234960000000 ],
[ 1.937210085636364 , 0.000000000000000 , 0.000000000000000 ],
[ 2.535702499636364 , -1.142898789000000 , -0.483732445000000 ],
[ 2.536559541636364 , 0.417604441000000 , 0.628041164000000 ],
[ 1.839206094636364 , -1.867622674000000 , -1.447211527000000 ],
[ 2.398460708636364 , -3.035912794000000 , -1.954567993000000 ],
[ 3.643017086636364 , -3.479350313000000 , -1.509647408000000 ],
[ 4.330330569636364 , -2.744696418000000 , -0.547410307000000 ],
[ 3.778030542636364 , -1.574952605000000 , -0.029436748000000 ],
[ 0.875175807636364 , -1.513028491000000 , -1.784467064000000 ],
[ 1.855450190636364 , -3.600082357000000 , -2.699896207000000 ],
[ 4.071677021636363 , -4.387511286000000 , -1.908204233000000 ],
[ 5.296344110636364 , -3.077497147000000 , -0.194005162000000 ],
[ 4.310010938636363 , -1.004251641000000 , 0.722333197000000 ]],
'positions 1.2':[[ -1.445967355 , -1.221065858 , 0.265808750 ],
[ -0.945229913 , -0.047318091 , -0.209467563 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.683142700 , -2.127785201 , 1.005109011 ],
[ -1.257798399 , -3.314090975 , 1.456540663 ],
[ -2.590627730 , -3.605427919 , 1.179051667 ],
[ -3.348500619 , -2.695116849 , 0.443286115 ],
[ -2.782549405 , -1.509701903 , -0.013287247 ],
[ 0.352786431 , -1.905463972 , 1.224781047 ],
[ -0.656349187 , -4.009576034 , 2.026231320 ],
[ -3.032993188 , -4.526384329 , 1.531085059 ],
[ -4.385512900 , -2.907317436 , 0.221017935 ],
[ -3.357888956 , -0.796017014 , -0.586234960 ],
[ 2.324652103 , 0.000000000 , 0.000000000 ],
[ 2.923144517 , -1.142898789 , -0.483732445 ],
[ 2.924001559 , 0.417604441 , 0.628041164 ],
[ 2.226648112 , -1.867622674 , -1.447211527 ],
[ 2.785902726 , -3.035912794 , -1.954567993 ],
[ 4.030459104 , -3.479350313 , -1.509647408 ],
[ 4.717772587 , -2.744696418 , -0.547410307 ],
[ 4.165472560 , -1.574952605 , -0.029436748 ],
[ 1.262617825 , -1.513028491 , -1.784467064 ],
[ 2.242892208 , -3.600082357 , -2.699896207 ],
[ 4.459119039 , -4.387511286 , -1.908204233 ],
[ 5.683786128 , -3.077497147 , -0.194005162 ],
[ 4.697452956 , -1.004251641 , 0.722333197 ]],
'positions 1.5':[[ -1.445967355 , -1.221065858 , 0.265808750 ],
[ -0.945229913 , -0.047318091 , -0.209467563 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.683142700 , -2.127785201 , 1.005109011 ],
[ -1.257798399 , -3.314090975 , 1.456540663 ],
[ -2.590627730 , -3.605427919 , 1.179051667 ],
[ -3.348500619 , -2.695116849 , 0.443286115 ],
[ -2.782549405 , -1.509701903 , -0.013287247 ],
[ 0.352786431 , -1.905463972 , 1.224781047 ],
[ -0.656349187 , -4.009576034 , 2.026231320 ],
[ -3.032993188 , -4.526384329 , 1.531085059 ],
[ -4.385512900 , -2.907317436 , 0.221017935 ],
[ -3.357888956 , -0.796017014 , -0.586234960 ],
[ 2.905815129 , 0.000000000 , 0.000000000 ],
[ 3.504307543 , -1.142898789 , -0.483732445 ],
[ 3.505164585 , 0.417604441 , 0.628041164 ],
[ 2.807811138 , -1.867622674 , -1.447211527 ],
[ 3.367065752 , -3.035912794 , -1.954567993 ],
[ 4.611622130 , -3.479350313 , -1.509647408 ],
[ 5.298935613 , -2.744696418 , -0.547410307 ],
[ 4.746635586 , -1.574952605 , -0.029436748 ],
[ 1.843780851 , -1.513028491 , -1.784467064 ],
[ 2.824055234 , -3.600082357 , -2.699896207 ],
[ 5.040282065 , -4.387511286 , -1.908204233 ],
[ 6.264949154 , -3.077497147 , -0.194005162 ],
[ 5.278615982 , -1.004251641 , 0.722333197 ]],
'positions 2.0':[[ -1.445967355 , -1.221065858 , 0.265808750 ],
[ -0.945229913 , -0.047318091 , -0.209467563 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.683142700 , -2.127785201 , 1.005109011 ],
[ -1.257798399 , -3.314090975 , 1.456540663 ],
[ -2.590627730 , -3.605427919 , 1.179051667 ],
[ -3.348500619 , -2.695116849 , 0.443286115 ],
[ -2.782549405 , -1.509701903 , -0.013287247 ],
[ 0.352786431 , -1.905463972 , 1.224781047 ],
[ -0.656349187 , -4.009576034 , 2.026231320 ],
[ -3.032993188 , -4.526384329 , 1.531085059 ],
[ -4.385512900 , -2.907317436 , 0.221017935 ],
[ -3.357888956 , -0.796017014 , -0.586234960 ],
[ 3.874420172 , 0.000000000 , 0.000000000 ],
[ 4.472912586 , -1.142898789 , -0.483732445 ],
[ 4.473769628 , 0.417604441 , 0.628041164 ],
[ 3.776416181 , -1.867622674 , -1.447211527 ],
[ 4.335670795 , -3.035912794 , -1.954567993 ],
[ 5.580227173 , -3.479350313 , -1.509647408 ],
[ 6.267540656 , -2.744696418 , -0.547410307 ],
[ 5.715240629 , -1.574952605 , -0.029436748 ],
[ 2.812385894 , -1.513028491 , -1.784467064 ],
[ 3.792660277 , -3.600082357 , -2.699896207 ],
[ 6.008887108 , -4.387511286 , -1.908204233 ],
[ 7.233554197 , -3.077497147 , -0.194005162 ],
[ 6.247221025 , -1.004251641 , 0.722333197 ]]},
'Pyrazine_dimer': {
'description': "Complex, S22, S26, dispersion bonded",
'name': "Pyrazine_dimer",
's26_number': "12",
'interaction energy CC':-0.1821,
'interaction energies s22x5':[-0.0733,-0.1956,-0.1310,-0.0425,-0.0082],
'offset': -0.0135,
'symbols': 'CCNCCNHHHHCCNCCNHHHH',
'magmoms': None,
'dimer atoms': [10,10],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ -1.2471894, -1.1718212, -0.6961388],
[ -1.2471894, -1.1718212, 0.6961388],
[ -0.2589510, -1.7235771, 1.4144796],
[ 0.7315327, -2.2652221, 0.6967288],
[ 0.7315327, -2.2652221, -0.6967288],
[ -0.2589510, -1.7235771, -1.4144796],
[ -2.0634363, -0.7223199, -1.2472797],
[ -2.0634363, -0.7223199, 1.2472797],
[ 1.5488004, -2.7128282, 1.2475604],
[ 1.5488004, -2.7128282, -1.2475604],
[ -0.3380031, 2.0800608, 1.1300452],
[ 0.8540254, 1.3593471, 1.1306308],
[ 1.4701787, 0.9907598, 0.0000000],
[ 0.8540254, 1.3593471, -1.1306308],
[ -0.3380031, 2.0800608, -1.1300452],
[ -0.9523059, 2.4528836, 0.0000000],
[ -0.8103758, 2.3643033, 2.0618643],
[ 1.3208583, 1.0670610, 2.0623986],
[ 1.3208583, 1.0670610, -2.0623986],
[ -0.8103758, 2.3643033, -2.0618643]],
'positions 0.9':[[ 0.395653045 , 1.059432142 , -0.696139000 ],
[ 0.395653045 , 1.059432142 , 0.696139000 ],
[ -0.003263357 , 0.000227377 , 1.414480000 ],
[ -0.391847355 , -1.059697307 , 0.696729000 ],
[ -0.391847355 , -1.059697307 , -0.696729000 ],
[ -0.003263357 , 0.000227377 , -1.414480000 ],
[ 0.718983381 , 1.933370245 , -1.247280000 ],
[ 0.718983381 , 1.933370245 , 1.247280000 ],
[ -0.713152254 , -1.934362753 , 1.247560000 ],
[ -0.713152254 , -1.934362753 , -1.247560000 ],
[ 3.398538200 , 0.643131999 , 1.130045000 ],
[ 2.862793235 , -0.642689433 , 1.130631000 ],
[ 2.589772167 , -1.306738847 , 0.000000000 ],
[ 2.862793235 , -0.642689433 , -1.130631000 ],
[ 3.398538200 , 0.643131999 , -1.130045000 ],
[ 3.676023139 , 1.305979850 , 0.000000000 ],
[ 3.609496345 , 1.152471205 , 2.061864000 ],
[ 2.643057716 , -1.147744338 , 2.062399000 ],
[ 2.643057716 , -1.147744338 , -2.062399000 ],
[ 3.609496345 , 1.152471205 , -2.061864000 ]],
'positions 1.0':[[ 0.395653045000000 , 1.059432142000000 , -0.696139000000000 ],
[ 0.395653045000000 , 1.059432142000000 , 0.696139000000000 ],
[ -0.003263357000000 , 0.000227377000000 , 1.414480000000000 ],
[ -0.391847355000000 , -1.059697307000000 , 0.696729000000000 ],
[ -0.391847355000000 , -1.059697307000000 , -0.696729000000000 ],
[ -0.003263357000000 , 0.000227377000000 , -1.414480000000000 ],
[ 0.718983381000000 , 1.933370245000000 , -1.247280000000000 ],
[ 0.718983381000000 , 1.933370245000000 , 1.247280000000000 ],
[ -0.713152254000000 , -1.934362753000000 , 1.247560000000000 ],
[ -0.713152254000000 , -1.934362753000000 , -1.247560000000000 ],
[ 3.746481288363636 , 0.643131999000000 , 1.130045000000000 ],
[ 3.210736323363636 , -0.642689433000000 , 1.130631000000000 ],
[ 2.937715255363636 , -1.306738847000000 , 0.000000000000000 ],
[ 3.210736323363636 , -0.642689433000000 , -1.130631000000000 ],
[ 3.746481288363636 , 0.643131999000000 , -1.130045000000000 ],
[ 4.023966227363637 , 1.305979850000000 , 0.000000000000000 ],
[ 3.957439433363636 , 1.152471205000000 , 2.061864000000000 ],
[ 2.991000804363636 , -1.147744338000000 , 2.062399000000000 ],
[ 2.991000804363636 , -1.147744338000000 , -2.062399000000000 ],
[ 3.957439433363636 , 1.152471205000000 , -2.061864000000000 ]],
'positions 1.2':[[ 0.395653045 , 1.059432142 , -0.696139000 ],
[ 0.395653045 , 1.059432142 , 0.696139000 ],
[ -0.003263357 , 0.000227377 , 1.414480000 ],
[ -0.391847355 , -1.059697307 , 0.696729000 ],
[ -0.391847355 , -1.059697307 , -0.696729000 ],
[ -0.003263357 , 0.000227377 , -1.414480000 ],
[ 0.718983381 , 1.933370245 , -1.247280000 ],
[ 0.718983381 , 1.933370245 , 1.247280000 ],
[ -0.713152254 , -1.934362753 , 1.247560000 ],
[ -0.713152254 , -1.934362753 , -1.247560000 ],
[ 4.442367465 , 0.643131999 , 1.130045000 ],
[ 3.906622500 , -0.642689433 , 1.130631000 ],
[ 3.633601432 , -1.306738847 , 0.000000000 ],
[ 3.906622500 , -0.642689433 , -1.130631000 ],
[ 4.442367465 , 0.643131999 , -1.130045000 ],
[ 4.719852404 , 1.305979850 , 0.000000000 ],
[ 4.653325610 , 1.152471205 , 2.061864000 ],
[ 3.686886981 , -1.147744338 , 2.062399000 ],
[ 3.686886981 , -1.147744338 , -2.062399000 ],
[ 4.653325610 , 1.152471205 , -2.061864000 ]],
'positions 1.5':[[ 0.395653045 , 1.059432142 , -0.696139000 ],
[ 0.395653045 , 1.059432142 , 0.696139000 ],
[ -0.003263357 , 0.000227377 , 1.414480000 ],
[ -0.391847355 , -1.059697307 , 0.696729000 ],
[ -0.391847355 , -1.059697307 , -0.696729000 ],
[ -0.003263357 , 0.000227377 , -1.414480000 ],
[ 0.718983381 , 1.933370245 , -1.247280000 ],
[ 0.718983381 , 1.933370245 , 1.247280000 ],
[ -0.713152254 , -1.934362753 , 1.247560000 ],
[ -0.713152254 , -1.934362753 , -1.247560000 ],
[ 5.486196730 , 0.643131999 , 1.130045000 ],
[ 4.950451765 , -0.642689433 , 1.130631000 ],
[ 4.677430697 , -1.306738847 , 0.000000000 ],
[ 4.950451765 , -0.642689433 , -1.130631000 ],
[ 5.486196730 , 0.643131999 , -1.130045000 ],
[ 5.763681669 , 1.305979850 , 0.000000000 ],
[ 5.697154875 , 1.152471205 , 2.061864000 ],
[ 4.730716246 , -1.147744338 , 2.062399000 ],
[ 4.730716246 , -1.147744338 , -2.062399000 ],
[ 5.697154875 , 1.152471205 , -2.061864000 ]],
'positions 2.0':[[ 0.395653045 , 1.059432142 , -0.696139000 ],
[ 0.395653045 , 1.059432142 , 0.696139000 ],
[ -0.003263357 , 0.000227377 , 1.414480000 ],
[ -0.391847355 , -1.059697307 , 0.696729000 ],
[ -0.391847355 , -1.059697307 , -0.696729000 ],
[ -0.003263357 , 0.000227377 , -1.414480000 ],
[ 0.718983381 , 1.933370245 , -1.247280000 ],
[ 0.718983381 , 1.933370245 , 1.247280000 ],
[ -0.713152254 , -1.934362753 , 1.247560000 ],
[ -0.713152254 , -1.934362753 , -1.247560000 ],
[ 7.225912172 , 0.643131999 , 1.130045000 ],
[ 6.690167207 , -0.642689433 , 1.130631000 ],
[ 6.417146139 , -1.306738847 , 0.000000000 ],
[ 6.690167207 , -0.642689433 , -1.130631000 ],
[ 7.225912172 , 0.643131999 , -1.130045000 ],
[ 7.503397111 , 1.305979850 , 0.000000000 ],
[ 7.436870317 , 1.152471205 , 2.061864000 ],
[ 6.470431688 , -1.147744338 , 2.062399000 ],
[ 6.470431688 , -1.147744338 , -2.062399000 ],
[ 7.436870317 , 1.152471205 , -2.061864000 ]]},
'Uracil_dimer_h-bonded': {
'description': "Complex, S22, S26, 2 h-bonds, double h-bond, nucleic base",
'name': "Uracil_dimer_h-bonded",
's26_number': "05",
'interaction energy CC':-0.8972,
'interaction energies s22x5':[-0.8122,-0.8872,-0.7441,-0.4536,-0.1986],
'offset': 0.0100,
'symbols': 'OCNCCCNOHHHHOCNCCCNOHHHH',
'magmoms': None,
'dimer atoms': [12,12],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ -1.4663316, 1.0121693, 0.0000000],
[ -0.6281464, 1.9142678, 0.0000000],
[ 0.7205093, 1.6882688, 0.0000000],
[ 1.6367290, 2.7052764, 0.0000000],
[ 1.2769036, 4.0061763, 0.0000000],
[ -0.1286005, 4.3621549, 0.0000000],
[ -0.9777230, 3.2396433, 0.0000000],
[ -0.5972229, 5.4864066, 0.0000000],
[ 2.0103504, 4.7938642, 0.0000000],
[ 1.0232515, 0.7061820, 0.0000000],
[ -1.9700268, 3.4323850, 0.0000000],
[ 2.6690620, 2.3883417, 0.0000000],
[ 1.4663316, -1.0121693, 0.0000000],
[ 0.6281464, -1.9142678, 0.0000000],
[ -0.7205093, -1.6882688, 0.0000000],
[ -1.6367290, -2.7052764, 0.0000000],
[ -1.2769036, -4.0061763, 0.0000000],
[ 0.1286005, -4.3621549, 0.0000000],
[ 0.9777230, -3.2396433, 0.0000000],
[ 0.5972229, -5.4864066, 0.0000000],
[ -2.0103504, -4.7938642, 0.0000000],
[ -1.0232515, -0.7061820, 0.0000000],
[ 1.9700268, -3.4323850, 0.0000000],
[ -2.6690620, -2.3883417, 0.0000000]],
'positions 0.9':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.664243938 , 1.036879148 , 0.000000000 ],
[ -0.108663437 , 2.286389518 , 0.000000000 ],
[ -0.864691937 , 3.427521953 , 0.000000000 ],
[ -2.214231597 , 3.403909532 , 0.000000000 ],
[ -2.909869859 , 2.131803891 , 0.000000000 ],
[ -2.034924624 , 1.029301194 , 0.000000000 ],
[ -4.115521524 , 1.958733959 , 0.000000000 ],
[ -2.793840332 , 4.310799346 , 0.000000000 ],
[ 0.917908194 , 2.334329905 , 0.000000000 ],
[ -2.469325804 , 0.116551326 , 0.000000000 ],
[ -0.300037631 , 4.348024043 , 0.000000000 ],
[ 2.515009084 , 2.334329905 , 0.000000000 ],
[ 3.179253022 , 1.297450757 , 0.000000000 ],
[ 2.623672521 , 0.047940387 , 0.000000000 ],
[ 3.379701020 , -1.093192048 , 0.000000000 ],
[ 4.729240680 , -1.069579627 , 0.000000000 ],
[ 5.424878943 , 0.202526014 , 0.000000000 ],
[ 4.549933708 , 1.305028711 , 0.000000000 ],
[ 6.630530608 , 0.375595946 , 0.000000000 ],
[ 5.308849416 , -1.976469441 , 0.000000000 ],
[ 1.597100890 , 0.000000000 , 0.000000000 ],
[ 4.984334888 , 2.217778579 , 0.000000000 ],
[ 2.815046715 , -2.013694138 , 0.000000000 ]],
'positions 1.0':[[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ -0.664243938000000 , 1.036879148000000 , 0.000000000000000 ],
[ -0.108663437000000 , 2.286389518000000 , 0.000000000000000 ],
[ -0.864691937000000 , 3.427521953000000 , 0.000000000000000 ],
[ -2.214231597000000 , 3.403909532000000 , 0.000000000000000 ],
[ -2.909869859000000 , 2.131803891000000 , 0.000000000000000 ],
[ -2.034924624000000 , 1.029301194000000 , 0.000000000000000 ],
[ -4.115521524000000 , 1.958733959000000 , 0.000000000000000 ],
[ -2.793840332000000 , 4.310799346000000 , 0.000000000000000 ],
[ 0.917908194000000 , 2.334329905000000 , 0.000000000000000 ],
[ -2.469325804000000 , 0.116551326000000 , 0.000000000000000 ],
[ -0.300037631000000 , 4.348024043000000 , 0.000000000000000 ],
[ 2.692464738545454 , 2.334329905000000 , 0.000000000000000 ],
[ 3.356708676545455 , 1.297450757000000 , 0.000000000000000 ],
[ 2.801128175545454 , 0.047940387000000 , 0.000000000000000 ],
[ 3.557156674545455 , -1.093192048000000 , 0.000000000000000 ],
[ 4.906696334545455 , -1.069579627000000 , 0.000000000000000 ],
[ 5.602334597545455 , 0.202526014000000 , 0.000000000000000 ],
[ 4.727389362545455 , 1.305028711000000 , 0.000000000000000 ],
[ 6.807986262545454 , 0.375595946000000 , 0.000000000000000 ],
[ 5.486305070545455 , -1.976469441000000 , 0.000000000000000 ],
[ 1.774556544545455 , 0.000000000000000 , 0.000000000000000 ],
[ 5.161790542545455 , 2.217778579000000 , 0.000000000000000 ],
[ 2.992502369545454 , -2.013694138000000 , 0.000000000000000 ]],
'positions 1.2':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.664243938 , 1.036879148 , 0.000000000 ],
[ -0.108663437 , 2.286389518 , 0.000000000 ],
[ -0.864691937 , 3.427521953 , 0.000000000 ],
[ -2.214231597 , 3.403909532 , 0.000000000 ],
[ -2.909869859 , 2.131803891 , 0.000000000 ],
[ -2.034924624 , 1.029301194 , 0.000000000 ],
[ -4.115521524 , 1.958733959 , 0.000000000 ],
[ -2.793840332 , 4.310799346 , 0.000000000 ],
[ 0.917908194 , 2.334329905 , 0.000000000 ],
[ -2.469325804 , 0.116551326 , 0.000000000 ],
[ -0.300037631 , 4.348024043 , 0.000000000 ],
[ 3.047376048 , 2.334329905 , 0.000000000 ],
[ 3.711619986 , 1.297450757 , 0.000000000 ],
[ 3.156039485 , 0.047940387 , 0.000000000 ],
[ 3.912067984 , -1.093192048 , 0.000000000 ],
[ 5.261607644 , -1.069579627 , 0.000000000 ],
[ 5.957245907 , 0.202526014 , 0.000000000 ],
[ 5.082300672 , 1.305028711 , 0.000000000 ],
[ 7.162897572 , 0.375595946 , 0.000000000 ],
[ 5.841216380 , -1.976469441 , 0.000000000 ],
[ 2.129467854 , 0.000000000 , 0.000000000 ],
[ 5.516701852 , 2.217778579 , 0.000000000 ],
[ 3.347413679 , -2.013694138 , 0.000000000 ]],
'positions 1.5':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.664243938 , 1.036879148 , 0.000000000 ],
[ -0.108663437 , 2.286389518 , 0.000000000 ],
[ -0.864691937 , 3.427521953 , 0.000000000 ],
[ -2.214231597 , 3.403909532 , 0.000000000 ],
[ -2.909869859 , 2.131803891 , 0.000000000 ],
[ -2.034924624 , 1.029301194 , 0.000000000 ],
[ -4.115521524 , 1.958733959 , 0.000000000 ],
[ -2.793840332 , 4.310799346 , 0.000000000 ],
[ 0.917908194 , 2.334329905 , 0.000000000 ],
[ -2.469325804 , 0.116551326 , 0.000000000 ],
[ -0.300037631 , 4.348024043 , 0.000000000 ],
[ 3.579743012 , 2.334329905 , 0.000000000 ],
[ 4.243986950 , 1.297450757 , 0.000000000 ],
[ 3.688406449 , 0.047940387 , 0.000000000 ],
[ 4.444434948 , -1.093192048 , 0.000000000 ],
[ 5.793974608 , -1.069579627 , 0.000000000 ],
[ 6.489612871 , 0.202526014 , 0.000000000 ],
[ 5.614667636 , 1.305028711 , 0.000000000 ],
[ 7.695264536 , 0.375595946 , 0.000000000 ],
[ 6.373583344 , -1.976469441 , 0.000000000 ],
[ 2.661834818 , 0.000000000 , 0.000000000 ],
[ 6.049068816 , 2.217778579 , 0.000000000 ],
[ 3.879780643 , -2.013694138 , 0.000000000 ]],
'positions 2.0':[[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ -0.664243938 , 1.036879148 , 0.000000000 ],
[ -0.108663437 , 2.286389518 , 0.000000000 ],
[ -0.864691937 , 3.427521953 , 0.000000000 ],
[ -2.214231597 , 3.403909532 , 0.000000000 ],
[ -2.909869859 , 2.131803891 , 0.000000000 ],
[ -2.034924624 , 1.029301194 , 0.000000000 ],
[ -4.115521524 , 1.958733959 , 0.000000000 ],
[ -2.793840332 , 4.310799346 , 0.000000000 ],
[ 0.917908194 , 2.334329905 , 0.000000000 ],
[ -2.469325804 , 0.116551326 , 0.000000000 ],
[ -0.300037631 , 4.348024043 , 0.000000000 ],
[ 4.467021284 , 2.334329905 , 0.000000000 ],
[ 5.131265222 , 1.297450757 , 0.000000000 ],
[ 4.575684721 , 0.047940387 , 0.000000000 ],
[ 5.331713220 , -1.093192048 , 0.000000000 ],
[ 6.681252880 , -1.069579627 , 0.000000000 ],
[ 7.376891143 , 0.202526014 , 0.000000000 ],
[ 6.501945908 , 1.305028711 , 0.000000000 ],
[ 8.582542808 , 0.375595946 , 0.000000000 ],
[ 7.260861616 , -1.976469441 , 0.000000000 ],
[ 3.549113090 , 0.000000000 , 0.000000000 ],
[ 6.936347088 , 2.217778579 , 0.000000000 ],
[ 4.767058915 , -2.013694138 , 0.000000000 ]]},
'Uracil_dimer_stack': {
'description': "Complex, S22, S26, stack, dispersion bonded, nucleic base",
'name': "Uracil_dimer_stack",
's26_number': "13",
'interaction energy CC':-0.4224,
'interaction energies s22x5':[-0.2931,-0.4280,-0.2715,-0.1049,-0.0299],
'offset': -0.0056,
'symbols': 'NCHCHCONHCOHNCHCHCONHCOH',
'magmoms': None,
'dimer atoms': [12,12],
# Optimisation level: MP2/cc-pVTZ
'positions':[[ 2.0113587, -1.2132073, -0.0980673],
[ 2.0257076, -0.6971797, -1.3644029],
[ 2.2975208, -1.3910592, -2.1456459],
[ 1.7145226, 0.5919651, -1.6124892],
[ 1.7272873, 0.9908466, -2.6120050],
[ 1.3089605, 1.4575340, -0.5205890],
[ 0.9205926, 2.6110864, -0.6260457],
[ 1.3768885, 0.8397454, 0.7346356],
[ 1.0518040, 1.3862229, 1.5233710],
[ 1.6459909, -0.4852113, 1.0187267],
[ 1.5611090, -0.9718061, 2.1298059],
[ 2.1294635, -2.2015046, 0.0568134],
[ -2.0113587, 1.2132073, -0.0980673],
[ -2.0257076, 0.6971797, -1.3644029],
[ -2.2975208, 1.3910592, -2.1456459],
[ -1.7145226, -0.5919651, -1.6124892],
[ -1.7272873, -0.9908466, -2.6120050],
[ -1.3089605, -1.4575340, -0.5205890],
[ -0.9205926, -2.6110864, -0.6260457],
[ -1.3768885, -0.8397454, 0.7346356],
[ -1.0518040, -1.3862229, 1.5233710],
[ -1.6459909, 0.4852113, 1.0187267],
[ -1.5611090, 0.9718061, 2.1298059],
[ -2.1294635, 2.2015046, 0.0568134]],
'positions 0.9':[[ -0.277905006 , 1.293679543 , 0.176141970 ],
[ -0.313143400 , 0.778657200 , -1.090194030 ],
[ -0.556628453 , 1.482976305 , -1.871437030 ],
[ -0.054429325 , -0.522034140 , -1.338280030 ],
[ -0.083339176 , -0.920071815 , -2.337796030 ],
[ 0.315741834 , -1.403319766 , -0.246380030 ],
[ 0.657066634 , -2.571655559 , -0.351837030 ],
[ 0.272892517 , -0.783286382 , 1.008844970 ],
[ 0.575575188 , -1.342483138 , 1.797579970 ],
[ 0.057676398 , 0.551482081 , 1.292935970 ],
[ 0.162197796 , 1.034239706 , 2.404014970 ],
[ -0.355882042 , 2.285950208 , 0.331021970 ],
[ 3.306699593 , -1.293679543 , 0.176141970 ],
[ 3.341937987 , -0.778657200 , -1.090194030 ],
[ 3.585423040 , -1.482976305 , -1.871437030 ],
[ 3.083223911 , 0.522034140 , -1.338280030 ],
[ 3.112133763 , 0.920071815 , -2.337796030 ],
[ 2.713052753 , 1.403319766 , -0.246380030 ],
[ 2.371727953 , 2.571655559 , -0.351837030 ],
[ 2.755902070 , 0.783286382 , 1.008844970 ],
[ 2.453219399 , 1.342483138 , 1.797579970 ],
[ 2.971118189 , -0.551482081 , 1.292935970 ],
[ 2.866596791 , -1.034239706 , 2.404014970 ],
[ 3.384676629 , -2.285950208 , 0.331021970 ]],
'positions 1.0':[[ -0.277905006000000 , 1.293679543000000 , 0.176141970000000 ],
[ -0.313143400000000 , 0.778657200000000 , -1.090194030000000 ],
[ -0.556628453000000 , 1.482976305000000 , -1.871437030000000 ],
[ -0.054429325000000 , -0.522034140000000 , -1.338280030000000 ],
[ -0.083339176000000 , -0.920071815000000 , -2.337796030000000 ],
[ 0.315741834000000 , -1.403319766000000 , -0.246380030000000 ],
[ 0.657066634000000 , -2.571655559000000 , -0.351837030000000 ],
[ 0.272892517000000 , -0.783286382000000 , 1.008844970000000 ],
[ 0.575575188000000 , -1.342483138000000 , 1.797579970000000 ],
[ 0.057676398000000 , 0.551482081000000 , 1.292935970000000 ],
[ 0.162197796000000 , 1.034239706000000 , 2.404014970000000 ],
[ -0.355882042000000 , 2.285950208000000 , 0.331021970000000 ],
[ 3.643232324909091 , -1.293679543000000 , 0.176141970000000 ],
[ 3.678470718909091 , -0.778657200000000 , -1.090194030000000 ],
[ 3.921955771909091 , -1.482976305000000 , -1.871437030000000 ],
[ 3.419756642909091 , 0.522034140000000 , -1.338280030000000 ],
[ 3.448666494909091 , 0.920071815000000 , -2.337796030000000 ],
[ 3.049585484909091 , 1.403319766000000 , -0.246380030000000 ],
[ 2.708260684909091 , 2.571655559000000 , -0.351837030000000 ],
[ 3.092434801909091 , 0.783286382000000 , 1.008844970000000 ],
[ 2.789752130909091 , 1.342483138000000 , 1.797579970000000 ],
[ 3.307650920909091 , -0.551482081000000 , 1.292935970000000 ],
[ 3.203129522909091 , -1.034239706000000 , 2.404014970000000 ],
[ 3.721209360909091 , -2.285950208000000 , 0.331021970000000 ]],
'positions 1.2':[[ -0.277905006 , 1.293679543 , 0.176141970 ],
[ -0.313143400 , 0.778657200 , -1.090194030 ],
[ -0.556628453 , 1.482976305 , -1.871437030 ],
[ -0.054429325 , -0.522034140 , -1.338280030 ],
[ -0.083339176 , -0.920071815 , -2.337796030 ],
[ 0.315741834 , -1.403319766 , -0.246380030 ],
[ 0.657066634 , -2.571655559 , -0.351837030 ],
[ 0.272892517 , -0.783286382 , 1.008844970 ],
[ 0.575575188 , -1.342483138 , 1.797579970 ],
[ 0.057676398 , 0.551482081 , 1.292935970 ],
[ 0.162197796 , 1.034239706 , 2.404014970 ],
[ -0.355882042 , 2.285950208 , 0.331021970 ],
[ 4.316297789 , -1.293679543 , 0.176141970 ],
[ 4.351536183 , -0.778657200 , -1.090194030 ],
[ 4.595021236 , -1.482976305 , -1.871437030 ],
[ 4.092822107 , 0.522034140 , -1.338280030 ],
[ 4.121731959 , 0.920071815 , -2.337796030 ],
[ 3.722650949 , 1.403319766 , -0.246380030 ],
[ 3.381326149 , 2.571655559 , -0.351837030 ],
[ 3.765500266 , 0.783286382 , 1.008844970 ],
[ 3.462817595 , 1.342483138 , 1.797579970 ],
[ 3.980716385 , -0.551482081 , 1.292935970 ],
[ 3.876194987 , -1.034239706 , 2.404014970 ],
[ 4.394274825 , -2.285950208 , 0.331021970 ]],
'positions 1.5':[[ -0.277905006 , 1.293679543 , 0.176141970 ],
[ -0.313143400 , 0.778657200 , -1.090194030 ],
[ -0.556628453 , 1.482976305 , -1.871437030 ],
[ -0.054429325 , -0.522034140 , -1.338280030 ],
[ -0.083339176 , -0.920071815 , -2.337796030 ],
[ 0.315741834 , -1.403319766 , -0.246380030 ],
[ 0.657066634 , -2.571655559 , -0.351837030 ],
[ 0.272892517 , -0.783286382 , 1.008844970 ],
[ 0.575575188 , -1.342483138 , 1.797579970 ],
[ 0.057676398 , 0.551482081 , 1.292935970 ],
[ 0.162197796 , 1.034239706 , 2.404014970 ],
[ -0.355882042 , 2.285950208 , 0.331021970 ],
[ 5.325895984 , -1.293679543 , 0.176141970 ],
[ 5.361134378 , -0.778657200 , -1.090194030 ],
[ 5.604619431 , -1.482976305 , -1.871437030 ],
[ 5.102420302 , 0.522034140 , -1.338280030 ],
[ 5.131330154 , 0.920071815 , -2.337796030 ],
[ 4.732249144 , 1.403319766 , -0.246380030 ],
[ 4.390924344 , 2.571655559 , -0.351837030 ],
[ 4.775098461 , 0.783286382 , 1.008844970 ],
[ 4.472415790 , 1.342483138 , 1.797579970 ],
[ 4.990314580 , -0.551482081 , 1.292935970 ],
[ 4.885793182 , -1.034239706 , 2.404014970 ],
[ 5.403873020 , -2.285950208 , 0.331021970 ]],
'positions 2.0':[[ -0.277905006 , 1.293679543 , 0.176141970 ],
[ -0.313143400 , 0.778657200 , -1.090194030 ],
[ -0.556628453 , 1.482976305 , -1.871437030 ],
[ -0.054429325 , -0.522034140 , -1.338280030 ],
[ -0.083339176 , -0.920071815 , -2.337796030 ],
[ 0.315741834 , -1.403319766 , -0.246380030 ],
[ 0.657066634 , -2.571655559 , -0.351837030 ],
[ 0.272892517 , -0.783286382 , 1.008844970 ],
[ 0.575575188 , -1.342483138 , 1.797579970 ],
[ 0.057676398 , 0.551482081 , 1.292935970 ],
[ 0.162197796 , 1.034239706 , 2.404014970 ],
[ -0.355882042 , 2.285950208 , 0.331021970 ],
[ 7.008559644 , -1.293679543 , 0.176141970 ],
[ 7.043798038 , -0.778657200 , -1.090194030 ],
[ 7.287283091 , -1.482976305 , -1.871437030 ],
[ 6.785083962 , 0.522034140 , -1.338280030 ],
[ 6.813993814 , 0.920071815 , -2.337796030 ],
[ 6.414912804 , 1.403319766 , -0.246380030 ],
[ 6.073588004 , 2.571655559 , -0.351837030 ],
[ 6.457762121 , 0.783286382 , 1.008844970 ],
[ 6.155079450 , 1.342483138 , 1.797579970 ],
[ 6.672978240 , -0.551482081 , 1.292935970 ],
[ 6.568456842 , -1.034239706 , 2.404014970 ],
[ 7.086536680 , -2.285950208 , 0.331021970 ]]},
'Water_dimer': {
'description': "Complex, S22, S26, 1 h-bond, OH-O",
'name': "Water_dimer",
's26_number': "02",
'interaction energy CC':-0.2177,
'interaction energies s22x5':[-0.1873,-0.2155,-0.1752,-0.0993,-0.0416],
'offset': 0.0022,
'symbols': 'OHHOHH',
'magmoms': None,
'dimer atoms': [3,3],
# Optimisation level: CCSD(T)/cc-pVQZ
'positions':[[ -1.551007, -0.114520, 0.000000],
[ -1.934259, 0.762503, 0.000000],
[ -0.599677, 0.040712, 0.000000],
[ 1.350625, 0.111469, 0.000000],
[ 1.680398, -0.373741, -0.758561],
[ 1.680398, -0.373741, 0.758561]],
'positions 0.9':[[ -0.956332646 , -0.120638358 , 0.000000000 ],
[ -1.307535174 , 0.769703274 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 1.756426600 , 0.000000000 , 0.000000000 ],
[ 2.068390928 , -0.496847294 , -0.758561000 ],
[ 2.068390928 , -0.496847294 , 0.758561000 ]],
'positions 1.0':[[ -0.956332646000000 , -0.120638358000000 , 0.000000000000000 ],
[ -1.307535174000000 , 0.769703274000000 , 0.000000000000000 ],
[ 0.000000000000000 , 0.000000000000000 , 0.000000000000000 ],
[ 1.951585111090909 , 0.000000000000000 , 0.000000000000000 ],
[ 2.263549439090909 , -0.496847294000000 , -0.758561000000000 ],
[ 2.263549439090909 , -0.496847294000000 , 0.758561000000000 ]],
'positions 1.2':[[ -0.956332646 , -0.120638358 , 0.000000000 ],
[ -1.307535174 , 0.769703274 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 2.341902133 , 0.000000000 , 0.000000000 ],
[ 2.653866461 , -0.496847294 , -0.758561000 ],
[ 2.653866461 , -0.496847294 , 0.758561000 ]],
'positions 1.5':[[ -0.956332646 , -0.120638358 , 0.000000000 ],
[ -1.307535174 , 0.769703274 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 2.927377666 , 0.000000000 , 0.000000000 ],
[ 3.239341994 , -0.496847294 , -0.758561000 ],
[ 3.239341994 , -0.496847294 , 0.758561000 ]],
'positions 2.0':[[ -0.956332646 , -0.120638358 , 0.000000000 ],
[ -1.307535174 , 0.769703274 , 0.000000000 ],
[ 0.000000000 , 0.000000000 , 0.000000000 ],
[ 3.903170222 , 0.000000000 , 0.000000000 ],
[ 4.215134550 , -0.496847294 , -0.758561000 ],
[ 4.215134550 , -0.496847294 , 0.758561000 ]]},
# --- s26 ---#
'Methanol_dimer': {
'description': "1 h-bond, OH-O, S26",
'name': "Methanol_dimer",
's26_number': "23",
'interaction energy MP2':-0.1947,
'interaction energy CC':-0.2472,
'symbols': 'COHHHHCOHHHH',
'magmoms': None,
# Optimisation level: MP2/cc-pVTZ
'positions':[[ -2.114335, -0.445120, 0.221169],
[ -1.298032, 0.687432, -0.091609],
[ -1.514720, -1.087407, 0.858397],
[ -2.389026, -0.999598, -0.675819],
[ -3.014036, -0.146131, 0.758353],
[ -1.779011, 1.249219, -0.706289],
[ 2.245711, 0.159561, 0.329180],
[ 1.285289, -0.472004, -0.501635],
[ 3.156806, -0.431037, 0.275178],
[ 1.921474, 0.200114, 1.371809],
[ 2.472512, 1.174527, -0.005695],
[ 0.459691, 0.030236, -0.432082]]},
'Methanol-formaldehyde_complex': {
'description': "1 h-bond, OH-O, S26",
's26_number': "24",
'name': "Methanol-formaldehyde_complex",
'interaction energy MP2':-0.1375,
'interaction energy CC':-0.2303,
'symbols': 'COHHHHCOHH',
'magmoms': None,
# Optimisation level: MP2/cc-pVTZ
'positions':[[ 1.4073776162, 1.0401758064, 2.0396751091],
[ 0.9349167370, 0.2900025037, 0.9338944612],
[ 2.1022348002, 0.4092302046, 2.5857336738],
[ 0.6031517696, 1.3305232490, 2.7201012084],
[ 1.9382206717, 1.9424443037, 1.7274684180],
[ 0.2386426835, 0.8096239461, 0.5150020113],
[ -2.0809868810, -0.1309834084, 0.2601720974],
[ -1.6206107677, 0.9480216819, -0.1003790153],
[ -3.1316901290, -0.3840062180, 0.0820343467],
[ -1.4275985002, -0.8637260692, 0.7543476894]]},
'Methyl_amide_dimer_alpha': {
'description': "1 h-bond, NH-O, S26",
's26_number': "25",
'name': "Methyl_amide_dimer_alpha",
'interaction energy MP2':-0.2068,
'interaction energy CC':-0.2901,
'symbols': 'CCOHHHNHHCCOHHHNHH',
'magmoms': None,
# Optimisation level: DFT TPSS/TZVP (hydrogen positions optimized)
'positions':[[ 5.575000, 7.306000, -12.014000],
[ 4.318000, 8.065000, -12.345000],
[ 4.212000, 9.236000, -11.986000],
[ 6.072000, 7.809000, -11.186000],
[ 6.246000, 7.323000, -12.882000],
[ 5.392000, 6.256000, -11.755000],
[ 3.378000, 7.446000, -13.058000],
[ 3.468000, 6.488000, -13.367000],
[ 2.561000, 7.968000, -13.350000],
[ 0.768000, 8.395000, -9.9890000],
[ 1.666000, 9.133000, -8.9870000],
[ 1.355000, 9.267000, -7.8060000],
[ -0.014000, 9.085000, -10.326000],
[ 0.289000, 7.561000, -9.4730000],
[ 1.315000, 8.032000, -10.865000],
[ 2.798000, 9.666000, -9.4430000],
[ 3.139000, 9.599000, -10.401000],
[ 3.350000, 10.195000, -8.779000]]},
'Methyl_amide_dimer_beta': {
'description': "1 h-bond, NH-O, S26",
'name': "Methyl_amide_dimer_beta",
's26_number': "26",
'interaction energy MP2':-0.2342,
'interaction energy CC':-0.3317,
'symbols': 'CCOHHHNHHCCOHHHNHH',
'magmoms': None,
# Optimisation level: DFT TPSS/TZVP (hydrogen positions optimized)
'positions':[[ 0.300000, -7.945000, -4.8440000],
[ -1.133000, -7.581000, -4.4840000],
[ -1.612000, -7.787000, -3.3770000],
[ 0.650000, -7.434000, -5.7440000],
[ 0.351000, -9.028000, -5.0100000],
[ 0.952000, -7.712000, -3.9990000],
[ -1.811000, -7.075000, -5.4730000],
[ -2.781000, -6.832000, -5.3080000],
[ -1.403000, -6.863000, -6.3820000],
[ -0.931000, -6.425000, -10.105000],
[ 0.041000, -6.447000, -8.9820000],
[ -0.356000, -6.488000, -7.8210000],
[ -0.492000, -6.635000, -11.086000],
[ -1.398000, -5.434000, -10.143000],
[ -1.724000, -7.150000, -9.9060000],
[ 1.318000, -6.364000, -9.3020000],
[ 1.636000, -6.336000, -10.260000],
[ 2.015000, -6.339000, -8.5670000]]},
}
def create_s22_system(name, dist=None, **kwargs):
"""Create S22/S26/s22x5 system.
"""
    s22_, s22x5_, s22_name, dist = identify_s22_sys(name, dist)
    d = data[s22_name]
    if s22_:
        return Atoms(d['symbols'], d['positions'], **kwargs)
    elif s22x5_:
        return Atoms(d['symbols'], d['positions ' + dist], **kwargs)
    else:
        raise NotImplementedError('s22/s26/s22x5 creation failed')
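# A minimal usage sketch (not part of the original module); it assumes the
# surrounding imports (e.g. ase.Atoms) are available:
#   atoms = create_s22_system('Water_dimer')                 # equilibrium geometry
#   stretched = create_s22_system('Water_dimer', dist=1.5)   # s22x5 variant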
def identify_s22_sys(name, dist=None):
    """Classify a system as S22/S26 (equilibrium) or S22x5 and return its key.

    Returns the tuple (s22_, s22x5_, s22_name, dist).
    """
    s22_ = False
    s22x5_ = False
    if (name in s22 or name in s26) and dist is None:
        s22_name = name
        s22_ = True
    elif name in s22x5 and dist is None:
        s22_name, dist = get_s22x5_id(name)
        s22x5_ = True
    elif name in s22 and dist is not None:
        dist_ = str(dist)
        if dist_ not in ['0.9', '1.0', '1.2', '1.5', '2.0']:
            raise KeyError('Bad s22x5 distance specified: %s' % dist_)
        s22_name = name
        dist = dist_
        s22x5_ = True
    if s22_ is False and s22x5_ is False:
        raise KeyError('s22 combination %s %s not in database'
                       % (name, str(dist)))
    return s22_, s22x5_, s22_name, dist
def get_s22x5_id(name):
"""Get main name and relative separation distance of an S22x5 system.
"""
s22_name = name[:-4]
dist = name[-3:]
return s22_name, dist
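# get_s22x5_id assumes names whose last four characters encode the distance,
# e.g. 'Water_dimer_0.9' -> ('Water_dimer', '0.9').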
def get_s22_number(name, dist=None):
    """Returns the S22/S26 database number of a system as a string.
    """
    s22_, s22x5_, s22_name, dist_ = identify_s22_sys(name, dist)
    return data[s22_name]['s26_number']
def get_interaction_energy_cc(name, dist=None):
    """Returns the S22/S26 CCSD(T)/CBS CP interaction energy in eV.
    """
    s22_, s22x5_, s22_name, dist_ = identify_s22_sys(name, dist)
    return data[s22_name]['interaction energy CC']
def get_interaction_energy_s22(name, dist=None):
    """Alias of get_interaction_energy_cc for the equilibrium S22 geometry.
    """
    s22_, s22x5_, s22_name, dist_ = identify_s22_sys(name, dist)
    return get_interaction_energy_cc(s22_name)
def get_interaction_energy_s22x5(name, dist=None, correct_offset=True):
    """Returns the S22x5 CCSD(T)/CBS CP interaction energy in eV.
    """
    s22_, s22x5_, s22_name, dist_ = identify_s22_sys(name, dist)
    dists = ['0.9', '1.0', '1.2', '1.5', '2.0']
    if dist_ not in dists:
        raise KeyError('Bad s22x5 distance specified: %s' % dist_)
    i = dists.index(dist_)
    e = data[s22_name]['interaction energies s22x5'][i]
    if correct_offset:
        # Scale so that the 1.0 point reproduces the reference S22 CC energy.
        e *= (data[s22_name]['interaction energy CC'] /
              data[s22_name]['interaction energies s22x5'][1])
    return e
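# Worked example of the offset correction, using the Water_dimer entry above:
# the raw s22x5 energy at 1.5 is -0.0993 eV, the raw 1.0 point is -0.2155 eV,
# and the CCSD(T)/CBS reference is -0.2177 eV, so
#   get_interaction_energy_s22x5('Water_dimer', 1.5)
#   == -0.0993 * (-0.2177 / -0.2155) ~= -0.1003 eV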
def get_name(name, dist=None):
    """Returns the database name of an S22/S26 system.
    """
    s22_, s22x5_, s22_name, dist_ = identify_s22_sys(name, dist)
    if s22x5_ is True:
        raise KeyError('get_name is not defined for s22x5 systems')
    return data[s22_name]['name']
def get_number_of_dimer_atoms(name, dist=None):
    """Returns the number of atoms in each s22 dimer as a list; [x, y].
    """
    s22_, s22x5_, s22_name, dist_ = identify_s22_sys(name, dist)
    return data[s22_name]['dimer atoms']
def get_s22x5_distance(name, dist=None):
    """Returns the relative intermolecular distance in Angstrom.

    The value is the increase in separation relative to the equilibrium
    (1.0) s22 geometry, measured along x between the first and last atoms.
    """
    s22_, s22x5_, s22_name, dist_ = identify_s22_sys(name, dist)
    if s22_ is True:
        raise KeyError('System must be in s22x5')
    x00 = data[s22_name]['positions 1.0'][0][0]
    x01 = data[s22_name]['positions 1.0'][-1][0]
    x10 = data[s22_name]['positions ' + dist_][0][0]
    x11 = data[s22_name]['positions ' + dist_][-1][0]
    d0 = x01 - x00
    d1 = x11 - x10
    return d1 - d0
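# Worked example using the Water_dimer entry above (first/last atom x values):
#   d0 = 2.263549439 - (-0.956332646) = 3.219882085
#   d1 = 4.215134550 - (-0.956332646) = 5.171467196
#   get_s22x5_distance('Water_dimer', '2.0') == d1 - d0 ~= 1.9516 Angstrom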
|
grhawk/ASE
|
tools/ase/data/s22.py
|
Python
|
gpl-2.0
| 197,931
|
[
"ASE"
] |
f4bc1f6588be4749e72c80fb6ce3a18509b1458803c0705d62c67cf4583f1756
|
from __future__ import print_function
from glob import glob
import os
exclude = ["cceom/read_guess.cc", "detci/calc_pt_block.cc", "detci/graphset.cc"]
libs = []
for d in [name for name in os.listdir(".") if os.path.isdir(os.path.join(".", name))]:
print "Processing", d
if d in ["attic", "psi4"]:
continue
libs.append(d)
files = glob("%s/*.cc" % d)
# Remove the lib part now
filenames = []
for f in files:
if f in exclude:
continue
filenames.append(os.path.basename(f))
    # The excluded files above are handled differently elsewhere
outfile = open("%s/CMakeLists.txt" % d, "w")
outfile.write("set(SRC %s)\n" % (" ".join(filenames)))
outfile.write("add_library(%s ${SRC})\n" % d)
outfile.close()
outfile = open("CMakeLists.txt", "w")
for l in libs:
outfile.write("add_subdirectory(%s)\n" % l)
outfile.write("add_subdirectory(psi4)\n")
outfile.close()
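# Example of the generated output for a hypothetical directory "detci"
# containing a.cc and b.cc (a sketch, not produced by the original repo):
#   detci/CMakeLists.txt:
#     set(SRC a.cc b.cc)
#     add_library(detci ${SRC})
#   and the top-level CMakeLists.txt gains: add_subdirectory(detci)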
|
spring01/libPSI
|
src/bin/make_cmake_files.py
|
Python
|
gpl-2.0
| 871
|
[
"Psi4"
] |
3eaa7a4451472b5e162974b1890ba3809b56f315da6f766a712ea05ce0e58fe7
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for synapse handling."""
import dataclasses as dc
import enum
import functools as ft
from typing import Callable, List, Sequence, Text, Union, Optional
import jax.numpy as jp
import numpy as np
import tensorflow.compat.v1 as tf
from blur import blur_env
TensorShape = tf.TensorShape
Tensor = Union[tf.Tensor, np.ndarray, jp.ndarray]
@dc.dataclass
class SynapseInitializerParams:
shape: TensorShape
in_neurons: int
out_neurons: int
class UpdateType(enum.Enum):
FORWARD = 1
BACKWARD = 2
BOTH = 3
NONE = 4
SynapseInitializer = Callable[[SynapseInitializerParams], Tensor]
# A callable that takes a sequence of layers and SynapseInitializer and creates
# appropriately shaped list of Synapses.
CreateSynapseFn = Callable[[Sequence[Tensor], SynapseInitializer], List[Tensor]]
def random_uniform_symmetric(shape, seed):
return (tf.random.uniform(shape, seed=seed) - 0.5) * 2
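# random_uniform_symmetric rescales tf.random.uniform's U[0, 1) samples to
# the symmetric interval [-1, 1).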
def random_initializer(start_seed=0,
scale_by_channels=False,
scale=1,
bias=0,
random_fn=random_uniform_symmetric):
"""Returns initializer that generates random sequence."""
seed = [hash(str(start_seed))]
def impl(params):
if len(params.shape) >= 3:
# shape: species x (in+out) x (in+out) x states
num_channels = int(params.shape[-2])
seed[0] += 1
v = random_fn(params.shape, seed[0])
apply_scale = scale(params) if callable(scale) else scale
r = v * apply_scale + bias
if scale_by_channels:
r = r / (num_channels**0.5)
return r
return impl
def _random_uniform_fn(start_seed):
rng = np.random.RandomState(start_seed)
return lambda shape: tf.constant( # pylint: disable=g-long-lambda
rng.uniform(low=-1, high=1, size=shape), dtype=np.float32)
def fixed_random_initializer(start_seed=0,
scale_by_channels=False,
scale=1,
bias=0,
random_fn=None):
"""Returns an initializer that generates random (but fixed) sequence.
The resulting tensors are backed by a constant so they produce the same
value across all calls.
This initializer uses its own random state that is independent of default
random sequence.
Args:
    start_seed: initial seed passed to np.random.RandomState
scale_by_channels: whether to scale by number of channels.
scale: target scale (default: 1)
bias: mean of the resulting distribution.
    random_fn: random generator; if None, _random_uniform_fn is used
Returns:
callable that accepts shape and returns tensorflow constant tensor.
"""
if random_fn is None:
random_fn = _random_uniform_fn(start_seed)
def impl(params):
if len(params.shape) >= 3:
# shape: species x (in+out) x (in+out) x states
num_channels = int(params.shape[-2])
v = random_fn(shape=params.shape)
apply_scale = scale(params) if callable(scale) else scale
r = v * apply_scale + bias
if scale_by_channels:
r = r / (num_channels**0.5)
return r
return impl
def create_synapse_init_fns(
layers,
initializer):
"""Generates network synapse initializers.
Arguments:
layers: Sequence of network layers (used for shape calculation).
initializer: SynapseInitializer used to initialize synapse tensors.
Returns:
A list of functions that produce synapse tensors for all layers upon
execution.
"""
synapse_init_fns = []
for pre, post in zip(layers, layers[1:]):
# shape: population_dims, batch_size, in_channels, neuron_state
pop_dims = pre.shape[:-3]
# -2: is the number of channels
num_inputs = pre.shape[-2] + post.shape[-2] + 1
# -1: is the number of states in a single neuron.
synapse_shape = (*pop_dims, num_inputs, num_inputs, pre.shape[-1])
params = SynapseInitializerParams(
shape=synapse_shape,
in_neurons=pre.shape[-2],
out_neurons=post.shape[-2])
synapse_init_fns.append(ft.partial(initializer, params))
return synapse_init_fns
def create_synapses(layers,
initializer):
"""Generates arbitrary form synapses.
Arguments:
layers: Sequence of network layers (used for shape calculation).
initializer: SynapseInitializer used to initialize synapse tensors.
Returns:
A list of created synapse tensors for all layers.
"""
return [init_fn() for init_fn in create_synapse_init_fns(layers, initializer)]
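# A minimal usage sketch (hypothetical, not from the original source): layer
# tensors only need a shape of (*pop_dims, batch, channels, states), so plain
# numpy arrays suffice for the shape calculation.
#   import numpy as np
#   layers = [np.zeros((8, 4, 2), np.float32),  # batch=8, in_channels=4, states=2
#             np.zeros((8, 3, 2), np.float32)]  # out_channels=3
#   init = fixed_random_initializer(start_seed=0, scale=0.1)
#   synapses = create_synapses(layers, init)
#   # synapses[0].shape == (4 + 3 + 1, 4 + 3 + 1, 2)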
def transpose_synapse(synapse, env):
num_batch_dims = len(synapse.shape[:-3])
perm = [
*range(num_batch_dims), num_batch_dims + 1, num_batch_dims,
num_batch_dims + 2
]
return env.transpose(synapse, perm)
def synapse_submatrix(synapse,
in_channels,
update_type,
include_bias = True):
"""Returns a submatrix of a synapse matrix given the update type."""
bias = 1 if include_bias else 0
if update_type == UpdateType.FORWARD:
return synapse[Ellipsis, :(in_channels + bias), (in_channels + bias):, :]
if update_type == UpdateType.BACKWARD:
return synapse[Ellipsis, (in_channels + 1):, :(in_channels + bias), :]
def combine_in_out_synapses(in_out_synapse, out_in_synapse,
env):
"""Combines forward and backward synapses into a single matrix."""
batch_dims = in_out_synapse.shape[:-3]
out_channels, in_channels, num_states = in_out_synapse.shape[-3:]
synapse = env.concat([
env.concat([
env.zeros((*batch_dims, out_channels, out_channels, num_states)),
in_out_synapse
],
axis=-2),
env.concat([
out_in_synapse,
env.zeros((*batch_dims, in_channels, in_channels, num_states))
],
axis=-2)
],
axis=-3)
return synapse
def sync_all_synapses(synapses, layers, env):
"""Sync synapses across all layers.
  For each synapse, syncs its first-state forward synapse with the backward
  synapse and copies it across all the states.
Args:
synapses: list of synapses in the network.
layers: list of layers in the network.
env: Environment
Returns:
Synchronized synapses.
"""
for i in range(len(synapses)):
synapses[i] = sync_in_and_out_synapse(synapses[i], layers[i].shape[-2], env)
return synapses
def sync_in_and_out_synapse(synapse, in_channels, env):
"""Copies forward synapse to backward one."""
in_out_synapse = synapse_submatrix(
synapse,
in_channels=in_channels,
update_type=UpdateType.FORWARD,
include_bias=True)
return combine_in_out_synapses(in_out_synapse,
transpose_synapse(in_out_synapse, env), env)
def sync_states_synapse(synapse, env, num_states=None):
"""Sync synapse's first state across all the other states."""
if num_states is None:
num_states = synapse.shape[-1]
return env.stack(num_states * [synapse[Ellipsis, 0]], axis=-1)
def normalize_synapses(synapses,
rescale_to,
env,
axis = -3):
"""Normalizes synapses across a particular axis (across input by def.)."""
# Default value axis=-3 corresponds to normalizing across the input neuron
# dimension.
squared = env.sum(synapses**2, axis=axis, keepdims=True)
synapses /= env.sqrt(squared + 1e-9)
if rescale_to is not None:
synapses *= rescale_to
return synapses
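# In index form with the default axis=-3 (the input-neuron axis), this computes
#   w[..., i, j, s] <- rescale_to * w[..., i, j, s]
#                      / sqrt(sum_i w[..., i, j, s]**2 + 1e-9)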
|
google-research/google-research
|
blur/synapse_util.py
|
Python
|
apache-2.0
| 8,092
|
[
"NEURON"
] |
af3307b32bd51208dbb33bc886232c4ddc67b3b9aeb51b7401f7bfb49f8b90bb
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008-2012 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012-2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from functools import partial
import datetime
import time
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.const import URL_HOMEPAGE
from gramps.gen.display.name import displayer as _nd
from gramps.gen.errors import ReportError
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SERIF, PARA_ALIGN_CENTER,
PARA_ALIGN_LEFT, PARA_ALIGN_RIGHT,
IndexMark, INDEX_TYPE_TOC)
from gramps.gen.plug.docgen.fontscale import string_trim
from gramps.gen.plug.menu import (BooleanOption, StringOption, NumberOption,
EnumeratedListOption, FilterOption,
PersonOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.utils.alive import probably_alive
from gramps.gen.datehandler import displayer as date_displayer
from gramps.gen.lib import (Date, EventRoleType, EventType, Name, NameType,
Person, Surname)
from gramps.gen.lib.date import gregorian
import gramps.plugins.lib.libholiday as libholiday
from gramps.plugins.lib.libholiday import g2iso
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
pt2cm = utils.pt2cm
cm2pt = utils.cm2pt
# _T_ is a gramps-defined keyword -- see po/update_po.py and po/genpot.sh
def _T_(value, context=''): # enable deferred translations
return "%s\x04%s" % (context, value) if context else value
_TITLE1 = _T_("My Calendar")
_TITLE2 = _T_("Produced with Gramps")
#------------------------------------------------------------------------
#
# Calendar
#
#------------------------------------------------------------------------
class Calendar(Report):
"""
Create the Calendar object that produces the report.
incl_private - Whether to include private data
"""
def __init__(self, database, options, user):
Report.__init__(self, database, options, user)
menu = options.menu
self._user = user
stdoptions.run_private_data_option(self, menu)
get_value = lambda name: menu.get_option_by_name(name).get_value()
self.year = get_value('year')
self.name_format = get_value('name_format')
self.country = get_value('country')
self.anniversaries = get_value('anniversaries')
self.start_dow = get_value('start_dow')
self.maiden_name = get_value('maiden_name')
self.alive = get_value('alive')
self.birthdays = get_value('birthdays')
self.text1 = get_value('text1')
self.text2 = get_value('text2')
self.text3 = get_value('text3')
self.filter_option = menu.get_option_by_name('filter')
self.filter = self.filter_option.get_filter()
pid = get_value('pid')
self.center_person = self.database.get_person_from_gramps_id(pid)
if self.center_person is None:
raise ReportError(_("Person %s is not in the Database") % pid)
self.set_locale(get_value('trans'))
    def get_name(self, person, maiden_name=None):
        """ Return the person's display name. If maiden_name is given,
            use the listed married name when one exists; otherwise
            substitute maiden_name as the surname.
        """
# Get all of a person's names:
primary_name = person.get_primary_name()
married_name = None
names = [primary_name] + person.get_alternate_names()
for name in names:
if int(name.get_type()) == NameType.MARRIED:
married_name = name
break # use first
# Now, decide which to use:
if maiden_name is not None:
if married_name is not None:
name = Name(married_name)
else:
name = Name(primary_name)
surname = Surname()
surname.set_surname(maiden_name)
name.set_surname_list([surname])
else:
name = Name(primary_name)
name.set_display_as(self.name_format)
return _nd.display_name(name)
def draw_rectangle(self, style, sx, sy, ex, ey):
""" This should be in BaseDoc """
self.doc.draw_line(style, sx, sy, sx, ey)
self.doc.draw_line(style, sx, sy, ex, sy)
self.doc.draw_line(style, ex, sy, ex, ey)
self.doc.draw_line(style, sx, ey, ex, ey)
### The rest of these all have to deal with calendar specific things
def add_day_item(self, text, month, day, format="CAL-Text", marks=[None]):
""" Add an item to a day. """
month_dict = self.calendar.get(month, {})
day_list = month_dict.get(day, [])
day_list.append((format, text, marks))
month_dict[day] = day_list
self.calendar[month] = month_dict
def __get_holidays(self):
""" Get the holidays for the specified country and year """
holiday_table = libholiday.HolidayTable()
country = holiday_table.get_countries()[self.country]
holiday_table.load_holidays(self.year, country)
for month in range(1, 13):
for day in range(1, 32):
holiday_names = holiday_table.get_holidays(month, day)
for holiday_name in holiday_names:
self.add_day_item(self._(holiday_name), month, day,
"CAL-Holiday")
# FIXME translation only works for a limited set of things
# (the right fix is to somehow feed the locale into the
# HolidayTable class in plugins/lib/libholiday.py and then
# probably changing all the holiday code to somehow defer
# the translation of holidays, until it can be based
# on the passed-in locale, but since that would probably
# also mean checking every use of holidays I don't think
# it is advisable to do, with a release so imminent)
# it is also debatable whether it is worth bothering at
# all, since it is hard for me to imagine why a user would
# be wanting to generate a translated report with holidays
# since I believe its main use will be for dates of people
def write_report(self):
"""
The short method that runs through each month and creates a page.
"""
# initialize the dict to fill:
self.calendar = {}
# get the information, first from holidays:
if self.country != 0:
self.__get_holidays()
# get data from database:
self.collect_data()
# generate the report:
with self._user.progress(_('Calendar Report'),
_('Formatting months...'),
12) as step:
for month in range(1, 13):
step()
self.print_page(month)
def print_page(self, month):
"""
This method actually writes the calendar page.
"""
style_sheet = self.doc.get_style_sheet()
ptitle = style_sheet.get_paragraph_style("CAL-Title")
ptext = style_sheet.get_paragraph_style("CAL-Text")
pdaynames = style_sheet.get_paragraph_style("CAL-Daynames")
pnumbers = style_sheet.get_paragraph_style("CAL-Numbers")
numpos = pt2cm(pnumbers.get_font().get_size())
ptext1style = style_sheet.get_paragraph_style("CAL-Text1style")
long_days = self._ldd.long_days
self.doc.start_page()
width = self.doc.get_usable_width()
height = self.doc.get_usable_height()
header = 2.54 # one inch
mark = None
if month == 1:
mark = IndexMark(self._('Calendar Report'), INDEX_TYPE_TOC, 1)
self.draw_rectangle("CAL-Border", 0, 0, width, height)
self.doc.draw_box("CAL-Title", "", 0, 0, width, header, mark)
self.doc.draw_line("CAL-Border", 0, header, width, header)
year = self.year
# assume every calendar header in the world is "<month-name> <year>"
title = "%s %s" % (self._ldd.long_months[month].capitalize(),
self._get_date(Date(self.year))) # localized year
mark = IndexMark(title, INDEX_TYPE_TOC, 2)
font_height = pt2cm(ptitle.get_font().get_size())
self.doc.center_text("CAL-Title", title,
width/2, font_height * 0.25, mark)
cell_width = width / 7
cell_height = (height - header)/ 6
current_date = datetime.date(year, month, 1)
spacing = pt2cm(1.25 * ptext.get_font().get_size()) # 158
if current_date.isoweekday() != g2iso(self.start_dow + 1):
# Go back to previous first day of week, and start from there
current_ord = (current_date.toordinal() -
((current_date.isoweekday() + 7) -
g2iso(self.start_dow + 1)) % 7)
else:
current_ord = current_date.toordinal()
for day_col in range(7):
font_height = pt2cm(pdaynames.get_font().get_size())
self.doc.center_text("CAL-Daynames",
long_days[(day_col+ g2iso(self.start_dow + 1))
% 7 + 1].capitalize(),
day_col * cell_width + cell_width/2,
header - font_height * 1.5)
for week_row in range(6):
something_this_week = 0
for day_col in range(7):
thisday = current_date.fromordinal(current_ord)
if thisday.month == month:
something_this_week = 1
self.draw_rectangle("CAL-Border", day_col * cell_width,
header + week_row * cell_height,
(day_col + 1) * cell_width,
header + (week_row + 1) * cell_height)
last_edge = (day_col + 1) * cell_width
self.doc.center_text("CAL-Numbers", str(thisday.day),
day_col * cell_width + cell_width/2,
header + week_row * cell_height)
list_ = self.calendar.get(month, {}).get(thisday.day, [])
list_.sort() # to get CAL-Holiday on bottom
position = spacing
for (format, p, m_list) in list_:
for line in reversed(p.split("\n")):
# make sure text will fit:
if position - 0.1 >= cell_height - numpos: # font daynums
break
font = ptext.get_font()
line = string_trim(font, line, cm2pt(cell_width + 0.2))
self.doc.draw_text(format, line,
day_col * cell_width + 0.1,
header + (week_row + 1) * cell_height - position - 0.1, m_list[0])
if len(m_list) > 1: # index the spouse too
self.doc.draw_text(format, "",0,0, m_list[1])
position += spacing
current_ord += 1
if not something_this_week:
last_edge = 0
font_height = pt2cm(1.5 * ptext1style.get_font().get_size())
x = last_edge + (width - last_edge)/2
text1 = str(self.text1)
if text1 == _(_TITLE1):
text1 = self._(_TITLE1)
self.doc.center_text("CAL-Text1style", text1,
x, height - font_height * 3)
text2 = str(self.text2)
if text2 == _(_TITLE2):
text2 = self._(_TITLE2)
self.doc.center_text("CAL-Text2style", text2,
x, height - font_height * 2)
self.doc.center_text("CAL-Text3style", self.text3,
x, height - font_height * 1)
self.doc.end_page()
def collect_data(self):
"""
This method runs through the data, and collects the relevant dates
and text.
"""
db = self.database
people = db.iter_person_handles()
people = self.filter.apply(self.database, people, user=self._user)
ngettext = self._locale.translation.ngettext # to see "nearby" comments
with self._user.progress(_('Calendar Report'),
_('Reading database...'),
len(people)) as step:
for person_handle in people:
step()
person = db.get_person_from_handle(person_handle)
mark = utils.get_person_mark(db, person)
birth_ref = person.get_birth_ref()
birth_date = None
if birth_ref:
birth_event = db.get_event_from_handle(birth_ref.ref)
birth_date = birth_event.get_date_object()
if (self.birthdays and birth_date is not None and birth_date.is_valid()):
birth_date = gregorian(birth_date)
year = birth_date.get_year()
month = birth_date.get_month()
day = birth_date.get_day()
prob_alive_date = Date(self.year, month, day)
nyears = self.year - year
# add some things to handle maiden name:
father_lastname = None # husband, actually
if self.maiden_name in ['spouse_first', 'spouse_last']: # get husband's last name:
if person.get_gender() == Person.FEMALE:
family_list = person.get_family_handle_list()
if family_list:
if self.maiden_name == 'spouse_first':
fhandle = family_list[0]
else:
fhandle = family_list[-1]
fam = db.get_family_from_handle(fhandle)
father_handle = fam.get_father_handle()
mother_handle = fam.get_mother_handle()
if mother_handle == person_handle:
if father_handle:
father = db.get_person_from_handle(father_handle)
if father:
father_lastname = father.get_primary_name().get_surname()
short_name = self.get_name(person, father_lastname)
alive = probably_alive(person, db, prob_alive_date)
if not self.alive or alive:
if nyears == 0:
text = self._('%(person)s, birth') % {
'person' : short_name }
else:
# Translators: leave all/any {...} untranslated
text = ngettext('{person}, {age}',
'{person}, {age}',
nyears).format(person=short_name,
age=nyears)
self.add_day_item(text, month, day, marks=[mark])
if self.anniversaries:
family_list = person.get_family_handle_list()
for fhandle in family_list:
fam = db.get_family_from_handle(fhandle)
father_handle = fam.get_father_handle()
mother_handle = fam.get_mother_handle()
if father_handle == person.get_handle():
spouse_handle = mother_handle
else:
continue # with next person if the father is not "person"
# this will keep from duplicating the anniversary
if spouse_handle:
spouse = db.get_person_from_handle(spouse_handle)
if spouse:
s_m = utils.get_person_mark(db, spouse)
spouse_name = self.get_name(spouse)
short_name = self.get_name(person)
# TEMP: this will handle ordered events
# Gramps 3.0 will have a new mechanism for start/stop events
are_married = None
for event_ref in fam.get_event_ref_list():
event = db.get_event_from_handle(event_ref.ref)
et = EventType
rt = EventRoleType
if event.type in [et.MARRIAGE,
et.MARR_ALT] and \
(event_ref.get_role() == rt.FAMILY or
event_ref.get_role() == rt.PRIMARY ):
are_married = event
elif event.type in [et.DIVORCE,
et.ANNULMENT,
et.DIV_FILING] and \
(event_ref.get_role() == rt.FAMILY or
event_ref.get_role() == rt.PRIMARY ):
are_married = None
if are_married is not None:
for event_ref in fam.get_event_ref_list():
event = db.get_event_from_handle(event_ref.ref)
event_obj = event.get_date_object()
if event_obj.is_valid():
event_obj = gregorian(event_obj)
year = event_obj.get_year()
month = event_obj.get_month()
day = event_obj.get_day()
prob_alive_date = Date(self.year, month, day)
nyears = self.year - year
if nyears == 0:
text = self._('%(spouse)s and\n %(person)s, wedding') % {
'spouse' : spouse_name,
'person' : short_name,
}
else:
# Translators: leave all/any {...} untranslated
text = ngettext("{spouse} and\n {person}, {nyears}",
"{spouse} and\n {person}, {nyears}",
nyears).format(spouse=spouse_name, person=short_name, nyears=nyears)
alive1 = probably_alive(person,
self.database,
prob_alive_date)
alive2 = probably_alive(spouse,
self.database,
prob_alive_date)
if ((self.alive and alive1 and alive2) or not self.alive):
self.add_day_item(text, month, day,
marks=[mark,s_m])
#------------------------------------------------------------------------
#
# CalendarOptions
#
#------------------------------------------------------------------------
class CalendarOptions(MenuReportOptions):
""" Calendar options for graphic calendar """
def __init__(self, name, dbase):
self.__db = dbase
self.__pid = None
self.__filter = None
MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
return self.__filter.get_filter().get_name()
def add_menu_options(self, menu):
""" Add the options for the graphical calendar """
##########################
category_name = _("Report Options")
add_option = partial(menu.add_option, category_name)
##########################
self.__filter = FilterOption(_("Filter"), 0)
self.__filter.set_help(
_("Select filter to restrict people that appear on calendar"))
add_option("filter", self.__filter)
self.__filter.connect('value-changed', self.__filter_changed)
self.__pid = PersonOption(_("Center Person"))
self.__pid.set_help(_("The center person for the report"))
menu.add_option(category_name, "pid", self.__pid)
self.__pid.connect('value-changed', self.__update_filters)
text1 = StringOption(_("Text Area 1"), _(_TITLE1))
text1.set_help(_("First line of text at bottom of calendar"))
add_option("text1", text1)
text2 = StringOption(_("Text Area 2"), _(_TITLE2))
text2.set_help(_("Second line of text at bottom of calendar"))
add_option("text2", text2)
text3 = StringOption(_("Text Area 3"), URL_HOMEPAGE)
text3.set_help(_("Third line of text at bottom of calendar"))
add_option("text3", text3)
##########################
category_name = _("Report Options (2)")
add_option = partial(menu.add_option, category_name)
##########################
self._nf = stdoptions.add_name_format_option(menu, category_name)
self._nf.connect('value-changed', self.__update_filters)
self.__update_filters()
stdoptions.add_private_data_option(menu, category_name)
alive = BooleanOption(_("Include only living people"), True)
alive.set_help(_("Include only living people in the calendar"))
add_option("alive", alive)
stdoptions.add_localization_option(menu, category_name)
##########################
category_name = _("Content")
add_option = partial(menu.add_option, category_name)
##########################
year = NumberOption(_("Year of calendar"), time.localtime()[0],
1000, 3000)
year.set_help(_("Year of calendar"))
add_option("year", year)
country = EnumeratedListOption(_("Country for holidays"), 0)
holiday_table = libholiday.HolidayTable()
countries = holiday_table.get_countries()
countries.sort()
        if not countries or countries[0] != '':
countries.insert(0, '')
count = 0
for c in countries:
country.add_item(count, c)
count += 1
country.set_help(_("Select the country to see associated holidays"))
add_option("country", country)
start_dow = EnumeratedListOption(_("First day of week"), 1)
long_days = date_displayer.long_days
for count in range(1, 8):
# conversion between gramps numbering (sun=1)
# and iso numbering (mon=1) of weekdays below
start_dow.add_item((count + 5) % 7 + 1,
long_days[count].capitalize())
start_dow.set_help(
_("Select the first day of the week for the calendar"))
add_option("start_dow", start_dow)
maiden_name = EnumeratedListOption(_("Birthday surname"), "own")
maiden_name.add_item(
"spouse_first",
_("Wives use husband's surname (from first family listed)"))
maiden_name.add_item(
"spouse_last",
_("Wives use husband's surname (from last family listed)"))
maiden_name.add_item("own", _("Wives use their own surname"))
maiden_name.set_help(_("Select married women's displayed surname"))
add_option("maiden_name", maiden_name)
birthdays = BooleanOption(_("Include birthdays"), True)
birthdays.set_help(_("Whether to include birthdays"))
add_option("birthdays", birthdays)
anniversaries = BooleanOption(_("Include anniversaries"), True)
anniversaries.set_help(_("Whether to include anniversaries"))
add_option("anniversaries", anniversaries)
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
nfv = self._nf.get_value()
filter_list = utils.get_person_filters(person,
include_single=False,
name_format=nfv)
self.__filter.set_filters(filter_list)
def __filter_changed(self):
"""
Handle filter change. If the filter is not specific to a person,
disable the person option
"""
filter_value = self.__filter.get_value()
if filter_value == 0: # "Entire Database" (as "include_single=False")
self.__pid.set_available(False)
else:
# The other filters need a center person (assume custom ones too)
self.__pid.set_available(True)
    def make_my_style(self, default_style, name, description,
                      size=9, font=FONT_SERIF, justified="left",
                      color=None, align=PARA_ALIGN_CENTER,
                      shadow=None, italic=0, bold=0, borders=0, indent=None):
""" Create paragraph and graphic styles of the same name """
# Paragraph:
f = FontStyle()
f.set_size(size)
f.set_type_face(font)
f.set_italic(italic)
f.set_bold(bold)
p = ParagraphStyle()
p.set_font(f)
p.set_alignment(align)
p.set_description(description)
p.set_top_border(borders)
p.set_left_border(borders)
p.set_bottom_border(borders)
p.set_right_border(borders)
if indent:
p.set(first_indent=indent)
if justified == "left":
p.set_alignment(PARA_ALIGN_LEFT)
elif justified == "right":
p.set_alignment(PARA_ALIGN_RIGHT)
elif justified == "center":
p.set_alignment(PARA_ALIGN_CENTER)
default_style.add_paragraph_style(name, p)
# Graphics:
g = GraphicsStyle()
g.set_paragraph_style(name)
if shadow:
g.set_shadow(*shadow)
if color is not None:
g.set_fill_color(color)
if not borders:
g.set_line_width(0)
default_style.add_draw_style(name, g)
def make_default_style(self, default_style):
""" Add the styles used in this report """
self.make_my_style(default_style, "CAL-Title",
_('Title text and background color'), 20,
bold=1, italic=1,
color=(0xEA, 0xEA, 0xEA))
self.make_my_style(default_style, "CAL-Numbers",
_('Calendar day numbers'), 13,
bold=1)
self.make_my_style(default_style, "CAL-Text",
_('Daily text display'), 9)
self.make_my_style(default_style, "CAL-Holiday",
_('Holiday text display'), 9,
bold=1, italic=1)
self.make_my_style(default_style, "CAL-Daynames",
_('Days of the week text'), 12,
italic=1, bold=1,
                           color=(0xEA, 0xEA, 0xEA))
self.make_my_style(default_style, "CAL-Text1style",
_('Text at bottom, line 1'), 12)
self.make_my_style(default_style, "CAL-Text2style",
_('Text at bottom, line 2'), 12)
self.make_my_style(default_style, "CAL-Text3style",
_('Text at bottom, line 3'), 9)
self.make_my_style(default_style, "CAL-Border",
_('Borders'), borders=True)
|
Nick-Hall/gramps
|
gramps/plugins/drawreport/calendarreport.py
|
Python
|
gpl-2.0
| 30,476
|
[
"Brian"
] |
693353c8935ee72410d1b91ad1694b91bc4106cddd0623eccc26e223ef00a0a0
|
"""
Global average annual temperature plot
======================================
Produces a time-series plot of North American temperature forecasts for 2
different emission scenarios. Constraining data to a limited spatial area also
features in this example.
The data used comes from the HadGEM2-AO model simulations for the A1B and E1
scenarios, both of which were derived using the IMAGE Integrated Assessment
Model (Johns et al. 2011; Lowe et al. 2009).
References
----------
Johns T.C., et al. (2011) Climate change under aggressive mitigation: the
ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10,
doi:10.1007/s00382-011-1005-5.
Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F.
Royer, and P. van der Linden, 2009. New Study For Climate Modeling,
Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21,
doi:10.1029/2009EO210001.
.. seealso::
Further details on the aggregation functionality being used in this example
can be found in :ref:`cube-statistics`.
"""
import numpy as np
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
import iris.analysis.cartography
def main():
    # Load the scenario data into two cubes, one per set of NetCDF files.
e1 = iris.load_cube(iris.sample_data_path('E1_north_america.nc'))
a1b = iris.load_cube(iris.sample_data_path('A1B_north_america.nc'))
    # Load the global pre-industrial mean temperature, and limit the domain
    # to the same North American region covered by e1 and a1b.
north_america = iris.Constraint(longitude=lambda v: 225 <= v <= 315,
latitude=lambda v: 15 <= v <= 60)
pre_industrial = iris.load_cube(iris.sample_data_path('pre-industrial.pp'),
north_america)
# Generate area-weights array. As e1 and a1b are on the same grid we can
# do this just once and re-use. This method requires bounds on lat/lon
# coords, so let's add some in sensible locations using the "guess_bounds"
# method.
e1.coord('latitude').guess_bounds()
e1.coord('longitude').guess_bounds()
e1_grid_areas = iris.analysis.cartography.area_weights(e1)
pre_industrial.coord('latitude').guess_bounds()
pre_industrial.coord('longitude').guess_bounds()
pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)
# Perform the area-weighted mean for each of the datasets using the
# computed grid-box areas.
pre_industrial_mean = pre_industrial.collapsed(['latitude', 'longitude'],
iris.analysis.MEAN,
weights=pre_grid_areas)
e1_mean = e1.collapsed(['latitude', 'longitude'],
iris.analysis.MEAN,
weights=e1_grid_areas)
a1b_mean = a1b.collapsed(['latitude', 'longitude'],
iris.analysis.MEAN,
weights=e1_grid_areas)
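    # Note: area_weights returns an array of grid-box areas matching each
    # cube's shape, so every weighted collapse above amounts to the
    # area-weighted average sum(w * T) / sum(w) over latitude and longitude.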
# Plot the datasets
qplt.plot(e1_mean, label='E1 scenario', lw=1.5, color='blue')
qplt.plot(a1b_mean, label='A1B-Image scenario', lw=1.5, color='red')
# Draw a horizontal line showing the pre-industrial mean
plt.axhline(y=pre_industrial_mean.data, color='gray', linestyle='dashed',
label='pre-industrial', lw=1.5)
# Constrain the period 1860-1999 and extract the observed data from a1b
constraint = iris.Constraint(time=lambda
cell: 1860 <= cell.point.year <= 1999)
observed = a1b_mean.extract(constraint)
# Assert that this data set is the same as the e1 scenario:
# they share data up to the 1999 cut off.
assert np.all(np.isclose(observed.data,
e1_mean.extract(constraint).data))
# Plot the observed data
qplt.plot(observed, label='observed', color='black', lw=1.5)
# Add a legend and title
plt.legend(loc="upper left")
plt.title('North American mean air temperature', fontsize=18)
plt.xlabel('Time / year')
plt.grid()
iplt.show()
if __name__ == '__main__':
main()
|
dkillick/iris
|
docs/iris/example_code/Meteorology/COP_1d_plot.py
|
Python
|
lgpl-3.0
| 4,186
|
[
"NetCDF"
] |
ea3ba13ce54e28392b2db6353454184c9079bf921efa095b12f1e962dbbb57ea
|
import py
import os.path
dirpath = py.path.local("./")
def pytest_generate_tests(metafunc):
if "filename" in metafunc.funcargnames:
for fpath in dirpath.visit('*.scad'):
metafunc.addcall(id=fpath.basename, funcargs=dict(filename=fpath.basename))
for fpath in dirpath.visit('*.py'):
name = fpath.basename
if not (name.startswith('test_') or name.startswith('_')):
metafunc.addcall(id=fpath.basename, funcargs=dict(filename=fpath.basename))
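# Sketch of the effect of the hook above (file names are hypothetical):
# a directory containing 'gears.scad' and 'involute.py' yields one call of
# every test taking a `filename` argument per file, e.g.
# test_README('gears.scad') and test_README('involute.py').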
def test_README(filename):
README = dirpath.join('README').read()
assert filename in README
|
Obijuan/tutorial-openscad
|
temporada-2/T16-estudiando-codigo-de-otros/04-cyclone/smooth_rod_fix/MCAD/test_docs.py
|
Python
|
gpl-2.0
| 615
|
[
"VisIt"
] |
32da298156d346f13cee2bd9a8e29bb59d76e98ef0cad2ebda60b3443921174a
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
from espressomd.interactions import *
from tests_common import abspath
class ParticleProperties(ut.TestCase):
# def __init__(self,particleId):
# self.pid=particleId
# the system which will be tested
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
# Particle id to work on
pid = 17
# Error tolerance when comparing arrays/tuples...
tol = 1E-9
def bondsMatch(self, inType, outType, inParams, outParams):
"""Check, if the bond type set and gotten back as well as the bond
parameters set and gotten back match. Only check keys present in
inParams.
"""
if inType != outType:
return False
for k in list(inParams.keys()):
if k not in outParams:
return False
if outParams[k] != inParams[k]:
return False
return True
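    # Illustration only (hypothetical values): bondsMatch(0, 0, {"k": 1.0},
    # {"k": 1.0, "r_0": 2.0}) returns True, since only the keys present in
    # inParams are compared against outParams.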
def setUp(self):
if not self.system.part.exists(self.pid):
            self.system.part.add(id=self.pid, pos=(0, 0, 0))
def generateTestForBondParams(_bondId, _bondClass, _params):
"""Generates test cases for checking bond parameters set and gotten back
from Es actually match. Only keys which are present in _params are checked
1st arg: Id of the bonded ia in Espresso to test on, i.e., 0,2,1...
2nd: Class of the bond potential to test, ie.e, FeneBond, HarmonicBond
3rd: Bond parameters as dictionary, i.e., {"k"=1.,"r_0"=0.
"""
bondId = _bondId
bondClass = _bondClass
params = _params
def func(self):
# This code is run at the execution of the generated function.
# It will use the state of the variables in the outer function,
# which was there, when the outer function was called
self.system.bonded_inter[bondId] = bondClass(**params)
outBond = self.system.bonded_inter[bondId]
tnIn = bondClass(**params).type_number()
tnOut = outBond.type_number()
outParams = outBond.params
self.assertTrue(
self.bondsMatch(
tnIn,
tnOut,
params,
outParams),
bondClass(
**params).type_name() +
": value set and value gotten back differ for bond id " +
str(bondId) +
": " +
params.__str__() +
" vs. " +
outParams.__str__())
return func
test_fene = generateTestForBondParams(
0, FeneBond, {"r_0": 1.1, "k": 5.2, "d_r_max": 3.})
test_fene2 = generateTestForBondParams(
1, FeneBond, {"r_0": 1.1, "k": 5.2, "d_r_max": 3.})
test_harmonic = generateTestForBondParams(
0, HarmonicBond, {"r_0": 1.1, "k": 5.2})
test_harmonic2 = generateTestForBondParams(
0, HarmonicBond, {"r_0": 1.1, "k": 5.2, "r_cut": 1.3})
if espressomd.has_features(["ROTATION"]):
test_harmonic_dumbbell = generateTestForBondParams(
0, HarmonicDumbbellBond, {"k1": 1.1, "k2": 2.2, "r_0": 1.5})
test_harmonic_dumbbell2 = generateTestForBondParams(
0, HarmonicDumbbellBond, {"k1": 1.1, "k2": 2.2, "r_0": 1.5, "r_cut": 1.9})
test_dihedral = generateTestForBondParams(
0, Dihedral, {"mult": 3.0, "bend": 5.2, "phase": 3.})
if espressomd.has_features(["BOND_ANGLE"]):
test_angle_harm = generateTestForBondParams(
0, AngleHarmonic, {"bend": 5.2, "phi0": 3.2})
test_angle_cos = generateTestForBondParams(
0, AngleCosine, {"bend": 5.2, "phi0": 3.2})
test_angle_cossquare = generateTestForBondParams(
0, AngleCossquare, {"bend": 5.2, "phi0": 0.})
if espressomd.has_features(["LENNARD_JONES"]):
test_subt_lj = generateTestForBondParams(
0, SubtLJ, {})
if espressomd.has_features(["TABULATED"]):
test_tabulated = generateTestForBondParams(0, Tabulated, {"type": "distance",
"min": 1.,
"max": 2.,
"energy": [1.,2.,3.],
"force": [3.,4.,5.]})
if __name__ == "__main__":
print("Features: ", espressomd.features())
ut.main()
|
KonradBreitsprecher/espresso
|
testsuite/interactions_bonded_interface.py
|
Python
|
gpl-3.0
| 5,347
|
[
"ESPResSo"
] |
88833feb2acbbb9aeab5c9f0e9f207ecaf03f7d8eb79585681b92e15e04dfc61
|
# We want 1/2==0.5
from __future__ import division
"""Copyright (c) 2005-2017, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Initial work on a Python tool for processing CellML files.
# Eventual features:
# - Featureful & draconian validation
# - Apply various automatic optimisations, including PE & LUT
# - Easy code generation
# - Work around common problems in models, such as 0/0
# - As in Memfem, convert from alpha&beta form to tau&inf
# This module contains code common to both validation and transformation.
# Ideas:
# - CellML has the potential for introducing scripting functionality.
# This may be a good way of incorporating LUT into the data model.
# A special component could represent the table generation, and a
# scripted function the lookup.
# Alternatively we could add attributes in an extension namespace
# to specify LUT parameters (var name, min, max, step).
# - Add attributes in an extension namespace to represent binding
# time annotations.
# - We could do units conversions in a separate processing pass,
# adding in extra mathematics to the CellML.
# Turn off DeprecationWarnings
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
# Pythonic XML bindings
import amara
from amara import bindery as bt
from Ft.Xml import SplitQName
from xml.dom import Node # For nodeType values
import copy
import itertools
import math
import operator
import sys
import types
from cStringIO import StringIO
from utilities import *
from enum import Enum # Pythonic enums
import cellml_metadata # Handle RDF metadata for CellML
processors = None
def import_processors():
"""Lazy import."""
global processors
if processors is None:
import processors
__version__ = "$Revision$"[11:-2]
######################################################################
# Logging #
######################################################################
import logging
# Default config for root logger
logging.basicConfig(level=logging.CRITICAL,
format="%(name)s: %(levelname)s: %(message)s",
stream=sys.stderr)
logging.getLogger().handlers[0].setLevel(logging.CRITICAL)
# Extra logging levels
# This level is a warning according to the spec, but an unrecoverable
# condition as far as translation is concerned.
logging.WARNING_TRANSLATE_ERROR = logging.WARNING + 5
logging.addLevelName(logging.WARNING_TRANSLATE_ERROR, 'WARNING')
# We specify some namespace prefixes; others are picked
# up automatically. These are the standard namespaces we
# expect to see in CellML documents; a warning will be given
# if others are found.
NSS = {u'm' : u'http://www.w3.org/1998/Math/MathML',
u'cml': u'http://www.cellml.org/cellml/1.0#',
# Our extensions; URIs will probably change?
u'pe': u'https://chaste.comlab.ox.ac.uk/cellml/ns/partial-evaluation#',
u'lut': u'https://chaste.comlab.ox.ac.uk/cellml/ns/lookup-tables',
u'solver': u'https://chaste.comlab.ox.ac.uk/cellml/ns/solver-info',
u'oxmeta': u'https://chaste.comlab.ox.ac.uk/cellml/ns/oxford-metadata#',
u'pycml': u'https://chaste.comlab.ox.ac.uk/cellml/ns/pycml#',
u'proto': u'https://chaste.cs.ox.ac.uk/nss/protocol/0.1#',
# Metadata-related
u'cmeta' : u"http://www.cellml.org/metadata/1.0#",
u'rdf' : u"http://www.w3.org/1999/02/22-rdf-syntax-ns#",
u'dc' : u"http://purl.org/dc/elements/1.1/",
u'dcterms': u"http://purl.org/dc/terms/",
u'bqs' : u"http://www.cellml.org/bqs/1.0#",
u'vCard' : u"http://www.w3.org/2001/vcard-rdf/3.0#",
u'cg' : u"http://www.cellml.org/metadata/graphs/1.0#",
u'cs' : u"http://www.cellml.org/metadata/simulation/1.0#",
u'csub' : u"http://www.cellml.org/metadata/custom_subset/1.0#",
u'bqbiol' : u"http://biomodels.net/biology-qualifiers/",
# Temporary documentation namespace
u'doc' : u"http://cellml.org/tmp-documentation"
}
# Variable classifications
VarTypes = Enum('Unknown', 'Free', 'State', 'MaybeConstant', 'Constant',
'Computed', 'Mapped')
# Elements in the CellML subset of MathML
CELLML_SUBSET_ELTS = frozenset(
['math', 'cn', 'sep', 'ci', 'apply', 'piecewise', 'piece', 'otherwise',
'eq', 'neq', 'gt', 'lt', 'geq', 'leq',
'plus', 'minus', 'times', 'divide', 'power', 'root', 'abs',
'exp', 'ln', 'log', 'floor', 'ceiling', 'factorial',
'and', 'or', 'not', 'xor',
'diff', 'degree', 'bvar', 'logbase',
'sin', 'cos', 'tan', 'sec', 'csc', 'cot',
'sinh', 'cosh', 'tanh', 'sech', 'csch', 'coth',
'arcsin', 'arccos', 'arctan', 'arcsec', 'arccsc', 'arccot',
'arcsinh', 'arccosh', 'arctanh', 'arcsech', 'arccsch', 'arccoth',
'true', 'false', 'notanumber', 'pi', 'infinity', 'exponentiale',
'semantics', 'annotation', 'annotation-xml'])
# Binding times for BTA
BINDING_TIMES = Enum('static', 'dynamic')
######################################################################
# Helpful utility functions #
######################################################################
def make_xml_binder():
"""
Create a specialised binder, given some mappings from element names
to python classes, and setting namespace prefixes.
"""
binder = amara.bindery.binder(prefixes=NSS)
binder.set_binding_class(NSS[u'cml'], "model", cellml_model)
binder.set_binding_class(NSS[u'cml'], "component", cellml_component)
binder.set_binding_class(NSS[u'cml'], "variable", cellml_variable)
binder.set_binding_class(NSS[u'cml'], "units", cellml_units)
binder.set_binding_class(NSS[u'cml'], "unit", cellml_unit)
for mathml_elt in ['math', 'degree', 'logbase', 'otherwise',
'diff', 'plus', 'minus', 'times', 'divide',
'exp', 'ln', 'log', 'abs', 'power', 'root',
'leq', 'geq', 'lt', 'gt', 'eq', 'neq',
'rem',
'ci', 'cn', 'apply', 'piecewise', 'piece',
'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan',
'csymbol']:
exec "binder.set_binding_class(NSS[u'm'], '%s', mathml_%s)" % (mathml_elt, mathml_elt)
binder.set_binding_class(NSS[u'm'], "and_", mathml_and)
binder.set_binding_class(NSS[u'm'], "or_", mathml_or)
return binder
def amara_parse_cellml(source, uri=None, prefixes=None):
"""Parse a CellML source with default rules and bindings."""
binder = make_xml_binder()
rules = [bt.ws_strip_element_rule(u'*')]
return amara_parse(source, rules=rules, binderobj=binder)
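# Hypothetical usage sketch (not part of the original module):
# doc = amara_parse_cellml('mymodel.cellml')
# would parse the file with whitespace stripped, returning a binding whose
# model element is a cellml_model instance thanks to make_xml_binder().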
def check_append_safety(elt):
"""
Check whether elt is safe to make a child, i.e. that it isn't
already a child elsewhere.
"""
assert getattr(elt, 'next_elem', None) is None
parent = getattr(elt, 'xml_parent', None)
if parent:
assert elt not in parent.xml_children
class element_base(amara.bindery.element_base):
"""
Base element class to allow me to set certain attributes on my instances
that are Python objects rather than unicode strings.
"""
def __init__(self):
self.xml_attributes = {} # Amara should really do this!
super(element_base, self).__init__()
def __delattr__(self, key):
"""
Bypass Amara's __delattr__ for attribute names that start with _cml_
"""
if key.startswith('_cml_'):
del self.__dict__[key]
else:
amara.bindery.element_base.__delattr__(self, key)
def __setattr__(self, key, value):
"""
Bypass Amara's __setattr__ for attribute names that start with _cml_
"""
if key.startswith('_cml_'):
self.__dict__[key] = value
else:
amara.bindery.element_base.__setattr__(self, key, value)
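    # Illustration: elt._cml_meta = obj stores obj directly in __dict__ and
    # bypasses Amara's machinery, whereas any other attribute name is
    # handled by amara.bindery.element_base as usual.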
@property
def rootNode(self):
p = self.parentNode
if p:
return p.rootNode
elif isinstance(self, mathml):
raise ValueError('MathML element with no parent!')
return self
@property
def cmeta_id(self):
"""Get the value of the cmeta:id attribute, or the empty string if not set."""
return self.getAttributeNS(NSS['cmeta'], u'id')
def xml_remove_child_at(self, index=-1):
"""
Remove child object at a given index
index - optional, 0-based index of child to remove (defaults to the last child)
"""
obj = self.xml_children[index]
if isinstance(obj, unicode):
del self.xml_children[index]
else:
# Remove references to the object
# Probably a slow way to go about this
for attr, val in self.__dict__.items():
if not (attr.startswith('xml') or
attr.startswith('_cml_') or
attr in self.xml_ignore_members):
next = getattr(val, 'next_elem', None)
if val == obj:
del self.__dict__[attr]
if next: self.__dict__[attr] = next
while next:
prev, val = val, next
next = getattr(val, 'next_elem', None)
if val == obj:
prev.next_elem = next
break
del self.xml_children[index]
def xml_doc(self):
msg = []
xml_attrs = []
if hasattr(self, 'xml_attributes'):
msg.append('Object references based on XML attributes:')
for apyname in self.xml_attributes:
local, ns = self.xml_attributes[apyname]
if ns:
source_phrase = " based on '{%s}%s' in XML"%(ns, local)
else:
source_phrase = " based on '%s' in XML"%(local)
msg.append(apyname+source_phrase)
xml_attrs.append(apyname)
msg.append('Object references based on XML child elements:')
for attr, val in self.__dict__.items():
if not (attr.startswith('xml') or
attr.startswith('_cml_') or
attr in self.xml_ignore_members):
if attr not in xml_attrs:
count = len(list(getattr(self, attr)))
if count == 1:
count_phrase = " (%s element)"%count
else:
count_phrase = " (%s elements)"%count
local, ns = val.localName, val.namespaceURI
if ns:
source_phrase = " based on '{%s}%s' in XML"%(ns, local)
else:
source_phrase = " based on '%s' in XML"%(local)
msg.append(attr+count_phrase+source_phrase)
return u'\n'.join(msg)
@property
def xml_properties(self):
"""
Return a dictionary whose keys are Python properties on this
        object that represent XML attributes and elements, and whose values
        are the corresponding objects (a subset of __dict__).
"""
properties = {}
for attr in self.__dict__:
if (not (attr.startswith('xml')
or attr.startswith('_cml_')
or attr in self.xml_ignore_members)):
properties[attr] = self.__dict__[attr]
return properties
# Add some improved/new methods to all bindings
def add_methods_to_amara():
def getAttributeNS(self, ns, local, default=u""):
"""
Get the value of an attribute specified by namespace and localname.
Optionally can also pass a default value if the attribute
doesn't exist (defaults to the empty string).
"""
attrs = getattr(self, 'xml_attributes', {})
keys = [ (ns_, SplitQName(qname)[1])
for _, (qname, ns_) in attrs.items() ]
values = [ unicode(getattr(self, attr))
for attr, (qname, ns_) in attrs.items() ]
attr_dict = dict(zip(keys, values))
return attr_dict.get((ns, local), default)
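    # Illustration: elt.getAttributeNS(NSS['cmeta'], u'id') returns the
    # cmeta:id attribute value, or u"" if the attribute is absent (cf. the
    # cmeta_id property on element_base above).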
def xml_element_children(self, elt=None):
"""Return an iterable over child elements of this element."""
if elt is None:
elt = self
for child in elt.xml_children:
if getattr(child, 'nodeType', None) == Node.ELEMENT_NODE:
yield child
def safe_remove_child(self, child, parent=None):
"""Remove a child element from parent in such a way that it can safely be added elsewhere."""
if parent is None: parent = self
parent.xml_remove_child(child)
child.next_elem = None
def replace_child(self, old, new, parent=None):
"""Replace child old of parent with new."""
if parent is None: parent = self
parent.xml_insert_after(old, new)
self.safe_remove_child(old, parent)
import new
for method in ['getAttributeNS', 'xml_element_children', 'safe_remove_child', 'replace_child']:
meth = new.instancemethod(locals()[method], None, amara.bindery.element_base)
setattr(amara.bindery.element_base, method, meth)
add_methods_to_amara()
class comment_base(amara.bindery.comment_base):
"""An iterable version of comment nodes."""
def __init__(self, body=None):
amara.bindery.comment_base.__init__(self, body)
def __iter__(self):
return unitary_iterator(self)
######################################################################
# CellML elements #
######################################################################
class cellml_model(element_base):
"""
Specialised class for the model element of a CellML document.
Adds methods for collecting and reporting validation errors, etc.
"""
def __init__(self):
element_base.__init__(self)
self._cml_validation_errors = []
self._cml_validation_warnings = []
self._cml_variables = {}
self._cml_components = {}
self._cml_units = {}
self._cml_standard_units = {}
self._cml_units_map = {}
# Topologically sorted assignments list
self._cml_assignments = []
def __del__(self):
self.clean_up()
def clean_up(self):
"""Try to get the RDF library to clean up nicely."""
cellml_metadata.remove_model(self)
def get_config(self, config_attr=None):
"""Get the configuration store if it exists, or an attribute thereof."""
config = getattr(self.xml_parent, '_cml_config', None)
if config_attr:
config = getattr(config, config_attr, None)
return config
def get_option(self, option_name):
"""Get the value of a command-line option."""
config = getattr(self.xml_parent, '_cml_config', None)
return config and getattr(config.options, option_name)
def get_component_by_name(self, compname):
"""Return the component object that has name `compname'."""
return self._cml_components[compname]
def get_variable_by_name(self, compname, varname):
"""
Return the variable object with name `varname' in component
`compname'.
"""
try:
var = self._cml_variables[(compname, varname)]
except KeyError, e:
if compname == u'':
if self.component.ignore_component_name:
compname = self.component.name
else:
try:
compname, varname = cellml_variable.split_name(varname)
except ValueError:
raise e
var = self._cml_variables[(compname, varname)]
else:
raise e
return var
def get_variable_by_oxmeta_name(self, name, throw=True):
"""
Get the unique variable in this model with the given Oxford metadata
name annotation.
If throw is True, will raise ValueError if there is no such variable,
or more than 1 match. If throw is False, returns None in these cases.
"""
vars = cellml_metadata.find_variables(self,
('bqbiol:is', NSS['bqbiol']),
('oxmeta:'+str(name), NSS['oxmeta']))
if len(vars) == 1:
var = vars[0]
elif throw:
raise ValueError('"%s" does not name a unique variable (matches: %s)'
% (name, str(vars)))
else:
var = None
return var
def get_variables_by_ontology_term(self, term, transitive=False):
"""Return a list of variables annotated with the given ontology term.
The annotations have the same form as for oxmeta name annotations (see get_variable_by_oxmeta_name).
However, here we are not restricted to namespace, and no check is done on the number of results returned.
:param term: must be a (prefixed_name, nsuri) tuple.
:param transitive: if True, look not just for direct annotations but also for terms belonging to
the class given by prefixed_name, searching transitively along rdf:type predicates.
"""
assert isinstance(term, tuple)
assert len(term) == 2
vars = []
if transitive:
terms = cellml_metadata.transitive_subjects(term)
else:
terms = [term]
for term in terms:
vars.extend(cellml_metadata.find_variables(self, ('bqbiol:is', NSS['bqbiol']), term))
vars.extend(cellml_metadata.find_variables(self, ('bqbiol:isVersionOf', NSS['bqbiol']), term))
return vars
def get_variable_by_cmeta_id(self, cmeta_id):
"""
Get the unique variable in this model with the given cmeta:id attribute value.
"""
vars = self.xml_xpath(u'cml:component/cml:variable[@cmeta:id="%s"]' % cmeta_id)
if len(vars) != 1:
raise ValueError('"%s" does not ID a unique variable (matches: %s)'
% (cmeta_id, str(vars)))
return vars[0]
def get_all_variables(self):
"""Return an iterator over the variables in the model."""
for comp in getattr(self, u'component', []):
for var in getattr(comp, u'variable', []):
yield var
def _add_variable(self, var, varname, compname):
"""Add a new variable to the model."""
assert (compname, varname) not in self._cml_variables
self._cml_variables[(compname, varname)] = var
def _del_variable(self, varname, compname):
"""Remove a variable from the model."""
del self._cml_variables[(compname, varname)]
def _add_component(self, comp, special=False):
"""Add a new component to the model."""
if special:
comp.xml_parent = self
else:
self.xml_append(comp)
self._cml_components[comp.name] = comp
def _del_component(self, comp):
"""Remove the given component from the model."""
self.xml_remove_child(comp)
del self._cml_components[comp.name]
def validation_error(self, errmsg, level=logging.ERROR):
"""Log a validation error message.
Message should be a unicode string.
"""
self._cml_validation_errors.append(errmsg)
logging.getLogger('validator').log(level, errmsg.encode('UTF-8'))
def get_validation_errors(self):
"""Return the list of all errors found (so far) while validating this model."""
return self._cml_validation_errors
def validation_warning(self, errmsg, level=logging.WARNING):
"""Log a validation warning message.
Message should be a unicode string.
"""
self._cml_validation_warnings.append(errmsg)
logging.getLogger('validator').log(level, errmsg.encode('UTF-8'))
def get_validation_warnings(self):
"""Return the list of all warnings found (so far) while validating this model.
"""
return self._cml_validation_warnings
def _report_exception(self, e, show_xml_context):
"""Report an exception e as a validation error or warning.
If show_xml_context is True, display the XML of the context
of the exception as well.
"""
e.show_xml_context = show_xml_context
if e.warn:
self.validation_warning(unicode(e), level=e.level)
else:
self.validation_error(unicode(e), level=e.level)
def validate(self, xml_context=False,
invalid_if_warnings=False,
warn_on_units_errors=False,
check_for_units_conversions=False,
assume_valid=False, **ignored_kwargs):
"""Validate this model.
Assumes that RELAX NG validation has been done. Checks rules
3.4.2.2, 3.4.3.2, 3.4.3.3, 3.4.5.2, 3.4.5.3, 3.4.5.4, 3.4.6.2, 3.4.6.3, 3.4.6.4,
4.4.2.1, 4.4.3.2, 4.4.4, 5.4.1.2, 5.4.2.2, 6.4.2.5, 6.4.3.2, and 6.4.3.3
in the CellML 1.0 spec, and performs units checking.
Note that if some checks fail, most of the remaining checks
will not be performed. Hence when testing a model validate
repeatedly until it passes.
If xml_context is True, then the failing MathML tree will be
displayed with every units error.
If check_for_units_conversions is True, then generate a warning if
units conversions will be needed.
If assume_valid is True then fewer checks will be done - only
what is required to set up the data structures needed for model
transformation.
Returns True iff the model validates.
When invalid_if_warnings is True the model will fail to validate
if there are any warnings, as well as if there are any errors.
"""
self._validate_component_hierarchies()
# Rule 5.4.2.2: units definitions may not be circular.
# Also checks 5.4.1.2: no duplicate units names.
if not assume_valid:
for unit in self.get_all_units():
self._check_unit_cycles(unit)
DEBUG('validator', 'Checked for units cycles')
# Check rule 3.4.3.3 too.
self._check_variable_units_exist()
if not self._cml_validation_errors:
self._check_variable_mappings() # This sets up source variable links
if not self._cml_validation_errors and not assume_valid:
self._check_connection_units(check_for_units_conversions)
# Rules 4.4.2.1 and 4.4.3.2: check name references in mathematics
if not self._cml_validation_errors:
assignment_exprs = self.search_for_assignments()
if not assume_valid:
for expr in assignment_exprs:
self._check_maths_name_references(expr, xml_context)
# Rule 4.4.4: mathematical expressions may only modify
# variables belonging to the current component.
if not self._cml_validation_errors and not assume_valid:
self._check_assigned_vars(assignment_exprs, xml_context)
# Warn if mathematics outside the CellML subset is used.
if not self._cml_validation_errors and not assume_valid:
math_elts = self.xml_xpath(self.math_xpath_1 + u' | ' + self.math_xpath_2)
self._check_cellml_subset(math_elts)
# Classify variables and check for circular equations.
# Does a topological sort of all equations in the process.
# TODO: Handle reactions properly.
if not self._cml_validation_errors:
self._classify_variables(assignment_exprs, xml_context)
self._order_variables(assignment_exprs, xml_context)
# Appendix C.3.6: Equation dimension checking.
if not self._cml_validation_errors and (not assume_valid or check_for_units_conversions):
self._check_dimensional_consistency(assignment_exprs,
xml_context,
warn_on_units_errors,
check_for_units_conversions)
# Warn if unknown namespaces are used, just in case.
unknown_nss = set(self.rootNode.xml_namespaces.keys()).difference(set(NSS.values()))
if unknown_nss:
self.validation_warning(u'Unrecognised namespaces used:\n ' +
u'\n '.join(list(unknown_nss)))
# Return validation result
return not self._cml_validation_errors and \
(not invalid_if_warnings or not self._cml_validation_warnings)
def _validate_component_hierarchies(self):
"""Check Rule 6.4.3.2 (4): hierarchies must not be circular.
Builds all the hierarchies, and checks for cycles.
In the process, we also check the other rules in 6.4.3, and 6.4.2.5.
"""
# First, we find the hierarchies that are defined.
hiers = set()
rels = []
for group in getattr(self, u'group', []):
local_hiers = set()
for rel in getattr(group, u'relationship_ref', []):
rels.append(rel)
reln = rel.relationship
ns = rel.xml_attributes[u'relationship'][1]
name = getattr(rel, u'name', None)
hier = (reln, ns, name)
if hier in local_hiers:
self.validation_error("A group element must not contain two or more relationship_ref"
" elements that define a relationship attribute in a common"
" namespace with the same value and that have the same name"
" attribute value (which may be non-existent) (6.4.2.5)."
" Relationship '%s' name '%s' in namespace '%s' is repeated."
% (reln, name or '', ns))
local_hiers.add(hier)
hiers.add(hier)
# Now build & check each hierarchy
for hier in hiers:
self.build_component_hierarchy(hier[0], hier[1], hier[2], rels=rels)
        DEBUG('validator', 'Checked component hierarchies')
def _check_variable_mappings(self):
"""Check Rules 3.4.{5,6}: check variable mappings and interfaces are sane."""
# First check connection elements and build mappings dict
self.build_name_dictionaries()
connected_components = set()
for connection in getattr(self, u'connection', []):
comps = frozenset([connection.map_components.component_1, connection.map_components.component_2])
if comps in connected_components:
self.validation_error("Each map_components element must map a unique pair of components "
"(3.4.5.4). The pair ('%s', '%s') is repeated." % tuple(comps))
connected_components.add(comps)
self._validate_connection(connection)
# Now check for variables that should receive a value but don't
for comp in getattr(self, u'component', []):
for var in getattr(comp, u'variable', []):
for iface in [u'private_interface', u'public_interface']:
if getattr(var, iface, u'none') == u'in':
try:
var.get_source_variable()
except TypeError:
# No source variable found
self.validation_error("Variable '%s' has a %s attribute with value 'in', "
"but no component exports a value to that variable."
% (var.fullname(), iface))
DEBUG('validator', 'Checked variable mappings')
def _validate_connection(self, conn):
"""Validate the given connection element.
Check that the given connection object defines valid mappings
between variables, according to rules 3.4.5 and 3.4.6.
"""
# Check we are allowed to connect these components
try:
comp1 = self.get_component_by_name(conn.map_components.component_1)
except KeyError:
self.validation_error("Connections must be between components defined in the current model "
"(3.4.5.2). There is no component '%s'." % conn.map_components.component_1)
return
try:
comp2 = self.get_component_by_name(conn.map_components.component_2)
except KeyError:
self.validation_error("Connections must be between components defined in the current model "
"(3.4.5.3). There is no component '%s'." % conn.map_components.component_2)
return
if comp1 is comp2:
self.validation_error("A connection must link two different components (3.4.5.4). "
"The component '%s' is being connected to itself." % comp1.name)
return
# Get the parent of each component in the encapsulation hierarchy
par1, par2 = comp1.parent(), comp2.parent()
# The two components must either be siblings (maybe top-level) or parent & child.
if not (par1 == comp2 or par2 == comp1 or par1 == par2):
self.validation_error(u' '.join([
'Connections are only permissible between sibling',
'components, or where one is the parent of the other.\n',
comp1.name,'and',comp2.name,'are unrelated.']))
return
# Now check each variable mapping
for mapping in conn.map_variables:
try:
var1 = self.get_variable_by_name(comp1.name, mapping.variable_1)
except KeyError:
self.validation_error("A variable mapping must be between existing variables (3.4.6.2). "
"Variable '%s' doesn't exist in component '%s'."
% (mapping.variable_1, comp1.name))
continue
try:
var2 = self.get_variable_by_name(comp2.name, mapping.variable_2)
except KeyError:
self.validation_error("A variable mapping must be between existing variables (3.4.6.2). "
"Variable '%s' doesn't exist in component '%s'."
% (mapping.variable_2, comp2.name))
continue
errm, e = ['Interface mismatch mapping',var1.fullname(),'and',var2.fullname(),':\n'], None
if par1 == par2:
# Siblings, so should have differing public interfaces
if not hasattr(var1, 'public_interface'):
e = 'missing public_interface attribute on ' + \
var1.fullname() + '.'
elif not hasattr(var2, 'public_interface'):
e = 'missing public_interface attribute on ' + \
var2.fullname() + '.'
elif var1.public_interface == var2.public_interface:
e = 'public_interface attributes are identical.'
else:
if var1.public_interface == 'in':
var1._set_source_variable(var2)
else:
var2._set_source_variable(var1)
else:
if par2 == comp1:
# Component 1 is the parent of component 2
var1, var2 = var2, var1
# Now var2 is in the parent component, and var1 in the child
if not hasattr(var1, 'public_interface'):
e = var1.fullname()+' missing public_interface.'
elif not hasattr(var2, 'private_interface'):
e = var2.fullname()+' missing private_interface.'
elif var1.public_interface == var2.private_interface:
e = 'relevant interfaces have identical values.'
else:
if var1.public_interface == 'in':
var1._set_source_variable(var2)
else:
var2._set_source_variable(var1)
# If there was an error, log it
if e:
errm.append(e)
self.validation_error(u' '.join(errm))
def _check_variable_units_exist(self):
"""Check rule 3.4.3.3: that the units declared for variables exist."""
for var in self.get_all_variables():
try:
var.get_units()
except KeyError:
self.validation_error("The value of the units attribute on a variable must be either "
"one of the standard units or the name of a unit defined in the "
"current component or model (3.4.3.3). The units '%s' on the "
"variable '%s' in component '%s' do not exist."
% (var.units, var.name, var.component.name))
def _check_connection_units(self, check_for_units_conversions=False):
"""Check that the units of mapped variables are dimensionally consistent.
If check_for_units_conversions is True we also warn if they are not equivalent,
since much processing software may not be able to handle that case.
"""
for conn in getattr(self, u'connection', []):
comp1 = self.get_component_by_name(conn.map_components.component_1)
comp2 = self.get_component_by_name(conn.map_components.component_2)
for mapping in conn.map_variables:
var1 = self.get_variable_by_name(comp1.name, mapping.variable_1)
var2 = self.get_variable_by_name(comp2.name, mapping.variable_2)
# Check the units
u1 = var1.get_units()
u2 = var2.get_units()
if not u1 == u2:
if not u1.dimensionally_equivalent(u2):
self.validation_error(u' '.join([
var1.fullname(), 'and', var2.fullname(),
'are mapped, but have dimensionally inconsistent units.']))
elif check_for_units_conversions:
self.validation_warning(
u' '.join(['Warning: mapping between', var1.fullname(), 'and',
var2.fullname(), 'will require a units conversion.']),
level=logging.WARNING_TRANSLATE_ERROR)
def _check_maths_name_references(self, expr, xml_context=False):
"""Check rules 4.4.2.1 and 4.4.3.2: name references in mathematics."""
if isinstance(expr, mathml_ci):
# Check variable exists
try:
_ = expr.variable
except KeyError:
self._report_exception(
MathsError(expr,
"The content of a MathML ci element must match the name of a variable "
"in the enclosing component, once whitespace normalisation has been "
"performed (4.4.2.1). Variable '%s' does not exist in component '%s'."
% (unicode(expr).strip(), expr.component.name)),
xml_context)
elif isinstance(expr, mathml_cn):
# Check units exist
try:
expr.get_units()
except KeyError:
self._report_exception(
MathsError(expr,
"Units on a cn element must be standard or defined in the current "
"component or model (4.4.3.2). Units '%s' are not defined in "
"component '%s'." % (expr.units, expr.component.name)),
xml_context)
else:
# Recurse
for child in expr.xml_element_children():
self._check_maths_name_references(child, xml_context)
def _check_assigned_vars(self, assignments, xml_context=False):
"""Check Rule 4.4.4: mathematical expressions may only modify
variables belonging to the current component.
"""
for expr in assignments:
try:
expr.check_assigned_var()
except MathsError, e:
self._report_exception(e, xml_context)
DEBUG('validator', 'Checked variable assignments')
def _check_cellml_subset(self, math_elts, root=True):
"""Warn if MathML outside the CellML subset is used."""
for elt in math_elts:
            if elt.localName not in CELLML_SUBSET_ELTS and \
                    elt.namespaceURI == NSS[u'm']:
self.validation_warning(u' '.join([
u'MathML element', elt.localName,
u'is not in the CellML subset.',
u'Some tools may not be able to process it.']))
self._check_cellml_subset(self.xml_element_children(elt), False)
if root:
DEBUG('validator', 'Checked for CellML subset')
def _classify_variables(self, assignment_exprs, xml_context=False):
"""Determine the type of each variable.
Note that mapped vars must have already been classified by
self._check_variable_mappings, and the RELAX NG schema ensures
that a variable cannot be both Mapped and MaybeConstant.
Builds the equation dependency graph in the process.
"""
# Classify those vars that might be constants,
# i.e. have an initial value assigned.
for var in self.get_all_variables():
if hasattr(var, u'initial_value'):
var._set_type(VarTypes.MaybeConstant)
# Now classify by checking usage in mathematics, building
# an equation dependency graph in the process.
for expr in assignment_exprs:
try:
expr.classify_variables(root=True)
except MathsError, e:
self._report_exception(e, xml_context)
# Unused vars still classified as MaybeConstant are constants
for var in self.get_all_variables():
if var.get_type() == VarTypes.MaybeConstant:
var._set_type(VarTypes.Constant)
DEBUG('validator', 'Classified variables')
def _order_variables(self, assignment_exprs, xml_context=False):
"""Topologically sort the equation dependency graph.
This orders all the assignment expressions in the model, to
allow procedural code generation. It also checks that equations
are not cyclic (we don't support DAEs).
"""
self.clear_assignments() # Ensure we start from a clean slate
try:
self._cml_sorting_variables_stack = []
for var in self.get_all_variables():
if var.get_colour() == DFS.White:
self.topological_sort(var)
for expr in assignment_exprs:
if expr.get_colour() == DFS.White:
self.topological_sort(expr)
except MathsError, e:
self._report_exception(e, xml_context)
DEBUG('validator', 'Topologically sorted variables')
math_xpath_1 = u'cml:component/m:math'
math_xpath_2 = u'cml:component/cml:reaction/cml:variable_ref/cml:role/m:math'
apply_xpath_1 = u'/m:apply[m:eq]'
apply_xpath_2 = u'/m:semantics/m:apply[m:eq]'
def search_for_assignments(self, comp=None):
"""Search for assignment expressions in the model's mathematics.
If comp is supplied, will only return assignments in that component.
"""
assignments_xpath = u' | '.join([self.math_xpath_1 + self.apply_xpath_1,
self.math_xpath_1 + self.apply_xpath_2,
self.math_xpath_2 + self.apply_xpath_1,
self.math_xpath_2 + self.apply_xpath_2])
if comp:
assignments_xpath = assignments_xpath.replace(u'component',
u'component[@name="%s"]' % comp.name)
return self.xml_xpath(assignments_xpath)
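    # For illustration, with a (hypothetical) component named 'membrane',
    # the replace() above rewrites u'cml:component/m:math' into
    # u'cml:component[@name="membrane"]/m:math', restricting the search
    # to that component's mathematics.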
def build_name_dictionaries(self, rebuild=False):
"""
Create dictionaries mapping names of variables and components to
the objects representing them.
Dictionary keys for variables will be
(component_name, variable_name).
If rebuild is True, clear the dictionaries first.
"""
if rebuild:
self._cml_variables = {}
self._cml_components = {}
if not self._cml_components:
for comp in getattr(self, u'component', []):
if comp.name in self._cml_components:
self.validation_error("Component names must be unique within a model (3.4.2.2)."
" The name '%s' is repeated." % comp.name)
self._cml_components[comp.name] = comp
for var in getattr(comp, u'variable', []):
key = (comp.name, var.name)
if key in self._cml_variables:
self.validation_error("Variable names must be unique within a component (3.4.3.2)."
" The name '%s' is repeated in component '%s'."
% (var.name, comp.name))
self._cml_variables[key] = var
def build_component_hierarchy(self, relationship, namespace=None, name=None, rels=None):
"""
Create all the parent-child links for the given component hierarchy.
relationship gives the type of the hierarchy. If it is not one of the
CellML types (i.e. encapsulation or containment) then the namespace URI
must be specified. Multiple non-encapsulation hierarchies of the same
type can be specified by giving the name argument.
"""
key = (relationship, namespace, name)
# Set all components to have no parent or children, under this hierarchy
for comp in getattr(self, u'component', []):
comp._clear_hierarchy(key)
self.build_name_dictionaries()
# Find nodes defining this hierarchy
if rels is None:
rels = self.xml_xpath(u'cml:group/cml:relationship_ref')
groups = []
for rel in rels:
# NB: The first test below relies on there only being one
# attr with localname of 'relationship' on each rel.
# So let's check this...
if hasattr(rel, u'relationship_'):
self.validation_error(u' '.join([
'relationship_ref element has multiple relationship',
'attributes in different namespaces:\n'] +
                    map(lambda (qn, ns): '('+qn+','+ns+')',
                        rel.xml_attributes.values())))
return
if rel.relationship == relationship and \
rel.xml_attributes[u'relationship'][1] == namespace and \
getattr(rel, u'name', None) == name:
# It's in the hierarchy
groups.append(rel.xml_parent)
# Now build all the parent links for this hierarchy
def set_parent(p, crefs):
for cref in crefs:
# Find component cref refers to
try:
c = self.get_component_by_name(cref.component)
except KeyError:
self.validation_error("A component_ref element must reference a component in the current"
" model (6.4.3.3). Component '%s' does not exist." % cref.component)
return
# Set c's parent to p
c._set_parent_component(key, p)
if hasattr(cref, 'component_ref'):
# If we already have children under this hierarchy it's a validation error
if c._has_child_components(key):
self.validation_error("In a given hierarchy, only one component_ref element "
"referencing a given component may contain children (6.4.3.2)."
" Component '%s' has children in multiple locations." % c.name)
# Set parent of c's children to c
set_parent(c, cref.component_ref)
elif p is None and namespace is None:
self.validation_error("Containment and encapsulation relationships must be hierarchical"
" (6.4.3.2). Potentially top-level component '%s' has not been"
" given children." % cref.component)
for group in groups:
set_parent(None, group.component_ref)
if self._cml_validation_errors:
return
# Check for a cycle in the hierarchy (rule 6.4.3.2 (4)).
        # Note that we have already ensured that no component is a parent in more than one location,
        # nor is any component a child more than once, so the only possibility for a cycle is if one of
        # the components referenced as a child of a group element is also referenced as a (leaf) descendent
        # of one of its children. We check for this by following parent links backwards.
def has_cycle(root_comp, cur_comp):
if cur_comp is None:
return False
elif cur_comp is root_comp:
return True
else:
return has_cycle(root_comp, cur_comp.parent(reln_key=key))
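        # Illustration (hypothetical hierarchy): if A's parent is B and B's
        # parent is A, has_cycle(A, A.parent()) follows the parent links
        # A -> B -> A and returns True.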
for group in groups:
for cref in group.component_ref:
# Find component cref refers to
c = self.get_component_by_name(cref.component)
if has_cycle(c, c.parent(reln_key = key)):
n, ns = name or "", namespace or ""
self.validation_error("The '%s' relationship hierarchy with name '%s' and namespace"
" '%s' has a cycle" % (relationship, n, ns))
return
def topological_sort(self, node):
"""
Do a topological sort of all assignment expressions and variables
in the model.
node should be an expression or variable object that inherits from
Colourable and has methods get_dependencies, get_component
"""
node.set_colour(DFS.Gray)
# Keep track of gray variables, for reporting cycles
if isinstance(node, cellml_variable):
self._cml_sorting_variables_stack.append(node.fullname())
elif node.is_ode():
# This is an expression defining an ODE; the name on the
# stack will look something like d(V)/d(t)
n1, n2 = map(lambda v: v.fullname(), node.assigned_variable())
self._cml_sorting_variables_stack.append(u'd'+n1+u'/d'+n2)
# Visit children in the dependency graph
for dep in node.get_dependencies():
if type(dep) == types.TupleType:
# This is an ODE dependency, so get the defining expression
dep = dep[0].get_ode_dependency(dep[1], node)
if dep.get_colour() == DFS.White:
self.topological_sort(dep)
elif dep.get_colour() == DFS.Gray:
# We have a cyclic dependency
if isinstance(dep, cellml_variable):
i = self._cml_sorting_variables_stack.index(dep.fullname())
elif node.is_ode():
n1, n2 = map(lambda v: v.fullname(),
dep.assigned_variable())
i = self._cml_sorting_variables_stack.index(
u'd'+n1+u'/d'+n2)
else:
# Since any variable always depends on a mathematical
# expression defining it, the only case where the
# expression is gray before the corresponding variable
# (apart from an ODE, dealt with above) is if a tree
# started at an expression. Hence the whole stack
# is in the cycle.
i = 0
varnames = self._cml_sorting_variables_stack[i:]
self.validation_error(u' '.join([
u'There is a cyclic dependency involving the following',
u'variables:', u','.join(varnames)]))
# Finish this node, and add it to the appropriate sorted list
node.set_colour(DFS.Black)
self._add_sorted_assignment(node)
# Pop the gray variables stack
if (isinstance(node, cellml_variable) or node.is_ode()):
self._cml_sorting_variables_stack.pop()
return
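    # Colouring scheme used by topological_sort, for reference:
    #   White = not yet visited;
    #   Gray  = on the current DFS stack, so reaching a Gray node again
    #           means a cyclic dependency;
    #   Black = finished and appended to the sorted assignments list.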
def _add_sorted_assignment(self, a):
"""
During the topological sort, add a finished assignment to the
list. This list can then be executed in order to simulate the
model.
The element added can either be a MathML expression
representing an assignment, or a CellML variable element,
indicating an assignment due to a variable mapping.
"""
self._cml_assignments.append(a)
def _remove_assignment(self, a):
"""Remove the given assignment from our list.
This method is used by the partial evaluator."""
self._cml_assignments.remove(a)
def get_assignments(self):
"""
Return a sorted list of all the assignments in the model.
Assignments can either be instances of cellml_variable, in
which case they represent a variable mapping, or instances of
mathml_apply, representing top-level assignment expressions.
"""
return self._cml_assignments
def clear_assignments(self):
"""Clear the assignments list."""
self._cml_assignments = []
def do_binding_time_analysis(self):
"""Perform a binding time analysis on the model's mathematics.
This requires variables to have been classified and a
topological sort of the mathematics to have been performed.
Variables and top-level expressions are processed in the order
given by the topological sort, hence only a single pass is
necessary.
Variables are classified based on their type:
State, Free -> dynamic
Constant -> static
Mapped -> binding time of source variable
Computed -> binding time of defining expression
Expressions are dealt with by recursively annotating
subexpressions. See code in the MathML classes for details.
"""
for item in self.get_assignments():
if isinstance(item, cellml_variable):
# Set binding time based on type
item._get_binding_time()
else:
# Compute binding time recursively
item._get_binding_time()
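    # Example (hypothetical model): a Constant variable such as a
    # conductance parameter with an initial_value is annotated static,
    # while a State variable such as a membrane voltage is dynamic.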
def _check_dimensional_consistency(self, assignment_exprs,
xml_context=False,
warn_on_units_errors=False,
check_for_units_conversions=False):
"""Appendix C.3.6: Equation dimension checking."""
self._cml_conversions_needed = False
# Check dimensions
for expr in assignment_exprs:
try:
expr.get_units()
except UnitsError, e:
if warn_on_units_errors:
e.warn = True
e.level = logging.WARNING
self._report_exception(e, xml_context)
# Check if units conversions will be needed
if check_for_units_conversions and not self._cml_validation_errors:
boolean = self.get_units_by_name('cellml:boolean')
for expr in assignment_exprs:
try:
DEBUG('validator', "Checking units in", element_xpath(expr), expr.component.name)
expr._set_in_units(boolean, no_act=True)
except UnitsError:
pass
# Warn if conversions used
if self._cml_conversions_needed:
self.validation_warning('The mathematics in this model require units conversions.',
level=logging.WARNING)
DEBUG('validator', 'Checked units')
def _check_unit_cycles(self, unit):
"""Check for cyclic units definitions.
We do this by doing a depth-first search from unit.
"""
if unit.get_colour() != DFS.White:
# Allow self.validate to call us without colour check
return
unit.set_colour(DFS.Gray)
# Get the object unit is defined in
parent = unit.xml_parent or self
for u in getattr(unit, u'unit', []):
# Explore units that this unit is defined in terms of
try:
v = parent.get_units_by_name(u.units)
except KeyError:
self.validation_error("The value of the units attribute on a unit element must be taken"
" from the dictionary of standard units or be the name of a"
" user-defined unit in the current component or model (5.4.2.2)."
" Units '%s' are not defined." % u.units)
continue
if v.get_colour() == DFS.White:
self._check_unit_cycles(v)
elif v.get_colour() == DFS.Gray:
# We have a cycle
self.validation_error("Units %s and %s are in a cyclic units definition"
% (unit.name, v.name))
unit.set_colour(DFS.Black)
def _build_units_dictionary(self):
"""Create a dictionary mapping units names to objects, for all units definitions in this element."""
# Standard units
std_units = self._cml_standard_units
def make(name, bases):
return cellml_units.create_new(self, name, bases, standard=True)
# SI base units & dimensionless
base_units = [u'ampere', u'candela', u'dimensionless', u'kelvin',
u'kilogram', u'metre', u'mole', u'second']
base_units.append(u'#FUDGE#') # Used for PE of naughty models
for units in base_units:
std_units[units] = make(units, [])
# Special cellml:boolean units
boolean = make(u'cellml:boolean', [])
std_units[u'cellml:boolean'] = boolean
# Convenience derived units
gram = make('gram', [{'units': 'kilogram', 'multiplier': '0.001'}])
litre = make('litre', [{'multiplier': '1000', 'prefix': 'centi',
'units': 'metre', 'exponent': '3'}])
# SI derived units
radian = make('radian', [{'units': 'metre'},
{'units': 'metre', 'exponent': '-1'}])
steradian = make('steradian', [{'units': 'metre', 'exponent': '2'},
{'units': 'metre', 'exponent': '-2'}])
hertz = make('hertz', [{'units': 'second', 'exponent': '-1'}])
newton = make('newton', [{'units': 'metre'},
{'units': 'kilogram'},
{'units': 'second', 'exponent': '-2'}])
pascal = make('pascal', [{'units': 'newton'},
{'units': 'metre', 'exponent': '-2'}])
joule = make('joule', [{'units': 'newton'},
{'units': 'metre'}])
watt = make('watt', [{'units': 'joule'},
{'units': 'second', 'exponent': '-1'}])
coulomb = make('coulomb', [{'units': 'second'},
{'units': 'ampere'}])
volt = make('volt', [{'units': 'watt'},
{'units': 'ampere', 'exponent': '-1'}])
farad = make('farad', [{'units': 'coulomb'},
{'units': 'volt', 'exponent': '-1'}])
ohm = make('ohm', [{'units': 'volt'},
{'units': 'ampere', 'exponent': '-1'}])
siemens = make('siemens', [{'units': 'ampere'},
{'units': 'volt', 'exponent': '-1'}])
weber = make('weber', [{'units': 'volt'},
{'units': 'second'}])
tesla = make('tesla', [{'units': 'weber'},
{'units': 'metre', 'exponent': '-2'}])
henry = make('henry', [{'units': 'weber'},
{'units': 'ampere', 'exponent': '-1'}])
celsius = make('celsius', [{'units': 'kelvin', 'offset': '-273.15'}])
lumen = make('lumen', [{'units': 'candela'},
{'units': 'steradian'}])
lux = make('lux', [{'units': 'lumen'},
{'units': 'metre', 'exponent': '-2'}])
becquerel = make('becquerel', [{'units': 'second', 'exponent': '-1'}])
gray = make('gray', [{'units': 'joule'},
{'units': 'kilogram', 'exponent': '-1'}])
sievert = make('sievert', [{'units': 'joule'},
{'units': 'kilogram', 'exponent': '-1'}])
katal = make('katal', [{'units': 'second', 'exponent': '-1'},
{'units': 'mole'}])
for units in [becquerel, celsius, coulomb, farad, gram, gray, henry,
hertz, joule, katal, litre, lumen, lux, newton, ohm,
pascal, radian, siemens, sievert, steradian, tesla,
volt, watt, weber]:
std_units[units.name] = units
# American spellings
std_units[u'meter'] = std_units[u'metre']
std_units[u'liter'] = std_units[u'litre']
# User-defined units
model_units = self._cml_units
model_units.update(std_units)
if hasattr(self, u'units'):
for units in self.units:
if units.name in model_units:
self.validation_error("Units names must be unique within the parent component or model,"
" and must not redefine the standard units (5.4.1.2)."
" The units definition named '%s' in the model is a duplicate." % units.name)
model_units[units.name] = units
# Update units hashmap
for u in model_units.itervalues():
self._add_units_obj(u)
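    # For reference, chaining the derived definitions above bottoms out in the
    # SI base units, e.g.
    #   volt = watt/ampere = joule/(second.ampere)
    #        = kilogram . metre^2 . second^-3 . ampere^-1
    # so model.get_units_by_name(u'volt').expand() yields a product of powers
    # of base units (see cellml_units.expand below).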
def get_standard_units(self):
"""Get a dictionary mapping the names of the standard CellML units to their definitions."""
if not self._cml_standard_units:
self._build_units_dictionary()
return self._cml_standard_units
def get_all_units(self):
"""Get a list of all units objects, including the standard units."""
if not self._cml_units:
self._build_units_dictionary()
units = self._cml_units.values()
for comp in getattr(self, u'component', []):
units.extend(comp.get_all_units())
return units
def get_units_by_name(self, uname):
"""Return an object representing the element that defines the units named `uname'."""
if not self._cml_units:
self._build_units_dictionary()
try:
return self._cml_units[uname]
except KeyError:
raise KeyError("Units '%s' are not defined in the current component or model." % uname)
def add_units(self, name, units):
"""Add an entry in our units dictionary for units named `name' with element object `units'."""
if not self._cml_units:
self._build_units_dictionary()
assert name not in self._cml_units
self._cml_units[name] = units
self._add_units_obj(units)
return
def has_units(self, units):
"""Test whether a given units definition appears in the model."""
return units in self._cml_units.itervalues()
def _add_units_obj(self, units):
"""Add a units object into the global hashmap."""
if not units.uniquify_tuple in self._cml_units_map:
self._cml_units_map[units.uniquify_tuple] = units
return
def _get_units_obj(self, units):
"""Unique-ify this units object.
If an object with the same definition already exists, return that.
Otherwise return the given units object.
'Same definition' is based on the cellml_units.uniquify_tuple
property, which in turn is based partly on the generated name
which would be given to these units, since that really *must*
be unique in generated models.
"""
return self._cml_units_map.get(units.uniquify_tuple, units)
def _is_new_units_obj(self, units):
"""Have these units been generated already?
i.e. is a units object with this definition in our map?
"""
return units.uniquify_tuple not in self._cml_units_map
def add_units_conversions(self):
"""Add explicit units conversion mathematics where necessary."""
import_processors()
processors.UnitsConverter(self).add_all_conversions()
def find_state_vars(self):
"""Return a list of the state variable elements in this model."""
state_vars = []
for var in self.get_all_variables():
if var.get_type() == VarTypes.State:
state_vars.append(var)
return state_vars
def find_free_vars(self):
"""Return a list of the free variable elements in this model."""
free_vars = []
for var in self.get_all_variables():
if var.get_type() == VarTypes.Free:
free_vars.append(var)
return free_vars
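    # Illustrative usage on a validated model `model`:
    #
    #   for var in model.find_state_vars():
    #       print var.fullname(), getattr(var, 'initial_value', None)
    #   free_vars = model.find_free_vars()  # typically just the time variable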
    def calculate_extended_dependencies(self, nodes, prune=[],
                                        prune_deps=[],
                                        state_vars_depend_on_odes=False,
                                        state_vars_examined=None):
        """Calculate the extended dependencies of the given nodes.
        Recurse into the dependency graph, in order to construct a
        set, for each node in nodes, of all the nodes on which it
        depends, either directly or indirectly.
        Each node IS included in its own dependency set.
        If prune is specified, it should be a set of nodes for which
        we won't include their dependencies or the nodes themselves.
        This is useful e.g. for pruning variables required for calculating
        a stimulus if the stimulus is being provided by another method.
        prune_deps is similar: dependencies of these nodes will be excluded,
        but the nodes themselves will be included if asked for.
        If state_vars_depend_on_odes is True, then state variables are
        considered to depend on the ODE defining them.
        Requires the dependency graph to be acyclic.
        Return the union of all the dependency sets.
        """
        if state_vars_examined is None:
            # Use a fresh set for each top-level call; a mutable default
            # argument would be shared between calls and give stale results.
            state_vars_examined = set()
        deps = set()
for node in nodes:
if node in prune or (isinstance(node, mathml_apply) and
isinstance(node.operator(), mathml_eq) and
isinstance(node.eq.lhs, mathml_ci) and
node.eq.lhs.variable in prune):
continue
if type(node) == tuple:
# This is an ODE dependency, so get the defining expression instead.
ode = True
orig_node = node
node = node[0].get_ode_dependency(node[1])
if orig_node in prune_deps:
# Include the defining expression, but skip its dependencies
deps.add(node)
continue
free_var = node.eq.lhs.diff.independent_variable
else:
ode = False
deps.add(node)
if node in prune_deps:
# Skip dependencies of this node
continue
nodedeps = set(node.get_dependencies())
if ode and not node._cml_ode_has_free_var_on_rhs:
# ODEs depend on their independent variable. However,
# when writing out code we don't want to pull the free
# variable in just for this, as the compiler then
# gives us unused variable warnings.
nodedeps.remove(free_var)
if (state_vars_depend_on_odes and isinstance(node, cellml_variable)
and node.get_type() == VarTypes.State
and node not in state_vars_examined):
nodedeps.update(node.get_all_expr_dependencies())
state_vars_examined.add(node)
deps.update(self.calculate_extended_dependencies(nodedeps,
prune=prune,
prune_deps=prune_deps,
state_vars_depend_on_odes=state_vars_depend_on_odes,
state_vars_examined=state_vars_examined))
return deps
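    # Illustrative usage: collect everything needed to evaluate the ODE
    # right-hand sides of a validated model `model`:
    #
    #   nodes = set()
    #   for var in model.find_state_vars():
    #       nodes.update(var.get_all_expr_dependencies())
    #   needed = model.calculate_extended_dependencies(nodes)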
def is_self_excitatory(self):
"""Determine whether this model is self-excitatory,
i.e. does not require an external stimulus.
"""
meta_id = self.cmeta_id
if not meta_id:
return False
property = cellml_metadata.create_rdf_node(('pycml:is-self-excitatory', NSS['pycml']))
source = cellml_metadata.create_rdf_node(fragment_id=meta_id)
return cellml_metadata.get_target(self, source, property) == 'yes'
def xml(self, stream=None, writer=None, **wargs):
"""Serialize back to XML.
If stream is given, output to stream.
If writer is given, use it directly.
If neither a stream nor a writer is given, return the output text
as a Python string (not Unicode) encoded as UTF-8.
This overrides Amara's method, in order to force declaration of
various namespaces with prefixes on this element, and to ensure
the RDF annotations are up-to-date.
See base class docs for possible keyword arguments.
"""
extra_namespaces = {u'cellml': NSS[u'cml'],
u'pe': NSS[u'pe'],
u'lut': NSS[u'lut']}
# Update RDF block if necessary
cellml_metadata.update_serialized_rdf(self)
temp_stream = None
close_document = 0
if not writer:
#Change the default to *not* generating an XML decl
if not wargs.get('omitXmlDeclaration'):
wargs['omitXmlDeclaration'] = u'yes'
if stream:
writer = amara.bindery.create_writer(stream, wargs)
else:
temp_stream = StringIO()
writer = amara.bindery.create_writer(temp_stream, wargs)
writer.startDocument()
close_document = 1
writer.startElement(self.nodeName, self.namespaceURI,
extraNss=extra_namespaces)
if hasattr(self, 'xml_attributes'):
for apyname in self.xml_attributes:
aqname, ans = self.xml_attributes[apyname]
val = self.__dict__[apyname]
writer.attribute(aqname, val, ans)
for child in self.xml_children:
if isinstance(child, unicode):
writer.text(child)
else:
child.xml(writer=writer)
writer.endElement(self.nodeName, self.namespaceURI)
if close_document:
writer.endDocument()
return temp_stream and temp_stream.getvalue()
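    # Illustrative usage:
    #
    #   xml_bytes = model.xml()    # returns the document as a UTF-8 string
    #   f = open('model.cellml', 'w')
    #   model.xml(stream=f)        # or serialize directly to a stream
    #   f.close()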
class cellml_component(element_base):
"""
Specialised component class, with additional helper methods.
"""
def __init__(self):
element_base.__init__(self)
self._cml_parents = {}
self._cml_children = {}
self._cml_units = None
self._cml_created_by_pe = False
@property
def ignore_component_name(self):
"""Whether to not include the component name in the full names of contained variables."""
return self._cml_created_by_pe or self.name == u''
def parent(self, relationship=u'encapsulation', namespace=None, name=None, reln_key=None):
"""Find the parent of this component in the given hierarchy.
We default to the encapsulation hierarchy.
relationship gives the type of the hierarchy. If it is not one
of the CellML types (i.e. encapsulation or containment) then
the namespace URI must be specified. Multiple non-encapsulation
hierarchies of the same type can be specified by giving the name
argument.
Results are cached for efficiency.
"""
key = reln_key or (relationship, namespace, name)
if not key in self._cml_parents:
assert(reln_key is None)
self.xml_parent.build_component_hierarchy(relationship, namespace, name)
return self._cml_parents[key]
def _clear_hierarchy(self, reln_key):
"""Unset our parent & children in the given hierarchy."""
self._cml_parents[reln_key] = None
self._cml_children[reln_key] = []
def _set_parent_component(self, reln_key, parent):
"""Set the parent of this component in the relationship hierarchy indexed by reln_key to parent.
Trigger a validation error if we already have a parent in this hierarchy.
Also add ourselves to parent's children.
"""
if not reln_key in self._cml_parents or self._cml_parents[reln_key] is None:
# Only set parent if we don't already have one
self._cml_parents[reln_key] = parent
else:
self.xml_parent.validation_error("In a given hierarchy, a component may not be a child more"
" than once (6.4.3.2). Component '%s' has multiple parents."
% self.name)
if not parent is None:
parent._add_child_component(reln_key, self)
def _add_child_component(self, reln_key, child):
"""Add child to our list of children in the relationship hierarchy indexed by reln_key."""
if not reln_key in self._cml_children:
self._cml_children[reln_key] = []
self._cml_children[reln_key].append(child)
def _has_child_components(self, reln_key):
"""Determine whether we have any children in the given relationship hierarchy."""
return self._cml_children.get(reln_key, []) != []
def _build_units_dictionary(self):
"""Create a dictionary mapping units names to objects, for all units definitions in this element."""
self._cml_units = {}
for units in getattr(self, u'units', []):
if units.name in self._cml_units:
self.validation_error("Units names must be unique within the parent component (5.4.1.2)."
" The name '%s' in component '%s' is duplicated."
% (units.name, self.name))
try:
if self.xml_parent.get_units_by_name(units.name).standard == u'yes':
self.validation_error("Units definitions must not redefine the standard units (5.4.1.2)."
" The name '%s' in component '%s' is not allowed."
% (units.name, self.name))
            except (KeyError, AttributeError):
                # No units of this name in the model, or they're not flagged
                # as standard, so there is no clash with the standard units.
                pass
self._cml_units[units.name] = units
self.xml_parent._add_units_obj(units)
def get_units_by_name(self, uname):
"""Return an object representing the element that defines the units named `uname'."""
if self._cml_units is None:
self._build_units_dictionary()
if uname in self._cml_units:
# Units are defined in this component
return self._cml_units[uname]
else:
# Look up units in model element instead
return self.xml_parent.get_units_by_name(uname)
def add_units(self, name, units):
"""Add an entry in our units dictionary for units named `name' with element object `units'."""
if self._cml_units is None:
self._build_units_dictionary()
self._cml_units[name] = units
self.xml_parent._add_units_obj(units)
return
def get_all_units(self):
"""Get a list of all units objects defined in this component."""
if self._cml_units is None:
self._build_units_dictionary()
return self._cml_units.values()
def get_variable_by_name(self, varname):
"""Return the variable object with name `varname' in this component."""
return self.xml_parent.get_variable_by_name(self.name, varname)
def _add_variable(self, var):
"""Add a variable to this component."""
# Add element
self.xml_append(var)
# Add to dictionary
self.xml_parent._add_variable(var, var.name, self.name)
return
def _del_variable(self, var, keep_annotations=False):
"""Remove a variable from this component."""
if not keep_annotations:
# Remove metadata about the variable
var.remove_rdf_annotations()
# Remove the element
self.xml_remove_child(var)
# Remove from dictionary
self.xml_parent._del_variable(var.name, self.name)
return
@staticmethod
def create_new(elt, name):
"""Create a new component with the given name."""
new_comp = elt.xml_create_element(u'component', NSS[u'cml'],
attributes={u'name': unicode(name)})
return new_comp
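    # Illustrative usage (hypothetical component name):
    #
    #   comp = cellml_component.create_new(model, u'membrane')
    #   model.xml_append(comp)    # attach the new component to the model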
class cellml_variable(Colourable, element_base):
"""
Class representing CellML <variable> elements.
"""
def __init__(self):
super(cellml_variable, self).__init__()
self.clear_dependency_info()
return
def clear_dependency_info(self):
"""Clear the type, dependency, etc. information for this variable.
This allows us to re-run the type & dependency analysis for the model.
"""
# The type of this variable is not yet known
self._cml_var_type = VarTypes.Unknown
self._cml_source_var = None
self._cml_value = {}
self._cml_binding_time = None
# Dependency graph edges
self._cml_depends_on = []
self._cml_depends_on_ode = {}
self._cml_usage_count = 0
self.clear_colour()
def __hash__(self):
"""Hashing function for variables.
Hash is based on hashing the full name of the variable, as
this must be unique within a model. Unfortunately, when we do
partial evaluation, the full name changes!
TODO: do we need a hash function?
"""
return hash(self.fullname(cellml=True))
def fullname(self, cellml=False, debug=False):
"""
Return the full name of this variable, i.e.
'(component_name,variable_name)'.
If cellml is given as True, return the name in a form compatible with
the CellML spec instead, i.e. component_name__variable_name, unless
the component has its ignore_component_name property set, in which case
just use variable_name.
If debug is True, also give information about the variable object.
"""
if hasattr(self, 'xml_parent'):
parent_name = self.xml_parent.name
ignore_component = self.component.ignore_component_name
else:
parent_name = '*orphan*'
ignore_component = True
if cellml:
if ignore_component:
vn = self.name
else:
vn = parent_name + u'__' + self.name
else:
vn = u'(' + parent_name + u',' + self.name + u')'
if debug:
vn = '%s@0x%x' % (vn, id(self))
return vn
@staticmethod
def split_name(varname):
"""Split a variable name as given by cellml_variable.fullname into constituent parts.
Returns a tuple (component name, local variable name). If the component name cannot
be identified, it will be returned as the empty string.
"""
try:
if varname[0] == u'(':
cname, vname = varname[1:-1].split(u',')
else:
cname, vname = varname.split(u'__', 1)
except ValueError:
cname, vname = u'', varname
return cname, vname
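    # Examples, following the two fullname formats above:
    #
    #   split_name(u'(membrane,V)')  ->  (u'membrane', u'V')
    #   split_name(u'membrane__V')   ->  (u'membrane', u'V')
    #   split_name(u'V')             ->  (u'', u'V')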
@staticmethod
def get_variable_object(model, varname):
"""Return the variable object that has name varname.
This method tries to handle various forms of fully qualified variable name, i.e. names which
include the name of the component the variable occurs in, including those created by
CellMLTranslator.code_name.
"""
varname = unicode(varname)
if varname[:4] == 'var_':
varname = varname[4:]
cname, vname = cellml_variable.split_name(varname)
if len(model.component) == 1 and cname == model.component.name:
var = model.component.get_variable_by_name(vname)
else:
try:
var = model.get_variable_by_name(cname, vname)
except KeyError, e:
try:
if cname:
vname = cname + u'__' + vname
var = model.component.get_variable_by_name(vname)
except KeyError:
raise e
return var
def __str__(self):
return 'cellml_variable' + self.fullname()
def __repr__(self):
return '<cellml_variable %s @ 0x%x>' % (self.fullname(), id(self))
def get_component(self):
return self.xml_parent
component = property(get_component)
@property
def model(self):
return self.component.xml_parent
def get_units(self):
"""Get the cellml_units object giving this variable's units."""
return self.component.get_units_by_name(self.units)
def _add_dependency(self, dep):
"""Add a dependency of this variable.
This could be an expression defining it, or a variable it's mapped from.
Triggers a validation error if we already have another dependency,
since a variable can't be defined in more than one way.
"""
if self._cml_depends_on:
if not dep in self._cml_depends_on:
# Multiple dependencies. TODO: Give more info.
raise MathsError(dep, u' '.join([
u'The variable',self.fullname(),
u'gets its value from multiple locations.']))
else:
self._cml_depends_on.append(dep)
return
def get_dependencies(self):
"""
Return the list of things this variable depends on.
"""
return self._cml_depends_on
def _add_ode_dependency(self, independent_var, expr):
"""Add a dependency of this variable as the dependent variable in an ODE.
independent_var is the corresponding independent variable, and expr is the
expression defining the ODE.
Triggers a validation error if the same ODE is defined by multiple expressions.
"""
if independent_var in self._cml_depends_on_ode:
if self._cml_depends_on_ode[independent_var] != expr:
# Multiple definitions. TODO: Give more info.
raise MathsError(expr, u''.join([
u'There are multiple definitions of the ODE d',self.fullname(),
u'/d',independent_var.fullname()]))
else:
self._cml_depends_on_ode[independent_var] = expr
return
def _update_ode_dependency(self, free_var, defn):
"""Update an ODE dependency due to partial evaluation.
When the PE processes the LHS of a derivative equation, it alters the independent
variable to reference its source directly. If this new source wasn't originally
our independent variable's source (e.g. due to a units conversion expression) then
this will break lookups of the ODE via _get_ode_dependency, so we need to update
the reference.
"""
if self.get_type() == VarTypes.Mapped:
return self.get_source_variable()._update_ode_dependency(free_var, defn)
self._cml_depends_on_ode.clear()
self._cml_depends_on_ode[free_var] = defn
def get_ode_dependency(self, independent_var, context=None):
"""
Return the expression defining the ODE giving the derivative of this
variable w.r.t. independent_var.
Triggers a validation error if the ODE has not been defined.
"""
if self.get_type() == VarTypes.Mapped:
return self.get_source_variable().get_ode_dependency(independent_var, context=context)
free_vars = self._cml_depends_on_ode.keys()
free_vars = dict(zip(map(lambda v: v.get_source_variable(recurse=True), free_vars),
free_vars))
independent_src = independent_var.get_source_variable(recurse=True)
if not independent_src in free_vars:
            raise MathsError(context or self, u''.join([
                u'The ODE d', self.fullname(), u'/d', independent_var.fullname(),
                u' is used but not defined.']))
return self._cml_depends_on_ode[free_vars[independent_src]]
def get_all_expr_dependencies(self):
"""Return all expressions this variable depends on, either directly or as an ODE."""
deps = filter(lambda d: isinstance(d, mathml_apply), self._cml_depends_on)
deps.extend(self._cml_depends_on_ode.values())
return deps
def _set_source_variable(self, src_var):
"""Set this to be a mapped variable which imports its value from src_var.
A validation error is generated if we are already mapped.
"""
if not self._cml_source_var is None:
# Mapping already exists
model = self.model
debug = model.get_option('debug')
model.validation_error(u' '.join([
'A variable with interface "in" may only obtain its',
'value from one location.\nBoth',
self._cml_source_var.fullname(debug=debug), 'and',
src_var.fullname(debug=debug), 'are exported to', self.fullname(debug=debug)]))
else:
self._cml_source_var = src_var
self._set_type(VarTypes.Mapped)
self._add_dependency(src_var)
return
def get_source_variable(self, recurse=False):
"""
Assuming this variable imports its value, return the variable
from which we obtain our value.
If our value is determined within this component, raise a
TypeError.
If recurse is set to True, recursively follow mappings until
a non-mapped variable is found.
"""
if self._cml_source_var is None:
if recurse:
src = self
else:
raise TypeError(u' '.join([
'Variable', self.fullname(), u'is not a mapped variable',
'(i.e. does not obtain its value from another component).'
]))
else:
src = self._cml_source_var
if recurse:
src = src.get_source_variable(recurse=True)
return src
def _set_type(self, var_type, _orig=None):
"""Update the type of this variable.
The caller should check that the update makes sense.
If this variable already has type Mapped, then update the type of
our source variable, instead.
"""
# If this is becoming a state variable, increment its usage count
if var_type is VarTypes.State and not self._cml_var_type is VarTypes.State:
self._cml_usage_count += 1
if self._cml_var_type == VarTypes.Mapped and not _orig is self:
# Guard against infinite loops, since we haven't done a cyclic dependency check yet
if _orig is None: _orig = self
self.get_source_variable()._set_type(var_type, _orig=_orig)
else:
self._cml_var_type = var_type
return
def get_type(self, follow_maps=False):
"""Return the type of this variable.
If follow_maps is True and the value of this variable is imported
from another component, then return the type of the variable we
get our value from instead.
"""
if follow_maps and self._cml_var_type == VarTypes.Mapped:
return self.get_source_variable().get_type(follow_maps=True)
return self._cml_var_type
def _used(self):
"""Note this variable as being used in an expression.
Keep track of the usage count.
If this is a mapped variable, note its source as being used,
as well.
Note that if a variable is used in 2 branches of a conditional
then this counts as 2 uses.
"""
self._cml_usage_count += 1
if self._cml_var_type == VarTypes.MaybeConstant:
self._cml_var_type = VarTypes.Constant
elif self._cml_var_type == VarTypes.Mapped:
self.get_source_variable()._used()
return
def get_usage_count(self):
"""
Return the number of times this variable is used in an expression.
"""
return self._cml_usage_count
def _decrement_usage_count(self, follow_maps=True):
"""Decrement our usage count."""
DEBUG('partial-evaluator', "Dec usage for", self.fullname())
assert self._cml_usage_count > 0
self._cml_usage_count -= 1
if follow_maps and self._cml_var_type == VarTypes.Mapped:
self.get_source_variable()._decrement_usage_count()
# Note in the model if a usage count has decreased to 1, in
# order to repeat the partial evaluation loop.
if self._cml_usage_count == 1:
model = self.xml_parent.xml_parent
model._pe_repeat = u'yes'
return
def add_rdf_annotation(self, property, target, allow_dup=False):
"""Add an RDF annotation about this variable.
property must be a tuple (qname, namespace_uri).
target may either be a tuple as above, or a unicode string, in which
case it is interpreted as a literal RDF node.
If the variable does not already have a cmeta:id, one will be created
for it with value self.fullname(cellml=True).
The actual RDF will be added to the main RDF block in the model, which
will be created if it does not exist. Any existing annotations of this
variable with the same property will be removed, unless allow_dup=True.
"""
meta_id = self.cmeta_id
if not meta_id:
# Create ID for this variable, so we can refer to it in RDF
meta_id = cellml_metadata.create_unique_id(self.model, unicode(self.fullname(cellml=True)))
self.xml_set_attribute((u'cmeta:id', NSS['cmeta']), meta_id)
property = cellml_metadata.create_rdf_node(property)
target = cellml_metadata.create_rdf_node(target)
source = cellml_metadata.create_rdf_node(fragment_id=meta_id)
if allow_dup:
cellml_metadata.add_statement(self.model, source, property, target)
else:
cellml_metadata.replace_statement(self.model, source, property, target)
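    # Illustrative usage, mirroring set_oxmeta_name below (hypothetical
    # annotation value):
    #
    #   var.add_rdf_annotation((u'bqbiol:is', NSS['bqbiol']),
    #                          (u'oxmeta:membrane_voltage', NSS['oxmeta']))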
def get_rdf_annotation(self, property):
"""Get an RDF annotation about this variable.
property must be a tuple (qname, namespace_uri).
Will return the unique annotation found with source being this variable's id,
and the given property. If no annotation is found (or if the variable does
not have a cmeta:id), returns None. Throws an error if more than one
annotation is found.
"""
meta_id = self.cmeta_id
if not meta_id:
return None
property = cellml_metadata.create_rdf_node(property)
source = cellml_metadata.create_rdf_node(fragment_id=meta_id)
return cellml_metadata.get_target(self.model, source, property)
def get_rdf_annotations(self, property):
"""Get all RDF annotations about this variable that use the given property.
property must be a tuple (qname, namespace_uri).
Will return all annotations found with source being this variable's id,
and the given property. If no annotation is found (or if the variable does
not have a cmeta:id), returns the empty list
"""
meta_id = self.cmeta_id
if not meta_id:
return []
property = cellml_metadata.create_rdf_node(property)
source = cellml_metadata.create_rdf_node(fragment_id=meta_id)
return cellml_metadata.get_targets(self.model, source, property)
def remove_rdf_annotations(self, property=None):
"""Remove all RDF annotations about this variable.
If property is given, only remove annotations with the given property.
"""
meta_id = self.cmeta_id
if meta_id:
DEBUG('cellml-metadata', "Removing RDF annotations for", self, "with id", meta_id)
source = cellml_metadata.create_rdf_node(fragment_id=meta_id)
if property:
property = cellml_metadata.create_rdf_node(property)
cellml_metadata.remove_statements(self.model, source, property, None)
def set_rdf_annotation_from_boolean(self, property, is_yes):
"""Set an RDF annotation as 'yes' or 'no' depending on a boolean value."""
if is_yes:
val = 'yes'
else:
val = 'no'
self.add_rdf_annotation(property, val)
def _set_binding_time(self, bt, temporary=False):
"""Set the binding time of this variable.
Options are members of the BINDING_TIMES Enum.
If temporary is True, then we're temporarily overriding the normal setting,
so save any existing annotation for later replacement.
"""
#print "set bt", self, bt, temporary
assert bt in BINDING_TIMES
if temporary and not hasattr(self, '_cml_saved_bt'):
self._cml_saved_bt = self.get_rdf_annotation(('pe:binding_time', NSS[u'pe']))
self.add_rdf_annotation(('pe:binding_time', NSS[u'pe']), str(bt))
self._cml_binding_time = bt
return
def _unset_binding_time(self, only_temporary=False):
"""Unset any stored binding time.
If the stored BT was a temporary setting, replace it with the original value.
"""
#print "unset bt", self, only_temporary
self._cml_binding_time = None
if hasattr(self, '_cml_saved_bt'):
if self._cml_saved_bt:
self._set_binding_time(getattr(BINDING_TIMES, self._cml_saved_bt))
else:
self.remove_rdf_annotations(('pe:binding_time', NSS[u'pe']))
del self._cml_saved_bt
elif not only_temporary:
self.remove_rdf_annotations(('pe:binding_time', NSS[u'pe']))
def _get_binding_time(self):
"""Return the binding time of this variable, as a member of
the BINDING_TIMES Enum.
This method will (try to) compute & cache the binding time if
necessary.
Variables are classified based on their type:
State, Free -> dynamic
Constant -> static
Mapped -> binding time of source variable
Computed -> binding time of defining expression
"""
if self._cml_binding_time is None:
# Check for an annotation setting the BT
bt_annotation = self.get_rdf_annotation(('pe:binding_time', NSS[u'pe']))
if bt_annotation:
bt = getattr(BINDING_TIMES, bt_annotation)
DEBUG('partial-evaluator', "BT var", self.fullname(), "is annotated as", bt)
elif self.pe_keep:
# This variable must appear in the specialised model
bt = BINDING_TIMES.dynamic
DEBUG('partial-evaluator', "BT var", self.fullname(), "is kept")
else:
# Compute BT based on our type
t = self.get_type()
DEBUG('partial-evaluator', "BT var", self.fullname(), "type", str(t))
if t in [VarTypes.State, VarTypes.Free, VarTypes.Unknown]:
bt = BINDING_TIMES.dynamic
elif t == VarTypes.Constant:
bt = BINDING_TIMES.static
elif t == VarTypes.Mapped:
bt = self.get_source_variable()._get_binding_time()
elif t == VarTypes.Computed:
bt = self._cml_depends_on[0]._get_binding_time()
else:
raise TypeError("Unexpected variable type " + str(t) +
" of variable " + self.fullname() +
" in BTA.")
DEBUG('partial-evaluator', "BT var", self.fullname(), "is", bt)
self._set_binding_time(bt)
else:
bt = self._cml_binding_time
return bt
def is_statically_const(self, ignore_annotations=False):
"""Determine loosely if this variable is considered constant.
Checks if we're Constant, or Computed with a static binding time (or
of unknown type).
If ignore_annotations is True, will ignore cached binding time values and
pe:keep annotations. It instead finds all variables we depend on, directly or
indirectly, and gives a dynamic result iff any is a state or free variable.
"""
result = False
t = self.get_type()
if t in [VarTypes.Constant, VarTypes.Unknown]:
result = True
elif t == VarTypes.Computed:
if ignore_annotations:
dependencies = self.model.calculate_extended_dependencies([self])
result = True
for node in dependencies:
if isinstance(node, cellml_variable) and node.get_type() in [VarTypes.State, VarTypes.Free]:
result = False
break
else:
result = self._get_binding_time() == BINDING_TIMES.static
elif t == VarTypes.Mapped:
result = self.get_source_variable().is_statically_const(ignore_annotations)
return result
def set_value(self, value, ode=None, follow_maps=True):
"""Set the value of this variable.
Expects a floating point or boolean value.
If ode is given, it should be an instance of cellml_variable.
In this case, we're setting the value of d(self)/d(ode).
If this is a mapped variable, assign the value to its source
variable instead, unless follow_maps is set to False
"""
#print "set_value", self, ode, value
if follow_maps and self.get_type() == VarTypes.Mapped:
self.get_source_variable().set_value(value, ode=ode)
else:
assert type(value) in [types.FloatType, types.BooleanType]
self._cml_value[ode] = float(value)
return
def unset_values(self):
"""Unset all values for this variable set with set_value."""
#print "unset_values", self
if self.get_type() == VarTypes.Mapped:
self.get_source_variable().unset_values()
self._cml_value.clear()
def get_value(self, ode=None):
"""Return the value of this variable.
If a value has been set with set_value(), return that.
Otherwise, the behaviour depends on the type of this variable.
If it is Mapped, return the value of the source variable.
If it is Constant or State, return its initial value.
Otherwise, raise an EvaluationError.
If ode is given, it should be an instance of cellml_variable.
In this case, we're getting the value of d(self)/d(ode).
"""
# TODO: Might want to alter the handling of initial value for an ODE?
if ode in self._cml_value:
val = self._cml_value[ode]
elif self.get_type() == VarTypes.Mapped:
val = self.get_source_variable().get_value(ode=ode)
elif ode is None and self.get_type() in [VarTypes.Unknown,
VarTypes.State, VarTypes.Constant]:
if hasattr(self, 'initial_value'):
val = float(self.initial_value)
else:
raise EvaluationError("Variable " + self.fullname() + " has no initial value set.")
elif self.get_type() == VarTypes.Computed and self._get_binding_time() == BINDING_TIMES.static:
# Evaluate the defining expression
val = self._cml_depends_on[0].evaluate()
else:
raise EvaluationError("Unable to find a suitable value for variable " + self.fullname())
return val
@property
def is_modifiable_parameter(self):
"""Whether this variable is a parameter that should be modifiable at run-time."""
return (self.get_type() == VarTypes.Constant and
self.get_rdf_annotation(('pycml:modifiable-parameter', NSS['pycml'])) == 'yes')
def set_is_modifiable_parameter(self, is_param):
"""Set method for the is_modifiable_parameter property.
We need a separate method for this to bypass Amara's property setting checks.
"""
if is_param and self.get_type() != VarTypes.Constant:
raise ValueError("A non-constant variable (%s) cannot be set as a parameter" % (self.fullname(),))
self.set_rdf_annotation_from_boolean(('pycml:modifiable-parameter', NSS[u'pycml']), is_param)
@property
def is_derived_quantity(self):
"""Whether this variable should be included in reports of derived quantities."""
return self.get_rdf_annotation(('pycml:derived-quantity', NSS['pycml'])) == 'yes'
def set_is_derived_quantity(self, is_dq):
"""Set method for the is_derived_quantity property.
We need a separate method for this to bypass Amara's property setting checks.
"""
self.set_rdf_annotation_from_boolean(('pycml:derived-quantity', NSS[u'pycml']), is_dq)
@property
def is_output_variable(self):
"""Whether a protocol has requested this variable as a model output."""
return self.get_rdf_annotation(('pycml:output-variable', NSS['pycml'])) == 'yes'
def set_is_output_variable(self, is_ov):
"""Set method for the is_output_variable property.
We need a separate method for this to bypass Amara's property setting checks.
"""
self.set_rdf_annotation_from_boolean(('pycml:output-variable', NSS[u'pycml']), is_ov)
@property
def pe_keep(self):
"""Whether PE should retain this variable in the specialised model."""
return (self.get_rdf_annotation(('pe:keep', NSS[u'pe'])) == 'yes' or
self.is_modifiable_parameter or
self.is_derived_quantity or
self.is_output_variable)
def set_pe_keep(self, keep):
"""Set method for the pe_keep property.
We need a separate method for this to bypass Amara's property setting checks.
"""
self.set_rdf_annotation_from_boolean(('pe:keep', NSS[u'pe']), keep)
@property
def oxmeta_name(self):
"""The canonical name of this variable, as given by Oxford metadata.
Returns the empty string if no annotation is given.
"""
for annotation in self.get_rdf_annotations(('bqbiol:is', NSS['bqbiol'])):
name = cellml_metadata.namespace_member(annotation, NSS['oxmeta'], wrong_ns_ok=True)
if name:
return name
# No suitable annotation found
return ""
def set_oxmeta_name(self, name):
"""Set method for the oxmeta_name property.
Sets a bqbiol:is RDF annotation with the name.
We need a separate method for this to bypass Amara's property setting checks.
"""
self.add_rdf_annotation(('bqbiol:is', NSS['bqbiol']), ('oxmeta:'+name, NSS['oxmeta']))
def _reduce(self, update_usage=False):
"""Reduce this dynamic variable that is being kept.
If it has a static source, then turn it into a constant.
Otherwise make it depend directly on an appropriate source:
either one of its source variables that is also being kept,
or the ultimate defining expression.
If update_usage is True then this variable is used in an equation,
so reduce the usage of any source variables it no longer depends on.
"""
assert self.pe_keep
src = self.get_source_variable(recurse=True)
if src._get_binding_time() is BINDING_TIMES.static:
# Become a constant
self._cml_var_type = VarTypes.Constant
# Set our value
value = unicode(src.get_value())
self.initial_value = value
# Fix up usage counts
if update_usage:
self.get_source_variable()._decrement_usage_count()
# We now have no dependencies
self._cml_depends_on = []
self._cml_source_var = None
elif self.get_type() == VarTypes.Mapped:
# Manually recurse down maps to find a suitable source
src = self.get_source_variable()
while not src.pe_keep and src.get_type() == VarTypes.Mapped and src.get_usage_count() == 1:
src = src.get_source_variable()
if src.pe_keep or src.get_usage_count() > 1:
# Depend directly on src
self._cml_depends_on = [src]
self._cml_source_var = src
# Fix up usage counts
if update_usage:
src._used()
self.get_source_variable()._decrement_usage_count()
else: # src.get_type() != VarTypes.Mapped
# This variable is the only reference to the ultimate defining
# expression, so become computed.
self._cml_var_type = VarTypes.Computed
defn = src.get_dependencies()[0]
assert isinstance(defn, mathml_apply)
## Move the definition to this component
#defn._unset_cached_links()
#defn.xml_parent.xml_remove_child(defn)
#self.component.math.xml_append(defn)
# Schedule the LHS of the defining expression for update
defn._cml_assigns_to = self
defn._pe_process = u'retarget'
# Fix up usage counts
if update_usage:
self.get_source_variable()._decrement_usage_count()
# Depend directly on the assignment expression
self._cml_depends_on = [defn]
self._cml_source_var = None
@staticmethod
def create_new(elt, name, units, id=None, initial_value=None, interfaces={}):
"""Create a new <variable> element with the given name and units.
Optionally id, initial_value, and interfaces may also be given.
elt may be any existing XML element.
"""
attrs = {(u'units', None): unicode(units),
(u'name', None): unicode(name)}
if id:
attrs[(u'cmeta:id', NSS[u'cmeta'])] = unicode(id)
if initial_value is not None and initial_value != u'':
attrs[(u'initial_value', None)] = unicode(initial_value)
for iface, val in interfaces.items():
attrs[(iface + u'_interface', None)] = unicode(val)
new_elt = elt.xml_create_element(u'variable', NSS[u'cml'], attributes=attrs)
return new_elt
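    # Illustrative usage (hypothetical name, units and value):
    #
    #   v = cellml_variable.create_new(model, u'V', u'millivolt',
    #                                  initial_value=u'-85.0',
    #                                  interfaces={u'public': u'out'})
    #   comp._add_variable(v)    # add it to a component (see above)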
class UnitsSet(set):
"""A set of cellml_units objects.
This class behaves like a normal set, but also has additional
methods for operations specific to sets of <units> objects:
simplify - allow for multiplication of sets of units
dimensionally_equivalent - compare 2 sets of units for dimensional equivalence
description - describe the units in this set
All units in the set (normally) must be dimensionally equivalent. The exception is when dealing with
    Functional Curation protocols which can define units conversion rules for non-scaling cases. We can
then have sets of alternative units in different dimensions, and the get_consistent_set method helps
with selecting from these.
"""
def __new__(cls, iterable=[], expression=None):
"""
Work around annoyance in set implementation of python 2.4.2c1 and on.
setobject.c checks for keyword arguments in its __new__ instead of its
__init__, so we get an error
'TypeError: set() does not take keyword arguments' if we don't do this.
"""
return super(UnitsSet, cls).__new__(cls, iterable)
def __init__(self, iterable=[], expression=None):
super(UnitsSet, self).__init__(iterable)
self._expression = expression
self._sources = {}
return
def copy(self):
"""Do a shallow copy of this UnitsSet."""
new_set = super(UnitsSet, self).copy()
new_set._expression = self._expression
new_set._sources = {}
for units, src_list in self._sources.iteritems():
new_set._sources[units] = copy.copy(src_list)
return new_set
def get_consistent_set(self, desired_units):
"""Extract a subset of the units in this set that are dimensionally equivalent.
When dealing with potential non-scaling conversions, an expression may have potential units that are
not within the same dimension. However, when deciding on the units for the expression most operations
need to handle a set of options that *are* within the same dimension, and this operation supports that.
Given a cellml_units object for the desired units of the expression, this method first tries to create
a UnitsSet containing just our members in the same dimension. If we have no members in the desired
dimension, then we check that all members of this set are in the same dimension, and return the set
itself - there should never be more than 2 different dimensions to choose from (I hope!).
"""
new_set = UnitsSet([], expression=self._expression)
for units in self:
if units.dimensionally_equivalent(desired_units):
new_set.add(units)
new_set._sources[units] = copy.copy(self._sources[units])
if not new_set:
rep_u = self.extract()
for units in self:
if not units.dimensionally_equivalent(rep_u):
raise ValueError("Unexpected dimensional variation in UnitsSet; " + rep_u.description() + " and " + units.description()
+ " do not match.")
new_set = self
return new_set
def equals(self, other):
"""Test whether the units in the set are equal to those in another set."""
try:
equal = self.extract(check_equality=True).equals(other.extract(check_equality=True))
except ValueError:
equal = False
return equal
def extract(self, check_equality=False):
"""Extract a representative element from this set.
This is intended to be used to get the cellml_units object from a singleton set.
If check_equality is True, check that all members of this set have the same multiplicative factor.
"""
representative = iter(self).next()
if check_equality:
for u in self:
if not u._rel_error_ok(u.expand().get_multiplicative_factor(),
representative.expand().get_multiplicative_factor(),
1e-6):
raise ValueError("UnitsSet equality check failed")
if u.is_simple() and not u._rel_error_ok(u.expand().get_offset(),
representative.expand().get_offset(),
1e-6):
raise ValueError("UnitsSet equality check failed")
return representative
def set_expression(self, expr):
"""Store a reference to the expression that has these units."""
self._expression = expr
return
def _add_source(self, units, src_units_set, src_units):
"""Add source units for the given units.
This stores references to how units were arrived at when doing a
simplify. It manages lists of (src_units_set, src_units) pairs for
each units definition in this set.
If src_units_set has no associated expression, then it is
considered to be a temporary object, created for example whilst
doing an n-ary times operation. In this case, we add its list of
sources for src_units to our sources list instead.
"""
if not units in self._sources:
self._sources[units] = []
if not hasattr(src_units_set, '_expression'):
print self.description(), units.description()
print src_units_set.description(), src_units.description()
if src_units_set._expression:
self._sources[units].append((src_units_set, src_units))
else:
try:
self._sources[units].extend(src_units_set._sources[src_units])
except KeyError:
# No sources list found. This can happen if we do
# dimensionless.simplify(...), for example, to evaluate powers.
pass
return
def _get_sources(self, units):
"""Return the sources list for the given units."""
return self._sources.get(units, [])
def get_expression(self):
"""Return an expression that has these units."""
return self._expression
def simplify(self, other_units=None, other_exponent=1):
"""Simplify the units in this set.
Each cellml_units object in this set is simplified, and a new
UnitsSet returned with the results.
If other_units is not None, then products of units are
calculated. The returned set is essentially the cartesian
product of the input sets under the simplify operator, i.e.
u1.simplify(other_units=u2, other_exponent=other_exponent)
will be called for each member u1 of self and each member u2
of other_units (if other_units is a UnitsSet; otherwise
u2=other_units).
"""
result_set = UnitsSet()
for units in self:
if other_units is None:
res_u = units.simplify()
result_set.add(res_u)
result_set._add_source(res_u, self, units)
else:
if isinstance(other_units, cellml_units):
other_units = UnitsSet([other_units])
for u in other_units:
res_u = units.simplify(u, other_exponent)
result_set.add(res_u)
result_set._add_source(res_u, self, units)
result_set._add_source(res_u, other_units, u)
return result_set
def dimensionally_equivalent(self, other_units):
"""Check for dimensional equivalence between sets of units.
Since all units in each set should be equivalent, we just compare
an arbitrary member from each set.
other_units may be a single cellml_units instance, in which case we
compare an arbitrary member of self to it.
"""
u1 = self.extract()
u2 = other_units.extract()
return u1.dimensionally_equivalent(u2)
def description(self):
"""Describe these units.
Shows the descriptions of each member, as though this were a set of
unicode strings. If multiple members have the same description,
only one instance is shown. If only one description is being shown,
then the curly brackets are not added.
"""
desc = list(set(u.description() for u in self))
desc.sort()
if len(desc) > 1:
desc = u'{' + u','.join(desc) + u'}'
else:
desc = desc[0]
return desc
class cellml_units(Colourable, element_base):
"""
Specialised units class.
Contains useful methods for defining the standard units dictionary,
and checking for units consistency.
After being defined, a units definition should be regarded as being
immutable, as should individual <unit> elements, so the expansion
and simplification methods create new objects, which don't really
live in the document tree (they're not a child of any element in the
model), although they do have their xml_parent attribute set.
Note that <unit> elements must not be made a child of more than one
<units> element, otherwise the linked lists get tangled up.
"""
def __init__(self):
super(cellml_units, self).__init__()
self._cml_expanded = None
self._cml_simplified = {}
self._cml_generated = False
self._cml_quotients = {}
self._cml_hash = None
return
def __repr__(self):
return '<cellml_units %s @ 0x%x>' % (self.name, id(self))
def __contains__(self, item):
"""Prevent the default implementation using Amara's __iter__ to give unexpected results."""
return False
@property
def _hash_tuple(self):
"""Generate a tuple used as the basis for our hash value."""
if self._cml_hash is None:
hash_list = [self.is_base_unit()]
if not self._cml_generated:
hash_list.append(self.name)
hash_list.extend(list(getattr(self, u'unit', [])))
hash_tup = tuple(hash_list)
self._cml_hash_tup = hash_tup
self._cml_hash = hash(hash_tup)
return self._cml_hash_tup
@property
def uniquify_tuple(self):
"""For avoiding duplicate identical units definitions.
Based on description(cellml=True), since that is what we really want to be unique.
Also includes offset information, since that is omitted in the name given by description.
"""
l = [self.description(cellml=True)]
if self.is_simple():
# Include offset information
l.append((self.unit.get_units_element().uniquify_tuple,
self.unit.get_offset()))
return tuple(l)
def __hash__(self):
"""Generate a hash for these units.
Hashes a tuple, the first element of which is the result of self.is_base_unit(),
the second of which is our name if we are not auto-generated,
and the remaining elements of which are our <unit> elements.
"""
if self._cml_hash is None:
_ = self._hash_tuple
return self._cml_hash
def __cmp__(self, other):
"""Compare 2 units objects, by comparing their hashes.
Means using units as dictionary keys is more sane."""
if isinstance(other, cellml_units):
if hash(self) == hash(other):
return cmp(self._cml_hash_tup, other._cml_hash_tup)
else:
return cmp(hash(self), hash(other))
else:
return super(cellml_units, self).__cmp__(other)
# The following currently causes infinite loops/recursions
## def __eq__(self, other):
## """Compare 2 units elements for equality.
## return self.equals(other)
## def __ne__(self, other):
## """Inverse of self.__eq__(other)."""
## return not self.__eq__(other)
def _rel_error_ok(self, value1, value2, tol):
"""Test if the relative difference of 2 values is within tol."""
if abs(value1) == 0.0:
return abs(value2) < tol
else:
return (abs(value1-value2)/abs(value1)) < tol
def equals(self, other):
"""Compare 2 units elements for equality.
Two units are deemed equal if they are both dimensionally equivalent and have the same
multiplicative factor (to within a relative tolerance of 10^-6).
If both are simple units, they must also have the same offset.
"""
equal = isinstance(other, cellml_units) and \
self.dimensionally_equivalent(other) and \
self._rel_error_ok(self.expand().get_multiplicative_factor(),
other.expand().get_multiplicative_factor(),
1e-6)
if equal and self.is_simple():
equal = self._rel_error_ok(self.unit.get_offset(),
other.unit.get_offset(),
1e-6)
return equal
@property
def model(self):
return self.rootNode.model
def extract(self, check_equality=False):
"""Return these units.
Used for interface compatibility with UnitsSet."""
return self
def copy(self):
"""Return a new UnitsSet containing this cellml_units object.
Used for interface compatibility with UnitsSet, where the method performs a shallow copy.
"""
return UnitsSet([self])
def description(self, force=False, cellml=False):
"""Return a human-readable name for these units.
        The name will be descriptive and based on the constituent <unit> elements, e.g. 'volt per second^2'
By default, if these are user-defined units, then return self.name. Set force to True to override this behaviour.
Set cellml to True to get a description that is also a valid CellML identifier.
"""
if self.is_base_unit():
desc = self.name
elif not force and not self._cml_generated:
desc = self.name
else:
descs, per_descs = [], []
# Multiplier
m = self.get_multiplier()
if m != 1:
descs.append(unicode(m))
# Constituent units
dimensionless = self.get_units_by_name('dimensionless')
for unit in self.unit:
if unit.get_units_element() is dimensionless:
continue
desc = [getattr(unit, u'prefix_', u''), unit.get_units_element().name]
e = unit.get_exponent()
if int(e) == e:
# Cast to integer so name looks nicer.
e = int(e)
if abs(e) != 1:
desc.extend(['^', str(abs(e))])
desc = u''.join(desc)
if e < 0:
per_descs.append(desc)
else:
descs.append(desc)
# Sort unit descriptions for readability
descs.sort()
descs = u' '.join(descs)
per_descs.sort()
desc = u' per '.join([descs] + per_descs)
if not desc:
desc = u'dimensionless'
elif descs:
desc = u"'" + desc + u"'"
else:
# Remove unwanted space from the start
desc = u"'" + desc[1:] + u"'"
# Convert to CellML identifier?
if cellml:
desc = desc.replace(u"'", u"").replace(u"^", u"")
desc = desc.replace(u"*", u"").replace(u".", u"_")
desc = desc.replace(u" ", u"_")
return desc
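    # Examples of the generated names (based on the replacements above), for a
    # generated definition equivalent to volt/second^2:
    #
    #   description()             ->  u"'volt per second^2'"
    #   description(cellml=True)  ->  u'volt_per_second2'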
def get_units_by_name(self, uname):
"""Return an object representing the element that defines the units named `uname'."""
# Look up units in our parent model or component element instead
return self.xml_parent.get_units_by_name(uname)
def expand(self):
"""Expand to a product of powers of base units.
Expand this units definition according to the algorithm given in appendix C.3.4 of the CellML spec.
Caches and returns the resulting <units> object, which will be
newly created if there are any changes made, or this object if not.
"""
if self._cml_expanded is None:
# Do the expansion
if self.is_base_unit():
# We are a base unit, so stop the recursion; the result is us
self._cml_expanded = self
elif self.is_simple():
# Simple units definition, i.e. only one <unit> element,
# exponent=1, and referenced units are simple or base.
# Expand the units referenced by our <unit> element
expanded_child = self.unit.get_units_element().expand()
# Get our multiplicative factor & offset as numbers
m1p1 = self.unit.get_multiplicative_factor()
o1 = self.unit.get_offset()
if expanded_child.is_base_unit():
# New multiplier, etc. are just ours
m_new = m1p1
o_new = o1
# Referenced units are expanded_child
ref_obj_new = expanded_child
else:
# Get the multiplier & offset of our expanded child
m2p2 = expanded_child.unit.get_multiplicative_factor()
o2 = expanded_child.unit.get_offset()
# Calculate new multiplier, etc. per Equation (11)
m_new = m1p1*m2p2
o_new = o1 + o2/m1p1
# Referenced units are those referenced by expanded_child
# These will be base units
ref_obj_new = expanded_child.unit.get_units_element()
# Create the new units & unit elements
attrs = {u'name': self.name}
self._cml_expanded = self.xml_create_element(u'units',
NSS[u'cml'],
attributes=attrs)
self._cml_expanded._cml_generated = True
attrs = {u'units': ref_obj_new.name,
u'multiplier': unicode(m_new),
u'offset': unicode(o_new)}
unit = self.xml_create_element(u'unit', NSS[u'cml'],
attributes=attrs)
# Manually set the reference object for unit, since we have
# it handy
unit._set_units_element(ref_obj_new)
self._cml_expanded.xml_append(unit)
else:
# Complex units definition, i.e. multiple <unit> elements,
# or non-unitary exponent, at some point within this defn.
# Create the new units element so we can add children to it
attrs = {u'name': self.name}
exp_units = self.xml_create_element(u'units', NSS[u'cml'],
attributes=attrs)
exp_units._cml_generated = True
# Compute m_t (Equation (18)) expanding referenced
# units as we go
m_t = 1
for unit in self.unit:
m_t *= unit.get_multiplicative_factor() # * is assoc. :)
# We'll need the exponent to modify units referenced
# by this reference
e = unit.get_exponent()
# Expand referenced units
exp_u = unit.get_units_element().expand()
if exp_u.is_base_unit():
# Create reference to exp_u
attrs = {u'units': exp_u.name,
u'exponent': unicode(e)}
u = self.xml_create_element(u'unit', NSS[u'cml'],
attributes=attrs)
exp_units.xml_append(u)
else:
# Process units referenced by exp_u, which will be
# base units.
for u in exp_u.unit:
m_t *= u.get_multiplicative_factor() ** e
attrs = {u'units': u.units,
u'exponent': unicode(
u.get_exponent() * e)}
u_new = u.xml_create_element(u'unit',
NSS[u'cml'],
attributes=attrs)
exp_units.xml_append(u_new)
# Set the multiplier for the expanded units to m_t.
# Since a <units> elements doesn't have a multiplier
# attribute, we set it on the first <unit> element.
# Note that all the <unit> elements have been newly created,
# so have an implicit multiplier of 1 currently.
# Note also that the fact that each <unit> has an exponent
# doesn't matter, since the exponent doesn't affect the
# multiplier.
# Alan pointed out a bug: if the <unit> elements are
# in non-canonical order then we can get identical
# units (according to the intended canonical form)
# comparing as non-equal, because expansion put the
# multiplicative factor on different <unit> elements
# in each case. Hence we have to put the factor on
# the first <unit> element according to a canonical
# sorting order.
# TODO: Be a bit cleverer about this? In the base unit case
# above we could do exp_units.xml_append(unit.clone()) and
# not have its multiplicative factor contributing to m_t.
# Would then need to multiply here, since first unit may
# already have multiplier != 1.
# Alternative idea from Alan: I have just noticed that
# when expanding a complex units definition based on a
# complex units definition, we can get away with not
# sorting the unit elements. All that is required is
                # to create a new unit element whose type is
# dimensionless and value of its multiplier attribute
# m*10^(p*e). This avoids having to sort things and
# propagating the multiplier attribute...
first_unit = sorted(exp_units.unit, key=lambda u: u.units)[0]
first_unit.multiplier = unicode(m_t)
# Cache result
self._cml_expanded = exp_units
# Set parent of the expanded units to be our parent
self._cml_expanded.xml_parent = self.xml_parent
        # Return the cached result
return self._cml_expanded
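    # Illustrative sketch, not part of the original source: how expand()
    # composes multiplicative factors in the complex case.  Given a
    # hypothetical definition
    #   <units name="cm_per_ms">
    #     <unit units="metre" prefix="centi"/>
    #     <unit units="second" prefix="milli" exponent="-1"/>
    #   </units>
    # each reference contributes its factor m*(10**p)**e, so
    #   m_t = (1*(10**-2)**1) * (1*(10**-3)**-1) = 0.01 * 1000.0 = 10.0
    # and the expanded result references the base units directly, with the
    # whole factor placed on the first <unit> in canonical (sorted) order:
    #   <unit units="metre" multiplier="10.0" exponent="1.0"/>
    #   <unit units="second" exponent="-1.0"/>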
def is_base_unit(self):
"""Return True iff this is a base units definition."""
return getattr(self, u'base_units', u'no') == u'yes'
def is_simple(self):
"""Return True iff this is a simple units definition.
Units are simple if:
there is 1 <unit> element
the exponent is omitted or has value 1.0
the referenced units are simple or base units
"""
simple = False
if not self.is_base_unit() and len(self.unit) == 1:
if self.unit.get_exponent() == 1.0:
u = self.unit.get_units_element()
if u.is_base_unit() or u.is_simple():
simple = True
return simple
def get_multiplicative_factor(self):
"""Return the multiplicative factor of this units definition.
The multiplicative factor of a units definition can be defined as
the product of the multiplicative factors of its unit children.
"""
m = reduce(operator.mul,
map(lambda unit: unit.get_multiplicative_factor(),
getattr(self, u'unit', [])),
1)
return m
def get_multiplier(self):
"""Return the multiplier of this units definition.
The multiplier of a units definition can be defined as the product
of the multipliers of its unit children.
"""
return reduce(operator.mul,
map(lambda unit: unit.get_multiplier(),
getattr(self, u'unit', [])),
1)
def get_offset(self):
"""Return the offset associated with this units definition.
If these are simple units, return the offset on our unit child.
Otherwise, return 0.
"""
if self.is_simple():
o = self.unit.get_offset()
else:
o = 0
return o
@staticmethod
def create_new(parent, name, bases, add_to_parent=False, standard=False):
"""Create a new units definition element.
It requires either a cellml_model or cellml_component element
to be passed as the parent for the definition. If
add_to_parent is set to true the new units element will be
appended to parent's children.
The bases parameter should be a list of dictionaries suitable
for use as the keyword arguments of cellml_units._based_on.
If the list is empty it will be defined as a base unit.
"""
# Create the units element
attrs = {u'name': unicode(name)}
if not bases:
attrs[u'base_units'] = u'yes'
if standard:
attrs[u'standard'] = u'yes'
u = parent.xml_create_element(u'units', NSS[u'cml'], attributes=attrs)
if add_to_parent:
parent.xml_append(u)
else:
u.xml_parent = parent # Hack so units lookups work
# Add references to units we're based on
for basis in bases:
u._based_on(**basis)
return u
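    # Usage sketch (hypothetical names, for illustration only): creating a
    # 'millivolt' definition based on volt; each dictionary in bases becomes
    # the keyword arguments of a _based_on call:
    #   mv = cellml_units.create_new(model, u'millivolt',
    #                                [{'units': u'volt', 'prefix': u'milli'}],
    #                                add_to_parent=True)
    # Passing an empty bases list instead yields
    #   <units name="..." base_units="yes"/>.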
def _based_on(self, units=None, prefix=None, exponent=None,
multiplier=None, offset=None):
"""Convenience function for defining new units."""
for v in ['units', 'prefix', 'exponent', 'multiplier', 'offset']:
# Coerce from str to unicode
exec "if type(%s) == str: %s = unicode(%s)" % (v,v,v)
# Check type
exec "assert(%s is None or type(%s) == unicode)" % (v,v)
assert(not hasattr(self, 'base_units') or self.base_units == u'no')
attrs = {u'units': units}
if offset:
# Simple units definition
assert(not hasattr(self, 'unit'))
attrs[u'offset'] = offset
if not prefix is None: attrs[u'prefix'] = prefix
if not exponent is None: attrs[u'exponent'] = exponent
else:
# Complex units definition
if not prefix is None: attrs[u'prefix'] = prefix
if not exponent is None: attrs[u'exponent'] = exponent
if not multiplier is None: attrs[u'multiplier'] = multiplier
self.xml_append(self.xml_create_element(u'unit', NSS[u'cml'],
attributes=attrs))
return
# Class attribute, so all <units> elements share the same value.
units_name_counter = [0]
def simplify(self, other_units=None, other_exponent=1):
"""Simplify these units.
Create a new <units> element representing a simplified version of
this element. This implements the algorithm of appendix C.3.1. It
is however slightly different, in order to allow for units
conversions rather than just preserving dimensional equivalence.
If other_units is not None, then produce a simplified version of
the product of these units and other_units. other_units are
considered to be raised to the power of other_exponent (so a
quotient can be performed by passing other_exponent=-1). Note that
other_exponent should have numeric type.
If other_units is a UnitsSet, then we construct a UnitsSet
containing self, and call the simplify method on that, thus
returning a UnitsSet instance.
Multiplicative factors on the original <unit> objects are
maintained on the generated references, by taking the product of
all factors from <unit> objects that contribute to the new <unit>
object. Note that this means that we may need to retain a
reference to dimensionless with a multiplier, for example if the
quotient of centimetres by metres is taken.
Offsets are only permitted on simple units, so may not appear where
there are multiple <unit> elements. Hence when a product of units
is taken, any offsets are discarded.
"""
if isinstance(other_units, UnitsSet):
# Use the set version of simplify
return UnitsSet([self]).simplify(other_units, other_exponent)
# Check for result in cache (see #1714)
if (other_units, other_exponent) in self._cml_simplified:
return self._cml_simplified[(other_units, other_exponent)]
# Make a list of all <unit> elements to be included
units = []
if self.is_base_unit():
# We are a base unit, so invent a reference to ourselves
u = self.xml_create_element(u'unit', NSS[u'cml'],
attributes={u'units': self.name})
u.xml_parent = self # Hack, but it might need a parent...
u._set_units_element(self)
units.append(u)
else:
units = list(self.unit)
our_unit_elements = frozenset(units)
other_unit_elements = None
if not other_units is None:
if other_units.is_base_unit():
# other_units are base units, so invent a reference
attrs = {u'units': other_units.name,
u'exponent': unicode(other_exponent)}
u = self.xml_create_element(u'unit', NSS[u'cml'],
attributes=attrs)
u.xml_parent = other_units
u._set_units_element(other_units)
units.append(u)
if other_exponent == 1:
other_unit_elements = frozenset([u])
else:
if other_exponent == 1:
units.extend(list(other_units.unit))
other_unit_elements = frozenset(other_units.unit)
else:
for unit in other_units.unit:
# Need to create a new <unit> element with different
# exponent and multiplier
u = unit.clone()
u.exponent = unicode(other_exponent *
u.get_exponent())
u.multiplier = unicode(u.get_multiplier() **
other_exponent)
units.append(u)
# Sort <unit> elements according to the <units> objects they
# reference
dimensionless = self.get_units_by_name(u'dimensionless')
d = {dimensionless: []}
for unit in units:
obj = unit.get_units_element()
if obj in d:
d[obj].append(unit)
else:
d[obj] = [unit]
# Collapse equivalent units references into a single reference.
# That is, all references to the same object get collapsed into a new
# reference to that object with different exponent, etc.
for obj in d.keys():
if obj != dimensionless and len(d[obj]) > 1:
# Sum the exponents
expt = sum(map(lambda u: u.get_exponent(), d[obj]))
# If exponents cancel, replace with ref to dimensionless
if expt == 0:
attrs = {u'units': u'dimensionless'}
else:
attrs = {u'units': d[obj][0].units,
u'exponent': unicode(expt)}
# Compute the multiplier for the new unit reference, as
# the product of multiplicative factors on the originals
m = reduce(operator.mul,
map(lambda u: u.get_multiplicative_factor(),
d[obj]))
attrs[u'multiplier'] = unicode(m)
# Create a new reference
new = self.xml_create_element(u'unit', NSS[u'cml'],
attributes=attrs)
new.xml_parent = self
if expt == 0:
# Note an extra reference to dimensionless...
d[dimensionless].append(new)
# ...and remove the references to obj from d
del d[obj]
else:
d[obj] = new
elif obj != dimensionless and d[obj]:
# d[obj] is a singleton list. Create a new reference and
# store it instead (a new reference is needed to avoid
# altering the linked list from self.unit).
d[obj] = d[obj][0].clone()
# Note d must have at least one key, namely dimensionless
# If dimensionless is referenced in addition to other units,
# remove the references to dimensionless.
# But remember the multipliers!
m = reduce(operator.mul,
map(lambda u: u.get_multiplicative_factor(),
d[dimensionless]),
1)
if m == 1:
del d[dimensionless]
else:
# Retain a single reference to dimensionless, with the
# combined multiplier.
d[dimensionless] = d[dimensionless][0].clone()
d[dimensionless].multiplier = unicode(m)
## print "Keeping d'less ref with m =",m,"from",
## print self.description(),
## if other_units:
## print "and",other_units.description()
## else:
## print
if not d:
# The units definition only referenced dimensionless, and
# the multiplier was 1, so we can replace it by dimensionless
# itself
new_units = dimensionless
else:
# Avoid creating new <units> elements unnecessarily
unit_elements = set(d.values())
if unit_elements == our_unit_elements:
new_units = self
elif unit_elements == other_unit_elements:
new_units = other_units
else:
# Create new <units> element
self.units_name_counter[0] += 1
uname = u'___units_' + str(self.units_name_counter[0])
## print ".simplify", uname
new_units = self.xml_create_element(
u'units', NSS[u'cml'], attributes={u'name': uname})
new_units._cml_generated = True
# Set its parent to be ours or other_units'
new_units.xml_parent = self._best_parent(other_units)
# Add <unit> children
for unit in unit_elements:
new_units.xml_append(unit)
if self.model._is_new_units_obj(new_units):
# Add name to units dictionary
## print "Adding",uname,hash(new_units),new_units.description()
new_units.xml_parent.add_units(uname, new_units)
# Cache and return result
new_units = self.model._get_units_obj(new_units)
self._cml_simplified[(other_units, other_exponent)] = new_units
return new_units
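    # Worked sketch (not from the original source): simplifying the product
    # of centimetre and metre**-1, e.g. cm.simplify(metre, -1) for a
    # hypothetical 'cm' definition.  Both unit references resolve to the
    # same base <units> object (metre), so their exponents sum to
    # 1 + (-1) = 0 and the pair collapses to a reference to dimensionless.
    # The multiplicative factors are kept:
    #   m = (10**-2) * ((10**0)**-1) = 0.01
    # so the result retains a dimensionless reference with multiplier 0.01
    # rather than becoming dimensionless itself (see the m == 1 test above).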
def _best_parent(self, other_units):
"""Return a suitable parent for units formed from self and other_units.
If either constituent is in a component, that component should be the
parent, otherwise the model should be.
"""
p1, p2 = self.xml_parent, other_units and other_units.xml_parent
if p2 and p1 != p2 and isinstance(p1, cellml_model):
p1 = p2
return p1
def quotient(self, other_units):
"""Form the quotient of two units definitions.
This method does not simplify the resulting units.
Quotient units will be cached.
"""
if not other_units in self._cml_quotients:
# Create new <units> element
self.units_name_counter[0] += 1
uname = u'___units_' + str(self.units_name_counter[0])
quotient_units = self.xml_create_element(
u'units', NSS[u'cml'], attributes={u'name': uname})
quotient_units._cml_generated = True
quotient_units.xml_parent = self._best_parent(other_units)
# Invent references to the constituent units
u = self.xml_create_element(u'unit', NSS[u'cml'],
attributes={u'units': self.name})
u._set_units_element(self)
quotient_units.xml_append(u)
            u = self.xml_create_element(u'unit', NSS[u'cml'],
                                        attributes={u'units': other_units.name,
                                                    u'exponent': u'-1'})
u._set_units_element(other_units)
quotient_units.xml_append(u)
# Cache
if self.model._is_new_units_obj(quotient_units):
quotient_units.xml_parent.add_units(uname, quotient_units)
quotient_units = self.model._get_units_obj(quotient_units)
self._cml_quotients[other_units] = quotient_units
return self._cml_quotients[other_units]
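    # Sketch (illustrative only): volt.quotient(second) yields a generated
    # <units name="___units_N"> holding two invented references,
    #   <unit units="volt"/> and <unit units="second" exponent="-1"/>,
    # with no simplification performed; callers that need a canonical form
    # combine this with simplify().  The result is cached per other_units.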
def dimensionally_equivalent(self, other_units):
"""Return True iff other_units is dimensionally equivalent to self.
As per appendix C.2.2, two units definitions have dimensional
equivalence if, when each is recursively expanded and
simplified into a product of base units, each has the same set
of base units with the same exponent on corresponding base units.
"""
u1 = self.expand().simplify()
if isinstance(other_units, UnitsSet):
other_units = other_units.extract()
u2 = other_units.expand().simplify()
# Build dictionaries mapping base_unit_obj to exponent.
d1, d2 = {}, {}
if u1.is_base_unit():
d1[u1] = 1
else:
for u in u1.unit:
d1[u.get_units_element()] = u.get_exponent()
if u2.is_base_unit():
d2[u2] = 1
else:
for u in u2.unit:
d2[u.get_units_element()] = u.get_exponent()
# Compare keys: do u1 & u2 have the same set of base units?
sym_diff = set(d1.keys()) ^ set(d2.keys())
if sym_diff:
dimensionless = self.get_units_by_name(u'dimensionless')
if not sym_diff == set([dimensionless]):
# Symmetric difference is non-empty, but not an ignorable
# instance of dimensionless
return False
# Compare corresponding exponents
for k in d1:
try:
if d1[k] != d2[k]: return False
except KeyError:
# One may have dimensionless as a key
pass
# We have a match!
return True
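    # Sketch (illustrative only): millivolt and volt both expand and
    # simplify to the same base units with equal exponents,
    #   {kilogram: 1, metre: 2, second: -3, ampere: -1}
    # so dimensionally_equivalent returns True even though their
    # multiplicative factors differ by a factor of 10**-3; only dimensions
    # are compared here, per appendix C.2.2.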
class cellml_unit(element_base):
"""Specialised class for <unit> elements.
Maintains a reference to the object representing the units definition
it references, provides some helpful accessor type methods, and allows
safe, easy cloning of <unit> elements.
"""
def __init__(self):
element_base.__init__(self)
self._cml_units_obj = None
return
def _hash_tup(self):
"""Create a tuple to be used for hashing/equality tests."""
return (self.get_units_element(), getattr(self, u'prefix_', ''),
self.get_multiplier(), self.get_exponent(),
self.get_offset())
def __eq__(self, other):
"""Compare two <unit> elements.
Two <unit> elements are equal if they reference the same <units>
element, and have the same prefix, multiplier, etc.
"""
eq = False
if isinstance(other, cellml_unit):
eq = self._hash_tup() == other._hash_tup()
return eq
def __ne__(self, other):
"""The inverse of self.__eq__(other)."""
return not self.__eq__(other)
def __hash__(self):
"""Richer hashing function than the default based on object id.
Returns the hash of a tuple of relevant attributes."""
return hash(self._hash_tup())
def get_units_element(self):
"""
Return the object representing the <units> element that this <unit> element references.
"""
if self._cml_units_obj is None:
# Chase the reference and cache it
self._cml_units_obj = self.xml_parent.get_units_by_name(self.units)
return self._cml_units_obj
def _set_units_element(self, obj, override=False):
"""
Set the object representing the <units> element that this <unit> element references.
Don't use unless you know what you're doing.
"""
assert override or self._cml_units_obj is None
self._cml_units_obj = obj
return
SI_PREFIXES = {"yotta": 24, "zetta": 21, "exa": 18, "peta": 15,
"tera": 12, "giga": 9, "mega": 6, "kilo": 3,
"hecto": 2, "deka": 1,
"deci": -1, "centi": -2,
"milli": -3, "micro": -6, "nano": -9, "pico": -12,
"femto": -15, "atto": -18, "zepto": -21, "yocto": -24}
def get_multiplicative_factor(self):
"""Return the factor this units reference is multiplied by.
Return the quantity m.p^e as a floating point number, where:
m is the multiplier (default value 1.0)
p is the multiplicative factor due to the prefix (default 10^0=1)
e is the exponent (default 1)
"""
m = self.get_multiplier()
e = self.get_exponent()
p = getattr(self, u'prefix_', 0) # Since prefix is a method :(
if p in self.SI_PREFIXES:
p = self.SI_PREFIXES[p]
else:
p = int(p) # RELAX NG schema says it's an integer
p = 10**p
return m * (p**e)
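    # Worked numeric sketch (not from the original source): for a reference
    #   <unit units="metre" prefix="centi" exponent="3" multiplier="2"/>
    # we have m=2.0, p=10**-2, e=3.0, giving
    #   2.0 * ((10**-2)**3.0) = 2e-06
    # i.e. two cubic centimetres expressed in cubic metres.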
def get_multiplier(self):
"""Return the multiplier of this units reference, as a float."""
return float(getattr(self, u'multiplier', 1))
def get_exponent(self):
"""Return the exponent of this units reference, as a float."""
return float(getattr(self, u'exponent', 1))
def get_offset(self):
"""Return the offset of this units reference, as a float."""
return float(getattr(self, u'offset', 0))
def clone(self):
"""Clone this object.
Return a new <unit> element that has the same attributes as this
one.
"""
attrs = {}
for apyname, aname in self.xml_attributes.iteritems():
attrs[aname] = getattr(self, apyname)
new = self.xml_create_element(u'unit', NSS[u'cml'], attributes=attrs)
if self._cml_units_obj:
new._set_units_element(self._cml_units_obj)
new.xml_parent = self.xml_parent
return new
class EvaluationError(Exception):
"""
Exception class for errors raised trying to evaluate a MathML expression.
"""
pass
class MathsError(Exception):
"""
Exception class for validation errors raised while checking mathematics.
"""
def __init__(self, context_obj, message, warn=False, level=None):
"""Create a mathematics validation error.
context_class should be the object that is reporting the error.
message gives more details on what went wrong.
If warn is set to true then produce a warning message, not an error.
level gives the level of the message logged.
"""
self.message = message
self.warn = warn
self.level = level or (logging.ERROR,logging.WARNING)[warn]
self.show_xml_context = False
self._show_xml_context_only = False
self.cname = context_obj.component.name
self.ename = context_obj.localName
self.context = context_obj
# Nicer context explanation
if isinstance(context_obj, cellml_variable):
self.expr_index = self.math_index = 0
self.reaction_spec = ''
return
expr_root = context_obj.xml_xpath(u'ancestor-or-self::*[local-name(..)="math"]')[0]
self.expr_index = self.math_index = 1
math = expr_root.xml_parent
for elt in math.xml_element_children():
if elt is expr_root: break
if elt.localName in [u'apply', u'semantics']:
self.expr_index += 1
for elt in math.xml_element_children(math.xml_parent):
if elt is math: break
if elt.localName == u'math':
self.math_index += 1
# Is this in a reaction?
vref = context_obj.xml_xpath(u'ancestor::cml:variable_ref')
if vref:
self.reaction_spec = ' in mathematics for variable "%s" in a reaction' % vref[0].variable
else:
self.reaction_spec = ''
return
def show_xml_context_only(self):
"""Only show the XML where the error occurred."""
self.show_xml_context = True
self._show_xml_context_only = True
def __str__(self):
msg = unicode(self)
return msg.encode('UTF-8')
def ordinal(self, i):
"Convert integer i to an ordinal string."
        if i % 100 // 10 == 1: suf = 'th'
elif i % 10 == 1: suf = 'st'
elif i % 10 == 2: suf = 'nd'
elif i % 10 == 3: suf = 'rd'
else: suf = 'th'
return "%d%s" % (i, suf)
def _generate_message(self, where):
if self.warn: type = 'Warning'
else: type = 'Error'
msg = [type, ' ', where, ': ', self.message]
if not self._show_xml_context_only:
msg.extend(['\n Context: ', self.ordinal(self.expr_index),
' expression in the ', self.ordinal(self.math_index),
' math element', self.reaction_spec,
' in component ', self.cname, '\n XPath: ',
element_xpath(self.context)])
msg = u''.join(msg)
if self.show_xml_context:
# Return the context XML tree as well.
xml = self.context.xml(indent = u'yes',
omitXmlDeclaration = u'yes')
msg = msg + u'\n' + unicode(xml, encoding='UTF-8')
return msg
def __unicode__(self):
return self._generate_message('checking mathematics')
class UnitsError(MathsError):
"""
Exception class for validation errors raised during units checking
of mathematics.
"""
def __init__(self, context_obj, message, warn=False, level=None):
"""Create a units validation error.
        context_obj should be the object that is reporting the error.
message gives more details on what went wrong.
If warn is set to true then produce a warning message, not an error.
level gives the level of the message logged.
"""
MathsError.__init__(self, context_obj, message, warn=warn, level=level)
return
def __unicode__(self):
return self._generate_message('checking units')
def child_i(elt, i):
"Return the i'th child element of elt. Indexing starts at 1."
j = 0
for e in elt.xml_children:
if getattr(e, 'nodeType', None) == Node.ELEMENT_NODE:
j += 1
if j == i: return e
else:
# Not found :(
raise ValueError("<"+elt.localName+"> does not have "+str(i)+
" child element(s).")
def _child1(elt):
"Get the first child element of elt."
return child_i(elt, 1)
######################################################################
# MathML elements #
######################################################################
class mathml_units_mixin(object):
"""Base class for units mixin classes."""
def _add_units_conversion(self, expr, defn_units, to_units, no_act=False):
"""Add mathematics for an explicit units conversion.
Wraps expr in the expression
m[to_units/defn_units]*(expr-o1[defn_units]) + o2[to_units].
"""
# print '_add_units_conv for', element_xpath(expr), 'from', defn_units.description(), 'to', to_units.description()
if hasattr(expr.model, '_cml_special_units_converter') and not defn_units.dimensionally_equivalent(to_units):
# This may be a special conversion case defined by a functional curation protocol
if no_act:
                expr.model._cml_conversions_needed = True
return
else:
expr = expr.model._cml_special_units_converter(expr, defn_units, to_units)
# print 'post special, expr units=', expr.get_units().description(), 'm=', expr.get_units().extract().expand().simplify().get_multiplicative_factor()
# print 'orig defn_units=', defn_units.description(), 'm=', defn_units.expand().simplify().get_multiplicative_factor()
# print 'orig to_units=', to_units.description(), 'm=', to_units.expand().simplify().get_multiplicative_factor()
try:
defn_units = expr.get_units().extract(check_equality=True)
except:
print 'ouch', expr.xml()
for u in expr.get_units():
print u.description(), u.get_multiplier(), expr.get_units()._get_sources(u)
raise
defn_units_exp = defn_units.expand().simplify()
to_units_exp = to_units.expand().simplify()
# Conversion factor
m = (defn_units_exp.get_multiplicative_factor() / to_units_exp.get_multiplicative_factor())
# Replace expr by m[to_units/defn_units]*(expr-o1[defn_units]) + o2[to_units]
orig_expr, parent = expr, expr.xml_parent
dummy = expr.xml_create_element(u'dummy', NSS[u'm'])
model = expr.model # So we still have a reference after the next line
parent.replace_child(expr, dummy) # Mark where to put the new elt
if defn_units_exp.get_offset() != 0:
# Create expr-o1 expression
uattr = orig_expr._ensure_units_exist(defn_units, no_act=no_act)
new_expr = mathml_apply.create_new(expr, u'minus',
[expr, (unicode(defn_units_exp.get_offset()), uattr)])
new_expr._cml_units = defn_units
expr = new_expr
if m != 1:
quotient_units = to_units.quotient(defn_units)
# Add units element to model if needed
uattr = orig_expr._ensure_units_exist(quotient_units, no_act=no_act)
# Create m*expr expression
new_expr = mathml_apply.create_new(expr, u'times', [(unicode(m), uattr), expr])
new_expr._cml_units = to_units
expr = new_expr
if to_units_exp.get_offset() != 0:
# Create expr+o2 expression
uattr = orig_expr._ensure_units_exist(to_units, no_act=no_act)
new_expr = mathml_apply.create_new(expr, u'plus',
[expr, (unicode(to_units_exp.get_offset()), uattr)])
new_expr._cml_units = to_units
expr = new_expr
# Note that the model needed conversions
if expr is not orig_expr:
model._cml_conversions_needed = True
if no_act:
expr = orig_expr
parent.replace_child(dummy, expr)
return
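    # Worked sketch (illustrative only): converting an expression defined in
    # millivolts to volts.  The expanded factors give
    #   m = (10**-3) / (10**0) = 0.001
    # and both offsets are zero, so only the m != 1 branch above fires,
    # wrapping expr as
    #   <apply><times/><cn cml:units="[quotient units]">0.001</cn> expr </apply>
    # With offset units (e.g. a hypothetical Celsius/Fahrenheit pair) all
    # three branches apply, building m*(expr - o1) + o2 as documented.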
def _set_element_in_units(self, elt, units, no_act=False):
"""Try to set the units of the given element.
Generates a debug message if this isn't possible.
"""
if hasattr(elt, '_set_in_units') and callable(elt._set_in_units):
elt._set_in_units(units, no_act)
elif elt.localName in [u'false', u'true']:
boolean = self.component.get_units_by_name('cellml:boolean')
if boolean is not units:
# TODO: *blink* this should never happen
self._add_units_conversion(elt, boolean, units, no_act)
elif elt.localName in [u'notanumber', u'pi', u'infinity', u'exponentiale']:
dimensionless = self.component.get_units_by_name('dimensionless')
if dimensionless is not units:
# TODO: *blink* this should never happen
self._add_units_conversion(elt, dimensionless, units, no_act)
else:
DEBUG('validator',
'Cannot set units (to', units.description(), ') for element', elt.localName)
return
class mathml_units_mixin_tokens(mathml_units_mixin):
"""Contains the _set_in_units method for ci, cn, etc."""
def _set_in_units(self, units, no_act=False):
"""Set the units this element should be expressed in.
Where these aren't the units it's defined in, replace self by suitable units conversion mathematics.
"""
defn_units = self.get_units(return_set=False)
if defn_units != units:
self._add_units_conversion(self, defn_units, units, no_act)
# Store the units
if not no_act:
self._cml_units = units
return
class mathml_units_mixin_set_operands(mathml_units_mixin):
def _set_in_units(self, units, no_act=False):
"""Set the units of the application of this operator.
The default behaviour for many operators is to simply set all operands to have the given units.
"""
# TODO: Do the conversion at this level sometimes rather than pushing it down the tree?
app = self.xml_parent
# We need to convert the operands to a list, because the tree
# may be modified if a conversion is thought to be needed
for operand in list(app.operands()):
self._set_element_in_units(operand, units, no_act)
# And set our units to what they now are
if not no_act:
app._cml_units = units
return
class mathml_units_mixin_equalise_operands(mathml_units_mixin):
def _set_in_units(self, units, no_act=False):
"""Set the units of the application of this operator.
This method is used for the relational operators. It ignores the
given units, and instead ensures that all operands have the same
units. The units it chooses are those that are 'least' amongst the
possibilities for the operand units, i.e. that have the smallest
multiplicative factor when expanded.
"""
app = self.xml_parent
min_factor, operand_units = None, None
for us in app._get_operand_units():
if isinstance(us, cellml_units):
us = [us]
for u in us:
f = u.expand().get_multiplicative_factor()
                if min_factor is None or f < min_factor:
min_factor = f
operand_units = u
# We need to convert the operands to a list, because the tree
# may be modified if a conversion is thought to be needed
for operand in list(app.operands()):
# TODO: Modify tree to collect same conversions together?
self._set_element_in_units(operand, operand_units, no_act)
# And set our units to what they now are
if not no_act:
app._cml_units = units
return
class mathml_units_mixin_choose_nearest(mathml_units_mixin):
def _set_in_units(self, desired_units, no_act=False):
"""Set the units of the application of this operator.
This mixin is used for the <times> and <divide> operators.
There are 2 possible strategies here. One is to pick one of the
operands and convert it so that the overall units match those
required. The other is to pick units from the set of those
possible for this application, and convert the result to the
desired units. We go with the latter option, picking the units
        that are least in the sense that they have the least multiplicative
factor, but where possible that factor is no less than that on the
desired units.
"""
app = self.xml_parent
min_factor, best_factor = None, None
least_units, best_units = None, None
desired_factor = desired_units.expand().get_multiplicative_factor()
DEBUG('validator', '>',self.localName,':',desired_factor,
desired_units.description())
units_set = app.get_units().get_consistent_set(desired_units)
for possible_units in units_set:
f = possible_units.expand().get_multiplicative_factor()
if min_factor is None or f<min_factor:
least_units, min_factor = possible_units, f
if f >= desired_factor and (best_factor is None or f < best_factor):
best_units, best_factor = possible_units, f
if best_units is None:
# All factors were less than that desired, so just go with the least
units = least_units
else:
units = best_units
DEBUG('validator', '\t<-',
units.expand().get_multiplicative_factor(),
units.description())
# Add units conversion code
app._add_units_conversion(app, units, desired_units, no_act)
# Set the operand units
for src_units_set, src_units in app.get_units()._get_sources(units):
expr = src_units_set.get_expression()
DEBUG('validator', '#',self.localName,':',
src_units.description(),expr.localName)
self._set_element_in_units(expr, src_units, no_act)
# Record which units we used
if not no_act:
app._cml_units = units
return
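    # Sketch (illustrative only): if desired_units is millivolts
    # (factor 10**-3) and the consistent set offers volts (1), millivolts
    # (10**-3) and microvolts (10**-6), then microvolts is 'least' but its
    # factor falls below the desired one, so best_units = millivolts wins;
    # converting millivolts to millivolts then degenerates to a no-op.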
class mathml_units_mixin_container(mathml_units_mixin):
def _set_in_units(self, units, no_act=False):
"""Set the units of this element.
For container elements, we set the units of the child(ren).
"""
# We need to copy the children list, because the tree
# may be modified if a conversion is thought to be needed
for elt in self.xml_children[:]:
if getattr(elt, 'nodeType', None) == Node.ELEMENT_NODE:
self._set_element_in_units(elt, units, no_act)
# And set our units to what they now are
if not no_act:
self._cml_units = units
return
class mathml(element_base):
"""Base class for MathML elements."""
def __init__(self):
super(mathml, self).__init__()
self._cml_component = None
self._cml_model = None
self._cml_source_expr_for_clone = None
return
def __repr__(self):
return '<%s (%s) at 0x%x>' % (self.__class__.__name__, str(self), id(self))
def __deepcopy__(self, memo):
"""Customise copying of MathML expressions.
When copying an expression tree, we only want to deepcopy the
XML, not the additional info that we have attached to it -
these should be copied as references to the originals.
"""
new_elt = copy.copy(self)
# Children may refer to us, so need to update memo before copying children
assert id(self) not in memo
memo[id(self)] = new_elt
new_dict = {}
for name, value in self.__dict__.iteritems():
name_copy = copy.deepcopy(name, memo)
if not name.startswith('_cml'):
new_dict[name_copy] = copy.deepcopy(value, memo)
else:
new_dict[name_copy] = value
new_elt.__dict__.update(new_dict)
return new_elt
@staticmethod
def clone(expr):
"""Properly clone a MathML sub-expression.
Makes sure siblings and parent don't get copied too.
"""
# print "Cloning MathML", prid(expr, True)
next_elem, par = expr.next_elem, getattr(expr, 'xml_parent', None)
expr.next_elem = None # Make sure we don't copy siblings...
expr.xml_parent = None # ...and parent
new_expr = copy.deepcopy(expr) # Do the clone
expr.next_elem = next_elem # Restore siblings...
expr.xml_parent = par # ...and parent to original
return new_expr
def clone_self(self, register=False):
"""Properly clone this expression.
If register is True, then keep a link to this expression in the clone.
"""
clone = mathml.clone(self)
if register:
clone._cml_source_expr_for_clone = self
return clone
def get_original_of_clone(self):
"""
If this is a clone with a registered original expression, return it.
Otherwise returns None.
"""
return self._cml_source_expr_for_clone
def get_component(self):
"Cache & return the enclosing component element."
if self._cml_component is None:
def get_ancestor(elt, name):
while elt and elt.localName != name:
elt = elt.parentNode
return elt
comp = get_ancestor(self, u'component')
if comp:
self._cml_component = comp
else:
# It may be in the solver_info section, in which case fake a component
solver_info = get_ancestor(self, u'solver_info')
if solver_info:
self._cml_component = self.model.get_component_by_name(u'')
else:
raise ValueError("MathML element " + str(self) + " does not occur in a model component!")
return self._cml_component
component = property(get_component)
def _unset_cached_links(self, elt=None):
"""Forget cached component and variable references in this MathML tree.
Used by partial evaluator when moving maths to a new component, and by
simulation protocols.
"""
if elt is None:
elt = self
if isinstance(elt, mathml):
elt._cml_component = None
for child in self.xml_element_children(elt):
if hasattr(child, '_unset_cached_links'):
child._unset_cached_links()
else:
self._unset_cached_links(child)
return
@property
def model(self):
"""Cache & return the enclosing model element."""
if self._cml_model is None:
self._cml_model = self.rootNode.model
return self._cml_model
def eval(self, elt):
"""Evaluate the given element.
Tries to evaluate the given element, and raises an EvaluationError
if this is not possible.
"""
if hasattr(elt, 'evaluate') and callable(elt.evaluate):
return elt.evaluate()
elif elt.localName == u'pi':
return math.pi
else:
# No evaluate() method on elt
raise EvaluationError("Don't know how to evaluate element " +
elt.localName)
return
def _ensure_units_exist(self, units=None, no_act=False):
"""Ensure that there is an element in the XML tree giving this
expression's units.
Add a new <units> element if this expression has generated units.
If units is not None, use the given units rather than those of
this expression.
Return an attribute dictionary with the appropriate units
attribute."""
if no_act:
            # Doesn't matter what we return, as it won't be used
return {(u'cml:units', NSS[u'cml']): u'#UNUSED#'}
try:
if units is None:
units = self.get_units().extract()
## _u = units
units = self.model._get_units_obj(units)
if units._cml_generated and units.name[:3] == "___":
## print "Adding",units.name, hash(units), units.description(),
## print "(was",id(_u),"now",id(units),")"
# Ensure referenced units exist
for unit in getattr(units, u'unit', []):
self._ensure_units_exist(unit.get_units_element())
unit.units = unit.get_units_element().name
# Rename units and add to XML tree
msg = "Adding units " + units.name + " as "
units.name = units.description(cellml=True)
msg = msg + units.name
DEBUG('partial-evaluator', msg.encode('UTF-8'))
if units.name == units.unit.units:
# Uh-oh
DEBUG('partial-evaluator', 'Generated units',
units.name, 'identical to referenced units; ignoring.')
assert units.get_multiplicative_factor() == 1
assert units.get_offset() == 0
else:
units.xml_parent.add_units(units.name, units)
units.xml_parent.xml_append(units)
attrs = {(u'cml:units', NSS[u'cml']): units.name}
except UnitsError:
# Hack to allow PE on broken (wrt units) models
attrs = {(u'cml:units', NSS[u'cml']): u'#FUDGE#'}
return attrs
def varobj(self, ci_elt):
"""Return the variable object for the given ci element.
This method is more general than ci_elt.variable, working even
for ci elements outside of a component. Such elements *must*
contain a fully qualified variable name, i.e. including the
name of the component the variable occurs in. This method
handles a variety of encodings of variable names that contain
the component name.
"""
try:
var = ci_elt.variable
except:
varname = unicode(ci_elt).strip()
var = cellml_variable.get_variable_object(self.model, varname)
return var
def vars_in(self, expr):
"""Return a list of 'variable' objects used in the given expression.
This method doesn't make use of the dependency information
generated when validating the model, but parses the
mathematics afresh. It is used to refresh the dependency
lists after partial evaluation, and to determine dependencies
in mathematics added outside the normal model structure
(e.g. Jacobian calculation).
If an ODE appears, includes the mathml_apply instance defining
the ODE. Otherwise all returned objects will be
cellml_variable instances.
"""
res = set()
if isinstance(expr, mathml_ci):
res.add(self.varobj(expr))
elif isinstance(expr, mathml_apply) and \
expr.operator().localName == u'diff':
dep_var = self.varobj(expr.ci)
indep_var = self.varobj(expr.bvar.ci)
res.add(dep_var.get_ode_dependency(indep_var))
elif hasattr(expr, 'xml_children'):
for child in expr.xml_children:
res.update(self.vars_in(child))
return res
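    # Usage sketch (hypothetical model): for an ODE d(V)/d(time) = rhs, the
    # <apply><diff/>...</apply> branch above returns the mathml_apply that
    # defines the ODE (via get_ode_dependency) rather than V itself, while
    # plain <ci> leaves contribute their cellml_variable objects.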
def same_tree(self, other, this=None):
"""Check whether this element represents the same tree as a given element."""
if this is None: this = self
equal = (this.localName == other.localName
and len(getattr(this, 'xml_children', [])) == len(getattr(other, 'xml_children', [])))
if equal and this.localName in [u'cn', u'ci']:
equal = unicode(this) == unicode(other)
if equal and hasattr(this, 'xml_children'):
for tc, oc in zip(self.xml_element_children(this), self.xml_element_children(other)):
if not self.same_tree(oc, tc):
equal = False
break
return equal
def _xfer_complexity(self, new_elt):
"""Transfer our complexity to the new element.
PE is replacing us by a new element. If we are annotated with
a complexity - the complexity of this expression prior to PE -
then transfer the annotation to the new element.
"""
try:
new_elt._cml_complexity = self._cml_complexity
except AttributeError:
pass
return
def _adjust_complexity(self, old_elt, new_elt):
"""Adjust ancestor complexity because old_elt changed to new_elt.
The purpose of this method is to allow us to keep track of
what the complexity of each expression node *was* prior to PE
being performed. Thus we cannot just re-compute complexities,
but must update values using the original complexities. If a
variable definition is instantiated, then the complexity of
the expression containing the lookup must be adjusted to
reflect the additional expense of evaluating the defining
expression.
When this method is called, only new_elt is a child of self.
"""
#print "Adjusting", element_xpath(self), "due to", element_xpath(old_elt),
#if isinstance(old_elt, mathml_ci):
# print unicode(old_elt)
#else:
# print
try:
new = new_elt._cml_complexity
old = old_elt._cml_complexity
except AttributeError:
return
#print " by", new-old
elt = self
while elt:
if isinstance(elt, mathml_piecewise):
# Piecewise is tricky to adjust! So we 're-compute' instead.
ac, piece_ac = 0, []
for piece in getattr(elt, u'piece', []):
ac += child_i(piece, 2)._cml_complexity
piece_ac.append(child_i(piece, 1)._cml_complexity)
if hasattr(elt, u'otherwise'):
piece_ac.append(child_i(elt.otherwise, 1)._cml_complexity)
ac += max(piece_ac)
elt._cml_complexity = ac
elif hasattr(elt, '_cml_complexity'):
elt._cml_complexity += (new - old)
elt = getattr(elt, 'xml_parent', None)
return
def classify_child_variables(self, elt, **kwargs):
"""Classify variables in the given expression according to how they are used.
In the process, compute and return a set of variables on which that expression depends.
If dependencies_only then the variable classification will not be
done, only dependencies will be analysed. This is useful for doing
a 'light' re-analysis if the dependency set has been reduced; if the
set has increased then the topological sort of equations may need to
be redone.
The function needs_special_treatment may be supplied to override the
default recursion into sub-trees. It takes a single sub-tree as
argument, and should either return the dependency set for that
sub-tree, or None to use the default recursion. This is used when
re-analysing dependencies after applying lookup tables, since table
lookups only depend on the keying variable.
"""
if hasattr(elt, 'classify_variables'):
dependencies = elt.classify_variables(**kwargs)
else:
dependencies = set()
needs_special_treatment = kwargs.get('needs_special_treatment', lambda e: None)
for e in elt.xml_element_children():
child_deps = needs_special_treatment(e)
if child_deps is None:
child_deps = self.classify_child_variables(e, **kwargs)
dependencies.update(child_deps)
return dependencies
class mathml_math(mathml):
def __init__(self):
super(mathml_math, self).__init__()
return
class mathml_constructor(mathml):
"""
Base class for MathML constructor elements, e.g. apply and piecewise.
"""
def __init__(self):
super(mathml_constructor, self).__init__()
return
def _tree_complexity(self, elt, **kw):
"""
Calculate a rough estimate of the computation time for
evaluating the given element.
If lookup_tables is True, then assume we're using lookup tables
where possible.
If store_result is True, the complexity is saved to the
_cml_complexity attribute.
If algebraic is True, the complexity is calculated as a dictionary,
        mapping node types to the number of occurrences of that type.
"""
kw['lookup_tables'] = kw.get('lookup_tables', False)
kw['store_result'] = kw.get('store_result', False)
kw['algebraic'] = kw.get('algebraic', False)
if kw['algebraic']:
ac = {}
if kw['lookup_tables'] and \
elt.getAttributeNS(NSS['lut'], u'possible', u'no') == u'yes':
# This elt will be replaced by a lookup table
if hasattr(self.rootNode, 'num_lookup_tables'):
# Count the number of used lookup tables
self.rootNode.num_lookup_tables += 1
# Cost per table: 2 lookups, 2 +, -, *, 3 ci
if kw['algebraic']:
ac['lookup'] = 2
ac['op'] = 3
ac['times'] = 1
ac['variable'] = 3
else:
ac = 2*1 + 2*1 + 1 + 1 + 3*0.7
elif hasattr(elt, 'tree_complexity') \
and callable(elt.tree_complexity):
ac = elt.tree_complexity(**kw)
elif elt.localName in ['true', 'false', 'cn', 'exponentiale', 'pi']:
if kw['algebraic']: ac['constant'] = 1
else: ac = 0.5
elif elt.localName == 'ci':
if kw['algebraic']: ac['variable'] = 1
else: ac = 0.7
elif elt.localName in ['degree', 'logbase']:
ac = self._tree_complexity(child_i(elt, 1), **kw)
else:
raise EvaluationError("Don't know complexity of element " +
elt.localName)
if kw['store_result'] and isinstance(elt, mathml):
elt._cml_complexity = ac
return ac
def _get_element_binding_time(self, elt):
"""Helper method to get the binding time of a MathML element."""
if hasattr(elt, '_get_binding_time'):
# Call the element's method
return elt._get_binding_time()
elif elt.localName in [u'true', u'false', u'exponentiale', u'pi']:
return BINDING_TIMES.static
else:
raise EvaluationError("Don't know how to compute binding time"
" of element " + elt.localName)
def _get_element_units(self, elt, return_set=True):
"""Helper method to get the units of a MathML element."""
if hasattr(elt, 'get_units'):
# Element has a method to get its units, so call it
u = elt.get_units()
elif hasattr(elt, '_cml_units') and elt._cml_units:
# Units have been cached
u = elt._cml_units
else:
# Let's figure it out ourselves...
if elt.localName in [u'false', u'true']:
u = UnitsSet(
[self.component.get_units_by_name('cellml:boolean')],
expression=elt)
elif elt.localName in [u'notanumber', u'pi', u'infinity',
u'exponentiale']:
u = UnitsSet(
[self.component.get_units_by_name('dimensionless')],
expression=elt)
else:
# Unknown or unexpected element
raise UnitsError(self, u''.join([
u'Unsupported element "', elt.localName, '".']))
if return_set:
assert isinstance(u, UnitsSet)
else:
u = u.extract()
return u
def _reduce_elt(self, elt):
"""Try to reduce the given element.
Call the _reduce method on elt, if it has one.
If not, do nothing (we assume elt cannot be reduced).
"""
if hasattr(elt, '_reduce') and callable(elt._reduce):
elt._reduce()
else:
DEBUG('partial-evaluator', "Don't know how to reduce",
elt.localName)
return
def _eval_self(self):
"""Evaluate self and return <cn>, <true> or <false>, as appropriate."""
value = self.evaluate()
if value is True:
new_elt = self.xml_create_element(u'true', NSS[u'm'])
elif value is False:
new_elt = self.xml_create_element(u'false', NSS[u'm'])
else:
# Add a new <units> element to the document if needed
attrs = self._ensure_units_exist()
new_elt = self.xml_create_element(u'cn', NSS[u'm'],
content=unicode("%.17g" % value),
attributes=attrs)
return new_elt
def _update_usage_counts(self, expr, remove=False):
"""Update usage counts of variables used in the given expression.
        By default, increment the usage count of any variable occurring
in a <ci> element within expr. If remove is set to True,
then decrement the usage counts instead.
"""
if isinstance(expr, mathml_ci):
if remove:
expr.variable._decrement_usage_count()
else:
                raise NotImplementedError("_update_usage_counts currently only reliable for remove=True")
                # Unreachable until the non-remove case is implemented:
                #expr.variable._used()
elif isinstance(expr, mathml_apply) and isinstance(expr.operator(),
mathml_diff):
# TODO: Check if this is a suitable handling of ODEs on a RHS
# It matches the current behaviour in apply.classify_variables.
pass
else:
for e in self.xml_element_children(expr):
self._update_usage_counts(e, remove=remove)
class mathml_cn(mathml, mathml_units_mixin_tokens):
def __init__(self):
super(mathml_cn, self).__init__()
self._cml_units = None
return
def evaluate(self):
"""
Convert the text content of this element to a floating point
value and return it. Will handle the type attribute and, if
relevant to the type, the sep child element, but does not yet
handle the base attribute.
"""
if hasattr(self, u'base'):
raise ValueError('pycml does not yet support the base attribute on cn elements')
if hasattr(self, u'type'):
if self.type == u'real':
val = float(unicode(self))
elif self.type == u'integer':
val = int(unicode(self))
elif self.type == u'e-notation':
assert len(self.xml_children) == 3
assert self.xml_children[1] is self.sep
mantissa = unicode(self.xml_children[0]).strip()
exponent = unicode(self.xml_children[2]).strip()
val = float(mantissa + 'e' + exponent)
elif self.type == u'rational':
assert len(self.xml_children) == 3
assert self.xml_children[1] is self.sep
numer = int(unicode(self.xml_children[0]))
denom = int(unicode(self.xml_children[2]))
                val = float(numer) / denom  # Force true division under Python 2
else:
raise ValueError('Unsupported type attribute for cn element: '
+ self.type)
else:
val = float(unicode(self))
return val
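    # Sketch (illustrative only): the e-notation branch joins the two text
    # children around the <sep/> marker, so
    #   <cn type="e-notation">1.5<sep/>-3</cn>
    # evaluates to float('1.5e-3') == 0.0015, while
    #   <cn type="rational">3<sep/>4</cn>
    # evaluates to 3/4 == 0.75.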
def _get_binding_time(self):
"""Return the binding time of this expression.
The binding time of a <cn> element is always static,
unless the CellML is annotated otherwise.
"""
bt = self.getAttributeNS(NSS['pe'], u'binding_time', u'static')
return getattr(BINDING_TIMES, bt)
def _reduce(self):
"""Reduce this expression by evaluating its static parts.
Is actually a no-op; we must have been annotated explicitly as dynamic.
"""
return
def get_units(self, return_set=True):
"""Return the units this number is expressed in."""
if not self._cml_units:
self._cml_units = UnitsSet([self.component.get_units_by_name(self.units)], expression=self)
if not return_set:
u = self._cml_units.extract()
else:
if not isinstance(self._cml_units, UnitsSet):
self._cml_units = UnitsSet([self._cml_units], expression=self)
u = self._cml_units
return u
@staticmethod
def create_new(elt, value, units):
"""Create a new <cn> element with the given value and units."""
attrs = {(u'cml:units', NSS[u'cml']): unicode(units)}
new_elt = elt.xml_create_element(u'cn', NSS[u'm'],
attributes=attrs,
content=unicode(value))
return new_elt
class mathml_ci(mathml, mathml_units_mixin_tokens):
def __init__(self):
super(mathml_ci, self).__init__()
self._cml_variable = None
self._cml_units = None
return
def _unset_cached_links(self, elt=None):
"""Forget cached component and variable references in this MathML tree.
Used by partial evaluator when moving maths to a new component, and by
simulation protocols.
"""
self._cml_variable = None
super(mathml_ci, self)._unset_cached_links()
@property
def variable(self):
"""Cache & return the variable object refered to by this element."""
if self._cml_variable is None:
vname = unicode(self).strip()
self._rename(vname) # Remove excess whitespace from our text content
self._cml_variable = self.component.get_variable_by_name(vname)
return self._cml_variable
def _set_variable_obj(self, var):
"""Set the variable object referred to by this element."""
self._cml_variable = var
def get_units(self, return_set=True):
"""Return the units of the variable represented by this element."""
if not self._cml_units:
self._cml_units = UnitsSet([self.component.get_units_by_name(self.variable.units)],
expression=self)
if not return_set:
u = self._cml_units.extract()
else:
if not isinstance(self._cml_units, UnitsSet):
self._cml_units = UnitsSet([self._cml_units], expression=self)
u = self._cml_units
return u
def evaluate(self):
"""
Evaluate this expression by returning the value of the
variable it represents.
"""
return self.variable.get_value()
def _get_binding_time(self):
"""Return the binding time of this expression.
The binding time of a <ci> element is that of the variable it
represents.
"""
return self.variable._get_binding_time()
def _rename(self, new_name=None):
"""Update the variable reference to use a canonical name."""
self.xml_remove_child(unicode(self))
if new_name is None:
new_name = self.variable.fullname(cellml=True)
self.xml_append(unicode(new_name))
return
def _reduce(self):
"""Reduce this expression by evaluating its static parts.
If this is a static variable, replace by its value (as a <cn> element).
Otherwise the behaviour depends on the number of uses of this
variable. If there is only one, instantiate the definition of
this variable here in place of the <ci> element, otherwise
leave the element unchanged to avoid code duplication.
"""
bt = self._get_binding_time()
DEBUG('partial-evaluator', "Reducing", self.variable.fullname(),
"which is", bt)
if bt is BINDING_TIMES.static:
value = self.evaluate()
attrs = {(u'cml:units', NSS[u'cml']): self.variable.units}
cn = self.xml_create_element(u'cn', NSS[u'm'],
content=unicode("%.17g" % value),
attributes=attrs)
DEBUG('partial-evaluator', " value =", unicode(cn))
self._xfer_complexity(cn)
self.xml_parent.xml_insert_after(self, cn)
self.xml_parent.xml_remove_child(self)
self.variable._decrement_usage_count()
else:
defns = self.variable.get_dependencies()
if defns:
defn = defns[0]
else:
# Just need to update the name to be canonical - done in later pass
defn = None
if isinstance(defn, cellml_variable):
if self.variable.pe_keep:
# Don't remove this var, just reduce its source
DEBUG('partial-evaluator', "Keeping",
self.variable.fullname())
self.variable._reduce(update_usage=True)
else:
# Create a new <ci> element
ci = self.xml_create_element(
u'ci', NSS[u'm'], content=defn.fullname(cellml=True))
self._xfer_complexity(ci)
ci._set_variable_obj(defn)
DEBUG('partial-evaluator', " to", defn.fullname())
self.xml_parent.xml_insert_after(self, ci)
self.xml_parent.xml_remove_child(self)
# Decrement the usage count of just us, not source vars
self.variable._decrement_usage_count(follow_maps=False)
# May need to recurse down maps
ci._reduce()
elif isinstance(defn, mathml_apply):
if (not self.variable.pe_keep and
(self.variable.get_usage_count() == 1 or
self.rootNode.partial_evaluator.is_instantiable(defn))):
# defn is defining expression, so will be a MathML element already,
# and should be reduced already as well due to topological sort.
# Clone the RHS and instantiate it here.
rhs = mathml.clone(list(defn.operands())[1])
DEBUG('partial-evaluator', " to", rhs)
parent = self.xml_parent
parent.xml_insert_after(self, rhs)
parent.xml_remove_child(self)
parent._adjust_complexity(self, rhs)
# Flag the defining expression for removal
defn._pe_process = u'remove'
self.variable._decrement_usage_count()
elif defn is not None:
raise ValueError("Unexpected variable definition: " + defn.xml())
return
def classify_variables(self, dependencies_only=False,
needs_special_treatment=lambda n: None):
"""Classify variables in this expression according to how they are used.
For ci elements we just return a set containing the referenced variable
as the single dependency. If dependencies_only is False, we also mark
the variable as used.
"""
var = self.variable
if not dependencies_only:
var._used()
return set([var])
@staticmethod
def create_new(elt, variable_name):
"""Create a new <ci> element with the given variable name."""
new_elt = elt.xml_create_element(u'ci', NSS[u'm'],
content=unicode(variable_name))
return new_elt
class mathml_apply(Colourable, mathml_constructor, mathml_units_mixin):
QUALIFIERS = frozenset(('degree', 'bvar', 'logbase',
'lowlimit', 'uplimit', 'interval', 'condition',
'domainofapplication', 'momentabout'))
class OPS:
"""Classifications of operators."""
absRound = frozenset(('abs', 'floor', 'ceiling', 'rem'))
timesDivide = frozenset(('times', 'divide'))
plusMinus = frozenset(('plus', 'minus'))
trig = frozenset(('sin', 'cos', 'tan', 'sec', 'csc', 'cot',
'sinh', 'cosh', 'tanh', 'sech', 'csch', 'coth',
'arcsin', 'arccos', 'arctan',
'arcsec', 'arccsc', 'arccot',
'arcsinh', 'arccosh', 'arctanh',
'arcsech', 'arccsch', 'arccoth'))
elementary = frozenset(('exp', 'log', 'ln')).union(trig)
relations = frozenset(('eq', 'neq', 'gt', 'lt', 'geq', 'leq'))
logical = frozenset(('and', 'or', 'xor', 'not'))
def __init__(self):
super(mathml_apply, self).__init__()
self._cml_units = None
self.clear_dependency_info()
def clear_dependency_info(self):
"""Clear the type, dependency, etc. information for this equation.
This allows us to re-run the type & dependency analysis for the model."""
self._cml_binding_time = None
# Dependency graph edges
self._cml_depends_on = []
self._cml_assigns_to = None
self.clear_colour()
def get_dependencies(self):
"""Return the list of variables this expression depends on."""
return self._cml_depends_on
def operator(self):
"""Return the element representing the operator being applied."""
return _child1(self)
def _is_qualifier(self, element):
"""Return True iff element is a qualifier element."""
return element.localName in self.QUALIFIERS
def qualifiers(self):
"""
Return an iterable over the elements representing the qualifiers for
this application.
"""
quals = self.xml_element_children()
return filter(self._is_qualifier, quals)
def operands(self):
"""
Return an iterable over the elements representing the operands for
this application.
"""
# Get all element children and strip the first (the operator)
operands = self.xml_element_children()
operands.next()
# Now strip qualifiers from the front
return itertools.dropwhile(self._is_qualifier, operands)
def _get_operand_units(self):
"""
Return an iterable containing a <units> element for each operand
of this expression.
"""
for o in self.operands():
yield self._get_element_units(o)
def evaluate(self):
"""
Evaluate this expression, and return its value, if possible.
"""
# Result depends on the operator
op = self.operator()
if hasattr(op, 'evaluate') and callable(op.evaluate):
return op.evaluate()
else:
raise EvaluationError("Don't know how to evaluate the operator " + op.localName)
def tree_complexity(self, **kw):
"""
Calculate a rough estimate of the computation time for
evaluating this <apply> element.
Operates recursively, so the complexity of a function call is
given by summing the complexity of the arguments and the time
for evaluating the function itself.
If lookup_tables is True, then assume we're using lookup tables
where possible.
If algebraic is True, the complexity is calculated as a dictionary,
        mapping node types to the number of occurrences of that type.
"""
kw['algebraic'] = kw.get('algebraic', False)
if kw['algebraic']: ac = {}
else: ac = 0
# Complexity of this function
op_name = self.operator().localName
OPS = self.OPS
if op_name in OPS.plusMinus or op_name in OPS.logical or op_name in OPS.relations:
if kw['algebraic']: ac['op'] = (len(list(self.operands())) - 1)
else: ac += 1 * (len(list(self.operands())) - 1)
elif op_name in OPS.absRound:
if op_name == 'abs':
if kw['algebraic']: ac['abs'] = 1
else: ac += 5
else:
if kw['algebraic']: ac['round'] = 1
else: ac += 20
elif op_name in OPS.elementary:
if kw['algebraic']: ac['elementary'] = 1
else: ac += 70
elif op_name == 'times':
if kw['algebraic']: ac['times'] = (len(list(self.operands())) - 1)
else: ac += 1 * (len(list(self.operands())) - 1)
elif op_name == 'divide':
if kw['algebraic']: ac['divide'] = 1
else: ac += 15
elif op_name == 'power':
# This will vary depending on the exponent - gcc can optimise for 2 and 3, it seems.
exponent = list(self.operands())[1]
if exponent.localName == u'cn' and unicode(exponent).strip() in [u'2', u'3']:
if kw['algebraic']: ac['power2'] = 1
else: ac += 5
else:
if kw['algebraic']: ac['power'] = 1
else: ac += 30
elif op_name == 'root':
if kw['algebraic']: ac['root'] = 1
else: ac += 30
elif op_name == 'diff':
if kw['algebraic']: ac['variable'] = 1
else: ac += 0.7
else:
raise EvaluationError("Don't know complexity of operator " + op_name)
# Complexity of operands
for elt in self.operands():
if kw['algebraic']:
add_dicts(ac, self._tree_complexity(elt, **kw))
else: ac += self._tree_complexity(elt, **kw)
return ac
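    # Worked sketch (not from the original source): for
    #   <apply><power/><ci>V</ci><cn>2</cn></apply>
    # the operator costs 5 (the gcc-optimisable square case), the <ci>
    # operand 0.7 and the <cn> operand 0.5, so tree_complexity() == 6.2;
    # with algebraic=True the same tree gives
    #   {'power2': 1, 'variable': 1, 'constant': 1}.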
def _set_in_units(self, units, no_act=False):
"""Set the units this expression should be given in.
If these aren't our natural units (as given by an initial
get_units) then we need to add units conversion code.
"""
# First check we have something to do
current_units = self.get_units()
if units is current_units:
return
# Next, check if the required units can be achieved by suitable choices for operand units
done = False
if units in current_units:
# They can!
if not no_act:
self._cml_units = units
for src_units_set, src_units in current_units._get_sources(units):
expr = src_units_set.get_expression()
self._set_element_in_units(expr, src_units, no_act)
done = True
if not done and not no_act:
# Some operators need this to be a UnitsSet
self._cml_units = UnitsSet([units], self)
if not done:
# The behaviour now depends on the operator
op = self.operator()
if hasattr(op, '_set_in_units') and callable(op._set_in_units):
op._set_in_units(units, no_act)
else:
raise UnitsError(self, u' '.join([
"Don't know how to select units for operands of operator",
op.localName, "when its units are", units.description()]))
def get_units(self):
"""Recursively check this expression for dimensional consistency.
Checks that the operands have suitable units.
What constitutes 'suitable' depends on the operator; see appendix
C.3.2 of the CellML 1.0 spec.
If yes, returns a <units> element for the whole expression, based
on the rules in appendix C.3.3.
Throws a UnitsError if the units are inconsistent.
"""
if self._cml_units:
return self._cml_units
our_units = None
op = self.operator().localName
operand_units = self._get_operand_units()
# Tuples where second item is an index
operand_units_idx = itertools.izip(operand_units, itertools.count(1))
# Standard units objects
dimensionless = self.model.get_units_by_name(u'dimensionless')
boolean = self.model.get_units_by_name(u'cellml:boolean')
if op in self.OPS.relations | self.OPS.plusMinus:
our_units = operand_units.next().copy()
# Operands mustn't be booleans
if boolean in our_units:
raise UnitsError(self, u' '.join([
u'Operator',op,u'has boolean operands, which does not make sense.']))
# Operand units must be 'equivalent' (perhaps dimensionally)
for u in operand_units:
if not hasattr(self.model, '_cml_special_units_converter') and not our_units.dimensionally_equivalent(u):
raise UnitsError(self, u' '.join([
u'Operator',op,u'requires its operands to have',
u'dimensionally equivalent units;',u.description(),
u'and',our_units.description(),u'differ']))
our_units.update(u)
if op in self.OPS.relations:
# Result has cellml:boolean units
our_units = UnitsSet([boolean])
elif op in self.OPS.logical:
# Operand units must be cellml:boolean
for u, i in operand_units_idx:
if not boolean in u:
raise UnitsError(self, u' '.join([
u'Operator',op,u'requires operands to be booleans;',
u'operand',str(i),u'has units',u.description()]))
# Result has cellml:boolean units
our_units = UnitsSet([boolean])
elif op in self.OPS.elementary:
# Operand units must be dimensionless
for u, i in operand_units_idx:
if not u.dimensionally_equivalent(dimensionless):
raise UnitsError(self, u' '.join([
u'Operator',op,u'requires operands to be dimensionless;',
u'operand',str(i),u'has units',u.description()]))
if op == 'log':
# <logbase> qualifier must have units dimensionless
if hasattr(self, u'logbase'):
base = _child1(self.logbase)
u = self._get_element_units(base)
if not u.dimensionally_equivalent(dimensionless):
raise UnitsError(self, u' '.join([u'The logbase qualifier must have dimensionless',
u'units, not',u.description()]))
# Result has units of dimensionless
our_units = UnitsSet([dimensionless])
elif op == 'power':
# Arg1 : any, Arg2 : dimensionless
arg_units = operand_units.next()
if boolean in arg_units:
raise UnitsError(self, u'The argument of <power> should not be boolean')
exponent_units = operand_units.next()
if not exponent_units.dimensionally_equivalent(dimensionless):
raise UnitsError(self, u' '.join([u'The second operand to power must have dimensionless',
u'units, not',exponent_units.description()]))
# Result has units that are the units on the (first)
# operand raised to the power of the second operand. If
# units on the first operand are dimensionless, then so is
# the result.
# TODO: Check how we could allow equiv. to d'less, instead of equal.
# Need to consider any multiplicative factor...
if arg_units.equals(dimensionless):
our_units = UnitsSet([dimensionless])
else:
opers = self.operands()
opers.next()
# Make sure exponent is static
expt = opers.next()
if self._get_element_binding_time(expt) != BINDING_TIMES.static:
raise UnitsError(self, 'Unable to units check power with an exponent that can vary at run-time',
warn=True,
level=logging.WARNING_TRANSLATE_ERROR)
# Try to evaluate the exponent
try:
expt = self.eval(expt)
except EvaluationError, e:
raise UnitsError(self, u' '.join([u'Unable to evaluate the exponent of a power element:', unicode(e)]),
warn=True,
level=logging.WARNING_TRANSLATE_ERROR)
our_units = dimensionless.simplify(arg_units, expt)
elif op == 'root':
# Arg : any, <degree> : dimensionless
arg_units = operand_units.next()
if boolean in arg_units:
raise UnitsError(self, u'The argument of <root> should not be boolean')
if hasattr(self, u'degree'):
degree = _child1(self.degree)
u = self._get_element_units(degree)
if not u.dimensionally_equivalent(dimensionless):
raise UnitsError(self, u' '.join([
u'The degree qualifier must have dimensionless units, not',u.description()]))
else:
degree = 2.0 # Default is square root
# Result has units that are the units on the (first) operand
# raised to the power of the reciprocal of the value of the
# degree qualifier.
# TODO: If units on the first operand are dimensionless,
# then so is the result.
if not type(degree) is float:
# Make sure degree is static
if self._get_element_binding_time(degree) != BINDING_TIMES.static:
raise UnitsError(self, 'Unable to units check root with a degree that can vary at run-time',
warn=True,
level=logging.WARNING_TRANSLATE_ERROR)
try:
degree = self.eval(degree)
except EvaluationError, e:
raise UnitsError(self, u' '.join([u'Unable to evaluate the degree of a root element:', unicode(e)]),
warn=True,
level=logging.WARNING_TRANSLATE_ERROR)
our_units = dimensionless.simplify(arg_units, 1/degree)
elif op == 'diff':
# Arg : any, <bvar> : any, <degree> : dimensionless
arg_units = operand_units.next()
if boolean in arg_units:
raise UnitsError(self, u'The argument of <diff> should not be boolean')
if hasattr(self, u'bvar'):
if hasattr(self.bvar, u'degree'):
degree = _child1(self.bvar.degree)
u = self._get_element_units(degree)
if not u.dimensionally_equivalent(dimensionless):
raise UnitsError(self, u' '.join([
u'The degree qualifier must have dimensionless units, not',u.description()]))
else:
degree = 1.0 # Default is first derivative
else:
raise UnitsError(self, u'A diff operator must have a bvar qualifier')
# Result has units that are the quotient of the units of the
# operand, over the units of the term in the bvar qualifier
# raised to the value of the degree qualifier
if not type(degree) is float:
# Make sure exponent is static
if self._get_element_binding_time(degree) != BINDING_TIMES.static:
raise UnitsError(self, 'Unable to units check derivative with a degree that can vary at run-time',
warn=True,
level=logging.WARNING_TRANSLATE_ERROR)
try:
degree = self.eval(degree)
except EvaluationError, e:
raise UnitsError(self, u' '.join([u'Unable to evaluate the degree of a diff element:', unicode(e)]),
warn=True,
level=logging.WARNING_TRANSLATE_ERROR)
for e in self.xml_element_children(self.bvar):
if not e.localName == u'degree':
bvar_units = self._get_element_units(e)
break
else:
raise UnitsError(self, u'diff element does not have a valid bvar')
our_units = arg_units.simplify(bvar_units, -degree)
elif op in self.OPS.absRound | self.OPS.timesDivide:
# No restrictions on operand units, except that they shouldn't be boolean
for u in self._get_operand_units():
if boolean in u:
raise UnitsError(self, u' '.join([
u'Operator',op,u'has boolean operands, which does not make sense.']))
if op == 'times':
# Result has units that are the product of the operand units
our_units = operand_units.next().copy()
for u in operand_units:
our_units = our_units.simplify(other_units=u)
elif op == 'divide':
# Result has units that are the quotient of the units
# on the first and second operands
our_units = operand_units.next()
our_units = our_units.simplify(other_units=operand_units.next(), other_exponent=-1)
else:
# Result has same units as operands
our_units = operand_units.next().copy()
else:
# Warning: unsupported operator!
raise UnitsError(self, u' '.join([u'Unsupported operator for units checking:', op]),
warn=True,
level=logging.WARNING_TRANSLATE_ERROR)
# Cache & return result
if isinstance(our_units, cellml_units):
# Units conversion has been done, then PE
our_units = UnitsSet([our_units])
self._cml_units = our_units
our_units.set_expression(self)
return self._cml_units
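# Worked example (illustrative, assuming a hypothetical model): for
# <apply><plus/><ci>V</ci><cn cml:units="millivolt">5</cn></apply>
# the plusMinus branch above copies the units of V, checks that the
# 5 millivolt constant is dimensionally equivalent, and returns the
# merged UnitsSet; operands of volts and seconds would instead raise
# a UnitsError.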
def check_assigned_var(self):
"""Check the current component owns the variable being assigned to.
Should only be called if this object represents an assignment
expression. Checks that the variable being assigned to doesn't
have an interface value of 'in'. If this isn't a simple assignment
(i.e. the LHS isn't a plain ci element) then the check succeeds
automatically.
Adds to the model's error list if the check fails. This method
always returns None.
"""
# Check this is an application of eq
if self.operator().localName != u'eq':
raise MathsError(self, u'Top-level mathematics expressions should be assignment expressions.')
first_operand = self.operands().next()
if first_operand.localName == u'ci':
# We've already checked that the variable exists
var = first_operand.variable
for iface in [u'public_interface', u'private_interface']:
if getattr(var, iface, u'none') == u'in':
raise MathsError(self, u' '.join([
u'Variable', var.fullname(),
u'is assigned to in a math element, but has its',
iface, u'set to "in".']))
def classify_variables(self, root=False,
dependencies_only=False,
needs_special_treatment=lambda n: None):
"""
Classify variables in this expression according to how they are
used.
In the process, compute and return a set of variables on which
this expression depends. If root is True, store this set as a
list, to represent edges of a dependency graph.
Also, if root is True then this node is the root of an expression
(so it will be an application of eq); treat the LHS differently.
If dependencies_only then the variable classification will not be
done, only dependencies will be analysed. This is useful for doing
a 'light' re-analysis if the dependency set has been reduced; if the
set has increased then the topological sort of equations may need to
be redone.
The function needs_special_treatment may be supplied to override the
default recursion into sub-trees. It takes a single sub-tree as
argument, and should either return the dependency set for that
sub-tree, or None to use the default recursion. This is used when
re-analysing dependencies after applying lookup tables, since table
lookups only depend on the keying variable.
"""
dependencies = set()
ode_indep_var = None
op = self.operator()
if op.localName == u'diff':
# This is a derivative dy/dx on the RHS of an assignment.
# Store the dependency as a pair (y,x)
dependencies.add((op.dependent_variable, op.independent_variable))
if not dependencies_only:
# Set variable types
op._set_var_types()
else:
opers = self.operands()
if root:
# Treat the LHS of the assignment
lhs = opers.next()
if lhs.localName == u'ci':
# Direct assignment to variable
var = lhs.variable
var._add_dependency(self)
self._cml_assigns_to = var
if not dependencies_only:
# Check for possibly conflicting types
t = var.get_type()
if t == VarTypes.Constant or t == VarTypes.MaybeConstant:
self.model.validation_warning(
u' '.join([
u'Variable',var.fullname(),u'is assigned to',
u'and has an initial value set.']),
level=logging.WARNING_TRANSLATE_ERROR)
elif t == VarTypes.State or t == VarTypes.Free:
self.model.validation_warning(
u' '.join([
u'Variable',var.fullname(),u'is assigned to',
u'and appears on the LHS of an ODE.']),
level=logging.WARNING_TRANSLATE_ERROR)
var._set_type(VarTypes.Computed)
elif lhs.localName == u'apply':
# This could be an ODE
diff = lhs.operator()
if diff.localName == u'diff':
# It is an ODE. TODO: Record it somewhere?
if not dependencies_only:
diff._set_var_types()
dep = diff.dependent_variable
indep = diff.independent_variable
dep._add_ode_dependency(indep, self)
# An ODE should depend on its independent variable
ode_indep_var = indep
if not dependencies_only:
indep._used()
# TODO: Hack; may remove.
self._cml_assigns_to = (dep, indep)
else:
raise MathsError(self, u'Assignment statements are expected to be an ODE or assign to a variable.',
warn=True,
level=logging.WARNING_TRANSLATE_ERROR)
else:
raise MathsError(self, u'Assignment statements are expected to be an ODE or assign to a variable.',
warn=True,
level=logging.WARNING_TRANSLATE_ERROR)
# Consider operands other than the LHS of an assignment
for oper in opers:
dependencies.update(self.classify_child_variables(oper, dependencies_only=dependencies_only,
needs_special_treatment=needs_special_treatment))
if ode_indep_var:
# ODEs should depend on their independent variable.
# However, for code generation we wish to distinguish
# whether the independent variable appears on the RHS or
# not.
if ode_indep_var in dependencies:
self._cml_ode_has_free_var_on_rhs = True
else:
self._cml_ode_has_free_var_on_rhs = False
dependencies.add(ode_indep_var)
if root:
# Store dependencies
self._cml_depends_on = list(dependencies)
return dependencies
def is_top_level(self):
"""Test whether this is a top-level assignment expression."""
return self._cml_assigns_to is not None
def is_ode(self):
"""Return True iff this is the assignment of an ODE.
Only makes sense if called on a top-level assignment
expression, and checks if it represents an ODE, i.e. if the
LHS is a derivative.
"""
if not self.is_top_level():
return False
return type(self._cml_assigns_to) == types.TupleType
def is_assignment(self):
"""Return True iff this is a straightforward assignment expression.
Only makes sense if called on a top-level assignment expression.
Checks that this is *not* an ODE, but assigns to a single variable.
"""
if not self.is_top_level():
return False
return isinstance(self._cml_assigns_to, cellml_variable)
def assigned_variable(self):
"""Return the variable assigned to by this assignment.
Should only be called on a top-level assignment expression.
If it's a straightforward assignment (so self.is_assignment()
returns True) then return the cellml_variable object
representing the variable assigned to.
If it's an ODE, return a pair
(dependent variable, independent variable).
"""
if not self.is_top_level():
raise TypeError("not a top-level apply element")
else:
return self._cml_assigns_to
def _get_binding_time(self, check_operator=True):
"""Return the binding time of this expression.
The binding time will be computed recursively and cached.
It will also be made available as an attribute in the XML.
It is computed by taking the least upper bound of the binding
times of our operands, unless the operator possesses an
alternative method.
"""
if self._cml_binding_time is not None:
return self._cml_binding_time
# Do we have an annotation?
if hasattr(self, u'binding_time'):
self._cml_binding_time = getattr(BINDING_TIMES, self.binding_time)
return self._cml_binding_time
# Does operator have a specialised method for this?
op = self.operator()
if check_operator and hasattr(op, '_get_binding_time'):
self._cml_binding_time = op._get_binding_time()
else:
# Compute operand binding times
bts = [BINDING_TIMES.static]
for operand in self.operands():
bts.append(self._get_element_binding_time(operand))
# Take l.u.b.
self._cml_binding_time = max(bts)
# Annotate the element with the binding time
self.xml_set_attribute((u'pe:binding_time', NSS[u'pe']), unicode(self._cml_binding_time))
return self._cml_binding_time
def _reduce(self, check_operator=True):
"""Reduce this expression by evaluating its static parts."""
# Check to see if this operator requires a special
# reduction strategy
op = self.operator()
DEBUG('partial-evaluator', "Reducing", op.localName, getattr(self, u'id', ''))
if check_operator and hasattr(op, '_reduce'):
op._reduce()
else:
bt = self._get_binding_time()
if bt == BINDING_TIMES.static:
# Evaluate self and replace by a <cn>, <true> or <false>
new_elt = self._eval_self()
self._xfer_complexity(new_elt)
self.replace_child(self, new_elt, self.xml_parent)
# Update usage counts
self._update_usage_counts(self, remove=True)
elif bt == BINDING_TIMES.dynamic:
# Recurse into operands and reduce those
for op in self.operands():
self._reduce_elt(op)
return
@staticmethod
def create_new(elt, operator, operands=[], qualifiers=[]):
"""Create a new MathML apply element, with given content.
elt should be any element in the document.
operator is used as the name of the first, empty, child.
operands is a list, possibly empty, of operand elements. If
any member is a unicode object, it is considered to be the
name of a variable. If a tuple, then it should be a pair of
unicode objects: (number, units). (Although units can be an
attribute dictionary.)
qualifiers specifies a list of qualifier elements.
"""
app = elt.xml_create_element(u'apply', NSS[u'm'])
app.xml_append(app.xml_create_element(unicode(operator), NSS[u'm']))
for qual in qualifiers:
check_append_safety(qual)
app.xml_append(qual)
for op in operands:
if isinstance(op, basestring):
# Variable name
op = app.xml_create_element(u'ci', NSS[u'm'],
content=unicode(op))
elif isinstance(op, tuple):
# Constant with units
if isinstance(op[1], dict):
attrs = op[1]
else:
attrs = {(u'cml:units', NSS[u'cml']): unicode(op[1])}
op = app.xml_create_element(u'cn', NSS[u'm'],
attributes=attrs,
content=unicode(op[0]))
else:
# Should already be an element
check_append_safety(op)
app.xml_append(op)
return app
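# Hypothetical usage sketch (the names elt and V are assumptions, not
# from this module): given any element elt in a loaded model, V + 5 mV
# could be built as
#   expr = mathml_apply.create_new(elt, u'plus',
#                                  [u'V', (u'5', u'millivolt')])
# where the unicode operand names a variable and the tuple gives a
# constant with its units, as described in the docstring above.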
class mathml_piecewise(mathml_constructor, mathml_units_mixin):
def __init__(self):
super(mathml_piecewise, self).__init__()
self._cml_units = None
self._cml_binding_time = None
def tree_complexity(self, **kw):
"""
Calculate a rough estimate of the computation time for
evaluating this <piecewise> element.
The real evaluation time will generally depend on run time
data, which makes things tricky. Here we estimate by taking
the sum of the complexities of the conditions and the maximum
of the complexity of the cases, in order to obtain an upper
bound.
If lookup_tables is True, then assume we're using lookup tables
where possible.
If algebraic is True, the complexity is calculated as a dictionary,
mapping node types to the number of occurrences of that type.
If self.rootNode.num_lookup_tables exists, this method will
update the count of lookup tables based on this expression,
unless the argument 'count_tables' is False or algebraic is True.
"""
kw['algebraic'] = kw.get('algebraic', False)
alg = kw['algebraic']
if alg: ac, piece_dicts = {}, []
else: ac = 0
piece_acs = []
count_lts = hasattr(self.rootNode, 'num_lookup_tables') and kw.get('count_tables', True) and not alg
if count_lts:
# Alternative method of counting number of lookup tables;
# handles the Zhang model better!
num_lts = self.rootNode.num_lookup_tables
piece_num_lts = []
for piece in getattr(self, u'piece', []):
test_ac = self._tree_complexity(child_i(piece, 2), **kw)
if alg: add_dicts(ac, test_ac)
else: ac += test_ac
if count_lts:
nlts = self.rootNode.num_lookup_tables
piece_ac = self._tree_complexity(child_i(piece, 1), **kw)
if alg:
piece_dicts.append(piece_ac)
piece_acs.append(self._tree_complexity(child_i(piece, 1), count_tables=False))
else:
piece_acs.append(piece_ac)
if count_lts:
piece_num_lts.append(self.rootNode.num_lookup_tables - nlts)
if hasattr(self, u'otherwise'):
if count_lts:
nlts = self.rootNode.num_lookup_tables
ow_ac = self._tree_complexity(child_i(self.otherwise, 1), **kw)
if alg:
piece_dicts.append(ow_ac)
piece_acs.append(
self._tree_complexity(child_i(self.otherwise, 1), count_tables=False))
else:
piece_acs.append(ow_ac)
if count_lts:
piece_num_lts.append(self.rootNode.num_lookup_tables - nlts)
max_idx, max_piece_ac = max_i(piece_acs)
if alg:
add_dicts(ac, piece_dicts[max_idx])
else:
ac += max_piece_ac
if count_lts:
self.rootNode.num_lookup_tables -= sum(piece_num_lts)
self.rootNode.num_lookup_tables += piece_num_lts[max_idx]
return ac
def _set_in_units(self, units, no_act=False):
"""Set the units this expression should be given in.
This is done recursively by setting the units for each option.
We also set the units on each condition to be boolean, since
subexpressions of the conditions may need units conversions added.
"""
# First, record our units
if not no_act:
self._cml_units = units
# Now process our children
boolean = self.model.get_units_by_name(u'cellml:boolean')
for piece in getattr(self, u'piece', []):
self._set_element_in_units(child_i(piece, 1), units, no_act)
self._set_element_in_units(child_i(piece, 2), boolean, no_act)
if hasattr(self, u'otherwise'):
self._set_element_in_units(child_i(self.otherwise, 1), units, no_act)
def get_units(self):
"""Recursively check this expression for dimensional consistency.
The first child elements of each <piece> and <otherwise> element
should have dimensionally equivalent units (the resulting <units>
element will be dimensionally equivalent to these). The second child
elements of each <piece> should have units of cellml:boolean.
If consistent, returns a <units> element for the whole expression.
Throws a UnitsError if the units are inconsistent.
"""
if self._cml_units:
return self._cml_units
# Check the second child of each <piece> element
boolean = self.model.get_units_by_name(u'cellml:boolean')
for piece in getattr(self, u'piece', []):
cond_elt = child_i(piece, 2)
units = self._get_element_units(cond_elt)
if not boolean in units:
raise UnitsError(self, u' '.join([
u'The second child element of a <piece> element must have',
u'units of cellml:boolean, not', units.description()]))
# Compare the first child element of each <piece> and the <otherwise>,
# if present.
our_units = None
if hasattr(self, u'otherwise'):
value_elt = child_i(self.otherwise, 1)
our_units = self._get_element_units(value_elt).copy()
for piece in getattr(self, u'piece', []):
value_elt = child_i(piece, 1)
if our_units is None:
our_units = self._get_element_units(value_elt).copy()
else:
units = self._get_element_units(value_elt)
if not our_units.dimensionally_equivalent(units):
raise UnitsError(self, u' '.join([
u'The first child elements of children of a piecewise',
u'element must have dimensionally equivalent units;',
units.description(),'and',our_units.description(),
u'differ']))
our_units.update(units)
# Check that we have some units for this element
if our_units is None:
raise UnitsError(self, u' '.join([
u'A piecewise element must have at least one piece or',
u'otherwise child in order to have defined units.']))
# Cache & return units
self._cml_units = our_units
our_units.set_expression(self)
return self._cml_units
def classify_variables(self, dependencies_only=False,
needs_special_treatment=lambda n: None):
"""Classify variables in this expression according to how they are used.
In the process, compute and return a set of variables on which
this expression depends.
If dependencies_only then the variable classification will not be
done, only dependencies will be analysed. This is useful for doing
a 'light' re-analysis if the dependency set has been reduced; if the
set has increased then the topological sort of equations may need to
be redone.
The function needs_special_treatment may be supplied to override the
default recursion into sub-trees. It takes a single sub-tree as
argument, and should either return the dependency set for that
sub-tree, or None to use the default recursion. This is used when
re-analysing dependencies after applying lookup tables, since table
lookups only depend on the keying variable.
"""
dependencies = set()
pieces = list(getattr(self, u'piece', []))
if hasattr(self, u'otherwise'):
pieces.append(self.otherwise)
for piece in pieces:
dependencies.update(self.classify_child_variables(piece, dependencies_only=dependencies_only,
needs_special_treatment=needs_special_treatment))
return dependencies
def evaluate(self):
"""Evaluate this piecewise expression.
Tests choices in the order they occur in the file.
Only evaluates a choice if its condition evaluates to True.
"""
for piece in getattr(self, u'piece', []):
condition = child_i(piece, 2)
cond_value = self.eval(condition)
if cond_value is True:
# This is the option to take
value = self.eval(child_i(piece, 1))
break
else:
# Evaluate the <otherwise>
if hasattr(self, u'otherwise'):
value = self.eval(_child1(self.otherwise))
else:
raise EvaluationError(u' '.join([
"A piecewise element where the pieces aren't mutually",
"exhaustive requires an otherwise element."]))
return value
def _get_binding_time(self):
"""Return the binding time of this expression.
The binding time will be computed recursively and cached.
It will also be made available as an attribute in the XML.
It is computed by taking the least upper bound of the binding
times of (some of) the conditions and cases.
Condition & case binding times are computed in the order given
in the file. If a condition is static with value False, its
associated case is not considered. If a condition is static
with value True, subsequent conditions & cases and the
otherwise (if present) will not be considered.
"""
if self._cml_binding_time is not None:
return self._cml_binding_time
# Do we have an annotation?
if hasattr(self, u'binding_time'):
self._cml_binding_time = getattr(BINDING_TIMES,
self.binding_time)
return self._cml_binding_time
# Compute condition binding times
bts = [BINDING_TIMES.static]
for piece in getattr(self, u'piece', []):
condition = child_i(piece, 2)
bt = self._get_element_binding_time(condition)
if bt is BINDING_TIMES.static:
cond_value = self.eval(condition)
if cond_value is True:
# Compute BT for associated case
bts.append(self._get_element_binding_time(
child_i(piece, 1)))
# Skip remaining conditions & otherwise
break
else:
# Don't need to append extra statics, since bts
# contains at least one member that is static
bts.append(bt)
# Compute BT for associated case
bts.append(self._get_element_binding_time(
child_i(piece, 1)))
else:
# Consider the <otherwise> element
if hasattr(self, u'otherwise'):
bts.append(self._get_element_binding_time(
child_i(self.otherwise, 1)))
# Take least upper bound of appropriate binding times
self._cml_binding_time = max(bts)
# Annotate the element with the binding time
self.xml_set_attribute((u'pe:binding_time', NSS[u'pe']),
unicode(self._cml_binding_time))
return self._cml_binding_time
def _reduce(self, check_operator=True):
"""Reduce this expression by evaluating its static parts.
Even in a dynamic conditional, where a condition is static and
evaluates to False, the associated case is discarded.
"""
# Element to replace this <piecewise> with, if any
new_elt = None
if self._get_binding_time() == BINDING_TIMES.static:
# Evaluate self and replace by a <cn>, <true> or <false>
new_elt = self._eval_self()
elif self._get_binding_time() == BINDING_TIMES.dynamic:
# Go through pieces and reduce where appropriate
deletable_pieces = []
found_dynamic_piece = False
for piece in getattr(self, u'piece', []):
condition = child_i(piece, 2)
bt = self._get_element_binding_time(condition)
if bt is BINDING_TIMES.static:
cond_value = self.eval(condition)
if cond_value is True:
if not found_dynamic_piece:
# Replace the entire piecewise element by our case
# We don't replace if a previous piece had a
# dynamic condition, since this would change
# execution order, which could alter the semantics.
new_elt = child_i(piece, 1)
break
else:
# Discard this condition & case
deletable_pieces.append(piece)
elif bt is BINDING_TIMES.dynamic:
found_dynamic_piece = True
# Reduce the condition & case
self._reduce_elt(condition)
self._reduce_elt(child_i(piece, 1))
else:
# Didn't replace entire conditional
# Remove pieces with False conditions
for piece in deletable_pieces:
self._update_usage_counts(piece, remove=True)
self.xml_remove_child(piece)
# Consider the <otherwise> element
if hasattr(self, u'otherwise'):
if not found_dynamic_piece:
# All the <piece> elements were removed, so replace
# the entire conditional by this <otherwise>
new_elt = child_i(self.otherwise, 1)
else:
# Just reduce the <otherwise>
self._reduce_elt(child_i(self.otherwise, 1))
# Replace this element, if required
if new_elt is not None:
# Update usage counts for removed expressions
for piece in getattr(self, u'piece', []):
if not new_elt is child_i(piece, 1):
self._update_usage_counts(piece, remove=True)
else:
# Condition is being removed
self._update_usage_counts(child_i(piece, 2), remove=True)
piece.xml_remove_child(child_i(piece, 2))
piece.xml_remove_child(new_elt)
if hasattr(self, u'otherwise') and \
not new_elt is child_i(self.otherwise, 1):
self._update_usage_counts(child_i(self.otherwise, 1),
remove=True)
# Do the replace
self._xfer_complexity(new_elt)
self.replace_child(self, new_elt, self.xml_parent)
# May need to reduce our replacement
self._reduce_elt(new_elt)
@staticmethod
def create_new(elt, pieces, otherwise=None):
"""Create a new piecewise element.
elt is any element in the current document.
pieces is a list of pairs of expressions: (case, condition).
otherwise, if given, is the default case.
"""
piecewise = elt.xml_create_element(u'piecewise', NSS[u'm'])
for piece in pieces:
case, cond = piece
check_append_safety(case)
check_append_safety(cond)
piece_elt = elt.xml_create_element(u'piece', NSS[u'm'])
piece_elt.xml_append(case)
piece_elt.xml_append(cond)
piecewise.xml_append(piece_elt)
if otherwise:
check_append_safety(otherwise)
otherwise_elt = elt.xml_create_element(u'otherwise', NSS[u'm'])
otherwise_elt.xml_append(otherwise)
piecewise.xml_append(otherwise_elt)
return piecewise
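# Hypothetical usage sketch (element names are assumptions): building
# "x if x >= 0, otherwise -x" might look like
#   cond = mathml_apply.create_new(elt, u'geq', [u'x', (u'0', u'dimensionless')])
#   case = mathml_ci.create_new(elt, u'x')
#   neg = mathml_apply.create_new(elt, u'minus', [u'x'])
#   pw = mathml_piecewise.create_new(elt, [(case, cond)], otherwise=neg)
# Each (case, condition) pair is appended in that order, matching the
# MathML <piece> child ordering assumed by child_i(piece, 1) and
# child_i(piece, 2) elsewhere in this file.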
class mathml_lambda(mathml_constructor):
"""Class representing the MathML lambda construct.
Note that we don't support lambda occurring in CellML models. However, it is used
for defining special units conversion rules using the protocol syntax.
"""
@staticmethod
def create_new(elt, bound_var_names, body_expr):
"""Create a new lambda from the sequence of bvar names and expression."""
lambda_ = elt.xml_create_element(u'lambda', NSS[u'm'])
for bvar_name in bound_var_names:
bvar = elt.xml_create_element(u'bvar', NSS[u'm'])
bvar.xml_append(mathml_ci.create_new(elt, bvar_name))
lambda_.xml_append(bvar)
check_append_safety(body_expr)
lambda_.xml_append(body_expr)
return lambda_
class mathml_operator(mathml):
"""Base class for MathML operator elements."""
def wrong_number_of_operands(self, found, wanted):
"""Raise an EvaluationError due to wrong operand count.
found is the number of operands found; wanted is a list of suitable
numbers of operands.
"""
raise EvaluationError(u''.join([
"Wrong number of operands for <", self.localName, "> ",
"(found ", str(found), "; wanted", ' or '.join(map(str, wanted)), ")"]))
class mathml_diff(mathml_operator):
"""Class representing the diff element, containing some useful methods."""
@property
def independent_variable(self):
"""
Return the variable object w.r.t which we are differentiating.
"""
# Note that after units checking the <bvar> qualifier can be
# assumed to exist.
apply_elt = self.xml_parent
if not hasattr(apply_elt.bvar, u'ci'):
raise MathsError(apply_elt, u'Differential operator does not have a variable element as bound variable.')
return apply_elt.bvar.ci.variable
@property
def dependent_variable(self):
"""
Return the variable object being differentiated.
"""
apply_elt = self.xml_parent
operand = apply_elt.operands().next()
if not operand.localName == u'ci':
raise MathsError(apply_elt, u'Derivatives of non-variables are not supported.')
return operand.variable
def _set_var_types(self):
"""
Set the types of the dependent & independent variables: State for
the dependent variable and Free for the independent variable.
Gives a validation warning if they already have 'incompatible'
types.
"""
dep, indep = self.dependent_variable, self.independent_variable
model = self.xml_parent.model
# The dependent variable should have an initial value
if not dep.get_type() == VarTypes.Mapped and \
not hasattr(dep, u'initial_value'):
model.validation_warning(u' '.join([
u'The state variable',dep.fullname(),
u'does not have an initial value given.']),
level=logging.WARNING_TRANSLATE_ERROR)
# It doesn't make sense to compute a state variable
if dep.get_type(follow_maps=True) == VarTypes.Computed:
model.validation_warning(u' '.join([
u'The state variable',dep.fullname(),
u'is also assigned to directly.']),
level=logging.WARNING_TRANSLATE_ERROR)
dep._set_type(VarTypes.State)
t = indep.get_type(follow_maps=True)
if t != VarTypes.Free:
if t != VarTypes.Unknown:
if t == VarTypes.Computed:
reason = u'is computed in an expression.'
elif t == VarTypes.State:
reason = u'is a state variable itself.'
else:
reason = u'has an initial value specified.'
model.validation_warning(u' '.join([
u'The derivative of',dep.fullname(),
u'is taken with respect to',indep.fullname(),
u'but the latter', reason]),
level=logging.WARNING_TRANSLATE_ERROR)
# TODO: Add to list of independent vars?
indep._set_type(VarTypes.Free)
def _get_binding_time(self):
"""Return the binding time of the enclosing <apply> element.
This is the binding time of the expression defining this ODE.
"""
expr = self.dependent_variable.get_ode_dependency(
self.independent_variable)
return expr._get_binding_time()
def _reduce(self):
"""Reduce this expression by evaluating its static parts.
If the whole expression is static, proceed as normal for an
<apply>. Otherwise just rename the variable references. We
can't instantiate the definition, because there will always be
another user - external code.
This operator is special cased because we need to alter its
qualifier, but mathml_apply only considers operands. MathML
data binding can be annoying at times!
"""
app = self.xml_parent
bt = app._get_binding_time()
if bt == BINDING_TIMES.static:
# Evaluate this expression as normal
app._reduce(check_operator=False)
else:
# Just update names to be canonical.
for ci in [app.ci, app.bvar.ci]:
ci._set_variable_obj(ci.variable.get_source_variable(recurse=True))
ci._rename()
@staticmethod
def create_new(elt, bvar, state_var, rhs):
"""Construct an ODE expression: d(state_var)/d(bvar) = rhs."""
bvar_elt = elt.xml_create_element(u'bvar', NSS[u'm'])
bvar_elt.xml_append(mathml_ci.create_new(elt, bvar))
diff = mathml_apply.create_new(elt, u'diff', [state_var], [bvar_elt])
ode = mathml_apply.create_new(elt, u'eq', [diff, rhs])
return ode
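# Hypothetical usage sketch (variable names are assumptions): the ODE
# dV/dtime = k could be constructed as
#   rhs = mathml_ci.create_new(elt, u'k')
#   ode = mathml_diff.create_new(elt, u'time', u'V', rhs)
# giving <apply><eq/><apply><diff/><bvar><ci>time</ci></bvar>
# <ci>V</ci></apply><ci>k</ci></apply>.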
class reduce_commutative_nary(object):
def _reduce(self):
"""Reduce this expression by evaluating its static parts.
If the whole expression is static, proceed as normal for an
<apply>. Otherwise check if we have more than one static
operand. If we do, we can combine them in a new static
expression and evaluate that as a whole.
"""
app = self.xml_parent
bt = app._get_binding_time()
if bt == BINDING_TIMES.static or not self.model.get_option('partial_pe_commutative'):
# Evaluate this expression as normal
app._reduce(check_operator=False)
else:
# Get binding times of operands
static_opers = filter(lambda e: app._get_element_binding_time(e) == BINDING_TIMES.static,
app.operands())
if len(static_opers) > 1:
# Remove them from app
for oper in static_opers:
app.safe_remove_child(oper)
# Create the new expression
new_expr = mathml_apply.create_new(self, self.localName, static_opers)
# Put it in the model and reduce it to evaluate it properly
app.xml_append(new_expr)
new_expr._reduce()
# Now reduce all our operands as normal
app._reduce(check_operator=False)
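# Illustrative note: with the partial_pe_commutative option enabled, a
# dynamic sum such as (2 + x + 3) has its two static operands pulled out
# into a fresh static <apply>, which evaluates to a single <cn>5</cn>,
# leaving the smaller dynamic expression (x + 5) to reduce as normal.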
class mathml_plus(mathml_operator, mathml_units_mixin_set_operands, reduce_commutative_nary):
"""Class representing the MathML <plus> operator."""
def __init__(self):
super(mathml_plus, self).__init__()
return
def evaluate(self):
"""Evaluate by summing the operands of the enclosing <apply>."""
app = self.xml_parent
ops = app.operands()
value = 0
for operand in ops:
value += self.eval(operand)
return value
class mathml_minus(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <minus> operator."""
def evaluate(self):
"""Evaluate the enclosing <apply> element.
Behaviour depends on the number of operands: we perform
either a unary or binary minus.
"""
app = self.xml_parent
ops = list(app.operands())
if len(ops) == 1:
value = -self.eval(ops[0])
elif len(ops) == 2:
value = self.eval(ops[0]) - self.eval(ops[1])
else:
self.wrong_number_of_operands(len(ops), [1, 2])
return value
class mathml_times(mathml_operator, mathml_units_mixin_choose_nearest, reduce_commutative_nary):
"""Class representing the MathML <times> operator."""
def evaluate(self):
"""
Evaluate by taking the product of the operands of the enclosing <apply>.
"""
app = self.xml_parent
ops = app.operands()
value = 1
for operand in ops:
value *= self.eval(operand)
return value
class mathml_divide(mathml_operator, mathml_units_mixin_choose_nearest):
"""Class representing the MathML <divide> operator."""
def evaluate(self):
"""Evaluate by dividing the 2 operands of the enclosing <apply>."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
numer = self.eval(ops[0])
denom = self.eval(ops[1])
return numer/denom
def _reduce(self):
"""Reduce this expression by evaluating its static parts.
If the whole expression is static, proceed as normal for an
<apply>. If just the denominator is static, transform the
expression into a multiplication.
"""
app = self.xml_parent
bt = app._get_binding_time()
if bt == BINDING_TIMES.static:
# Evaluate this expression as normal
app._reduce(check_operator=False)
else:
# Check binding time of the denominator
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
bt = app._get_element_binding_time(ops[1])
if bt == BINDING_TIMES.static:
# Create inverse expression and evaluate it
dummy = self.xml_create_element(u'dummy', NSS[u'm'])
app.replace_child(ops[1], dummy)
new_expr = mathml_apply.create_new(
self, u'divide', [(u'1', u'dimensionless'),
ops[1]])
app.replace_child(dummy, new_expr)
app._reduce_elt(new_expr)
# Change this expression to a <times>
times = self.xml_create_element(u'times', NSS[u'm'])
app.replace_child(self, times)
# And finally reduce it as normal
app._reduce(check_operator=False)
else:
# Evaluate this expression as normal
app._reduce(check_operator=False)
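# Illustrative note: for x/2 with a static denominator, the branch above
# rewrites the tree as x * (1/2); the inner static division is evaluated
# during partial evaluation, so the specialised code multiplies by 0.5
# instead of performing a run-time division.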
class mathml_exp(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <exp> operator."""
def evaluate(self):
"""Return e to the power of the single operand."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
return math.exp(self.eval(ops[0]))
class mathml_ln(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <ln> operator."""
def evaluate(self):
"""Return the natural logarithm of the single operand."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
return math.log(self.eval(ops[0]))
class mathml_log(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <log> operator."""
def evaluate(self):
"""Return the logarithm of the single operand."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
if hasattr(app, u'logbase'):
base = self.eval(app.logbase)
else:
base = 10
return math.log(self.eval(ops[0]), base)
class mathml_abs(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <abs> operator."""
def evaluate(self):
"""Return the absolute value of the single operand."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
return abs(self.eval(ops[0]))
class mathml_power(mathml_operator, mathml_units_mixin):
"""Class representing the MathML <power> operator."""
def _set_in_units(self, units, no_act=False):
"""Set the units of the application of this operator.
Set the exponent to have units of dimensionless, and the operand to
have an arbitrary member of its possible units set.
Where these mean the <apply> doesn't have the given units, wrap it
in suitable units conversion mathematics.
"""
app = self.xml_parent
defn_units_set = app.get_units()
defn_units = defn_units_set.extract()
app._add_units_conversion(app, defn_units, units, no_act)
# Record which member of the set we used
if not no_act:
app._cml_units = defn_units
# Set exponent units
dimensionless = app.model.get_units_by_name('dimensionless')
ops = list(app.operands())
self._set_element_in_units(ops[1], dimensionless, no_act)
# Set operand units
for src_units_set, src_units in defn_units_set._get_sources(defn_units):
expr = src_units_set.get_expression()
self._set_element_in_units(expr, src_units, no_act)
return
def evaluate(self):
"""Return the first operand to the power of the second."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
return self.eval(ops[0]) ** self.eval(ops[1])
def _reduce(self):
"""Reduce this expression by evaluating its static parts.
If the whole expression is static, proceed as normal for an <apply>.
Otherwise check if the exponent is static and the expression being exponentiated
is a <ci>. If so, and the exponent is equal to 2, 3, or 4, convert the expression
to a multiplication.
"""
app = self.xml_parent
bt = app._get_binding_time()
converted = False
if bt != BINDING_TIMES.static and self.model.get_option('pe_convert_power'):
base, expt = list(app.operands())
expt_bt = app._get_element_binding_time(expt)
if expt_bt == BINDING_TIMES.static and isinstance(base, mathml_ci):
expt_val = self.eval(expt)
if expt_val in [2,3,4]:
# Do the conversion
app.safe_remove_child(base)
operands = [base]
for _ in range(1, expt_val):
operands.append(base.clone_self())
base.variable._used()
new_app = mathml_apply.create_new(app, u'times', operands)
app.replace_child(app, new_app, app.xml_parent)
# Finally, reduce the new expression, just to be safe!
new_app._reduce()
converted = True
if not converted:
# Evaluate this expression as normal
app._reduce(check_operator=False)
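# Illustrative note: with the pe_convert_power option enabled, x^3 (a
# static exponent applied to a <ci> base) is rewritten as the product
# x*x*x by cloning the base, since repeated multiplication is typically
# cheaper than a pow() call for small integer exponents (compare the
# cost estimates in tree_complexity above).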
class mathml_root(mathml_operator, mathml_units_mixin):
"""Class representing the MathML <root> operator."""
def _set_in_units(self, units, no_act=False):
"""Set the units of the application of this operator.
Set the degree to have units of dimensionless, and the operand to
have an arbitrary member of its possible units set.
Where these mean the <apply> doesn't have the given units, wrap it
in suitable units conversion mathematics.
"""
app = self.xml_parent
defn_units_set = app.get_units()
defn_units = defn_units_set.extract()
app._add_units_conversion(app, defn_units, units, no_act)
# Record which member of the set we used
if not no_act:
app._cml_units = defn_units
# Set degree units
if hasattr(app, u'degree'):
dimensionless = app.model.get_units_by_name('dimensionless')
self._set_element_in_units(_child1(app.degree), dimensionless, no_act)
# Set operand units
for src_units_set, src_units in defn_units_set._get_sources(defn_units):
expr = src_units_set.get_expression()
self._set_element_in_units(expr, src_units, no_act)
def evaluate(self):
"""
Return the operand to the given degree, if present.
Otherwise return the square root of the operand.
"""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
if hasattr(app, u'degree'):
degree = self.eval(app.degree)
else:
degree = 2
return self.eval(ops[0]) ** (1.0/degree)  # 1.0 avoids Python 2 integer division when degree is an int
class mathml_and(mathml_operator, mathml_units_mixin_equalise_operands):
"""Class representing the MathML <and> operator."""
def evaluate(self):
"""Return the logical conjunction of the operands.
Evaluates operands in the order given in the file, and will
short-circuit at the first which evaluates to False.
"""
app = self.xml_parent
ops = app.operands()
value = True
for operand in ops:
value = value and self.eval(operand)
if not value: break
return value
def _get_binding_time(self):
"""Return the binding time of the enclosing <apply> element.
Short-circuit if a static False operand occurs before any dynamic
operands, returning static. Otherwise return the least upper bound
of operand binding times, as usual.
"""
app = self.xml_parent
bts = [BINDING_TIMES.static]
for operand in app.operands():
bt = app._get_element_binding_time(operand)
if bt is BINDING_TIMES.static:
value = self.eval(operand)
if not value and len(bts) == 1:
# Short-circuit
break
else:
bts.append(bt)
# Take least upper bound
return max(bts)
# TODO: Write a _reduce method
class mathml_or(mathml_operator, mathml_units_mixin_equalise_operands):
"""Class representing the MathML <or> operator."""
def evaluate(self):
"""Return the logical disjunction of the operands.
Evaluates operands in the order given in the file, and will
short-circuit at the first which evaluates to True.
"""
app = self.xml_parent
ops = app.operands()
value = False
for operand in ops:
value = value or self.eval(operand)
if value: break
return value
def _get_binding_time(self):
"""Return the binding time of the enclosing <apply> element.
Short-circuit if a static True operand occurs before any dynamic
operands, returning static. Otherwise return the least upper bound
of operand binding times, as usual.
"""
app = self.xml_parent
bts = [BINDING_TIMES.static]
for operand in app.operands():
bt = app._get_element_binding_time(operand)
if bt is BINDING_TIMES.static:
value = self.eval(operand)
if value and len(bts) == 1:
# Short-circuit
break
else:
bts.append(bt)
# Take least upper bound
return max(bts)
# TODO: Write a _reduce method
class mathml_leq(mathml_operator, mathml_units_mixin_equalise_operands):
"""Class representing the MathML <leq> operator."""
def evaluate(self):
"""
Return True iff the value of the first operand is
less than or equal to the value of the second.
"""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
return self.eval(ops[0]) <= self.eval(ops[1])
class mathml_lt(mathml_operator, mathml_units_mixin_equalise_operands):
"""Class representing the MathML <lt> operator."""
def evaluate(self):
"""
Return True iff the value of the first operand is
less than the value of the second.
"""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
return self.eval(ops[0]) < self.eval(ops[1])
class mathml_geq(mathml_operator, mathml_units_mixin_equalise_operands):
"""Class representing the MathML <geq> operator."""
def evaluate(self):
"""
Return True iff the value of the first operand is
greater than or equal to the value of the second.
"""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
return self.eval(ops[0]) >= self.eval(ops[1])
class mathml_gt(mathml_operator, mathml_units_mixin_equalise_operands):
"""Class representing the MathML <gt> operator."""
def evaluate(self):
"""
Return True iff the value of the first operand is
greater than the value of the second.
"""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
return self.eval(ops[0]) > self.eval(ops[1])
class mathml_neq(mathml_operator, mathml_units_mixin_equalise_operands):
"""Class representing the MathML <neq> operator."""
def evaluate(self):
"""Evaluate the enclosing <apply> element.
Return True iff the 2 operands are not equal.
"""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
return (self.eval(ops[0]) != self.eval(ops[1]))
class mathml_eq(mathml_operator, mathml_units_mixin_equalise_operands):
"""Class representing the MathML <eq> operator."""
def _is_top_level(self):
"""Return True iff the enclosing <apply> is a top-level expression."""
return self.xml_parent.is_top_level()
def _set_in_units(self, units, no_act=False):
"""Set the units of the application of this operator.
If this is a top-level <eq/>, then force the RHS to take the units
of the LHS. Otherwise, behave as for other relational operators.
"""
if self._is_top_level():
ops = self.xml_parent.operands()
lhs = ops.next()
lhs_units = lhs.get_units().extract()
self._set_element_in_units(lhs, lhs_units, no_act)
self._set_element_in_units(ops.next(), lhs_units, no_act)
if not no_act:
self.xml_parent._cml_units = units
else:
super(mathml_eq, self)._set_in_units(units, no_act)
def evaluate(self):
"""Evaluate the enclosing <apply> element.
The behaviour depends on whether the enclosing <apply> is a
top-level expression or not, i.e. whether this is an
assignment or a comparison.
If an assignment, evaluate the RHS, assign the value to
the variable on the LHS, and return it.
If a comparison, return True iff the 2 operands are equal.
"""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
if self._is_top_level():
# This is a top-level assignment or ODE
value = self.eval(ops[1])
var = app.assigned_variable()
if app.is_assignment():
var.set_value(value)
elif app.is_ode():
indepvar = var[1].get_source_variable(recurse=True)
var[0].set_value(value, ode=indepvar)
else:
raise EvaluationError("Weird sort of assignment expression.")
else:
# This is a comparison
value = (self.eval(ops[0]) == self.eval(ops[1]))
return value
def _get_binding_time(self):
"""Return the binding time of the enclosing <apply> element.
If this is a top-level expression, then only recurse into the RHS,
otherwise proceed as normal for an apply.
There is one further special case: if this is a top-level
expression and the variable assigned to is annotated to be
kept in the specialised model, then the expression is dynamic.
"""
app = self.xml_parent
if self._is_top_level():
annotated_as_kept = False
if app.is_ode():
DEBUG('partial-evaluator', "BT ODE",
map(lambda v: v.fullname(), app.assigned_variable()))
else:
DEBUG('partial-evaluator', "BT expr",
app.assigned_variable().fullname())
# For an ODE, assigned_variable() is a (dependent, independent) pair;
# the pe_keep annotation lives on the dependent (state) variable.
var = app.assigned_variable()
if app.is_ode():
var = var[0]
if var.pe_keep:
annotated_as_kept = True
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
rhs = ops[1]
bt = app._get_element_binding_time(rhs)
if annotated_as_kept:
bt = BINDING_TIMES.dynamic
else:
bt = app._get_binding_time(check_operator=False)
return bt
def _reduce(self):
"""Reduce this expression by evaluating its static parts.
If this is a top-level assignment, then just reduce the RHS.
Otherwise proceed as normal for an <apply>.
"""
app = self.xml_parent
if self._is_top_level():
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
rhs = ops[1]
app._reduce_elt(rhs)
else:
app._reduce(check_operator=False)
@property
def rhs(self):
"""Return the right hand side of this expression.
Should only be called if we're actually an assignment.
"""
if self._is_top_level():
ops = self.xml_parent.operands()
ops.next()
return ops.next()
else:
raise ValueError("Not an assignment expression.")
@property
def lhs(self):
"""Return the left hand side of this expression.
Should only be called if we're actually an assignment.
"""
if self._is_top_level():
ops = self.xml_parent.operands()
return ops.next()
else:
raise ValueError("Not an assignment expression.")
class mathml_rem(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <rem> operator."""
def evaluate(self):
"""Return the remainder when the first operand is divided by the second."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 2:
self.wrong_number_of_operands(len(ops), [2])
return self.eval(ops[0]) % self.eval(ops[1])
class mathml_logbase(mathml, mathml_units_mixin_container):
"""Class representing the MathML <logbase> element."""
def evaluate(self):
"""Evaluate this element, by evaluating its child."""
return self.eval(_child1(self))
class mathml_degree(mathml, mathml_units_mixin_container):
"""Class representing the MathML <degree> element."""
def evaluate(self):
"""Evaluate this element, by evaluating its child."""
return self.eval(_child1(self))
class mathml_otherwise(mathml):
"""Class representing the MathML <otherwise> element.
Only defined to make it inherit from mathml.
"""
pass
class mathml_piece(mathml):
"""Class representing the MathML <piece> element.
Only defined to make it inherit from mathml.
"""
pass
class mathml_sin(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <sin> operator."""
def evaluate(self):
"""Return the sine of the single operand."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
return math.sin(self.eval(ops[0]))
class mathml_cos(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <cos> operator."""
def evaluate(self):
"""Return the cosine of the single operand."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
return math.cos(self.eval(ops[0]))
class mathml_tan(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <tan> operator."""
def evaluate(self):
"""Return the tangent of the single operand."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
return math.tan(self.eval(ops[0]))
class mathml_arcsin(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <arcsin> operator."""
def evaluate(self):
"""Return the arc sine of the single operand, in radians."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
return math.asin(self.eval(ops[0]))
class mathml_arccos(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <arccos> operator."""
def evaluate(self):
"""Return the arc cosine of the single operand, in radians."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
return math.acos(self.eval(ops[0]))
class mathml_arctan(mathml_operator, mathml_units_mixin_set_operands):
"""Class representing the MathML <arctan> operator."""
def evaluate(self):
"""Return the arc tangent of the single operand, in radians."""
app = self.xml_parent
ops = list(app.operands())
if len(ops) != 1:
self.wrong_number_of_operands(len(ops), [1])
return math.atan(self.eval(ops[0]))
class mathml_csymbol(mathml_operator, mathml_units_mixin):
"""Class representing the MathML <csymbol> operator.
This is used to represent special-case operations that are treated uniquely by the code generation phase.
"""
def evaluate(self):
raise NotImplementedError
def _set_in_units(self, units, no_act=False):
"""Set the units of the application of this operator, adding a conversion if needed."""
app = self.xml_parent
defn_units = app.get_units().extract()
if defn_units != units:
self._add_units_conversion(app, defn_units, units, no_act)
# Store the units
if not no_act:
app._cml_units = units
return
## Don't export module imports to people who do "from pycml import *"
#__all__ = filter(lambda n: type(globals()[n]) is not types.ModuleType, globals().keys())
|
qldhpc/eb_local
|
ebfiles/c/chaste/new-chaste/python/pycml/pycml.py
|
Python
|
apache-2.0
| 275,510
|
[
"VisIt"
] |
d66266e051c099a2c0b6c57572dcc49c442f67ba7a329b915b5a3d271a83204a
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 15 21:50:09 2017
@author: alessandro
"""
from scipy.ndimage.interpolation import zoom
import numpy as np
# ffttools
def fftd(img):
return np.fft.fftn(img)
def ifftd(img):
return np.fft.ifftn(img)
def rearrange(img):
return np.fft.fftshift(img, axes=(0,1,2))
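# Illustrative note: fftd/ifftd wrap NumPy's n-dimensional FFT pair, and
# rearrange applies fftshift over all three spatial axes so that the
# zero-shift response ends up at the centre of the volume rather than at
# index (0, 0, 0).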
# recttools
def x2(rect):
return rect[0] + rect[3]
def y2(rect):
return rect[1] + rect[4]
def z2(rect):
return rect[2] + rect[5]
def limit(rect, limit):
if(rect[0]+rect[3] > limit[0]+limit[3]):
rect[3] = limit[0]+limit[3]-rect[0]
if(rect[1]+rect[4] > limit[1]+limit[4]):
rect[4] = limit[1]+limit[4]-rect[1]
if(rect[2]+rect[5] > limit[2]+limit[5]):
rect[5] = limit[2]+limit[5]-rect[2]
if(rect[0] < limit[0]):
rect[3] -= (limit[0]-rect[0])
rect[0] = limit[0]
if(rect[1] < limit[1]):
rect[4] -= (limit[1]-rect[1])
rect[1] = limit[1]
if(rect[2] < limit[2]):
rect[5] -= (limit[2]-rect[2])
rect[2] = limit[2]
if(rect[3] < 0):
rect[3] = 0
if(rect[4] < 0):
rect[4] = 0
if(rect[5] < 0):
rect[5] = 0
return rect
def getBorder(original, limited):
res = [0,0,0,0,0,0]
res[0] = limited[0] - original[0]
res[1] = limited[1] - original[1]
res[2] = limited[2] - original[2]
res[3] = x2(original) - x2(limited)
res[4] = y2(original) - y2(limited)
res[5] = z2(original) - z2(limited)
assert(np.all(np.array(res) >= 0))
return res
def subwindow(img, window, mode='edge'): #mode='reflect'
cutWindow = [x for x in window]
limit(cutWindow, [0,0,0,img.shape[2],img.shape[1],img.shape[0]]) # modify cutWindow
assert(cutWindow[3]>0 and cutWindow[4]>0 and cutWindow[5]>0)
border = getBorder(window, cutWindow)
res = img[cutWindow[2]:cutWindow[2]+cutWindow[5],
cutWindow[1]:cutWindow[1]+cutWindow[4],
cutWindow[0]:cutWindow[0]+cutWindow[3]]
if(border != [0,0,0,0,0,0]):
#res = cv2.copyMakeBorder(res, border[1], border[3], border[0], border[2], borderType)
        res = np.pad(res,[(border[2],border[5]),
                          (border[1],border[4]),
                          (border[0],border[3])],mode=mode)
return res
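# Worked example for subwindow (not part of the original file): requesting a
# window that starts one voxel outside the volume keeps the requested extent
# by edge-padding the clipped slice.
#
#     >>> img = np.arange(27).reshape(3, 3, 3)
#     >>> subwindow(img, [-1, 0, 0, 3, 2, 2]).shape   # window is [x, y, z, w, h, d]
#     (2, 2, 3)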
# KCF tracker
class KCFTracker:
def __init__(self, hog=False, fixed_window=True, multiscale=False):
self.lambdar = 0.0001 # regularization
self.padding = 2.5 # extra area surrounding the target
self.output_sigma_factor = 0.625 # bandwidth of gaussian target
self.interp_factor = 0.075
self.sigma = 0.4
self.cell_size = 1
if(multiscale):
self.template_size = 96 # template size
self.scale_step = 1.05 # scale step for multi-scale estimation
self.scale_weight = 0.96 # to downweight detection scores of other scales for added stability
elif(fixed_window):
self.template_size = 96
self.scale_step = 1
else:
self.template_size = 1
self.scale_step = 1
self._tmpl_sz = [0,0,0]
self._roi = [0.,0.,0.,0.,0.,0.]
self.size_patch = [0,0,0]
self._scale = 1.
self._alphaf = None
self._prob = None
self._tmpl = None
self.hann = None
def subPixelPeak(self, left, center, right):
divisor = 2*center - right - left #float
return (0 if abs(divisor)<1e-3 else 0.5*(right-left)/float(divisor))
def createHanningMats(self):
hann3t, hann2t, hann1t = np.ogrid[0:self.size_patch[0],
0:self.size_patch[1],
0:self.size_patch[2]]
hann1t = 0.5 * (1 - np.cos(2*np.pi*hann1t/(self.size_patch[2]-1.)))
hann2t = 0.5 * (1 - np.cos(2*np.pi*hann2t/(self.size_patch[1]-1.)))
hann3t = 0.5 * (1 - np.cos(2*np.pi*hann3t/(self.size_patch[0]-1.)))
hann3d = hann3t * hann2t * hann1t
self.hann = hann3d
self.hann = self.hann.astype(np.float32)
def createGaussianPeak(self, sizez, sizey, sizex):
szh, syh, sxh = sizez/2., sizey/2., sizex/2.
output_sigma = np.sqrt(sizex*sizey*sizez) / self.padding * self.output_sigma_factor
mult = -0.5 / (output_sigma*output_sigma)
z, y, x = np.ogrid[0:sizez, 0:sizey, 0:sizex]
z, y, x = (z-szh)**2, (y-syh)**2, (x-sxh)**2
res = np.exp(mult * (z+y+x))
return res
def gaussianCorrelation(self, x1, x2):
c = fftd(x1)*np.conj(fftd(x2))
c = ifftd(c)
c = np.real(c)
c = rearrange(c)
d = (np.sum(x1*x1) + np.sum(x2*x2) - 2.0*c) / float(self.size_patch[0]*self.size_patch[1])#*self.size_patch[2])
d = d * (d>=0)
d = np.exp(-d / (self.sigma*self.sigma))
return d
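    # Note (added): gaussianCorrelation computes the Gaussian kernel
    # k(x1, x2) = exp(-||x1 - x2||^2 / sigma^2) for all cyclic shifts of x2 at
    # once, using the identity ||x1 - x2||^2 = ||x1||^2 + ||x2||^2 - 2*x1.x2
    # and evaluating the cross-correlation term x1.x2 in the Fourier domain --
    # the trick that makes kernelized correlation filters fast.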
def getFeatures(self, image, inithann, scale_adjust=1.0):
extracted_roi = [0,0,0,0,0,0] #[int,int,int,int]
cx = self._roi[0] + self._roi[3]/2. #float
cy = self._roi[1] + self._roi[4]/2. #float
cz = self._roi[2] + self._roi[5]/2. #float
if(inithann):
padded_w = self._roi[3] * self.padding
padded_h = self._roi[4] * self.padding
padded_d = self._roi[5] * self.padding
if(self.template_size > 1):
#if(padded_w >= padded_h):
# self._scale = padded_w / float(self.template_size)
#else:
# self._scale = padded_h / float(self.template_size)
self._scale = min(padded_w,
padded_h,
padded_d) / float(self.template_size)
self._tmpl_sz[0] = int(padded_w / self._scale)
self._tmpl_sz[1] = int(padded_h / self._scale)
self._tmpl_sz[2] = int(padded_d / self._scale)
else:
self._tmpl_sz[0] = int(padded_w)
self._tmpl_sz[1] = int(padded_h)
self._tmpl_sz[2] = int(padded_d)
self._scale = 1.
self._tmpl_sz[0] = int(self._tmpl_sz[0])
self._tmpl_sz[1] = int(self._tmpl_sz[1])
self._tmpl_sz[2] = int(self._tmpl_sz[2])
extracted_roi[3] = int(scale_adjust * self._scale * self._tmpl_sz[0])
extracted_roi[4] = int(scale_adjust * self._scale * self._tmpl_sz[1])
extracted_roi[5] = int(scale_adjust * self._scale * self._tmpl_sz[2])
extracted_roi[0] = int(cx - extracted_roi[3]/2.)
extracted_roi[1] = int(cy - extracted_roi[4]/2.)
extracted_roi[2] = int(cz - extracted_roi[5]/2.)
z = subwindow(image, extracted_roi)
if(z.shape[2]!=self._tmpl_sz[0] or z.shape[1]!=self._tmpl_sz[1] or z.shape[0]!=self._tmpl_sz[2]):
#z = cv2.resize(z, tuple(self._tmpl_sz))
            z = zoom(z,(self._tmpl_sz[2]/z.shape[0],
                        self._tmpl_sz[1]/z.shape[1],
                        self._tmpl_sz[0]/z.shape[2])) # zoom factors follow axis order (z, y, x)
self._tmpl_sz[0] = z.shape[2]
self._tmpl_sz[1] = z.shape[1]
self._tmpl_sz[2] = z.shape[0]
print("zoooom")
        FeaturesMap = z  # raw intensity volume, uint8-like values in 0..255
FeaturesMap = FeaturesMap.astype(np.float32) / 255.0 - 0.5
self.size_patch = [z.shape[0], z.shape[1], z.shape[2]]
if(inithann):
self.createHanningMats() # createHanningMats need size_patch
FeaturesMap = self.hann * FeaturesMap
return FeaturesMap
def detect(self, z, x):
k = self.gaussianCorrelation(x, z)
res = np.real(ifftd(self._alphaf*fftd(k)))
pv = np.max(res)
pi = list(np.unravel_index(res.argmax(), res.shape))[::-1]
p = [float(pi[0]), float(pi[1]), float(pi[2])]
if(pi[0]>0 and pi[0]<res.shape[2]-1):
p[0] += self.subPixelPeak(res[pi[2] ,pi[1] ,pi[0]-1], pv, res[pi[2] ,pi[1] ,pi[0]+1])
if(pi[1]>0 and pi[1]<res.shape[1]-1):
p[1] += self.subPixelPeak(res[pi[2] ,pi[1]-1,pi[0] ], pv, res[pi[2] ,pi[1]+1,pi[0] ])
if(pi[2]>0 and pi[2]<res.shape[0]-1):
p[2] += self.subPixelPeak(res[pi[2]-1,pi[1] ,pi[0] ], pv, res[pi[2]+1,pi[1] ,pi[0] ])
p[0] -= res.shape[2] / 2.
p[1] -= res.shape[1] / 2.
p[2] -= res.shape[0] / 2.
return p, pv
def train(self, x, train_interp_factor):
k = self.gaussianCorrelation(x, x)
alphaf = fftd(self._prob)/( fftd(k) + self.lambdar)
self._tmpl = (1-train_interp_factor)*self._tmpl + train_interp_factor*x
self._alphaf = (1-train_interp_factor)*self._alphaf + train_interp_factor*alphaf
def init(self, roi, image):
self._roi = list(map(float, roi))
assert(roi[3]>0 and roi[4]>0 and roi[5]>0)
self._tmpl = self.getFeatures(image, 1)
self._prob = self.createGaussianPeak(self.size_patch[0], self.size_patch[1], self.size_patch[2])
self._alphaf = np.zeros((self.size_patch[0], self.size_patch[1], self.size_patch[2]), np.complex128)
self.train(self._tmpl, 1.0)
def update(self, image):
if(self._roi[0]+self._roi[3] <= 0): self._roi[0] = -self._roi[3] + 1
if(self._roi[1]+self._roi[4] <= 0): self._roi[1] = -self._roi[4] + 1
if(self._roi[2]+self._roi[5] <= 0): self._roi[2] = -self._roi[5] + 1
if(self._roi[0] >= image.shape[2]-1): self._roi[0] = image.shape[2] - 2
if(self._roi[1] >= image.shape[1]-1): self._roi[1] = image.shape[1] - 2
if(self._roi[2] >= image.shape[0]-1): self._roi[2] = image.shape[0] - 2
cx = self._roi[0] + self._roi[3]/2.
cy = self._roi[1] + self._roi[4]/2.
cz = self._roi[2] + self._roi[5]/2.
loc, peak_value = self.detect(self._tmpl, self.getFeatures(image, 0, 1.0))
if(self.scale_step != 1):
# Test at a smaller _scale
new_loc1, new_peak_value1 = self.detect(self._tmpl, self.getFeatures(image, 0, 1.0/self.scale_step))
# Test at a bigger _scale
new_loc2, new_peak_value2 = self.detect(self._tmpl, self.getFeatures(image, 0, self.scale_step))
if(self.scale_weight*new_peak_value1 > peak_value and new_peak_value1>new_peak_value2):
loc = new_loc1
peak_value = new_peak_value1
self._scale /= self.scale_step
self._roi[3] /= self.scale_step
self._roi[4] /= self.scale_step
self._roi[5] /= self.scale_step
elif(self.scale_weight*new_peak_value2 > peak_value):
loc = new_loc2
peak_value = new_peak_value2
self._scale *= self.scale_step
self._roi[3] *= self.scale_step
self._roi[4] *= self.scale_step
self._roi[5] *= self.scale_step
self._roi[0] = cx - self._roi[3]/2.0 + loc[0]*self.cell_size*self._scale
self._roi[1] = cy - self._roi[4]/2.0 + loc[1]*self.cell_size*self._scale
self._roi[2] = cz - self._roi[5]/2.0 + loc[2]*self.cell_size*self._scale
if(self._roi[0] >= image.shape[2]-1): self._roi[0] = image.shape[2] - 1
if(self._roi[1] >= image.shape[1]-1): self._roi[1] = image.shape[1] - 1
if(self._roi[2] >= image.shape[0]-1): self._roi[2] = image.shape[0] - 1
if(self._roi[0]+self._roi[3] <= 0): self._roi[0] = -self._roi[3] + 2
if(self._roi[1]+self._roi[4] <= 0): self._roi[1] = -self._roi[4] + 2
if(self._roi[2]+self._roi[5] <= 0): self._roi[2] = -self._roi[5] + 2
assert(self._roi[3]>0 and self._roi[4]>0 and self._roi[5]>0)
x = self.getFeatures(image, 0, 1.0)
self.train(x, self.interp_factor)
return self._roi
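# Minimal usage sketch (not part of the original file; `volume_t0` and
# `volume_t1` are assumed to be 3D numpy arrays with values in 0..255, and the
# ROI format is [x, y, z, width, height, depth]):
#
#     tracker = KCFTracker(fixed_window=True)
#     tracker.init([30, 40, 10, 16, 16, 8], volume_t0)  # ROI in the first frame
#     roi = tracker.update(volume_t1)                   # tracked ROI in the next frame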
|
ale93111/KCF_3D
|
kcftracker3d_numpy.py
|
Python
|
mit
| 11,910
|
[
"Gaussian"
] |
9e989de953c8635f01274afa784562afd1f1d494b95d4767a924410b622971f8
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import babel
from contextlib import contextmanager
import datetime
import subprocess
import io
import logging
import os
import collections
import passlib.utils
import pickle as pickle_
import re
import socket
import sys
import threading
import time
import types
import werkzeug.utils
import zipfile
from collections import defaultdict, Iterable, Mapping, MutableSet, OrderedDict
from itertools import islice, groupby, repeat
from lxml import etree
from .which import which
import traceback
from operator import itemgetter
try:
# pylint: disable=bad-python3-import
import cProfile
except ImportError:
import profile as cProfile
from .config import config
from .cache import *
from .parse_version import parse_version
from . import pycompat
import odoo
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# They were moved to loglevels until we refactor tools.
from odoo.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase, etree._Entity)
# Configure default global parser
etree.set_default_parser(etree.XMLParser(resolve_entities=False))
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
path = os.environ.get('PATH', os.defpath).split(os.pathsep)
if config.get('bin_path') and config['bin_path'] != 'None':
path.append(config['bin_path'])
return which(name, path=os.pathsep.join(path))
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Command `%s` not found.' % name)
return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
"""
Force the database PostgreSQL environment variables to the database
configuration of Odoo.
Note: On systems where pg_restore/pg_dump require an explicit password
(i.e. on Windows where TCP sockets are used), it is necessary to pass the
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
"""
env = os.environ.copy()
if odoo.tools.config['db_host']:
env['PGHOST'] = odoo.tools.config['db_host']
if odoo.tools.config['db_port']:
env['PGPORT'] = str(odoo.tools.config['db_port'])
if odoo.tools.config['db_user']:
env['PGUSER'] = odoo.tools.config['db_user']
if odoo.tools.config['db_password']:
env['PGPASSWORD'] = odoo.tools.config['db_password']
return env
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
with open(os.devnull) as dn:
args2 = (prog,) + args
rc = subprocess.call(args2, env=env, stdout=dn, stderr=subprocess.STDOUT)
if rc:
raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
    >>> file_open('hr/report/timesheet.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import odoo.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.normcase(os.path.join(basedir, path)))
import odoo.modules as addons
paths = addons.module.ad_paths + [config['root_path']]
for addons_path in paths:
addons_path = os.path.normpath(os.path.normcase(addons_path)) + os.sep
if name.startswith(addons_path):
break
else:
raise ValueError("Unknown path: %s" % name)
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
zfile = zipfile.ZipFile(zpath)
try:
fo = io.BytesIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
        raise IOError('Report %r doesn\'t exist or was deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis (christophe@tinyerp.com)
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
r = []
for e in list:
if isinstance(e, (bytes, pycompat.text_type)) or not isinstance(e, collections.Iterable):
r.append(e)
else:
r.extend(flatten(e))
return r
def reverse_enumerate(l):
"""Like enumerate but in the other direction
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return pycompat.izip(range(len(l)-1, -1, -1), reversed(l))
def partition(pred, elems):
""" Return a pair equivalent to:
``filter(pred, elems), filter(lambda x: not pred(x), elems)` """
yes, nos = [], []
for elem in elems:
(yes if pred(elem) else nos).append(elem)
return yes, nos
def topological_sort(elems):
""" Return a list of elements sorted so that their dependencies are listed
before them in the result.
:param elems: specifies the elements to sort with their dependencies; it is
a dictionary like `{element: dependencies}` where `dependencies` is a
collection of elements that must appear before `element`. The elements
of `dependencies` are not required to appear in `elems`; they will
simply not appear in the result.
:returns: a list with the keys of `elems` sorted according to their
specification.
"""
# the algorithm is inspired by [Tarjan 1976],
# http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
result = []
visited = set()
def visit(n):
if n not in visited:
visited.add(n)
if n in elems:
# first visit all dependencies of n, then append n to result
for it in elems[n]:
visit(it)
result.append(n)
for el in elems:
visit(el)
return result
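# Illustrative example (not part of the original module; result order assumes
# Python 3.7+, where dicts preserve insertion order):
#
#     >>> topological_sort({'a': ['b'], 'b': [], 'c': ['a']})
#     ['b', 'a', 'c']
#
# Each key appears after its dependencies; dependencies that are not
# themselves keys of the mapping are silently skipped.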
try:
import xlwt
    # add some sanitization to respect the Excel sheet name restrictions;
    # as the sheet name is often translatable, we cannot control the input
class PatchedWorkbook(xlwt.Workbook):
def add_sheet(self, name, cell_overwrite_ok=False):
# invalid Excel character: []:*?/\
name = re.sub(r'[\[\]:*?/\\]', '', name)
# maximum size is 31 characters
name = name[:31]
return super(PatchedWorkbook, self).add_sheet(name, cell_overwrite_ok=cell_overwrite_ok)
xlwt.Workbook = PatchedWorkbook
except ImportError:
xlwt = None
try:
import xlsxwriter
    # add some sanitization to respect the Excel sheet name restrictions;
    # as the sheet name is often translatable, we cannot control the input
class PatchedXlsxWorkbook(xlsxwriter.Workbook):
# TODO when xlsxwriter bump to 0.9.8, add worksheet_class=None parameter instead of kw
def add_worksheet(self, name=None, **kw):
if name:
# invalid Excel character: []:*?/\
name = re.sub(r'[\[\]:*?/\\]', '', name)
# maximum size is 31 characters
name = name[:31]
return super(PatchedXlsxWorkbook, self).add_worksheet(name, **kw)
xlsxwriter.Workbook = PatchedXlsxWorkbook
except ImportError:
xlsxwriter = None
def to_xml(s):
return s.replace('&','&').replace('<','<').replace('>','>')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
csvpath = odoo.modules.module.get_resource_path('base', 'res', 'res.lang.csv')
try:
# read (code, name) from languages in base/res/res.lang.csv
with open(csvpath, 'rb') as csvfile:
reader = pycompat.csv_reader(csvfile, delimiter=',', quotechar='"')
fields = next(reader)
code_index = fields.index("code")
name_index = fields.index("name")
result = [
(row[code_index], row[name_index])
for row in reader
]
except Exception:
_logger.error("Could not read %s", csvpath)
result = []
return sorted(result or [('en_US', u'English')], key=itemgetter(1))
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
Input number : account or invoice number
Output return: the same number completed with the recursive mod10
key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
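# Worked example (not part of the original module): for "12" the running
# report is codec[1] = 9, then codec[(2 + 9) % 10] = 9, so the appended
# check digit is (10 - 9) % 10 = 1:
#
#     >>> mod10r("12")
#     '121'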
def str2bool(s, default=None):
s = ustr(s).lower()
y = 'y yes 1 true t on'.split()
n = 'n no 0 false f off'.split()
if s not in (y + n):
if default is None:
raise ValueError('Use 0/1/yes/no/true/false/on/off')
return bool(default)
return s in y
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,pycompat.string_types):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.__name__,)))
return result
return wrapper
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)]
for ifname in [iface for iface in ifaces if iface if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
DATE_LENGTH = len(datetime.date.today().strftime(DEFAULT_SERVER_DATE_FORMAT))
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is not tzinfo in a datetime value (e.g when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
# see comments above, and babel's format_datetime assumes an UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
# LDML date format patterns uses letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
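# Illustrative conversion (not part of the original module):
#
#     >>> import babel
#     >>> posix_to_ldml('%Y-%m-%d', locale=babel.Locale.parse('en_US'))
#     'yyyy-MM-dd'
#
# The locale is only consulted for the %x and %X shorthands.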
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
:param int n: maximum size of each generated chunk
:param Iterable iterable: iterable to chunk into pieces
:param piece_maker: callable taking an iterable and collecting each
chunk from its slice, *must consume the entire slice*.
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
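# Illustrative chunking (not part of the original module):
#
#     >>> list(split_every(2, range(5)))
#     [(0, 1), (2, 3), (4,)]
#
# Passing piece_maker=list yields lists instead of tuples.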
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
or escaping, keeping the original string untouched. The name come from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
    Some examples (notice that there are never quotes surrounding
    the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
    >>> print(d)
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('odoo.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('odoo.foo.bar'):
        do_stuff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, pycompat.string_types),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
__next__ = next
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-u', '--update', '-i', '--init', '--i18n-overwrite']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
Useful for default value to methods
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
        same as len, defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
def dumpstacks(sig=None, frame=None):
""" Signal handler: dump a stack trace for each existing thread."""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = {th.ident: {'name': th.name,
'uid': getattr(th, 'uid', 'n/a'),
'dbname': getattr(th, 'dbname', 'n/a'),
'url': getattr(th, 'url', 'n/a')}
for th in threading.enumerate()}
for threadId, stack in sys._current_frames().items():
thread_info = threads_info.get(threadId, {})
code.append("\n# Thread: %s (id:%s) (db:%s) (uid:%s) (url:%s)" %
(thread_info.get('name', 'n/a'),
threadId,
thread_info.get('dbname', 'n/a'),
thread_info.get('uid', 'n/a'),
thread_info.get('url', 'n/a')))
for line in extract_stack(stack):
code.append(line)
if odoo.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
def freehash(arg):
try:
return hash(arg)
except Exception:
if isinstance(arg, Mapping):
return hash(frozendict(arg))
elif isinstance(arg, Iterable):
return hash(frozenset(freehash(item) for item in arg))
else:
return id(arg)
class frozendict(dict):
""" An implementation of an immutable dictionary. """
def __delitem__(self, key):
raise NotImplementedError("'__delitem__' not supported on frozendict")
def __setitem__(self, key, val):
raise NotImplementedError("'__setitem__' not supported on frozendict")
def clear(self):
raise NotImplementedError("'clear' not supported on frozendict")
def pop(self, key, default=None):
raise NotImplementedError("'pop' not supported on frozendict")
def popitem(self):
raise NotImplementedError("'popitem' not supported on frozendict")
def setdefault(self, key, default=None):
raise NotImplementedError("'setdefault' not supported on frozendict")
def update(self, *args, **kwargs):
raise NotImplementedError("'update' not supported on frozendict")
def __hash__(self):
return hash(frozenset((key, freehash(val)) for key, val in self.items()))
class Collector(Mapping):
""" A mapping from keys to lists. This is essentially a space optimization
for ``defaultdict(list)``.
"""
__slots__ = ['_map']
def __init__(self):
self._map = {}
def add(self, key, val):
vals = self._map.setdefault(key, [])
if val not in vals:
vals.append(val)
def __getitem__(self, key):
return self._map.get(key, ())
def __iter__(self):
return iter(self._map)
def __len__(self):
return len(self._map)
class OrderedSet(MutableSet):
""" A set collection that remembers the elements first insertion order. """
__slots__ = ['_map']
def __init__(self, elems=()):
self._map = OrderedDict((elem, None) for elem in elems)
def __contains__(self, elem):
return elem in self._map
def __iter__(self):
return iter(self._map)
def __len__(self):
return len(self._map)
def add(self, elem):
self._map[elem] = None
def discard(self, elem):
self._map.pop(elem, None)
class LastOrderedSet(OrderedSet):
""" A set collection that remembers the elements last insertion order. """
def add(self, elem):
OrderedSet.discard(self, elem)
OrderedSet.add(self, elem)
def unique(it):
""" "Uniquifier" for the provided iterable: will output each element of
the iterable once.
    The iterable's elements must be hashable.
:param Iterable it:
:rtype: Iterator
"""
seen = set()
for e in it:
if e not in seen:
seen.add(e)
yield e
class Reverse(object):
""" Wraps a value and reverses its ordering, useful in key functions when
mixing ascending and descending sort on non-numeric data as the
``reverse`` parameter can not do piecemeal reordering.
"""
__slots__ = ['val']
def __init__(self, val):
self.val = val
def __eq__(self, other): return self.val == other.val
def __ne__(self, other): return self.val != other.val
def __ge__(self, other): return self.val <= other.val
def __gt__(self, other): return self.val < other.val
def __le__(self, other): return self.val >= other.val
def __lt__(self, other): return self.val > other.val
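# Illustrative key function (not part of the original module; `records` is a
# hypothetical list of dicts): sort ascending by name but descending by date.
#
#     records.sort(key=lambda r: (r['name'], Reverse(r['date'])))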
@contextmanager
def ignore(*exc):
try:
yield
except exc:
pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
def html_escape(text):
return werkzeug.utils.escape(text, quote=True)
else:
def html_escape(text):
return werkzeug.utils.escape(text)
def formatLang(env, value, digits=None, grouping=True, monetary=False, dp=False, currency_obj=False):
"""
Assuming 'Account' decimal.precision=3:
formatLang(value) -> digits=2 (default)
formatLang(value, digits=4) -> digits=4
formatLang(value, dp='Account') -> digits=3
formatLang(value, digits=5, dp='Account') -> digits=5
"""
    DEFAULT_DIGITS = 2
    if digits is None:
        digits = DEFAULT_DIGITS
if dp:
decimal_precision_obj = env['decimal.precision']
digits = decimal_precision_obj.precision_get(dp)
elif currency_obj:
digits = currency_obj.decimal_places
elif (hasattr(value, '_field') and isinstance(value._field, (float_field, function_field)) and value._field.digits):
digits = value._field.digits[1]
    if not digits and digits != 0:
digits = DEFAULT_DIGITS
if isinstance(value, pycompat.string_types) and not value:
return ''
lang = env.user.company_id.partner_id.lang or 'en_US'
lang_objs = env['res.lang'].search([('code', '=', lang)])
if not lang_objs:
lang_objs = env['res.lang'].search([], limit=1)
lang_obj = lang_objs[0]
res = lang_obj.format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
if currency_obj and currency_obj.symbol:
if currency_obj.position == 'after':
res = '%s %s' % (res, currency_obj.symbol)
elif currency_obj and currency_obj.position == 'before':
res = '%s %s' % (currency_obj.symbol, res)
return res
def format_date(env, value, lang_code=False, date_format=False):
'''
Formats the date in a given format.
:param env: an environment.
:param date, datetime or string value: the date to format.
:param string lang_code: the lang code, if not specified it is extracted from the
environment context.
:param string date_format: the format or the date (LDML format), if not specified the
default format of the lang.
:return: date formatted in the specified format.
:rtype: string
'''
if not value:
return ''
if isinstance(value, datetime.datetime):
value = value.date()
elif isinstance(value, pycompat.string_types):
if len(value) < DATE_LENGTH:
return ''
value = value[:DATE_LENGTH]
value = datetime.datetime.strptime(value, DEFAULT_SERVER_DATE_FORMAT).date()
lang = env['res.lang']._lang_get(lang_code or env.context.get('lang') or 'en_US')
locale = babel.Locale.parse(lang.code)
if not date_format:
date_format = posix_to_ldml(lang.date_format, locale=locale)
return babel.dates.format_date(value, format=date_format, locale=locale)
def _consteq(str1, str2):
""" Constant-time string comparison. Suitable to compare bytestrings of fixed,
known length only, because length difference is optimized. """
return len(str1) == len(str2) and sum(ord(x)^ord(y) for x, y in pycompat.izip(str1, str2)) == 0
consteq = getattr(passlib.utils, 'consteq', _consteq)
# forbid globals entirely: str/unicode, int/long, float, bool, tuple, list, dict, None
class Unpickler(pickle_.Unpickler, object):
find_global = None # Python 2
find_class = None # Python 3
def _pickle_load(stream, errors=False):
unpickler = Unpickler(stream)
try:
return unpickler.load()
except Exception:
_logger.warning('Failed unpickling data, returning default: %r',
errors, exc_info=True)
return errors
pickle = types.ModuleType(__name__ + '.pickle')
pickle.load = _pickle_load
pickle.loads = lambda text: _pickle_load(io.BytesIO(text))
pickle.dump = pickle_.dump
pickle.dumps = pickle_.dumps
|
richard-willowit/odoo
|
odoo/tools/misc.py
|
Python
|
gpl-3.0
| 38,601
|
[
"VisIt"
] |
3da09fe98261f0fb00c5ae52508b8722cb69c348552f7ebefd546d9375a3baa4
|
class VehicleInfo(object):
def __init__(self):
"""
waf_target: option passed to waf's --target to create binary
default_params_filename: filename of default parameters file. Taken to be relative to autotest dir.
extra_mavlink_cmds: extra parameters that will be passed to mavproxy
"""
self.options = {
"ArduCopter": {
"default_frame": "quad",
"frames": {
# COPTER
"+": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"quad": {
"model": "+",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"X": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
# this param set FRAME doesn't actually work because mavproxy
# won't set a parameter unless it knows of it, and the
# param fetch happens asynchronously
"extra_mavlink_cmds": "param fetch frame; param set FRAME 1;",
},
"bfx": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-bfx.parm" ],
},
"djix": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-djix.parm" ],
},
"cwx": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-cwx.parm" ],
},
"hexa": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-hexa.parm" ],
},
"hexa-cwx": {
"waf_target": "bin/arducopter",
"default_params_filename": [
"default_params/copter.parm",
"default_params/copter-hexa.parm",
"default_params/copter-hexa-cwx.parm"
],
},
"hexa-dji": {
"waf_target": "bin/arducopter",
"default_params_filename": [
"default_params/copter.parm",
"default_params/copter-hexa.parm",
"default_params/copter-hexa-dji.parm"
],
},
"octa-cwx": {
"waf_target": "bin/arducopter",
"default_params_filename": [
"default_params/copter.parm",
"default_params/copter-octa.parm",
"default_params/copter-octa-cwx.parm"
],
},
"octa-quad-cwx": {
"waf_target": "bin/arducopter",
"default_params_filename": [
"default_params/copter.parm",
"default_params/copter-octaquad.parm",
"default_params/copter-octaquad-cwx.parm"
],
},
"octa-quad": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-octaquad.parm" ],
},
"octa": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-octa.parm" ],
},
"octa-dji": {
"waf_target": "bin/arducopter",
"default_params_filename": [
"default_params/copter.parm",
"default_params/copter-octa.parm",
"default_params/copter-octa-dji.parm"
],
},
"deca": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-deca.parm" ],
},
"deca-cwx": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"tri": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-tri.parm" ],
},
"y6": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-y6.parm" ],
},
"dodeca-hexa": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-dodecahexa.parm" ],
},
# SIM
"IrisRos": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
"external": True,
},
"gazebo-iris": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/gazebo-iris.parm"],
"external": True,
},
"airsim-copter": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/airsim-quadX.parm"],
"external": True,
},
# HELICOPTER
"heli": {
"waf_target": "bin/arducopter-heli",
"default_params_filename": "default_params/copter-heli.parm",
},
"heli-dual": {
"waf_target": "bin/arducopter-heli",
"default_params_filename": ["default_params/copter-heli.parm",
"default_params/copter-heli-dual.parm"],
},
"heli-blade360": {
"waf_target": "bin/arducopter-heli",
"default_params_filename": ["default_params/copter-heli.parm",
],
},
"singlecopter": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter-single.parm",
},
"coaxcopter": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter-single.parm",
"default_params/copter-coax.parm"],
},
"scrimmage-copter" : {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
"external": True,
},
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
"external": True, # lies! OTOH, hard to take off with this
},
"Callisto": {
"model": "octa-quad:@ROMFS/models/Callisto.json",
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"models/Callisto.param"],
},
},
},
"Helicopter": {
"default_frame": "heli",
"frames": {
"heli": {
"waf_target": "bin/arducopter-heli",
"default_params_filename": "default_params/copter-heli.parm",
},
"heli-dual": {
"waf_target": "bin/arducopter-heli",
"default_params_filename": ["default_params/copter-heli.parm",
"default_params/copter-heli-dual.parm"],
},
# "heli-compound": {
# "waf_target": "bin/arducopter-heli",
# "default_params_filename": ["default_params/copter-heli.parm",
# "default_params/copter-heli-compound.parm"],
# },
"heli-blade360": {
"waf_target": "bin/arducopter-heli",
"default_params_filename": ["default_params/copter-heli.parm",
],
},
},
},
"Blimp": {
"default_frame": "Blimp",
"frames": {
"Blimp": {
"waf_target": "bin/blimp",
"default_params_filename": "default_params/blimp.parm",
},
},
},
"ArduPlane": {
"default_frame": "plane",
"frames": {
# PLANE
"quadplane-tilttri": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane-tilttri.parm",
},
"quadplane-tilttrivec": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane-tilttrivec.parm",
},
"quadplane-tilthvec": {
"waf_target": "bin/arduplane",
"default_params_filename": ["default_params/plane.parm", "default_params/quadplane-tilthvec.parm"],
},
"quadplane-tri": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane-tri.parm",
},
"quadplane-cl84" : {
"waf_target" : "bin/arduplane",
"default_params_filename": "default_params/quadplane-cl84.parm",
},
"quadplane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane.parm",
},
"firefly": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/firefly.parm",
},
"plane-elevon": {
"waf_target": "bin/arduplane",
"default_params_filename": ["default_params/plane.parm", "default_params/plane-elevons.parm"],
},
"plane-vtail": {
"waf_target": "bin/arduplane",
"default_params_filename": ["default_params/plane.parm", "default_params/plane-vtail.parm"],
},
"plane-tailsitter": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-tailsitter.parm",
},
"plane-jet": {
"waf_target": "bin/arduplane",
"default_params_filename": ["default_params/plane.parm", "default_params/plane-jet.parm"],
},
"plane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane.parm",
},
"plane-dspoilers": {
"waf_target": "bin/arduplane",
"default_params_filename": ["default_params/plane.parm", "default_params/plane-dspoilers.parm"]
},
"plane-soaring": {
"waf_target": "bin/arduplane",
"default_params_filename": ["default_params/plane.parm", "default_params/plane-soaring.parm"]
},
"gazebo-zephyr": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/gazebo-zephyr.parm",
"external": True,
},
"last_letter": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane.parm",
"external": True,
},
"CRRCSim": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane.parm",
"external": True,
},
"jsbsim": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-jsbsim.parm",
"external": True,
},
"scrimmage-plane" : {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane.parm",
"external": True,
},
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
"external": True, # lies! OTOH, hard to take off with this
},
},
},
"Rover": {
"default_frame": "rover",
"frames": {
# ROVER
"rover": {
"waf_target": "bin/ardurover",
"default_params_filename": "default_params/rover.parm",
},
"rover-skid": {
"waf_target": "bin/ardurover",
"default_params_filename": ["default_params/rover.parm",
"default_params/rover-skid.parm"],
},
"rover-vectored": {
"waf_target": "bin/ardurover",
"default_params_filename": ["default_params/rover.parm",
"default_params/rover-vectored.parm"],
},
"balancebot": {
"waf_target": "bin/ardurover",
"default_params_filename": ["default_params/rover.parm",
"default_params/rover-skid.parm",
"default_params/balancebot.parm"],
},
"sailboat": {
"waf_target": "bin/ardurover",
"default_params_filename": ["default_params/rover.parm",
"default_params/sailboat.parm"],
},
"sailboat-motor": {
"waf_target": "bin/ardurover",
"default_params_filename": ["default_params/rover.parm",
"default_params/sailboat-motor.parm"],
},
"gazebo-rover": {
"waf_target": "bin/ardurover",
"default_params_filename": ["default_params/rover.parm",
"default_params/rover-skid.parm"],
},
"airsim-rover": {
"waf_target": "bin/ardurover",
"default_params_filename": ["default_params/rover.parm",
"default_params/airsim-rover.parm"],
},
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
},
},
},
"ArduSub": {
"default_frame": "vectored",
"frames": {
"vectored": {
"waf_target": "bin/ardusub",
"default_params_filename": "default_params/sub.parm",
},
"vectored_6dof": {
"waf_target": "bin/ardusub",
"default_params_filename": "default_params/sub-6dof.parm",
},
"gazebo-bluerov2": {
"waf_target": "bin/ardusub",
"default_params_filename": "default_params/sub.parm",
},
},
},
"AntennaTracker": {
"default_frame": "tracker",
"frames": {
"tracker": {
"waf_target": "bin/antennatracker",
},
},
},
}
def default_frame(self, vehicle):
return self.options[vehicle]["default_frame"]
def default_waf_target(self, vehicle):
"""Returns a waf target based on vehicle type, which is often determined by which directory the user is in"""
default_frame = self.default_frame(vehicle)
return self.options[vehicle]["frames"][default_frame]["waf_target"]
def options_for_frame(self, frame, vehicle, opts):
"""Return informatiom about how to sitl for frame e.g. build-type==sitl"""
ret = None
frames = self.options[vehicle]["frames"]
if frame in frames:
ret = self.options[vehicle]["frames"][frame]
else:
for p in ["octa", "tri", "y6", "firefly", "heli", "gazebo", "last_letter", "jsbsim", "quadplane", "plane-elevon", "plane-vtail", "plane", "airsim"]:
if frame.startswith(p):
ret = self.options[vehicle]["frames"][p]
break
if ret is None:
if frame.endswith("-heli"):
ret = self.options[vehicle]["frames"]["heli"]
if ret is None:
print("WARNING: no config for frame (%s)" % frame)
ret = {}
if "model" not in ret:
ret["model"] = frame
if "sitl-port" not in ret:
ret["sitl-port"] = True
if opts.model is not None:
ret["model"] = opts.model
if (ret["model"].find("xplane") != -1 or ret["model"].find("flightaxis") != -1):
ret["sitl-port"] = False
if "waf_target" not in ret:
ret["waf_target"] = self.default_waf_target(vehicle)
if opts.build_target is not None:
ret["waf_target"] = opts.build_target
return ret
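# Illustrative lookups (not part of the original file):
#
#     vinfo = VehicleInfo()
#     vinfo.default_frame("ArduCopter")    # -> "quad"
#     vinfo.default_waf_target("Rover")    # -> "bin/ardurover"
#
# options_for_frame() additionally expects an `opts` object exposing `.model`
# and `.build_target` attributes (e.g. parsed command-line options).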
|
khancyr/ardupilot
|
Tools/autotest/pysim/vehicleinfo.py
|
Python
|
gpl-3.0
| 17,804
|
[
"Firefly"
] |
9283bcce50298ca5b0cf0083a307be0b0cb81bc17aa66332268a106f5c1ab969
|
""" Client for the SandboxStore.
Will connect to the WorkloadManagement/SandboxStore service.
"""
__RCSID__ = "$Id$"
import os
import tarfile
import hashlib
import tempfile
import re
import StringIO
from DIRAC import gLogger, S_OK, S_ERROR, gConfig
from DIRAC.Core.DISET.TransferClient import TransferClient
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Utilities.File import getGlobbedTotalSize
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
class SandboxStoreClient(object):
__validSandboxTypes = ('Input', 'Output')
__smdb = None
def __init__(self, rpcClient=None, transferClient=None, **kwargs):
self.__serviceName = "WorkloadManagement/SandboxStore"
self.__rpcClient = rpcClient
self.__transferClient = transferClient
self.__kwargs = kwargs
self.__vo = None
if 'delegatedGroup' in kwargs:
self.__vo = getVOForGroup(kwargs['delegatedGroup'])
if SandboxStoreClient.__smdb is None:
try:
from DIRAC.WorkloadManagementSystem.DB.SandboxMetadataDB import SandboxMetadataDB
SandboxStoreClient.__smdb = SandboxMetadataDB()
result = SandboxStoreClient.__smdb._getConnection() # pylint: disable=protected-access
if not result['OK']:
SandboxStoreClient.__smdb = False
else:
result['Value'].close()
except (ImportError, RuntimeError, AttributeError):
SandboxStoreClient.__smdb = False
def __getRPCClient(self):
""" Get an RPC client for SB service """
if self.__rpcClient:
return self.__rpcClient
else:
return RPCClient(self.__serviceName, **self.__kwargs)
def __getTransferClient(self):
""" Get RPC client for TransferClient """
if self.__transferClient:
return self.__transferClient
else:
return TransferClient(self.__serviceName, **self.__kwargs)
# Upload sandbox to jobs and pilots
def uploadFilesAsSandboxForJob(self, fileList, jobId, sbType, sizeLimit=0):
""" Upload SB for a job """
if sbType not in self.__validSandboxTypes:
return S_ERROR("Invalid Sandbox type %s" % sbType)
return self.uploadFilesAsSandbox(fileList, sizeLimit, assignTo={"Job:%s" % jobId: sbType})
def uploadFilesAsSandboxForPilot(self, fileList, jobId, sbType, sizeLimit=0):
""" Upload SB for a pilot """
if sbType not in self.__validSandboxTypes:
return S_ERROR("Invalid Sandbox type %s" % sbType)
return self.uploadFilesAsSandbox(fileList, sizeLimit, assignTo={"Pilot:%s" % jobId: sbType})
# Upload generic sandbox
def uploadFilesAsSandbox(self, fileList, sizeLimit=0, assignTo=None):
""" Send files in the fileList to a Sandbox service for the given jobID.
This is the preferable method to upload sandboxes.
a fileList item can be:
- a string, which is an lfn name
- a file name (real), that is supposed to be on disk, in the current directory
- a fileObject that should be a StringIO.StringIO type of object
Parameters:
- assignTo : Dict containing { 'Job:<jobid>' : '<sbType>', ... }
"""
errorFiles = []
files2Upload = []
if assignTo is None:
assignTo = {}
for key in assignTo:
if assignTo[key] not in self.__validSandboxTypes:
return S_ERROR("Invalid sandbox type %s" % assignTo[key])
if not isinstance(fileList, (list, tuple)):
return S_ERROR("fileList must be a list or tuple!")
for sFile in fileList:
if isinstance(sFile, basestring):
if re.search('^lfn:', sFile, flags=re.IGNORECASE):
pass
else:
if os.path.exists(sFile):
files2Upload.append(sFile)
else:
errorFiles.append(sFile)
elif isinstance(sFile, StringIO.StringIO):
files2Upload.append(sFile)
else:
return S_ERROR("Objects of type %s can't be part of InputSandbox" % type(sFile))
if errorFiles:
return S_ERROR("Failed to locate files: %s" % ", ".join(errorFiles))
try:
fd, tmpFilePath = tempfile.mkstemp(prefix="LDSB.")
os.close(fd)
except Exception as e:
return S_ERROR("Cannot create temporary file: %s" % repr(e))
with tarfile.open(name=tmpFilePath, mode="w|bz2") as tf:
for sFile in files2Upload:
if isinstance(sFile, basestring):
tf.add(os.path.realpath(sFile), os.path.basename(sFile), recursive=True)
elif isinstance(sFile, StringIO.StringIO):
tarInfo = tarfile.TarInfo(name='jobDescription.xml')
tarInfo.size = len(sFile.buf)
tf.addfile(tarinfo=tarInfo, fileobj=sFile)
if sizeLimit > 0:
# Evaluate the compressed size of the sandbox
if getGlobbedTotalSize(tmpFilePath) > sizeLimit:
result = S_ERROR("Size over the limit")
result['SandboxFileName'] = tmpFilePath
return result
oMD5 = hashlib.md5()
with open(tmpFilePath, "rb") as fd:
bData = fd.read(10240)
while bData:
oMD5.update(bData)
bData = fd.read(10240)
transferClient = self.__getTransferClient()
result = transferClient.sendFile(tmpFilePath, ("%s.tar.bz2" % oMD5.hexdigest(), assignTo))
result['SandboxFileName'] = tmpFilePath
try:
if result['OK']:
os.unlink(tmpFilePath)
except OSError:
pass
return result
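  # Illustrative call (not part of the original file; the file names are
  # assumptions):
  #
  #     ssc = SandboxStoreClient()
  #     result = ssc.uploadFilesAsSandboxForJob(['jobScript.py', 'data.cfg'],
  #                                             jobId, 'Input',
  #                                             sizeLimit=10 * 1024 * 1024)
  #     if not result['OK']:
  #         gLogger.error(result['Message'])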
##############
# Download sandbox
def downloadSandbox(self, sbLocation, destinationDir="", inMemory=False, unpack=True):
"""
Download a sandbox file and keep it in bundled form
"""
if not sbLocation.startswith("SB:"):
return S_ERROR("Invalid sandbox URL")
sbLocation = sbLocation[3:]
sbSplit = sbLocation.split("|")
if len(sbSplit) < 2:
return S_ERROR("Invalid sandbox URL")
seName = sbSplit[0]
sePFN = "|".join(sbSplit[1:])
try:
tmpSBDir = tempfile.mkdtemp(prefix="TMSB.")
except (IOError, OSError) as e:
return S_ERROR("Cannot create temporary file: %s" % repr(e))
se = StorageElement(seName, vo=self.__vo)
result = returnSingleResult(se.getFile(sePFN, localPath=tmpSBDir))
if not result['OK']:
return result
sbFileName = os.path.basename(sePFN)
result = S_OK()
tarFileName = os.path.join(tmpSBDir, sbFileName)
if inMemory:
try:
with open(tarFileName, 'rb') as tfile:
data = tfile.read()
except IOError as e:
return S_ERROR('Failed to read the sandbox archive: %s' % repr(e))
finally:
os.unlink(tarFileName)
os.rmdir(tmpSBDir)
return S_OK(data)
# If destination dir is not specified use current working dir
# If its defined ensure the dir structure is there
if not destinationDir:
destinationDir = os.getcwd()
else:
mkDir(destinationDir)
if not unpack:
result['Value'] = tarFileName
return result
try:
sandboxSize = 0
with tarfile.open(name=tarFileName, mode="r") as tf:
for tarinfo in tf:
tf.extract(tarinfo, path=destinationDir)
sandboxSize += tarinfo.size
# FIXME: here we return the size, but otherwise we always return the location: inconsistent
# FIXME: looks like this size is used by the JobWrapper
result['Value'] = sandboxSize
except IOError as e:
result = S_ERROR("Could not open bundle: %s" % repr(e))
try:
os.unlink(tarFileName)
os.rmdir(tmpSBDir)
except OSError as e:
gLogger.warn("Could not remove temporary dir %s: %s" % (tmpSBDir, repr(e)))
return result
##############
# Jobs
def getSandboxesForJob(self, jobId):
""" Download job sandbox """
return self.__getSandboxesForEntity("Job:%s" % jobId)
def assignSandboxesToJob(self, jobId, sbList, ownerName="", ownerGroup="", eSetup=""):
""" Assign SB to a job """
return self.__assignSandboxesToEntity("Job:%s" % jobId, sbList, ownerName, ownerGroup, eSetup)
def assignSandboxToJob(self, jobId, sbLocation, sbType, ownerName="", ownerGroup="", eSetup=""):
""" Assign SB to a job """
return self.__assignSandboxToEntity("Job:%s" % jobId, sbLocation, sbType, ownerName, ownerGroup, eSetup)
def unassignJobs(self, jobIdList):
""" Unassign SB to a job """
if isinstance(jobIdList, (int, long)):
jobIdList = [jobIdList]
entitiesList = []
for jobId in jobIdList:
entitiesList.append("Job:%s" % jobId)
return self.__unassignEntities(entitiesList)
def downloadSandboxForJob(self, jobId, sbType, destinationPath="", inMemory=False, unpack=True):
""" Download SB for a job """
result = self.__getSandboxesForEntity("Job:%s" % jobId)
if not result['OK']:
return result
sbDict = result['Value']
if sbType not in sbDict:
return S_ERROR("No %s sandbox registered for job %s" % (sbType, jobId))
# If inMemory, ensure we return the newest sandbox only
if inMemory:
sbLocation = sbDict[sbType][-1]
return self.downloadSandbox(sbLocation, destinationPath, inMemory, unpack)
downloadedSandboxesLoc = []
for sbLocation in sbDict[sbType]:
result = self.downloadSandbox(sbLocation, destinationPath, inMemory, unpack)
if not result['OK']:
return result
downloadedSandboxesLoc.append(result['Value'])
return S_OK(downloadedSandboxesLoc)
##############
# Pilots
def getSandboxesForPilot(self, pilotId):
""" Get SB for a pilot """
return self.__getSandboxesForEntity("Pilot:%s" % pilotId)
def assignSandboxesToPilot(self, pilotId, sbList, ownerName="", ownerGroup="", eSetup=""):
""" Assign SB to a pilot """
return self.__assignSandboxesToEntity("Pilot:%s" % pilotId, sbList, ownerName, ownerGroup, eSetup)
def assignSandboxToPilot(self, pilotId, sbLocation, sbType, ownerName="", ownerGroup="", eSetup=""):
""" Assign SB to a pilot """
return self.__assignSandboxToEntity("Pilot:%s" % pilotId, sbLocation, sbType, ownerName, ownerGroup, eSetup)
def unassignPilots(self, pilotIdList):
""" Unassign sandboxes from the given pilots """
if isinstance(pilotIdList, (int, long)):
pilotIdList = [pilotIdList]
entitiesList = []
for pilotId in pilotIdList:
entitiesList.append("Pilot:%s" % pilotId)
return self.__unassignEntities(entitiesList)
def downloadSandboxForPilot(self, pilotId, sbType, destinationPath=""):
""" Download SB for a pilot """
result = self.__getSandboxesForEntity("Pilot:%s" % pilotId)
if not result['OK']:
return result
sbDict = result['Value']
if sbType not in sbDict:
return S_ERROR("No %s sandbox registered for pilot %s" % (sbType, pilotId))
downloadedSandboxesLoc = []
for sbLocation in sbDict[sbType]:
result = self.downloadSandbox(sbLocation, destinationPath)
if not result['OK']:
return result
downloadedSandboxesLoc.append(result['Value'])
return S_OK(downloadedSandboxesLoc)
##############
# Entities
def __getSandboxesForEntity(self, eId):
"""
Get the sandboxes assigned to an entity, together with the relation type
"""
rpcClient = self.__getRPCClient()
return rpcClient.getSandboxesAssignedToEntity(eId)
def __assignSandboxesToEntity(self, eId, sbList, ownerName="", ownerGroup="", eSetup=""):
"""
Assign sandboxes to an entity.
sbList must be a list of sandboxes and relation types
sbList = [ ( "SB:SEName|SEPFN", "Input" ), ( "SB:SEName|SEPFN", "Output" ) ]
"""
for sbT in sbList:
if sbT[1] not in self.__validSandboxTypes:
return S_ERROR("Invalid Sandbox type %s" % sbT[1])
if SandboxStoreClient.__smdb and ownerName and ownerGroup:
if not eSetup:
eSetup = gConfig.getValue("/DIRAC/Setup", "Production")
return SandboxStoreClient.__smdb.assignSandboxesToEntities({eId: sbList}, ownerName, ownerGroup, eSetup)
rpcClient = self.__getRPCClient()
return rpcClient.assignSandboxesToEntities({eId: sbList}, ownerName, ownerGroup, eSetup)
def __assignSandboxToEntity(self, eId, sbLocation, sbType, ownerName="", ownerGroup="", eSetup=""):
"""
Assign a sandbox to an entity
sbLocation is "SB:SEName|SEPFN"
sbType is Input or Output
"""
return self.__assignSandboxesToEntity(eId, [(sbLocation, sbType)], ownerName, ownerGroup, eSetup)
def __unassignEntities(self, eIdList):
"""
Unassign a list of entities from their respective sandboxes
"""
rpcClient = self.__getRPCClient()
return rpcClient.unassignEntities(eIdList)
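# Example (sketch): a minimal upload/download round trip, assuming a
# configured DIRAC client environment; the job id and file name below are
# illustrative only.
#
# client = SandboxStoreClient()
# result = client.uploadFilesAsSandboxForJob(['input.txt'], 123, 'Input')
# if result['OK']:
#     result = client.downloadSandboxForJob(123, 'Input', destinationPath='.')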
|
petricm/DIRAC
|
WorkloadManagementSystem/Client/SandboxStoreClient.py
|
Python
|
gpl-3.0
| 12,655
|
[
"DIRAC"
] |
c345468be5be3c7a22d89ef7095bad013673019d2db348f7670b3db6a5417f80
|
## To Do:
## Make stats fields read-only
## More descriptive description
# Author: Daryl Harrison
# Enthought library imports
from enthought.traits.api import Instance, Enum, Bool, Tuple, Float
from enthought.traits.ui.api import View, Item, Group, TupleEditor
from enthought.tvtk.api import tvtk
# Local imports
from enthought.mayavi.core.filter import Filter
from enthought.mayavi.core.traits import DEnum
from enthought.mayavi.core.dataset_manager import DatasetManager
from numpy import array
from sets import Set
################################################################################
# `MeshDiagnostics` class.
################################################################################
class MeshDiagnostics(Filter):
"""
Identifies stiff elements of a solid mesh.
Also includes diagnostics from vtkMeshQuality:
<http://www.vtk.org/doc/release/5.0/html/a01739.html>
"""
# The version of this class. Used for persistence.
__version__ = 0
_dataset_manager = Instance(DatasetManager, allow_none=False)
_surface_grid = Instance(tvtk.PointSet, allow_none=False)
stiff_elements = Bool
_first_time_stiff_elements = Bool(True)
_mesh_quality_filter = Instance(tvtk.MeshQuality, args=(), allow_none=False)
tet_quality_measure = Enum('Edge ratio', 'Aspect ratio', 'Radius ratio', 'Min angle', 'Frobenius norm', 'Volume')
triangle_quality_measure = Enum('Edge ratio', 'Aspect ratio', 'Radius ratio', 'Min angle', 'Frobenius norm')
quad_quality_measure = Enum('Edge ratio', 'Aspect ratio', 'Radius ratio', 'Min angle', 'Med Frobenius norm', 'Max Frobenius norm')
hex_quality_measure = Enum('Edge ratio')
tet_stats = triangle_stats = quad_stats = hex_stats = Tuple(Float,Float,Float,Float,Float)
######################################################################
# The view.
######################################################################
stats_editor = TupleEditor(labels=['Minimum','Average','Maximum','Variance','Number of cells'])
traits_view = \
View(
Item(name='stiff_elements'),
Group(
Group(
Item(name='tet_quality_measure'),
Item(name='tet_stats', style='custom', editor=stats_editor),
label='Tetrahedron quality measure',
show_labels=False,
show_border=True,
),
Group(
Item(name='triangle_quality_measure'),
Item(name='triangle_stats', style='custom', editor=stats_editor),
label='Triangle quality measure',
show_labels=False,
show_border=True,
),
Group(
Item(name='quad_quality_measure'),
Item(name='quad_stats', style='custom', editor=stats_editor),
label='Quadrilateral quality measure',
show_labels=False,
show_border=True,
),
Group(
Item(name='hex_quality_measure'),
Item(name='hex_stats', style='custom', editor=stats_editor),
label='Hexahedron quality measure',
show_labels=False,
show_border=True,
),
enabled_when='stiff_elements==False',
show_border=False
),
width=300
)
"""
Hexahedron quality measure - EdgeRatio
Quadrilateral quality measure - EdgeRatio
- AspectRatio
- RadiusRatio
- MinAngle
- MedFrobeniusNorm
- MaxFrobeniusNorm
Tetrahedron quality measure - EdgeRatio
- AspectRatio
- RadiusRatio
- MinAngle
- FrobeniusNorm
- Volume (compatibility mode on/off)
Triangle quality measure - EdgeRatio
- AspectRatio
- RadiusRatio
- MinAngle
- FrobeniusNorm
"""
######################################################################
# `Filter` interface.
######################################################################
def update_pipeline(self):
if len(self.inputs) == 0 or len(self.inputs[0].outputs) == 0:
return
self._mesh_quality_filter.set_input(self.inputs[0].outputs[0])
for cell_type in ['tet', 'triangle', 'quad', 'hex']:
self._change_quality_measure(cell_type, 'Edge ratio')
self._mesh_quality_filter.update()
self._set_outputs([self._mesh_quality_filter.output])
def update_data(self):
self.data_changed = True
######################################################################
# Non-public interface.
######################################################################
def _stiff_elements_changed(self, value):
if (value):
self._find_stiff_elements()
self._set_outputs([self._surface_grid])
else:
self._set_outputs([self._mesh_quality_filter.output])
def _tet_quality_measure_changed(self, value):
self._change_quality_measure('tet', value)
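# Sketch: the view above exposes quality measures for all four cell types,
# but only the tet handler was wired up; these mirror its pattern.
def _triangle_quality_measure_changed(self, value):
self._change_quality_measure('triangle', value)
def _quad_quality_measure_changed(self, value):
self._change_quality_measure('quad', value)
def _hex_quality_measure_changed(self, value):
self._change_quality_measure('hex', value)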
def _change_quality_measure(self, cell_type, quality_measure):
if (quality_measure == 'Volume'):
self._mesh_quality_filter.compatibility_mode = True
self._mesh_quality_filter.volume = True
else:
self._mesh_quality_filter.compatibility_mode = False
self._mesh_quality_filter.volume = False
quality_measure_names = {'Edge ratio':'edge_ratio', 'Aspect ratio':'aspect_ratio', 'Radius ratio':'radius_ratio',
'Min angle':'min_angle', 'Frobenius norm':'frobenius_norm', 'Max Frobenius norm':'max_frobenius_norm',
'Med Frobenius norm':'med_frobenius_norm'}
measure_name = quality_measure_names.get(quality_measure)
if measure_name is not None: # 'Volume' is handled via compatibility_mode above
    setattr(self._mesh_quality_filter, '%s_quality_measure' % cell_type, measure_name)
self._mesh_quality_filter.update()
self._set_outputs([self._mesh_quality_filter.output])
cell_type_fullnames = {'tet':'Tetrahedron', 'triangle':'Triangle', 'quad':'Quadrilateral', 'hex':'Hexahedron'}
stats = self._mesh_quality_filter.output.field_data.get_array('Mesh %s Quality' %cell_type_fullnames.get(cell_type))[0]
setattr(self, '%s_stats' %cell_type, stats)
def _find_stiff_elements(self):
if (self._first_time_stiff_elements):
self._first_time_stiff_elements = False
self._surface_grid = type(self.inputs[0].outputs[0])()
self._surface_grid.copy_structure(self.inputs[0].outputs[0])
if (self._surface_grid.points.data_type == 'double'):
# tvtk.DataSetSurfaceFilter produces surface points as float
# so we need to convert input points from doubles to floats
# if necessary
float_points = array(self._surface_grid.points, 'float32')
self._surface_grid.points = float_points
self._dataset_manager = DatasetManager(dataset=self._surface_grid)
surface_points = self._get_surface_points()
self._add_stiff_elements(surface_points)
def _get_surface_points(self):
surface_filter = tvtk.DataSetSurfaceFilter()
surface_filter.set_input(self._surface_grid)
surface_filter.update()
return surface_filter.output.points
def _add_stiff_elements(self, surface_points):
stiff_elements = []
surface_points = Set(surface_points)
for i in range(self._surface_grid.number_of_cells):
cell = self._surface_grid.get_cell(i)
points = Set(cell.points)
val = int(points.issubset(surface_points))
stiff_elements.append(val)
array_name = 'Stiff Elements'
self._dataset_manager.add_array(array(stiff_elements), array_name, 'cell')
self._dataset_manager.activate(array_name, 'cell')
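# Example (sketch): attaching the filter in a running Mayavi session. `src`
# is assumed to be an unstructured-grid source already on the engine; the
# add_child/add_module calls follow the enthought.mayavi pipeline
# conventions used by the imports above.
#
# from enthought.mayavi.modules.surface import Surface
# diag = MeshDiagnostics()
# src.add_child(diag)
# diag.add_module(Surface())   # color cells by the selected quality array
# diag.stiff_elements = True   # switch to stiff-element detection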
|
rjferrier/fluidity
|
mayavi/mayavi_amcg/filters/mesh_diagnostics.py
|
Python
|
lgpl-2.1
| 8,416
|
[
"Mayavi",
"VTK"
] |
df608c818a501cc611860a72ba48c246043ab38da9fcaf2cfe8be11369737ad5
|
import ast
class ParentNodeTransformer(object):
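"""Walks an AST and annotates every node with links to its parent:
`parent` (the enclosing node), `parents` (all parents recorded so far),
`parent_field` (the parent's field holding this node) and
`parent_field_index` (the index within that field, for list fields)."""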
def visit(self, node):
if not hasattr(node, 'parent'):
node.parent = None
node.parents = []
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for index, item in enumerate(value):
if isinstance(item, ast.AST):
self.visit(item)
self._set_parent_fields(item, node, field, index)
elif isinstance(value, ast.AST):
self.visit(value)
self._set_parent_fields(value, node, field)
return node
def _set_parent_fields(self, node, parent, field, index=None):
node.parent = parent
node.parents.append(parent)
node.parent_field = field
node.parent_field_index = index
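# Example (sketch): decorate a tree with parent links and inspect them.
if __name__ == '__main__':
    tree = ParentNodeTransformer().visit(ast.parse('x = 1 + 2'))
    binop = tree.body[0].value            # the BinOp of the Assign
    assert binop.parent is tree.body[0]   # parent is the Assign statement
    assert binop.parent_field == 'value'  # reached through its 'value' field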
|
librallu/RICM4Projet
|
parser/astmonkey/astmonkey-0.1.1/build/lib/astmonkey/transformers.py
|
Python
|
gpl-3.0
| 852
|
[
"VisIt"
] |
2d2fa552d2793143b3e76e386ed928a4d18c3011598b6414927615b1a5ef962f
|
"""
Simulation tools for generating fake images
"""
import numpy as np
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
def rotate_CD_matrix(cd, pa_aper):
"""Rotate CD matrix
Parameters
----------
cd: (2,2) array
CD matrix
pa_aper: float
Position angle, in degrees E from N, of y axis of the detector
Returns
-------
cd_rot: (2,2) array
Rotated CD matrix
Comments
--------
`astropy.wcs.WCS.rotateCD` doesn't work for non-square pixels in that it
doesn't preserve the pixel scale! The bug seems to come from the fact
that `rotateCD` assumes a transposed version of its own CD matrix.
For example:
>>> import astropy.wcs as pywcs
>>>
>>> ## Nominal rectangular WFC3/IR pixel
>>> cd_wfc3 = np.array([[ 2.35945978e-05, 2.62448998e-05],
>>> [ 2.93050803e-05, -2.09858771e-05]])
>>>
>>> ## Square pixel
>>> cd_square = np.array([[0.1/3600., 0], [0, 0.1/3600.]])
>>>
>>> for cd, label in zip([cd_wfc3, cd_square], ['WFC3/IR', 'Square']):
>>> wcs = pywcs.WCS()
>>> wcs.wcs.cd = cd
>>> wcs.rotateCD(45.)
>>> print '%s pixel: pre=%s, rot=%s' %(label,
>>> np.sqrt((cd**2).sum(axis=0))*3600,
>>> np.sqrt((wcs.wcs.cd**2).sum(axis=0))*3600)
WFC3/IR pixel: pre=[ 0.1354 0.121 ], rot=[ 0.1282 0.1286]
Square pixel: pre=[ 0.1 0.1], rot=[ 0.1 0.1]
"""
rad = np.deg2rad(-pa_aper)
mat = np.zeros((2,2))
mat[0,:] = np.array([np.cos(rad),-np.sin(rad)])
mat[1,:] = np.array([np.sin(rad),np.cos(rad)])
cd_rot = np.dot(mat, cd)
return cd_rot
def niriss_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589,
filter='F150W', grism='GR150R'):
"""Make JWST/NIRISS image header
Parameters
----------
ra, dec: float, float
Coordinates of the center of the image
pa_aper: float
Position angle of the y-axis of the detector
filter: str
Blocking filter to use.
grism: str
Grism to use
Returns
--------
h: astropy.io.fits.Header
FITS header with appropriate keywords
wcs: astropy.wcs.WCS
WCS specification (computed from keywords in `h`).
Comments
--------
NIRISS: 0.065"/pix, requires filter & grism specification
"""
naxis = 2048, 2048
crpix = 1024, 1024
cd = np.array([[ -0.0658, 0], [0, 0.0654]])/3600.
cd_rot = rotate_CD_matrix(cd, pa_aper)
h = pyfits.Header()
h['CRVAL1'] = ra
h['CRVAL2'] = dec
h['WCSAXES'] = 2
h['CTYPE1'] = 'RA---TAN'
h['CTYPE2'] = 'DEC--TAN'
for i in range(2):
h['NAXIS%d' %(i+1)] = naxis[i]
h['CRPIX%d' %(i+1)] = crpix[i]
h['CDELT%d' %(i+1)] = 1.0
for j in range(2):
h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]
### Backgrounds
# http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf
bg = {'F090W':0.50, 'F115W':0.47, 'F140M':0.23, 'F150W':0.48, 'F158M':0.25, 'F200W':0.44}
h['BACKGR'] = bg[filter], 'Total, e/s'
h['FILTER'] = filter
h['INSTRUME'] = 'NIRISS'
h['READN'] = 6 , 'Rough, per pixel per 1 ks exposure' # e/pix/per
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
if grism == 'GR150R':
h['GRISM'] = 'GR150R', 'Spectral trace along X'
else:
h['GRISM'] = 'GR150C', 'Spectral trace along Y'
wcs = pywcs.WCS(h)
h['EXTVER'] = 1
return h, wcs
def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589,
filter='F444W', grism='DFSR'):
"""Make JWST/NIRCAM image header
Parameters
----------
ra, dec: float, float
Coordinates of the center of the image
pa_aper: float
Position angle of the y-axis of the detector
filter: str
Blocking filter to use.
grism: str
Grism to use
Returns
--------
h: astropy.io.fits.Header
FITS header with appropriate keywords
wcs: astropy.wcs.WCS
WCS specification (computed from keywords in `h`).
Comments
--------
NIRCAM, 0.0648"/pix, requires filter specification
"""
naxis = 2048, 2048
crpix = 1024, 1024
cd = np.array([[ -0.0648, 0], [0, 0.0648]])/3600.
cd_rot = rotate_CD_matrix(cd, pa_aper)
h = pyfits.Header()
h['CRVAL1'] = ra
h['CRVAL2'] = dec
h['WCSAXES'] = 2
h['CTYPE1'] = 'RA---TAN'
h['CTYPE2'] = 'DEC--TAN'
for i in range(2):
h['NAXIS%d' %(i+1)] = naxis[i]
h['CRPIX%d' %(i+1)] = crpix[i]
h['CDELT%d' %(i+1)] = 1.0
for j in range(2):
h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]
### Backgrounds
# http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf
bg = {'F277W':0.30, 'F356W':0.90, 'F444W': 3.00, 'F322W2':1.25, 'F430M':0.65, 'F460M':0.86, 'F410M':0.5} # F410M is a hack, no number
h['BACKGR'] = bg[filter], 'Total, e/s'
h['FILTER'] = filter
h['INSTRUME'] = 'NIRCam'
h['READN'] = 9, 'Rough, per pixel per 1 ks exposure' # e/pix/per
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
if grism == 'DFSR':
h['GRISM'] = 'DFSR', 'Spectral trace along X'
else:
h['GRISM'] = 'DFSC', 'Spectral trace along Y'
wcs = pywcs.WCS(h)
h['EXTVER'] = 1
return h, wcs
def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589,
flt='ibhj34h6q_flt.fits', filter='G141'):
"""Make HST/WFC3-IR image header
Parameters
----------
ra, dec: float, float
Coordinates of the center of the image
pa_aper: float
Position angle of the y-axis of the detector
flt: str
Filename of a WFC3/IR FLT file that will be used to provide the
SIP geometric distortion keywords.
filter: str
Grism/filter to use.
Returns
--------
h: astropy.io.fits.Header
FITS header with appropriate keywords
wcs: astropy.wcs.WCS
WCS specification (computed from keywords in `h`).
Comments
--------
WFC3 IR, requires reference FLT file for the SIP header
"""
import numpy as np
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
im = pyfits.open(flt)
wcs = pywcs.WCS(im[1].header, relax=True)
thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180
wcs.wcs.crval = np.array([ra, dec])
### Rotate the CD matrix
theta = im[1].header['PA_APER'] - pa_aper
cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)
wcs.wcs.cd = cd_rot
h = wcs.to_header(relax=True)
for i in [1,2]:
for j in [1,2]:
h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]
h.remove('PC%d_%d' %(i,j))
h['BACKGR'] = 1.
h['FILTER'] = filter
h['INSTRUME'] = 'WFC3'
h['READN'] = im[0].header['READNSEA']
h['NAXIS1'] = h['NAXIS2'] = 1014
h['DETECTOR'] = 'IR'
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
return h, wcs
def wfirst_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, naxis=(4096,4096)):
"""Make WFIRST WFI header
Parameters
----------
ra, dec: float, float
Coordinates of the center of the image
pa_aper: float
Position angle of the y-axis of the detector
naxis: (int,int)
Image dimensions
Returns
--------
h: astropy.io.fits.Header
FITS header with appropriate keywords
wcs: astropy.wcs.WCS
WCS specification (computed from keywords in `h`).
Comments
--------
WFIRST GRS Grism
Current aXe config file has no field dependence, so field size can be
anything you want in `naxis`.
"""
#naxis = 2048, 2048
crpix = naxis[0]/2., naxis[1]/2.
cd = np.array([[ -0.11, 0], [0, 0.11]])/3600.
cd_rot = rotate_CD_matrix(cd, pa_aper)
h = pyfits.Header()
h['CRVAL1'] = ra
h['CRVAL2'] = dec
h['WCSAXES'] = 2
h['CTYPE1'] = 'RA---TAN'
h['CTYPE2'] = 'DEC--TAN'
for i in range(2):
h['NAXIS%d' %(i+1)] = naxis[i]
h['CRPIX%d' %(i+1)] = crpix[i]
h['CDELT%d' %(i+1)] = 1.0
for j in range(2):
h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]
h['BACKGR'] = 0.17+0.49, 'Total, e/s SDT Report A-1'
h['FILTER'] = 'GRS', 'WFIRST grism'
h['INSTRUME'] = 'WFIRST'
h['READN'] = 17, 'SDT report Table 3-3' # e/pix/per
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
wcs = pywcs.WCS(h)
h['EXTVER'] = 1
return h, wcs
def make_fake_image(header, output='direct.fits', background=None, exptime=1.e4, nexp=10):
"""Use the header from NIRISS, WFC3/IR or WFIRST and make an 'FLT' image that `grizli` can read as a reference.
Parameters
----------
header: astropy.io.fits.Header
Header created by one of the generating functions, such as
`niriss_header`.
output: str
Filename of the output FITS file. Will have extensions 'SCI', 'ERR',
and 'DQ'. The 'ERR' extension is populated with a read-noise +
background error model using
>>> var = nexp*header['READN'] + background*exptime
The 'SCI' extension is filled with gaussian deviates with standard
deviation `sqrt(var)`.
The 'DQ' extension is filled with (int) zeros.
background: None or float
Background value to use for sky noise. If None, then read from
`header['BACKGR']`.
exptime: float
Exposure time to use for background sky noise.
nexp: int
Number of exposures to use for read noise.
Returns
-------
Nothing; outputs saved in `output` FITS file.
"""
hdu = pyfits.HDUList()
header['EXPTIME'] = exptime
header['NEXP'] = nexp
header['BUNIT'] = 'ELECTRONS/S'
hdu.append(pyfits.PrimaryHDU(header=header))
naxis = (header['NAXIS1'], header['NAXIS2'])
for name, dtype in zip(['SCI', 'ERR', 'DQ'],
[np.float32, np.float32, np.int32]):
hdu.append(pyfits.ImageHDU(header=header,
data=np.zeros(np.array(naxis).T,
dtype=dtype), name=name))
if background is None:
background = header['BACKGR']
header['BACKGR'] = background
### Simple error model of read noise and sky background
var = nexp*header['READN'] + background*exptime
### electrons / s
rms = np.sqrt(var)/exptime
hdu['ERR'].data += rms
hdu['SCI'].data = np.random.normal(size=np.array(naxis).T)*rms
hdu.writeto(output, clobber=True, output_verify='fix')
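# Example (sketch): generate a NIRISS header and a matching fake FLT file;
# the filter/grism values are among those handled by `niriss_header` above,
# and the output filename is illustrative.
if __name__ == '__main__':
    h, wcs = niriss_header(filter='F150W', grism='GR150R')
    make_fake_image(h, output='niriss_direct.fits', exptime=1.e4, nexp=10)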
|
albertfxwang/grizli
|
grizli/fake_image.py
|
Python
|
mit
| 11,308
|
[
"Gaussian"
] |
15002d72cf49fc2101b42b81cd9b4b7820640a0453b292fb2937354aab8b212a
|
#!/usr/bin/python
import sys
import numpy as np
import irtk
import os
from glob import glob
from lib.BundledSIFT import *
import scipy.ndimage as nd
import cv2 # used below for ellipse fitting and drawing
import argparse
# filename = sys.argv[1]
# ga = float(sys.argv[2])
# output_mask = sys.argv[3]
parser = argparse.ArgumentParser(
description='Slice-by-slice detection of fetal brain MRI (3D).' )
parser.add_argument( "filename", type=str )
parser.add_argument( "ga", type=float )
parser.add_argument( "output_mask", type=str )
parser.add_argument( '--debug', action="store_true", default=False )
parser.add_argument( '--fold', type=str, default='0' )
args = parser.parse_args()
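# Example invocation (sketch; paths are illustrative):
#   python fetalMask_detection.py scan.nii 29.0 masks/brain_mask.nii --fold 0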
print args
filename = args.filename
ga = args.ga
output_mask = args.output_mask
output_dir = os.path.dirname( output_mask )
if output_dir != '' and not os.path.exists(output_dir):
os.makedirs(output_dir)
if output_dir == '':
output_dir = '.'
if os.environ['USER'] == "kevin":
raw_folder = "/home/kevin/Imperial/PhD/DATASETS/Originals/"
vocabulary = "/home/kevin/Imperial/PhD/MyPHD/Detection/BOW/pipeline2/LEARNING/vocabulary_"+args.fold+".npy"
mser_detector = "/home/kevin/Imperial/PhD/MyPHD/Detection/BOW/pipeline2/LEARNING/mser_detector_"+args.fold+"_linearSVM"
ga_file = "/home/kevin/Imperial/PhD/MyPHD/Detection/BOW/pipeline2/LEARNING/metadata/ga.csv"
else:
raw_folder = "/vol/biomedic/users/kpk09/DATASETS/Originals"
vocabulary = "/vol/biomedic/users/kpk09/pipeline2/LEARNING/vocabulary_"+args.fold+".npy"
mser_detector = "/vol/biomedic/users/kpk09/pipeline2/LEARNING/mser_detector_"+args.fold+"_linearSVM"
ga_file = "/vol/biomedic/users/kpk09/pipeline2/LEARNING/metadata/ga.csv"
print"Detect MSER regions"
detections = []
NEW_SAMPLING = 0.8
img = irtk.imread(filename, dtype="float32").saturate().rescale()
image_regions = detect_mser( filename,
ga,
vocabulary,
mser_detector,
NEW_SAMPLING,
DEBUG=args.debug,
output_folder=output_dir,
return_image_regions=True)
# flatten list
# http://stackoverflow.com/questions/406121/flattening-a-shallow-list-in-python
import itertools
chain = itertools.chain(*image_regions)
image_regions = list(chain)
print "Fit cube using RANSAC"
def convert_input(image_regions):
detections = []
for ellipse_center, c in image_regions:
x,y,z = map(float, ellipse_center)
center = [x*NEW_SAMPLING,
y*NEW_SAMPLING,
z*img.header['pixelSize'][2]]
region = np.hstack( (c, [[z]]*c.shape[0]) ).astype('float32')
region[:,0] *= NEW_SAMPLING
region[:,1] *= NEW_SAMPLING
region[:,2] *= img.header['pixelSize'][2]
detections.append((center, region))
return detections
detections = convert_input(image_regions)
print detections
(center, u, ofd), inliers = ransac_ellipses( detections,
ga,
nb_iterations=1000,
model="box",
return_indices=True )
print "initial mask"
mask = irtk.zeros(img.resample2D(NEW_SAMPLING, interpolation='nearest').get_header(),
dtype='uint8')
for i in inliers:
(x,y,z), c = image_regions[i]
mask[z,c[:,1],c[:,0]] = 1
mask = mask.resample2D(img.header['pixelSize'][0], interpolation='nearest' )
# ellipse mask
ellipse_mask = irtk.zeros(img.resample2D(NEW_SAMPLING, interpolation='nearest').get_header(), dtype='uint8')
for i in inliers:
(x,y,z), c = image_regions[i]
ellipse = cv2.fitEllipse(np.reshape(c, (c.shape[0],1,2) ).astype('int32'))
tmp_img = np.zeros( (ellipse_mask.shape[1],ellipse_mask.shape[2]), dtype='uint8' )
cv2.ellipse( tmp_img, (ellipse[0],
(ellipse[1][0],ellipse[1][1]),
ellipse[2]) , 1, thickness=-1)
ellipse_mask[z][tmp_img > 0] = 1
ellipse_mask = ellipse_mask.resample2D(img.header['pixelSize'][0], interpolation='nearest')
#irtk.imwrite(output_dir + "/ellipse_mask.nii", ellipse_mask )
mask[ellipse_mask == 1] = 1
# fill holes, close and dilate
disk_close = irtk.disk( 5 )
disk_dilate = irtk.disk( 2 )
for z in xrange(mask.shape[0]):
mask[z] = nd.binary_fill_holes( mask[z] )
mask[z] = nd.binary_closing( mask[z], disk_close )
mask[z] = nd.binary_dilation( mask[z], disk_dilate )
neg_mask = np.ones(mask.shape, dtype='uint8')*2
# irtk.imwrite(output_dir + "/mask.nii", mask )
# irtk.imwrite(output_dir + "/mask.vtk", mask )
#x,y,z = img.WorldToImage(center)
x,y,z = center
x = int(round( x / img.header['pixelSize'][0] ))
y = int(round( y / img.header['pixelSize'][1] ))
z = int(round( z / img.header['pixelSize'][2] ))
w = h = int(round( ofd / img.header['pixelSize'][0]))
d = int(round( ofd / img.header['pixelSize'][2]))
print z,y,x
print w,h,d
# cropped = img[max(0,z-d/2):min(img.shape[0],z+d/2+1),
# max(0,y-h/2):min(img.shape[1],y+h/2+1),
# max(0,x-w/2):min(img.shape[2],x+w/2+1)]
#irtk.imwrite(output_dir + "/cropped.nii", cropped )
#irtk.imwrite(output_dir + "/cropped.vtk", cropped )
neg_mask[max(0,z-d/2):min(img.shape[0],z+d/2+1),
max(0,y-h/2):min(img.shape[1],y+h/2+1),
max(0,x-w/2):min(img.shape[2],x+w/2+1)] = 0
mask[neg_mask>0] = 2
print mask, mask.max()
irtk.imwrite(output_mask, mask )
|
ghisvail/irtk-legacy
|
wrapping/cython/scripts/fetalMask_detection.py
|
Python
|
bsd-3-clause
| 5,528
|
[
"VTK"
] |
2e7e24ae611527f126e13dfd9a5c936df863449d7724befa751e98e0a6237fde
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2012 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module with the always_seq decorator. """
from __future__ import absolute_import
import sys
import inspect
from types import FunctionType
import ast
from myhdl import AlwaysError, intbv
from myhdl._util import _isGenFunc, _dedent
from myhdl._cell_deref import _cell_deref
from myhdl._delay import delay
from myhdl._Signal import _Signal, _WaiterList, _isListOfSigs
from myhdl._Waiter import _Waiter, _EdgeWaiter, _EdgeTupleWaiter
from myhdl._always import _Always
from myhdl._resolverefs import _AttrRefTransformer
from myhdl._visitors import _SigNameVisitor
# evacuate this later
AlwaysSeqError = AlwaysError
class _error:
pass
_error.EdgeType = "first argument should be an edge"
_error.ResetType = "reset argument should be a ResetSignal"
_error.ArgType = "decorated object should be a classic (non-generator) function"
_error.NrOfArgs = "decorated function should not have arguments"
_error.SigAugAssign = "signal assignment does not support augmented assignment"
_error.EmbeddedFunction = "embedded functions in always_seq function not supported"
class ResetSignal(_Signal):
def __init__(self, val, active, async):
""" Construct a ResetSignal.
This is to be used in conjunction with the always_seq decorator,
as the reset argument.
"""
_Signal.__init__(self, bool(val))
self.active = bool(active)
self.async = async
def always_seq(edge, reset):
if not isinstance(edge, _WaiterList):
raise AlwaysSeqError(_error.EdgeType)
edge.sig._read = True
edge.sig._used = True
if reset is not None:
if not isinstance(reset, ResetSignal):
raise AlwaysSeqError(_error.ResetType)
reset._read = True
reset._used = True
def _always_seq_decorator(func):
if not isinstance(func, FunctionType):
raise AlwaysSeqError(_error.ArgType)
if _isGenFunc(func):
raise AlwaysSeqError(_error.ArgType)
if func.__code__.co_argcount > 0:
raise AlwaysSeqError(_error.NrOfArgs)
return _AlwaysSeq(func, edge, reset)
return _always_seq_decorator
class _AlwaysSeq(_Always):
def __init__(self, func, edge, reset):
senslist = [edge]
self.reset = reset
if reset is not None:
self.genfunc = self.genfunc_reset
active = self.reset.active
async = self.reset.async
if async:
if active:
senslist.append(reset.posedge)
else:
senslist.append(reset.negedge)
else:
self.genfunc = self.genfunc_no_reset
super(_AlwaysSeq, self).__init__(func, senslist)
# find symdict
# similar to always_comb, but in class constructor
varnames = func.__code__.co_varnames
symdict = {}
for n, v in func.__globals__.items():
if n not in varnames:
symdict[n] = v
# handle free variables
if func.__code__.co_freevars:
for n, c in zip(func.__code__.co_freevars, func.__closure__):
try:
obj = _cell_deref(c)
symdict[n] = obj
except NameError:
raise NameError(n)
self.symdict = symdict
# now infer outputs to be reset
s = inspect.getsource(func)
s = _dedent(s)
tree = ast.parse(s)
# print ast.dump(tree)
v = _AttrRefTransformer(self)
v.visit(tree)
v = _SigNameVisitor(self.symdict)
v.visit(tree)
if v.results['inout']:
raise AlwaysSeqError(_error.SigAugAssign, v.results['inout'])
if v.results['embedded_func']:
raise AlwaysSeqError(_error.EmbeddedFunction)
sigregs = self.sigregs = []
varregs = self.varregs = []
for n in v.results['output']:
reg = self.symdict[n]
if isinstance(reg, _Signal):
sigregs.append(reg)
elif isinstance(reg, intbv):
varregs.append((n, reg, int(reg)))
else:
assert _isListOfSigs(reg)
for e in reg:
sigregs.append(e)
def reset_sigs(self):
for s in self.sigregs:
s.next = s._init
def reset_vars(self):
for v in self.varregs:
# only intbv's for now
n, reg, init = v
reg._val = init
def genfunc_reset(self):
senslist = self.senslist
if len(senslist) == 1:
senslist = senslist[0]
reset_sigs = self.reset_sigs
reset_vars = self.reset_vars
func = self.func
while 1:
yield senslist
if self.reset == self.reset.active:
reset_sigs()
reset_vars()
else:
func()
def genfunc_no_reset(self):
senslist = self.senslist
assert len(senslist) == 1
senslist = senslist[0]
func = self.func
while 1:
yield senslist
func()
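# Example (sketch): a resettable counter built with the decorator above.
# Signal and intbv come from the public myhdl API; the reset behaviour
# follows the ResetSignal class defined in this module.
#
# from myhdl import Signal, intbv
#
# def counter(clk, rst, count):
#     @always_seq(clk.posedge, reset=rst)
#     def logic():
#         count.next = count + 1
#     return logic
#
# clk = Signal(bool(0))
# rst = ResetSignal(0, active=1, async=True)
# count = Signal(intbv(0)[8:])
# inst = counter(clk, rst, count)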
|
gw0/myhdl
|
myhdl/_always_seq.py
|
Python
|
lgpl-2.1
| 6,007
|
[
"VisIt"
] |
d2a2c51b7ec30e02a726d68cde451f5304f06ff49915d9eea260944a54b5a9e7
|
#!/usr/bin/env python
import sys
import spglib
import numpy as np
#######################################
# Uncomment to use Atoms class in ASE #
#######################################
# from ase import Atoms
#######################################################################
# Using the local Atoms-like class (BSD license), which supports only a #
# small subset of the ASE Atoms features, but enough for this example. #
#######################################################################
from atoms import Atoms
def show_symmetry(symmetry):
for i in range(symmetry['rotations'].shape[0]):
print(" --------------- %4d ---------------" % (i + 1))
rot = symmetry['rotations'][i]
trans = symmetry['translations'][i]
print(" rotation:")
for x in rot:
print(" [%2d %2d %2d]" % (x[0], x[1], x[2]))
print(" translation:")
print(" (%8.5f %8.5f %8.5f)" % (trans[0], trans[1], trans[2]))
def show_lattice(lattice):
print("Basis vectors:")
for vec, axis in zip(lattice, ("a", "b", "c")):
print("%s %10.5f %10.5f %10.5f" % (tuple(axis,) + tuple(vec)))
def show_cell(lattice, positions, numbers):
show_lattice(lattice)
print("Atomic points:")
for p, s in zip(positions, numbers):
print("%2d %10.5f %10.5f %10.5f" % ((s,) + tuple(p)))
silicon_ase = Atoms(symbols=['Si'] * 8,
cell=[(4, 0, 0),
(0, 4, 0),
(0, 0, 4)],
scaled_positions=[(0, 0, 0),
(0, 0.5, 0.5),
(0.5, 0, 0.5),
(0.5, 0.5, 0),
(0.25, 0.25, 0.25),
(0.25, 0.75, 0.75),
(0.75, 0.25, 0.75),
(0.75, 0.75, 0.25)],
pbc=True)
silicon = ([(4, 0, 0),
(0, 4, 0),
(0, 0, 4)],
[(0, 0, 0),
(0, 0.5, 0.5),
(0.5, 0, 0.5),
(0.5, 0.5, 0),
(0.25, 0.25, 0.25),
(0.25, 0.75, 0.75),
(0.75, 0.25, 0.75),
(0.75, 0.75, 0.25)],
[14,] * 8)
silicon_dist = ([(4.01, 0, 0),
(0, 4, 0),
(0, 0, 3.99)],
[(0.001, 0, 0),
(0, 0.5, 0.5),
(0.5, 0, 0.5),
(0.5, 0.5, 0),
(0.25, 0.25, 0.251),
(0.25, 0.75, 0.75),
(0.75, 0.25, 0.75),
(0.75, 0.75, 0.25)],
[14,] * 8)
silicon_prim = ([(0, 2, 2),
(2, 0, 2),
(2, 2, 0)],
[(0, 0, 0),
(0.25, 0.25, 0.25)],
[14, 14])
rutile = ([(4, 0, 0),
(0, 4, 0),
(0, 0, 3)],
[(0, 0, 0),
(0.5, 0.5, 0.5),
(0.3, 0.3, 0.0),
(0.7, 0.7, 0.0),
(0.2, 0.8, 0.5),
(0.8, 0.2, 0.5)],
[14, 14, 8, 8, 8, 8])
rutile_dist = ([(3.97, 0, 0),
(0, 4.03, 0),
(0, 0, 3.0)],
[(0, 0, 0),
(0.5001, 0.5, 0.5),
(0.3, 0.3, 0.0),
(0.7, 0.7, 0.002),
(0.2, 0.8, 0.5),
(0.8, 0.2, 0.5)],
[14, 14, 8, 8, 8, 8])
a = 3.07
c = 3.52
MgB2 = ([(a, 0, 0),
(-a/2, a/2*np.sqrt(3), 0),
(0, 0, c)],
[(0, 0, 0),
(1.0/3, 2.0/3, 0.5),
(2.0/3, 1.0/3, 0.5)],
[12, 5, 5])
a = [3., 0., 0.]
b = [-3.66666667, 3.68178701, 0.]
c = [-0.66666667, -1.3429469, 1.32364995]
niggli_lattice = np.array([a, b, c])
# For VASP case
# import vasp
# bulk = vasp.read_vasp(sys.argv[1])
print("[get_spacegroup]")
print(" Spacegroup of Silicon is %s." % spglib.get_spacegroup(silicon))
print('')
print("[get_spacegroup]")
print(" Spacegroup of Silicon (ASE Atoms-like format) is %s." %
spglib.get_spacegroup(silicon_ase))
print('')
print("[get_spacegroup]")
print(" Spacegroup of Rutile is %s." % spglib.get_spacegroup(rutile))
print('')
print("[get_spacegroup]")
print(" Spacegroup of MgB2 is %s." % spglib.get_spacegroup(MgB2))
print('')
print("[get_symmetry]")
print(" Symmetry operations of Rutile unitcell are:")
print('')
symmetry = spglib.get_symmetry(rutile)
show_symmetry(symmetry)
print('')
print("[get_symmetry]")
print(" Symmetry operations of MgB2 are:")
print('')
symmetry = spglib.get_symmetry(MgB2)
show_symmetry(symmetry)
print('')
print("[get_pointgroup]")
print(" Pointgroup of Rutile is %s." %
spglib.get_pointgroup(symmetry['rotations'])[0])
print('')
dataset = spglib.get_symmetry_dataset( rutile )
print("[get_symmetry_dataset] ['international']")
print(" Spacegroup of Rutile is %s (%d)." % (dataset['international'],
dataset['number']))
print('')
print("[get_symmetry_dataset] ['pointgroup']")
print(" Pointgroup of Rutile is %s." % (dataset['pointgroup']))
print('')
print("[get_symmetry_dataset] ['hall']")
print(" Hall symbol of Rutile is %s (%d)." % (dataset['hall'],
dataset['hall_number']))
print('')
print("[get_symmetry_dataset] ['wyckoffs']")
alphabet = "abcdefghijklmnopqrstuvwxyz"
print(" Wyckoff letters of Rutile are: ", dataset['wyckoffs'])
print('')
print("[get_symmetry_dataset] ['equivalent_atoms']")
print(" Mapping to equivalent atoms of Rutile are: ")
for i, x in enumerate(dataset['equivalent_atoms']):
print(" %d -> %d" % (i + 1, x + 1))
print('')
print("[get_symmetry_dataset] ['rotations'], ['translations']")
print(" Symmetry operations of Rutile unitcell are:")
for i, (rot,trans) in enumerate(zip(dataset['rotations'],
dataset['translations'])):
print(" --------------- %4d ---------------" % (i + 1))
print(" rotation:")
for x in rot:
print(" [%2d %2d %2d]" % (x[0], x[1], x[2]))
print(" translation:")
print(" (%8.5f %8.5f %8.5f)" % (trans[0], trans[1], trans[2]))
print('')
print("[refine_cell]")
print(" Refine distorted rutile structure")
lattice, positions, numbers = spglib.refine_cell(rutile_dist, symprec=1e-1)
show_cell(lattice, positions, numbers)
print('')
print("[find_primitive]")
print(" Fine primitive distorted silicon structure")
lattice, positions, numbers = spglib.find_primitive(silicon_dist, symprec=1e-1)
show_cell(lattice, positions, numbers)
print('')
print("[standardize_cell]")
print(" Standardize distorted rutile structure:")
print(" (to_primitive=0 and no_idealize=0)")
lattice, positions, numbers = spglib.standardize_cell(rutile_dist,
to_primitive=0,
no_idealize=0,
symprec=1e-1)
show_cell(lattice, positions, numbers)
print('')
print("[standardize_cell]")
print(" Standardize distorted rutile structure:")
print(" (to_primitive=0 and no_idealize=1)")
lattice, positions, numbers = spglib.standardize_cell(rutile_dist,
to_primitive=0,
no_idealize=1,
symprec=1e-1)
show_cell(lattice, positions, numbers)
print('')
print("[standardize_cell]")
print(" Standardize distorted silicon structure:")
print(" (to_primitive=1 and no_idealize=0)")
lattice, positions, numbers = spglib.standardize_cell(silicon_dist,
to_primitive=1,
no_idealize=0,
symprec=1e-1)
show_cell(lattice, positions, numbers)
print('')
print("[standardize_cell]")
print(" Standardize distorted silicon structure:")
print(" (to_primitive=1 and no_idealize=1)")
lattice, positions, numbers = spglib.standardize_cell(silicon_dist,
to_primitive=1,
no_idealize=1,
symprec=1e-1)
show_cell(lattice, positions, numbers)
print('')
symmetry = spglib.get_symmetry(silicon)
print("[get_symmetry]")
print(" Number of symmetry operations of silicon conventional")
print(" unit cell is %d (192)." % len(symmetry['rotations']))
show_symmetry(symmetry)
print('')
symmetry = spglib.get_symmetry_from_database(525)
print("[get_symmetry_from_database]")
print(" Number of symmetry operations of silicon conventional")
print(" unit cell is %d (192)." % len(symmetry['rotations']))
show_symmetry(symmetry)
print('')
reduced_lattice = spglib.niggli_reduce(niggli_lattice)
print("[niggli_reduce]")
print(" Original lattice")
show_lattice(niggli_lattice)
print(" Reduced lattice")
show_lattice(reduced_lattice)
print('')
mapping, grid = spglib.get_ir_reciprocal_mesh([11, 11, 11],
silicon_prim,
is_shift=[0, 0, 0])
num_ir_kpt = len(np.unique(mapping))
print("[get_ir_reciprocal_mesh]")
print(" Number of irreducible k-points of primitive silicon with")
print(" 11x11x11 Monkhorst-Pack mesh is %d (56)." % num_ir_kpt)
print('')
mapping, grid = spglib.get_ir_reciprocal_mesh([8, 8, 8],
rutile,
is_shift=[1, 1, 1])
num_ir_kpt = len(np.unique(mapping))
print("[get_ir_reciprocal_mesh]")
print(" Number of irreducible k-points of Rutile with")
print(" 8x8x8 Monkhorst-Pack mesh is %d (40)." % num_ir_kpt)
print('')
mapping, grid = spglib.get_ir_reciprocal_mesh([9, 9, 8],
MgB2,
is_shift=[0, 0, 1])
num_ir_kpt = len(np.unique(mapping))
print("[get_ir_reciprocal_mesh]")
print(" Number of irreducible k-points of MgB2 with")
print(" 9x9x8 Monkhorst-Pack mesh is %s (48)." % num_ir_kpt)
print('')
|
sauliusg/cod-tools
|
src/externals/spglib/python/examples/example.py
|
Python
|
gpl-2.0
| 10,235
|
[
"ASE",
"VASP"
] |
fb37cc0e278396c7953499995b97eeb69308f24d2bac3797574aa9501e03cdaa
|
#!/usr/bin/env python
#
'''
Example calculation with the native DF-MP2 code.
'''
from pyscf.gto import Mole
from pyscf.scf import RHF
from pyscf.mp.dfmp2_native import DFMP2
mol = Mole()
mol.atom = '''
H 0.0 0.0 0.0
F 0.0 0.0 1.1
'''
mol.basis = 'cc-pVDZ'
mol.spin = 0
mol.build()
mf = RHF(mol).run()
DFMP2(mf).run()
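# Example (sketch): keep a handle on the solver to inspect the result;
# this assumes the usual PySCF convention that post-SCF objects expose
# `e_corr` and `e_tot` after run():
#
# pt = DFMP2(mf).run()
# print('E(MP2 corr) = %.8f  E(total) = %.8f' % (pt.e_corr, pt.e_tot))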
|
sunqm/pyscf
|
examples/mp/10-dfmp2.py
|
Python
|
apache-2.0
| 332
|
[
"PySCF"
] |
c814bcf24f34aa32f8ec49bef6437a34abffe553b2eeb5bae4d9044ff7c2293c
|
#!/usr/bin/env python
##############################################################################
## Filename: PSF_analysis.py
## Version: $Revision$
## Description: Standard star spectrum extraction
## Author: $Author$
## $Id$
##############################################################################
"""Based on 1.86 version of extract_star. This is the same point
source extractor than extract_star, except that all the PSF parameters can
be correlated or free."""
__author__ = "C. Buton, Y. Copin, E. Pecontal"
__version__ = '$Id$'
import os
import optparse
import pyfits # getheader
import pySNIFS
import pySNIFS_fit
import scipy as S
from scipy import linalg as L
from scipy.ndimage import filters as F
import ToolBox.Atmospheric as TA
SpaxelSize = 0.43 # Spaxel size in arcsec
MK_pressure = 616. # Default pressure [mbar]
MK_temp = 2. # Default temperature [C]
# Definitions ================================================================
def print_msg(str, limit):
"""Print message 'str' if verbosity level (opts.verbosity) >= limit."""
if opts.verbosity >= limit:
print str
def read_PT(hdr, update=False):
"""Read pressure and temperature from hdr, and check value consistency."""
pressure = hdr.get('PRESSURE', S.nan)
temp = hdr.get('TEMP', S.nan)
if not 550<pressure<650: # Non-std pressure
if update:
print "WARNING: non-std pressure (%.0f mbar) updated to %.0f mbar" % \
(pressure, MK_pressure)
hdr.update('PRESSURE',MK_pressure,"Default MaunaKea pressure [mbar]")
pressure = MK_pressure
if not -20<temp<20: # Non-std temperature
if update:
print "WARNING: non-std temperature (%.0f C) updated to %.0f C" % \
(temp, MK_temp)
hdr.update('TEMP', MK_temp, "Default MaunaKea temperature [C]")
temp = MK_temp
return pressure,temp
def estimate_parangle(hdr):
"""Estimate parallactic angle [degree] from header keywords."""
from math import sin,cos,pi,sqrt,atan2
d2r = pi/180. # Degree to Radians
# DTCS latitude is probably not the most precise one (see fit_ADR.py)
phi = hdr['LATITUDE']*d2r # Latitude [rad]
sinphi = sin(phi)
cosphi = cos(phi)
try:
ha = hdr['HAMID'] # Hour angle (format: 04:04:52.72)
except KeyError:
ha = hdr['HA'] # Hour angle (format: 04:04:52.72)
try:
dec = hdr['TELDEC'] # Declination (format: 08:23:19.20)
except KeyError:
dec = hdr['DEC'] # Declination (format: 08:23:19.20)
# We neglect position offset (see
# https://projects.lbl.gov/mantis/view.php?id=280 note 773) since offset
# keywords are not universal...
def dec_deg(dec):
"""Convert DEC string (DD:MM:SS.SS) to degrees."""
l = [ float(x) for x in dec.split(':') ]
return l[0] + l[1]/60. + l[2]/3600.
ha = dec_deg(ha)*15*d2r # Hour angle [rad]
dec = dec_deg(dec)*d2r # Declination [rad]
sinha = sin(ha)
cosha = cos(ha)
sindec = sin(dec)
cosdec = cos(dec)
# Zenithal angle (to be compared to dec_deg(hdr['ZD']))
cosdz = sindec*sinphi + cosphi*cosdec*cosha
sindz = sqrt(1. - cosdz**2)
# Parallactic angle (to be compared to hdr['PARANG'])
sineta = sinha*cosphi / sindz
coseta = ( cosdec*sinphi - sindec*cosphi*cosha ) / sindz
eta = atan2(sineta,coseta) # [rad]
return eta/d2r # [deg]
def eval_poly(coeffs, x):
"""Evaluate polynom sum_i ci*x**i on x. It uses 'natural' convention for
polynomial coeffs: [c0,c1...,cn] (opposite to S.polyfit).."""
if S.isscalar(x):
y = 0 # Faster on scalar
for i,c in enumerate(coeffs):
# Incremental computation of x**i is only slightly faster
y += c * x**i
else: # Faster on arrays
y = S.polyval(coeffs[::-1], x) # Beware coeffs order!
return y
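# Example (sketch, doctest-style):
# >>> eval_poly([1., 2., 3.], 2.) # 1 + 2*2 + 3*2**2
# 17.0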
def laplace_filtering(cube, eps=1e-4):
lapl = F.laplace(cube.data/cube.data.mean())
fdata = F.median_filter(cube.data, size=[1, 3])
hist = pySNIFS.histogram(S.ravel(S.absolute(lapl)), nbin=100,
Max=100, cumul=True)
threshold = hist.x[S.argmax(S.where(hist.data<(1-eps), 0, 1))]
print_msg("Laplace filter threshold [eps=%g]: %.2f" % (eps,threshold), 2)
filt = (S.absolute(lapl) <= threshold)
return filt
def polyfit_clip(x, y, deg, clip=3, nitermax=10):
"""Least squares polynomial fit with sigma-clipping (if clip>0). Returns
polynomial coeffs w/ same convention as S.polyfit: [cn,...,c1,c0]."""
good = S.ones(y.shape, dtype='bool')
niter = 0
while True:
niter += 1
coeffs = S.polyfit(x[good], y[good], deg)
old = good
if clip:
dy = S.polyval(coeffs, x) - y
good = S.absolute(dy) < clip*S.std(dy)
if (good==old).all(): break # No more changes, stop there
if niter > nitermax: # Max. # of iter, stop there
print_msg("polyfit_clip reached max. # of iterations: " \
"deg=%d, clip=%.2f x %f, %d px removed" % \
(deg, clip, S.std(dy), len((~old).nonzero()[0])), 2)
break
if y[good].size <= deg+1:
raise ValueError("polyfit_clip: Not enough points left (%d) " \
"for degree %d" % (y[good].size,deg))
return coeffs
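# Example (sketch): fit a line through noisy data while rejecting outliers
# beyond 3 sigma; coefficients come back highest-degree first, as with
# S.polyfit.
# >>> x = S.linspace(0., 1., 100)
# >>> y = 2.*x + 1.; y[50] += 100. # one gross outlier
# >>> polyfit_clip(x, y, 1, clip=3) # ~ array([ 2., 1.])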
def extract_spec(cube, psf_fn, psf_ctes, psf_param, skyDeg=0,
method='psf', radius=5.):
"""Extract object and sky spectra from cube according to PSF (described by
psf_fn, psf_ctes and psf_param) in the presence of sky (polynomial degree
skyDeg) using method ('psf' or 'aperture' or 'optimal'). For aperture
related methods, radius gives aperture radius in arcsec.
Returns Spec,Var where Spec and Var are (nslice,npar+1)."""
if method not in ('psf','aperture','optimal'):
raise ValueError("Extraction method '%s' unrecognized" % method)
if skyDeg < -1:
raise ValueError("skyDeg=%d is invalid (should be >=-1)" % skyDeg)
if (cube.var>1e20).any():
print "WARNING: discarding infinite variances in extract_spec"
cube.var[cube.var>1e20] = 0
if (cube.var<0).any(): # There should be none anymore
print "WARNING: discarding negative variances in extract_spec"
cube.var[cube.var<0] = 0
# The PSF parameters are only the shape parameters. We set the intensity
# of each slice to 1.
param = S.concatenate((psf_param,[1.]*cube.nslice))
# Rejection of bad points (YC: need some clarifications...)
filt = laplace_filtering(cube)
if (~filt).any():
print "WARNING: filtering out %d vx in %d slices in extract_spec" % \
(len((~filt).nonzero()[0]),
len(S.nonzero([ f.any() for f in ~filt ])[0]))
cube.var *= filt # Discard non-selected voxels
# Linear least-squares fit: I*PSF + sky [ + a*x + b*y + ...]
spxSize = psf_ctes[0] # Spaxel size [arcsec]
cube.x = cube.i - 7 # x in spaxel
cube.y = cube.j - 7 # y in spaxel
model = psf_fn(psf_ctes, cube)
psf = model.comp(param, normed=True) # nslice,nlens
npar_sky = int((skyDeg+1)*(skyDeg+2)/2) # Nb param. in polynomial bkgnd
Z = S.zeros((cube.nslice,cube.nlens,npar_sky+1),'d')
Z[:,:,0] = psf # Intensity
if npar_sky: # =0 when no background (skyDeg<=-1)
Z[:,:,1] = 1 # Constant background
n = 2
for d in xrange(1,skyDeg+1):
for j in xrange(d+1):
Z[:,:,n] = cube.x**(d-j) * cube.y**j # Bkgnd polynomials
n += 1 # Finally: n = npar_sky + 1
# Weighting
weight = S.where(cube.var>0, 1/S.sqrt(cube.var), 0) # nslice,nlens
X = (Z.T * weight.T).T # nslice,nlens,npar+1
b = weight*cube.data # nslice,nlens
# The linear least-squares fit could be done directly using
# Spec = S.array([ L.lstsq(xx,bb)[0] for xx,bb in zip(X,b) ])
# but A is needed anyway to compute covariance matrix C=1/A.
# Furthermore, linear resolution
# [ L.solve(aa,bb) for aa,bb in zip(A,B) ]
# can be replace by faster (~x10) matrix product
# [ S.dot(cc,bb) for cc,bb in zip(C,B) ]
# since C=1/A is already available.
# See Numerical Recipes (2nd ed.), sect.15.4
A = S.array([S.dot(xx.T, xx) for xx in X]) # nslice,npar+1,npar+1
B = S.array([S.dot(xx.T, bb) for xx,bb in zip(X,b)]) # nslice,npar+1
try:
C = S.array([L.inv(aa) for aa in A]) # nslice,npar+1,npar+1
except L.LinAlgError:
raise L.LinAlgError("Singular matrix during spectrum extraction")
# Spec & Var = nslice x Star,Sky,[slope_x...]
Spec = S.array([S.dot(cc,bb) for cc,bb in zip(C,B)]) # nslice,npar+1
Var = S.array([S.diag(cc) for cc in C]) # nslice,npar+1
# Now, what about negative sky?
# One could also use an NNLS fit to force parameter non-negativity:
# [ pySNIFS_fit.fnnls(aa,bb)[0] for aa,bb in zip(A,B) ]
# *BUT* 1. it is incompatible w/ non-constant sky (since it will force all
# sky coeffs to >0). This can therefore be done only if skyDeg=0 (it would
# otherwise involve optimization with constraints on sky positivity).
# 2. There is no easy way to estimate covariance matrix from NNLS
# fit. Since an NNLS fit on a negative sky slice would probably always
# lead to a null sky, an NNLS fit is then equivalent to a standard 'PSF'
# fit without sky.
if skyDeg==0:
negSky = Spec[:,1]<0 # Test for presence of negative sky
if negSky.any():
print "WARNING: %d slices w/ sky<0 in extract_spec" % \
(len(negSky.nonzero()[0]))
print_msg(str(cube.lbda[negSky]), 2)
# For slices w/ sky<0, fit only PSF without background
A = S.array([ S.dot(xx,xx) for xx in X[negSky,:,0] ])
B = S.array([ S.dot(xx,bb)
for xx,bb in zip(X[negSky,:,0],b[negSky]) ])
C = 1/A
Spec[negSky,0] = C*B # Linear fit without sky
Spec[negSky,1] = 0 # Set sky to null
Var[negSky,0] = C
Var[negSky,1] = 0
if method=='psf':
return cube.lbda,Spec,Var # Nothing else to be done
# Reconstruct background and subtract it from cube
bkgnd = 0
var_bkgnd = 0
for d in xrange(1,npar_sky+1): # Loop over sky components
bkgnd += (Z[:,:,d].T * Spec[:,d]).T
var_bkgnd += (Z[:,:,d].T**2 * Var[:,d]).T
subData = cube.data - bkgnd # Bkgnd subtraction (nslice,nlens)
subVar = cube.var.copy()
good = cube.var>0
subVar[good] += var_bkgnd[good] # Variance of bkgnd-sub. signal
# Replace invalid data (var=0) by model PSF = Intensity*PSF
if not good.all():
print_msg("Replacing %d vx with modeled signal" % \
len((~good).nonzero()[0]), 1)
subData[~good] = (Spec[:,0]*psf.T).T[~good]
# Plain summation over aperture
# For the moment, a spaxel is either 100% or 0% within the aperture (same
# limitation as quick_extract). Potential development:
# 1. compute spaxel fraction inside the aperture
# 2. extrapolate missing flux if aperture is partially outside FoV
# from PSF fit
# Aperture center [spx] (nslice)
xc = psf_param[2] + \
psf_param[0]*model.ADR_coeff[:,0]*S.cos(psf_param[1])
yc = psf_param[3] + \
psf_param[0]*model.ADR_coeff[:,0]*S.sin(psf_param[1])
# Aperture radius in spaxels
aperRad = radius / spxSize
print_msg("Aperture radius: %.2f arcsec = %.2f spx" % (radius,aperRad), 1)
# Radius [spx] (nslice,nlens)
r = S.hypot((model.x.T - xc).T, (model.y.T - yc).T)
# Circular aperture (nslice,nlens)
# Use r<aperRad[:,S.newaxis] if radius is a (nslice,) vec.
frac = (r < aperRad).astype('float')
# Check if aperture hits the FoV edges
hit = ((xc - aperRad) < -7.5) | ((xc + aperRad) > 7.5) | \
((yc - aperRad) < -7.5) | ((yc + aperRad) > 7.5)
if hit.any():
# Find the closest edge
ld = (xc - aperRad + 7.5).min() # Dist. to left edge (<0 if outside)
rd =-(xc + aperRad - 7.5).max() # Dist. to right edge
bd = (yc - aperRad + 7.5).min() # Dist. to bottom edge
td =-(yc + aperRad - 7.5).max() # Dist. to top edge
cd = -min(ld,rd,bd,td) # Should be positive
ns = int(cd) + 1 # Additional spaxels
print "WARNING: Aperture (r=%.2f spx) hits FoV edges by %.2f spx" % \
(aperRad, cd)
if method=='optimal':
print "WARNING: Model extrapolation outside FoV " \
"not implemented for optimal summation."
if hit.any() and method=='aperture':
# Extrapolate signal from PSF model
print_msg("Signal extrapolation outside FoV...", 1)
# Extend usual range by ns spx on each side
nw = 15 + 2*ns # New FoV size in spaxels
mid = (7 + ns) # FoV center
extRange = S.arange(nw) - mid
extx,exty = S.meshgrid(extRange[::-1],extRange) # nw,nw
extnlens = extx.size # = nlens' = nw**2
print_msg(" Extend FoV by %d spx: nlens=%d -> %d" % \
(ns, model.nlens, extnlens), 1)
# Compute PSF on extended range (nslice,extnlens)
extModel = psf_fn(psf_ctes, cube, coords=(extx,exty)) # Extended model
extPsf = extModel.comp(param, normed=True) # nslice,extnlens
# Embed background-subtracted data in extended model PSF
origData = subData.copy()
origVar = subVar.copy()
subData = (Spec[:,0]*extPsf.T).T # Extended model, nslice,extnlens
subVar = S.zeros((extModel.nslice,extModel.nlens))
for i in xrange(model.nlens):
# Embed original spx i in extended model array by finding
# corresponding index j in new array
j, = ((extModel.x[0]==model.x[0,i]) & \
(extModel.y[0]==model.y[0,i])).nonzero()
subData[:,j[0]] = origData[:,i]
subVar[:,j[0]] = origVar[:,i]
r = S.hypot((extModel.x.T - xc).T, (extModel.y.T - yc).T)
frac = (r < aperRad).astype('float')
if method == 'aperture':
# Replace signal and variance estimates from plain summation
Spec[:,0] = (frac * subData).sum(axis=1)
Var[:,0] = (frac**2 * subVar).sum(axis=1)
return cube.lbda,Spec,Var
if method=='optimal':
# Model signal = Intensity*PSF + bkgnd
modsig = (Spec[:,0]*psf.T).T + bkgnd # nslice,nlens
# One has to have a model of the variance. This can be estimated from
# a simple 'photon noise + RoN' model on each slice: signal ~ alpha*N
# (alpha = 1/flat-field coeff and N = photon counts) and variance ~ (N
# + RoN**2) * alpha**2 = (signal/alpha + RoN**2) * alpha**2 =
# alpha*signal + beta. This model disregards spatial component of
# flat-field, which is supposed to be constant of FoV.
# Model variance = alpha*Signal + beta
coeffs = S.array([ polyfit_clip(modsig[s], cube.var[s], 1, clip=5)
for s in xrange(cube.nslice) ])
coeffs = F.median_filter(coeffs, (5,1)) # A bit of smoothing...
modvar = S.array([ S.polyval(coeffs[s], modsig[s])
for s in xrange(cube.nslice) ]) # nslice,nlens
# Optimal weighting
norm = (frac * psf).sum(axis=1) # PSF norm, nslice
npsf = (psf.T / norm).T # nslice,nlens
weight = frac * npsf / modvar # Unormalized weights, nslice,nlens
norm = (weight * npsf).sum(axis=1) # Weight norm, nslice
weight = (weight.T / norm).T # Normalized weights, nslice,nlens
# Replace signal and variance estimates from optimal summation
Spec[:,0] = (weight * subData).sum(axis=1)
Var[:,0] = (weight**2 * subVar).sum(axis=1)
return cube.lbda,Spec,Var
def psfVariance(cube, full_cube, psfFn, psfCtes, fitpar, cov, slices):
"""Compute Variance due to the psf parameters dispersion."""
def derivIntensity(cube, psfFn, psfCtes, fitpar):
"""Derive intensity by each psf parameter."""
model = psfFn(psfCtes, cube)
psf = model.comp(fitpar[:model.npar]) # nslice,nlens
dpsf = model.deriv(fitpar[:model.npar])[:-1] # npar_psf, nslice, nlens
data = cube.data
Spsf = S.sum(psf , axis= 1)
Spsf2 = S.sum(psf**2, axis= 1)
Sdata = S.sum(data , axis= 1)
Sdpsf = S.sum(dpsf , axis=-1)
den = ( Spsf2 - Spsf**2 )
num = ( S.sum(data*dpsf, axis=-1) - Sdata * Sdpsf ) * den -\
( S.sum(data*psf , axis= 1) - Spsf * Sdata ) *\
( S.sum(dpsf*psf , axis=-1) - Spsf ) * 2
return num/den**2, model.npar_cor
dI,npar_cor = derivIntensity(cube, psfFn, psfCtes, fitpar)
mat = cov[:npar_cor,:npar_cor]
psfVar = S.sum( [ dI[i]*dI[j]*mat[i,j]
for i in S.arange(npar_cor)
for j in S.arange(npar_cor) ], axis=0 )
NmetaSlices = cube.nslice # Number of meta-slices.
NmicroSlices = slices[-1] # Number of micro-slices per meta-slice.
Nslices = full_cube.nslice # Number of micro-slices.
# Variances / nb of micro-slices per meta-slice
psfVar /= NmicroSlices
# reshape the variance array to the full size.
reshapedVar = S.zeros((NmetaSlices,NmicroSlices), dtype='f') # microslices
for i,v in enumerate(psfVar):
reshapedVar[i,:]=v
psfVar = reshapedVar.ravel()
psfVar = S.resize(psfVar,(Nslices))
return psfVar
def fit_slices(cube, psf_fn, betaDeg=-1, etaDeg=-1, sigmaDeg=-1, skyDeg=0, nsky=2):
"""Fit (meta)slices of (meta)cube using PSF psf_fn and a background of
polynomial degree skyDeg."""
if skyDeg < -1:
raise ValueError("skyDeg=%d is invalid (should be >=-1)" % skyDeg)
npar_psf = 7 # Number of parameters of the psf
npar_sky = int((skyDeg+1)*(skyDeg+2)/2) # Nb. param. in polynomial bkgnd
if betaDeg!=-1:
npar_psf += 1
if etaDeg!=-1:
npar_psf += 1
if sigmaDeg!=-1:
npar_psf += 1
npar_ind = 1 # Nb of independent parameters (Intensity)
cube_sky = pySNIFS.SNIFS_cube()
cube_sky.x = cube.x
cube_sky.y = cube.y
cube_sky.i = cube.i
cube_sky.j = cube.j
cube_sky.nslice = 1
cube_sky.nlens = cube.nlens
cube_star = pySNIFS.SNIFS_cube()
cube_star.x = cube.x
cube_star.y = cube.y
cube_star.i = cube.i
cube_star.j = cube.j
cube_star.nslice = 1
cube_star.nlens = cube.nlens
# PSF + Intensity + Bkgnd coeffs
param_arr = S.zeros((cube.nslice,npar_psf+npar_ind+npar_sky), dtype='d')
khi2_vec = S.zeros(cube.nslice, dtype='d')
error_mat = S.zeros((cube.nslice,npar_psf+npar_ind+npar_sky), dtype='d')
if nsky>7: # Nb of edge spx used for sky estimate
raise ValueError('The number of edge spaxels should be at most 7')
skySpx = (cube_sky.i < nsky) | (cube_sky.i >= 15-nsky) | \
(cube_sky.j < nsky) | (cube_sky.j >= 15-nsky)
print_msg(" Adjusted parameters: [delta],[theta],xc,yc,PA,ell,alpha,I,"
"%d bkgndCoeffs" % (npar_sky if skyDeg >= 0 else 0), 2)
for i in xrange(cube.nslice):
cube_star.lbda = S.array([cube.lbda[i]])
cube_star.data = cube.data[i, S.newaxis]
cube_star.var = cube.var[i, S.newaxis]
cube_sky.data = cube.data[i, S.newaxis].copy() # To be modified
cube_sky.var = cube.var[i, S.newaxis].copy()
# Sky estimate (from FoV edge spx)
medstar = F.median_filter(cube_star.data[0], 3)
skyLev = S.median(cube_sky.data.T[skySpx].squeeze())
if skyDeg>0:
# Fit a 2D polynomial of degree skyDeg on the edge pixels
# of a given cube slice.
cube_sky.var.T[~skySpx] = 0 # Discard central spaxels
model_sky = pySNIFS_fit.model(data=cube_sky,
func=['poly2D;%d' % skyDeg],
param=[[skyLev] + [0.]*(npar_sky-1)],
bounds=[[[0,None]] +
[[None,None]]*(npar_sky-1)])
model_sky.fit()
cube_sky.data = model_sky.evalfit() # 1st background estimate
medstar -= cube_sky.data[0] # Subtract structured background estim.
elif skyDeg == 0:
medstar -= skyLev # Subtract sky level estimate
# Guess parameters for the current slice
imax = medstar.max() # Intensity
xc = S.average(cube_star.x, weights=medstar) # Centroid [spx]
yc = S.average(cube_star.y, weights=medstar)
xc = S.clip(xc, -7.5,7.5) # Put initial guess ~ in FoV
yc = S.clip(yc, -7.5,7.5)
# Filling in the guess parameter arrays (px) and bounds arrays (bx)
p1 = [0., 0., xc, yc, 0., 1., 2.4] # psf parameters
bD = eD = sD = -1
if betaDeg!=-1:
p1 += [2.3]
bD = 0
if etaDeg!=-1:
p1 += [1.]
eD = 0
if sigmaDeg!=-1:
p1 += [0.9]
sD = 0
p1 += [imax] # Intensity
b1 = [[0, 0], # delta (unfitted)
[0, 0], # theta (unfitted)
[None, None], # xc
[None, None], # yc
[None, None], # PA
[0., None], # ellipticity >0
[0., None]] # alpha > 0
if betaDeg!=-1:
b1 += [[0., None]]
if etaDeg!=-1:
b1 += [[0., None]]
if sigmaDeg!=-1:
b1 += [[0., None]]
b1 += [[0., None]] # Intensity > 0
func = ['%s;%f,%f,%f,%f,%f,%f,%f' % \
(psf_fn.name, SpaxelSize,cube_star.lbda[0],0,0,bD,eD,sD)]
param = [p1]
bounds = [b1]
if skyDeg >= 0:
if skyDeg: # Use estimate from prev. polynomial fit
p2 = list(model_sky.fitpar)
else: # Guess: Background=constant (>0)
p2 = [skyLev]
b2 = [[0,None]] + [[None,None]]*(npar_sky-1)
func += ['poly2D;%d' % skyDeg]
param += [p2]
bounds += [b2]
else: # No background
p2 = []
print_msg(" Initial guess [#%d/%d, %.0fA]: %s" % \
(i+1,cube.nslice,cube.lbda[i],p1+p2), 2)
# Instantiate the model class
model_star = pySNIFS_fit.model(data=cube_star, func=func,
param=param, bounds=bounds,
myfunc={psf_fn.name:psf_fn})
# Fit of the current slice
model_star.fit(maxfun=400, msge=int(opts.verbosity >= 3))
# Probably one should check model_star.status...
# Restore true chi2 (not the reduced one), i.e.:
# chi2 = ((cube_star.data-model_star.evalfit())**2/cube_star.var).sum()
model_star.khi2 *= model_star.dof
# Error computation
hess = pySNIFS_fit.approx_deriv(model_star.objgrad, model_star.fitpar,
order=2)
if model_star.fitpar[5]>0 and \
model_star.fitpar[6]>0 and model_star.fitpar[-npar_sky-1]>0:
cov = S.linalg.inv(hess[2:,2:]) # Discard 1st 2 rows/cols (unfitted delta,theta)
diag = cov.diagonal()
if (diag>0).all():
errorpar = S.concatenate(([0.,0.], S.sqrt(diag)))
# Shall we *= model_star.khi2, see
# http://www.asu.edu/sas/sasdoc/sashtml/stat/chap45/sect24.htm
else: # Some negative diagonal elements!
print "WARNING: negative covariance diag. elements in metaslice %d" % (i+1)
model_star.khi2 *= -1
errorpar = S.zeros(len(error_mat.T))
else:
# Set error to 0 if alpha, intens. or ellipticity is 0.
if model_star.fitpar[5]==0:
print "WARNING: ellipticity of metaslice %d is null" % (i+1)
elif model_star.fitpar[6]==0:
print "WARNING: alpha of metaslice %d is null" % (i+1)
elif model_star.fitpar[-npar_sky-1]==0:
print "WARNING: intensity of metaslice %d is null" % (i+1)
model_star.khi2 *= -1 # To be discarded
errorpar = S.zeros(len(error_mat.T))
# Storing the result of the current slice parameters
param_arr[i] = model_star.fitpar
khi2_vec[i] = model_star.khi2
error_mat[i] = errorpar
print_msg(" Fit result [#%d/%d, %.0fA]: %s" % \
(i+1,cube.nslice,cube.lbda[i],model_star.fitpar), 2)
return (param_arr,khi2_vec,error_mat)
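# --- Illustrative sketch (not from the original pipeline) -------------------
# The sky estimate in fit_slices() uses the 'nsky' outermost spaxel rows and
# columns of the 15x15 SNIFS field, as encoded by the skySpx mask above.
# Stand-alone equivalent on index grids (helper name ours):
def _edge_spaxel_mask(nsky=2, size=15):
    import numpy as np
    i = np.arange(size)[:, None]                 # Row indices (column vector)
    j = np.arange(size)[None, :]                 # Column indices (row vector)
    return (i < nsky) | (i >= size-nsky) | (j < nsky) | (j >= size-nsky)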
def create_2D_log_file(filename,objname,airmass,efftime,
cube,param_arr,khi2,error_mat):
npar_sky = (opts.skyDeg+1)*(opts.skyDeg+2)/2
delta,theta = param_arr[:2]
xc,yc = param_arr[2:4]
PA,ell,alpha = param_arr[4:7]
if betaDeg>=0:
beta = param_arr[7]
if etaDeg>=0 and betaDeg>=0:
eta = param_arr[8]
if etaDeg>=0 and betaDeg==-1:
eta = param_arr[7]
if sigmaDeg>=0:
sigma = param_arr[-npar_sky-2]
intensity = param_arr[-npar_sky-1]
sky = param_arr[-npar_sky:]
logfile = open(filename,'w')
logfile.write('# cube : %s \n' % os.path.basename(opts.input))
logfile.write('# object : %s \n' % objname)
logfile.write('# airmass : %.2f \n' % airmass)
logfile.write('# efftime : %.2f \n' % efftime)
logfile.write('# lbda delta +/- ddelta theta +/- dtheta\
xc +/- dxc yc +/- dyc PA +/- dPA\
%s%s%s%s%s I +/- dI %s khi2\n' % \
(''.join([' q%i +/-d q%i ' % (i,i)
for i in xrange(ellDeg+1)]),
''.join([' alpha%i +/- dalpha%i ' % (i,i)
for i in xrange(alphaDeg+1)]),
''.join([' beta%i +/- dbeta%i ' % (i,i)
for i in xrange(betaDeg+1)]),
''.join([' eta%i +/- deta%i ' % (i,i)
for i in xrange(etaDeg+1)]),
''.join([' sigma%i +/- dsigma%i ' % (i,i)
for i in xrange(sigmaDeg+1)]),
''.join([' sky%i +/- dsky%i ' % (i,i)
for i in xrange(npar_sky)])))
# Format string (renamed from 'str' to avoid shadowing the builtin)
fmt = ' %i % 10.4e% 10.4e % 10.4e% 10.4e % 10.4e% 10.4e '
fmt += ' % 10.4e% 10.4e % 10.4e% 10.4e '
fmt += ' % 10.4e% 10.4e ' * (ellDeg+1)
fmt += ' % 10.4e% 10.4e ' * (alphaDeg+1)
if betaDeg!=-1:
fmt += ' % 10.4e% 10.4e ' * (betaDeg+1)
if etaDeg!=-1:
fmt += ' % 10.4e% 10.4e ' * (etaDeg+1)
if sigmaDeg!=-1:
fmt += ' % 10.4e% 10.4e ' * (sigmaDeg+1)
fmt += ' % 10.4e% 10.4e ' # Intensity
fmt += ' % 10.4e% 10.4e ' * npar_sky # sky
fmt += ' % 10.4e \n' # Khi2
for n in xrange(cube.nslice):
list2D = [cube.lbda[n],
delta[n], error_mat[n][0],
theta[n], error_mat[n][1],
xc[n] , error_mat[n][2],
yc[n] , error_mat[n][3],
PA[n] , error_mat[n][4]]
list2D += [ell[n] , error_mat[n][5]] + [0.,0.] * ellDeg
list2D += [alpha[n], error_mat[n][6]] + [0.,0.] * alphaDeg
if betaDeg!=-1:
list2D += [beta[n], error_mat[n][7]]
list2D += [0.,0.] * betaDeg
if etaDeg!=-1 and betaDeg!=-1:
list2D += [eta[n], error_mat[n][8]]
list2D += [0.,0.] * etaDeg
if etaDeg!=-1 and betaDeg==-1:
list2D += [eta[n], error_mat[n][7]]
list2D += [0.,0.] * etaDeg
if sigmaDeg!=-1:
list2D += [sigma[n], error_mat[n][-npar_sky-2]]
list2D += [0.,0.] * sigmaDeg
list2D += [intensity[n], error_mat[n][-npar_sky-1]]
tmp = S.array((sky.T[n],error_mat[n][-npar_sky:]))
list2D += tmp.T.flatten().tolist()
list2D += [khi2[n]]
logfile.write(fmt % tuple(list2D))
logfile.close()
def create_3D_log_file(filename,objname,airmass,efftime,\
seeing,fitpar,khi3D,errorpar,lbda_ref):
logfile = open(filename,'w')
logfile.write('# cube : %s \n' % os.path.basename(opts.input))
logfile.write('# object : %s \n' % objname)
logfile.write('# airmass : %.2f \n' % airmass)
logfile.write('# efftime : %.2f \n' % efftime)
logfile.write('# seeing : %.2f \n' % seeing)
logfile.write('# lbda delta +/- ddelta theta +/- dtheta\
xc +/- dxc yc +/- dyc PA +/- dPA\
%s%s%s%s%s khi2\n' % \
(''.join([' q%i +/-d q%i ' % (i,i)
for i in xrange(ellDeg+1)]),
''.join([' a%i +/- da%i ' % (i,i)
for i in xrange(alphaDeg+1)]),
''.join([' b%i +/- db%i ' % (i,i)
for i in xrange(betaDeg+1)]),
''.join([' e%i +/- de%i ' % (i,i)
for i in xrange(etaDeg+1)]),
''.join([' s%i +/- ds%i ' % (i,i)
for i in xrange(sigmaDeg+1)])))
# Format string (renamed from 'str' to avoid shadowing the builtin)
fmt = ' %i % 10.4e% 10.4e % 10.4e% 10.4e % 10.4e% 10.4e '
fmt += ' % 10.4e% 10.4e % 10.4e% 10.4e '
fmt += ' % 10.4e% 10.4e ' * (ellDeg+1)
fmt += ' % 10.4e% 10.4e ' * (alphaDeg+1)
if betaDeg!=-1:
fmt += ' % 10.4e% 10.4e ' * (betaDeg+1)
if etaDeg!=-1:
fmt += ' % 10.4e% 10.4e ' * (etaDeg+1)
if sigmaDeg!=-1:
fmt += ' % 10.4e% 10.4e ' * (sigmaDeg+1)
fmt += ' % 10.4e \n' # Khi2
for n in xrange(cube.nslice):
list3D = [cube.lbda.mean(),
fitpar[0],errorpar[0],
fitpar[1],errorpar[1],
fitpar[2],errorpar[2],
fitpar[3],errorpar[3],
fitpar[4],errorpar[4]]
for i in xrange(ellDeg+1):
list3D += [fitpar[5+i],errorpar[5+i]]
for i in xrange(alphaDeg+1):
list3D += [fitpar[6+ellDeg+i],
errorpar[6+ellDeg+i]]
if betaDeg!=-1:
for i in xrange(betaDeg+1):
list3D += [fitpar[7+ellDeg+alphaDeg+i],
errorpar[7+ellDeg+alphaDeg+i]]
if etaDeg!=-1:
for i in xrange(etaDeg+1):
list3D += [fitpar[8+ellDeg+alphaDeg+betaDeg+i],
errorpar[8+ellDeg+alphaDeg+betaDeg+i]]
if sigmaDeg!=-1:
for i in xrange(sigmaDeg+1):
list3D += [fitpar[9+ellDeg+alphaDeg+betaDeg+etaDeg+i],
errorpar[9+ellDeg+alphaDeg+betaDeg+etaDeg+i]]
list3D += [khi3D]
logfile.write(fmt % tuple(list3D))
logfile.close()
def build_sky_cube(cube, sky, sky_var, skyDeg):
if skyDeg < 0:
raise ValueError("Cannot build_sky_cube with skyDeg=%d < 0." % skyDeg)
nslices = len(sky)
npar_sky = int((skyDeg+1)*(skyDeg+2)/2)
poly = pySNIFS_fit.poly2D(skyDeg,cube)
cube2 = pySNIFS.zerolike(cube)
cube2.x = (cube2.x)**2
cube2.y = (cube2.y)**2
poly2 = pySNIFS_fit.poly2D(skyDeg,cube2)
param = S.zeros((nslices,npar_sky),'d')
vparam = S.zeros((nslices,npar_sky),'d')
for i in xrange(nslices):
param[i,:] = sky[i].data
vparam[i,:] = sky_var[i].data
data = poly.comp(param)
var = poly2.comp(vparam)
bkg_cube = pySNIFS.zerolike(cube)
bkg_cube.data = data
bkg_cube.var = var
bkg_spec = bkg_cube.get_spec(no=bkg_cube.no)
return bkg_cube,bkg_spec
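# --- Illustrative note (not from the original pipeline) ---------------------
# build_sky_cube() evaluates the background *variance* with the same poly2D
# basis on squared coordinates: for monomials B(x,y) = x**i * y**j one has
# B(x**2,y**2) == B(x,y)**2, so poly2.comp(vparam) returns
# sum_k Var(a_k)*B_k(x,y)**2, i.e. diagonal error propagation that neglects
# coefficient covariances. Quick check of the identity (helper name ours):
def _monomial_square_check(i=2, j=1, x=1.7, y=-0.3):
    import numpy as np
    return np.allclose((x**2)**i * (y**2)**j, (x**i * y**j)**2)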
def fill_header(hdr, param, lbda_ref, opts, khi2, seeing, tflux, sflux):
"""Fill header hdr with fit-related keywords."""
hdr.update('ES_VERS' ,__version__)
hdr.update('ES_CUBE' ,opts.input, 'Input cube')
hdr.update('ES_LREF' ,lbda_ref, 'Lambda ref. [A]')
hdr.update('ES_SDEG' ,opts.skyDeg,'Polynomial bkgnd degree')
hdr.update('ES_KHI2' ,khi2, 'Khi2 of meta-fit')
hdr.update('ES_AIRM', 1/S.cos(S.arctan(param[0])), 'Effective airmass')
hdr.update('ES_PARAN',param[1]/S.pi*180, 'Effective parangle [deg]')
hdr.update('ES_DELTA',param[0], 'ADR power')
hdr.update('ES_THETA',param[1]/S.pi*180,'ADR angle [deg]')
hdr.update('ES_XC' ,param[2], 'xc @lbdaRef [spx]')
hdr.update('ES_YC' ,param[3], 'yc @lbdaRef [spx]')
if opts.supernova:
hdr.update('ES_SNMOD',opts.supernova,'SN mode (no 3D fit for PSF)')
hdr.update('ES_PA' ,param[4], 'Position angle')
for i in xrange(opts.ellDeg + 1):
hdr.update('ES_Q%i' % i, param[5+i], 'Ellipticity coeff. q%d' % i)
for i in xrange(opts.alphaDeg + 1):
hdr.update('ES_A%i' % i, param[6+opts.ellDeg+i], 'Alpha coeff. a%d' % i)
if opts.betaDeg!=-1:
for i in xrange(opts.betaDeg + 1):
hdr.update('ES_B%i' % i, param[7+opts.ellDeg+opts.alphaDeg+i], 'Beta coeff. b%d' % i)
if opts.etaDeg!=-1:
for i in xrange(opts.etaDeg + 1):
hdr.update('ES_E%i' % i, param[8+opts.ellDeg+opts.alphaDeg+opts.betaDeg+i], 'Eta coeff. e%d' % i)
if opts.sigmaDeg!=-1:
for i in xrange(opts.sigmaDeg + 1):
hdr.update('ES_S%i' % i, param[9+opts.ellDeg+opts.alphaDeg+opts.betaDeg+opts.etaDeg+i], 'Sigma coeff. s%d' % i)
hdr.update('ES_METH', opts.method, 'Extraction method')
if opts.method != 'psf':
hdr.update('ES_APRAD', opts.radius, 'Aperture radius [sigma]')
hdr.update('ES_TFLUX',tflux, 'Sum of the spectrum flux')
if opts.skyDeg >= 0:
hdr.update('ES_SFLUX',sflux, 'Sum of the sky flux')
hdr.update('SEEING', seeing, 'Seeing [arcsec] (psf_analysis)')
# PSF classes ================================================================
class ExposurePSF:
"""
Empirical PSF 3D function used by the L{model} class.
"""
def __init__(self, psf_ctes, cube, coords=None):
"""Initiating the class.
@param psf_ctes: Internal parameters (pixel size in cube spatial unit,
reference wavelength and polynomial degree of alpha). A
list of three numbers.
@param cube: Input cube. This is a L{SNIFS_cube} object.
@param coords: if not None, should be (x,y).
"""
self.spxSize = psf_ctes[0] # Spaxel size [arcsec]
self.lbda_ref = psf_ctes[1] # Reference wavelength [AA]
self.ellDeg = int(psf_ctes[2]) # Ellip polynomial degree
self.alphaDeg = int(psf_ctes[3]) # Alpha polynomial degree
self.betaDeg = int(psf_ctes[4])
self.etaDeg = int(psf_ctes[5])
self.sigmaDeg = int(psf_ctes[6])
self.npar_cor = 10 + self.ellDeg + self.alphaDeg + self.betaDeg +\
self.etaDeg + self.sigmaDeg # PSF parameters
self.npar_ind = 1 # Intensity parameters per slice
self.nslice = cube.nslice
self.npar = self.npar_cor + self.npar_ind*cube.nslice
if coords is None:
self.nlens = cube.nlens
self.x = S.resize(cube.x, (self.nslice,self.nlens)) # nslice,nlens
self.y = S.resize(cube.y, (self.nslice,self.nlens))
else:
x = coords[0].ravel()
y = coords[1].ravel()
assert len(x)==len(y), \
"Incompatible coordinates (%d/%d)" % (len(x),len(y))
self.nlens = len(x)
self.x = S.resize(x, (self.nslice,self.nlens)) # nslice,nlens
self.y = S.resize(y, (self.nslice,self.nlens))
self.l = S.resize(cube.lbda, (self.nlens,self.nslice)).T # nslice,nlens
# ADR in spaxels (nslice,nlens)
if hasattr(cube,'e3d_data_header'): # Read from a real cube if possible
pressure,temp = read_PT(cube.e3d_data_header)
else:
pressure,temp = MK_pressure,MK_temp # Default values for P and T
self.n_ref = TA.refractiveIndex(self.lbda_ref, P=pressure, T=temp)
self.ADR_coeff = ( self.n_ref - \
TA.refractiveIndex(self.l, P=pressure, T=temp) ) * \
206265 / self.spxSize # l > l_ref <=> coeff > 0
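# Units note (ours): n_ref - n(l) is the differential refraction in radians
# for a unit ADR amplitude (delta = tan(zenith distance)); 206265 arcsec/rad
# converts it to arcsec, and dividing by spxSize expresses it in spaxels.
# comp() then applies a shift of delta*ADR_coeff along the parallactic
# direction.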
def index(self,n):
if n==6:
return n+self.ellDeg
elif n==7:
return n+self.ellDeg+self.alphaDeg
elif n==8:
return n+self.ellDeg+self.alphaDeg+self.betaDeg
elif n==9:
return n+self.ellDeg+self.alphaDeg+self.betaDeg+self.etaDeg
elif n==10:
return n+self.ellDeg+self.alphaDeg+self.betaDeg+self.etaDeg+\
self.sigmaDeg
def comp(self, param, normed=False):
"""
Compute the function.
@param param: Input parameters of the polynomial. A list of numbers:
- C{param[0:x]} : The x parameters of the PSF shape
- C{param[0]}: Atmospheric dispersion power
- C{param[1]}: Atmospheric dispersion position angle
- C{param[2]}: X center at the reference wavelength
- C{param[3]}: Y center at the reference wavelength
- C{param[4]}: Position angle
- C{param[5+m]}: Ellipticity ( m:polynomial degree of ellipticity )
- C{param[6+m+n]}: Moffat radius ( n:polynomial degree of moffat radius )
- C{param[7+m+n+o]}: Moffat power ( o:polynomial degree of moffat power )
- C{param[8+m+n+o+p]}: norm gaussian / norm moffat ( p:polynomial degree of ng/nm )
- C{param[9+m+n+o+p+q]}: Gaussian radius ( q:polynomial degree of gaussian radius )
- C{param[10+m+n+o+p+q:]}: Intensity parameters ( one for each slice in the cube )
@param normed: Should the function be normalized (integral)
"""
self.param = S.asarray(param)
lbda_rel = self.l / self.lbda_ref - 1 # nslice,nlens
# ADR params
delta = self.param[0]
theta = self.param[1]
xc = self.param[2]
yc = self.param[3]
x0 = xc + delta*self.ADR_coeff*S.sin(theta) # nslice,nlens
y0 = yc - delta*self.ADR_coeff*S.cos(theta)
# Other params
PA = self.param[4] # Position Angle
ellCoeffs = self.param[5:self.index(6)]
alphaCoeffs = self.param[self.index(6):self.index(7)]
ell = eval_poly(ellCoeffs, lbda_rel)
alpha = eval_poly(alphaCoeffs, lbda_rel)
# Correlated params
## s1,s0,b1,b0,e1,e0 = self.corrCoeffs
## b0,b1,s00,s01,s10,s11,e00,e01,e10,e11 = self.corrCoeffs
## b00,b01,b02,b10,b11,b12,s00,s01,s02,s10,s11,s12,e00,e01,e02,e10,e11,e12 = self.corrCoeffs
b00,b01,b10,b11,s00,s01,s10,s11,e00,e01,e10,e11 = self.corrCoeffs
if self.betaDeg==-1: # Beta
## beta = b0 + b1*alpha
beta = (b00+b01*lbda_rel) + (b10+b11*lbda_rel)*alpha
## beta = (b00+b01*lbda_rel+b02*lbda_rel**2) + (b10+b11*lbda_rel+b12*lbda_rel**2)*alpha
else:
betaCoeffs = self.param[self.index(7):self.index(8)]
beta = eval_poly(betaCoeffs, lbda_rel)
if self.etaDeg==-1: # Eta
## eta = e0 + e1*alpha
eta = (e00+e01*lbda_rel) + (e10+e11*lbda_rel)*alpha
## eta = (e00+e01*lbda_rel+e02*lbda_rel**2) + (e10+e11*lbda_rel+e12*lbda_rel**2)*alpha
else:
etaCoeffs = self.param[self.index(8):self.index(9)]
eta = eval_poly(etaCoeffs, lbda_rel)
if self.sigmaDeg==-1: # Sigma
## sigma = s0 + s1*alpha
sigma = (s00+s01*lbda_rel) + (s10+s11*lbda_rel)*alpha
## sigma = (s00+s01*lbda_rel+s02*lbda_rel**2) + (s10+s11*lbda_rel+s12*lbda_rel**2)*alpha
else:
sigmaCoeffs = self.param[self.index(9):self.index(10)]
sigma = eval_poly(sigmaCoeffs, lbda_rel)
# Gaussian + Moffat
dx = self.x - x0
dy = self.y - y0
r2 = dx**2 + ell*dy**2 + 2*PA*dx*dy
gaussian = S.exp(-r2/2/sigma**2)
moffat = (1 + r2/alpha**2)**(-beta)
# Function
val = self.param[self.npar_cor:,S.newaxis] * (moffat + eta*gaussian)
# The 3D psf model is not normalized to 1 in integral. The result must
# be renormalized by (2*eta*sigma**2 + alpha**2/(beta-1)) *
# S.pi/sqrt(ell)
if normed:
val /= S.pi*( 2*eta*sigma**2 + alpha**2/(beta-1) )/S.sqrt(ell)
return val
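# Derivation note (ours): the norm follows from the 2D integrals
# Integral[exp(-r2/2/sigma**2)] = 2*pi*sigma**2 and
# Integral[(1+r2/alpha**2)**-beta] = pi*alpha**2/(beta-1) (for beta>1);
# the elliptical form r2 = dx**2 + ell*dy**2 + 2*PA*dx*dy rescales the
# area element by 1/sqrt(ell - PA**2), approximated here by 1/sqrt(ell)
# for small PA.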
def deriv(self, param):
"""
Compute the derivative of the function with respect to its parameters.
@param param: Input parameters of the polynomial.
A list of numbers (see L{SNIFS_psf_3D.comp}).
"""
self.param = S.asarray(param)
grad = S.zeros((self.npar_cor+self.npar_ind,)+self.x.shape,'d')
lbda_rel = self.l / self.lbda_ref - 1
# ADR params
delta = self.param[0]
theta = self.param[1]
xc = self.param[2]
yc = self.param[3]
costheta = S.cos(theta)
sintheta = S.sin(theta)
x0 = xc + delta*self.ADR_coeff*sintheta
y0 = yc - delta*self.ADR_coeff*costheta
# Other params
PA = self.param[4] # Position Angle
ellCoeffs = self.param[5:self.index(6)]
alphaCoeffs = self.param[self.index(6):self.index(7)]
ell = eval_poly(ellCoeffs, lbda_rel)
alpha = eval_poly(alphaCoeffs, lbda_rel)
# Correlated params
## s1,s0,b1,b0,e1,e0 = self.corrCoeffs
## b0,b1,s00,s01,s10,s11,e00,e01,e10,e11 = self.corrCoeffs
## b00,b01,b02,b10,b11,b12,s00,s01,s02,s10,s11,s12,e00,e01,e02,e10,e11,e12 = self.corrCoeffs
b00,b01,b10,b11,s00,s01,s10,s11,e00,e01,e10,e11 = self.corrCoeffs
if self.betaDeg==-1: # Beta
## beta = b0 + b1*alpha
beta = (b00+b01*lbda_rel) + (b10+b11*lbda_rel)*alpha
## beta = (b00+b01*lbda_rel+b02*lbda_rel**2) + (b10+b11*lbda_rel+b12*lbda_rel**2)*alpha
else:
betaCoeffs = self.param[self.index(7):self.index(8)]
beta = eval_poly(betaCoeffs, lbda_rel)
if self.etaDeg==-1: # Eta
## eta = e0 + e1*alpha
eta = (e00+e01*lbda_rel) + (e10+e11*lbda_rel)*alpha
## eta = (e00+e01*lbda_rel+e02*lbda_rel**2) + (e10+e11*lbda_rel+e12*lbda_rel**2)*alpha
else:
etaCoeffs = self.param[self.index(8):self.index(9)]
eta = eval_poly(etaCoeffs, lbda_rel)
if self.sigmaDeg==-1: # Sigma
## sigma = s0 + s1*alpha
sigma = (s00+s01*lbda_rel) + (s10+s11*lbda_rel)*alpha
## sigma = (s00+s01*lbda_rel+s02*lbda_rel**2) + (s10+s11*lbda_rel+s12*lbda_rel**2)*alpha
else:
sigmaCoeffs = self.param[self.index(9):self.index(10)]
sigma = eval_poly(sigmaCoeffs, lbda_rel)
# Gaussian + Moffat
dx = self.x - x0
dy = self.y - y0
dy2 = dy**2
r2 = dx**2 + ell*dy2 + 2*PA*dx*dy
gaussian = S.exp(-r2/2/sigma**2)
ea = 1 + r2/alpha**2
moffat = ea**(-beta)
j1 = eta/sigma**2
j2 = 2*beta/ea/alpha**2
da0 = moffat * r2 * j2 / alpha
if self.betaDeg==-1:
## da0 += -moffat * b1 * S.log(ea)
da0 += -moffat * (b10+b11*lbda_rel) * S.log(ea)
## da0 += -moffat * (b10+b11*lbda_rel+b12*lbda_rel**2) * S.log(ea)
if self.etaDeg==-1:
## da0 += e1 * gaussian
da0 += (e10+e11*lbda_rel) * gaussian
## da0 += (e10+e11*lbda_rel+e02*lbda_rel**2) * gaussian
if self.sigmaDeg==-1:
## da0 += gaussian * j1 * r2 * s1 / sigma
da0 += gaussian * j1 * r2 * (s10+s11*lbda_rel) / sigma
## da0 += gaussian * j1 * r2 * (s10+s11*lbda_rel+s12*lbda_rel**2) / sigma
# Derivatives
tmp = gaussian*j1 + moffat*j2
grad[2] = tmp*( dx + PA*dy) # dPSF/dx0
grad[3] = tmp*(ell*dy + PA*dx) # dPSF/dy0
grad[0] = self.ADR_coeff*(sintheta*grad[2] - costheta*grad[3])
grad[1] = delta*self.ADR_coeff*(sintheta*grad[3] + costheta*grad[2])
grad[4] = -tmp * dx*dy # dPSF/dPA
for i in xrange(self.ellDeg + 1):
grad[5+i] = -tmp/2 * dy2 * lbda_rel**i
for i in xrange(self.alphaDeg + 1):
grad[self.index(6)+i] = da0 * lbda_rel**i
if self.betaDeg!=-1:
for i in xrange(self.betaDeg + 1):
grad[self.index(7)+i] = -moffat * S.log(ea) * lbda_rel**i
if self.etaDeg!=-1:
for i in xrange(self.etaDeg + 1):
grad[self.index(8)+i] = gaussian * lbda_rel**i
if self.sigmaDeg!=-1:
for i in xrange(self.sigmaDeg + 1):
grad[self.index(9)+i] = eta * r2 * gaussian / sigma**3 * lbda_rel**i
grad[:self.npar_cor] *= self.param[S.newaxis,self.npar_cor:,S.newaxis]
grad[self.npar_cor] = moffat + eta*gaussian # dPSF/dI
return grad
def _HWHM_fn(self, r, lbda):
"""Half-width at half maximum function (=0 at HWHM)."""
lbda_rel = lbda/self.lbda_ref - 1
alphaCoeffs = self.param[self.index(6):self.index(7)]
alpha = eval_poly(alphaCoeffs, lbda_rel)
## s1,s0,b1,b0,e1,e0 = self.corrCoeffs # Correlations
## b0,b1,s00,s01,s10,s11,e00,e01,e10,e11 = self.corrCoeffs
## b00,b01,b02,b10,b11,b12,s00,s01,s02,s10,s11,s12,e00,e01,e02,e10,e11,e12 = self.corrCoeffs
b00,b01,b10,b11,s00,s01,s10,s11,e00,e01,e10,e11 = self.corrCoeffs
if self.betaDeg==-1: # Beta
## beta = b0 + b1*alpha
beta = (b00+b01*lbda_rel) + (b10+b11*lbda_rel)*alpha
## beta = (b00+b01*lbda_rel+b02*lbda_rel**2) + (b10+b11*lbda_rel+b12*lbda_rel**2)*alpha
else:
betaCoeffs = self.param[self.index(7):self.index(8)]
beta = eval_poly(betaCoeffs, lbda_rel)
if self.etaDeg==-1: # Eta
## eta = e0 + e1*alpha
eta = (e00+e01*lbda_rel) + (e10+e11*lbda_rel)*alpha
## eta = (e00+e01*lbda_rel+e02*lbda_rel**2) + (e10+e11*lbda_rel+e12*lbda_rel**2)*alpha
else:
etaCoeffs = self.param[self.index(8):self.index(9)]
eta = eval_poly(etaCoeffs, lbda_rel)
if self.sigmaDeg==-1: # Sigma
## sigma = s0 + s1*alpha
sigma = (s00+s01*lbda_rel) + (s10+s11*lbda_rel)*alpha
## sigma = (s00+s01*lbda_rel+s02*lbda_rel**2) + (s10+s11*lbda_rel+s12*lbda_rel**2)*alpha
else:
sigmaCoeffs = self.param[self.index(9):self.index(10)]
sigma = eval_poly(sigmaCoeffs, lbda_rel)
gaussian = S.exp(-r**2/2/sigma**2)
moffat = (1 + r**2/alpha**2)**(-beta)
# PSF=moffat + eta*gaussian, maximum is 1+eta
return moffat + eta*gaussian - (eta + 1)/2
def FWHM(self, lbda):
"""Estimate FWHM of PSF at wavelength lbda."""
# Compute FWHM from radial profile
fwhm = 2*S.optimize.fsolve(func=self._HWHM_fn, x0=1., args=(lbda,))
return fwhm # In spaxels
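# --- Illustrative check (not from the original pipeline) --------------------
# In the eta=0 (pure Moffat) limit the HWHM equation solved numerically by
# FWHM() has a closed form, handy as a sanity check of the fsolve result
# (helper name ours):
def _moffat_fwhm(alpha, beta):
    """FWHM of a circular Moffat profile: 2*alpha*sqrt(2**(1/beta) - 1)."""
    return 2.*alpha*(2.**(1./beta) - 1.)**0.5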
class long_blue_exposure_psf(ExposurePSF):
name = 'long blue'
#corrCoeffs = [0.215,0.545,0.345,1.685,0.0,1.04] # Old
#corrCoeffs = [0.2676,0.5721,0.4413,1.3644,-0.2258,1.1453] # New
#corrCoeffs = [0.2721,0.5368,0.4183,1.4896,-0.2441,1.2246] # color diff
#corrCoeffs = [1.3881,0.4324,0.4670,8.048e-06,0.2939,-9.785e-07,1.4417,-5.806e-05,-0.2771,1.102e-05] #b0,b1,s00,s01,s10,s11,e00,e01,e10,e11
#corrCoeffs = [-3.6230,0.00246,-2.915e-07,2.0092,-7.66e-04,9.091e-08, # b00,b01,b02,b10,b11,b12
# 12.3877,-0.002693,2.901e-07,-3.5045,0.0008732,-9.584e-08, # s00,s01,s02,s10,s11,s12
# 0 , 0 , 0 , 0 , 0 , 0 ] # e00,e01,e02,e10,e11,e12
#corrCoeffs = [1.3939,0,0,0.4149,0,0,
# -2.3780,0.001418,-1.768e-07,1.3559,-0.0005172,6.516e-08,
# -106.2317,0.0638,-6.926e-06,40.8057,-0.02406,2.597e-06]
#corrCoeffs = [1.0141,0.0001452,0,0.5636,-4.457e-05,0,
# 1.0424,-2.383e-05,0,0.1152,-5.153e-07,0,
# 1.3315,-6.817e-05,0,-0.2397,2.864e-05,0]
corrCoeffs = [1.2047,9.884e-05,0.4979,-2.887e-05,0.9473,-1.271e-05,0.1608,-7.258e-06,0.9872,8.231e-06,-0.0952,-1.134e-05] #b00,b01,b10,b11,s00,s01,s10,s11,e00,e01,e10,e11
class long_red_exposure_psf(ExposurePSF):
name = 'long red'
#corrCoeffs = [0.215,0.545,0.345,1.685,0.0,1.04] # Old
#corrCoeffs = [0.2676,0.5721,0.4413,1.3644,-0.2258,1.1453] # New
#corrCoeffs = [0.2827,0.5438,0.4433,1.3184,-0.1782,0.9781] # color diff
#corrCoeffs = [1.3881,0.4324,0.4670,8.048e-06,0.2939,-9.785e-07,1.4417,-5.806e-05,-0.2771,1.102e-05] #b0,b1,s00,s01,s10,s11,e00,e01,e10,e11
#corrCoeffs = [-2.4953,0.00101,-6.546e-08,2.1068,-4.39e-04,2.821e-08, # b00,b01,b02,b10,b11,b12
# 9.765,-0.001339,8.806e-08,-3.6703,0.0005614,-3.72e-08, # s00,s01,s02,s10,s11,s12
# 0 , 0 , 0 , 0 , 0 , 0 ] # e00,e01,e02,e10,e11,e12
#corrCoeffs = [1.3939,0,0,0.4149,0,0,
# 0.5714,-1.711e-05,0,0.3093,2.557e-06,0,
# 1.1926,-2.611e-05,0,-0.1771,-7.277e-06,0]
#corrCoeffs = [2.0210,-0.0001286,0,-0.0247,7.99e-05,0,
# 0.2315,-1.065e-05,0,0.4865,6.91e-06,0,
# 1.9819,-8.003e-05,0,-0.3596,1.402e-05,0]
corrCoeffs = [1.8326,-9.339e-05,0.0573,6.484e-05,0.6015,-4.637e-05,0.3075,2.494e-05,1.4611,-1.195e-04,-0.2299,6.769e-05] #b00,b01,b10,b11,s00,s01,s10,s11,e00,e01,e10,e11
class short_blue_exposure_psf(ExposurePSF):
name = 'short blue'
#corrCoeffs = [0.2,0.56,0.415,1.395,0.16,0.6] # Old
#corrCoeffs = [0.2103,0.6583,0.4884,1.3264,0.0041,0.4090] # New
#corrCoeffs = [0.2394,0.6181,0.4667,1.3615,-0.0262,0.4532] # color diff
#corrCoeffs = [1.3351,0.4812,0.5234,2.293e-05,0.2912,-1.408e-05,0.7256,-6.826e-05,-0.2079,4.556e-05] #b0,b1,s00,s01,s10,s11,e00,e01,e10,e11
#corrCoeffs = [-1.2658,0.00125,-1.469e-07,2.2089,-8.17e-04,9.47e-08, # b00,b01,b02,b10,b11,b12
# 4.3073, 0 , 0 ,-2.2489, 0 , 0 , # s00,s01,s02,s10,s11,s12
# 0 , 0 , 0 , 0 , 0 , 0 ] # e00,e01,e02,e10,e11,e12
#corrCoeffs = [1.7849,-7.774e-05,0,0.1582,5.792e-05,0,
# 1.3371,-0.0006844,9.156e-08,0.2072,0.0002397,-3.535e-08,
# -0.4626,0.0001073,0,0.3728,-6.1e-05,0]
#corrCoeffs = [0.9849,0.0001005,0,0.7211,-5.914e-05,0,
# 0.3180,8.193e-06,0,0.1997,-2.999e-05,0,
# 5.0177,-0.001875,0,-0.9489,2.109e-05,0]
corrCoeffs = [1.0116,9.246e-05,0.7127,-5.656e-05,0.6673,3.197e-05,0.1702,-2.39e-05,0.9530,-4.783e-06,-0.1493,-9.421e-06] #b00,b01,b10,b11,s00,s01,s10,s11,e00,e01,e10,e11
class short_red_exposure_psf(ExposurePSF):
name = 'short red'
#corrCoeffs = [0.2,0.56,0.415,1.395,0.16,0.6] # Old
#corrCoeffs = [0.2103,0.6583,0.4884,1.3264,0.0041,0.4090] # New
#corrCoeffs = [0.1659,0.6915,0.4936,1.3144,0.1516,0.2260] # color diff
#corrCoeffs = [1.3351,0.4812,0.5234,2.293e-05,0.2912,-1.408e-05,0.7256,-6.826e-05,-0.2079,4.556e-05] #b0,b1,s00,s01,s10,s11,e00,e01,e10,e11
#corrCoeffs = [1.4045,-1.333e-05,0,0.4463,7.309e-06,0 , # b00,b01,b02,b10,b11,b12
# 0.3079, 0 , 0 ,0.3596, 0 , 0 , # s00,s01,s02,s10,s11,s12
# 0 , 0 , 0 , 0 , 0 , 0 ] # e00,e01,e02,e10,e11,e12
#corrCoeffs = [1.3161,2.145e-05,0,0.5196,-1.851e-05,0,
# 0.8431,-6.503e-05,0,-0.0516,5.751e-05,0,
# -0.0136,0,0,0.4338,0,0]
#corrCoeffs = [1.5946,-3.476e-05,0,0.1954,4.182e-05,0,
# 0.3180,8.193e-06,0,0.5003,-1.099e-05,0,
# 0.7218,-4.259e-05,0,-0.1544,2.744e-05 ,0]
corrCoeffs = [1.6282,-3.76e-05,0.1929,-4.152e-05,0.3584,1.396e-06,0.4473,-5.176e-06,1.3428,-1.439e-04,-0.5985,1.166e-04] #b00,b01,b10,b11,s00,s01,s10,s11,e00,e01,e10,e11
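# --- Illustrative sketch (not from the original pipeline) -------------------
# Reading of corrCoeffs: when a shape parameter is not fitted (degree -1),
# ExposurePSF.comp() ties it to alpha through a wavelength-dependent linear
# law, e.g. beta = (b00 + b01*lbda_rel) + (b10 + b11*lbda_rel)*alpha, and
# likewise for sigma and eta. Stand-alone form (helper name ours):
def _correlated_param(c00, c01, c10, c11, lbda_rel, alpha):
    return (c00 + c01*lbda_rel) + (c10 + c11*lbda_rel)*alpha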
# ########## MAIN ##############################
if __name__ == "__main__":
# Options ====================================================================
methods = ('psf','aperture','optimal')
usage = "usage: [%prog] [options] -i inE3D.fits " \
"-o outSpec.fits -s outSky.fits"
parser = optparse.OptionParser(usage, version=__version__)
parser.add_option("-i", "--in", type="string", dest="input",
help="Input datacube (euro3d format)")
parser.add_option("-o", "--out", type="string",
help="Output star spectrum")
parser.add_option("-s", "--sky", type="string",
help="Output sky spectrum")
parser.add_option("-K", "--skyDeg", type="int", dest="skyDeg",
help="Sky polynomial background degree [%default]",
default=0 )
parser.add_option("-A", "--alphaDeg", type="int",
help="Alpha polynomial degree [%default]",
default=2)
parser.add_option("-Q", "--ellDeg", type="int",
help="Ellipticity polynomial degree [%default]",
default=0)
parser.add_option("-B", "--betaDeg", type="int",
help="Beta polynomial degree [%default].\
If -1, Beta is a linear function of alpha",
default=-1)
parser.add_option("-S", "--sigmaDeg", type="int",
help="Sigma polynomial degree [%default].\
If -1, Sigma is a linear function of alpha",
default=-1)
parser.add_option("-E", "--etaDeg", type="int",
help="Eta polynomial degree [%default].\
If -1, Eta is a linear function of alpha",
default=-1)
parser.add_option("-m", "--method", type="string",
help="Extraction method ['%default']",
default="psf")
parser.add_option("-r", "--radius", type="float",
help="Aperture radius for non-PSF extraction " \
"[%default sigma]", default=5.)
parser.add_option("-p", "--plot", action='store_true',
help="Plot flag (syn. '--graph=png')")
parser.add_option("-g", "--graph", type="string",
help="Graphic output format ('eps', 'png' or 'pylab')")
parser.add_option("-v", "--verbosity", type="int",
help="Verbosity level (<0: quiet) [%default]",
default=0)
parser.add_option("-f", "--file", type="string",
help="Save 2D adjustment results in file.")
parser.add_option("-F", "--File", type="string",
help="Save 3D adjustment results in file.")
parser.add_option("--supernova", action='store_true',
help="SN mode (no final 3D fit).")
parser.add_option("--keepmodel", action='store_true',
help="Store meta-slice model in 3D-cube.")
opts,pars = parser.parse_args()
if not opts.input:
parser.error("No input datacube specified.")
if opts.out and not opts.sky:
parser.error("Option '--sky' is missing!")
if opts.graph:
opts.plot = True
elif opts.plot:
opts.graph = 'png'
opts.method = opts.method.lower()
if opts.method not in methods:
parser.error("Unrecognized extraction method '%s' %s " % \
(opts.method,methods))
if opts.skyDeg < 0:
opts.skyDeg = -1
if opts.sky:
print "WARNING: cannot extract sky spectrum in no-sky mode."
# Input datacube =============================================================
print "Opening datacube %s" % opts.input
full_cube = pySNIFS.SNIFS_cube(opts.input)
step = full_cube.lstep
print_msg("Cube %s: %d slices [%.2f-%.2f], %d spaxels" % \
(os.path.basename(opts.input), full_cube.nslice,
full_cube.lbda[0], full_cube.lbda[-1], full_cube.nlens), 1)
# The full_cube.e3d_data_header dictionary is not enough for later updates
# in fill_header, which requires a *true* pyfits header.
inhdr = pyfits.getheader(opts.input, 1) # 1st extension
obj = inhdr.get('OBJECT', 'Unknown')
efftime = inhdr['EFFTIME']
airmass = inhdr['AIRMASS']
parangle = inhdr.get('PARANG', S.nan)
if S.isnan(parangle):
print "WARNING: cannot read PARANG keyword, estimating it from header"
parangle = estimate_parangle(inhdr)
channel = inhdr['CHANNEL'][0].upper()
pressure,temp = read_PT(inhdr, update=True)
ellDeg = opts.ellDeg
alphaDeg = opts.alphaDeg
betaDeg = opts.betaDeg
etaDeg = opts.etaDeg
sigmaDeg = opts.sigmaDeg
npar_psf = 10 + ellDeg + alphaDeg + betaDeg + etaDeg + sigmaDeg
skyDeg = opts.skyDeg
npar_sky = int((skyDeg+1)*(skyDeg+2)/2)
# Select the PSF (short or long and blue or red)
if (efftime > 12.) and (channel.upper().startswith('B')): # Long & Blue exposure
psfFn = long_blue_exposure_psf
elif (efftime > 12.) and (channel.upper().startswith('R')): # Long & Red exposure
psfFn = long_red_exposure_psf
elif (efftime <= 12.) and (channel.upper().startswith('B')): # Short & Blue exposure
psfFn = short_blue_exposure_psf
elif (efftime <= 12.) and (channel.upper().startswith('R')): # Short & Red exposure
psfFn = short_red_exposure_psf
print " Object: %s, Airmass: %.2f, Efftime: %.1fs [%s]" % \
(obj, airmass, efftime, psfFn.name)
# Meta-slices definition (min,max,step [px])
if channel.upper().startswith('B'):
slices=[10, 900, 65]
elif channel.upper().startswith('R'):
slices=[10, 1500, 130]
else:
parser.error("Input datacube %s has no valid CHANNEL keyword (%s)" % \
(opts.input, channel))
print " Channel: '%s', extracting slices: %s" % (channel,slices)
cube = pySNIFS.SNIFS_cube(opts.input, slices=slices)
cube.x = cube.i - 7 # Center spaxel indices on the FoV center [spx]
cube.y = cube.j - 7
print_msg(" Meta-slices before selection: %d " \
"from %.2f to %.2f by %.2f A" % \
(len(cube.lbda), cube.lbda[0], cube.lbda[-1], cube.lstep), 0)
# Normalisation of the signal and variance in order to avoid numerical
# problems with too small numbers
norm = cube.data.mean()
cube.data /= norm
cube.var /= norm**2
# Computing guess parameters from slice by slice 2D fit ======================
print "Slice-by-slice 2D-fitting..."
param_arr,khi2_vec,error_mat = fit_slices(cube, psfFn, betaDeg=opts.betaDeg,\
etaDeg=opts.etaDeg, sigmaDeg=opts.sigmaDeg,\
skyDeg=opts.skyDeg)
print_msg("", 1)
param_arr = param_arr.T # (nparam,nslice)
delta_vec,theta_vec = param_arr[:2]
xc_vec,yc_vec = param_arr[2:4]
PA_vec,ell_vec,alpha_vec = param_arr[4:7]
if betaDeg>=0:
beta_vec = param_arr[7]
if etaDeg>=0 and betaDeg>=0:
eta_vec = param_arr[8]
if etaDeg>=0 and betaDeg==-1:
eta_vec = param_arr[7]
if sigmaDeg>=0:
sigma_vec = param_arr[-npar_sky-2]
int_vec = param_arr[-npar_sky-1]
if skyDeg >= 0:
sky_vec = param_arr[-npar_sky:]
# Save 2D adjusted parameter file ============================================
if opts.file:
print "Producing 2D adjusted parameter file [%s]..." % opts.file
create_2D_log_file(opts.file,obj,airmass,efftime,
cube,param_arr,khi2_vec,error_mat)
# 3D model fitting ===========================================================
print "Datacube 3D-fitting..."
# Computing the initial guess for the 3D fitting from the results of the
# slice by slice 2D fit
lbda_ref = 5000. # Use constant lbda_ref for easy comparison
nslice = cube.nslice
lbda_rel = cube.lbda / lbda_ref - 1
# 1) ADR parameters (from keywords)
delta = S.tan(S.arccos(1./airmass)) # ADR power
theta = parangle/180.*S.pi # ADR angle [rad]
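# Note (ours): sec(z) = airmass for a plane-parallel atmosphere, hence
# delta = tan(z) = tan(arccos(1/airmass)) = sqrt(airmass**2 - 1).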
# 2) Reference position
# Convert meta-slice centroids to position at ref. lbda, and clip around
# median position
adr = TA.ADR(pressure, temp, lref=lbda_ref, delta=delta, theta=theta)
print_msg(str(adr), 1)
xref,yref = adr.refract(xc_vec,yc_vec, cube.lbda, backward=True)
valid = khi2_vec > 0 # Discard unfitted slices
x0,y0 = S.median(xref[valid]),S.median(yref[valid]) # Robust to outliers
r = S.hypot(xref - x0, yref - y0)
rmax = 5*S.median(r[valid]) # Robust to outliers
good = valid & (r <= rmax) # Valid fit and reasonable position
bad = valid & (r > rmax) # Valid fit but discarded position
if (valid & bad).any():
print "WARNING: %d metaslices discarded after ADR selection" % \
(len(S.nonzero(valid & bad)))
print_msg("%d/%d centroids found within %.2f spx of (%.2f,%.2f)" % \
(len(xref[good]),len(xref),rmax,x0,y0), 1)
xc,yc = xref[good].mean(),yref[good].mean()
# We could use a weighted average, but it does not make much of a difference
# dx,dy = error_mat[:,2],error_mat[:,3]
# xc = S.average(xref[good], weights=1/dx[good]**2)
# yc = S.average(yref[good], weights=1/dy[good]**2)
if not good.all(): # Invalid slices + discarded centroids
print "%d/%d centroid positions discarded for initial guess" % \
(len(xc_vec[~good]),nslice)
if len(xc_vec[good]) <= max(alphaDeg+1,ellDeg+1):
raise ValueError('Not enough points for initial guesses')
print_msg(" Reference position guess [%.2fA]: %.2f x %.2f spx" % \
(lbda_ref,xc,yc), 1)
print_msg(" ADR guess: delta=%.2f, theta=%.1f deg" % \
(delta, theta/S.pi*180), 1)
# 3) Other parameters
PA = S.median(PA_vec)
polEll = pySNIFS.fit_poly(ell_vec[good],3,ellDeg,lbda_rel[good])
ell = polEll.coeffs[::-1]
polAlpha = pySNIFS.fit_poly(alpha_vec[good],4,alphaDeg,lbda_rel[good])
alpha = polAlpha.coeffs[::-1]
if betaDeg!=-1:
polBeta = pySNIFS.fit_poly(beta_vec[good],3,betaDeg,lbda_rel[good])
beta = polBeta.coeffs[::-1]
if sigmaDeg!=-1:
polSigma = pySNIFS.fit_poly(sigma_vec[good],3,sigmaDeg,lbda_rel[good])
sigma = polSigma.coeffs[::-1]
if etaDeg!=-1:
polEta = pySNIFS.fit_poly(eta_vec[good],3,etaDeg,lbda_rel[good])
eta = polEta.coeffs[::-1]
# Filling in the guess parameter arrays (px) and bounds arrays (bx)
p1 = [delta, theta, xc, yc, PA]
p1 += ell.tolist()
p1 += alpha.tolist()
if betaDeg!=-1:
p1 += beta.tolist()
if etaDeg!=-1:
p1 += eta.tolist()
if sigmaDeg!=-1:
p1 += sigma.tolist()
p1 += int_vec.tolist()
if opts.supernova: # Fix all parameters but intensities
print "WARNING: supernova-mode, no 3D PSF-fit"
# This mode completely discards the 3D fit. In practice, a 3D fit is
# still performed on the intensities, just to stay coherent with the
# rest of the code.
b1 = [[delta, delta], # delta
[theta, theta], # theta
[xc, xc], # x0
[yc, yc], # y0
[PA, PA]] # PA
for coeff in p1[5:npar_psf]:
b1 += [[coeff,coeff]] # ell,alpha,beta,eta and sigma coeff.
else:
b1 = [[None, None], # delta
[None, None], # theta
[None, None], # x0
[None, None], # y0
[None, None]] # PA
b1 += [[0., None]] + [[None, None]]*ellDeg # ell0 >0
b1 += [[0., None]] + [[None, None]]*alphaDeg # a0 > 0
if betaDeg!=-1:
b1 += [[0,None]] + [[None, None]]*betaDeg # b0 > 0
if etaDeg!=-1:
b1 += [[0,None]] + [[None, None]]*etaDeg # e0 > 0
if sigmaDeg!=-1:
b1 += [[0,None]] + [[None, None]]*sigmaDeg # s0 > 0
b1 += [[0., None]]*nslice # Intensities
func = [ '%s;%f,%f,%f,%f,%f,%f,%f' % \
(psfFn.name,SpaxelSize,lbda_ref,ellDeg,\
alphaDeg,opts.betaDeg,opts.etaDeg,opts.sigmaDeg) ] # PSF
param = [p1]
bounds = [b1]
if skyDeg >= 0:
p2 = S.ravel(sky_vec.T)
b2 = ([[0,None]] + [[None,None]]*(npar_sky-1)) * nslice
func += ['poly2D;%d' % skyDeg] # Add background
param += [p2]
bounds += [b2]
print_msg(" Adjusted parameters: delta,theta,xc,yc,PA,"
"%d ellCoeffs,%d alphaCoeffs,%d intens., %d bkgndCoeffs" % \
(ellDeg+1,alphaDeg+1,nslice,
skyDeg>=0 and (npar_sky*nslice) or 0), 3)
print_msg(" Initial guess [PSF]: %s" % p1[:npar_psf], 2)
print_msg(" Initial guess [Intensities]: %s" % \
p1[npar_psf:npar_psf+nslice], 3)
if skyDeg >= 0:
print_msg(" Initial guess [Background]: %s" % p2, 3)
# Instantiate the model class and perform the fit
data_model = pySNIFS_fit.model(data=cube, func=func,
param=param, bounds=bounds,
myfunc={psfFn.name:psfFn})
data_model.fit(maxfun=2000, save=True, msge=(opts.verbosity>=3))
# Storing result and guess parameters
fitpar = data_model.fitpar # Adjusted parameters
khi2 = data_model.khi2 # Reduced khi2 of meta-fit
khi2 *= data_model.dof # Restore real chi2
cov = data_model.param_error(fitpar)
errorpar = S.sqrt(cov.diagonal())
print_msg(" Fit result: DoF: %d, chi2=%f" % (data_model.dof, khi2), 2)
print_msg(" Fit result [PSF param]: %s" % fitpar[:npar_psf], 2)
print_msg(" Fit result [Intensities]: %s" % \
fitpar[npar_psf:npar_psf+nslice], 3)
if skyDeg >= 0:
print_msg(" Fit result [Background]: %s" % \
fitpar[npar_psf+nslice:], 3)
print_msg(" Reference position fit [%.2fA]: %.2f x %.2f spx" % \
(lbda_ref,fitpar[2],fitpar[3]), 1)
print_msg(" ADR fit: delta=%.2f, theta=%.1f deg" % \
(fitpar[0], fitpar[1]/S.pi*180), 1)
# Compute seeing (FWHM in arcsec)
seeing = data_model.func[0].FWHM(lbda_ref) * SpaxelSize
print " Seeing estimate: %.2f'' FWHM" % seeing
print " Effective airmass: %.2f" % (1/S.cos(S.arctan(fitpar[0])))
# Test positivity of alpha and ellipticity. At some point, maybe it would
# be necessary to force positivity in the fit (e.g. fmin_cobyla).
fit_alpha = eval_poly(fitpar[6+ellDeg:7+ellDeg+alphaDeg], lbda_rel)
if fit_alpha.min() < 0:
raise ValueError("Alpha is negative (%.2f) at %.0fA" % \
(fit_alpha.min(), cube.lbda[fit_alpha.argmin()]))
fit_ell = eval_poly(fitpar[5:6+ellDeg], lbda_rel)
if fit_ell.min() < 0:
raise ValueError("Ellipticity is negative (%.2f) at %.0fA" % \
(fit_ell.min(), cube.lbda[fit_ell.argmin()]))
# Computing final spectra for object and background ======================
# Compute aperture radius
if opts.method == 'psf':
radius = None
method = 'psf'
else:
radius = opts.radius * seeing/2.355 # Aperture radius [arcsec]
method = "%s r=%.1f sigma=%.2f''" % \
(opts.method, opts.radius, radius)
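# (2.355 = 2*sqrt(2*ln 2) converts a Gaussian FWHM into its sigma.)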
print "Extracting the spectrum [method=%s]..." % method
if skyDeg < 0:
print "WARNING: no background adjusted"
psfCtes = [SpaxelSize,lbda_ref,ellDeg,alphaDeg,
opts.betaDeg,opts.etaDeg,opts.sigmaDeg]
lbda,spec,var = extract_spec(full_cube, psfFn, psfCtes,
fitpar[:npar_psf], skyDeg=opts.skyDeg,
method=opts.method, radius=radius)
if skyDeg >= 0: # Compute background
spec[:,1:] /= SpaxelSize**2 # Per arcsec^2
var[:,1:] /= SpaxelSize**4
sky_spec_list = pySNIFS.spec_list([ pySNIFS.spectrum(data=s,
start=lbda[0],
step=step)
for s in spec[:,1:] ])
sky_var_list = pySNIFS.spec_list([ pySNIFS.spectrum(data=v,
start=lbda[0],
step=step)
for v in var[:,1:] ])
bkg_cube,bkg_spec = build_sky_cube(full_cube,sky_spec_list.list,
sky_var_list.list,opts.skyDeg)
bkg_spec.data /= full_cube.nlens
bkg_spec.var /= full_cube.nlens
# Creating a standard SNIFS cube with the adjusted data
# Do not use directly data_model.evalfit() since we want to keep
# psf and bkg separated. But in the end, psf+bkg = data_model.evalfit()
cube_fit = pySNIFS.SNIFS_cube(lbda=cube.lbda)
cube_fit.x = cube_fit.i - 7 # x in spaxel
cube_fit.y = cube_fit.j - 7 # y in spaxel
psf_model = psfFn(psfCtes, cube=cube_fit)
#psf_model = data_model.func[0]
psf = psf_model.comp(fitpar[:psf_model.npar])
cube_fit.data = psf
if skyDeg >= 0:
bkg_model = pySNIFS_fit.poly2D(opts.skyDeg, cube_fit)
#bkg_model = data_model.func[1]
bkg = bkg_model.comp(fitpar[psf_model.npar: \
psf_model.npar+bkg_model.npar])
cube_fit.data += bkg
# Update headers =========================================================
tflux = spec[:,0].sum() # Sum of the total flux of the spectrum
if skyDeg >= 0:
sflux = bkg_spec.data.sum() # Sum of the total flux of the sky
else:
sflux = 0 # Not stored anyway
fill_header(inhdr,fitpar[:npar_psf],lbda_ref,opts,khi2,seeing,tflux,sflux)
# Save star spectrum =====================================================
if not opts.out:
opts.out = 'spec_%s.fits' % (channel)
print "WARNING: saving output source spectrum to %s" % opts.out
star_spec = pySNIFS.spectrum(data=spec[:,0],start=lbda[0],step=step)
star_spec.WR_fits_file(opts.out,header_list=inhdr.items())
star_var = pySNIFS.spectrum(data=var[:,0],start=lbda[0],step=step)
star_var.WR_fits_file('var_'+opts.out,header_list=inhdr.items())
# Save sky spectrum/spectra ==============================================
if skyDeg >= 0:
if not opts.sky:
opts.sky = 'sky_%s.fits' % (channel)
print "WARNING: saving output sky spectrum to %s" % opts.sky
sky_spec = pySNIFS.spectrum(data=bkg_spec.data,start=lbda[0],step=step)
sky_spec.WR_fits_file(opts.sky,header_list=inhdr.items())
sky_var = pySNIFS.spectrum(data=bkg_spec.var,start=lbda[0],step=step)
sky_var.WR_fits_file('var_'+opts.sky,header_list=inhdr.items())
# Save 3D adjusted parameter file ========================================
if opts.File:
print "Producing 3D adjusted parameter file [%s]..." % opts.File
create_3D_log_file(opts.File,obj,airmass,efftime,
seeing,fitpar,khi2,errorpar,lbda_ref)
# Save adjusted PSF ==============================
if opts.keepmodel:
path,name = os.path.split(opts.out)
outpsf = os.path.join(path,'psf_'+name)
print "Saving adjusted meta-slice PSF in 3D-fits cube '%s'..." % outpsf
cube_fit.WR_3d_fits(outpsf)
# Create output graphics =================================================
if opts.plot:
print "Producing output figures [%s]..." % opts.graph
import matplotlib
if opts.graph=='png':
matplotlib.use('Agg')
elif opts.graph=='eps':
matplotlib.use('PS')
else:
opts.graph = 'pylab'
import pylab
if opts.graph=='pylab':
plot1 = plot2 = plot3 = plot4 = plot6 = plot7 = plot8 = plot5 = ''
else:
basename = os.path.splitext(opts.out)[0]
plot1 = os.path.extsep.join((basename+"_plt" , opts.graph))
plot2 = os.path.extsep.join((basename+"_fit1", opts.graph))
plot3 = os.path.extsep.join((basename+"_fit2", opts.graph))
plot4 = os.path.extsep.join((basename+"_fit3", opts.graph))
plot6 = os.path.extsep.join((basename+"_fit4", opts.graph))
plot7 = os.path.extsep.join((basename+"_fit5", opts.graph))
plot8 = os.path.extsep.join((basename+"_fit6", opts.graph))
plot5 = os.path.extsep.join((basename+"_fit7", opts.graph))
# Plot of the star and sky spectra -----------------------------------
print_msg("Producing spectra plot %s..." % plot1, 1)
fig1 = pylab.figure()
if skyDeg >= 0:
axS = fig1.add_subplot(3, 1, 1)
axB = fig1.add_subplot(3, 1, 2)
axN = fig1.add_subplot(3, 1, 3)
else:
axS = fig1.add_subplot(2, 1, 1)
axN = fig1.add_subplot(2, 1, 2)
axS.plot(star_spec.x, star_spec.data, 'b')
axS.set_title("Point-source spectrum [%s, %s]" % (obj,method))
axS.set_xlim(star_spec.x[0],star_spec.x[-1])
axS.set_xticklabels([])
axS.text(0.95,0.8, os.path.basename(opts.input), fontsize='smaller',
horizontalalignment='right', transform=axS.transAxes)
axN.plot(star_spec.x, S.sqrt(star_var.data), 'b')
if skyDeg >= 0:
axB.plot(bkg_spec.x, bkg_spec.data, 'g')
axB.set_xlim(bkg_spec.x[0],bkg_spec.x[-1])
axB.set_title("Background spectrum (per sq. arcsec)")
axB.set_xticklabels([])
axN.plot(bkg_spec.x, S.sqrt(bkg_spec.var), 'g')
axN.set_title("Error spectra" if skyDeg >= 0 else "Error spectrum")
axN.semilogy()
axN.set_xlim(star_spec.x[0],star_spec.x[-1])
axN.set_xlabel("Wavelength [A]")
# Plot of the fit on each slice --------------------------------------
print_msg("Producing slice fit plot %s..." % plot2, 1)
ncol = int(S.floor(S.sqrt(nslice)))
nrow = int(S.ceil(nslice/float(ncol)))
fig2 = pylab.figure()
fig2.subplots_adjust(left=0.05, right=0.97, bottom=0.05, top=0.97)
mod = data_model.evalfit()
for i in xrange(nslice): # Loop over meta-slices
ax = fig2.add_subplot(nrow, ncol, i+1)
data = cube.data[i,:]
fit = mod[i,:]
fmin = min(data.min(), fit.min()) - 2e-2
ax.plot(data - fmin) # Signal
ax.plot(fit - fmin) # Fit
ax.semilogy()
ax.set_xlim(0,len(data))
pylab.setp(ax.get_xticklabels()+ax.get_yticklabels(), fontsize=6)
ax.text(0.1,0.8, "%.0f" % cube.lbda[i], fontsize=8,
horizontalalignment='left', transform=ax.transAxes)
if ax.is_last_row() and ax.is_first_col():
ax.set_xlabel("Spaxel ID", fontsize=8)
ax.set_ylabel("Flux + cte", fontsize=8)
# Plot of the fit on rows and columns sum ----------------------------
print_msg("Producing profile plot %s..." % plot3, 1)
fig3 = pylab.figure()
fig3.subplots_adjust(left=0.05, right=0.97, bottom=0.05, top=0.97)
for i in xrange(nslice): # Loop over slices
ax = fig3.add_subplot(nrow, ncol, i+1)
sigSlice = S.nan_to_num(cube.slice2d(i, coord='p'))
varSlice = S.nan_to_num(cube.slice2d(i, coord='p', var=True))
modSlice = cube_fit.slice2d(i, coord='p')
prof_I = sigSlice.sum(axis=0) # Sum along rows
prof_J = sigSlice.sum(axis=1) # Sum along columns
err_I = S.sqrt(varSlice.sum(axis=0))
err_J = S.sqrt(varSlice.sum(axis=1))
mod_I = modSlice.sum(axis=0)
mod_J = modSlice.sum(axis=1)
ax.errorbar(range(len(prof_I)),prof_I,err_I, fmt='bo', ms=3)
ax.plot(mod_I, 'b-')
ax.errorbar(range(len(prof_J)),prof_J,err_J, fmt='r^', ms=3)
ax.plot(mod_J, 'r-')
pylab.setp(ax.get_xticklabels()+ax.get_yticklabels(), fontsize=6)
ax.text(0.1,0.8, "%.0f" % cube.lbda[i], fontsize=8,
horizontalalignment='left', transform=ax.transAxes)
if ax.is_last_row() and ax.is_first_col():
ax.set_xlabel("I (blue) or J (red)", fontsize=8)
ax.set_ylabel("Flux", fontsize=8)
# Plot of the star center of gravity and adjusted center -------------
print_msg("Producing ADR plot %s..." % plot4, 1)
xguess = xc + delta*psf_model.ADR_coeff[:,0]*S.sin(theta)
yguess = yc - delta*psf_model.ADR_coeff[:,0]*S.cos(theta)
xfit = fitpar[2] + fitpar[0]*psf_model.ADR_coeff[:,0]*S.sin(fitpar[1])
yfit = fitpar[3] - fitpar[0]*psf_model.ADR_coeff[:,0]*S.cos(fitpar[1])
fig4 = pylab.figure()
ax4a = fig4.add_subplot(2, 2, 1)
ax4b = fig4.add_subplot(2, 2, 2)
ax4c = fig4.add_subplot(2, 1, 2, aspect='equal', adjustable='datalim')
ax4a.errorbar(cube.lbda[good], xc_vec[good],yerr=error_mat[good,2],
fmt='b.',ecolor='b',label="Fit 2D")
if bad.any():
ax4a.plot(cube.lbda[bad],xc_vec[bad],'r.', label='_nolegend_')
ax4a.plot(cube.lbda, xguess, 'k--', label="Guess 3D")
ax4a.plot(cube.lbda, xfit, 'g', label="Fit 3D")
ax4a.set_xlabel("Wavelength [A]")
ax4a.set_ylabel("X center [spaxels]")
pylab.setp(ax4a.get_xticklabels()+ax4a.get_yticklabels(), fontsize=8)
leg = ax4a.legend(loc='lower left')
pylab.setp(leg.get_texts(), fontsize='smaller')
ax4b.errorbar(cube.lbda[good], yc_vec[good],yerr=error_mat[good,3],
fmt='b.',ecolor='b')
if bad.any():
ax4b.plot(cube.lbda[bad],yc_vec[bad],'r.')
ax4b.plot(cube.lbda, yfit, 'g')
ax4b.plot(cube.lbda, yguess, 'k--')
ax4b.set_xlabel("Wavelength [A]")
ax4b.set_ylabel("Y center [spaxels]")
pylab.setp(ax4b.get_xticklabels()+ax4b.get_yticklabels(), fontsize=8)
ax4c.errorbar(xc_vec[valid], yc_vec[valid],
xerr=error_mat[valid,2],yerr=error_mat[valid,3],
fmt=None, ecolor='g')
ax4c.scatter(xc_vec[good], yc_vec[good], faceted=True,
c=cube.lbda[good][::-1],
cmap=matplotlib.cm.Spectral, zorder=3)
# Plot position selection process
ax4c.plot(xref[good],yref[good],'b.') # Selected ref. positions
ax4c.plot(xref[bad],yref[bad],'r.') # Discarded ref. positions
ax4c.plot((x0,xc),(y0,yc),'k-')
ax4c.plot(xguess, yguess, 'k--') # Guess ADR
ax4c.plot(xfit, yfit, 'g') # Adjusted ADR
ax4c.set_autoscale_on(False)
ax4c.plot((xc,),(yc,),'k+')
ax4c.add_patch(matplotlib.patches.Circle((x0,y0),radius=rmax,
ec='0.8',fc='none'))
ax4c.add_patch(matplotlib.patches.Rectangle((-7.5,-7.5),15,15,
ec='0.8',lw=2,fc='none')) # FoV
ax4c.text(0.03, 0.85,
'Guess: x0,y0=%4.2f,%4.2f delta=%.2f theta=%.1fdeg' % \
(xc, yc, delta, parangle),
transform=ax4c.transAxes, fontsize='smaller')
ax4c.text(0.03, 0.75,
'Fit: x0,y0=%4.2f,%4.2f delta=%.2f theta=%.1fdeg' % \
(fitpar[2], fitpar[3],
fitpar[0], fitpar[1]/S.pi*180),
transform=ax4c.transAxes, fontsize='smaller')
ax4c.set_xlabel("X center [spaxels]")
ax4c.set_ylabel("Y center [spaxels]")
fig4.text(0.5, 0.93, "ADR plot [%s, airmass=%.2f]" % (obj, airmass),
horizontalalignment='center', size='large')
# Plot of the other model parameters ---------------------------------
print_msg("Producing model parameter plot %s..." % plot6, 1)
def estimate_error(x, cov, idx):
return S.sqrt(eval_poly(cov.diagonal()[idx], x))
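# Note (ours): estimate_error is a crude first-order estimate; it
# propagates only the diagonal of the covariance matrix through the
# polynomial basis and neglects correlations between coefficients.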
def plot_conf_interval(ax, x, y, dy,color,label):
ax.plot(x, y, color, label="_nolegend_")
ax.plot(x, y+dy, color+':', label='_nolegend_')
ax.plot(x, y-dy, color+':', label='_nolegend_')
def index(n):
if n==6:
return n+opts.ellDeg
elif n==7:
return n+opts.ellDeg+opts.alphaDeg
elif n==8:
return n+opts.ellDeg+opts.alphaDeg+opts.betaDeg
elif n==9:
return n+opts.ellDeg+opts.alphaDeg+opts.betaDeg+opts.etaDeg
elif n==10:
return n+opts.ellDeg+opts.alphaDeg+opts.betaDeg+opts.etaDeg+opts.sigmaDeg
err_PA = S.sqrt(cov.diagonal()[4])
err_ell = estimate_error(lbda_rel,cov,range(5,index(6)))
err_alpha = estimate_error(lbda_rel,cov,range(index(6),index(7)))
if betaDeg!=-1:
err_beta = estimate_error(lbda_rel, cov,
range(index(7),index(8)))
if etaDeg!=-1:
err_eta = estimate_error(lbda_rel, cov,
range(index(8),index(9)))
if sigmaDeg!=-1:
err_sigma = estimate_error(lbda_rel, cov,
range(index(9),index(10)))
guess_ell = eval_poly(polEll.coeffs[::-1], lbda_rel)
fit_ell = eval_poly(fitpar[5:index(6)], lbda_rel)
guess_alpha = eval_poly(polAlpha.coeffs[::-1], lbda_rel)
fit_alpha = eval_poly(fitpar[index(6):index(7)], lbda_rel)
if betaDeg!=-1:
guess_beta = eval_poly(polBeta.coeffs[::-1], lbda_rel)
fit_beta = eval_poly(fitpar[index(7):index(8)], lbda_rel)
if etaDeg!=-1:
guess_eta = eval_poly(polEta.coeffs[::-1], lbda_rel)
fit_eta = eval_poly(fitpar[index(8):index(9)], lbda_rel)
if sigmaDeg!=-1:
guess_sigma = eval_poly(polSigma.coeffs[::-1], lbda_rel)
fit_sigma = eval_poly(fitpar[index(9):index(10)], lbda_rel)
fig6 = pylab.figure()
ax6a = fig6.add_subplot(2, 1, 1)
ax6b = fig6.add_subplot(4, 1, 3)
ax6c = fig6.add_subplot(4, 1, 4)
ax6a.errorbar(cube.lbda[good], alpha_vec[good],error_mat[good,6],
fmt='b.', ecolor='blue', label="Fit 2D")
if bad.any():
ax6a.plot(cube.lbda[bad],alpha_vec[bad],'r.', label="_nolegend_")
ax6a.plot(cube.lbda, guess_alpha, 'b--', label="Guess 3D")
plot_conf_interval(ax6a, cube.lbda, fit_alpha, err_alpha,'b','alpha 3D')
if betaDeg!=-1:
ax6a.errorbar(cube.lbda[good], beta_vec[good],error_mat[good,7],
fmt='g.', ecolor='green', label="_nolegend_")
if bad.any():
ax6a.plot(cube.lbda[bad],beta_vec[bad],'r.', label="_nolegend_")
ax6a.plot(cube.lbda, guess_beta, 'g--', label='_nolegend_')
plot_conf_interval(ax6a, cube.lbda, fit_beta, err_beta,'g','beta 3D')
if etaDeg!=-1:
if betaDeg!=-1:
ax6a.errorbar(cube.lbda[good], eta_vec[good],error_mat[good,8],
fmt='r.', ecolor='red', label="_nolegend_")
else:
ax6a.errorbar(cube.lbda[good], eta_vec[good],error_mat[good,7],
fmt='r.', ecolor='red', label="_nolegend_")
if bad.any():
ax6a.plot(cube.lbda[bad],eta_vec[bad],'r.', label="_nolegend_")
ax6a.plot(cube.lbda, guess_eta, 'r--', label='_nolegend_')
plot_conf_interval(ax6a, cube.lbda, fit_eta, err_eta,'r','eta')
if sigmaDeg!=-1:
ax6a.errorbar(cube.lbda[good], sigma_vec[good],error_mat[good,-npar_sky-2],
fmt='y.', ecolor='yellow', label="_nolegend_")
if bad.any():
ax6a.plot(cube.lbda[bad],sigma_vec[bad],'r.', label="_nolegend_")
ax6a.plot(cube.lbda, guess_sigma, 'y--', label='_nolegend_')
plot_conf_interval(ax6a, cube.lbda, fit_sigma, err_sigma,'y','sigma')
leg = ax6a.legend(loc='upper right')
pylab.setp(leg.get_texts(), fontsize='smaller')
ax6a.set_ylabel(r'psf parameters')
ax6a.set_xticklabels([])
ax6a.set_title("Model parameters [%s, seeing %.2f'' FWHM]" % \
(obj,seeing))
ax6b.errorbar(cube.lbda[good], ell_vec[good], error_mat[good,5],
fmt='b.',ecolor='blue')
if bad.any():
ax6b.plot(cube.lbda[bad],ell_vec[bad],'r.')
ax6b.plot(cube.lbda, guess_ell, 'k--')
plot_conf_interval(ax6b, cube.lbda, fit_ell, err_ell,'b','ell 3D')
ax6b.text(0.03, 0.3,
'Guess: %s' % \
(', '.join([ 'e%d=%.2f' % (i,e)
for i,e in enumerate(polEll.coeffs[::-1]) ]) ),
transform=ax6b.transAxes, fontsize='smaller')
ax6b.text(0.03, 0.1,
'Fit: %s' % \
(', '.join([ 'e%d=%.2f' % (i,e)
for i,e in enumerate(fitpar[5:6+ellDeg]) ])),
transform=ax6b.transAxes, fontsize='smaller')
ax6b.set_ylabel('1/q^2')
ax6b.set_xticklabels([])
ax6c.errorbar(cube.lbda[good], PA_vec[good]/S.pi*180,
error_mat[good,4]/S.pi*180, fmt='b.', ecolor='b')
if bad.any():
ax6c.plot(cube.lbda[bad],PA_vec[bad]/S.pi*180,'r.')
ax6c.plot([cube.lbda[0],cube.lbda[-1]], [PA/S.pi*180]*2, 'k--')
plot_conf_interval(ax6c, S.asarray([cube.lbda[0],cube.lbda[-1]]),
S.asarray([fitpar[4]/S.pi*180]*2),
S.asarray([err_PA/S.pi*180]*2),'b','PA 3D')
ax6c.set_ylabel('PA [deg]')
ax6c.text(0.03, 0.1,
'Guess: PA=%4.2f Fit: PA=%4.2f' % \
(PA/S.pi*180,fitpar[4]/S.pi*180),
transform=ax6c.transAxes, fontsize='smaller')
ax6c.set_xlabel("Wavelength [A]")
# Plot of the radial profile -----------------------------------------
print_msg("Producing radial profile plot %s..." % plot7, 1)
fig7 = pylab.figure()
fig7.subplots_adjust(left=0.05, right=0.97, bottom=0.05, top=0.97)
def ellRadius(x,y, x0,y0, ell, q):
dx = x - x0
dy = y - y0
return S.sqrt(dx**2 + ell*dy**2 + 2*q*dx*dy)
for i in xrange(nslice): # Loop over slices
ax = fig7.add_subplot(nrow, ncol, i+1)
# Use adjusted elliptical radius instead of plain radius
#r = S.hypot(cube.x-xfit[i],cube.y-yfit[i])
#rfit = S.hypot(cube_fit.x-xfit[i],cube_fit.y-yfit[i])
r = ellRadius(cube.x,cube.y, xfit[i],yfit[i], fit_ell[i], fitpar[4])
rfit = ellRadius(cube_fit.x,cube_fit.y, xfit[i],yfit[i],
fit_ell[i], fitpar[4])
ax.plot(r, cube.data[i], 'b.')
ax.plot(rfit, cube_fit.data[i], 'r,')
ax.plot(rfit, psf[i], 'g,')
if skyDeg >= 0:
ax.plot(rfit, bkg[i],'c,')
ax.semilogy()
pylab.setp(ax.get_xticklabels()+ax.get_yticklabels(), fontsize=6)
ax.text(0.9,0.8, "%.0f" % cube.lbda[i], fontsize=8,
horizontalalignment='right', transform=ax.transAxes)
if method!='psf':
ax.axvline(radius/SpaxelSize, color='y', lw=2)
if ax.is_last_row() and ax.is_first_col():
ax.set_xlabel("Elliptical radius [spaxels]", fontsize=8)
ax.set_ylabel("Flux", fontsize=8)
ax.axis([0, rfit.max()*1.1,
cube.data[i][cube.data[i]>0].min()/1.2,
cube.data[i].max()*1.2])
# Radial Chi2 plot (not activated by default)
if False:
print_msg("Producing radial chi2 plot...", 1)
fig = pylab.figure()
fig.subplots_adjust(left=0.05, right=0.97, bottom=0.05, top=0.97)
for i in xrange(nslice): # Loop over slices
ax = fig.add_subplot(nrow, ncol, i+1)
rfit = ellRadius(cube_fit.x,cube_fit.y, xfit[i],yfit[i],
fit_ell[i], fitpar[4])
chi2 = (cube.slice2d(i,coord='p') - \
cube_fit.slice2d(i,coord='p'))**2 / \
cube.slice2d(i,coord='p',var=True)
ax.plot(rfit, chi2.flatten(), 'b.')
ax.semilogy()
pylab.setp(ax.get_xticklabels()+ax.get_yticklabels(), fontsize=6)
ax.text(0.9,0.8, "%.0f" % cube.lbda[i], fontsize=8,
horizontalalignment='right', transform=ax.transAxes)
if method!='psf':
ax.axvline(radius/SpaxelSize, color='y', lw=2)
if ax.is_last_row() and ax.is_first_col():
ax.set_xlabel("Elliptical radius [spaxels]", fontsize=8)
ax.set_ylabel("chi2", fontsize=8)
# Contour plot of each slice -----------------------------------------
print_msg("Producing PSF contour plot %s..." % plot8, 1)
fig8 = pylab.figure()
fig8.subplots_adjust(left=0.05, right=0.97, bottom=0.05, top=0.97,
hspace=0.02, wspace=0.02)
extent = (cube.x.min()-0.5,cube.x.max()+0.5,
cube.y.min()-0.5,cube.y.max()+0.5)
for i in xrange(nslice): # Loop over meta-slices
ax = fig8.add_subplot(ncol, nrow, i+1, aspect='equal')
data = cube.slice2d(i, coord='p')
fit = cube_fit.slice2d(i, coord='p')
vmin,vmax = pylab.prctile(fit, (5.,95.)) # Percentiles
lev = S.logspace(S.log10(vmin),S.log10(vmax),5)
ax.contour(data, lev, origin='lower', extent=extent)
cnt = ax.contour(fit, lev, ls='--', origin='lower', extent=extent)
pylab.setp(cnt.collections, linestyle='dotted')
ax.errorbar((xc_vec[i],),(yc_vec[i],),
xerr=(error_mat[i,2],),yerr=(error_mat[i,3],),
fmt=None, ecolor='k')
ax.plot((xfit[i],),(yfit[i],), 'g+')
if opts.method != 'psf':
ax.add_patch(matplotlib.patches.Circle((xfit[i],yfit[i]),
radius/SpaxelSize,
fc=None, ec='y', lw=2))
pylab.setp(ax.get_xticklabels()+ax.get_yticklabels(), fontsize=6)
ax.text(0.1,0.1, "%.0f" % cube.lbda[i], fontsize=8,
horizontalalignment='left', transform=ax.transAxes)
ax.axis(extent)
if ax.is_last_row() and ax.is_first_col():
ax.set_xlabel("I", fontsize=8)
ax.set_ylabel("J", fontsize=8)
if not ax.is_last_row():
ax.set_xticks([])
if not ax.is_first_col():
ax.set_yticks([])
# Residuals of each slice --------------------------------------------
print_msg("Producing residuals plot %s..." % plot5, 1)
fig5 = pylab.figure()
fig5.subplots_adjust(left=0.05, right=0.97, bottom=0.05, top=0.97,
hspace=0.02, wspace=0.02)
for i in xrange(nslice): # Loop over meta-slices
ax = fig5.add_subplot(ncol, nrow, i+1, aspect='equal')
data = cube.slice2d(i, coord='p')
var = cube.slice2d(i, coord='p', var=True)
fit = cube_fit.slice2d(i, coord='p')
res = S.nan_to_num((data - fit)/S.sqrt(var))
vmin,vmax = pylab.prctile(res, (3.,97.)) # Percentiles
ax.imshow(res, origin='lower', extent=extent,
vmin=vmin, vmax=vmax, interpolation='nearest')
ax.plot((xfit[i],),(yfit[i],), 'k+')
pylab.setp(ax.get_xticklabels()+ax.get_yticklabels(), fontsize=6)
ax.text(0.1,0.1, "%.0f" % cube.lbda[i], fontsize=8,
horizontalalignment='left', transform=ax.transAxes)
ax.axis(extent)
if ax.is_last_row() and ax.is_first_col():
ax.set_xlabel("I", fontsize=8)
ax.set_ylabel("J", fontsize=8)
if not ax.is_last_row():
ax.set_xticks([])
if not ax.is_first_col():
ax.set_yticks([])
if opts.graph=='pylab':
pylab.show()
else:
fig1.savefig(plot1)
fig2.savefig(plot2)
fig3.savefig(plot3)
fig4.savefig(plot4)
fig6.savefig(plot6)
fig7.savefig(plot7)
fig8.savefig(plot8)
fig5.savefig(plot5)
# End of psf_analysis.py =========================================================
|
snfactory/extract-star
|
scripts/PSF_analysis.py
|
Python
|
mit
| 95,995
|
[
"Gaussian"
] |
d59e3c519e02d42184234080f1f85a1e758dfe927cae4979affef638d9dfc668
|
"""
@name: PyHouse/src/Modules/Drivers/USB/Driver_USB_17DD_5500.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2012-2017 by D. Brian Kimmel
@license: MIT License
@note: Created on Dec 13, 2012
@summary: This module was created to handle the UPB PIM, which is a HID device.
Bus xxx Device yyy: ID 17dd:5500
Device Descriptor:
bLength 18 [0]
bDescriptorType 1 [1]
bcdUSB 1.00 [2:4]
bDeviceClass 0 [4] - (Defined at Interface level)
bDeviceSubClass 0 [5]
bDeviceProtocol 0 [6]
bMaxPacketSize0 8 [7]
idVendor 0x17dd [8:10]
idProduct 0x5500 [10:12]
bcdDevice 0.00 [12:14]
iManufacturer 1 [14] - Simply Automated Inc.
iProduct 2 [15] - USB to Serial
iSerial 0 [16]
bNumConfigurations 1 [17]
Configuration Descriptor:
bLength 9
bDescriptorType 2
wTotalLength 41
bNumInterfaces 1
bConfigurationValue 1
iConfiguration 4 Sample HID
bmAttributes 0x80
(Bus Powered)
MaxPower 100mA
Interface Descriptor:
bLength 9
bDescriptorType 4
bInterfaceNumber 0
bAlternateSetting 0
bNumEndpoints 2
bInterfaceClass 3 Human Interface Device
bInterfaceSubClass 0 No Subclass
bInterfaceProtocol 0 None
iInterface 0
HID Device Descriptor:
bLength 9
bDescriptorType 33
bcdHID 1.00
bCountryCode 0 Not supported
bNumDescriptors 1
bDescriptorType 34 Report
wDescriptorLength 37
Report Descriptors:
** UNAVAILABLE **
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x81 EP 1 IN
bmAttributes 3
Transfer Type Interrupt
Synch Type None
Usage Type Data
wMaxPacketSize 0x0008 1x 8 bytes
bInterval 10
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x02 EP 2 OUT
bmAttributes 3
Transfer Type Interrupt
Synch Type None
Usage Type Data
wMaxPacketSize 0x0008 1x 8 bytes
bInterval 10
"""
__updated__ = '2019-10-06'
# import array
import usb
# Import PyMh files
# from Modules.Drivers.USB import USB_driver
from Modules.Core import logging_pyh as Logger
from _ctypes import Array
from array import array
LOG = Logger.getLogger('PyHouse.UPB_17DD_5500 ')
class Api(object):
@staticmethod
def Setup():
"""Use the control endpoint to set up report descriptors for HID devices.
Much of this was determined empirically for a smarthome UPB PIM
"""
l_requestType = 0x21 # LIBUSB_ENDPOINT_OUT (0x00) | LIBUSB_REQUEST_TYPE_CLASS (0x20) | LIBUSB_RECIPIENT_DEVICE (0x00)
l_request = 0x09 #
l_value = 0x0003 # Report type & Report ID
l_index = 0
l_report = bytearray(b'12345')
# l_report = array.array('B', '\x00'.encode('utf-8') * 5)
l_report[0] = 0xc0
l_report[1] = 0x12
l_report[2] = 0x00
l_report[3] = 0x00
l_report[4] = 0x03
l_ret = (l_requestType,
l_request,
l_value,
l_index,
l_report)
LOG.info("USB_driver_17DD_5500._setup_hid_device() {}".format(l_ret))
return l_ret
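# Illustrative sketch only (not part of the original driver): the tuple
# returned by Setup() follows the positional argument order of pyusb's
# ctrl_transfer(bmRequestType, bRequest, wValue, wIndex, data), so with a
# hypothetical pyusb device handle l_device the HID SET_REPORT would be:
#   l_device.ctrl_transfer(*Api.Setup())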
@staticmethod
def Read(p_usb):
print("USB_driver_17DD_5500.read_device() - usb ={}".format(p_usb))
l_len = -1
while l_len != 0:
try:
l_msg = p_usb.Device.read(0x81, 8, timeout=1000)
# we seem to have actual length + 240 as 1st char
l_len = l_msg[0] - 240
if l_len > 0:
LOG.info("USB_driver.read_device() {} - {}".format(l_len, l_msg))
for l_x in range(l_len):
p_usb.message.append(l_msg[l_x + 1])
except usb.USBError as e:
l_len = 0
break
except Exception as e:
LOG.info(" -- Error in USB_driver_17DD_5500.read_device() ".format(e))
l_len = 0
break
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Core/Drivers/Usb/Driver_USB_17DD_5500.py
|
Python
|
mit
| 4,755
|
[
"Brian"
] |
2b135fe8141430cd3b800d061cb6b0a6790e7410e38a5751fbd4d295c9891a23
|
#!/usr/bin/env python
import numpy as np
try:
# Matplotlib is not a dependency
import matplotlib as mpl
mpl.use('Agg') # force the antigrain backend
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
except (ImportError, RuntimeError):
mpl = None
from scipy.optimize import leastsq
from ase.units import second
from ase.io.trajectory import PickleTrajectory
# Dimer oscillation model used for least-squares fit
def f(p, t):
return p[0] * np.cos(p[1] * (t - p[2])) + p[3]
# Jacobian of model with respect to its four parameters
def Df(p, t):
return np.array([
np.cos(p[1] * (t - p[2])),
-p[0] * np.sin(p[1] * (t - p[2])) * (t - p[2]),
p[0] * np.sin(p[1] * (t - p[2])) * p[1],
np.ones_like(t)])
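# A minimal self-check sketch (not part of the original test): the analytic
# Jacobian Df can be validated against central finite differences of f.
# All names below are local to this sketch.
def _check_jacobian(p=(0.1, 2.0, 0.3, 1.1), eps=1e-6):
    t = np.linspace(0.0, 3.0, 7)
    p = np.asarray(p)
    J = Df(p, t)  # one row per parameter, matching col_deriv=True below
    for i in range(4):
        dp = np.zeros(4)
        dp[i] = eps
        # central finite difference of f along parameter i
        fd = (f(p + dp, t) - f(p - dp, t)) / (2 * eps)
        assert np.allclose(J[i], fd, atol=1e-5)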
for name in ['h2_osc', 'n2_osc', 'na2_md', 'na2_osc']:
print '\nAnalysing %s\n%s' % (name, '-'*32)
# Import relevant test and make sure it has the prerequisite parameters
m = __import__(name, {}, {})
for attr in ['d_bond', 'd_disp', 'timestep', 'period', 'ndiv', 'niter']:
if not hasattr(m, attr):
raise ImportError('Module %s has no %s value' % (name, attr))
# Read dimer bond length time series from trajectory file
traj = PickleTrajectory(name + '_td.traj', 'r')
nframes = len(traj)
natoms = len(traj[0])
symbol = traj[0].get_name()
t_i = m.timestep * m.ndiv * np.arange(nframes)
Ekin_i, Epot_i = np.empty(nframes), np.empty(nframes)
R_iav = np.empty((nframes, natoms, 3))
V_iav = np.empty((nframes, natoms, 3))
A_iav = np.empty((nframes, natoms, 3))
for i in range(nframes):
Ekin_i[i] = traj[i].get_kinetic_energy()
Epot_i[i] = traj[i].get_potential_energy()
R_iav[i] = traj[i].get_positions()
V_iav[i] = traj[i].get_velocities()
A_iav[i] = traj[i].get_forces() / traj[i].get_masses()[:,np.newaxis]
print 'Read %d frames from trajectory...' % nframes
assert nframes * m.ndiv == m.niter, (nframes, m.ndiv, m.niter)
traj.close()
# Verify that energy was conserved
dEstd = np.std(Ekin_i + Epot_i)
dEmax = max(Ekin_i.ptp(), Epot_i.ptp())  # np.max(a, b) would misread b as an axis
assert dEstd < 1e-2 * dEmax + 1e-6, (dEstd, dEmax)
# Compare position, velocity and force time series using Velocity Verlet
dt = m.timestep * m.ndiv * 1e-18 * second
Rn_iav = R_iav[:-1] + V_iav[:-1] * dt + 0.5 * A_iav[:-1] * dt**2
Vn_iav = V_iav[:-1] + 0.5 * (A_iav[:-1] + A_iav[1:]) * dt
dRstd_av = (np.sum((Rn_iav - R_iav[1:])**2, axis=0) / len(Rn_iav))**0.5
dVstd_av = (np.sum((Vn_iav - V_iav[1:])**2, axis=0) / len(Vn_iav))**0.5
dRmax_av, dVmax_av = R_iav.ptp(axis=0), V_iav.ptp(axis=0)
assert np.all(dRstd_av < 1e-4 * dRmax_av + 1e-12), (dRstd_av, dRmax_av)
assert np.all(dVstd_av < 1e-4 * dVmax_av + 1e-12), (dVstd_av, dVmax_av)
# Fit model to time series using imported parameters as an initial guess
d_i = np.sum((R_iav[:,1] - R_iav[:,0])**2, axis=-1)**0.5
p0 = (m.d_disp, 2 * np.pi / m.period, -m.timestep * m.ndiv, m.d_bond)
p, cov, info, msg, status = leastsq(lambda p: f(p, t_i) - d_i, p0, \
Dfun=lambda p: Df(p, t_i), col_deriv=True, full_output=True)
print 'leastsq returned %d: %s' % (status, msg.replace('\n ',''))
print 'p0=', np.asarray(p0)
print 'p =', p
assert status in range(1,4+1), (p, cov, info, msg, status)
tol = 0.1 #TODO use m.reltol
err = np.abs(2 * np.pi / p[1] - m.period) / m.period
print 'T=%13.9f fs, Tref=%13.9f fs, err=%5.2f %%, tol=%.1f %%' \
% (2 * np.pi / p[1] * 1e-3, m.period * 1e-3, 1e2 * err, 1e2 * tol)
if mpl:
fig = Figure()
ax = fig.add_axes([0.11, 0.1, 0.86, 0.83])
raw = r',\;'.join([r'T=%.2f\mathrm{\,fs}',
r'T_\mathrm{ref}=%.2f\mathrm{\,fs}',
r'\eta=%.2f\,\%%'])
mathmode = raw % (2 * np.pi / p[1] * 1e-3, m.period * 1e-3, 1e2 * err)
ax.set_title(symbol + ' ($' + mathmode + '$)')
ax.set_xlabel('Time [fs]')
ax.set_ylabel('Dimer bond length [Ang]')
ax.plot(t_i * 1e-3, d_i, '-b', t_i * 1e-3, f(p, t_i), '--k')
ax.legend(('Ehrenfest data', r'$A\,\mathrm{cos}(\omega(t-t_0))+B$'))
FigureCanvasAgg(fig).print_figure(name + '.png', dpi=90)
if err > tol:
print 'Relative error %f %% > tolerance %f %%' % (1e2 * err, 1e2 * tol)
raise SystemExit(1)
|
ajylee/gpaw-rtxs
|
gpaw/test/big/ehrenfest/oscfit.py
|
Python
|
gpl-3.0
| 4,456
|
[
"ASE"
] |
f534b845a38b0d5fe3c2ba13f9fd54523aa3a3ed71d1a1012a716fbae864d98b
|
# coding: utf8
import logging
import random
import six
from jinja2 import Template
from kalliope.core import OrderListener
from kalliope.core.HookManager import HookManager
from kalliope.core.ConfigurationManager import SettingLoader, BrainLoader
from kalliope.core.Cortex import Cortex
from kalliope.core.Lifo.LifoManager import LifoManager
from kalliope.core.Models.MatchedSynapse import MatchedSynapse
from kalliope.core.NeuronExceptions import NeuronExceptions
from kalliope.core.OrderAnalyser import OrderAnalyser
from kalliope.core.Utils.Utils import Utils
logging.basicConfig()
logger = logging.getLogger("kalliope")
class InvalidParameterException(NeuronExceptions):
"""
Some Neuron parameters are invalid.
"""
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(InvalidParameterException, self).__init__(message)
class MissingParameterException(NeuronExceptions):
"""
Some Neuron parameters are missing.
"""
def __init__(self, message=None):
# Call the base class constructor with the parameters it needs
super(MissingParameterException, self).__init__(message)
self.message = message
class NoTemplateException(Exception):
"""
You must specify a say_template or a file_template
"""
pass
class TemplateFileNotFoundException(Exception):
"""
Template file can not be found. Check the provided path.
"""
pass
class TTSModuleNotFound(Exception):
"""
TTS module can not be found. It must be configured in the settings file.
"""
pass
class NeuronModule(object):
"""
This abstract class is the base class for all Neurons.
Each Neuron must inherit from this class.
"""
def __init__(self, **kwargs):
"""
Class used by neuron for talking
:param kwargs: Same parameters as the child class. Can contain info about the TTS to use instead of the
default one
"""
# get the child who called the class
child_name = self.__class__.__name__
self.neuron_name = child_name
sl = SettingLoader()
self.settings = sl.settings
brain_loader = BrainLoader()
self.brain = brain_loader.brain
self.tts = self._get_tts_object(settings=self.settings)
# get templates if provided
# Check if there is a template associate to the output message
self.say_template = kwargs.get('say_template', None)
# check if there is a template file associate to the output message
self.file_template = kwargs.get('file_template', None)
# keep the generated message
self.tts_message = None
# if the current call is api one
self.is_api_call = kwargs.get('is_api_call', False)
# boolean to know if the synapse is waiting for an answer
self.is_waiting_for_answer = False
# the synapse name to add to the buffer
self.pending_synapse = None
# a dict of parameters the user ask to save in short term memory
self.kalliope_memory = kwargs.get('kalliope_memory', None)
# parameters loaded from the order can be save now
Cortex.save_parameter_from_order_in_memory(self.kalliope_memory)
def __str__(self):
retuned_string = ""
retuned_string += self.tts_message
return retuned_string
def serialize(self):
"""
This method serializes this object in a proper way
:return: A dict of name and parameters
:rtype: Dict
"""
self.tts_message = Utils.encode_text_utf8(self.tts_message)
return {
'neuron_name': self.neuron_name,
'generated_message': self.tts_message
}
def say(self, message):
"""
Use TTS to speak the message out loud.
A message can be a string, a list or a dict.
If it's a string, simply pass the message to the TTS.
If it's a list, we randomly select a string from the list and give it to the TTS.
If it's a dict, we use the template given in parameter to create a string that we give to the TTS.
:param message: Can be a String or a dict or a list
.. raises:: TTSModuleNotFound
"""
logger.debug("[NeuronModule] Say() called with message: %s" % message)
tts_message = None
# we can save parameters from the neuron in memory
Cortex.save_neuron_parameter_in_memory(self.kalliope_memory, message)
if isinstance(message, str) or isinstance(message, six.text_type):
logger.debug("[NeuronModule] message is string")
tts_message = message
if isinstance(message, list):
logger.debug("[NeuronModule] message is list")
tts_message = random.choice(message)
if isinstance(message, dict):
logger.debug("[NeuronModule] message is dict")
tts_message = self._get_message_from_dict(message)
if message is None:
logger.debug("[NeuronModule] message is empty, try to load a template")
tts_message = self._get_message_from_dict(message)
if tts_message is not None:
logger.debug("[NeuronModule] tts_message to say: %s" % tts_message)
self.tts_message = tts_message
Utils.print_success(tts_message)
# save in kalliope memory the last tts message
Cortex.save("kalliope_last_tts_message", tts_message)
# process the audio only if the mute flag is false
if self.settings.options.mute:
logger.debug("[NeuronModule] mute is True, Kalliope is muted")
else:
logger.debug("[NeuronModule] mute is False, make Kalliope speaking")
HookManager.on_start_speaking()
# get the instance of the TTS module
tts_folder = None
if self.settings.resources:
tts_folder = self.settings.resources.tts_folder
tts_module_instance = Utils.get_dynamic_class_instantiation(package_name="tts",
module_name=self.tts.name,
parameters=self.tts.parameters,
resources_dir=tts_folder)
# generate the audio file and play it
tts_module_instance.say(tts_message)
HookManager.on_stop_speaking()
def _get_message_from_dict(self, message_dict):
"""
Generate a message that can be played by a TTS engine from a dict of variables and the jinja template
:param message_dict: the dict of message
:return: The message to say
.. raises:: TemplateFileNotFoundException
"""
returned_message = None
# the user chooses a say_template option
if self.say_template is not None:
returned_message = self._get_say_template(self.say_template, message_dict)
# the user chooses a file_template option
if self.file_template is not None: # the user choose a file_template option
returned_message = self._get_file_template(self.file_template, message_dict)
return returned_message
@staticmethod
def _get_say_template(list_say_template, message_dict):
if isinstance(list_say_template, list):
# then we pick randomly one template
list_say_template = random.choice(list_say_template)
t = Template(list_say_template)
return t.render(**message_dict)
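# Illustrative example (hypothetical values): with
#   say_template = "the temperature is {{ temp }} degrees"
#   message_dict = {"temp": 21}
# Template(say_template).render(**message_dict) returns
# "the temperature is 21 degrees".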
@classmethod
def _get_file_template(cls, file_template, message_dict):
real_file_template_path = Utils.get_real_file_path(file_template)
if real_file_template_path is None:
raise TemplateFileNotFoundException("Template file %s not found in templates folder"
% real_file_template_path)
# load the content of the file as template
t = Template(cls._get_content_of_file(real_file_template_path))
# add kalliope memory
final_message_dict = dict()
final_message_dict["kalliope_memory"] = Cortex.get_memory()
if message_dict:
final_message_dict.update(**message_dict)
returned_message = t.render(final_message_dict)
return returned_message
@staticmethod
def run_synapse_by_name(synapse_name, user_order=None, synapse_order=None, high_priority=False,
is_api_call=False, overriding_parameter_dict=None):
"""
Call the LIFO to add a synapse to the list of synapses to process
:param synapse_name: The name of the synapse to run
:param user_order: The user order
:param synapse_order: The synapse order
:param high_priority: If True, the synapse is executed before the end of the current synapse list
:param is_api_call: If true, the current call comes from the api
:param overriding_parameter_dict: dict of value to add to neuron parameters
"""
synapse = BrainLoader().brain.get_synapse_by_name(synapse_name)
matched_synapse = MatchedSynapse(matched_synapse=synapse,
matched_order=synapse_order,
user_order=user_order,
overriding_parameter=overriding_parameter_dict)
list_synapse_to_process = list()
list_synapse_to_process.append(matched_synapse)
# get the singleton
lifo_buffer = LifoManager.get_singleton_lifo()
lifo_buffer.add_synapse_list_to_lifo(list_synapse_to_process, high_priority=high_priority)
lifo_buffer.execute(is_api_call=is_api_call)
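# Illustrative call (hypothetical synapse name and parameters):
#   NeuronModule.run_synapse_by_name("say-hello", high_priority=True,
#                                    overriding_parameter_dict={"name": "John"})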
@staticmethod
def is_order_matching(order_said, order_match):
return OrderAnalyser().is_normal_matching(signal_order=order_match,
user_order=order_said)
@staticmethod
def _get_content_of_file(real_file_template_path):
"""
Return the content of a file in path <real_file_template_path>
:param real_file_template_path: path of the file to return the content
:return: file content str
"""
with open(real_file_template_path, 'r') as content_file:
return content_file.read()
@staticmethod
def get_audio_from_stt(callback):
"""
Call the default STT to get an audio sample and return it into the callback method
:param callback: A callback function
"""
HookManager.on_start_listening()
# call the order listener
ol = OrderListener(callback=callback)
ol.start()
ol.join()
# wait until the STT engine has finished its job (or the neurotransmitter neuron would be killed)
if ol.stt_instance is not None:
ol.stt_instance.join()
HookManager.on_stop_listening()
def get_neuron_name(self):
"""
Return the name of the neuron that called the mother class
:return:
"""
return self.neuron_name
@staticmethod
def _get_tts_object(tts_name=None, override_parameter=None, settings=None):
"""
Return a TTS model object
If no TTS name is provided, return the default TTS defined in the settings.
If a TTS name is provided, get the default configuration for this TTS from the settings and override each parameter
with the parameters provided in override_parameter.
:param tts_name: name of the TTS to load
:param override_parameter: dict of parameter to override the default configuration of the TTS
:param settings: current settings
:return: Tts model object
"""
# if the tts_name is not provided, we get the default tts from settings
if tts_name is None:
tts_name = settings.default_tts_name
# create a tts object from the tts the user wants to use
tts_object = next((x for x in settings.ttss if x.name == tts_name), None)
if tts_object is None:
raise TTSModuleNotFound("[NeuronModule] The tts module name %s does not exist in settings file" % tts_name)
if override_parameter is not None: # the user wants to override the default TTS configuration
logger.debug("[NeuronModule] args for TTS plugin before update: %s" % str(tts_object.parameters))
for key, value in override_parameter.items():
tts_object.parameters[key] = value
logger.debug("[NeuronModule] args for TTS plugin after update: %s" % str(tts_object.parameters))
logger.debug("[NeuronModule] TTS args: %s" % tts_object)
return tts_object
|
kalliope-project/kalliope
|
kalliope/core/NeuronModule.py
|
Python
|
gpl-3.0
| 12,930
|
[
"NEURON"
] |
f2b68b212ac8065e9ef6c86b3de2b23e945c8a31d0634eb1aab8209759345d61
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Compute Engine demo using the Google Python Client Library.
Demo steps:
- Create an instance with a start up script and metadata.
- Print out the URL where the modified image will be written.
- The start up script executes these steps on the instance:
- Installs ImageMagick on the machine.
- Downloads the image from the URL provided in the metadata.
- Adds the text provided in the metadata to the image.
- Copies the edited image to Cloud Storage.
- After receiving input from the user, shut down the instance.
To run this demo:
- Edit the client id and secret in the client_secrets.json file.
- Enter your Compute Engine API console project name below.
- Enter the URL of an image in the code below.
- Create a bucket on Google Cloud Storage accessible by your console project:
http://cloud.google.com/products/cloud-storage.html
- Enter the name of the bucket below.
"""
__author__ = 'kbrisbin@google.com (Kathryn Hurley)'
import logging
try:
import simplejson as json
except:
import json
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
import gce
IMAGE_URL = 'http://storage.googleapis.com/gce-demo-input/photo.jpg'
IMAGE_TEXT = 'Ready for dessert?'
INSTANCE_NAME = 'startup-script-demo'
DISK_NAME = INSTANCE_NAME + '-disk'
INSERT_ERROR = 'Error inserting %(name)s.'
DELETE_ERROR = """
Error deleting %(name)s. %(name)s might still exist; you can use
the console (http://cloud.google.com/console) to delete %(name)s.
"""
def delete_resource(delete_method, *args):
"""Delete a Compute Engine resource using the supplied method and args.
Args:
delete_method: The gce.Gce method for deleting the resource.
"""
resource_name = args[0]
logging.info('Deleting %s' % resource_name)
try:
delete_method(*args)
except (gce.ApiError, gce.ApiOperationError, ValueError) as e:
logging.error(DELETE_ERROR, {'name': resource_name})
logging.error(e)
def main():
"""Perform OAuth 2 authorization, then start, list, and stop instance(s)."""
logging.basicConfig(level=logging.INFO)
# Load the settings for this sample app.
settings = json.loads(open(gce.SETTINGS_FILE, 'r').read())
# Perform OAuth 2.0 authorization flow.
flow = flow_from_clientsecrets(
settings['client_secrets'], scope=settings['compute_scope'])
storage = Storage(settings['oauth_storage'])
credentials = storage.get()
# Authorize an instance of httplib2.Http.
if credentials is None or credentials.invalid:
credentials = run(flow, storage)
http = httplib2.Http()
auth_http = credentials.authorize(http)
# Retrieve user input.
image_url = raw_input(
'Enter the URL of an image [Defaults to %s]: ' % IMAGE_URL)
if not image_url:
image_url = IMAGE_URL
image_text = raw_input(
'Enter text to add to the image [Defaults to "%s"]: ' % IMAGE_TEXT)
if not image_text:
image_text = IMAGE_TEXT
bucket = raw_input('Enter a Cloud Storage bucket [Required]: ')
if not bucket:
logging.error('Cloud Storage bucket required.')
return
# Initialize gce.Gce.
gce_helper = gce.Gce(auth_http, project_id=settings['project'])
# List all running instances.
logging.info('These are your running instances:')
instances = gce_helper.list_instances()
for instance in instances:
logging.info(instance['name'])
# Create a Persistent Disk (PD), which is used as a boot disk.
try:
gce_helper.create_disk(DISK_NAME)
except (gce.ApiError, gce.ApiOperationError, ValueError, Exception) as e:
logging.error(INSERT_ERROR, {'name': DISK_NAME})
logging.error(e)
return
# Start an instance with a local start-up script and boot disk.
logging.info('Starting GCE instance')
try:
gce_helper.start_instance(
INSTANCE_NAME,
DISK_NAME,
service_email=settings['compute']['service_email'],
scopes=settings['compute']['scopes'],
startup_script='startup.sh',
metadata=[
{'key': 'url', 'value': image_url},
{'key': 'text', 'value': image_text},
{'key': 'cs-bucket', 'value': bucket}])
except gce.DiskDoesNotExistError as e:
# this specific handler must precede the broad Exception clause to be reachable
logging.error(INSERT_ERROR, {'name': INSTANCE_NAME})
logging.error(e)
return
except (gce.ApiError, gce.ApiOperationError, ValueError, Exception) as e:
# Delete the disk in case the instance fails to start.
delete_resource(gce_helper.delete_disk, DISK_NAME)
logging.error(INSERT_ERROR, {'name': INSTANCE_NAME})
logging.error(e)
return
logging.info(
'Visit http://storage.googleapis.com/%s/output.png.' % bucket)
logging.info('It might take a minute for the output.png file to show up.')
raw_input('Hit Enter when done to shutdown instance.')
# Stop the instance.
delete_resource(gce_helper.stop_instance, INSTANCE_NAME)
# Delete the disk.
delete_resource(gce_helper.delete_disk, DISK_NAME)
logging.info('Remember to delete the output.png file in ' + bucket)
if __name__ == '__main__':
main()
|
eepgwde/pyeg0
|
gapi/gce/main.py
|
Python
|
gpl-3.0
| 5,684
|
[
"VisIt"
] |
ef585396acbb03e2d7d9f2d1c2b5467b8d985d7b8dac49034d58c1e3b4e2207b
|
# NOTE: This example uses the ALPHA release of the next generation Twilio
# helper library - for more information on how to download and install this
# version, visit
# https://www.twilio.com/docs/libraries/python#accessing-preview-twilio-features
from twilio.rest import Client
# Account SID and Auth Token are found in the console:
# twilio.com/console
account = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
token = "your_auth_token"
client = Client(account, token)
service = client.preview.proxy.services.create(
friendly_name="My Awesome Service",
callback_url="https://www.example.com/"
)
print(service.sid)
|
teoreteetik/api-snippets
|
proxy/quickstart/create-service/create-service.6.x.py
|
Python
|
mit
| 617
|
[
"VisIt"
] |
ba420f361ecd56f35345e0b3a3d18c1c168a69cae14bd49f1e079c4c9cea31e0
|
'''@package docstring
@author: Jyh-Miin Lin (Jimmy), Cambridge University
@address: jyhmiinlin@gmail.com
Created on 2013/1/21
================================================================================
This file is part of pynufft.
pynufft is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pynufft is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pynufft. If not, see <http://www.gnu.org/licenses/>.
================================================================================
First, see the test_1D(), test_2D(), test_3D() examples
'''
try:
from nufft import *
import numpy
import scipy.fftpack
# import numpy.random
import matplotlib.pyplot
import matplotlib.cm
# import matplotlib.numerix
# import matplotlib.numerix.random_array
# import sys
# import utils
except:
print('failed to import modules')
print('numpy, scipy, matplotlib are required')
raise
dtype = numpy.complex64
#
# try:
# import llvm
# except:
# print('llvm not supported')
# from cx import *
# Add the ptdraft folder path to the sys.path list
# sys.path.append('..')
#import CsTransform.pynufft as pf
# try:
# import pycuda.gpuarray as gpuarray
# import pycuda.driver as cuda
# import pycuda.autoinit
# import pycuda.cumath as cumath
# gpu_flag = 1
# except:
# print "No PyOpenCL/PyFFT detected"
# gpu_flag = 0
# import utils
cmap=matplotlib.cm.gray
norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
# try:
# from numba import autojit
#
# except:
# print('numba not supported')
# def create_krnl(self,u): # create the negative 3D laplacian __kernel of size u.shape[0:3]
#
# krnl = numpy.zeros(numpy.shape(u)[0:3],dtype=numpy.complex64)
# krnl[0,0,0]=6
# krnl[1,0,0]=-1
# krnl[0,1,0]=-1
# krnl[0,0,1]=-1
# krnl[-1,0,0]=-1
# krnl[0,-1,0]=-1
# krnl[0,0,-1]=-1
# krnl = self.ifft_kkf(krnl)
#
# return krnl # (256*256*16)
# @autojit
def tailor_fftn(X):
X = scipy.fftpack.fftshift(scipy.fftpack.fftn(scipy.fftpack.fftshift((X))))
return X
def tailor_ifftn(X):
X = scipy.fftpack.fftshift(scipy.fftpack.ifftn(scipy.fftpack.ifftshift(X)))
return X
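# Both helpers are zero-frequency-centred transforms: fftshift is applied on
# the way in and out so the DC component sits at the array centre.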
def output(cc):
print('max',numpy.max(numpy.abs(cc[:])))
def Normalize(D):
return D/numpy.max(numpy.abs(D[:]))
def checkmax(x,dbg):
max_val = numpy.max(numpy.abs(x[:]))
if dbg ==0:
pass
else:
print( max_val)
return max_val
def appendmat(input_array,L):
if numpy.ndim(input_array) == 1:
input_shape = numpy.size(input_array)
input_shape = (input_shape,)
else:
input_shape = input_array.shape
Lprod= numpy.prod(input_shape)
output_array=numpy.copy(input_array)
output_array=numpy.reshape(output_array,(Lprod,1),order='F')
output_array=numpy.tile(output_array,(1,L))
output_array=numpy.reshape(output_array,input_shape+(L,),order='F')
return output_array
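# Worked example: appendmat(numpy.ones((2, 3)), 4) has shape (2, 3, 4);
# the input array is replicated L times along a new trailing axis.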
def freq_gradient(x):# zero frequency at centre
grad_x = numpy.copy(x)
dim_x=numpy.shape(x)
# print('freq_gradient shape',dim_x)
for pp in xrange(0,dim_x[2]):
grad_x[...,pp,:]=grad_x[...,pp,:] * (-2.0*numpy.pi*(pp -dim_x[2]/2.0 )) / dim_x[2]
return grad_x
def freq_gradient_H(x):
return -freq_gradient(x)
# def shrink_core(s,LMBD):
# # LMBD = LMBD + 1.0e-15
# s = numpy.sqrt(s).real
# ss = numpy.maximum(s-LMBD , 0.0)/(s+1e-7) # shrinkage
# return ss
# def shrink(dd, bb,LMBD):
#
# n_dims=numpy.shape(dd)[0]
#
# xx=()
#
# s = numpy.zeros(dd[0].shape)
# for pj in xrange(0,n_dims):
# s = s+ (dd[pj] + bb[pj])*(dd[pj] + bb[pj]).conj()
# s = numpy.sqrt(s).real
# ss = numpy.maximum(s-LMBD*1.0 , 0.0)/(s+1e-7) # shrinkage
# for pj in xrange(0,n_dims):
#
# xx = xx+ (ss*(dd[pj]+bb[pj]),)
#
# return xx
def shrink2(dd,bb,ss,n_dims):
xx = tuple(ss*(dd[pj]+bb[pj]) for pj in xrange(0,n_dims))
return xx
def shrink1(dd,bb,n_dims):
# s = numpy.zeros(numpy.shape(dd[0]),dtype = numpy.float)
# c = numpy.empty_like(s) # only real
# for pj in xrange(0,n_dims):
# c = (dd[pj] + bb[pj]).real
# s = s+ c**2
s = sum((dd[pj] + bb[pj]).real**2 for pj in xrange(0,n_dims))
s = s**0.5
return s.real
def shrink(dd, bb,LMBD):
# n_dims=numpy.shape(dd)[0]
n_dims = len(dd)
s = shrink1(dd,bb,n_dims)
ss = numpy.maximum(s-LMBD*1.0 , 0.0)/(s+1e-15)# shrinkage
xx = shrink2(dd,bb,ss,n_dims)
return xx
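# Worked 1D trace of shrink() (values rounded): with a single gradient field
# dd = ([3.0, 0.5, -2.0],), bb = ([0, 0, 0],) and LMBD = 1.0,
# s = [3.0, 0.5, 2.0] and ss = maximum(s-1, 0)/(s+1e-15) = [0.667, 0.0, 0.5],
# so shrink(dd, bb, 1.0) returns ([2.0, 0.0, -1.0],): small-magnitude entries
# are zeroed and large ones are pulled toward zero, i.e. soft-thresholding.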
def TVconstraint(xx,bb):
if len(xx) != len(bb):
raise ValueError('xx and bb must have the same length')
cons = sum(get_Diff_H( xx[jj] - bb[jj] , jj) for jj in xrange(0,len(xx)))
return cons
# def Dx(u):
# shapes = numpy.shape(u)
# rows=shapes[0]
# ind1 = xrange(0,rows)
# ind2 = numpy.roll(ind1,1,axis=0)
# u2= u[ind2,...]
# u2[...]= u[...] - u2[...]
# return u2#u[ind1,...]-u[ind2,...]
def Dx(u):
u2=numpy.concatenate((u,u[0:1,...]),axis=0)
u2=numpy.roll(u2,1,axis=0)
u2=numpy.diff(u2,n=1,axis=0)
return u2
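# Worked example: for u = numpy.array([1., 2., 4.]),
# concatenate gives [1, 2, 4, 1], roll gives [1, 1, 2, 4], diff gives
# [0, 1, 2], i.e. Dx returns the first differences u[i] - u[i-1] with a
# zero first entry (the appended u[0] wraps to the front and cancels).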
def get_Diff_H(x,axs): # hermitian operator of get_Diff(x,axs)
if axs > 0:
# transpose the specified axs to 0
# and use the case when axs == 0
# then transpose back
mylist=list(xrange(0,x.ndim))
(mylist[0], mylist[axs])=(mylist[axs],mylist[0])
tlist=tuple(mylist[:])
#=======================================================================
dcxt=numpy.transpose(
get_Diff_H(numpy.transpose(x,tlist),0),
tlist)
elif axs == 0:
# x=x[::-1,...]
#x=numpy.flipud(x)
dcxt=-get_Diff(x, 0)
#dcxt=numpy.flipud(dcxt)# flip along axes
# dcxt=dcxt[::-1,...]
dcxt=numpy.roll(dcxt, axis=0, shift=-1)
# dcxt=-get_Diff(x,0)
# dcxt=numpy.roll(dcxt,shift=2, axis=0)
return dcxt
def get_Diff(x,axs):
#calculate the 1D gradient of images
if axs > 0:
# transpose the specified axs to 0
# and use the case when axs == 0
# then transpose back
mylist=list(xrange(0,x.ndim))
(mylist[0], mylist[axs])=(mylist[axs],mylist[0])
tlist=tuple(mylist[:])
#=======================================================================
dcx=numpy.transpose(
get_Diff(numpy.transpose(x,tlist),0),
tlist)
elif axs == 0:
# xshape=numpy.shape(x)
# dcy=numpy.empty(numpy.shape(y),dtype=numpy.complex64)
# ShapeProd=numpy.prod(xshape[1:])
# x = numpy.reshape(x,xshape[0:1]+(ShapeProd,),order='F')
# dcx=numpy.empty(numpy.shape(x),dtype=x.dtype)
# dcx=Dx(x)
# for ss in xrange(0,ShapeProd):
# dcx[:,ss] = Dx(x[:,ss]) # Diff operators
dcx = Dx(x)
# dcy[:,:,ll] = Dyt(y[:,:,ll]-by[:,:,ll]) # Hermitian of Diff operators
# dcx=numpy.reshape(dcx, xshape ,order='F')
return dcx
def CombineMulti(multi_coil_data,axs):
U=numpy.mean(multi_coil_data,axs)
U = appendmat(U,multi_coil_data.shape[axs])
return U
def CopySingle2Multi(single_coil_data,n_tail):
U=numpy.copy(single_coil_data)
U = appendmat(U, n_tail)
return U
class pynufft(nufft):
def __init__(self,om, Nd, Kd,Jd):
nufft.__init__(self,om, Nd, Kd,Jd)
# self.st['q'] = self.st['p']
# self.st['q'] = self.st['q'].conj().multiply(self.st['q'])
# self.st['q'] = self.st['q'].sum(0)
# self.st['q'] = numpy.array(self.st['q'] )
# self.st['q']=numpy.reshape(self.st['q'],(numpy.prod(self.st['Kd']),1),order='F').real
# self.st['q']=self.st['p'].getH().dot(self.st['p']).diagonal() # slow version
#
# self.st['q']=numpy.reshape(self.st['q'],(numpy.prod(self.st['Kd']),1),order='F')
#
def forwardbackward(self,x):
if self.cuda_flag == 0:
st=self.st
Nd = st['Nd']
# Kd = st['Kd'] # unused
# dims = numpy.shape(x) #unused
dd = numpy.size(Nd)
# print('in nufft, dims:dd',dims,dd)
# print('ndim(x)',numpy.ndim(x[:,1]))
# checker
checker(x,Nd)
if numpy.ndim(x) == dd:
Lprod = 1
x = numpy.reshape(x,Nd+(1,),order='F')
elif numpy.ndim(x) > dd: # multi-channel data
Lprod = numpy.size(x)/numpy.prod(Nd)
Lprod = Lprod.astype(int)
'''
Now transform Nd grids to Kd grids(not be reshaped)
'''
Xk = self.Nd2Kd(x,0) #
for ii in xrange(0,Lprod):
Xk[...,ii] = st['q'][...,0]*Xk[...,ii]
'''
Now transform Kd grids to Nd grids(not be reshaped)
'''
x= self.Kd2Nd(Xk,0) #
checker(x,Nd) # check output
return x
elif self.cuda_flag == 1:
return self.forwardbackward_gpu(x)
def gpu_k_modulate(self):
try:
self.myfft(self.data_dev, self.data_dev,inverse=False)
self.data_dev=self.W_dev*self.data_dev
self.myfft(self.data_dev, self.data_dev,inverse=True)
return 0
except:
return 1
# def gpu_k_demodulate(self):
# try:
# self.myfft(self.data_dev, self.data_dev,inverse=False)
# self.data_dev=self.data_dev/self.W_dev
# self.myfft(self.data_dev, self.data_dev,inverse=True)
# print('inside gpu_k_demodulate')
# return 0
# except:
# return 1
def Nd2KdWKd2Nd_gpu(self,x, weight_flag):
'''
Now transform Nd grids to Kd grids(not be reshaped)
'''
#print('661 x.shape',x.shape)
# x is Nd Lprod
st=self.st
Nd = st['Nd']
Kd = st['Kd']
# dims = numpy.shape(x)
# dd = numpy.size(Nd)
Lprod = numpy.shape(x)[-1]
if self.debug==0:
pass
else:
checker(x,Nd)
snc = st['sn']
output_x=numpy.zeros(Kd, dtype=numpy.complex64)
# self.W_dev = self.thr.to_device(self.W.T.astype(dtype))
for ll in xrange(0,Lprod):
if weight_flag == 0:
pass
else:
x[...,ll] = x[...,ll] * snc # scaling factors
output_x=output_x*0.0
output_x[crop_slice_ind(x[...,ll].shape)] = x[...,ll]
self.data_dev = self.thr.to_device(output_x.astype(dtype))
if self.gpu_k_modulate()==0:
pass
else:
print('gpu_k_modulate error')
break
x[...,ll]=self.data_dev.get()[crop_slice_ind(Nd)]
if weight_flag == 0:
pass
else: #weight_flag =1 scaling factors
x[...,ll] = x[...,ll]*snc.conj() #% scaling factors
if self.debug==0:
pass # turn off checker
else:
checker(x,Nd) # checking size of x divisible by Nd
return x
def forwardbackward_gpu(self,x):
# print('inside forwardbackward_gpu ')
st=self.st
Nd = st['Nd']
# Kd = st['Kd'] # unused
# dims = numpy.shape(x) #unused
dd = numpy.size(Nd)
# print('in nufft, dims:dd',dims,dd)
# print('ndim(x)',numpy.ndim(x[:,1]))
# checker
checker(x,Nd)
if numpy.ndim(x) == dd:
Lprod = 1
elif numpy.ndim(x) > dd: # multi-channel data
Lprod = numpy.size(x)/numpy.prod(Nd)
Lprod = Lprod.astype(int)
x = numpy.reshape(x,Nd+(Lprod,),order='F')
'''
Now transform Nd grids to Kd grids(not be reshaped)
'''
x = self.Nd2KdWKd2Nd_gpu(x,0) #
# for ii in xrange(0,Lprod):
# # tmp_Xk = self.Nd2Kd_gpu(x[...,ii],0)
# Xk[...,ii] = st['q'][...,0]*Xk[...,ii]
# x[...,ii]= self.Kd2Nd_gpu(tmp_Xk,0)
'''
Now transform Kd grids to Nd grids(not be reshaped)
'''
# x= self.Kd2Nd(Xk,0) #
checker(x,Nd) # check output
return x
def inverse(self,data, mu, LMBD, gamma, nInner, nBreg): # main function of solver
self.f = data
self.mu = mu
self.LMBD = LMBD
self.gamma = gamma
self.nInner= nInner
self.nBreg= nBreg
# print(numpy.size(data) , self.st['M'] )
if numpy.size(data) == self.st['M']:
self.st['senseflag'] = 0
# print(numpy.size(data) )
print('turn-off sense recon')
try:
if self.st['senseflag']==0:
self.st = self._create_mask()
pass
else:
raise
except:
self.LMBD=self.LMBD*1.0
self.st['senseflag']=0 # turn-off sense, to get sensemap
#precompute highly constrainted images to guess the sensitivity maps
(u0,dump)=self._kernel(self.f, self.st , self.mu, self.LMBD, self.gamma, 1,2)
#===============================================================================
# mask
#===============================================================================
self.st = self._create_mask()
if numpy.size(u0.shape) > numpy.size(self.st['Nd']):
for pp in xrange(2,numpy.size(u0.shape)):
self.st['mask'] = appendmat(self.st['mask'],u0.shape[pp] )
self.st['mask2'] = appendmat(self.st['mask2'],u0.shape[pp] )
#===============================================================================
#estimate sensitivity maps by divided by rms images
self.st['sensemap'] = self._make_sense(u0) # setting up sense map in st['sensemap']
# for jj in xrange(0,self.st['sensemap'].shape[-1]):
# matplotlib.pyplot.subplot(2,2,jj+1)
# matplotlib.pyplot.imshow(self.st['sensemap'][...,jj].imag)
# matplotlib.pyplot.show()
self.st['senseflag']=1 # turn-on sense, to get sensemap
#scale back the _constrainted factor LMBD
self.LMBD=self.LMBD*1.0
#CS reconstruction
(self.u, self.u_stack)=self._kernel(self.f, self.st , self.mu, self.LMBD, self.gamma,
self.nInner,self.nBreg)
# for jj in xrange(0,self.u.shape[-1]):
# self.u[...,jj] = self.u[...,jj]*(self.st['sn']**0.7)# rescale the final image intensity
#
if self.u.shape[-1] == 1:
if numpy.ndim(self.u) != numpy.ndim(self.st['Nd']): # always true?
self.u = self.u[...,0]
# self.u = Normalize(self.u)
return self.u
# self.u=1.5*self.u/numpy.max(numpy.real(self.u[:]))
def _kernel(self, f, st , mu, LMBD, gamma, nInner, nBreg):
L= numpy.size(f)/st['M']
image_dim=st['Nd']+(L,)
if numpy.ndim(f) == 1:# preventing row vector
f=numpy.reshape(f,(numpy.shape(f)[0],1),order='F')
# f0 = numpy.copy(f) # deep copy to prevent scope f0 to f
# unused
# u = numpy.zeros(image_dim,dtype=numpy.complex64)
#===========================================================================
# check whether sense is used
# if senseflag == 0, create an all-ones mask
# if senseflag size is wrong, create an all-ones mask (shouldn't occur)
#===========================================================================
if st['senseflag'] == 0:
st['sensemap'] = numpy.ones(image_dim,dtype=numpy.complex64)
elif numpy.shape(st['sensemap']) != image_dim: #(shouldn't occur)
st['sensemap'] = numpy.ones(image_dim,dtype=numpy.complex64)
else:
pass # correct, use existing sensemap
#=========================================================================
# check whether mask is used
#=========================================================================
# if st.has_key('mask'):
if 'mask' in st: # condition in second step
if (numpy.shape(st['mask']) != image_dim) :
st['mask'] = numpy.reshape(st['mask'],image_dim,order='F')
# numpy.ones(image_dim,dtype=numpy.complex64)
else: # condition in first step
st['mask'] = numpy.ones(image_dim,dtype=numpy.complex64)
if 'mask2' in st:
if numpy.shape(st['mask2']) != image_dim:
st['mask2'] = numpy.reshape(st['mask2'],image_dim,order='F')
else:
st['mask2'] = numpy.ones(image_dim,dtype=numpy.complex64)
#===========================================================================
# update sensemap so we don't need to add ['mask'] in the iteration
#===========================================================================
st['sensemap'] = st['sensemap']*st['mask']
#=======================================================================
# RTR: k-space sampled density
# only diagonal elements are relevant (on k-space grids)
#=======================================================================
RTR=self._create_kspace_sampling_density()
#===============================================================================
# # # Laplacian operator, convolution kernel in spatial domain
# # related to _constraint
#===============================================================================
uker = self._create_laplacian_kernel()
#=======================================================================
# uker: deconvolution kernel in k-space,
# which will be divided in k-space in iterations
#=======================================================================
#===========================================================================
# initial estimation u, u0, uf
#===========================================================================
u = self.backward(f)#*self.st['sensemap'].conj()#/(1e-10+self.st['sensemap'].conj())#st['sensemap'].conj()*(self.backward(f))
# c = numpy.max(numpy.abs(u.flatten())) # Rough coefficient
for jj in xrange(0,u.shape[-1]):
u[...,jj] = u[...,jj]/self.st['sn']
if self.debug ==0:
pass
else:
print('senseflag',st['senseflag'])
if st['senseflag'] == 1:
u=CombineMulti(u,-1)[...,0:1] # summation of multicoil images
u0 = numpy.copy(u)
self.thresh_scale= numpy.max(numpy.abs(u0[:]))
self.u0=numpy.copy(u0)
# else:
# print('existing self.u, so we use previous u and u0')
# u=numpy.copy(self.u) # using existing initial values
# u0=numpy.copy(self.u0)
# if st['senseflag'] == 1:
# print('u.shape line 305',u.shape)
# u == u[...,0:1]
# print('u.shape line 307',u.shape)
#===============================================================================
# Now repeat the uker to L slices e.g. uker=512x512x8 (if L=8)
# useful for later calculation
#===============================================================================
#expand 2D/3D kernel to desired dimension of kspace
uker = self._expand_deconv_kernel_dimension(uker,u.shape[-1])
RTR = self._expand_RTR(RTR,u.shape[-1])
uker = self.mu*RTR - LMBD*uker + gamma
if self.debug ==0:
pass
else:
print('uker.shape line 319',uker.shape)
(xx,bb,dd)=self._make_split_variables(u)
uf = numpy.copy(u0) # only used for ISRA, written here for generality
murf = numpy.copy(u) # initial values
# #===============================================================================
u_stack = numpy.empty(st['Nd']+(nBreg,),dtype=numpy.complex)
self.err =1.0e+13
u_k_1=0
for outer in xrange(0,nBreg):
for inner in xrange(0,nInner):
# update u
if self.debug==0:
pass
else:
print('iterating',[inner,outer])
#===============================================================
# update u # simple k-space deconvolution to guess initial u
u = self._update_u(murf,u,uker,xx,bb)
for jj in xrange(0,u.shape[-1]):
u[...,jj] = u[...,jj]*(self.st['sn']**1)
# Temporarily scale the image for soft-thresholding
c = numpy.max(numpy.abs(u.flatten())) # Rough coefficient
# to correct threshold of nonlinear shrink
#===================================================================
# # update d
#===================================================================
#===================================================================
# Shrinkage: remove tiny values "in somewhere sparse!"
# dx+bx should be sparse!
#===================================================================
# shrinkage
#===================================================================
dd=self._update_d(u,dd)
xx=self._shrink( dd, bb, c/LMBD/(numpy.prod(st['Nd'])**(1.0/len(st['Nd']))))
#===============================================================
#===================================================================
# # update b
#===================================================================
bb=self._update_b(bb, dd, xx)
for jj in xrange(0,u.shape[-1]):
u[...,jj] = u[...,jj]/(self.st['sn']**1)
# Temporarily scale the image for soft-thresholding
# if outer < nBreg: # do not update in the last loop
if st['senseflag']== 1:
u = appendmat(u[...,0],L)
(u, murf, uf, u_k_1)=self._external_update(u, uf, u0, u_k_1, outer) # update outer Split_bregman
if st['senseflag']== 1:
u = u[...,0:1]
murf = murf[...,0:1]
u_stack[...,outer] = (u[...,0]*(self.st['sn']))
# u_stack[...,outer] =u[...,0]
fermi = scipy.fftpack.fftshift( self.st['mask'][...,0] )
for jj in xrange(0,u.shape[-1]):
u[...,jj] = scipy.fftpack.ifftn(scipy.fftpack.fftn(u[...,jj])*fermi ) # apply GE's fermi filter
u[...,jj] = u[...,jj]*(self.st['sn'])*self.st['mask2'][...,jj]# rescale the final image intensity
# matplotlib.pyplot.imshow(self.st['mask2'][:,:,0].real)
# matplotlib.pyplot.show()
return (u,u_stack)
def _update_u(self,murf,u,uker,xx,bb):
#print('inside _update_u')
# checkmax(u)
# checkmax(murf)
# rhs = self.mu*murf + self.LMBD*self.get_Diff(x,y,bx,by) + self.gamma
#=======================================================================
# Trick: make "llist" for numpy.transpose
mylist = tuple(xrange(0,numpy.ndim(xx[0])))
tlist = mylist[1::-1]+mylist[2:]
#=======================================================================
# update the right-head side terms
rhs = (self.mu*murf +
self.LMBD*self._constraint(xx,bb) +
self.gamma * u)
rhs = rhs * self.st['mask'][...,0:u.shape[-1]]
# rhs=Normalize(rhs)
#=======================================================================
# Trick: make "flist" for fftn
flist = mylist[:-1:1]
u = self._k_deconv(rhs, uker,self.st,flist,mylist)
# print('max rhs u',numpy.max(numpy.abs(rhs[:])),numpy.max(numpy.abs(u[:])))
# print('max,q',numpy.max(numpy.abs(self.st['q'][:])))
# for jj in xrange(0,1):
# u = u - 0.1*(self.k_deconv(u, 1.0/(RTR+self.LMBD*uker+self.gamma),self.st,flist,mylist) - rhs
# )
# checkmax(u)
# checkmax(rhs)
# checkmax(murf)
#print('leaving _update_u')
return u # normalization
def _k_deconv(self, u,uker,st,flist,mylist):
u0=numpy.copy(u)
u=u*st['mask'][...,0:u.shape[-1]]
# u=scipy.fftpack.fftn(u, st['Kd'],flist)
###
# if self.cuda_flag == 1:
# tmpU=numpy.zeros(st['Kd'],dtype=u.dtype)
# self.W_dev = self.thr.to_device((uker[...,0]).astype(numpy.complex64))
# for pj in xrange(0,u.shape[-1]):
#
# tmpU=tmpU*0.0
#
# tmpU[crop_slice_ind(st['Nd'])] = u[...,pj]
# self.data_dev = self.thr.to_device(tmpU.astype(numpy.complex64))
#
# # self.myfft(self.data_dev, self.data_dev,inverse=False)
# # self.data_dev=self.W_dev*self.data_dev
# # self.myfft(self.data_dev, self.data_dev,inverse=True)
# if self.gpu_k_demodulate()==0:
# pass
# else:
# print('gpu_k_modulate error')
# break
# u[...,pj]=self.data_dev.get()[crop_slice_ind(st['Nd'])]
# # u = U[[slice(0, st['Nd'][_ss]) for _ss in mylist[:-1]]]
# self.W_dev = self.thr.to_device(1.0/uker[...,0].astype(numpy.complex64))
# elif self.cuda_flag == 0:
U=numpy.empty(st['Kd']+(u.shape[-1],),dtype=u.dtype)
for pj in xrange(0,u.shape[-1]):
U[...,pj]=self.emb_fftn(u[...,pj], st['Kd'], xrange(0,numpy.size(st['Kd'])))
U[...,pj]=U[...,pj]/uker[...,pj] # deconvolution
U[...,pj]=self.emb_ifftn(U[...,pj], st['Kd'], xrange(0,numpy.size(st['Kd'])))
u = U[[slice(0, st['Nd'][_ss]) for _ss in mylist[:-1]]]
# optional: one- additional Conjugated step to ensure the quality
# for pp in xrange(0,3):
# u = self._cg_step(u0,u,uker,st,flist,mylist)
#
u=u*st['mask'][...,0:u.shape[-1]]
return u
def _cg_step(self, rhs, u, uker, st,flist,mylist):
u=u#*st['mask'][...,0:u.shape[-1]]
# u=scipy.fftpack.fftn(u, st['Kd'],flist)
AU=numpy.empty(st['Kd']+(u.shape[-1],),dtype=u.dtype)
# print('U.shape. line 446',U.shape)
# print('u.shape. line 447',u.shape)
for pj in xrange(0,u.shape[-1]):
AU[...,pj]=self.emb_fftn(u[...,pj], st['Kd'], xrange(0,numpy.size(st['Kd']))) * uker[...,pj] # deconvolution
AU[...,pj]=self.emb_ifftn(AU[...,pj], st['Kd'], xrange(0,numpy.size(st['Kd'])))
ax0 = AU[[slice(0, st['Nd'][_ss]) for _ss in mylist[:-1]]]
u=u#*st['mask'][...,0:u.shape[-1]]
r = rhs - ax0
p = r
for running_count in xrange(0,1):
upper_inner = r.conj()*r
upper_inner = numpy.sum(upper_inner[:])
AU=numpy.empty(st['Kd']+(u.shape[-1],),dtype=u.dtype)
# print('U.shape. line 446',U.shape)
# print('u.shape. line 447',u.shape)
for pj in xrange(0,u.shape[-1]):
AU[...,pj]=self.emb_fftn(p[...,pj], st['Kd'], xrange(0,numpy.size(st['Kd']))) * uker[...,pj] # deconvolution
AU[...,pj]=self.emb_ifftn(AU[...,pj], st['Kd'], xrange(0,numpy.size(st['Kd'])))
Ap = AU[[slice(0, st['Nd'][_ss]) for _ss in mylist[:-1]]]
lower_inner = p.conj()*Ap
lower_inner = numpy.sum(lower_inner[:])
alfa_k = upper_inner/ lower_inner
# alfa_k = alfa_k*0.6
u = u + alfa_k * p
r2 = r - alfa_k *Ap
beta_k = numpy.sum((r2.conj()*r2)[:])/numpy.sum((r.conj()*r)[:])
r = r2
p = r + beta_k*p
return u
def _constraint(self,xx,bb):
'''
include TVconstraint and others
'''
cons = TVconstraint(xx[:],bb[:])
return cons
def _shrink(self,dd,bb,thrsld):
'''
soft-thresholding the edges
'''
output_xx=shrink( dd[:], bb[:], thrsld)# 3D thresholding
return output_xx
def _make_split_variables(self,u):
n_dims = len(self.st['Nd'])
xx = ()
bb = ()
dd = ()
for jj in xrange(0,n_dims):
x=numpy.zeros(u.shape)
bx=numpy.zeros(u.shape)
dx=numpy.zeros(u.shape)
xx = xx + (x,)
bb = bb + (bx,)
dd = dd + (dx,)
# x=numpy.zeros(u.shape)
# y=numpy.zeros(u.shape)
# bx=numpy.zeros(u.shape)
# by=numpy.zeros(u.shape)
# dx=numpy.zeros(u.shape)
# dy=numpy.zeros(u.shape)
# xx= (x,y)
# bb= (bx,by)
# dd= (dx,dy)
return(xx,bb,dd)
def _extract_svd(self,input_stack,L):
C= numpy.copy(input_stack) # temporary array
print('size of input_stack', numpy.shape(input_stack))
C=C/numpy.max(numpy.abs(C))
reps_acs = 16
mysize = 16
K= 5 # initial rank guess; refined below from the singular-value spectrum
half_mysize = mysize/2
dimension = numpy.ndim(C) -1 # collapse coil dimension
if dimension == 1:
tmp_stack = numpy.empty((mysize,),dtype = numpy.complex64)
svd_size = mysize
C_size = numpy.shape(C)[0]
data = numpy.empty((svd_size,L*reps_acs),dtype=numpy.complex64)
# for jj in xrange(0,L):
# C[:,jj]=tailor_fftn(C[:,jj])
# for kk in xrange(0,reps_acs):
# tmp_stack = numpy.reshape(tmp_stack,(svd_size,),order = 'F')
# data[:,jj] = numpy.reshape(tmp_stack,(svd_size,),order = 'F')
elif dimension == 2:
tmp_stack = numpy.empty((mysize,mysize,),dtype = numpy.complex64)
svd_size = mysize**2
data = numpy.empty((svd_size,L*reps_acs),dtype=numpy.complex64)
C_size = numpy.shape(C)[0:2]
for jj in xrange(0,L):
# matplotlib.pyplot.imshow(C[...,jj].real)
# matplotlib.pyplot.show()
# tmp_pt=(C_size[0]-reps_acs)/2
C[:,:,jj]=tailor_fftn(C[:,:,jj])
for kk in xrange(0,reps_acs):
a=numpy.mod(kk,reps_acs**0.5)
b=kk/(reps_acs**0.5)
tmp_stack = C[C_size[0]/2-half_mysize-(reps_acs**0.5)/2+a : C_size[0]/2+half_mysize-(reps_acs**0.5)/2+a,
C_size[1]/2-half_mysize-(reps_acs**0.5)/2+b : C_size[1]/2+half_mysize-(reps_acs**0.5)/2+b,jj]
data[:,jj*reps_acs+kk] = numpy.reshape(tmp_stack,(svd_size,),order = 'F')
elif dimension == 3:
tmp_stack = numpy.empty((mysize,mysize,mysize),dtype = numpy.complex64)
svd_size = mysize**3
data = numpy.empty((svd_size,L),dtype=numpy.complex64)
C_size = numpy.shape(C)[0:3]
for jj in xrange(0,L):
C[:,:,:,jj]=tailor_fftn(C[:,:,:,jj])
tmp_stack= C[C_size[0]/2-half_mysize:C_size[0]/2+half_mysize,
C_size[1]/2-half_mysize:C_size[1]/2+half_mysize,
C_size[2]/2-half_mysize:C_size[2]/2+half_mysize,
jj]
data[:,jj] = numpy.reshape(tmp_stack,(svd_size,),order = 'F')
# OK, data is the matrix of size (mysize*n, L) for SVD
import scipy.linalg
(s_blah,vh_blah) = scipy.linalg.svd(data)[1:3]
for jj in xrange(0,numpy.size(s_blah)): #
if s_blah[jj] > 0.1*s_blah[0]: # 10% of maximum singular value to decide the rank
K = jj+1
# pass
else:
break
v_blah =vh_blah.conj().T
C = C*0.0 # now C will be used as the output stack
V_para = v_blah[:,0:K]
print('shape of V_para',numpy.shape(V_para))
V_para = numpy.reshape(V_para,(reps_acs**0.5,reps_acs**0.5,L, K),order='F')
C2 = numpy.zeros((C.shape[0],C.shape[1],L,K),dtype=numpy.complex64)
for jj in xrange(0,L): # coils
for kk in xrange(0,K): # rank
C2[C.shape[0]/2-reps_acs**0.5/2:C.shape[0]/2+reps_acs**0.5/2,
C.shape[1]/2-reps_acs**0.5/2:C.shape[1]/2+reps_acs**0.5/2,
jj,kk]=V_para[:,:,jj,kk]
C2[:,:,jj,kk]=tailor_fftn(C2[:,:,jj,kk])
# C_value = numpy.empty_like(C)
for mm in xrange(0,C.shape[0]): # dim 0
for nn in xrange(0,C.shape[1]): # dim 1
G = C2[mm,nn,:,:].T # Transpose (non-conjugated) of G
Gh = G.conj().T # hermitian
g = numpy.dot(Gh,G) #construct g matrix for eigen-decomposition
w,v = numpy.linalg.eig(g) # eigen value:w, eigen vector: v
ind = numpy.argmax(numpy.abs(w)) # find the maximum
the_eig = numpy.abs(w[ind]) # find the abs of maximal eigen value
ref_angle=(numpy.sum(v[:,ind])/(numpy.abs(numpy.sum(v[:,ind]))))
v[:,ind] = v[:,ind]/ref_angle # correct phase by summed value
C[mm,nn,:] = v[:,ind]*the_eig
# for jj in xrange(0,L):
# matplotlib.pyplot.subplot(2,2,jj+1)
# matplotlib.pyplot.imshow((C[...,jj].real))
# matplotlib.pyplot.show()
# for jj in xrange(0,L):
# matplotlib.pyplot.subplot(2,2,jj+1)
# matplotlib.pyplot.imshow((input_stack[...,jj].real))
# matplotlib.pyplot.show()
return C/numpy.max(numpy.abs(C)) # normalize the coil sensitivities
def _make_sense(self,u0):
# st=self.st
L=numpy.shape(u0)[-1]
try:
coil_sense = self._extract_svd(u0,L)
# st['sensemap']=u0
# for jj in xrange(0,L):
# matplotlib.pyplot.subplot(2,2,jj+1)
# matplotlib.pyplot.imshow((st['sensemap'][...,jj].real))
# matplotlib.pyplot.show()
return coil_sense
except:
u0dims= numpy.ndim(u0)
beta=100
if u0dims-1 >0:
rows=numpy.shape(u0)[0]
dpss_rows = numpy.kaiser(rows, beta)
dpss_rows = numpy.fft.fftshift(dpss_rows)
dpss_rows[3:-3] = 0.0
dpss_fil = dpss_rows
if self.debug==0:
pass
else:
print('dpss shape',dpss_fil.shape)
if u0dims-1 > 1:
cols=numpy.shape(u0)[1]
dpss_cols = numpy.kaiser(cols, beta)
dpss_cols = numpy.fft.fftshift(dpss_cols)
dpss_cols[3:-3] = 0.0
dpss_fil = appendmat(dpss_fil,cols)
dpss_cols = appendmat(dpss_cols,rows)
dpss_fil=dpss_fil*numpy.transpose(dpss_cols,(1,0))
if self.debug==0:
pass
else:
print('dpss shape',dpss_fil.shape)
if u0dims-1 > 2:
zag = numpy.shape(u0)[2]
dpss_zag = numpy.kaiser(zag, beta)
dpss_zag = numpy.fft.fftshift(dpss_zag)
dpss_zag[3:-3] = 0.0
dpss_fil = appendmat(dpss_fil,zag)
dpss_zag = appendmat(dpss_zag,rows)
dpss_zag = appendmat(dpss_zag,cols)
dpss_fil=dpss_fil*numpy.transpose(dpss_zag,(1,2,0)) # low pass filter
if self.debug==0:
pass
else:
print('dpss shape',dpss_fil.shape)
#dpss_fil=dpss_fil / 10.0
# rms = numpy.mean((coil_sense),-1)
# rms = rms/numpy.max()
coil_sense = numpy.copy(u0)
rms=(numpy.mean( (coil_sense*coil_sense.conj()),-1))**0.5 # Root of sum square / OLD
for ll in xrange(0,L):
# st['sensemap'][...,ll]=(u0[...,ll]+1e-16)/(rms+1e-16) # / OLD
coil_sense[...,ll]=(coil_sense[...,ll]+1e-16)/(rms+1e-16) # need SVD
# st['sensemap']=coil_sense
# st['sensemap']=numpy.empty(numpy.shape(u0),dtype=numpy.complex64)
if self.debug==0:
pass
else:
# print('sensemap shape',st['sensemap'].shape, L)
print('u0shape',u0.shape,rms.shape)
for ll in xrange(0,L):
# st['sensemap'][...,ll]=(u0[...,ll]+1e-16)/(rms+1e-16) # / OLD
# st['sensemap'][...,ll]=coil_sense[...,ll] # need SVD
if self.debug==0:
pass
else:
print('sensemap shape',coil_sense.shape, L)
print('rmsshape', rms.shape)
coil_sense[...,ll]= scipy.fftpack.fftshift(coil_sense[...,ll])
if self.pyfftw_flag == 1:
if self.debug==0:
pass
else:
print('USING pyfftw and thread is = ',self.threads)
coil_sense[...,ll] = pyfftw.interfaces.scipy_fftpack.fftn(coil_sense[...,ll])#,
# coil_sense[...,ll].shape,
# range(0,numpy.ndim(coil_sense[...,ll])),
# threads=self.threads)
coil_sense[...,ll] = coil_sense[...,ll] * dpss_fil
coil_sense[...,ll] = pyfftw.interfaces.scipy_fftpack.ifftn(coil_sense[...,ll])#,
# coil_sense[...,ll].shape,
# range(0,numpy.ndim(coil_sense[...,ll])),
# threads=self.threads)
else:
coil_sense[...,ll] = scipy.fftpack.fftn(coil_sense[...,ll])#,
# coil_sense[...,ll].shape,
# range(0,numpy.ndim(coil_sense[...,ll])))
coil_sense[...,ll] = coil_sense[...,ll] * dpss_fil
coil_sense[...,ll] = scipy.fftpack.ifftn(coil_sense[...,ll])#,
# coil_sense[...,ll].shape,
# range(0,numpy.ndim(coil_sense[...,ll])))
# coil_sense[...,ll]=scipy.fftpack.ifftn(scipy.fftpack.fftn(coil_sense[...,ll])*dpss_fil)
# coil_sense = Normalize(coil_sense)
coil_sense[...,ll]= scipy.fftpack.ifftshift(coil_sense[...,ll])
return coil_sense
def _create_kspace_sampling_density(self):
#=======================================================================
        # RTR: k-space sampling density
# only diagonal elements are relevant (on k-space grids)
#=======================================================================
RTR=self.st['q'] # see __init__() in class "nufft"
return RTR
def _create_laplacian_kernel(self):
#===============================================================================
        # # # Laplacian operator, convolution kernel in spatial domain
# # related to constraint
#===============================================================================
uker = numpy.zeros(self.st['Kd'][:],dtype=numpy.complex64)
n_dims= numpy.size(self.st['Nd'])
if n_dims == 1:
uker[0] = -2.0
uker[1] = 1.0
uker[-1] = 1.0
elif n_dims == 2:
uker[0,0] = -4.0
uker[1,0] = 1.0
uker[-1,0] = 1.0
uker[0,1] = 1.0
uker[0,-1] = 1.0
elif n_dims == 3:
uker[0,0,0] = -6.0
uker[1,0,0] = 1.0
uker[-1,0,0] = 1.0
uker[0,1,0] = 1.0
uker[0,-1,0] = 1.0
uker[0,0,1] = 1.0
uker[0,0,-1] = 1.0
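        # The FFT of this stencil (below) gives the transfer function of the
        # discrete Laplacian on the oversampled grid; presumably this is the
        # frequency-domain kernel consumed by the deconvolution step.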
uker =self.emb_fftn(uker, self.st['Kd'][:], range(0,numpy.ndim(uker)))
return uker
def _expand_deconv_kernel_dimension(self, uker, L):
# if numpy.size(self.st['Kd']) > 2:
# for dd in xrange(2,numpy.size(self.st['Kd'])):
# uker = appendmat(uker,self.st['Kd'][dd])
uker = appendmat(uker,L)
return uker
def _expand_RTR(self,RTR,L):
# if numpy.size(self.st['Kd']) > 2:
# for dd in xrange(2,numpy.size(self.st['Kd'])):
# RTR = appendmat(RTR,self.st['Kd'][dd])
RTR= numpy.reshape(RTR,self.st['Kd'],order='F')
RTR = appendmat(RTR,L)
return RTR
def _update_d(self,u,dd):
out_dd = tuple(get_Diff(u,jj) for jj in xrange(0,len(dd)))
# out_dd = ()
# for jj in xrange(0,len(dd)) :
# out_dd = out_dd + (get_Diff(u,jj),)
return out_dd
def _update_b(self, bb, dd, xx):
ndims=len(bb)
cc=numpy.empty(bb[0].shape)
out_bb=()
for pj in xrange(0,ndims):
cc=bb[pj]+dd[pj]-xx[pj]
out_bb=out_bb+(cc,)
return out_bb
def _create_mask(self):
st=self.st
# st['mask']=numpy.ones(st['Nd'],dtype=numpy.float32)
# st['mask2']=numpy.ones(st['Nd'],dtype=numpy.float32)
n_dims= numpy.size(st['Nd'])
sp_rat =0.0
for di in xrange(0,n_dims):
sp_rat = sp_rat + (st['Nd'][di]/2)**2
sp_rat = sp_rat**0.5
x = numpy.ogrid[[slice(0, st['Nd'][_ss]) for _ss in xrange(0,n_dims)]]
tmp = 0
for di in xrange(0,n_dims):
tmp = tmp + ( (x[di] - st['Nd'][di]/2.0)/(st['Nd'][di]/2.0) )**2
tmp = (1.0*tmp)**0.5
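        # tmp is now each voxel's radius normalized to the grid half-width;
        # the logistic curves below roll the mask smoothly from 1 inside the
        # unit radius to 0 outside (centres 1.05 and 1.025, width ~0.005),
        # presumably to avoid the ringing a hard binary mask would cause.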
# indx = tmp/sp_rat >=1.1
# st['mask'][indx] =0.0
st['mask'] = 1.0/(1.0+numpy.exp( (tmp-1.05)/0.005))
st['mask2'] =1.0/(1.0+numpy.exp( (tmp-1.025)/0.005))
# matplotlib.pyplot.imshow( st['mask'].real)
# matplotlib.pyplot.show()
return st
def _external_update(self,u, uf, u0, u_k_1, outer): # overload the update function
tmpuf=(self.forwardbackward(
u*self.st['sensemap']))#*(self.st['sensemap'])
if self.st['senseflag'] == 1:
tmpuf=CombineMulti(tmpuf,-1)
err = (checkmax(tmpuf,self.debug) -checkmax(u0,self.debug) )/checkmax(u0,self.debug)
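        # err is the relative overshoot of the re-projected estimate's peak
        # against the measured data's peak; the code below uses it as a crude
        # acceptance test for this outer update (intent inferred from the code).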
r = u0 - tmpuf
# r = u0 - tmpuf
p = r
# err = (checkmax(tmpuf)- checkmax(u0))/checkmax(u0)
err= numpy.abs(err)
if self.debug==0:
pass
else:
print('err',err,self.err)
# if (err < self.err):
# uf = uf+p*err*0.1
if numpy.abs(err) < numpy.abs(self.err):
uf = uf + p*err*(outer+1)
self.err = err
u_k_1 = u
else:
err = self.err
if self.debug==0:
pass
else:
print('no function')
u = u_k_1
murf = uf
if self.debug==0:
pass
else:
print('leaving ext_update')
return (u, murf, uf, u_k_1)
def show_3D():
import mayavi.mlab
raw = numpy.load('phantom_3D_128_128_128.npy')
reconreal = numpy.load('reconreal.npy')
blurreal = numpy.load('blurreal.npy')
reconreal[0:,0:80,0:64]=0
raw[0:,0:80,0:64]=0
blurreal[0:,0:80,0:64]=0
mayavi.mlab.contour3d(raw, contours=4, transparent=True)
mayavi.mlab.show()
mayavi.mlab.contour3d(blurreal, contours=4, transparent=True)
mayavi.mlab.show()
mayavi.mlab.contour3d(reconreal, contours=4, transparent=True)
mayavi.mlab.show()
def test_3D():
cm = matplotlib.cm.gray
    # load raw data, which is a 3D Shepp-Logan phantom
raw = numpy.load('phantom_3D_128_128_128.npy')
# numpy.save('testfile.npy',raw)
# raw = numpy.load('testfile.npy')
# demonstrate the 64th slice
matplotlib.pyplot.imshow(raw[:,:,64],cmap=cm)
matplotlib.pyplot.show()
print('max.image',numpy.max(raw[:]))
# load 3D k-space trajectory (sparse)
om = numpy.loadtxt('om3D2.txt')
# image dimension is 3D isotropic
Nd=(128,128,128)
Kd=(128,128,128)
# Note: sparse sampling works best for Jd = 1
Jd=(1,1,1)
# create Nufft Object
MyNufftObj = pynufft(om, Nd, Kd, Jd)
# create data
K_data=MyNufftObj.forward(raw)
# regridding and blurred images
image_blur = MyNufftObj.backward(K_data)[...,0]
# turn off sense recon because it is not necessary
MyNufftObj.st['senseflag']=0
# Now doing the reconstruction
# import pp
# job_server = pp.Server()
#
# f1=job_server.submit(MyNufftObj.inverse,(K_data, 1.0, 0.1, 0.01,3, 5),
# modules = ('numpy','pyfftw','pynufft'),globals=globals())
# f2=job_server.submit(MyNufftObj.inverse,(numpy.sqrt(K_data)*10+(0.0+0.1j), 1.0, 0.05, 0.01,3, 20),
# modules = ('numpy','pyfftw','pynufft'),globals=globals())
# image1 = f1()
# image2 = f2()
image1=MyNufftObj.inverse(K_data, 1.0, 0.1, 0.01,3,5)
# image1 = MyNufftObj.inverse(K_data,1.0, 0.05, 0.001, 1,10)
# matplotlib.pyplot.subplot(2,3,1)
# matplotlib.pyplot.imshow(raw[:,:,64],cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.subplot(2,3,2)
# matplotlib.pyplot.imshow(image_blur[:,:,64].real,cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.subplot(2,3,3)
# matplotlib.pyplot.imshow((image2[:,:,64].real),cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.subplot(2,3,4)
# matplotlib.pyplot.imshow(raw[:,:,96],cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.subplot(2,3,5)
# matplotlib.pyplot.imshow(image_blur[:,:,96].real,cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.subplot(2,3,6)
# matplotlib.pyplot.imshow((image2[:,:,96].real),cmap=cm,interpolation = 'nearest')
# matplotlib.pyplot.show()
matplotlib.pyplot.subplot(2,3,1)
matplotlib.pyplot.imshow(raw[:,:,64],cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.subplot(2,3,2)
matplotlib.pyplot.imshow(image_blur[:,:,64].real,cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.subplot(2,3,3)
matplotlib.pyplot.imshow((image1[:,:,64].real),cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.subplot(2,3,4)
matplotlib.pyplot.imshow(raw[:,:,96],cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.subplot(2,3,5)
matplotlib.pyplot.imshow(image_blur[:,:,96].real,cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.subplot(2,3,6)
matplotlib.pyplot.imshow((image1[:,:,96].real),cmap=cm,interpolation = 'nearest')
matplotlib.pyplot.show()
numpy.save('blurreal.npy',image_blur.real)
numpy.save('reconreal.npy',image1.real)
# mayavi.mlab.imshow()
def test_2D():
import numpy
import matplotlib#.pyplot
cm = matplotlib.cm.gray
# load example image
image = numpy.loadtxt('phantom_256_256.txt')
image[128,128]= 1.0
# import scipy.misc
# image = scipy.misc.imresize(image,Nd)
Nd =(256,256) # image space size
Kd =(512,512) # k-space size
Jd =(6,6) # interpolation size
# load k-space points
om = numpy.loadtxt('om.txt')
#create object
NufftObj = pynufft(om, Nd,Kd,Jd)
NufftObj.st['senseflag']=1
# simulate "data"
data= NufftObj.forward(image )
# now get the original image
    #reconstruct image with 0.4 constraint1, 0.01 constraint2,
    # 10 inner iterations and 10 outer iterations
image_recon = NufftObj.inverse(data, 1.0, 0.4, 0.01,10, 10)
image_blur = NufftObj.backward(data)
image_recon = Normalize(image_recon)
matplotlib.pyplot.plot(om[:,0],om[:,1],'x')
matplotlib.pyplot.show()
norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
norm2=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0e-1)
# display images
matplotlib.pyplot.subplot(2,2,1)
matplotlib.pyplot.imshow(image,
norm = norm,cmap =cm,interpolation = 'nearest')
matplotlib.pyplot.title('true image')
matplotlib.pyplot.subplot(2,2,3)
matplotlib.pyplot.imshow(image_recon.real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('recovered image')
matplotlib.pyplot.subplot(2,2,2)
matplotlib.pyplot.imshow(image_blur[:,:,0].real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('blurred image')
matplotlib.pyplot.subplot(2,2,4)
matplotlib.pyplot.imshow(image_recon.real-image,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('residual error')
matplotlib.pyplot.show()
def test_1D():
# import several modules
import numpy
import matplotlib#.pyplot
#create 1D curve from 2D image
image = numpy.loadtxt('phantom_256_256.txt')
image = image[:,128]
#determine the location of samples
om = numpy.loadtxt('om1D.txt')
om = numpy.reshape(om,(numpy.size(om),1),order='F')
# reconstruction parameters
Nd =(256,) # image space size
Kd =(256,) # k-space size
Jd =(1,) # interpolation size
# initiation of the object
NufftObj = pynufft(om, Nd,Kd,Jd)
# simulate "data"
data= NufftObj.forward(image )
#adjoint(reverse) of the forward transform
image_blur= NufftObj.backward(data)[:,0]
#inversion of data
image_recon = NufftObj.inverse(data, 1.0, 1, 0.001,15,16)
#Showing histogram of sampling locations
matplotlib.pyplot.hist(om,20)
matplotlib.pyplot.title('histogram of the sampling locations')
matplotlib.pyplot.show()
#show reconstruction
matplotlib.pyplot.subplot(2,2,1)
matplotlib.pyplot.plot(image)
matplotlib.pyplot.title('original')
matplotlib.pyplot.ylim([0,1])
matplotlib.pyplot.subplot(2,2,3)
matplotlib.pyplot.plot(image_recon.real)
matplotlib.pyplot.title('recon')
matplotlib.pyplot.ylim([0,1])
matplotlib.pyplot.subplot(2,2,2)
matplotlib.pyplot.plot(image_blur.real)
matplotlib.pyplot.title('blurred')
matplotlib.pyplot.subplot(2,2,4)
matplotlib.pyplot.plot(image_recon.real - image)
matplotlib.pyplot.title('residual')
# matplotlib.pyplot.subplot(2,2,4)
# matplotlib.pyplot.plot(numpy.abs(data))
matplotlib.pyplot.show()
# def test_Dx():
# u = numpy.ones((128,128,128,1),dtype = numpy.complex64)
def test_2D_multiprocessing():
import numpy
import matplotlib.pyplot
import copy
cm = matplotlib.cm.gray
# load example image
image = numpy.loadtxt('phantom_256_256.txt')
image[128,128]= 1.0
Nd =(256,256) # image space size
Kd =(512,512) # k-space size
Jd =(6,6) # interpolation size
# load k-space points
om = numpy.loadtxt('om.txt')
#create object
NufftObj = pynufft(om, Nd,Kd,Jd)
NewObj = copy.deepcopy(NufftObj)
# simulate "data"
data= NufftObj.forward(image )
# data2=data.copy()
# data2 =numpy.sqrt(data2)*10+(0.0+0.1j)
# now get the original image
    #reconstruct image with 0.05 constraint1, 0.01 constraint2,
    # 3 inner iterations and 20 outer iterations
import pp
job_server = pp.Server()
f1=job_server.submit(NewObj.inverse,(data, 1.0, 0.05, 0.01,3, 20),
modules = ('numpy','pyfftw','pynufft'),globals=globals())
f2=job_server.submit(NewObj.inverse,(numpy.sqrt(data)*10+(0.0+0.1j), 1.0, 0.05, 0.01,3, 20),
modules = ('numpy','pyfftw','pynufft'),globals=globals())
image_recon = f1()
image_recon2 = f2()
# image_recon = NewObj.inverse(data, 1.0, 0.05, 0.01,3, 20)
image_blur = NufftObj.backward(data)
image_recon = Normalize(image_recon)
matplotlib.pyplot.plot(om[:,0],om[:,1],'x')
matplotlib.pyplot.show()
norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
norm2=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0e-1)
# display images
matplotlib.pyplot.subplot(2,2,1)
matplotlib.pyplot.imshow(image,
norm = norm,cmap =cm,interpolation = 'nearest')
matplotlib.pyplot.title('true image')
matplotlib.pyplot.subplot(2,2,3)
matplotlib.pyplot.imshow(image_recon.real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('recovered image')
matplotlib.pyplot.subplot(2,2,2)
matplotlib.pyplot.imshow(image_blur[:,:,0].real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('blurred image')
matplotlib.pyplot.subplot(2,2,4)
matplotlib.pyplot.imshow(image_recon.real-image,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('residual error')
matplotlib.pyplot.show()
matplotlib.pyplot.subplot(2,2,1)
matplotlib.pyplot.imshow(image,
norm = norm,cmap =cm,interpolation = 'nearest')
matplotlib.pyplot.title('true image')
matplotlib.pyplot.subplot(2,2,3)
matplotlib.pyplot.imshow(image_recon2.real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('recovered image')
matplotlib.pyplot.subplot(2,2,2)
matplotlib.pyplot.imshow(image_blur[:,:,0].real,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('blurred image')
matplotlib.pyplot.subplot(2,2,4)
    matplotlib.pyplot.imshow(image_recon2.real-image,
norm = norm,cmap= cm,interpolation = 'nearest')
matplotlib.pyplot.title('residual error')
matplotlib.pyplot.show()
if __name__ == '__main__':
import cProfile
# test_1D()
# test_2D()
test_3D()
# show_3D()
# test_Dx()
cProfile.run('test_3D()')
# cProfile.run('test_2D_multiprocessing()')
|
jyhmiinlin/cineFSE
|
CsTransform/back_pynufft.py
|
Python
|
gpl-3.0
| 57,202
|
[
"Mayavi"
] |
c8e7078fdef41a6148a939f7613233eb7e3679712aec00e1c00e1dd122e4d8ba
|
"""This module parses the VTK methods, obtains the argument and return
type information, and organizes them.
"""
# Author: Prabhu Ramachandran
# Copyright (c) 2004-2007, Enthought, Inc.
# License: BSD Style.
import re
import types
# Local imports (these are relative imports for a good reason).
import class_tree
import vtk_module as vtk
from common import is_version_62
class VTKMethodParser:
"""This class provides useful methods for parsing methods of a VTK
class or instance.
The class allows one to categorize the methods of the VTK class
and also obtain the method signatures in a form that is easy to
use. When the `parse` method is called, it in turn calls the
`_organize_methods` method. This method organizes the VTK methods
into different instance variables described in the following.
`self.toggle_meths` contains a dictionary of all the boolean
methods of the form <Value>On/Off. The dictionary keys are
strings with the <Value>'s and the value of each item is the
default value (0/1) of the item (the example below will clarify
this). `self.state_meths` contains a dictionary which collects
the Set<Prop>To<Value> type of methods. The key is the <Prop> and
the value is a list containing the different string <Value>'s and
their corresponding mapped value. The first value in these is the
default value of the <Prop>. `self.get_set_meths` will contain a
dictionary which collects all the methods of the form
Set/Get<Prop> that are not already specified in
`self.toggle_meths` or `self.state_meths`. The default value of
the Get<Prop> is stored. If the value accepted by the method has
a range (via the methods `Get<Prop>MinValue` and
`Get<Prop>MaxValue`), then that range is computed and stored.
`self.get_meths` stores the methods that are of the form
`Get<Prop>`. `self.other_meths` stores the remaining methods.
The parsing is quite fast. Parsing every class in the VTK API
    takes a couple of seconds (on a Pentium III @ 450MHz).
Here is an example::
>>> import vtk
>>> p = VTKMethodParser()
>>> p.parse(vtk.vtkProperty)
>>> print p.get_toggle_methods()
{'EdgeVisibility': 0, 'BackfaceCulling': 0, 'FrontfaceCulling': 0}
>>> print p.get_state_methods()['Representation']
[['Surface', 2], ['Points', 0], ['Surface', 2], ['Wireframe', 1]]
>>> print p.get_get_set_methods()['Opacity']
(1.0, (0.0, 1.0))
>>> print p.get_get_methods()
['GetClassName']
>>> print p.get_other_methods()[:3]
['BackfaceRender', 'DeepCopy', 'IsA']
The class also provides a method called `get_method_signature`
that obtains the Python method signature given the VTK method
object. Here is an example::
>>> import vtk
>>> p = VTKMethodParser()
>>> o = vtk.vtkProperty
>>> print p.get_method_signature(o.GetClassName)
[(['string'], None)]
>>> print p.get_method_signature(o.GetColor)[0]
([('float', 'float', 'float')], None)
>>> print p.get_method_signature(o.GetColor)[1]
([None], (('float', 'float', 'float'),))
The `get_method_signature` is fairly efficient and obtaining the
signature for every method in every class in the VTK API takes
    around 6 seconds (on a Pentium III @ 450MHz).
"""
def __init__(self, use_tree=True):
"""Initializes the object.
Parameters
----------
- use_tree : `bool`
If True (default), use a ClassTree instance to obtain a
concrete subclass for an abstract base class. This is used
only to find the range and default values for some of the
methods. If False, no ClassTree instance is created.
This is optional because, creating a ClassTree is expensive.
The parser functionality can be very useful even without the
use of a ClassTree. For example, if one wants to save the
state of a VTK object one only needs to know the names of
the methods and not their default values, ranges etc. In
that case using a parser should be cheap.
"""
# The ClassTree is needed to find an instantiable child class
# for an abstract VTK parent class. This instance is used to
# obtain the state values and the ranges of the arguments
# accepted by the Get/Set methods that have a
# Get<Prop>{MaxValue,MinValue} method.
if use_tree:
self._tree = class_tree.ClassTree(vtk)
self._tree.create()
else:
self._tree = None
self._state_patn = re.compile('To[A-Z0-9]')
self._initialize()
#################################################################
# 'VTKMethodParser' interface.
#################################################################
def parse(self, obj, no_warn=True):
"""Parse the methods for a given VTK object/class.
Given a VTK class or object, this method parses the methods
and orgaizes them into useful categories. The categories and
their usage is documented in the documentation for the class.
Parameters
----------
- obj : VTK class or instance
- no_warn : `bool` (default: True)
If True (default), it suppresses any warnings generated by
the VTK object when parsing the methods. This is safe to
use.
"""
if not hasattr(obj, '__bases__'):
klass = obj.__class__
else:
klass = obj
methods = self.get_methods(klass)
if no_warn:
# Save warning setting and shut it off before parsing.
warn = vtk.vtkObject.GetGlobalWarningDisplay()
            if klass.__name__ != 'vtkObject':
vtk.vtkObject.GlobalWarningDisplayOff()
self._organize_methods(klass, methods)
if no_warn:
# Reset warning status.
vtk.vtkObject.SetGlobalWarningDisplay(warn)
def _get_parent_methods(self, klass):
"""Returns all the methods of the classes parents."""
methods = {}
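        # the dict is used as a set of method names here: fromkeys()/update()
        # merge each ancestor's dir() listing without duplicates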
while len(klass.__bases__) > 0:
klass = klass.__bases__[0]
meths = dir(klass)
d = methods.fromkeys(meths)
methods.update(d)
return methods.keys()
def get_methods(self, klass):
"""Returns all the relevant methods of the given VTK class."""
methods = dir(klass)[:]
if hasattr(klass, '__members__'):
# Only VTK versions < 4.5 have these.
for m in klass.__members__:
methods.remove(m)
# Ignore the parent methods.
ignore = self._get_parent_methods(klass)
# Skip some of the ignores.
skip = ['GetInput', 'SetInput']
# Sometimes the child has only GetInput while the parent has
# SetInput.
if hasattr(klass, 'SetInput') and \
'SetInput' not in methods and \
'GetInput' in methods:
methods.append('SetInput')
# Get/set pairs that are overridden. Basically, if a parent
# class has a 'GetThing' and the child overrides/has a
# 'SetThing' (or vice-versa), then the removal of the parent
# methods is wrong since the child changes the trait definition
# which breaks things. We therefore do not remove any of the
# Get/SetThings that are ignored due to them being in the
# parent. However one has to be careful about cases where these are
# really Toggle (ThingOn) or State (SetThingToThong) etc. methods and
# in those cases we really should ignore the method. So in essence,
# any Get/Set pair that is not a State or Toggle should be redefined.
overrides = []
for m in methods:
check = False
if m.startswith('Get'):
m1 = 'Set' + m[3:]
check = True
elif m.startswith('Set'):
m1 = 'Get' + m[3:]
check = True
if check:
if m1 in methods and (m1 in ignore or m in ignore):
# Skips are stored as Set followed by Get.
skip.extend(['Set' +m[3:], 'Get'+m[3:]])
for m in skip[:]:
if m.startswith('Set'):
base = m[3:]
mg, ms = 'Get' + base, 'Set' + base
m_st = 'Set' + base + 'To'
m_t = base + 'Off'
for method in methods:
if m_st in method or m_t == method:
skip.remove(ms)
skip.remove(mg)
break
if 'GetViewProp' in methods and 'GetProp' in methods:
ignore.extend(['GetProp', 'SetProp'])
if 'GetViewProps' in methods and 'GetProps' in methods:
ignore.extend(['GetProps', 'SetProps'])
# Remove any deprecated traits.
if 'GetScaledText' in methods and 'GetTextScaleMode' in methods:
ignore.extend(['GetScaledText', 'SetScaledText',
'ScaledTextOn', 'ScaledTextOff'])
# Now we can safely remove the methods.
for m in methods[:]:
if m in ignore and m not in skip:
methods.remove(m)
return methods
def get_toggle_methods(self):
"""Returns a dictionary of the parsed <Value>On/Off methods
along with the default value.
"""
return self.toggle_meths
def get_state_methods(self):
"""Returns a dict of the parsed Set<Prop>To<Value>.
The keys are the <Prop> string with a list of the different
<Value> strings along with their corresponding value (if
obtainable). The first value is the default value of the
state.
"""
return self.state_meths
def get_get_set_methods(self):
"""Returns a dict of the parsed Get/Set<Value> methods.
The keys of the dict are the <Value> strings and contain a
two-tuple containing the default value (or None if it is not
obtainable for some reason) and a pair of numbers specifying
an acceptable range of values (or None if not obtainable).
"""
return self.get_set_meths
def get_get_methods(self):
"""Return a list of parsed Get<Value> methods.
All of these methods do NOT have a corresponding Set<Value>.
"""
return self.get_meths
def get_other_methods(self):
"""Return list of all other methods, that are not
categorizable.
"""
return self.other_meths
def get_method_signature(self, method):
"""Returns information on the Python method signature given
the VTK method.
        The doc string of the given method object is parsed to get the
        method signature. The method returns a list of tuples, each of
        which contains 2 items: the first is a list representing the
        return value and the second represents the arguments to be
        passed to the function. If the method supports different return
        values and arguments, this function returns all of their signatures.
Parameters
----------
- method : `method`
A VTK method object.
"""
# VTK 6.2 false built in funcs/methods are ignored
if is_version_62():
built_in_func = isinstance(method, types.BuiltinFunctionType)
built_in_meth = isinstance(method, types.BuiltinMethodType)
if not (built_in_func or built_in_meth):
return None
# Remove all the C++ function signatures.
doc = method.__doc__
doc = doc[:doc.find('\n\n')]
sig = []
c_sig = [] # The C++ signature
in_sig = False
in_c_sig = False
counter = 0
for line in doc.split('\n'):
if line.startswith('V.'):
in_sig = True
in_c_sig = False
sig.append(line.strip())
elif line.startswith('C++:'):
in_sig = False
in_c_sig = True
c_sig.append(line.strip())
counter += 1
elif in_sig:
sig[counter] = sig[counter] + line.strip()
elif in_c_sig:
c_sig[counter-1] = c_sig[counter-1] + line.strip()
# Remove the V.<method_name>
sig = [x.replace('V.' + method.__name__, '') for x in sig]
c_sig = [x[x.find('('):] for x in c_sig]
pat = re.compile(r'\b')
# Split into [return_value, arguments] after processing them.
tmp = list(sig)
sig = []
for sig_idx, i in enumerate(tmp):
# Split to get return values.
x = i.split('->')
# Strip each part.
x = [y.strip() for y in x]
if len(x) == 1: # No return value
x = [None, x[0]]
else:
x.reverse()
ret, arg = x
# Remove leading and trailing parens for arguments.
arg = arg[1:-1]
if not arg:
arg = None
if arg and arg[-1] in [')', ']']:
arg = arg + ','
# Check if we are able to parse all the arguments -- some
# unstable versions of VTK have problems generating the
# docstring and in this case we will try to use the C++
# docstring signature.
n_arg = 0
arg_map = {'unsigned int': 'int', 'unsigned char': 'int',
'unsigned long': 'long', 'unsigned short': 'int'}
if arg is not None and c_sig:
n_arg = arg.count(',') + 1
                # The C++ arguments have parentheses, like: (int, int)
carg = c_sig[sig_idx][1:-1].split(',')
if n_arg > 0:
args = []
if len(carg) == n_arg:
for idx, x in enumerate(arg.split(',')):
if len(x.strip()) == 0:
carg_val = carg[idx].strip()
if 'unsigned' in carg_val and \
carg_val in arg_map:
args.append(arg_map[carg_val])
elif 'void' in carg_val:
args.append("string")
else:
args.append(x)
else:
args.append(x)
arg = ', '.join(args)
if ret is not None and ret.startswith('(') and '...' in ret:
# A tuple (new in VTK-5.7)
ret = "tuple"
if arg is not None:
if '[float, ...]' in arg:
arg = arg.replace('[float, ...]', 'tuple')
elif '(float, ...)' in arg:
arg = arg.replace('(float, ...)', 'tuple')
if ret == '(, )':
ret = None
# Now quote the args and eval them. Easy!
try:
if ret:
ret = eval(pat.sub('\"', ret))
if arg:
arg = eval(pat.sub('\"', arg))
if type(arg) == type('str'):
arg = [arg]
except SyntaxError:
pass
else:
sig.append(([ret], arg))
return sig
def get_tree(self):
"""Return the ClassTree instance used by this class."""
return self._tree
#################################################################
# Non-public interface.
#################################################################
def _initialize(self):
"""Initializes the method categories."""
# Collects the <Value>On/Off methods.
self.toggle_meths = {}
# Collects the Set<Prop>To<Value> methods.
self.state_meths = {}
# Collects the Set/Get<Value> pairs.
self.get_set_meths = {}
# Collects the Get<Value> methods.
self.get_meths = []
# Collects all the remaining methods.
self.other_meths = []
def _organize_methods(self, klass, methods):
"""Organizes the given methods of a VTK class into different
categories.
Parameters
----------
- klass : A VTK class
- methods : `list` of `str`
A list of the methods to be categorized.
"""
self._initialize()
meths = methods[:]
meths = self._find_toggle_methods(klass, meths)
meths = self._find_state_methods(klass, meths)
meths = self._find_get_set_methods(klass, meths)
meths = self._find_get_methods(klass, meths)
self.other_meths = [x for x in meths \
if callable(getattr(klass, x))]
def _remove_method(self, meths, method):
try:
meths.remove(method)
except ValueError:
pass
def _find_toggle_methods(self, klass, methods):
"""Find/store methods of the form <Value>{On,Off} in the given
`methods`. Returns the remaining list of methods.
"""
meths = methods[:]
tm = self.toggle_meths
klass_name = klass.__name__
problem_methods = ['CopyVectors', 'CopyTensors',
'CopyTCoords', 'CopyScalars',
'CopyNormals', 'CopyGlobalIds',
'CopyPedigreeIds']
for method in meths[:]:
if klass_name == 'vtkDataSetAttributes' and \
method[:-2] in problem_methods:
continue
elif method[:-2] == 'AlphaBitPlanes':
continue
if method[-2:] == 'On':
key = method[:-2]
if (key + 'Off') in meths and ('Get' + key) in meths:
tm[key] = None
meths.remove(method)
meths.remove(key + 'Off')
self._remove_method(meths, 'Set' + key)
self._remove_method(meths, 'Get' + key)
# get defaults
if tm:
obj = self._get_instance(klass)
if obj:
for key in tm:
try:
tm[key] = getattr(obj, 'Get%s'%key)()
except (TypeError, AttributeError):
print klass.__name__, key
pass
return meths
def _find_state_methods(self, klass, methods):
"""Find/store methods of the form Set<Prop>To<Value> in the
given `methods`. Returns the remaining list of methods. The
method also computes the mapped value of the different
<Values>.
"""
# These ignored ones are really not state methods.
ignore = ['SetUpdateExtentToWholeExtent',
'SetDataExtentToWholeExtent',
'SetOutputSpacingToDefault', # In vtkImageReslice.
'SetOutputOriginToDefault', # In vtkImageReslice
'SetOutputExtentToDefault' # In vtkImageReslice
]
meths = methods[:]
sm = self.state_meths
for method in meths[:]:
if method not in ignore and method[:3] == 'Set':
# Methods of form Set<Prop>To<Value>
match = self._state_patn.search(method)
# Second cond. ensures that this is not an accident.
if match and (('Get'+method[3:]) not in meths):
key = method[3:match.start()] # The <Prop> part.
if (('Get' + key) in methods):
val = method[match.start()+2:] # <Value> part.
meths.remove(method)
if sm.has_key(key):
sm[key].append([val, None])
else:
sm[key] = [[val, None]]
meths.remove('Get'+ key)
self._remove_method(meths, 'Set'+ key)
if ('Get' + key + 'MaxValue') in meths:
meths.remove('Get' + key + 'MaxValue')
meths.remove('Get' + key + 'MinValue')
try:
meths.remove('Get' + key + 'AsString')
except ValueError:
pass
# Find the values for each of the states, i.e. find that
# vtkProperty.SetRepresentationToWireframe() corresponds to
# vtkProperty.SetRepresentation(1).
if sm:
obj = self._get_instance(klass)
klass_name = klass.__name__
if obj and not klass_name.endswith('Viewer'):
# We do not try to inspect viewers, because they'll
# trigger segfaults during the inspection
for key, values in sm.items():
default = getattr(obj, 'Get%s'%key)()
for x in values[:]:
try:
getattr(obj, 'Set%sTo%s'%(key, x[0]))()
except TypeError:
# vtkRenderedGraphRepresentation has some of
# its SetIvarToState methods that have
# non-standard arguments, this throws off
# the parser and we ignore these.
#print klass.__name__, key
pass
else:
val = getattr(obj, 'Get%s'%key)()
x[1] = val
if val == default:
values.insert(0, [x[0], val])
return meths
def _find_get_set_methods(self, klass, methods):
"""Find/store methods of the form {Get,Set}Prop in the given
`methods` and returns the remaining list of methods.
Note that it makes sense to call this *after*
`_find_state_methods` is called in order to avoid incorrect
duplication. This method also computes the default value and
the ranges of the arguments (when possible) by using the
Get<Prop>{MaxValue,MinValue} methods.
"""
meths = methods[:]
gsm = self.get_set_meths
klass_name = klass.__name__
for method in meths[:]:
# Methods of the Set/Get form.
if method in ['Get', 'Set']:
# This occurs with the vtkInformation class.
continue
elif klass_name == 'vtkProp' and method[3:] == 'AllocatedRenderTime':
# vtkProp.Get/SetAllocatedRenderTime is private and
# SetAllocatedRenderTime takes two args, don't wrap it.
continue
elif klass_name == 'vtkGenericAttributeCollection' and \
method[3:] == 'AttributesToInterpolate':
continue
elif klass_name == 'vtkOverlappingAMR' and method[3:] == 'Origin':
continue
elif (klass_name == 'vtkOrientationMarkerWidget'
and method[3:] in ['OutlineColor', 'Viewport']):
continue
elif (klass_name == 'vtkImageDataGeometryFilter'
and method[3:] == 'Extent'):
continue
elif (klass_name == 'vtkVolumeMapper'
and method[3:] == 'CroppingRegionPlanes'):
continue
elif (method[:3] == 'Set') and ('Get' + method[3:]) in methods:
key = method[3:]
meths.remove('Set' + key)
meths.remove('Get' + key)
if ('Get' + key + 'MaxValue') in meths:
meths.remove('Get' + key + 'MaxValue')
meths.remove('Get' + key + 'MinValue')
gsm[key] = 1
else:
gsm[key] = None
# Find the default and range of the values.
if gsm:
obj = self._get_instance(klass)
if obj:
for key, value in gsm.items():
if klass_name in ['vtkPolyData', 'vtkContext2D']:
# Evil hack, these classes segfault!
default = None
elif klass_name == 'vtkHyperOctree' and \
key == 'Dimension':
# This class breaks standard VTK conventions.
gsm[key] = (3, (1, 3))
continue
else:
try:
default = getattr(obj, 'Get%s'%key)()
except TypeError:
default = None
if value:
low = getattr(obj, 'Get%sMinValue'%key)()
high = getattr(obj, 'Get%sMaxValue'%key)()
gsm[key] = (default, (low, high))
else:
gsm[key] = (default, None)
else:
# We still might have methods that have a default range.
for key, value in gsm.items():
if value == 1:
gsm[key] = None
return meths
def _find_get_methods(self, klass, methods):
"""Find/store methods of the form Get<Value> in the given
`methods` and returns the remaining list of methods.
"""
meths = methods[:]
gm = self.get_meths
for method in meths[:]:
if method == 'Get':
# Occurs with vtkInformation
continue
elif method[:3] == 'Get':
gm.append(method)
meths.remove(method)
return meths
def _get_instance(self, klass):
"""Given a VTK class, `klass`, returns an instance of the
class.
If the class is abstract, it uses the class tree to return an
instantiable subclass. This is necessary to get the values of
the 'state' methods and the ranges for the Get/Set methods.
"""
obj = None
try:
obj = klass()
except (TypeError, NotImplementedError):
if self._tree:
t = self._tree
n = t.get_node(klass.__name__)
for c in n.children:
obj = self._get_instance(t.get_class(c.name))
if obj:
break
return obj
|
liulion/mayavi
|
tvtk/vtk_parser.py
|
Python
|
bsd-3-clause
| 26,870
|
[
"VTK"
] |
f6ffabce3395c53fc6cbac95085b8ded85c67a54922627633e4ceed5c3ce45f5
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
import os
import re
import shutil
import subprocess
import sys
HEAD_DIR = 'core/templates/dev/head/'
OUT_DIR = 'core/templates/prod/head/'
REMOVE_WS = re.compile(r'\s{2,}').sub
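# REMOVE_WS is the bound .sub method of the compiled pattern, so
# REMOVE_WS(' ', text) collapses every run of two or more whitespace
# characters in text into a single space (as used in process_html below).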
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path."""
YUICOMPRESSOR_DIR = (
'../oppia_tools/yuicompressor-2.4.8/yuicompressor-2.4.8.jar')
cmd = 'java -jar %s %s -o %s' % (
YUICOMPRESSOR_DIR, source_path, target_path)
subprocess.check_call(cmd, shell=True)
def ensure_directory_exists(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def get_target(filename):
return filename.replace(HEAD_DIR, OUT_DIR)
def process_html(filename, target):
    f = open(filename, 'r')
    content = f.read()
    f.close()
    content = REMOVE_WS(' ', content)
    ensure_directory_exists(target)
    d = open(target, 'w+')
    d.write(content)
    d.close()
def process_css(source_path, target_path):
ensure_directory_exists(target_path)
_minify(source_path, target_path)
def process_js(source_path, target_path):
ensure_directory_exists(target_path)
_minify(source_path, target_path)
def process_third_party_libs():
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
node_path = os.path.join(
parent_dir, 'oppia_tools', 'node-4.2.1', 'bin', 'node')
gulp_path = os.path.join(
parent_dir, 'node_modules', 'gulp', 'bin', 'gulp.js')
gulp_build_cmd = [node_path, gulp_path, 'build', '--minify=True']
proc = subprocess.Popen(
gulp_build_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gulp_stdout, gulp_stderr = proc.communicate()
if gulp_stdout:
print gulp_stdout
if gulp_stderr:
        print 'Gulp build process failed. Exiting.'
print '----------------------------------------'
print gulp_stderr
sys.exit(1)
# Script starts here.
ensure_directory_exists(OUT_DIR)
shutil.rmtree(OUT_DIR)
process_third_party_libs()
# rename the outer loop variable so os.walk()'s root does not shadow it
for top_dir in os.listdir(os.getcwd()):
    if any([s in top_dir for s in ['.git', 'third_party', 'extensions']]):
        continue
    print('Processing %s' % os.path.join(os.getcwd(), top_dir))
    for root, dirs, files in os.walk(os.path.join(os.getcwd(), top_dir)):
for directory in dirs:
print('Processing %s' % os.path.join(root, directory))
for filename in files:
source_path = os.path.join(root, filename)
if source_path.find(OUT_DIR) > 0:
continue
if source_path.find(HEAD_DIR) == -1:
continue
target_path = get_target(source_path)
if filename.endswith('.html'):
process_html(source_path, target_path)
if filename.endswith('.css'):
process_css(source_path, target_path)
if filename.endswith('.js'):
process_js(source_path, target_path)
|
won0089/oppia
|
scripts/build.py
|
Python
|
apache-2.0
| 3,633
|
[
"GULP"
] |
f8eda6d67be663c5253f08dfc4db3020511826457a5a145842aa99a7682ed19d
|
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
import os
import os.path
import sys
import yaml
from collections import defaultdict
from distutils.version import LooseVersion
from jinja2 import Environment
import ansible.constants as C
import ansible.utils
import ansible.galaxy
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
from ansible.playbook.role.requirement import RoleRequirement
class GalaxyCLI(CLI):
VALID_ACTIONS = ("init", "info", "install", "list", "remove", "search")
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
def __init__(self, args, display=None):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args, display)
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# options specific to actions
if self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.add_option('-p', '--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option(
'--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file',
help='A file containing a list of roles to be imported')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
elif self.action == "search":
self.parser.add_option('-P', '--platforms', dest='platforms',
help='list of OS platforms to filter by')
self.parser.add_option('-T', '--galaxy-tags', dest='tags',
help='list of galaxy tags to filter by')
self.parser.set_usage("usage: %prog search [<search_term>] [-T <galaxy_tag1,galaxy_tag2>] [-P platform]")
# options that apply to more than one action
if self.action != "init":
self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. '
'The default is the roles_path configured in your '
'ansible.cfg file (/etc/ansible/roles if not configured)')
if self.action in ("info","init","install","search"):
self.parser.add_option('-s', '--server', dest='api_server', default="https://galaxy.ansible.com",
help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True,
help='Ignore SSL certificate validation errors.')
if self.action in ("init","install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role')
# get options, args and galaxy object
self.options, self.args =self.parser.parse_args()
self.display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options, self.display)
return True
def run(self):
super(GalaxyCLI, self).run()
        # if not offline, connect to the galaxy api
if self.action in ("info","install", "search") or (self.action == 'init' and not self.options.offline):
api_server = self.options.api_server
self.api = GalaxyAPI(self.galaxy, api_server)
if not self.api:
raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server)
self.execute()
def get_opt(self, k, defval=""):
"""
Returns an option from an Optparse values instance.
"""
try:
data = getattr(self.options, k)
except:
return defval
if k == "roles_path":
if os.pathsep in data:
data = data.split(os.pathsep)[0]
return data
def exit_without_ignore(self, rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not self.get_opt("ignore_errors", False):
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
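    # For illustration (values are hypothetical), a requirements file entry
    # may be either the old style {role: "galaxy.role,version,name"} or the
    # new style {src: "git+https://github.com/user/repo.git", ...};
    # parse_requirements_files() below normalizes both into a dict with
    # src/scm/name/version keys.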
def parse_requirements_files(self, role):
if 'role' in role:
# Old style: {role: "galaxy.role,version,name", other_vars: "here" }
            role_info = RoleRequirement.role_spec_parse(role['role'])
if isinstance(role_info, dict):
# Warning: Slight change in behaviour here. name may be being
# overloaded. Previously, name was only a parameter to the role.
# Now it is both a parameter to the role and the name that
# ansible-galaxy will install under on the local system.
if 'name' in role and 'name' in role_info:
del role_info['name']
role.update(role_info)
else:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
(scm, src) = role["src"].split('+')
role["scm"] = scm
role["src"] = src
if 'name' not in role:
role["name"] = GalaxyRole.url_to_spec(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
return role
def _display_role_info(self, role_info):
text = "\nRole: %s \n" % role_info['name']
text += "\tdescription: %s \n" % role_info['description']
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text += "\t%s: \n" % (k)
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text += "\t\t%s: %s\n" % (key, role_info[k][key])
else:
text += "\t%s: %s\n" % (k, role_info[k])
return text
############################
# execute actions
############################
def execute_init(self):
"""
Executes the init action, which creates the skeleton framework
of a role that complies with the galaxy metadata format.
"""
init_path = self.get_opt('init_path', './')
force = self.get_opt('force', False)
offline = self.get_opt('offline', False)
role_name = self.args.pop(0).strip()
if role_name == "":
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists." % role_path + \
"you can use --force to re-initialize this directory,\n" + \
"however it will reset any main.yml files that may have\n" + \
"been modified there already.")
# create the default README.md
if not os.path.exists(role_path):
os.makedirs(role_path)
readme_path = os.path.join(role_path, "README.md")
f = open(readme_path, "wb")
f.write(self.galaxy.default_readme)
f.close()
for dir in GalaxyRole.ROLE_DIRS:
dir_path = os.path.join(init_path, role_name, dir)
main_yml_path = os.path.join(dir_path, 'main.yml')
# create the directory if it doesn't exist already
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# now create the main.yml file for that directory
if dir == "meta":
# create a skeleton meta/main.yml with a valid galaxy_info
# datastructure in place, plus with all of the available
# platforms included (but commented out), the galaxy_tags
# list, and the dependencies section
platforms = []
if not offline and self.api:
platforms = self.api.get_list("platforms") or []
# group the list of platforms from the api based
# on their names, with the release field being
# appended to a list of versions
platform_groups = defaultdict(list)
for platform in platforms:
platform_groups[platform['name']].append(platform['release'])
platform_groups[platform['name']].sort()
inject = dict(
author = 'your name',
company = 'your company (optional)',
license = 'license (GPLv2, CC-BY, etc)',
issue_tracker_url = 'http://example.com/issue/tracker',
min_ansible_version = '1.2',
platforms = platform_groups,
)
rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject)
f = open(main_yml_path, 'w')
f.write(rendered_meta)
f.close()
pass
elif dir not in ('files','templates'):
# just write a (mostly) empty YAML file for main.yml
f = open(main_yml_path, 'w')
f.write('---\n# %s file for %s\n' % (dir,role_name))
f.close()
self.display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
Executes the info action. This action prints out detailed
information about an installed role as well as info available
from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.get_opt("roles_path")
data = ''
for role in self.args:
role_info = {}
gr = GalaxyRole(self.galaxy, role)
#self.galaxy.add_role(gr)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
                    install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if self.api:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
__, __, role_spec= req.parse({'role': role})
if role_spec:
role_info.update(role_spec)
data += self._display_role_info(role_info)
if not data:
data += "\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
Executes the installation action. The args list contains the
roles to be installed, unless -f was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github),
or it can be a local .tar.gz file.
"""
role_file = self.get_opt("role_file", None)
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file
# or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
        elif len(self.args) == 1 and role_file is not None:
# using a role file is mutually exclusive of specifying
# the role name on the command line
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
no_deps = self.get_opt("no_deps", False)
force = self.get_opt('force', False)
roles_path = self.get_opt("roles_path")
roles_done = []
roles_left = []
if role_file:
self.display.debug('Getting roles from %s' % role_file)
try:
self.display.debug('Processing role file: %s' % role_file)
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
rolesparsed = map(self.parse_requirements_files, yaml.safe_load(f))
except Exception as e:
raise AnsibleError("%s does not seem like a valid yaml file: %s" % (role_file, str(e)))
roles_left = [GalaxyRole(self.galaxy, **r) for r in rolesparsed]
else:
# roles listed in a file, one per line
self.display.deprecated("Non yaml files for role requirements")
for rname in f.readlines():
if rname.startswith("#") or rname.strip() == '':
continue
roles_left.append(GalaxyRole(self.galaxy, rname.strip()))
f.close()
except (IOError,OSError) as e:
raise AnsibleError("Unable to read requirements file (%s): %s" % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
roles_left.append(GalaxyRole(self.galaxy, rname.strip()))
while len(roles_left) > 0:
# query the galaxy API for the role data
role_data = None
role = roles_left.pop(0)
role_path = role.path
if role.install_info is not None and not force:
self.display.display('- %s is already installed, skipping.' % role.name)
continue
if role_path:
self.options.roles_path = role_path
else:
self.options.roles_path = roles_path
self.display.debug('Installing role %s from %s' % (role.name, self.options.roles_path))
tmp_file = None
installed = False
if role.src and os.path.isfile(role.src):
# installing a local tar.gz
tmp_file = role.src
else:
if role.scm:
# create tar file from scm url
tmp_file = GalaxyRole.scm_archive_role(role.scm, role.src, role.version, role.name)
if role.src:
if '://' not in role.src:
role_data = self.api.lookup_role_by_name(role.src)
if not role_data:
self.display.warning("- sorry, %s was not found on %s." % (role.src, self.options.api_server))
self.exit_without_ignore()
continue
role_versions = self.api.fetch_role_related('versions', role_data['id'])
if not role.version:
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
loose_versions.sort()
role.version = str(loose_versions[-1])
else:
role.version = 'master'
elif role.version != 'master':
if role_versions and role.version not in [a.get('name', None) for a in role_versions]:
self.display.warning('role is %s' % role)
self.display.warning("- the specified version (%s) was not found in the list of available versions (%s)." % (role.version, role_versions))
self.exit_without_ignore()
continue
# download the role. if --no-deps was specified, we stop here,
# otherwise we recursively grab roles and all of their deps.
tmp_file = role.fetch(role_data)
if tmp_file:
installed = role.install(tmp_file)
# we're done with the temp file, clean it up
if tmp_file != role.src:
os.unlink(tmp_file)
# install dependencies, if we want them
if not no_deps and installed:
role_dependencies = role.metadata.get('dependencies', [])
for dep in role_dependencies:
self.display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
__, dep_name, __ = dep_req.parse(dep)
dep_role = GalaxyRole(self.galaxy, name=dep_name)
if dep_role.install_info is None or force:
if dep_role not in roles_left:
self.display.display('- adding dependency: %s' % dep_name)
roles_left.append(GalaxyRole(self.galaxy, name=dep_name))
else:
self.display.display('- dependency %s already pending installation.' % dep_name)
else:
self.display.display('- dependency %s is already installed, skipping.' % dep_name)
if not tmp_file or not installed:
self.display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
Executes the remove action. The args list contains the list
of roles to be removed. This list can contain more than one role.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
self.display.display('- successfully removed %s' % role_name)
else:
self.display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
return 0
def execute_list(self):
"""
Executes the list action. The args list can contain zero
or one role. If one is specified, only that role will be
shown, otherwise all roles in the specified directory will
be shown.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
if len(self.args) == 1:
            # show only the requested role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
self.display.display("- %s, %s" % (name, version))
else:
self.display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.get_opt('roles_path')
roles_path = os.path.expanduser(roles_path)
if not os.path.exists(roles_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path)
elif not os.path.isdir(roles_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path)
path_files = os.listdir(roles_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.metadata
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
self.display.display("- %s, %s" % (path_file, version))
return 0
def execute_search(self):
search = None
if len(self.args) > 1:
raise AnsibleOptionsError("At most a single search term is allowed.")
elif len(self.args) == 1:
search = self.args.pop()
response = self.api.search_roles(search, self.options.platforms, self.options.tags)
if 'count' in response:
self.galaxy.display.display("Found %d roles matching your search:\n" % response['count'])
data = ''
if 'results' in response:
for role in response['results']:
data += self._display_role_info(role)
self.pager(data)
|
dcrosta/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 24,547
|
[
"Galaxy"
] |
ef2192d4c9f74c05868c6a8c14fd34074754f38b2c0dd9f45d00d6ca9b43432b
|
"""
DIRAC - Distributed Infrastructure with Remote Agent Control
The distributed data production and analysis system of LHCb and other VOs.
DIRAC is a software framework for distributed computing which
allows to integrate various computing resources in a single
system. At the same time it integrates all kinds of computing
activities like Monte Carlo simulations, data processing, or
final user analysis.
It is built as a number of cooperating systems:
- Accounting
- Configuration
- Core
- Base
- DISET
- Security
- Utilities
- Workflow
- Framework
- RequestManagement
- Resources
- Transformation
Which are used by other system providing functionality to
the end user:
- DataManagement
- Interfaces
- ResourceStatus
- StorageManagement
- WorkloadManagement
It defines the following data members:
- majorVersion: DIRAC Major version number
- minorVersion: DIRAC Minor version number
- patchLevel: DIRAC Patch level number
- preVersion: DIRAC Pre release number
- version: DIRAC version string
- buildVersion: DIRAC version string
- errorMail: mail address for important errors
- alarmMail: mail address for important alarms
- pythonPath: absolute real path to the directory that contains this file
- rootPath: absolute real path to the parent of DIRAC.pythonPath
It loads Modules from :
- DIRAC.Core.Utilities
It loads:
- S_OK: OK return structure
- S_ERROR: ERROR return structure
- gLogger: global Logger object
- gConfig: global Config object
It defines the following functions:
- abort: aborts execution
- exit: finish execution using callbacks
- siteName: returns DIRAC name for current site
- getPlatform(): DIRAC platform string for current host
- getPlatformTuple(): DIRAC platform tuple for current host
"""
import sys
import os
import platform as pyPlatform
from pkgutil import extend_path
__path__ = extend_path( __path__, __name__ )
__RCSID__ = "$Id$"
# Define Version
majorVersion = 6
minorVersion = 19
patchLevel = 0
preVersion = 13
version = "v%sr%s" % ( majorVersion, minorVersion )
buildVersion = "v%dr%d" % ( majorVersion, minorVersion )
if patchLevel:
version = "%sp%s" % ( version, patchLevel )
buildVersion = "%s build %s" % ( buildVersion, patchLevel )
if preVersion:
version = "%s-pre%s" % ( version, preVersion )
buildVersion = "%s pre %s" % ( buildVersion, preVersion )
# Check the Python version
__pythonMajorVersion = ( "2", )
__pythonMinorVersion = ( "7", )
pythonVersion = pyPlatform.python_version_tuple()
if str( pythonVersion[0] ) not in __pythonMajorVersion or str( pythonVersion[1] ) not in __pythonMinorVersion:
print "Python Version %s not supported by DIRAC" % pyPlatform.python_version()
print "Supported versions are: "
for major in __pythonMajorVersion:
for minor in __pythonMinorVersion:
print "%s.%s.x" % ( major, minor )
sys.exit( 1 )
errorMail = "dirac.alarms@gmail.com"
alarmMail = "dirac.alarms@gmail.com"
# Set rootPath of DIRAC installation
pythonPath = os.path.realpath( __path__[0] )
rootPath = os.path.dirname( pythonPath )
# Import DIRAC.Core.Utilities modules
#from DIRAC.Core.Utilities import *
from DIRAC.Core.Utilities.Network import getFQDN
import DIRAC.Core.Utilities.ExitCallback as ExitCallback
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
# Logger
from DIRAC.FrameworkSystem.Client.Logger import gLogger
#Configuration client
from DIRAC.ConfigurationSystem.Client.Config import gConfig
# Some Defaults if not present in the configuration
FQDN = getFQDN()
if len( FQDN.split( '.' ) ) > 2 :
# Use the last component of the FQDN as country code if there are more than 2 components
_siteName = 'DIRAC.Client.%s' % FQDN.split( '.' )[-1]
else:
# else use local as country code
_siteName = 'DIRAC.Client.local'
__siteName = False
# # Update DErrno with the extensions errors
# from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
# from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
# allExtensions = CSGlobals.getCSExtensions()
#
# # Update for each extension. Careful to conflict :-)
# for extension in allExtensions:
# ol = ObjectLoader( baseModules = ["%sDIRAC" % extension] )
# extraErrorModule = ol.loadModule( 'Core.Utilities.DErrno' )
# if extraErrorModule['OK']:
# extraErrorModule = extraErrorModule['Value']
#
# # The next 3 dictionary MUST be present for consistency
#
# # Global name of errors
# DErrno.__dict__.update( extraErrorModule.extra_dErrName )
# # Dictionary with the error codes
# DErrno.dErrorCode.update( extraErrorModule.extra_dErrorCode )
# # Error description string
# DErrno.dStrError.update( extraErrorModule.extra_dStrError )
#
# # extra_compatErrorString is optional
# for err in getattr( extraErrorModule, 'extra_compatErrorString', [] ) :
# DErrno.compatErrorString.setdefault( err, [] ).extend( extraErrorModule.extra_compatErrorString[err] )
def siteName():
"""
Determine and return DIRAC name for current site
"""
global __siteName
if not __siteName:
__siteName = gConfig.getValue( '/LocalSite/Site', _siteName )
return __siteName
#Callbacks
ExitCallback.registerSignals()
# platform detection
from DIRAC.Core.Utilities.Platform import getPlatformString, getPlatform, getPlatformTuple
def exit( exitCode = 0 ):
"""
Finish execution using callbacks
"""
ExitCallback.execute( exitCode, [] )
sys.exit( exitCode )
def abort( exitCode, *args, **kwargs ):
"""
Abort execution
"""
try:
gLogger.fatal( *args, **kwargs )
os._exit( exitCode )
except OSError:
gLogger.exception( 'Error while executing DIRAC.abort' )
os._exit( exitCode )
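# A minimal usage sketch (assumes a configured DIRAC client installation):
#   import DIRAC
#   DIRAC.gLogger.info( "Running at site %s" % DIRAC.siteName() )
#   DIRAC.exit( 0 )  # runs the registered exit callbacks, then sys.exit( 0 )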
|
Andrew-McNab-UK/DIRAC
|
__init__.py
|
Python
|
gpl-3.0
| 5,925
|
[
"DIRAC"
] |
997beab5aeb265e1a18df0292bae72f18f18bcc9d15b115becb883732f380241
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from builtins import * # NOQA
standard_library.install_aliases() # NOQA
import copy
from logging import getLogger
import chainer
from chainer import cuda
import chainer.functions as F
from chainerrl.agent import AttributeSavingMixin
from chainerrl.agent import BatchAgent
from chainerrl.misc.batch_states import batch_states
from chainerrl.misc.copy_param import synchronize_parameters
from chainerrl.recurrent import Recurrent
from chainerrl.recurrent import RecurrentChainMixin
from chainerrl.recurrent import state_kept
from chainerrl.replay_buffer import batch_experiences
from chainerrl.replay_buffer import ReplayUpdater
def disable_train(chain):
call_orig = chain.__call__
def call_test(self, x):
with chainer.using_config('train', False):
return call_orig(self, x)
chain.__call__ = call_test
class DDPGModel(chainer.Chain, RecurrentChainMixin):
def __init__(self, policy, q_func):
super().__init__(policy=policy, q_function=q_func)
class DDPG(AttributeSavingMixin, BatchAgent):
"""Deep Deterministic Policy Gradients.
This can be used as SVG(0) by specifying a Gaussian policy instead of a
deterministic policy.
Args:
model (DDPGModel): DDPG model that contains both a policy and a
Q-function
actor_optimizer (Optimizer): Optimizer setup with the policy
critic_optimizer (Optimizer): Optimizer setup with the Q-function
replay_buffer (ReplayBuffer): Replay buffer
gamma (float): Discount factor
explorer (Explorer): Explorer that specifies an exploration strategy.
gpu (int): GPU device id if not None nor negative.
replay_start_size (int): if the replay buffer's size is less than
replay_start_size, skip update
minibatch_size (int): Minibatch size
update_interval (int): Model update interval in step
target_update_interval (int): Target model update interval in step
phi (callable): Feature extractor applied to observations
target_update_method (str): 'hard' or 'soft'.
soft_update_tau (float): Tau of soft target update.
n_times_update (int): Number of repetition of update
average_q_decay (float): Decay rate of average Q, only used for
recording statistics
average_loss_decay (float): Decay rate of average loss, only used for
recording statistics
batch_accumulator (str): 'mean' or 'sum'
episodic_update (bool): Use full episodes for update if set True
episodic_update_len (int or None): Subsequences of this length are used
for update if set int and episodic_update=True
logger (Logger): Logger used
batch_states (callable): method which makes a batch of observations.
default is `chainerrl.misc.batch_states.batch_states`
burnin_action_func (callable or None): If not None, this callable
object is used to select actions before the model is updated
one or more times during training.
"""
saved_attributes = ('model',
'target_model',
'actor_optimizer',
'critic_optimizer')
def __init__(self, model, actor_optimizer, critic_optimizer, replay_buffer,
gamma, explorer,
gpu=None, replay_start_size=50000,
minibatch_size=32, update_interval=1,
target_update_interval=10000,
phi=lambda x: x,
target_update_method='hard',
soft_update_tau=1e-2,
n_times_update=1, average_q_decay=0.999,
average_loss_decay=0.99,
episodic_update=False,
episodic_update_len=None,
logger=getLogger(__name__),
batch_states=batch_states,
burnin_action_func=None,
):
self.model = model
if gpu is not None and gpu >= 0:
cuda.get_device_from_id(gpu).use()
self.model.to_gpu(device=gpu)
self.xp = self.model.xp
self.replay_buffer = replay_buffer
self.gamma = gamma
self.explorer = explorer
self.gpu = gpu
self.target_update_interval = target_update_interval
self.phi = phi
self.target_update_method = target_update_method
self.soft_update_tau = soft_update_tau
self.logger = logger
self.average_q_decay = average_q_decay
self.average_loss_decay = average_loss_decay
self.actor_optimizer = actor_optimizer
self.critic_optimizer = critic_optimizer
if episodic_update:
update_func = self.update_from_episodes
else:
update_func = self.update
self.replay_updater = ReplayUpdater(
replay_buffer=replay_buffer,
update_func=update_func,
batchsize=minibatch_size,
episodic_update=episodic_update,
episodic_update_len=episodic_update_len,
n_times_update=n_times_update,
replay_start_size=replay_start_size,
update_interval=update_interval,
)
self.batch_states = batch_states
self.burnin_action_func = burnin_action_func
self.t = 0
self.last_state = None
self.last_action = None
self.target_model = copy.deepcopy(self.model)
disable_train(self.target_model['q_function'])
disable_train(self.target_model['policy'])
self.average_q = 0
self.average_actor_loss = 0.0
self.average_critic_loss = 0.0
# Aliases for convenience
self.q_function = self.model['q_function']
self.policy = self.model['policy']
self.target_q_function = self.target_model['q_function']
self.target_policy = self.target_model['policy']
self.sync_target_network()
def sync_target_network(self):
"""Synchronize target network with current network."""
synchronize_parameters(
src=self.model,
dst=self.target_model,
method=self.target_update_method,
tau=self.soft_update_tau)
# Update Q-function
def compute_critic_loss(self, batch):
"""Compute loss for critic.
Preconditions:
target_q_function must have seen up to s_t and a_t.
target_policy must have seen up to s_t.
q_function must have seen up to s_{t-1} and a_{t-1}.
Postconditions:
target_q_function must have seen up to s_{t+1} and a_{t+1}.
target_policy must have seen up to s_{t+1}.
q_function must have seen up to s_t and a_t.
"""
batch_next_state = batch['next_state']
batch_rewards = batch['reward']
batch_terminal = batch['is_state_terminal']
batch_state = batch['state']
batch_actions = batch['action']
batchsize = len(batch_rewards)
with chainer.no_backprop_mode():
# Target policy observes s_{t+1}
next_actions = self.target_policy(
batch_next_state).sample()
            # Q(s_{t+1}, mu(s_{t+1})) is evaluated.
# This should not affect the internal state of Q.
with state_kept(self.target_q_function):
next_q = self.target_q_function(batch_next_state, next_actions)
# Target Q-function observes s_{t+1} and a_{t+1}
if isinstance(self.target_q_function, Recurrent):
batch_next_actions = batch['next_action']
self.target_q_function.update_state(
batch_next_state, batch_next_actions)
target_q = batch_rewards + self.gamma * \
(1.0 - batch_terminal) * F.reshape(next_q, (batchsize,))
# Estimated Q-function observes s_t and a_t
predict_q = F.reshape(
self.q_function(batch_state, batch_actions),
(batchsize,))
loss = F.mean_squared_error(target_q, predict_q)
# Update stats
self.average_critic_loss *= self.average_loss_decay
self.average_critic_loss += ((1 - self.average_loss_decay) *
float(loss.array))
return loss
def compute_actor_loss(self, batch):
"""Compute loss for actor.
Preconditions:
          q_function must have seen up to s_{t-1} and a_{t-1}.
policy must have seen up to s_{t-1}.
Postconditions:
          q_function must have seen up to s_t and a_t.
policy must have seen up to s_t.
"""
batch_state = batch['state']
batch_action = batch['action']
batch_size = len(batch_action)
# Estimated policy observes s_t
onpolicy_actions = self.policy(batch_state).sample()
# Q(s_t, mu(s_t)) is evaluated.
# This should not affect the internal state of Q.
with state_kept(self.q_function):
q = self.q_function(batch_state, onpolicy_actions)
# Estimated Q-function observes s_t and a_t
if isinstance(self.q_function, Recurrent):
self.q_function.update_state(batch_state, batch_action)
# Avoid the numpy #9165 bug (see also: chainer #2744)
q = q[:, :]
# Since we want to maximize Q, loss is negation of Q
loss = - F.sum(q) / batch_size
# Update stats
self.average_actor_loss *= self.average_loss_decay
self.average_actor_loss += ((1 - self.average_loss_decay) *
float(loss.array))
return loss
def update(self, experiences, errors_out=None):
"""Update the model from experiences"""
batch = batch_experiences(experiences, self.xp, self.phi, self.gamma)
self.critic_optimizer.update(lambda: self.compute_critic_loss(batch))
self.actor_optimizer.update(lambda: self.compute_actor_loss(batch))
def update_from_episodes(self, episodes, errors_out=None):
# Sort episodes desc by their lengths
sorted_episodes = list(reversed(sorted(episodes, key=len)))
max_epi_len = len(sorted_episodes[0])
# Precompute all the input batches
batches = []
for i in range(max_epi_len):
transitions = []
for ep in sorted_episodes:
if len(ep) <= i:
break
transitions.append([ep[i]])
batch = batch_experiences(
transitions, xp=self.xp, phi=self.phi, gamma=self.gamma)
batches.append(batch)
with self.model.state_reset(), self.target_model.state_reset():
# Since the target model is evaluated one-step ahead,
# its internal states need to be updated
self.target_q_function.update_state(
batches[0]['state'], batches[0]['action'])
self.target_policy(batches[0]['state'])
# Update critic through time
critic_loss = 0
for batch in batches:
critic_loss += self.compute_critic_loss(batch)
self.critic_optimizer.update(lambda: critic_loss / max_epi_len)
with self.model.state_reset():
# Update actor through time
actor_loss = 0
for batch in batches:
actor_loss += self.compute_actor_loss(batch)
self.actor_optimizer.update(lambda: actor_loss / max_epi_len)
def act_and_train(self, obs, reward):
self.logger.debug('t:%s r:%s', self.t, reward)
if (self.burnin_action_func is not None
and self.actor_optimizer.t == 0):
action = self.burnin_action_func()
else:
greedy_action = self.act(obs)
action = self.explorer.select_action(self.t, lambda: greedy_action)
self.t += 1
# Update the target network
if self.t % self.target_update_interval == 0:
self.sync_target_network()
if self.last_state is not None:
assert self.last_action is not None
# Add a transition to the replay buffer
self.replay_buffer.append(
state=self.last_state,
action=self.last_action,
reward=reward,
next_state=obs,
next_action=action,
is_state_terminal=False)
self.last_state = obs
self.last_action = action
self.replay_updater.update_if_necessary(self.t)
return self.last_action
def act(self, obs):
with chainer.using_config('train', False):
s = self.batch_states([obs], self.xp, self.phi)
action = self.policy(s).sample()
# Q is not needed here, but log it just for information
q = self.q_function(s, action)
# Update stats
self.average_q *= self.average_q_decay
self.average_q += (1 - self.average_q_decay) * float(q.array)
self.logger.debug('t:%s a:%s q:%s',
self.t, action.array[0], q.array)
return cuda.to_cpu(action.array[0])
def batch_act(self, batch_obs):
"""Select a batch of actions for evaluation.
Args:
batch_obs (Sequence of ~object): Observations.
Returns:
Sequence of ~object: Actions.
"""
with chainer.using_config('train', False), chainer.no_backprop_mode():
batch_xs = self.batch_states(batch_obs, self.xp, self.phi)
batch_action = self.policy(batch_xs).sample()
# Q is not needed here, but log it just for information
q = self.q_function(batch_xs, batch_action)
# Update stats
self.average_q *= self.average_q_decay
self.average_q += (1 - self.average_q_decay) * float(
q.array.mean(axis=0))
self.logger.debug('t:%s a:%s q:%s',
self.t, batch_action.array[0], q.array)
return [cuda.to_cpu(action.array) for action in batch_action]
def batch_act_and_train(self, batch_obs):
"""Select a batch of actions for training.
Args:
batch_obs (Sequence of ~object): Observations.
Returns:
Sequence of ~object: Actions.
"""
if (self.burnin_action_func is not None
and self.actor_optimizer.t == 0):
batch_action = [self.burnin_action_func()
for _ in range(len(batch_obs))]
else:
batch_greedy_action = self.batch_act(batch_obs)
batch_action = [
self.explorer.select_action(
self.t, lambda: batch_greedy_action[i])
for i in range(len(batch_greedy_action))]
self.batch_last_obs = list(batch_obs)
self.batch_last_action = list(batch_action)
return batch_action
def batch_observe_and_train(
self, batch_obs, batch_reward, batch_done, batch_reset):
"""Observe a batch of action consequences for training.
Args:
batch_obs (Sequence of ~object): Observations.
batch_reward (Sequence of float): Rewards.
batch_done (Sequence of boolean): Boolean values where True
indicates the current state is terminal.
batch_reset (Sequence of boolean): Boolean values where True
indicates the current episode will be reset, even if the
current state is not terminal.
Returns:
None
"""
for i in range(len(batch_obs)):
self.t += 1
# Update the target network
if self.t % self.target_update_interval == 0:
self.sync_target_network()
if self.batch_last_obs[i] is not None:
assert self.batch_last_action[i] is not None
# Add a transition to the replay buffer
self.replay_buffer.append(
state=self.batch_last_obs[i],
action=self.batch_last_action[i],
reward=batch_reward[i],
next_state=batch_obs[i],
next_action=None,
is_state_terminal=batch_done[i],
env_id=i,
)
if batch_reset[i] or batch_done[i]:
self.batch_last_obs[i] = None
self.replay_buffer.stop_current_episode(env_id=i)
self.replay_updater.update_if_necessary(self.t)
def batch_observe(self, batch_obs, batch_reward,
batch_done, batch_reset):
pass
def stop_episode_and_train(self, state, reward, done=False):
assert self.last_state is not None
assert self.last_action is not None
# Add a transition to the replay buffer
self.replay_buffer.append(
state=self.last_state,
action=self.last_action,
reward=reward,
next_state=state,
next_action=self.last_action,
is_state_terminal=done)
self.stop_episode()
def stop_episode(self):
self.last_state = None
self.last_action = None
if isinstance(self.model, Recurrent):
self.model.reset_state()
self.replay_buffer.stop_current_episode()
def get_statistics(self):
return [
('average_q', self.average_q),
('average_actor_loss', self.average_actor_loss),
('average_critic_loss', self.average_critic_loss),
]
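# A minimal construction sketch (illustrative only; concrete policies,
# Q-functions, explorers and replay buffers ship with chainerrl, e.g. in
# chainerrl.policies, chainerrl.q_functions, chainerrl.explorers and
# chainerrl.replay_buffer -- the my_* names below are placeholders):
#
#   model = DDPGModel(policy=my_policy, q_func=my_q_function)
#   actor_opt = chainer.optimizers.Adam(alpha=1e-4)
#   actor_opt.setup(model['policy'])
#   critic_opt = chainer.optimizers.Adam(alpha=1e-3)
#   critic_opt.setup(model['q_function'])
#   agent = DDPG(model, actor_opt, critic_opt, replay_buffer=my_buffer,
#                gamma=0.99, explorer=my_explorer)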
|
toslunar/chainerrl
|
chainerrl/agents/ddpg.py
|
Python
|
mit
| 17,802
|
[
"Gaussian"
] |
f53371a420d1224b303ee37138a6f26520eaa4d608ad2eb81b7f1a78c84a8b41
|
#!/usr/bin/env python
__author__ = "Sergio Latorre"
__license__ = "GPL"
__email__ = "sergio.latorre@tuebingen.mpg.de"
from sys import exit
try:
from Bio import Entrez
except ImportError:
exit("Biopython module is missing. Try to install it by typing: pip install Biopython")
from urllib2 import urlopen
import pickle
from sys import argv
# Update references
try:
if argv[1] == "update":
from os import remove
remove("references.pkl")
except IndexError:
pass
# Enter a valid e-mail address
try:
Entrez.email = (open("email_address.txt", "r")).readline()
except IOError:
Entrez.email = raw_input("Please enter your e-mail address: ")
print("\n")
with open("email_address.txt", "w") as address:
address.write(Entrez.email)
address.close()
# Functions
def tax_id(name):
try:
        first = Entrez.esearch(term = name, db = "taxonomy", retmode = "xml")
return (Entrez.read(first))["IdList"][0]
except IndexError:
exit("Wrong name!")
def data_tax(idtax):
first = Entrez.efetch(id = idtax, db = "taxonomy", retmode = "xml")
return Entrez.read(first)
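# Example sketch (requires network access to NCBI Entrez; "3702" is the id
# usually returned for Arabidopsis thaliana, shown here only for illustration):
#   idtax = tax_id("Arabidopsis thaliana")    # e.g. "3702"
#   lineage = data_tax(idtax)[0]["LineageEx"]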
# References
try:
ref = pickle.load(open("references.pkl", "rb"))
except IOError:
print("Constructing references database. It might take a while...")
urls = {"plants" : urlopen("ftp://ftp.ensemblgenomes.org/pub/plants/current/species_EnsemblPlants.txt"),
"fungi" : urlopen("ftp://ftp.ensemblgenomes.org/pub/fungi/current/species_EnsemblFungi.txt"),
"protists" : urlopen("ftp://ftp.ensemblgenomes.org/pub/protists/current/species_EnsemblProtists.txt"),
"metazoa" : urlopen("ftp://ftp.ensemblgenomes.org/pub/metazoa/current/species_EnsemblMetazoa.txt")}
references = []
for group in urls:
for line in urls[group]:
if line.startswith("#"):
continue
else:
references.append(line.split("\t")[3])
ref = {}
for reference in references:
data_ref = data_tax(int(reference))
ref[data_ref[0]["ScientificName"]] = data_ref[0]["LineageEx"]
with open("references.pkl", "wb") as f:
pickle.dump(ref, f, pickle.HIGHEST_PROTOCOL)
f.close()
# Interface
name = raw_input("//////////////////////////////////////\n// Please enter Genus or Species //\n// Examples: //\n// Genus: Phaseolus //\n// Species: Pennisetum clandestinum //\n//////////////////////////////////////\n\nEnter genus / species: ")
print("\n")
# Query species
idtax = tax_id(name)
data = (data_tax(idtax))[0]["LineageEx"]
dat = []
for i in range(len(data)-1, -1, -1):
dat.append(data[i]["ScientificName"])
# Search and results
found = False
for species in ref:
    if name in species.split(" "):
        print("Your best option is: %s" % species)
        found = True
if found:
    exit()
results = []
for querytax in dat:
for refspecies in ref:
for i in range(len(ref[refspecies])-1, -1, -1):
if ref[refspecies][i]["ScientificName"] == querytax:
results.append((refspecies, ref[refspecies][i]["Rank"], ref[refspecies][i]["ScientificName"]))
if results[0][1] != results[1][1]:
    print("Your best option is: %s" % results[0][0])
    print("It is related at the level of %s: %s" % (results[0][1], results[0][2]))
else:
    print("Your best options are: %s or %s" % (results[0][0], results[1][0]))
    print("They are related at the level of %s: %s" % (results[0][1], results[0][2]))
|
smlatorreo/get_closest_reference_genome
|
get_closest_reference_genome.py
|
Python
|
gpl-3.0
| 3,503
|
[
"Biopython"
] |
d653c1adec99023184b488718e9ea5b01967c1964c0f570d1db712b027e2da6a
|
"""
DictCache.
"""
__RCSID__ = "$Id$"
import datetime
# DIRAC
from DIRAC.Core.Utilities.LockRing import LockRing
class DictCache( object ):
"""
.. class:: DictCache
simple dict cache
"""
def __init__( self, deleteFunction = False ):
"""
Initialize the dict cache.
If a delete function is specified it will be invoked when deleting a cached object
"""
self.__lock = None
self.__cache = {}
self.__deleteFunction = deleteFunction
@property
def lock( self ):
""" lock """
if not self.__lock:
self.__lock = LockRing().getLock( self.__class__.__name__, recursive = True )
return self.__lock
def exists( self, cKey, validSeconds = 0 ):
"""
Returns True/False if the key exists for the given number of seconds
Arguments:
:param cKey: identification key of the record
:param validSeconds: The amount of seconds the key has to be valid for
"""
self.lock.acquire()
try:
# Is the key in the cache?
if cKey in self.__cache:
expTime = self.__cache[ cKey ][ 'expirationTime' ]
# If it's valid return True!
if expTime > datetime.datetime.now() + datetime.timedelta( seconds = validSeconds ):
return True
else:
# Delete expired
self.delete( cKey )
return False
finally:
self.lock.release()
def delete( self, cKey ):
"""
Delete a key from the cache
:param cKey: identification key of the record
"""
self.lock.acquire()
try:
if cKey not in self.__cache:
return
if self.__deleteFunction:
self.__deleteFunction( self.__cache[ cKey ][ 'value' ] )
del( self.__cache[ cKey ] )
finally:
self.lock.release()
def add( self, cKey, validSeconds, value = None ):
"""
Add a record to the cache
:param cKey: identification key of the record
:param validSeconds: valid seconds of this record
:param value: value of the record
"""
if max( 0, validSeconds ) == 0:
return
self.lock.acquire()
try:
vD = { 'expirationTime' : datetime.datetime.now() + datetime.timedelta( seconds = validSeconds ),
'value' : value }
self.__cache[ cKey ] = vD
finally:
self.lock.release()
def get( self, cKey, validSeconds = 0 ):
"""
Get a record from the cache
:param cKey: identification key of the record
:param validSeconds: The amount of seconds the key has to be valid for
"""
self.lock.acquire()
try:
# Is the key in the cache?
if cKey in self.__cache:
expTime = self.__cache[ cKey ][ 'expirationTime' ]
        # If it's still valid, return the value
if expTime > datetime.datetime.now() + datetime.timedelta( seconds = validSeconds ):
return self.__cache[ cKey ][ 'value' ]
else:
# Delete expired
self.delete( cKey )
return None
finally:
self.lock.release()
def showContentsInString( self ):
"""
Return a human readable string to represent the contents
"""
self.lock.acquire()
try:
data = []
for cKey in self.__cache:
data.append( "%s:" % str( cKey ) )
data.append( "\tExp: %s" % self.__cache[ cKey ][ 'expirationTime' ] )
if self.__cache[ cKey ][ 'value' ]:
data.append( "\tVal: %s" % self.__cache[ cKey ][ 'value' ] )
return "\n".join( data )
finally:
self.lock.release()
def getKeys( self, validSeconds = 0 ):
"""
Get keys for all contents
"""
self.lock.acquire()
try:
keys = []
limitTime = datetime.datetime.now() + datetime.timedelta( seconds = validSeconds )
for cKey in self.__cache:
if self.__cache[ cKey ][ 'expirationTime' ] > limitTime:
keys.append( cKey )
return keys
finally:
self.lock.release()
def purgeExpired( self, expiredInSeconds = 0 ):
"""
Purge all entries that are expired or will be expired in <expiredInSeconds>
"""
self.lock.acquire()
try:
keys = []
limitTime = datetime.datetime.now() + datetime.timedelta( seconds = expiredInSeconds )
for cKey in self.__cache:
if self.__cache[ cKey ][ 'expirationTime' ] < limitTime:
keys.append( cKey )
for cKey in keys:
if self.__deleteFunction:
self.__deleteFunction( self.__cache[ cKey ][ 'value' ] )
del( self.__cache[ cKey ] )
finally:
self.lock.release()
def purgeAll( self ):
"""
Purge all entries
"""
self.lock.acquire()
try:
keys = self.__cache.keys()
for cKey in keys:
if self.__deleteFunction:
self.__deleteFunction( self.__cache[ cKey ][ 'value' ] )
del( self.__cache[ cKey ] )
finally:
self.lock.release()
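# A minimal usage sketch (illustrative, not part of the module):
#   cache = DictCache()
#   cache.add( "pilot", validSeconds = 60, value = { "status" : "OK" } )
#   if cache.exists( "pilot" ):
#     info = cache.get( "pilot" )  # returns None once the entry has expired
#   cache.purgeExpired()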
|
coberger/DIRAC
|
Core/Utilities/DictCache.py
|
Python
|
gpl-3.0
| 4,837
|
[
"DIRAC"
] |
3dc2805bb679d828a516db67c440dde2f41582f198f8e1646f3027558f0b8050
|
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Collection of modules for dealing with biological data in Python.
The Biopython Project is an international association of developers
of freely available Python tools for computational molecular biology.
http://biopython.org
"""
__docformat__ = "restructuredtext en" # not just plaintext
__version__ = "1.64+"
class MissingExternalDependencyError(Exception):
"""Missing an external dependency.
Used for things like missing command line tools. Important for our unit
tests to allow skipping tests with missing external dependencies.
"""
pass
class MissingPythonDependencyError(MissingExternalDependencyError, ImportError):
"""Missing an external python dependency (subclass of ImportError).
Used for missing Python modules (rather than just a typical ImportError).
Important for our unit tests to allow skipping tests with missing external
python dependencies, while also allowing the exception to be caught as an
ImportError.
"""
pass
class BiopythonWarning(Warning):
"""Biopython warning.
Biopython should use this warning (or subclasses of it), making it easy to
silence all our warning messages should you wish to:
>>> import warnings
>>> from Bio import BiopythonWarning
>>> warnings.simplefilter('ignore', BiopythonWarning)
Consult the warnings module documentation for more details.
"""
pass
class BiopythonParserWarning(BiopythonWarning):
"""Biopython parser warning.
    Some invalid data files cannot be parsed and will trigger an exception.
Where a reasonable interpretation is possible, Biopython will issue this
warning to indicate a potential problem. To silence these warnings, use:
>>> import warnings
>>> from Bio import BiopythonParserWarning
>>> warnings.simplefilter('ignore', BiopythonParserWarning)
Consult the warnings module documentation for more details.
"""
pass
class BiopythonDeprecationWarning(BiopythonWarning):
"""Biopython deprecation warning.
Biopython uses this warning instead of the built in DeprecationWarning
since those are ignored by default since Python 2.7.
To silence all our deprecation warning messages, use:
>>> import warnings
>>> from Bio import BiopythonDeprecationWarning
>>> warnings.simplefilter('ignore', BiopythonDeprecationWarning)
Code marked as deprecated is likely to be removed in a future version
of Biopython. To avoid removal of this code, please contact the Biopython
developers by sending an email to biopython-dev@biopython.org.
"""
pass
class BiopythonExperimentalWarning(BiopythonWarning):
"""Biopython experimental code warning.
Biopython uses this warning for experimental code ('alpha' or 'beta'
level code) which is released as part of the standard releases to mark
    sub-modules or functions for early adopters to test and give feedback.
Code issuing this warning is likely to change (or even be removed) in
a subsequent release of Biopython. Such code should NOT be used for
production/stable code. It should only be used if:
- You are running the latest release of Biopython, or ideally the
latest code from our repository.
- You are subscribed to the biopython-dev mailing list to provide
      feedback on this code, and to be alerted to changes to it.
    If all goes well, experimental code will be promoted to stable in
    a subsequent release, and this warning removed from it.
"""
pass
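# A minimal sketch of how library code might issue one of these warnings
# (the message text is illustrative):
#   import warnings
#   warnings.warn("Record looks malformed; attempting a best-effort parse.",
#                 BiopythonParserWarning)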
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/__init__.py
|
Python
|
gpl-2.0
| 3,764
|
[
"Biopython"
] |
92d68394da93bb611ed9e834b4d26edde8b6523b304d1dce2ed5391031562060
|
# Note: Tests for the TitlesAlignments class are in blast/test_titles.py
# and diamond/test_titles.py because that class needs a concrete
# (iterable) dark.alignments.ReadsAlignments class passed to its
# __init__. The tests below test the simpler dark.titles classes,
# TitleAlignment and TitleAlignments.
from collections import Counter
import six
import warnings
import platform
from unittest import TestCase
from dark.titles import TitleAlignment, TitleAlignments
from dark.reads import Read
from dark.hsp import HSP, LSP
_pypy = platform.python_implementation() == 'PyPy'
class WarningTestMixin(object):
"""
Provide an assertion test which checks to see that a specified warning
was raised.
"""
# Taken from
# http://stackoverflow.com/questions/3892218/
# how-to-test-with-pythons-unittest-that-a-warning-has-been-thrown
def assertWarns(self, warning, callable, *args, **kwds):
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
callable(*args, **kwds)
self.assertTrue(
any(item.category == warning for item in warning_list))
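    # Example use (illustrative): assert that invoking warnings.warn raises
    # a UserWarning during the call:
    #   self.assertWarns(UserWarning, warnings.warn, 'message', UserWarning)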
class TestTitleAlignment(TestCase):
"""
Test the TitleAlignment class.
"""
def testExpectedAttributes(self):
"""
An instance of TitleAlignment must have the expected attributes.
"""
read = Read('id', 'AAA')
titleAlignment = TitleAlignment(read, [])
self.assertEqual(read, titleAlignment.read)
self.assertEqual([], titleAlignment.hsps)
def testToDict(self):
"""
The toDict method must return the expected result.
"""
read = Read('the-id', 'AAA')
hsp1 = HSP(0, readStart=1, readEnd=2,
readStartInSubject=3, readEndInSubject=4,
subjectStart=5, subjectEnd=6,
readMatchedSequence='aaa', subjectMatchedSequence='ccc',
readFrame=7, subjectFrame=8, identicalCount=9,
percentIdentical=31.3, positiveCount=10,
percentPositive=4.5)
hsp2 = HSP(10, readStart=11, readEnd=12,
readStartInSubject=13, readEndInSubject=14,
subjectStart=15, subjectEnd=16,
readMatchedSequence='ggg', subjectMatchedSequence='ttt',
readFrame=17, subjectFrame=18, identicalCount=19,
percentIdentical=32.3, positiveCount=20,
percentPositive=4.6)
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
self.assertEqual(
{
'hsps': [
{
'score': 0,
'readStart': 1,
'readEnd': 2,
'readStartInSubject': 3,
'readEndInSubject': 4,
'subjectStart': 5,
'subjectEnd': 6,
'readFrame': 7,
'subjectFrame': 8,
'identicalCount': 9,
'percentIdentical': 31.3,
'positiveCount': 10,
'percentPositive': 4.5,
'readMatchedSequence': 'aaa',
'subjectMatchedSequence': 'ccc',
},
{
'score': 10,
'readStart': 11,
'readEnd': 12,
'readStartInSubject': 13,
'readEndInSubject': 14,
'subjectStart': 15,
'subjectEnd': 16,
'readFrame': 17,
'subjectFrame': 18,
'identicalCount': 19,
'percentIdentical': 32.3,
'positiveCount': 20,
'percentPositive': 4.6,
'readMatchedSequence': 'ggg',
'subjectMatchedSequence': 'ttt',
},
],
'read': {
'id': 'the-id',
'quality': None,
'sequence': 'AAA',
},
},
titleAlignment.toDict())
class TestTitleAlignments(WarningTestMixin, TestCase):
"""
Test the TitleAlignments class.
"""
def testExpectedAttributes(self):
"""
An instance of TitleAlignments must have the expected attributes.
"""
titleAlignments = TitleAlignments('subject title', 55)
self.assertEqual('subject title', titleAlignments.subjectTitle)
self.assertEqual(55, titleAlignments.subjectLength)
self.assertEqual([], titleAlignments)
def testAddAlignment(self):
"""
It must be possible to add an alignment to an instance of
TitleAlignments.
"""
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id', 'AAA')
titleAlignment = TitleAlignment(read, [])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(read, titleAlignments[0].read)
self.assertEqual([], titleAlignments[0].hsps)
def testHSPs(self):
"""
The hsps function must produce a list of all HSPs.
"""
hsp1 = HSP(7)
hsp2 = HSP(14)
hsp3 = HSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual([7, 14, 21],
[hsp.score.score for hsp in titleAlignments.hsps()])
def testReadsEmpty(self):
"""
The reads function must return an empty Reads instance if there are no
reads for the title.
"""
titleAlignments = TitleAlignments('subject title', 55)
self.assertEqual(0, len(list(titleAlignments.reads())))
def testReads(self):
"""
The reads function must return a Reads instance with the reads for
the title.
"""
hsp1 = HSP(7)
hsp2 = HSP(14)
hsp3 = HSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read1 = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read1, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read2 = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read2, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual([read1, read2], list(titleAlignments.reads()))
def testReadCountZero(self):
"""
The readCount function must return zero if no reads matched a title.
"""
titleAlignments = TitleAlignments('subject title', 55)
self.assertEqual(0, titleAlignments.readCount())
def testReadCount(self):
"""
The readCount function must indicate how many reads matched a title.
"""
hsp1 = HSP(7)
hsp2 = HSP(14)
hsp3 = HSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(2, titleAlignments.readCount())
def testHspCountZero(self):
"""
The hspCount function must return zero if no reads matched a title.
"""
titleAlignments = TitleAlignments('subject title', 55)
self.assertEqual(0, titleAlignments.hspCount())
def testHspCount(self):
"""
The hspCount function must indicate how many HSPs were found in
total for all the alignments to a title.
"""
hsp1 = HSP(7)
hsp2 = HSP(14)
hsp3 = HSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(3, titleAlignments.hspCount())
def testMedianScoreWithNoAlignments(self):
"""
        The medianScore function must raise ValueError (because it is given
        an empty sequence) if there are no alignments matching a title.
"""
titleAlignments = TitleAlignments('subject title', 55)
error = '^arg is an empty sequence$'
six.assertRaisesRegex(self, ValueError, error,
titleAlignments.medianScore)
def testMedianScoreWithNoHsps(self):
"""
The medianScore function must raise ValueError if there are no HSPs.
"""
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [])
titleAlignments.addAlignment(titleAlignment)
error = '^arg is an empty sequence$'
six.assertRaisesRegex(self, ValueError, error,
titleAlignments.medianScore)
def testMedianScoreOfTwo(self):
"""
The medianScore function must return the median score for the HSPs in
all the alignments matching a title when given 2 scores.
"""
hsp1 = HSP(7)
hsp2 = HSP(15)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(11, titleAlignments.medianScore())
def testMedianScoreOfThree(self):
"""
The medianScore function must return the median score for the HSPs in
all the alignments matching a title when given 3 scores.
"""
hsp1 = HSP(7)
hsp2 = HSP(15)
hsp3 = HSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(15, titleAlignments.medianScore())
def testBestHspWithNoHsps(self):
"""
The bestHsp function must raise ValueError if there are no HSPs.
"""
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [])
titleAlignments.addAlignment(titleAlignment)
if _pypy:
error = '^arg is an empty sequence$'
else:
error = '^max\\(\\) arg is an empty sequence$'
six.assertRaisesRegex(self, ValueError, error, titleAlignments.bestHsp)
def testBestHsp(self):
"""
The bestHsp function must return the HSP with the best score for all
the HSPs for all the alignments matching a title.
"""
hsp1 = HSP(7)
hsp2 = HSP(15)
hsp3 = HSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(hsp3, titleAlignments.bestHsp())
def testWorstHspWithNoHsps(self):
"""
The worstHsp function must raise ValueError if there are no HSPs.
"""
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [])
titleAlignments.addAlignment(titleAlignment)
if _pypy:
error = '^arg is an empty sequence$'
else:
error = '^min\\(\\) arg is an empty sequence$'
six.assertRaisesRegex(self, ValueError, error,
titleAlignments.worstHsp)
def testWorstHsp(self):
"""
The worstHsp function must return the HSP with the worst score for all
the HSPs for all the alignments matching a title.
"""
hsp1 = HSP(7)
hsp2 = HSP(15)
hsp3 = HSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(hsp1, titleAlignments.worstHsp())
def testBetterThanFalse(self):
"""
The hasScoreBetterThan function must return False if there is no HSP
with a score better than the passed value.
"""
hsp1 = HSP(7)
hsp2 = HSP(15)
hsp3 = HSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertFalse(titleAlignments.hasScoreBetterThan(21))
def testBetterThanTrue(self):
"""
The hasScoreBetterThan function must return True if there is an HSP
with a score better than the passed value.
"""
hsp1 = HSP(7)
hsp2 = HSP(15)
hsp3 = HSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertTrue(titleAlignments.hasScoreBetterThan(20))
def testCoverageNoReads(self):
"""
The coverage method must return zero when a title alignments has no
alignments (and therefore no coverage).
"""
titleAlignments = TitleAlignments('subject title', 100)
self.assertEqual(0.0, titleAlignments.coverage())
def testFullCoverage(self):
"""
The coverage method must return the correct value when the title is
fully covered by its reads.
"""
hsp1 = HSP(7, subjectStart=0, subjectEnd=50)
hsp2 = HSP(8, subjectStart=50, subjectEnd=100)
titleAlignments = TitleAlignments('subject title', 100)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(1.0, titleAlignments.coverage())
def testPartialCoverage(self):
"""
The coverage method must return the correct value when the title is
partially covered by its reads.
"""
hsp1 = HSP(7, subjectStart=10, subjectEnd=20)
hsp2 = HSP(15, subjectStart=30, subjectEnd=40)
hsp3 = HSP(21, subjectStart=50, subjectEnd=60)
titleAlignments = TitleAlignments('subject title', 100)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(0.3, titleAlignments.coverage())
def testFullCoverageCounts(self):
"""
The coverageCounts method must return the correct result when the title
is fully covered by its reads.
"""
hsp1 = HSP(7, subjectStart=0, subjectEnd=5)
hsp2 = HSP(8, subjectStart=5, subjectEnd=10)
titleAlignments = TitleAlignments('subject title', 10)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
c = Counter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertEqual(c, titleAlignments.coverageCounts())
def testCoverageCounts(self):
"""
The coverageCounts method must return the correct results when the
title is partially covered by its reads.
"""
hsp1 = HSP(7, subjectStart=1, subjectEnd=2)
hsp2 = HSP(15, subjectStart=3, subjectEnd=4)
hsp3 = HSP(21, subjectStart=5, subjectEnd=6)
titleAlignments = TitleAlignments('subject title', 10)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
c = Counter([1, 3, 5])
self.assertEqual(c, titleAlignments.coverageCounts())
def testCoverageCountsOverlap(self):
"""
The coverageCounts method must return the correct results when the
title is partially covered by its reads that overlap.
"""
hsp1 = HSP(7, subjectStart=1, subjectEnd=2)
hsp2 = HSP(15, subjectStart=3, subjectEnd=6)
hsp3 = HSP(21, subjectStart=5, subjectEnd=6)
titleAlignments = TitleAlignments('subject title', 10)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
c = Counter([1, 3, 4, 5, 5])
self.assertEqual(c, titleAlignments.coverageCounts())
def testCoverageInfoNoReads(self):
"""
When a title has no reads aligned to it, the coverageInfo method
must return an empty result.
"""
titleAlignments = TitleAlignments('subject title', 55)
coverage = titleAlignments.coverageInfo()
self.assertEqual({}, coverage)
def testCoverageInfoOneReadWithOneHSP(self):
"""
When a title has one read with one HSP aligned to it, the coverageInfo
method must return just the indices and bases from that read.
"""
titleAlignments = TitleAlignments('subject title', 55)
hsp = HSP(15, subjectStart=3, subjectEnd=6, readMatchedSequence='CGT')
read = Read('id1', 'AAACGT')
titleAlignment = TitleAlignment(read, [hsp])
titleAlignments.addAlignment(titleAlignment)
coverage = titleAlignments.coverageInfo()
self.assertEqual(
{
3: [(15, 'C')],
4: [(15, 'G')],
5: [(15, 'T')],
},
coverage)
def testCoverageInfoOneReadWithTwoHSPs(self):
"""
When a title has one read with two HSPs aligned to it, the coverageInfo
method must return the correct indices and bases from that read.
"""
titleAlignments = TitleAlignments('subject title', 55)
hsp1 = HSP(15, subjectStart=1, subjectEnd=4, readMatchedSequence='A-A')
hsp2 = HSP(10, subjectStart=3, subjectEnd=6, readMatchedSequence='CGT')
read = Read('id1', 'AAACGT')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
coverage = titleAlignments.coverageInfo()
self.assertEqual(
{
1: [(15, 'A')],
2: [(15, '-')],
3: [(15, 'A'), (10, 'C')],
4: [(10, 'G')],
5: [(10, 'T')],
},
coverage)
def testCoverageInfoTwoReadsWithThreeHSPs(self):
"""
When a title has two reads (one with two HSPs, one with one) aligned
to it, the coverageInfo method must return the correct indices and
bases from the read.
"""
titleAlignments = TitleAlignments('subject title', 55)
# First read.
hsp1 = HSP(15, subjectStart=1, subjectEnd=4, readMatchedSequence='A-A')
hsp2 = HSP(10, subjectStart=3, subjectEnd=6, readMatchedSequence='CGT')
read = Read('id1', 'AAACGT')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
# Second read.
hsp1 = HSP(20, subjectStart=5, subjectEnd=10,
readMatchedSequence='CGGTA')
read = Read('id2', 'AAACGTCGGTAAAA')
titleAlignment = TitleAlignment(read, [hsp1])
titleAlignments.addAlignment(titleAlignment)
coverage = titleAlignments.coverageInfo()
self.assertEqual(
{
1: [(15, 'A')],
2: [(15, '-')],
3: [(15, 'A'), (10, 'C')],
4: [(10, 'G')],
5: [(10, 'T'), (20, 'C')],
6: [(20, 'G')],
7: [(20, 'G')],
8: [(20, 'T')],
9: [(20, 'A')],
},
coverage)
def testResidueCountsNoReads(self):
"""
When a title has no reads aligned to it, the residueCounts method
must return an empty result.
"""
titleAlignments = TitleAlignments('subject title', 55)
counts = titleAlignments.residueCounts()
self.assertEqual(0, len(counts))
def testResidueCountsUnknownCaseConversion(self):
"""
The residueCounts method must raise a ValueError when asked to do an
unknown case conversion.
"""
titleAlignments = TitleAlignments('subject title', 55)
error = "convertCaseTo must be one of 'none', 'lower', or 'upper'"
six.assertRaisesRegex(
self, ValueError, error, titleAlignments.residueCounts,
convertCaseTo='xxx')
def testResidueCountsOneReadOneHSP(self):
"""
The residueCounts method must return the correct result when just one
read with one HSP is aligned to a title.
"""
read = Read('id', 'ACGT')
hsp = HSP(33, readStart=0, readEnd=4, readStartInSubject=0,
readEndInSubject=4, subjectStart=0, subjectEnd=4,
readMatchedSequence='ACGT', subjectMatchedSequence='ACGT')
titleAlignments = TitleAlignments('subject title', 55)
titleAlignment = TitleAlignment(read, [hsp])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
0: {'A': 1},
1: {'C': 1},
2: {'G': 1},
3: {'T': 1},
},
titleAlignments.residueCounts())
def testResidueCountsOneReadOneHSPPartialMatch(self):
"""
The residueCounts method must return the correct result when just one
read with one HSP is aligned to a title and only part of the read
matched the subject (all the read bases are still counted and
returned).
"""
read = Read('id', 'ACGT')
hsp = HSP(33, readStart=0, readEnd=2, readStartInSubject=0,
readEndInSubject=4, subjectStart=0, subjectEnd=4,
readMatchedSequence='ACGT', subjectMatchedSequence='ACGT')
titleAlignments = TitleAlignments('subject title', 55)
titleAlignment = TitleAlignment(read, [hsp])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
0: {'A': 1},
1: {'C': 1},
2: {'G': 1},
3: {'T': 1},
},
titleAlignments.residueCounts())
def testResidueCountsOneReadTwoHSPsAtStartOfSubject(self):
"""
The residueCounts method must return the correct result when just one
read with two HSPs is aligned to a title and the leftmost HSP is
aligned with the left edge of the subject.
HSP1: ACGT
HSP2: CGTT
"""
read = Read('id', 'ACGT')
hsp1 = HSP(33, readStart=0, readEnd=4, readStartInSubject=0,
readEndInSubject=4, subjectStart=0, subjectEnd=4,
readMatchedSequence='ACGT', subjectMatchedSequence='ACGT')
hsp2 = HSP(33, readStart=0, readEnd=4, readStartInSubject=1,
readEndInSubject=5, subjectStart=1, subjectEnd=5,
readMatchedSequence='CGTT', subjectMatchedSequence='CGTT')
titleAlignments = TitleAlignments('subject title', 55)
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
0: {'A': 1},
1: {'C': 2},
2: {'G': 2},
3: {'T': 2},
4: {'T': 1},
},
titleAlignments.residueCounts())
def testResidueCountsOneReadTwoHSPsNotAtStartOfSubject(self):
"""
The residueCounts method must return the correct result when just one
read with two HSPs is aligned to a title and the leftmost HSP is not
aligned with the left edge of the subject.
HSP1: ACGT
HSP2: CGTT
"""
read = Read('id', 'ACGT')
hsp1 = HSP(33, readStart=0, readEnd=4, readStartInSubject=10,
readEndInSubject=14, subjectStart=10, subjectEnd=14,
readMatchedSequence='ACGT', subjectMatchedSequence='ACGT')
hsp2 = HSP(33, readStart=0, readEnd=4, readStartInSubject=11,
readEndInSubject=15, subjectStart=11, subjectEnd=15,
readMatchedSequence='CGTT', subjectMatchedSequence='CGTT')
titleAlignments = TitleAlignments('subject title', 55)
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
10: {'A': 1},
11: {'C': 2},
12: {'G': 2},
13: {'T': 2},
14: {'T': 1},
},
titleAlignments.residueCounts())
def testResidueCountsNoCaseConversion(self):
"""
The residueCounts method must return the correct result when asked not
to convert case.
HSP1: AcgT
HSP2: CGTT
"""
read = Read('id', 'ACGT')
hsp1 = HSP(33, readStart=0, readEnd=4, readStartInSubject=10,
readEndInSubject=14, subjectStart=10, subjectEnd=14,
readMatchedSequence='AcgT', subjectMatchedSequence='ACGT')
hsp2 = HSP(33, readStart=0, readEnd=4, readStartInSubject=11,
readEndInSubject=15, subjectStart=11, subjectEnd=15,
readMatchedSequence='CGTT', subjectMatchedSequence='CGTT')
titleAlignments = TitleAlignments('subject title', 55)
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
10: {'A': 1},
11: {'C': 1, 'c': 1},
12: {'G': 1, 'g': 1},
13: {'T': 2},
14: {'T': 1},
},
titleAlignments.residueCounts(convertCaseTo='none'))
def testResidueCountsCaseConvertLower(self):
"""
The residueCounts method must return the correct result when asked to
convert residues to lower case.
HSP1: AcgT
HSP2: CGTT
"""
read = Read('id', 'ACGT')
hsp1 = HSP(33, readStart=0, readEnd=4, readStartInSubject=10,
readEndInSubject=14, subjectStart=10, subjectEnd=14,
readMatchedSequence='AcgT', subjectMatchedSequence='ACGT')
hsp2 = HSP(33, readStart=0, readEnd=4, readStartInSubject=11,
readEndInSubject=15, subjectStart=11, subjectEnd=15,
readMatchedSequence='CGTT', subjectMatchedSequence='CGTT')
titleAlignments = TitleAlignments('subject title', 55)
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
10: {'a': 1},
11: {'c': 2},
12: {'g': 2},
13: {'t': 2},
14: {'t': 1},
},
titleAlignments.residueCounts(convertCaseTo='lower'))
def testResidueCountsCaseConvertUpper(self):
"""
The residueCounts method must return the correct result when asked to
convert residues to upper case.
HSP1: AcgT
HSP2: CGTT
"""
read = Read('id', 'ACGT')
hsp1 = HSP(33, readStart=0, readEnd=4, readStartInSubject=10,
readEndInSubject=14, subjectStart=10, subjectEnd=14,
readMatchedSequence='AcgT', subjectMatchedSequence='ACGT')
hsp2 = HSP(33, readStart=0, readEnd=4, readStartInSubject=11,
readEndInSubject=15, subjectStart=11, subjectEnd=15,
readMatchedSequence='CGTT', subjectMatchedSequence='CGTT')
titleAlignments = TitleAlignments('subject title', 55)
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
10: {'A': 1},
11: {'C': 2},
12: {'G': 2},
13: {'T': 2},
14: {'T': 1},
},
titleAlignments.residueCounts(convertCaseTo='upper'))
def testResidueCountsCaseConvertUpperIsDefault(self):
"""
The residueCounts method must convert to uppercase by default.
HSP1: AcgT
HSP2: CGTT
"""
read = Read('id', 'ACGT')
hsp1 = HSP(33, readStart=0, readEnd=4, readStartInSubject=10,
readEndInSubject=14, subjectStart=10, subjectEnd=14,
readMatchedSequence='AcgT', subjectMatchedSequence='ACGT')
hsp2 = HSP(33, readStart=0, readEnd=4, readStartInSubject=11,
readEndInSubject=15, subjectStart=11, subjectEnd=15,
readMatchedSequence='CGTT', subjectMatchedSequence='CGTT')
titleAlignments = TitleAlignments('subject title', 55)
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
10: {'A': 1},
11: {'C': 2},
12: {'G': 2},
13: {'T': 2},
14: {'T': 1},
},
titleAlignments.residueCounts())
def testResidueCountsTwoReadsTwoHSPsLeftOverhang(self):
"""
The residueCounts method must return the correct result when two
reads, each with one HSP are aligned to a title and the leftmost HSP
is aligned before the left edge of the subject (i.e, will include
negative subject offsets).
Subject: GTT
HSP1: ACGT
HSP2: CGTT
"""
read1 = Read('id', 'ACGT')
hsp1 = HSP(33, readStart=0, readEnd=4, readStartInSubject=-2,
readEndInSubject=2, subjectStart=0, subjectEnd=2,
readMatchedSequence='GT', subjectMatchedSequence='GT')
read2 = Read('id', 'CGTT')
hsp2 = HSP(33, readStart=0, readEnd=4, readStartInSubject=-1,
readEndInSubject=3, subjectStart=0, subjectEnd=3,
readMatchedSequence='GTT', subjectMatchedSequence='GTT')
titleAlignments = TitleAlignments('subject title', 55)
titleAlignment = TitleAlignment(read1, [hsp1])
titleAlignments.addAlignment(titleAlignment)
titleAlignment = TitleAlignment(read2, [hsp2])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
-2: {'A': 1},
-1: {'C': 2},
0: {'G': 2},
1: {'T': 2},
2: {'T': 1},
},
titleAlignments.residueCounts())
def testResidueCountsOneReadTwoHSPsNotOverlapping(self):
"""
The residueCounts method must return the correct result when just one
read with two HSPs is aligned to a title and the HSPs do not overlap
one another.
HSP1: ACGT
HSP2: CGTT
"""
read = Read('id', 'ACGT')
hsp1 = HSP(33, readStart=0, readEnd=4, readStartInSubject=0,
readEndInSubject=4, subjectStart=0, subjectEnd=4,
readMatchedSequence='ACGT', subjectMatchedSequence='ACGT')
hsp2 = HSP(33, readStart=0, readEnd=4, readStartInSubject=10,
readEndInSubject=14, subjectStart=10, subjectEnd=14,
readMatchedSequence='CGTT', subjectMatchedSequence='CGTT')
titleAlignments = TitleAlignments('subject title', 55)
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
0: {'A': 1},
1: {'C': 1},
2: {'G': 1},
3: {'T': 1},
10: {'C': 1},
11: {'G': 1},
12: {'T': 1},
13: {'T': 1},
},
titleAlignments.residueCounts())
def testSummaryWhenEmpty(self):
"""
If summary is called on an instance of TitleAlignments with no
alignments a ValueError must be raised.
"""
titleAlignments = TitleAlignments('subject title', 55)
error = '^max\\(\\) arg is an empty sequence$'
six.assertRaisesRegex(self, ValueError, error, titleAlignments.summary)
def testSummary(self):
"""
The summary method must return the correct result.
"""
titleAlignments = TitleAlignments('subject title', 10)
titleAlignments.addAlignment(
TitleAlignment(Read('id1', 'ACGT'), [
HSP(30, subjectStart=0, subjectEnd=2),
]))
titleAlignments.addAlignment(
TitleAlignment(Read('id2', 'ACGT'), [
HSP(55, subjectStart=2, subjectEnd=4),
HSP(40, subjectStart=8, subjectEnd=9),
]))
self.assertEqual(
{
'bestScore': 55,
'coverage': 0.5,
'hspCount': 3,
'medianScore': 40,
'readCount': 2,
'subjectLength': 10,
'subjectTitle': 'subject title',
},
titleAlignments.summary())
def testToDict(self):
"""
The toDict method must return the expected result.
"""
read = Read('the-id', 'AAA')
hsp1 = HSP(0, readStart=1, readEnd=2,
readStartInSubject=3, readEndInSubject=4,
subjectStart=5, subjectEnd=6,
readMatchedSequence='aaa', subjectMatchedSequence='ccc',
readFrame=7, subjectFrame=8, identicalCount=9,
percentIdentical=17.9, positiveCount=10,
percentPositive=3.9)
hsp2 = HSP(10, readStart=11, readEnd=12,
readStartInSubject=13, readEndInSubject=14,
subjectStart=15, subjectEnd=16,
readMatchedSequence='ggg', subjectMatchedSequence='ttt',
readFrame=17, subjectFrame=18, identicalCount=19,
percentIdentical=27.9, positiveCount=20,
percentPositive=3.8)
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments = TitleAlignments('subject title', 10)
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(
{
'subjectTitle': 'subject title',
'subjectLength': 10,
'titleAlignments': [
{
'hsps': [
{
'score': 0,
'readStart': 1,
'readEnd': 2,
'readStartInSubject': 3,
'readEndInSubject': 4,
'subjectStart': 5,
'subjectEnd': 6,
'readFrame': 7,
'subjectFrame': 8,
'identicalCount': 9,
'percentIdentical': 17.9,
'positiveCount': 10,
'percentPositive': 3.9,
'readMatchedSequence': 'aaa',
'subjectMatchedSequence': 'ccc',
},
{
'score': 10,
'readStart': 11,
'readEnd': 12,
'readStartInSubject': 13,
'readEndInSubject': 14,
'subjectStart': 15,
'subjectEnd': 16,
'readFrame': 17,
'subjectFrame': 18,
'identicalCount': 19,
'percentIdentical': 27.9,
'positiveCount': 20,
'percentPositive': 3.8,
'readMatchedSequence': 'ggg',
'subjectMatchedSequence': 'ttt',
},
],
'read': {
'id': 'the-id',
'quality': None,
'sequence': 'AAA',
},
},
],
},
titleAlignments.toDict())
class TestTitleAlignmentsLSP(TestCase):
"""
Test the TitleAlignments class using LSPs. The only tests here are ones
that depend on lower scores being better.
"""
def testBestHsp(self):
"""
The bestHsp function must return the HSP with the best score for all
the HSPs for all the alignments matching a title.
"""
hsp1 = LSP(7)
hsp2 = LSP(15)
hsp3 = LSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(hsp1, titleAlignments.bestHsp())
def testWorstHsp(self):
"""
The worstHsp function must return the HSP with the worst score for all
the HSPs for all the alignments matching a title.
"""
hsp1 = LSP(7)
hsp2 = LSP(15)
hsp3 = LSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(hsp3, titleAlignments.worstHsp())
def testBetterThanFalse(self):
"""
The hasScoreBetterThan function must return False if there is no HSP
with a score better than the passed value.
"""
hsp1 = LSP(7)
hsp2 = LSP(15)
hsp3 = LSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertFalse(titleAlignments.hasScoreBetterThan(7))
def testBetterThanTrue(self):
"""
The hasScoreBetterThan function must return True if there is an HSP
with a score better than the passed value.
"""
hsp1 = LSP(7)
hsp2 = LSP(15)
hsp3 = LSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertTrue(titleAlignments.hasScoreBetterThan(9))
def testReadIdsEmpty(self):
"""
The readIds function must return the empty set if no reads matched a
title.
"""
titleAlignments = TitleAlignments('subject title', 55)
self.assertEqual(0, len(titleAlignments.readIds()))
def testReadIds(self):
"""
The readIds function must return the set of read ids for the alignments
matching a title.
"""
hsp1 = LSP(7)
hsp2 = LSP(15)
hsp3 = LSP(21)
titleAlignments = TitleAlignments('subject title', 55)
read = Read('id1', 'AAA')
titleAlignment = TitleAlignment(read, [hsp1, hsp2])
titleAlignments.addAlignment(titleAlignment)
read = Read('id2', 'AAA')
titleAlignment = TitleAlignment(read, [hsp3])
titleAlignments.addAlignment(titleAlignment)
self.assertEqual(set(['id1', 'id2']), titleAlignments.readIds())
|
terrycojones/dark-matter
|
test/test_titles.py
|
Python
|
mit
| 42,821
|
[
"BLAST"
] |
707749965f3c5d80b06ea2010e8efb5b0cd854d69c18ffa84eaf9158f773d864
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/graphs.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import datetime
from king_phisher import ua_parser
from king_phisher import utilities
from king_phisher.client import gui_utilities
from gi.repository import Gtk
try:
import matplotlib
matplotlib.rcParams['backend'] = 'GTK3Cairo'
from matplotlib import dates
from matplotlib import pyplot
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
from matplotlib.backends.backend_gtk3cairo import FigureManagerGTK3Cairo as FigureManager
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3 as NavigationToolbar
except ImportError:
has_matplotlib = False
"""Whether the :py:mod:`matplotlib` module is available."""
else:
# check_requirements() returns any unmet requirements, so a truthy
# result here means the installed matplotlib version is too old
if utilities.check_requirements(['matplotlib>=1.4.1']):
    has_matplotlib = False
else:
    has_matplotlib = True
EXPORTED_GRAPHS = {}
def export(klass):
"""
Decorator for classes to mark them as valid graph providers.
:param class klass: The class to mark as a graph provider.
:return: The *klass* parameter is returned.
"""
graph_name = klass.__name__[13:]
klass._graph_id = len(EXPORTED_GRAPHS)
klass.name = graph_name
EXPORTED_GRAPHS[graph_name] = klass
return klass
def get_graph(graph_name):
"""
Return the graph providing class for *graph_name*.
:param str graph_name: The name of the graph provider.
:return: The graph provider class.
:rtype: :py:class:`.CampaignGraph`
"""
return EXPORTED_GRAPHS.get(graph_name)
def get_graphs():
"""
Get a list of all registered graph providers.
:return: All registered graph providers.
:rtype: list
"""
return sorted(EXPORTED_GRAPHS.keys())
class CampaignGraph(object):
"""
A basic graph provider for using :py:mod:`matplotlib` to create graph
representations of campaign data. This class is meant to be subclassed
by real providers.
"""
title = 'Unknown'
"""The title of the graph."""
_graph_id = None
table_subscriptions = []
"""A list of tables from which information is needed to produce the graph."""
def __init__(self, config, parent, size_request=None):
"""
:param dict config: The King Phisher client configuration.
:param parent: The parent window for this object.
:type parent: :py:class:`Gtk.Window`
:param tuple size_request: The size to set for the canvas.
"""
self.config = config
"""A reference to the King Phisher client configuration."""
self.parent = parent
"""The parent :py:class:`Gtk.Window` instance."""
self.figure, ax = pyplot.subplots()
self.axes = self.figure.get_axes()
self.canvas = FigureCanvas(self.figure)
self.manager = None
if size_request:
self.canvas.set_size_request(*size_request)
self.canvas.mpl_connect('button_press_event', self.mpl_signal_canvas_button_pressed)
self.canvas.show()
self.navigation_toolbar = NavigationToolbar(self.canvas, self.parent)
self.navigation_toolbar.hide()
self.popup_menu = Gtk.Menu.new()
menu_item = Gtk.MenuItem.new_with_label('Export')
menu_item.connect('activate', self.signal_activate_popup_menu_export)
self.popup_menu.append(menu_item)
menu_item = Gtk.MenuItem.new_with_label('Refresh')
menu_item.connect('activate', lambda action: self.refresh())
self.popup_menu.append(menu_item)
menu_item = Gtk.CheckMenuItem.new_with_label('Show Toolbar')
menu_item.connect('toggled', self.signal_toggled_popup_menu_show_toolbar)
self.popup_menu.append(menu_item)
self.popup_menu.show_all()
@classmethod
def get_graph_id(klass):
"""
The graph id of an exported :py:class:`.CampaignGraph`.
:param klass: The class to return the graph id of.
:type klass: :py:class:`.CampaignGraph`
:return: The id of the graph.
:rtype: int
"""
return klass._graph_id
def make_window(self):
"""
Create a window from the figure manager.
:return: The graph in a new, dedicated window.
:rtype: :py:class:`Gtk.Window`
"""
if self.manager is None:
self.manager = FigureManager(self.canvas, 0)
window = self.manager.window
window.set_transient_for(self.parent)
window.set_title(self.title)
return window
def mpl_signal_canvas_button_pressed(self, event):
if event.button != 3:
return
self.popup_menu.popup(None, None, None, None, event.button, Gtk.get_current_event_time())
return True
def signal_activate_popup_menu_export(self, action):
dialog = gui_utilities.UtilityFileChooser('Export Graph', self.parent)
file_name = self.config['campaign_name'] + '.png'
response = dialog.run_quick_save(file_name)
dialog.destroy()
if not response:
return
destination_file = response['target_path']
self.figure.savefig(destination_file, format='png')
def signal_toggled_popup_menu_show_toolbar(self, widget):
if widget.get_property('active'):
self.navigation_toolbar.show()
else:
self.navigation_toolbar.hide()
def load_graph(self):
"""Load the graph information via :py:meth:`.refresh`."""
self.refresh()
def refresh(self, info_cache=None, stop_event=None):
"""
Refresh the graph data by retrieving the information from the
remote server.
:param dict info_cache: An optional cache of data tables.
:param stop_event: An optional object indicating that the operation should stop.
:type stop_event: :py:class:`threading.Event`
:return: A dictionary of cached tables from the server.
:rtype: dict
"""
info_cache = (info_cache or {})
if not self.parent.rpc:
return info_cache
for table in self.table_subscriptions:
if stop_event and stop_event.is_set():
return info_cache
if table not in info_cache:
info_cache[table] = list(self.parent.rpc.remote_table('campaign/' + table, self.config['campaign_id']))
map(lambda ax: ax.clear(), self.axes)
self._load_graph(info_cache)
self.canvas.draw()
return info_cache
@export
class CampaignGraphOverview(CampaignGraph):
"""Display a graph which represents an overview of the campaign."""
title = 'Overview'
table_subscriptions = ['credentials', 'visits']
def _load_graph(self, info_cache):
rpc = self.parent.rpc
cid = self.config['campaign_id']
visits = info_cache['visits']
creds = info_cache['credentials']
bars = []
bars.append(rpc('campaign/messages/count', cid))
bars.append(len(visits))
bars.append(len(utilities.unique(visits, key=lambda visit: visit['message_id'])))
if len(creds):
bars.append(len(creds))
bars.append(len(utilities.unique(creds, key=lambda cred: cred['message_id'])))
width = 0.25
ax = self.axes[0]
bars = ax.bar(range(len(bars)), bars, width)
ax.set_ylabel('Grand Total')
ax.set_title('Campaign Overview')
ax.set_yticks((1,))
ax.set_xticks(map(lambda x: float(x) + (width / 2), range(len(bars))))
ax.set_xticklabels(('Messages', 'Visits', 'Unique\nVisits', 'Credentials', 'Unique\nCredentials')[:len(bars)], rotation=30)
for col in bars:
height = col.get_height()
ax.text(col.get_x() + col.get_width() / 2.0, height, str(height), ha='center', va='bottom')
self.figure.subplots_adjust(bottom=0.25)
return info_cache
@export
class CampaignGraphVisitorInfo(CampaignGraph):
"""Display a graph which represents information regarding a campaign's visitors."""
title = 'Visitor Information'
table_subscriptions = ['visits']
def _load_graph(self, info_cache):
visits = info_cache['visits']
operating_systems = {}
unknown_os = 'Unknown OS'
for visit in visits:
user_agent = ua_parser.parse_user_agent(visit['visitor_details'])
if user_agent:
operating_systems[user_agent.os_name] = operating_systems.get(user_agent.os_name, 0) + 1
else:
operating_systems[unknown_os] = operating_systems.get(unknown_os, 0) + 1
os_names = operating_systems.keys()
os_names.sort(key=lambda name: operating_systems[name])
os_names.reverse()
bars = []
for os_name in os_names:
bars.append(operating_systems[os_name])
width = 0.25
ax = self.axes[0]
bars = ax.bar(range(len(bars)), bars, width)
ax.set_ylabel('Total Visits')
ax.set_title('Visitor OS Information')
ax.set_yticks((1,))
ax.set_xticks(map(lambda x: float(x) + (width / 2), range(len(bars))))
ax.set_xticklabels(os_names, rotation=30)
for col in bars:
height = col.get_height()
ax.text(col.get_x() + col.get_width() / 2.0, height, str(height), ha='center', va='bottom')
self.figure.subplots_adjust(bottom=0.25)
return info_cache
@export
class CampaignGraphVisitsTimeline(CampaignGraph):
"""Display a graph which represents the visits of a campaign over time."""
title = 'Visits Timeline'
table_subscriptions = ['visits']
def _load_graph(self, info_cache):
visits = info_cache['visits']
first_visits = map(lambda visit: visit['first_visit'], visits)
ax = self.axes[0]
ax.set_ylabel('Number of Visits')
ax.set_title('Visits Over Time')
if not len(first_visits):
ax.set_yticks((0,))
ax.set_xticks((0,))
return info_cache
ax.xaxis.set_major_locator(dates.AutoDateLocator())
ax.xaxis.set_major_formatter(dates.DateFormatter('%Y-%m-%d'))
first_visits.sort()
first_visit_span = first_visits[-1] - first_visits[0]
ax.plot_date(first_visits, range(1, len(first_visits) + 1), '-')
self.figure.autofmt_xdate()
if first_visit_span < datetime.timedelta(7):
ax.xaxis.set_minor_locator(dates.DayLocator())
if first_visit_span < datetime.timedelta(3) and len(first_visits) > 1:
ax.xaxis.set_minor_locator(dates.HourLocator())
return info_cache
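# --- Illustrative sketch (not part of the original module) ---
# A minimal, hedged example of the provider pattern described in the
# CampaignGraph docstring: subclasses set a title and the tables they
# need, then draw in _load_graph(). The 'messages' table rows and their
# 'trained' key are assumptions made for illustration only.
@export
class CampaignGraphTrainingTotals(CampaignGraph):
	"""Display a hypothetical graph of trained vs. untrained targets."""
	title = 'Training Totals'
	table_subscriptions = ['messages']
	def _load_graph(self, info_cache):
		messages = info_cache['messages']
		# count the rows flagged as trained (assumed dict-like rows)
		trained = len([m for m in messages if m.get('trained')])
		ax = self.axes[0]
		ax.bar((0, 1), (trained, len(messages) - trained), 0.25)
		ax.set_xticks((0.125, 1.125))
		ax.set_xticklabels(('Trained', 'Untrained'))
		ax.set_title('Training Totals')
		return info_cache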
|
0x0mar/king-phisher
|
king_phisher/client/graphs.py
|
Python
|
bsd-3-clause
| 10,887
|
[
"VisIt"
] |
79f7b4de04ac5225c5cb3d247e77709d686d2c9d71182e97505278faeb8471f8
|
__author__ = 'ddeconti'
import FileHandler
import numpy
import pickle
import sys
from rdkit import Chem, DataStructs
from sklearn import svm
from sklearn.cross_validation import train_test_split
def svc_training(target, control):
np_fps = []
for fp in target + control:
arr = numpy.zeros((1,))
DataStructs.ConvertToNumpyArray(fp, arr)
np_fps.append(arr)
lin_svc = svm.LinearSVC(C=1.0)
ys_fit = [1] * len(target) + [0] * len(control)
lin_svc.fit(np_fps, ys_fit)
return lin_svc
def svc_test(target, control, lin_svc):
    # convert fingerprints to numpy arrays, as in training, before scoring
    np_fps = [numpy.zeros((1,)) for _ in target + control]
    for fp, arr in zip(target + control, np_fps):
        DataStructs.ConvertToNumpyArray(fp, arr)
    print lin_svc.score(np_fps, [1] * len(target) + [0] * len(control))
def main(sa):
sln_filename = sa[0]
sdf_filename = sa[1]
sln_fp = FileHandler.SlnFile(sln_filename).get_fingerprint_list()
sdf_fp = FileHandler.SdfFile(sdf_filename).get_fingerprint_list()
pain_train, pain_test = train_test_split(sln_fp,
test_size=0.2,
random_state=24)
control_train, control_test = train_test_split(sdf_fp,
test_size=0.2,
random_state=24)
lin_svc = svc_training(pain_train, control_train)
svc_test(pain_test, control_test, lin_svc)
if __name__ == "__main__":
main(sys.argv[1:])
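# Illustrative usage sketch (not part of the original script); the file
# names below are hypothetical placeholders for an SLN file of PAINS
# molecules and an SDF file of control molecules:
#   python linear_svc_analysis.py pains.sln controls.sdf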
|
dkdeconti/PAINS-train
|
training_methods/classifier/linear_svc_analysis.py
|
Python
|
mit
| 1,406
|
[
"RDKit"
] |
9fddc308cf2bc8f6c4e814aaaf78874bf4b235fbef1370c8499703db598ad9ee
|
Keras is like a front end, with Tensorflow/Theano as the backend.
The best part of Keras is that the 'best-practices' are built-in
It also comes with pre-trained models for image recognition! How cool is that ;)
Tensorflow = Google
Theano = University of Montreal (MILA)
Tensorflow is low-level, but has more control.
Keras is high-level abstraction => easier to use
When to use Tensorflow?
1. when you try new models
2. when you build large models
When to use Keras?
1. for education / trying new things
2. for quick prototyping
Supervised Learning:
1. Choose a Model
2. Training Phase (using training data)
3. Testing Phase
4. Evaluation Phase
Keras Sequential Model API:
- create an empty sequential model object and then add layers to it in sequence
model = keras.models.Sequential()
model.add(Dense(32, input_dim=9)) #32 is number of neurons in that layer
model.add(Dense(128))
- Customizing Layers
Let's talk about the different ways we can customize a neural network layer. Before values flow from nodes in one layer to the next, they pass through an activation function.
Keras lets us choose which activation function is used for each layer by passing in the name of the activation function we want to use. In this case, I've told it to use a rectified linear unit, or ReLU, activation function.
model.add(Dense(number_of_neurons, activation='relu'))
So far we've talked about densely connected layers which are the most basic type of layer, but Keras also supports many different types of neural network layers.
Let's look at two other major types of layers that Keras supports.
First are convolutional layers. These are typically used to process images or spatial data. Next are recurrent layers.
keras.layers.convolutional.Conv2D()
Recurrent layers are special layers that have a memory built into each neuron. These are used to process sequential data like words in a
sentence where the previous data points are important to understanding the next data point. You can mix layers of different types in the same model as needed.
keras.layers.recurrent.LSTM()
The final step of defining a model is to compile it by calling model.compile.
model.compile(optimizer='adam', loss='mse')
When you compile a model, you have to pass in the optimizer algorithm and the loss function you want to use. The optimizer algorithm is the algorithm used to train your neural network.
The loss function is how the training process measures how right or how wrong your neural network's predictions are. In this case, I've used the adam optimizer function which is a common and powerful optimizer, and the mean squared error loss function.
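Putting the pieces together, here is a minimal sketch of the full Sequential
workflow described above (the layer sizes and the 9-feature input are
arbitrary assumptions for illustration, not from any particular dataset):
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(32, input_dim=9, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(1))  # one output neuron, e.g. for a regression target
model.compile(optimizer='adam', loss='mse')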
|
iitjee/SteppinsMachineLearning
|
Keras/00 Intro.py
|
Python
|
apache-2.0
| 2,773
|
[
"NEURON"
] |
6033c8cf232a68700c2cd23fee694bc5d7e3cee6850bc265d87d3e5ed97fa583
|
import numpy
import random
from rdkit.ML.DecTree import ID3
from rdkit.ML.DecTree import CrossValidate
def GenRandomExamples(nVars=10,randScale=0.3,bitProb=0.5,nExamples=500,seed=(0,0),
addResults=1):
random.seed(seed[0])
varWeights = numpy.array([random.random() for x in range(nVars)])*randScale
examples = [None]*nExamples
for i in xrange(nExamples):
varVals=[random.random()>bitProb for x in range(nVars)]
temp = numpy.array(varVals) * varWeights
res = sum(temp)
if addResults:
varVals.append(res>=1.)
examples[i] = varVals
nPossibleVals = [2]*(nExamples+1)
attrs = range(nVars)
return (examples,attrs,nPossibleVals)
if __name__ == '__main__':
import cPickle
examples,attrs,nPossibleVals = GenRandomExamples()
outF = open('random.dat.pkl','wb+')
cPickle.dump(examples,outF)
cPickle.dump(attrs,outF)
cPickle.dump(nPossibleVals,outF)
tree = ID3.ID3Boot(examples,attrs,nPossibleVals)
tree.Pickle('save.pkl')
|
rdkit/rdkit-orig
|
rdkit/ML/DecTree/randomtest.py
|
Python
|
bsd-3-clause
| 1,060
|
[
"RDKit"
] |
c1fa7f99ba73f0becd7fd52f4c642d13cc215b683f87c3d7951caff067ed3969
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import numpy as np
import math
import itertools
import collections
import warnings
from monty.json import MSONable
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import pbc_diff
"""
This module provides classes to define everything related to band structures.
"""
__author__ = "Geoffroy Hautier, Shyue Ping Ong, Michael Kocher"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy@uclouvain.be"
__status__ = "Development"
__date__ = "March 14, 2012"
class Kpoint(MSONable):
"""
Class to store kpoint objects. A kpoint is defined with a lattice and
fractional or cartesian coordinates, with a syntax similar to the site
object in pymatgen.core.structure.
Args:
coords: coordinate of the kpoint as a numpy array
lattice: A pymatgen.core.lattice.Lattice lattice object representing
the reciprocal lattice of the kpoint
to_unit_cell: Translates fractional coordinates to the basic unit
cell, i.e., all fractional coordinates satisfy 0 <= a < 1.
Defaults to False.
coords_are_cartesian: Boolean indicating if the coordinates given are
in cartesian or fractional coordinates (by default fractional)
label: the label of the kpoint if any (None by default)
"""
def __init__(self, coords, lattice, to_unit_cell=False,
coords_are_cartesian=False, label=None):
self._lattice = lattice
self._fcoords = lattice.get_fractional_coords(coords) \
if coords_are_cartesian else coords
self._label = label
if to_unit_cell:
for i in range(len(self._fcoords)):
self._fcoords[i] -= math.floor(self._fcoords[i])
self._ccoords = lattice.get_cartesian_coords(self._fcoords)
@property
def lattice(self):
"""
The lattice associated with the kpoint. It's a
pymatgen.core.lattice.Lattice object
"""
return self._lattice
@property
def label(self):
"""
The label associated with the kpoint
"""
return self._label
@property
def frac_coords(self):
"""
The fractional coordinates of the kpoint as a numpy array
"""
return np.copy(self._fcoords)
@property
def cart_coords(self):
"""
The cartesian coordinates of the kpoint as a numpy array
"""
return np.copy(self._ccoords)
@property
def a(self):
"""
Fractional a coordinate of the kpoint
"""
return self._fcoords[0]
@property
def b(self):
"""
Fractional b coordinate of the kpoint
"""
return self._fcoords[1]
@property
def c(self):
"""
Fractional c coordinate of the kpoint
"""
return self._fcoords[2]
def __str__(self):
"""
Returns a string with fractional, cartesian coordinates and label
"""
return "{} {} {}".format(self.frac_coords, self.cart_coords,
self.label)
def as_dict(self):
"""
Json-serializable dict representation of a kpoint
"""
return {"lattice": self.lattice.as_dict(),
"fcoords": list(self.frac_coords),
"ccoords": list(self.cart_coords), "label": self.label,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
class BandStructure(object):
"""
This is the most generic band structure data possible;
it's defined by a list of kpoints + energies for each of them.
.. attribute:: kpoints:
the list of kpoints (as Kpoint objects) in the band structure
.. attribute:: lattice_rec
the reciprocal lattice of the band structure.
.. attribute:: efermi
the fermi energy
.. attribute:: is_spin_polarized
True if the band structure is spin-polarized, False otherwise
.. attribute:: bands
The energy eigenvalues as a {spin: ndarray}. Note that the use of an
ndarray is necessary for computational as well as memory efficiency
due to the large amount of numerical data. The indices of the ndarray
are [band_index, kpoint_index].
.. attribute:: nb_bands
returns the number of bands in the band structure
.. attribute:: structure
returns the structure
.. attribute:: projections
The projections as a {spin: ndarray}. Note that the use of an
ndarray is necessary for computational as well as memory efficiency
due to the large amount of numerical data. The indices of the ndarray
are [band_index, kpoint_index, orbital_index, ion_index].
Args:
kpoints: list of kpoint as numpy arrays, in frac_coords of the
given lattice by default
eigenvals: dict of energies for spin up and spin down
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. If the band structure is not spin polarized, we
only store one data set under Spin.up
lattice: The reciprocal lattice as a pymatgen Lattice object.
Pymatgen uses the physics convention of reciprocal lattice vectors
WITH a 2*pi coefficient
efermi: fermi energy
labels_dict: (dict) mapping a label to its kpoint coordinates (in
frac coords or cartesian coordinates depending on coords_are_cartesian).
coords_are_cartesian: Whether coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure
projections: dict of orbital projections as {spin: ndarray}. The
indices of the ndarray are [band_index, kpoint_index, orbital_index,
ion_index]. If the band structure is not spin polarized, we only
store one data set under Spin.up.
"""
def __init__(self, kpoints, eigenvals, lattice, efermi, labels_dict=None,
coords_are_cartesian=False, structure=None, projections=None):
self.efermi = efermi
self.lattice_rec = lattice
self.kpoints = []
self.labels_dict = {}
self.structure = structure
self.projections = projections or {}
if labels_dict is None:
labels_dict = {}
if len(self.projections) != 0 and self.structure is None:
raise Exception("if projections are provided a structure object"
" needs also to be given")
for k in kpoints:
# let see if this kpoint has been assigned a label
label = None
for c in labels_dict:
if np.linalg.norm(k - np.array(labels_dict[c])) < 0.0001:
label = c
self.labels_dict[label] = Kpoint(
k, lattice, label=label,
coords_are_cartesian=coords_are_cartesian)
self.kpoints.append(
Kpoint(k, lattice, label=label,
coords_are_cartesian=coords_are_cartesian))
self.bands = {spin: np.array(v) for spin, v in eigenvals.items()}
self.nb_bands = len(eigenvals[Spin.up])
self.is_spin_polarized = len(self.bands) == 2
def get_projection_on_elements(self):
"""
Method returning a dictionary of projections on elements.
Returns:
a dictionary in the {Spin.up:[][{Element:values}],
Spin.down:[][{Element:values}]} format
If there are no projections in the band structure,
returns an empty dict.
"""
result = {}
structure = self.structure
for spin, v in self.projections.items():
result[spin] = [[collections.defaultdict(float)
for i in range(len(self.kpoints))]
for j in range(self.nb_bands)]
for i, j, k in itertools.product(range(self.nb_bands),
range(len(self.kpoints)),
range(structure.num_sites)):
result[spin][i][j][str(structure[k].specie)] += np.sum(
v[i, j, :, k])
return result
def get_projections_on_elements_and_orbitals(self, el_orb_spec):
"""
Method returning a dictionary of projections on elements and specific
orbitals
Args:
el_orb_spec: A dictionary of Elements and Orbitals for which we want
to have projections on. It is given as: {Element:[orbitals]},
e.g., {'Cu':['d','s']}
Returns:
A dictionary of projections on elements in the
{Spin.up:[][{Element:{orb:values}}],
Spin.down:[][{Element:{orb:values}}]} format
If there are no projections in the band structure, returns an empty
dict.
"""
result = {}
structure = self.structure
el_orb_spec = {get_el_sp(el): orbs for el, orbs in el_orb_spec.items()}
for spin, v in self.projections.items():
result[spin] = [[{str(e): collections.defaultdict(float)
for e in el_orb_spec}
for i in range(len(self.kpoints))]
for j in range(self.nb_bands)]
for i, j, k in itertools.product(
range(self.nb_bands), range(len(self.kpoints)),
range(structure.num_sites)):
sp = structure[k].specie
for orb_i in range(len(v[i][j])):
o = Orbital(orb_i).name[0]
if sp in el_orb_spec:
if o in el_orb_spec[sp]:
result[spin][i][j][str(sp)][o] += v[i][j][
orb_i][k]
return result
def is_metal(self, efermi_tol=1e-4):
"""
Check if the band structure indicates a metal by looking if the fermi
level crosses a band.
Returns:
True if a metal, False if not
"""
for spin, values in self.bands.items():
for i in range(self.nb_bands):
if np.any(values[i, :] - self.efermi < -efermi_tol) and \
np.any(values[i, :] - self.efermi > efermi_tol):
return True
return False
def get_vbm(self):
"""
Returns data about the VBM.
Returns:
dict as {"band_index","kpoint_index","kpoint","energy"}
- "band_index": A dict with spin keys pointing to a list of the
indices of the band containing the VBM (please note that you
can have several bands sharing the VBM) {Spin.up:[],
Spin.down:[]}
- "kpoint_index": The list of indices in self.kpoints for the
VBM kpoint. Please note that there can be several
kpoint_indices relating to the same kpoint (e.g., Gamma can
occur at different spots in the band structure line plot)
- "kpoint": The kpoint (as a kpoint object)
- "energy": The energy of the VBM
- "projections": The projections along sites and orbitals of the
VBM if any projection data is available (else it is an empty
dictionary). The format is similar to the projections field in
BandStructure: {spin:{'Orbital': [proj]}} where the array
[proj] is ordered according to the sites in structure
"""
if self.is_metal():
return {"band_index": [], "kpoint_index": [],
"kpoint": [], "energy": None, "projections": {}}
max_tmp = -float("inf")
index = None
kpointvbm = None
for spin, v in self.bands.items():
for i, j in zip(*np.where(v < self.efermi)):
if v[i, j] > max_tmp:
max_tmp = float(v[i, j])
index = j
kpointvbm = self.kpoints[j]
list_ind_kpts = []
if kpointvbm.label is not None:
for i in range(len(self.kpoints)):
if self.kpoints[i].label == kpointvbm.label:
list_ind_kpts.append(i)
else:
list_ind_kpts.append(index)
# get all other bands sharing the vbm
list_ind_band = collections.defaultdict(list)
for spin in self.bands:
for i in range(self.nb_bands):
if math.fabs(self.bands[spin][i][index] - max_tmp) < 0.001:
list_ind_band[spin].append(i)
proj = {}
for spin, v in self.projections.items():
if len(list_ind_band[spin]) == 0:
continue
proj[spin] = v[list_ind_band[spin][0]][list_ind_kpts[0]]
return {'band_index': list_ind_band,
'kpoint_index': list_ind_kpts,
'kpoint': kpointvbm, 'energy': max_tmp,
'projections': proj}
def get_cbm(self):
"""
Returns data about the CBM.
Returns:
{"band_index","kpoint_index","kpoint","energy"}
- "band_index": A dict with spin keys pointing to a list of the
indices of the band containing the VBM (please note that you
can have several bands sharing the VBM) {Spin.up:[],
Spin.down:[]}
- "kpoint_index": The list of indices in self.kpoints for the
kpoint vbm. Please note that there can be several
kpoint_indices relating to the same kpoint (e.g., Gamma can
occur at different spots in the band structure line plot)
- "kpoint": The kpoint (as a kpoint object)
- "energy": The energy of the VBM
- "projections": The projections along sites and orbitals of the
VBM if any projection data is available (else it is an empty
dictionnary). The format is similar to the projections field in
BandStructure: {spin:{'Orbital': [proj]}} where the array
[proj] is ordered according to the sites in structure
"""
if self.is_metal():
return {"band_index": [], "kpoint_index": [],
"kpoint": [], "energy": None, "projections": {}}
max_tmp = float("inf")
index = None
kpointcbm = None
for spin, v in self.bands.items():
for i, j in zip(*np.where(v >= self.efermi)):
if v[i, j] < max_tmp:
max_tmp = float(v[i, j])
index = j
kpointcbm = self.kpoints[j]
list_index_kpoints = []
if kpointcbm.label is not None:
for i in range(len(self.kpoints)):
if self.kpoints[i].label == kpointcbm.label:
list_index_kpoints.append(i)
else:
list_index_kpoints.append(index)
# get all other bands sharing the cbm
list_index_band = collections.defaultdict(list)
for spin in self.bands:
for i in range(self.nb_bands):
if math.fabs(self.bands[spin][i][index] - max_tmp) < 0.001:
list_index_band[spin].append(i)
proj = {}
for spin, v in self.projections.items():
if len(list_index_band[spin]) == 0:
continue
proj[spin] = v[list_index_band[spin][0]][list_index_kpoints[0]]
return {'band_index': list_index_band,
'kpoint_index': list_index_kpoints,
'kpoint': kpointcbm, 'energy': max_tmp,
'projections': proj}
def get_band_gap(self):
"""
Returns band gap data.
Returns:
A dict {"energy","direct","transition"}:
"energy": band gap energy
"direct": A boolean telling if the gap is direct or not
"transition": kpoint labels of the transition (e.g., "\\Gamma-X")
"""
if self.is_metal():
return {"energy": 0.0, "direct": False, "transition": None}
cbm = self.get_cbm()
vbm = self.get_vbm()
result = dict(direct=False, energy=0.0, transition=None)
result["energy"] = cbm["energy"] - vbm["energy"]
if (cbm["kpoint"].label is not None and cbm["kpoint"].label == vbm[
"kpoint"].label) \
or np.linalg.norm(cbm["kpoint"].cart_coords
- vbm["kpoint"].cart_coords) < 0.01:
result["direct"] = True
result["transition"] = "-".join(
[str(c.label) if c.label is not None else
str("(") + ",".join(["{0:.3f}".format(c.frac_coords[i])
for i in range(3)])
+ str(")") for c in [vbm["kpoint"], cbm["kpoint"]]])
return result
def get_direct_band_gap_dict(self):
"""
Returns a dictionary of information about the direct
band gap
Returns:
a dictionary of the band gaps indexed by spin
along with their band indices and k-point index
"""
if self.is_metal():
raise ValueError("get_direct_band_gap_dict should"
"only be used with non-metals")
direct_gap_dict = {}
for spin, v in self.bands.items():
above = v[np.all(v > self.efermi, axis=1)]
min_above = np.min(above, axis=0)
below = v[np.all(v < self.efermi, axis=1)]
max_below = np.max(below, axis=0)
diff = min_above - max_below
kpoint_index = np.argmin(diff)
band_indices = [np.argmax(below[:, kpoint_index]),
np.argmin(above[:, kpoint_index]) + len(below)]
direct_gap_dict[spin] = {"value": diff[kpoint_index],
"kpoint_index": kpoint_index,
"band_indices": band_indices}
return direct_gap_dict
def get_direct_band_gap(self):
"""
Returns the direct band gap.
Returns:
the value of the direct band gap
"""
if self.is_metal():
return 0.0
dg = self.get_direct_band_gap_dict()
return min(v['value'] for v in dg.values())
def get_sym_eq_kpoints(self, kpoint, cartesian=False, tol=1e-2):
"""
Returns a list of unique symmetrically equivalent k-points.
Args:
kpoint (1x3 array): coordinate of the k-point
cartesian (bool): kpoint is in cartesian or fractional coordinates
tol (float): tolerance below which coordinates are considered equal
Returns:
([1x3 array] or None): if structure is not available returns None
"""
if not self.structure:
return None
sg = SpacegroupAnalyzer(self.structure)
symmops = sg.get_point_group_operations(cartesian=cartesian)
points = np.dot(kpoint, [m.rotation_matrix for m in symmops])
rm_list = []
# identify and remove duplicates from the list of equivalent k-points:
for i in range(len(points) - 1):
for j in range(i + 1, len(points)):
if np.allclose(pbc_diff(points[i], points[j]), [0, 0, 0], tol):
rm_list.append(i)
break
return np.delete(points, rm_list, axis=0)
def get_kpoint_degeneracy(self, kpoint, cartesian=False, tol=1e-2):
"""
Returns degeneracy of a given k-point based on structure symmetry
Args:
kpoint (1x3 array): coordinate of the k-point
cartesian (bool): kpoint is in cartesian or fractional coordinates
tol (float): tolerance below which coordinates are considered equal
Returns:
(int or None): degeneracy or None if structure is not available
"""
all_kpts = self.get_sym_eq_kpoints(kpoint, cartesian, tol=tol)
if all_kpts is not None:
return len(all_kpts)
def as_dict(self):
"""
Json-serializable dict representation of BandStructure.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice_rec": self.lattice_rec.as_dict(), "efermi": self.efermi,
"kpoints": []}
# kpoints are not Kpoint-object dicts but frac coords (this makes the
# dict smaller and avoids repeating the lattice)
for k in self.kpoints:
d["kpoints"].append(k.as_dict()["fcoords"])
d["bands"] = {str(int(spin)): self.bands[spin]
for spin in self.bands}
d["is_metal"] = self.is_metal()
vbm = self.get_vbm()
d["vbm"] = {"energy": vbm["energy"],
"kpoint_index": vbm["kpoint_index"],
"band_index": {str(int(spin)): vbm["band_index"][spin]
for spin in vbm["band_index"]},
'projections': {str(spin): v.tolist() for spin, v in vbm[
'projections'].items()}}
cbm = self.get_cbm()
d['cbm'] = {'energy': cbm['energy'],
'kpoint_index': cbm['kpoint_index'],
'band_index': {str(int(spin)): cbm['band_index'][spin]
for spin in cbm['band_index']},
'projections': {str(spin): v.tolist() for spin, v in cbm[
'projections'].items()}}
d['band_gap'] = self.get_band_gap()
d['labels_dict'] = {}
d['is_spin_polarized'] = self.is_spin_polarized
for c in self.labels_dict:
d['labels_dict'][c] = self.labels_dict[c].as_dict()['fcoords']
d['projections'] = {}
if len(self.projections) != 0:
d['structure'] = self.structure.as_dict()
d['projections'] = {str(int(spin)): v.tolist()
for spin, v in self.projections.items()}
return d
@classmethod
def from_dict(cls, d):
"""
Create from dict.
Args:
d (dict): A dict with all data for a band structure object.
Returns:
A BandStructure object
"""
labels_dict = d['labels_dict']
projections = {}
structure = None
if isinstance(list(d['bands'].values())[0], dict):
eigenvals = {Spin(int(k)): np.array(d['bands'][k]['data'])
for k in d['bands']}
else:
eigenvals = {Spin(int(k)): d['bands'][k] for k in d['bands']}
if 'structure' in d:
structure = Structure.from_dict(d['structure'])
if d.get('projections'):
projections = {Spin(int(spin)): np.array(v)
for spin, v in d["projections"].items()}
return BandStructure(
d['kpoints'], eigenvals,
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
@classmethod
def from_old_dict(cls, d):
"""
Args:
d (dict): A dict with all data for a band structure object.
Returns:
A BandStructure object
"""
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if 'projections' in d and len(d['projections']) != 0:
structure = Structure.from_dict(d['structure'])
projections = {}
for spin in d['projections']:
dd = []
for i in range(len(d['projections'][spin])):
ddd = []
for j in range(len(d['projections'][spin][i])):
dddd = []
for k in range(len(d['projections'][spin][i][j])):
ddddd = []
orb = Orbital(k).name
for l in range(len(d['projections'][spin][i][j][
orb])):
ddddd.append(d['projections'][spin][i][j][
orb][l])
dddd.append(np.array(ddddd))
ddd.append(np.array(dddd))
dd.append(np.array(ddd))
projections[Spin(int(spin))] = np.array(dd)
return BandStructure(
d['kpoints'], {Spin(int(k)): d['bands'][k] for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
class BandStructureSymmLine(BandStructure, MSONable):
"""
This object stores band structures along selected (symmetry) lines in the
Brillouin zone. We call the different symmetry lines (ex: \\Gamma to Z)
"branches".
Args:
kpoints: list of kpoint as numpy arrays, in frac_coords of the
given lattice by default
eigenvals: dict of energies for spin up and spin down
{Spin.up:[][],Spin.down:[][]}, the first index of the array
[][] refers to the band and the second to the index of the
kpoint. The kpoints are ordered according to the order of the
kpoints array. If the band structure is not spin polarized, we
only store one data set under Spin.up.
lattice: The reciprocal lattice.
Pymatgen uses the physics convention of reciprocal lattice vectors
WITH a 2*pi coefficient
efermi: fermi energy
labels_dict: (dict) mapping a label to its kpoint coordinates (in
frac coords or cartesian coordinates depending on coords_are_cartesian).
coords_are_cartesian: Whether coordinates are cartesian.
structure: The crystal structure (as a pymatgen Structure object)
associated with the band structure. This is needed if we
provide projections to the band structure.
projections: dict of orbital projections as {spin: ndarray}. The
indices of the ndarray are [band_index, kpoint_index, orbital_index,
ion_index]. If the band structure is not spin polarized, we only
store one data set under Spin.up.
"""
def __init__(self, kpoints, eigenvals, lattice, efermi, labels_dict,
coords_are_cartesian=False, structure=None,
projections=None):
super(BandStructureSymmLine, self).__init__(
kpoints, eigenvals, lattice, efermi, labels_dict,
coords_are_cartesian, structure, projections)
self.distance = []
self.branches = []
one_group = []
branches_tmp = []
# get labels and distance for each kpoint
previous_kpoint = self.kpoints[0]
previous_distance = 0.0
previous_label = self.kpoints[0].label
for i in range(len(self.kpoints)):
label = self.kpoints[i].label
if label is not None and previous_label is not None:
self.distance.append(previous_distance)
else:
self.distance.append(
np.linalg.norm(self.kpoints[i].cart_coords -
previous_kpoint.cart_coords) +
previous_distance)
previous_kpoint = self.kpoints[i]
previous_distance = self.distance[i]
if label:
if previous_label:
if len(one_group) != 0:
branches_tmp.append(one_group)
one_group = []
previous_label = label
one_group.append(i)
if len(one_group) != 0:
branches_tmp.append(one_group)
for b in branches_tmp:
self.branches.append(
{"start_index": b[0], "end_index": b[-1],
"name": str(self.kpoints[b[0]].label) + "-" +
str(self.kpoints[b[-1]].label)})
self.is_spin_polarized = False
if len(self.bands) == 2:
self.is_spin_polarized = True
def get_equivalent_kpoints(self, index):
"""
Returns the list of kpoint indices equivalent (meaning they are the
same frac coords) to the given one.
Args:
index: the kpoint index
Returns:
a list of equivalent indices
TODO: now it uses the label; we might want to use coordinates instead
(in case there was a mislabel)
"""
# if the kpoint has no label it can't have a repetition along the band
# structure line object
if self.kpoints[index].label is None:
return [index]
list_index_kpoints = []
for i in range(len(self.kpoints)):
if self.kpoints[i].label == self.kpoints[index].label:
list_index_kpoints.append(i)
return list_index_kpoints
def get_branch(self, index):
"""
Returns in what branch(es) is the kpoint. There can be several
branches.
Args:
index: the kpoint index
Returns:
A list of dictionaries [{"name","start_index","end_index","index"}]
indicating all branches in which the k_point is. It takes into
account the fact that one kpoint (e.g., \\Gamma) can be in several
branches
"""
to_return = []
for i in self.get_equivalent_kpoints(index):
for b in self.branches:
if b["start_index"] <= i <= b["end_index"]:
to_return.append({"name": b["name"],
"start_index": b["start_index"],
"end_index": b["end_index"],
"index": i})
return to_return
def apply_scissor(self, new_band_gap):
"""
Apply a scissor operator (shift of the CBM) to fit the given band gap.
If it's a metal, we look for the band crossing the fermi level
and shift this one up. This will not work all the time for metals!
Args:
new_band_gap: the band gap the scissored band structure needs to have.
Returns:
a BandStructureSymmLine object with the applied scissor shift
"""
if self.is_metal():
# moves then the highest index band crossing the fermi level
# find this band...
max_index = -1000
# spin_index = None
for i in range(self.nb_bands):
below = False
above = False
for j in range(len(self.kpoints)):
if self.bands[Spin.up][i][j] < self.efermi:
below = True
if self.bands[Spin.up][i][j] > self.efermi:
above = True
if above and below:
if i > max_index:
max_index = i
# spin_index = Spin.up
if self.is_spin_polarized:
below = False
above = False
for j in range(len(self.kpoints)):
if self.bands[Spin.down][i][j] < self.efermi:
below = True
if self.bands[Spin.down][i][j] > self.efermi:
above = True
if above and below:
if i > max_index:
max_index = i
# spin_index = Spin.down
old_dict = self.as_dict()
shift = new_band_gap
for spin in old_dict['bands']:
for k in range(len(old_dict['bands'][spin])):
for v in range(len(old_dict['bands'][spin][k])):
if k >= max_index:
old_dict['bands'][spin][k][v] = \
old_dict['bands'][spin][k][v] + shift
else:
shift = new_band_gap - self.get_band_gap()['energy']
old_dict = self.as_dict()
for spin in old_dict['bands']:
for k in range(len(old_dict['bands'][spin])):
for v in range(len(old_dict['bands'][spin][k])):
if old_dict['bands'][spin][k][v] >= \
old_dict['cbm']['energy']:
old_dict['bands'][spin][k][v] = \
old_dict['bands'][spin][k][v] + shift
old_dict['efermi'] = old_dict['efermi'] + shift
return BandStructureSymmLine.from_dict(old_dict)
def as_dict(self):
"""
Json-serializable dict representation of BandStructureSymmLine.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice_rec": self.lattice_rec.as_dict(), "efermi": self.efermi,
"kpoints": []}
# kpoints are not Kpoint-object dicts but frac coords (this makes the
# dict smaller and avoids repeating the lattice)
for k in self.kpoints:
d["kpoints"].append(k.as_dict()["fcoords"])
d["branches"] = self.branches
d["bands"] = {str(int(spin)): self.bands[spin].tolist()
for spin in self.bands}
d["is_metal"] = self.is_metal()
vbm = self.get_vbm()
d["vbm"] = {"energy": vbm["energy"],
"kpoint_index": vbm["kpoint_index"],
"band_index": {str(int(spin)): vbm["band_index"][spin]
for spin in vbm["band_index"]},
'projections': {str(spin): v.tolist() for spin, v in vbm[
'projections'].items()}}
cbm = self.get_cbm()
d['cbm'] = {'energy': cbm['energy'],
'kpoint_index': cbm['kpoint_index'],
'band_index': {str(int(spin)): cbm['band_index'][spin]
for spin in cbm['band_index']},
'projections': {str(spin): v.tolist() for spin, v in cbm[
'projections'].items()}}
d['band_gap'] = self.get_band_gap()
d['labels_dict'] = {}
d['is_spin_polarized'] = self.is_spin_polarized
# MongoDB does not accept keys starting with $. Add a blank space to fix the problem
for c in self.labels_dict:
mongo_key = c if not c.startswith("$") else " " + c
d['labels_dict'][mongo_key] = self.labels_dict[c].as_dict()[
'fcoords']
if len(self.projections) != 0:
d['structure'] = self.structure.as_dict()
d['projections'] = {str(int(spin)): v.tolist()
for spin, v in self.projections.items()}
return d
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): A dict with all data for a band structure symm line
object.
Returns:
A BandStructureSymmLine object
"""
try:
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if d.get('projections'):
if isinstance(d["projections"]['1'][0][0], dict):
raise ValueError("Old band structure dict format detected!")
structure = Structure.from_dict(d['structure'])
projections = {Spin(int(spin)): np.array(v)
for spin, v in d["projections"].items()}
return BandStructureSymmLine(
d['kpoints'], {Spin(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
except Exception:
warnings.warn("Trying from_dict failed. Now we are trying the old "
"format. Please convert your BS dicts to the new "
"format. The old format will be retired in pymatgen "
"5.0.")
return BandStructureSymmLine.from_old_dict(d)
@classmethod
def from_old_dict(cls, d):
"""
Args:
d (dict): A dict with all data for a band structure symm line
object.
Returns:
A BandStructureSymmLine object
"""
# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
projections = {}
structure = None
if 'projections' in d and len(d['projections']) != 0:
structure = Structure.from_dict(d['structure'])
projections = {}
for spin in d['projections']:
dd = []
for i in range(len(d['projections'][spin])):
ddd = []
for j in range(len(d['projections'][spin][i])):
dddd = []
for k in range(len(d['projections'][spin][i][j])):
ddddd = []
orb = Orbital(k).name
for l in range(len(d['projections'][spin][i][j][
orb])):
ddddd.append(d['projections'][spin][i][j][
orb][l])
dddd.append(np.array(ddddd))
ddd.append(np.array(dddd))
dd.append(np.array(ddd))
projections[Spin(int(spin))] = np.array(dd)
return BandStructureSymmLine(
d['kpoints'], {Spin(int(k)): d['bands'][k]
for k in d['bands']},
Lattice(d['lattice_rec']['matrix']), d['efermi'],
labels_dict, structure=structure, projections=projections)
def get_reconstructed_band_structure(list_bs, efermi=None):
"""
This method takes a list of band structures and reconstructs
one band structure object from all of them.
This is typically very useful when you split non self consistent
band structure runs in several independent jobs and want to merge back
the results
Args:
list_bs: A list of BandStructure or BandStructureSymmLine objects.
efermi: The Fermi energy of the reconstructed band structure. If
None is assigned an average of all the Fermi energy in each
object in the list_bs is used.
Returns:
A BandStructure or BandStructureSymmLine object (depending on
the type of the list_bs objects)
"""
if efermi is None:
efermi = sum([b.efermi for b in list_bs]) / len(list_bs)
kpoints = []
labels_dict = {}
rec_lattice = list_bs[0].lattice_rec
nb_bands = min([list_bs[i].nb_bands for i in range(len(list_bs))])
kpoints = np.concatenate([[k.frac_coords for k in bs.kpoints]
for bs in list_bs])
dicts = [bs.labels_dict for bs in list_bs]
labels_dict = {k: v.frac_coords for d in dicts for k, v in d.items()}
eigenvals = {}
eigenvals[Spin.up] = np.concatenate([bs.bands[Spin.up][:nb_bands]
for bs in list_bs], axis=1)
if list_bs[0].is_spin_polarized:
eigenvals[Spin.down] = np.concatenate([bs.bands[Spin.down][:nb_bands]
for bs in list_bs], axis=1)
projections = {}
if len(list_bs[0].projections) != 0:
projs = [bs.projections[Spin.up][:nb_bands] for bs in list_bs]
projections[Spin.up] = np.concatenate(projs, axis=1)
if list_bs[0].is_spin_polarized:
projs = [bs.projections[Spin.down][:nb_bands] for bs in list_bs]
projections[Spin.down] = np.concatenate(projs, axis=1)
if isinstance(list_bs[0], BandStructureSymmLine):
return BandStructureSymmLine(kpoints, eigenvals, rec_lattice,
efermi, labels_dict,
structure=list_bs[0].structure,
projections=projections)
else:
return BandStructure(kpoints, eigenvals, rec_lattice, efermi,
labels_dict, structure=list_bs[0].structure,
projections=projections)
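# --- Illustrative sketch (not part of the original module) ---
# A minimal, hedged example of querying the BandStructure API documented
# above. The `bs` argument is assumed to be a BandStructure built
# elsewhere (for instance by a calculation parser); nothing runs on import.
def _example_band_gap_report(bs):
    """Print basic gap information for a BandStructure instance."""
    if bs.is_metal():
        print("metal: the fermi level crosses a band, no gap")
        return
    gap = bs.get_band_gap()  # {"energy", "direct", "transition"}
    print("gap energy: {:.3f} (direct: {}, transition: {})".format(
        gap["energy"], gap["direct"], gap["transition"]))
    print("VBM energy: {}, CBM energy: {}".format(
        bs.get_vbm()["energy"], bs.get_cbm()["energy"]))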
|
gpetretto/pymatgen
|
pymatgen/electronic_structure/bandstructure.py
|
Python
|
mit
| 41,659
|
[
"CRYSTAL",
"pymatgen"
] |
70adda6077264d8a2e45006bef01f163555e3c013cf1241f8fac1afe12751485
|
import matplotlib.pyplot as plt
import numpy as np
def plot():
# Make plot with horizontal colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
np.random.seed(123)
data = np.clip(np.random.randn(250, 250), -1, 1)
cax = ax.imshow(data, interpolation="nearest")
ax.set_title("Gaussian noise with horizontal colorbar")
cbar = fig.colorbar(cax, ticks=[-1, 0, 1], orientation="horizontal")
# horizontal colorbar
# Use comma in label
cbar.ax.set_xticklabels(["Low", "Medium", "High,Higher"])
return fig
def test():
from .helpers import assert_equality
assert_equality(plot, __file__[:-3] + "_reference.tex")
|
nschloe/matplotlib2tikz
|
tests/test_noise2.py
|
Python
|
mit
| 667
|
[
"Gaussian"
] |
e82ad0d734b9adc0ac845be67fad0e53fab0ec19fe2a8055340202f822801c64
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-get-normalized-queue-length.py
# Author : Ricardo Graciani
########################################################################
"""
Report Normalized CPU length of queue
This script was used by the dirac-pilot script to set the CPUTime limit for
the matching but now this is no more the case.
Usage:
dirac-wms-get-normalized-queue-length [options] ... Queue ...
Arguments:
Queue: GlueCEUniqueID of the Queue (e.g., juk.nikhef.nl:8443/cream-pbs-lhcb)
Example:
$ dirac-wms-get-normalized-queue-length cclcgceli03.in2p3.fr:2119/jobmanager-bqs-long
cclcgceli03.in2p3.fr:2119/jobmanager-bqs-long 857400.0
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.WorkloadManagementSystem.Client.CPUNormalization import queueNormalizedCPU
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
Script.showHelp()
exitCode = 0
for ceUniqueID in args:
normCPU = queueNormalizedCPU(ceUniqueID)
if not normCPU['OK']:
print('ERROR %s:' % ceUniqueID, normCPU['Message'])
exitCode = 2
continue
print(ceUniqueID, normCPU['Value'])
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
|
yujikato/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_wms_get_normalized_queue_length.py
|
Python
|
gpl-3.0
| 1,525
|
[
"DIRAC"
] |
b7507fedade1e0c91f17f91731502ecad27b5952f4894ffb621a8d36126baa08
|
"""Minimal Python 2 & 3 shim around all Qt bindings
DOCUMENTATION
Qt.py was born in the film and visual effects industry to address
the growing need for the development of software capable of running
with more than one flavour of the Qt bindings for Python - PySide,
PySide2, PyQt4 and PyQt5.
1. Build for one, run with all
2. Explicit is better than implicit
3. Support co-existence
Default resolution order:
- PySide2
- PyQt5
- PySide
- PyQt4
Usage:
>> import sys
>> from Qt import QtWidgets
>> app = QtWidgets.QApplication(sys.argv)
>> button = QtWidgets.QPushButton("Hello World")
>> button.show()
>> app.exec_()
All members of PySide2 are mapped from other bindings, should they exist.
If no equivalent member exist, it is excluded from Qt.py and inaccessible.
The idea is to highlight members that exist across all supported bindings,
and guarantee that code that runs on one binding runs on all others.
For more details, visit https://github.com/mottosso/Qt.py
LICENSE
See end of file for license (MIT, BSD) information.
"""
import os
import sys
import types
import shutil
import importlib
__version__ = "1.2.3"
# Enable support for `from Qt import *`
__all__ = []
# Flags from environment variables
QT_VERBOSE = bool(os.getenv("QT_VERBOSE"))
QT_PREFERRED_BINDING = os.getenv("QT_PREFERRED_BINDING", "")
QT_SIP_API_HINT = os.getenv("QT_SIP_API_HINT")
# Reference to Qt.py
Qt = sys.modules[__name__]
Qt.QtCompat = types.ModuleType("QtCompat")
try:
long
except NameError:
# Python 3 compatibility
long = int
"""Common members of all bindings
This is where each member of Qt.py is explicitly defined.
It is based on a "lowest common denominator" of all bindings;
including members found in each of the 4 bindings.
The "_common_members" dictionary is generated using the
build_membership.sh script.
"""
_common_members = {
"QtCore": [
"QAbstractAnimation",
"QAbstractEventDispatcher",
"QAbstractItemModel",
"QAbstractListModel",
"QAbstractState",
"QAbstractTableModel",
"QAbstractTransition",
"QAnimationGroup",
"QBasicTimer",
"QBitArray",
"QBuffer",
"QByteArray",
"QByteArrayMatcher",
"QChildEvent",
"QCoreApplication",
"QCryptographicHash",
"QDataStream",
"QDate",
"QDateTime",
"QDir",
"QDirIterator",
"QDynamicPropertyChangeEvent",
"QEasingCurve",
"QElapsedTimer",
"QEvent",
"QEventLoop",
"QEventTransition",
"QFile",
"QFileInfo",
"QFileSystemWatcher",
"QFinalState",
"QGenericArgument",
"QGenericReturnArgument",
"QHistoryState",
"QItemSelectionRange",
"QIODevice",
"QLibraryInfo",
"QLine",
"QLineF",
"QLocale",
"QMargins",
"QMetaClassInfo",
"QMetaEnum",
"QMetaMethod",
"QMetaObject",
"QMetaProperty",
"QMimeData",
"QModelIndex",
"QMutex",
"QMutexLocker",
"QObject",
"QParallelAnimationGroup",
"QPauseAnimation",
"QPersistentModelIndex",
"QPluginLoader",
"QPoint",
"QPointF",
"QProcess",
"QProcessEnvironment",
"QPropertyAnimation",
"QReadLocker",
"QReadWriteLock",
"QRect",
"QRectF",
"QRegExp",
"QResource",
"QRunnable",
"QSemaphore",
"QSequentialAnimationGroup",
"QSettings",
"QSignalMapper",
"QSignalTransition",
"QSize",
"QSizeF",
"QSocketNotifier",
"QState",
"QStateMachine",
"QSysInfo",
"QSystemSemaphore",
"QT_TRANSLATE_NOOP",
"QT_TR_NOOP",
"QT_TR_NOOP_UTF8",
"QTemporaryFile",
"QTextBoundaryFinder",
"QTextCodec",
"QTextDecoder",
"QTextEncoder",
"QTextStream",
"QTextStreamManipulator",
"QThread",
"QThreadPool",
"QTime",
"QTimeLine",
"QTimer",
"QTimerEvent",
"QTranslator",
"QUrl",
"QVariantAnimation",
"QWaitCondition",
"QWriteLocker",
"QXmlStreamAttribute",
"QXmlStreamAttributes",
"QXmlStreamEntityDeclaration",
"QXmlStreamEntityResolver",
"QXmlStreamNamespaceDeclaration",
"QXmlStreamNotationDeclaration",
"QXmlStreamReader",
"QXmlStreamWriter",
"Qt",
"QtCriticalMsg",
"QtDebugMsg",
"QtFatalMsg",
"QtMsgType",
"QtSystemMsg",
"QtWarningMsg",
"qAbs",
"qAddPostRoutine",
"qChecksum",
"qCritical",
"qDebug",
"qFatal",
"qFuzzyCompare",
"qIsFinite",
"qIsInf",
"qIsNaN",
"qIsNull",
"qRegisterResourceData",
"qUnregisterResourceData",
"qVersion",
"qWarning",
"qrand",
"qsrand"
],
"QtGui": [
"QAbstractTextDocumentLayout",
"QActionEvent",
"QBitmap",
"QBrush",
"QClipboard",
"QCloseEvent",
"QColor",
"QConicalGradient",
"QContextMenuEvent",
"QCursor",
"QDesktopServices",
"QDoubleValidator",
"QDrag",
"QDragEnterEvent",
"QDragLeaveEvent",
"QDragMoveEvent",
"QDropEvent",
"QFileOpenEvent",
"QFocusEvent",
"QFont",
"QFontDatabase",
"QFontInfo",
"QFontMetrics",
"QFontMetricsF",
"QGradient",
"QHelpEvent",
"QHideEvent",
"QHoverEvent",
"QIcon",
"QIconDragEvent",
"QIconEngine",
"QImage",
"QImageIOHandler",
"QImageReader",
"QImageWriter",
"QInputEvent",
"QInputMethodEvent",
"QIntValidator",
"QKeyEvent",
"QKeySequence",
"QLinearGradient",
"QMatrix2x2",
"QMatrix2x3",
"QMatrix2x4",
"QMatrix3x2",
"QMatrix3x3",
"QMatrix3x4",
"QMatrix4x2",
"QMatrix4x3",
"QMatrix4x4",
"QMouseEvent",
"QMoveEvent",
"QMovie",
"QPaintDevice",
"QPaintEngine",
"QPaintEngineState",
"QPaintEvent",
"QPainter",
"QPainterPath",
"QPainterPathStroker",
"QPalette",
"QPen",
"QPicture",
"QPictureIO",
"QPixmap",
"QPixmapCache",
"QPolygon",
"QPolygonF",
"QQuaternion",
"QRadialGradient",
"QRegExpValidator",
"QRegion",
"QResizeEvent",
"QSessionManager",
"QShortcutEvent",
"QShowEvent",
"QStandardItem",
"QStandardItemModel",
"QStatusTipEvent",
"QSyntaxHighlighter",
"QTabletEvent",
"QTextBlock",
"QTextBlockFormat",
"QTextBlockGroup",
"QTextBlockUserData",
"QTextCharFormat",
"QTextCursor",
"QTextDocument",
"QTextDocumentFragment",
"QTextFormat",
"QTextFragment",
"QTextFrame",
"QTextFrameFormat",
"QTextImageFormat",
"QTextInlineObject",
"QTextItem",
"QTextLayout",
"QTextLength",
"QTextLine",
"QTextList",
"QTextListFormat",
"QTextObject",
"QTextObjectInterface",
"QTextOption",
"QTextTable",
"QTextTableCell",
"QTextTableCellFormat",
"QTextTableFormat",
"QTouchEvent",
"QTransform",
"QValidator",
"QVector2D",
"QVector3D",
"QVector4D",
"QWhatsThisClickedEvent",
"QWheelEvent",
"QWindowStateChangeEvent",
"qAlpha",
"qBlue",
"qGray",
"qGreen",
"qIsGray",
"qRed",
"qRgb",
"qRgba"
],
"QtHelp": [
"QHelpContentItem",
"QHelpContentModel",
"QHelpContentWidget",
"QHelpEngine",
"QHelpEngineCore",
"QHelpIndexModel",
"QHelpIndexWidget",
"QHelpSearchEngine",
"QHelpSearchQuery",
"QHelpSearchQueryWidget",
"QHelpSearchResultWidget"
],
"QtMultimedia": [
"QAbstractVideoBuffer",
"QAbstractVideoSurface",
"QAudio",
"QAudioDeviceInfo",
"QAudioFormat",
"QAudioInput",
"QAudioOutput",
"QVideoFrame",
"QVideoSurfaceFormat"
],
"QtNetwork": [
"QAbstractNetworkCache",
"QAbstractSocket",
"QAuthenticator",
"QHostAddress",
"QHostInfo",
"QLocalServer",
"QLocalSocket",
"QNetworkAccessManager",
"QNetworkAddressEntry",
"QNetworkCacheMetaData",
"QNetworkConfiguration",
"QNetworkConfigurationManager",
"QNetworkCookie",
"QNetworkCookieJar",
"QNetworkDiskCache",
"QNetworkInterface",
"QNetworkProxy",
"QNetworkProxyFactory",
"QNetworkProxyQuery",
"QNetworkReply",
"QNetworkRequest",
"QNetworkSession",
"QSsl",
"QTcpServer",
"QTcpSocket",
"QUdpSocket"
],
"QtOpenGL": [
"QGL",
"QGLContext",
"QGLFormat",
"QGLWidget"
],
"QtPrintSupport": [
"QAbstractPrintDialog",
"QPageSetupDialog",
"QPrintDialog",
"QPrintEngine",
"QPrintPreviewDialog",
"QPrintPreviewWidget",
"QPrinter",
"QPrinterInfo"
],
"QtSql": [
"QSql",
"QSqlDatabase",
"QSqlDriver",
"QSqlDriverCreatorBase",
"QSqlError",
"QSqlField",
"QSqlIndex",
"QSqlQuery",
"QSqlQueryModel",
"QSqlRecord",
"QSqlRelation",
"QSqlRelationalDelegate",
"QSqlRelationalTableModel",
"QSqlResult",
"QSqlTableModel"
],
"QtSvg": [
"QGraphicsSvgItem",
"QSvgGenerator",
"QSvgRenderer",
"QSvgWidget"
],
"QtTest": [
"QTest"
],
"QtWidgets": [
"QAbstractButton",
"QAbstractGraphicsShapeItem",
"QAbstractItemDelegate",
"QAbstractItemView",
"QAbstractScrollArea",
"QAbstractSlider",
"QAbstractSpinBox",
"QAction",
"QActionGroup",
"QApplication",
"QBoxLayout",
"QButtonGroup",
"QCalendarWidget",
"QCheckBox",
"QColorDialog",
"QColumnView",
"QComboBox",
"QCommandLinkButton",
"QCommonStyle",
"QCompleter",
"QDataWidgetMapper",
"QDateEdit",
"QDateTimeEdit",
"QDesktopWidget",
"QDial",
"QDialog",
"QDialogButtonBox",
"QDirModel",
"QDockWidget",
"QDoubleSpinBox",
"QErrorMessage",
"QFileDialog",
"QFileIconProvider",
"QFileSystemModel",
"QFocusFrame",
"QFontComboBox",
"QFontDialog",
"QFormLayout",
"QFrame",
"QGesture",
"QGestureEvent",
"QGestureRecognizer",
"QGraphicsAnchor",
"QGraphicsAnchorLayout",
"QGraphicsBlurEffect",
"QGraphicsColorizeEffect",
"QGraphicsDropShadowEffect",
"QGraphicsEffect",
"QGraphicsEllipseItem",
"QGraphicsGridLayout",
"QGraphicsItem",
"QGraphicsItemGroup",
"QGraphicsLayout",
"QGraphicsLayoutItem",
"QGraphicsLineItem",
"QGraphicsLinearLayout",
"QGraphicsObject",
"QGraphicsOpacityEffect",
"QGraphicsPathItem",
"QGraphicsPixmapItem",
"QGraphicsPolygonItem",
"QGraphicsProxyWidget",
"QGraphicsRectItem",
"QGraphicsRotation",
"QGraphicsScale",
"QGraphicsScene",
"QGraphicsSceneContextMenuEvent",
"QGraphicsSceneDragDropEvent",
"QGraphicsSceneEvent",
"QGraphicsSceneHelpEvent",
"QGraphicsSceneHoverEvent",
"QGraphicsSceneMouseEvent",
"QGraphicsSceneMoveEvent",
"QGraphicsSceneResizeEvent",
"QGraphicsSceneWheelEvent",
"QGraphicsSimpleTextItem",
"QGraphicsTextItem",
"QGraphicsTransform",
"QGraphicsView",
"QGraphicsWidget",
"QGridLayout",
"QGroupBox",
"QHBoxLayout",
"QHeaderView",
"QInputDialog",
"QItemDelegate",
"QItemEditorCreatorBase",
"QItemEditorFactory",
"QKeyEventTransition",
"QLCDNumber",
"QLabel",
"QLayout",
"QLayoutItem",
"QLineEdit",
"QListView",
"QListWidget",
"QListWidgetItem",
"QMainWindow",
"QMdiArea",
"QMdiSubWindow",
"QMenu",
"QMenuBar",
"QMessageBox",
"QMouseEventTransition",
"QPanGesture",
"QPinchGesture",
"QPlainTextDocumentLayout",
"QPlainTextEdit",
"QProgressBar",
"QProgressDialog",
"QPushButton",
"QRadioButton",
"QRubberBand",
"QScrollArea",
"QScrollBar",
"QShortcut",
"QSizeGrip",
"QSizePolicy",
"QSlider",
"QSpacerItem",
"QSpinBox",
"QSplashScreen",
"QSplitter",
"QSplitterHandle",
"QStackedLayout",
"QStackedWidget",
"QStatusBar",
"QStyle",
"QStyleFactory",
"QStyleHintReturn",
"QStyleHintReturnMask",
"QStyleHintReturnVariant",
"QStyleOption",
"QStyleOptionButton",
"QStyleOptionComboBox",
"QStyleOptionComplex",
"QStyleOptionDockWidget",
"QStyleOptionFocusRect",
"QStyleOptionFrame",
"QStyleOptionGraphicsItem",
"QStyleOptionGroupBox",
"QStyleOptionHeader",
"QStyleOptionMenuItem",
"QStyleOptionProgressBar",
"QStyleOptionRubberBand",
"QStyleOptionSizeGrip",
"QStyleOptionSlider",
"QStyleOptionSpinBox",
"QStyleOptionTab",
"QStyleOptionTabBarBase",
"QStyleOptionTabWidgetFrame",
"QStyleOptionTitleBar",
"QStyleOptionToolBar",
"QStyleOptionToolBox",
"QStyleOptionToolButton",
"QStyleOptionViewItem",
"QStylePainter",
"QStyledItemDelegate",
"QSwipeGesture",
"QSystemTrayIcon",
"QTabBar",
"QTabWidget",
"QTableView",
"QTableWidget",
"QTableWidgetItem",
"QTableWidgetSelectionRange",
"QTapAndHoldGesture",
"QTapGesture",
"QTextBrowser",
"QTextEdit",
"QTimeEdit",
"QToolBar",
"QToolBox",
"QToolButton",
"QToolTip",
"QTreeView",
"QTreeWidget",
"QTreeWidgetItem",
"QTreeWidgetItemIterator",
"QUndoCommand",
"QUndoGroup",
"QUndoStack",
"QUndoView",
"QVBoxLayout",
"QWhatsThis",
"QWidget",
"QWidgetAction",
"QWidgetItem",
"QWizard",
"QWizardPage"
],
"QtX11Extras": [
"QX11Info"
],
"QtXml": [
"QDomAttr",
"QDomCDATASection",
"QDomCharacterData",
"QDomComment",
"QDomDocument",
"QDomDocumentFragment",
"QDomDocumentType",
"QDomElement",
"QDomEntity",
"QDomEntityReference",
"QDomImplementation",
"QDomNamedNodeMap",
"QDomNode",
"QDomNodeList",
"QDomNotation",
"QDomProcessingInstruction",
"QDomText",
"QXmlAttributes",
"QXmlContentHandler",
"QXmlDTDHandler",
"QXmlDeclHandler",
"QXmlDefaultHandler",
"QXmlEntityResolver",
"QXmlErrorHandler",
"QXmlInputSource",
"QXmlLexicalHandler",
"QXmlLocator",
"QXmlNamespaceSupport",
"QXmlParseException",
"QXmlReader",
"QXmlSimpleReader"
],
"QtXmlPatterns": [
"QAbstractMessageHandler",
"QAbstractUriResolver",
"QAbstractXmlNodeModel",
"QAbstractXmlReceiver",
"QSourceLocation",
"QXmlFormatter",
"QXmlItem",
"QXmlName",
"QXmlNamePool",
"QXmlNodeModelIndex",
"QXmlQuery",
"QXmlResultItems",
"QXmlSchema",
"QXmlSchemaValidator",
"QXmlSerializer"
]
}
""" Missing members
This mapping describes members that have been deprecated
in one or more bindings and have been left out of the
_common_members mapping.
The member can provide an extra details string to be
included in exceptions and warnings.
"""
_missing_members = {
"QtGui": {
"QMatrix": "Deprecated in PyQt5",
},
}
def _qInstallMessageHandler(handler):
"""Install a message handler that works in all bindings
Args:
handler: A function that takes 3 arguments, or None
"""
def messageOutputHandler(*args):
# In Qt4 bindings, message handlers are passed 2 arguments
# In Qt5 bindings, message handlers are passed 3 arguments
# The first argument is a QtMsgType
# The last argument is the message to be printed
        # The middle argument (if passed) is a QMessageLogContext
if len(args) == 3:
msgType, logContext, msg = args
elif len(args) == 2:
msgType, msg = args
logContext = None
else:
raise TypeError(
"handler expected 2 or 3 arguments, got {0}".format(len(args)))
if isinstance(msg, bytes):
# In python 3, some bindings pass a bytestring, which cannot be
# used elsewhere. Decoding a python 2 or 3 bytestring object will
# consistently return a unicode object.
msg = msg.decode()
handler(msgType, logContext, msg)
passObject = messageOutputHandler if handler else handler
if Qt.IsPySide or Qt.IsPyQt4:
return Qt._QtCore.qInstallMsgHandler(passObject)
elif Qt.IsPySide2 or Qt.IsPyQt5:
return Qt._QtCore.qInstallMessageHandler(passObject)
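# A hedged usage sketch for the wrapper above. The three-argument handler
# signature shown is the one Qt.py normalises every binding to; the name
# `my_handler` is illustrative only.
#
#   >> def my_handler(msgType, logContext, msg):
#   ..     sys.stderr.write(msg + "\n")
#   >> QtCompat.qInstallMessageHandler(my_handler)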
def _getcpppointer(object):
if hasattr(Qt, "_shiboken2"):
return getattr(Qt, "_shiboken2").getCppPointer(object)[0]
elif hasattr(Qt, "_shiboken"):
return getattr(Qt, "_shiboken").getCppPointer(object)[0]
elif hasattr(Qt, "_sip"):
return getattr(Qt, "_sip").unwrapinstance(object)
raise AttributeError("'module' has no attribute 'getCppPointer'")
def _wrapinstance(ptr, base=None):
"""Enable implicit cast of pointer to most suitable class
    This behaviour is available in sip by default.
Based on http://nathanhorne.com/pyqtpyside-wrap-instance
Usage:
This mechanism kicks in under these circumstances.
1. Qt.py is using PySide 1 or 2.
2. A `base` argument is not provided.
See :func:`QtCompat.wrapInstance()`
Arguments:
ptr (long): Pointer to QObject in memory
base (QObject, optional): Base class to wrap with. Defaults to QObject,
which should handle anything.
"""
assert isinstance(ptr, long), "Argument 'ptr' must be of type <long>"
assert (base is None) or issubclass(base, Qt.QtCore.QObject), (
"Argument 'base' must be of type <QObject>")
if Qt.IsPyQt4 or Qt.IsPyQt5:
func = getattr(Qt, "_sip").wrapinstance
elif Qt.IsPySide2:
func = getattr(Qt, "_shiboken2").wrapInstance
elif Qt.IsPySide:
func = getattr(Qt, "_shiboken").wrapInstance
else:
raise AttributeError("'module' has no attribute 'wrapInstance'")
if base is None:
q_object = func(long(ptr), Qt.QtCore.QObject)
meta_object = q_object.metaObject()
class_name = meta_object.className()
super_class_name = meta_object.superClass().className()
if hasattr(Qt.QtWidgets, class_name):
base = getattr(Qt.QtWidgets, class_name)
elif hasattr(Qt.QtWidgets, super_class_name):
base = getattr(Qt.QtWidgets, super_class_name)
else:
base = Qt.QtCore.QObject
return func(long(ptr), base)
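# A minimal round-trip sketch for the pointer helpers above (a sketch only;
# it assumes a running QApplication, and the widget/variable names are
# illustrative):
#
#   >> from Qt import QtCompat, QtWidgets
#   >> button = QtWidgets.QPushButton("Hello")
#   >> pointer = QtCompat.getCppPointer(button)
#   >> same_button = QtCompat.wrapInstance(pointer)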
def _isvalid(object):
"""Check if the object is valid to use in Python runtime.
Usage:
See :func:`QtCompat.isValid()`
Arguments:
object (QObject): QObject to check the validity of.
"""
assert isinstance(object, Qt.QtCore.QObject)
if hasattr(Qt, "_shiboken2"):
return getattr(Qt, "_shiboken2").isValid(object)
elif hasattr(Qt, "_shiboken"):
return getattr(Qt, "_shiboken").isValid(object)
elif hasattr(Qt, "_sip"):
return not getattr(Qt, "_sip").isdeleted(object)
else:
raise AttributeError("'module' has no attribute isValid")
def _translate(context, sourceText, *args):
# In Qt4 bindings, translate can be passed 2 or 3 arguments
# In Qt5 bindings, translate can be passed 2 arguments
# The first argument is disambiguation[str]
# The last argument is n[int]
# The middle argument can be encoding[QtCore.QCoreApplication.Encoding]
if len(args) == 3:
disambiguation, encoding, n = args
elif len(args) == 2:
disambiguation, n = args
encoding = None
else:
raise TypeError(
"Expected 4 or 5 arguments, got {0}.".format(len(args) + 2))
if hasattr(Qt.QtCore, "QCoreApplication"):
app = getattr(Qt.QtCore, "QCoreApplication")
else:
raise NotImplementedError(
"Missing QCoreApplication implementation for {binding}".format(
binding=Qt.__binding__,
)
)
if Qt.__binding__ in ("PySide2", "PyQt5"):
sanitized_args = [context, sourceText, disambiguation, n]
else:
sanitized_args = [
context,
sourceText,
disambiguation,
encoding or app.CodecForTr,
n
]
return app.translate(*sanitized_args)
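# A hedged example of the unified signature exposed as
# QtCompat.translate(context, sourceText, disambiguation, n); the argument
# values below are illustrative only:
#
#   >> from Qt import QtCompat
#   >> QtCompat.translate("MainWindow", "Hello", None, -1)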
def _loadUi(uifile, baseinstance=None):
"""Dynamically load a user interface from the given `uifile`
This function calls `uic.loadUi` if using PyQt bindings,
else it implements a comparable binding for PySide.
Documentation:
http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi
Arguments:
uifile (str): Absolute path to Qt Designer file.
baseinstance (QWidget): Instantiated QWidget or subclass thereof
Return:
baseinstance if `baseinstance` is not `None`. Otherwise
return the newly created instance of the user interface.
"""
if hasattr(Qt, "_uic"):
return Qt._uic.loadUi(uifile, baseinstance)
elif hasattr(Qt, "_QtUiTools"):
# Implement `PyQt5.uic.loadUi` for PySide(2)
class _UiLoader(Qt._QtUiTools.QUiLoader):
"""Create the user interface in a base instance.
Unlike `Qt._QtUiTools.QUiLoader` itself this class does not
create a new instance of the top-level widget, but creates the user
interface in an existing instance of the top-level class if needed.
This mimics the behaviour of `PyQt5.uic.loadUi`.
"""
def __init__(self, baseinstance):
super(_UiLoader, self).__init__(baseinstance)
self.baseinstance = baseinstance
self.custom_widgets = {}
def _loadCustomWidgets(self, etree):
"""
                Workaround for the pyside-77 bug.
                The QUiLoader docs say to use the registerCustomWidget
                method, but that causes a segfault on some platforms.
                Instead we fetch the Python class objects from the
                customwidgets DOM node and use them directly in
                createWidget.
"""
def headerToModule(header):
"""
Translate a header file to python module path
foo/bar.h => foo.bar
"""
# Remove header extension
module = os.path.splitext(header)[0]
# Replace os separator by python module separator
return module.replace("/", ".").replace("\\", ".")
custom_widgets = etree.find("customwidgets")
if custom_widgets is None:
return
for custom_widget in custom_widgets:
class_name = custom_widget.find("class").text
header = custom_widget.find("header").text
module = importlib.import_module(headerToModule(header))
self.custom_widgets[class_name] = getattr(module,
class_name)
def load(self, uifile, *args, **kwargs):
from xml.etree.ElementTree import ElementTree
# For whatever reason, if this doesn't happen then
# reading an invalid or non-existing .ui file throws
# a RuntimeError.
etree = ElementTree()
etree.parse(uifile)
self._loadCustomWidgets(etree)
widget = Qt._QtUiTools.QUiLoader.load(
self, uifile, *args, **kwargs)
# Workaround for PySide 1.0.9, see issue #208
widget.parentWidget()
return widget
def createWidget(self, class_name, parent=None, name=""):
"""Called for each widget defined in ui file
Overridden here to populate `baseinstance` instead.
"""
if parent is None and self.baseinstance:
# Supposed to create the top-level widget,
# return the base instance instead
return self.baseinstance
# For some reason, Line is not in the list of available
# widgets, but works fine, so we have to special case it here.
if class_name in self.availableWidgets() + ["Line"]:
# Create a new widget for child widgets
widget = Qt._QtUiTools.QUiLoader.createWidget(self,
class_name,
parent,
name)
elif class_name in self.custom_widgets:
widget = self.custom_widgets[class_name](parent)
else:
raise Exception("Custom widget '%s' not supported"
% class_name)
if self.baseinstance:
# Set an attribute for the new child widget on the base
# instance, just like PyQt5.uic.loadUi does.
setattr(self.baseinstance, name, widget)
return widget
widget = _UiLoader(baseinstance).load(uifile)
Qt.QtCore.QMetaObject.connectSlotsByName(widget)
return widget
else:
raise NotImplementedError("No implementation available for loadUi")
"""Misplaced members
These members from the original submodule are misplaced relative to PySide2
"""
_misplaced_members = {
"PySide2": {
"QtCore.QStringListModel": "QtCore.QStringListModel",
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtCore.Property": "QtCore.Property",
"QtCore.Signal": "QtCore.Signal",
"QtCore.Slot": "QtCore.Slot",
"QtCore.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtCore.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtCore.QItemSelection": "QtCore.QItemSelection",
"QtCore.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.QItemSelectionRange": "QtCore.QItemSelectionRange",
"QtUiTools.QUiLoader": ["QtCompat.loadUi", _loadUi],
"shiboken2.wrapInstance": ["QtCompat.wrapInstance", _wrapinstance],
"shiboken2.getCppPointer": ["QtCompat.getCppPointer", _getcpppointer],
"shiboken2.isValid": ["QtCompat.isValid", _isvalid],
"QtWidgets.qApp": "QtWidgets.QApplication.instance()",
"QtCore.QCoreApplication.translate": [
"QtCompat.translate", _translate
],
"QtWidgets.QApplication.translate": [
"QtCompat.translate", _translate
],
"QtCore.qInstallMessageHandler": [
"QtCompat.qInstallMessageHandler", _qInstallMessageHandler
],
"QtWidgets.QStyleOptionViewItem": "QtCompat.QStyleOptionViewItemV4",
},
"PyQt5": {
"QtCore.pyqtProperty": "QtCore.Property",
"QtCore.pyqtSignal": "QtCore.Signal",
"QtCore.pyqtSlot": "QtCore.Slot",
"QtCore.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtCore.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtCore.QStringListModel": "QtCore.QStringListModel",
"QtCore.QItemSelection": "QtCore.QItemSelection",
"QtCore.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.QItemSelectionRange": "QtCore.QItemSelectionRange",
"uic.loadUi": ["QtCompat.loadUi", _loadUi],
"sip.wrapinstance": ["QtCompat.wrapInstance", _wrapinstance],
"sip.unwrapinstance": ["QtCompat.getCppPointer", _getcpppointer],
"sip.isdeleted": ["QtCompat.isValid", _isvalid],
"QtWidgets.qApp": "QtWidgets.QApplication.instance()",
"QtCore.QCoreApplication.translate": [
"QtCompat.translate", _translate
],
"QtWidgets.QApplication.translate": [
"QtCompat.translate", _translate
],
"QtCore.qInstallMessageHandler": [
"QtCompat.qInstallMessageHandler", _qInstallMessageHandler
],
"QtWidgets.QStyleOptionViewItem": "QtCompat.QStyleOptionViewItemV4",
},
"PySide": {
"QtGui.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtGui.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtGui.QItemSelection": "QtCore.QItemSelection",
"QtGui.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.Property": "QtCore.Property",
"QtCore.Signal": "QtCore.Signal",
"QtCore.Slot": "QtCore.Slot",
"QtGui.QItemSelectionRange": "QtCore.QItemSelectionRange",
"QtGui.QAbstractPrintDialog": "QtPrintSupport.QAbstractPrintDialog",
"QtGui.QPageSetupDialog": "QtPrintSupport.QPageSetupDialog",
"QtGui.QPrintDialog": "QtPrintSupport.QPrintDialog",
"QtGui.QPrintEngine": "QtPrintSupport.QPrintEngine",
"QtGui.QPrintPreviewDialog": "QtPrintSupport.QPrintPreviewDialog",
"QtGui.QPrintPreviewWidget": "QtPrintSupport.QPrintPreviewWidget",
"QtGui.QPrinter": "QtPrintSupport.QPrinter",
"QtGui.QPrinterInfo": "QtPrintSupport.QPrinterInfo",
"QtUiTools.QUiLoader": ["QtCompat.loadUi", _loadUi],
"shiboken.wrapInstance": ["QtCompat.wrapInstance", _wrapinstance],
"shiboken.unwrapInstance": ["QtCompat.getCppPointer", _getcpppointer],
"shiboken.isValid": ["QtCompat.isValid", _isvalid],
"QtGui.qApp": "QtWidgets.QApplication.instance()",
"QtCore.QCoreApplication.translate": [
"QtCompat.translate", _translate
],
"QtGui.QApplication.translate": [
"QtCompat.translate", _translate
],
"QtCore.qInstallMsgHandler": [
"QtCompat.qInstallMessageHandler", _qInstallMessageHandler
],
"QtGui.QStyleOptionViewItemV4": "QtCompat.QStyleOptionViewItemV4",
},
"PyQt4": {
"QtGui.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtGui.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtGui.QItemSelection": "QtCore.QItemSelection",
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtGui.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.pyqtProperty": "QtCore.Property",
"QtCore.pyqtSignal": "QtCore.Signal",
"QtCore.pyqtSlot": "QtCore.Slot",
"QtGui.QItemSelectionRange": "QtCore.QItemSelectionRange",
"QtGui.QAbstractPrintDialog": "QtPrintSupport.QAbstractPrintDialog",
"QtGui.QPageSetupDialog": "QtPrintSupport.QPageSetupDialog",
"QtGui.QPrintDialog": "QtPrintSupport.QPrintDialog",
"QtGui.QPrintEngine": "QtPrintSupport.QPrintEngine",
"QtGui.QPrintPreviewDialog": "QtPrintSupport.QPrintPreviewDialog",
"QtGui.QPrintPreviewWidget": "QtPrintSupport.QPrintPreviewWidget",
"QtGui.QPrinter": "QtPrintSupport.QPrinter",
"QtGui.QPrinterInfo": "QtPrintSupport.QPrinterInfo",
# "QtCore.pyqtSignature": "QtCore.Slot",
"uic.loadUi": ["QtCompat.loadUi", _loadUi],
"sip.wrapinstance": ["QtCompat.wrapInstance", _wrapinstance],
"sip.unwrapinstance": ["QtCompat.getCppPointer", _getcpppointer],
"sip.isdeleted": ["QtCompat.isValid", _isvalid],
"QtCore.QString": "str",
"QtGui.qApp": "QtWidgets.QApplication.instance()",
"QtCore.QCoreApplication.translate": [
"QtCompat.translate", _translate
],
"QtGui.QApplication.translate": [
"QtCompat.translate", _translate
],
"QtCore.qInstallMsgHandler": [
"QtCompat.qInstallMessageHandler", _qInstallMessageHandler
],
"QtGui.QStyleOptionViewItemV4": "QtCompat.QStyleOptionViewItemV4",
}
}
""" Compatibility Members
This dictionary is used to build Qt.QtCompat objects that provide a
consistent interface for obsolete members and for differences in binding
return values.
{
"binding": {
"classname": {
"targetname": "binding_namespace",
}
}
}
"""
_compatibility_members = {
"PySide2": {
"QWidget": {
"grab": "QtWidgets.QWidget.grab",
},
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.sectionsClickable",
"setSectionsClickable":
"QtWidgets.QHeaderView.setSectionsClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.sectionResizeMode",
"setSectionResizeMode":
"QtWidgets.QHeaderView.setSectionResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.sectionsMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setSectionsMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
"PyQt5": {
"QWidget": {
"grab": "QtWidgets.QWidget.grab",
},
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.sectionsClickable",
"setSectionsClickable":
"QtWidgets.QHeaderView.setSectionsClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.sectionResizeMode",
"setSectionResizeMode":
"QtWidgets.QHeaderView.setSectionResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.sectionsMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setSectionsMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
"PySide": {
"QWidget": {
"grab": "QtWidgets.QPixmap.grabWidget",
},
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.isClickable",
"setSectionsClickable": "QtWidgets.QHeaderView.setClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.resizeMode",
"setSectionResizeMode": "QtWidgets.QHeaderView.setResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.isMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
"PyQt4": {
"QWidget": {
"grab": "QtWidgets.QPixmap.grabWidget",
},
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.isClickable",
"setSectionsClickable": "QtWidgets.QHeaderView.setClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.resizeMode",
"setSectionResizeMode": "QtWidgets.QHeaderView.setResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.isMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
}
def _apply_site_config():
try:
import QtSiteConfig
except ImportError:
# If no QtSiteConfig module found, no modifications
# to _common_members are needed.
pass
else:
# Provide the ability to modify the dicts used to build Qt.py
if hasattr(QtSiteConfig, 'update_members'):
QtSiteConfig.update_members(_common_members)
if hasattr(QtSiteConfig, 'update_misplaced_members'):
QtSiteConfig.update_misplaced_members(members=_misplaced_members)
if hasattr(QtSiteConfig, 'update_compatibility_members'):
QtSiteConfig.update_compatibility_members(
members=_compatibility_members)
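# A hypothetical QtSiteConfig.py, placed anywhere on PYTHONPATH, could hook
# into the dicts above like this (the module body is an illustrative sketch,
# not part of Qt.py itself):
#
#   # QtSiteConfig.py
#   def update_members(members):
#       """Called by Qt.py with the _common_members dict"""
#       members["QtNetwork"].append("QSslSocket")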
def _new_module(name):
return types.ModuleType(__name__ + "." + name)
def _import_sub_module(module, name):
"""import_sub_module will mimic the function of importlib.import_module"""
module = __import__(module.__name__ + "." + name)
for level in name.split("."):
module = getattr(module, level)
return module
def _setup(module, extras):
"""Install common submodules"""
Qt.__binding__ = module.__name__
for name in list(_common_members) + extras:
try:
submodule = _import_sub_module(
module, name)
except ImportError:
try:
# For extra modules like sip and shiboken that may not be
# children of the binding.
submodule = __import__(name)
except ImportError:
continue
setattr(Qt, "_" + name, submodule)
if name not in extras:
# Store reference to original binding,
# but don't store speciality modules
# such as uic or QtUiTools
setattr(Qt, name, _new_module(name))
def _reassign_misplaced_members(binding):
"""Apply misplaced members from `binding` to Qt.py
Arguments:
binding (dict): Misplaced members
"""
for src, dst in _misplaced_members[binding].items():
dst_value = None
src_parts = src.split(".")
src_module = src_parts[0]
src_member = None
if len(src_parts) > 1:
src_member = src_parts[1:]
if isinstance(dst, (list, tuple)):
dst, dst_value = dst
dst_parts = dst.split(".")
dst_module = dst_parts[0]
dst_member = None
if len(dst_parts) > 1:
dst_member = dst_parts[1]
        # Get the member we want to store in the namespace.
if not dst_value:
try:
_part = getattr(Qt, "_" + src_module)
while src_member:
member = src_member.pop(0)
_part = getattr(_part, member)
dst_value = _part
except AttributeError:
# If the member we want to store in the namespace does not
# exist, there is no need to continue. This can happen if a
# request was made to rename a member that didn't exist, for
# example if QtWidgets isn't available on the target platform.
_log("Misplaced member has no source: {0}".format(src))
continue
try:
src_object = getattr(Qt, dst_module)
except AttributeError:
if dst_module not in _common_members:
# Only create the Qt parent module if its listed in
# _common_members. Without this check, if you remove QtCore
# from _common_members, the default _misplaced_members will add
# Qt.QtCore so it can add Signal, Slot, etc.
msg = 'Not creating missing member module "{m}" for "{c}"'
_log(msg.format(m=dst_module, c=dst_member))
continue
# If the dst is valid but the Qt parent module does not exist
# then go ahead and create a new module to contain the member.
setattr(Qt, dst_module, _new_module(dst_module))
src_object = getattr(Qt, dst_module)
# Enable direct import of the new module
sys.modules[__name__ + "." + dst_module] = src_object
if not dst_value:
dst_value = getattr(Qt, "_" + src_module)
if src_member:
dst_value = getattr(dst_value, src_member)
setattr(
src_object,
dst_member or dst_module,
dst_value
)
def _build_compatibility_members(binding, decorators=None):
"""Apply `binding` to QtCompat
Arguments:
binding (str): Top level binding in _compatibility_members.
decorators (dict, optional): Provides the ability to decorate the
original Qt methods when needed by a binding. This can be used
to change the returned value to a standard value. The key should
be the classname, the value is a dict where the keys are the
target method names, and the values are the decorator functions.
"""
decorators = decorators or dict()
# Allow optional site-level customization of the compatibility members.
# This method does not need to be implemented in QtSiteConfig.
try:
import QtSiteConfig
except ImportError:
pass
else:
if hasattr(QtSiteConfig, 'update_compatibility_decorators'):
QtSiteConfig.update_compatibility_decorators(binding, decorators)
_QtCompat = type("QtCompat", (object,), {})
for classname, bindings in _compatibility_members[binding].items():
attrs = {}
for target, binding in bindings.items():
namespaces = binding.split('.')
try:
src_object = getattr(Qt, "_" + namespaces[0])
except AttributeError as e:
_log("QtCompat: AttributeError: %s" % e)
# Skip reassignment of non-existing members.
# This can happen if a request was made to
# rename a member that didn't exist, for example
# if QtWidgets isn't available on the target platform.
continue
# Walk down any remaining namespace getting the object assuming
# that if the first namespace exists the rest will exist.
for namespace in namespaces[1:]:
src_object = getattr(src_object, namespace)
# decorate the Qt method if a decorator was provided.
if target in decorators.get(classname, []):
# staticmethod must be called on the decorated method to
# prevent a TypeError being raised when the decorated method
# is called.
src_object = staticmethod(
decorators[classname][target](src_object))
attrs[target] = src_object
# Create the QtCompat class and install it into the namespace
compat_class = type(classname, (_QtCompat,), attrs)
setattr(Qt.QtCompat, classname, compat_class)
def _pyside2():
"""Initialise PySide2
    These functions serve to test the existence of a binding
    and to set it up in such a way that it aligns with
    the final step: adding members from the original binding
    to Qt.py
"""
import PySide2 as module
extras = ["QtUiTools"]
try:
try:
# Before merge of PySide and shiboken
import shiboken2
except ImportError:
# After merge of PySide and shiboken, May 2017
from PySide2 import shiboken2
extras.append("shiboken2")
except ImportError:
pass
_setup(module, extras)
Qt.__binding_version__ = module.__version__
if hasattr(Qt, "_shiboken2"):
Qt.QtCompat.wrapInstance = _wrapinstance
Qt.QtCompat.getCppPointer = _getcpppointer
Qt.QtCompat.delete = shiboken2.delete
if hasattr(Qt, "_QtUiTools"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtCore"):
Qt.__qt_version__ = Qt._QtCore.qVersion()
Qt.QtCompat.dataChanged = (
lambda self, topleft, bottomright, roles=None:
self.dataChanged.emit(topleft, bottomright, roles or [])
)
if hasattr(Qt, "_QtWidgets"):
Qt.QtCompat.setSectionResizeMode = \
Qt._QtWidgets.QHeaderView.setSectionResizeMode
_reassign_misplaced_members("PySide2")
_build_compatibility_members("PySide2")
def _pyside():
"""Initialise PySide"""
import PySide as module
extras = ["QtUiTools"]
try:
try:
# Before merge of PySide and shiboken
import shiboken
except ImportError:
# After merge of PySide and shiboken, May 2017
from PySide import shiboken
extras.append("shiboken")
except ImportError:
pass
_setup(module, extras)
Qt.__binding_version__ = module.__version__
if hasattr(Qt, "_shiboken"):
Qt.QtCompat.wrapInstance = _wrapinstance
Qt.QtCompat.getCppPointer = _getcpppointer
Qt.QtCompat.delete = shiboken.delete
if hasattr(Qt, "_QtUiTools"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtGui"):
setattr(Qt, "QtWidgets", _new_module("QtWidgets"))
setattr(Qt, "_QtWidgets", Qt._QtGui)
if hasattr(Qt._QtGui, "QX11Info"):
setattr(Qt, "QtX11Extras", _new_module("QtX11Extras"))
Qt.QtX11Extras.QX11Info = Qt._QtGui.QX11Info
Qt.QtCompat.setSectionResizeMode = Qt._QtGui.QHeaderView.setResizeMode
if hasattr(Qt, "_QtCore"):
Qt.__qt_version__ = Qt._QtCore.qVersion()
Qt.QtCompat.dataChanged = (
lambda self, topleft, bottomright, roles=None:
self.dataChanged.emit(topleft, bottomright)
)
_reassign_misplaced_members("PySide")
_build_compatibility_members("PySide")
def _pyqt5():
"""Initialise PyQt5"""
import PyQt5 as module
extras = ["uic"]
try:
import sip
extras += ["sip"]
except ImportError:
# Relevant to PyQt5 5.11 and above
try:
from PyQt5 import sip
extras += ["sip"]
except ImportError:
sip = None
_setup(module, extras)
if hasattr(Qt, "_sip"):
Qt.QtCompat.wrapInstance = _wrapinstance
Qt.QtCompat.getCppPointer = _getcpppointer
Qt.QtCompat.delete = sip.delete
if hasattr(Qt, "_uic"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtCore"):
Qt.__binding_version__ = Qt._QtCore.PYQT_VERSION_STR
Qt.__qt_version__ = Qt._QtCore.QT_VERSION_STR
Qt.QtCompat.dataChanged = (
lambda self, topleft, bottomright, roles=None:
self.dataChanged.emit(topleft, bottomright, roles or [])
)
if hasattr(Qt, "_QtWidgets"):
Qt.QtCompat.setSectionResizeMode = \
Qt._QtWidgets.QHeaderView.setSectionResizeMode
_reassign_misplaced_members("PyQt5")
_build_compatibility_members('PyQt5')
def _pyqt4():
"""Initialise PyQt4"""
import sip
    # Validation of the environment variable. Prevents an error if
    # the variable is invalid since it's just a hint.
try:
hint = int(QT_SIP_API_HINT)
except TypeError:
hint = None # Variable was None, i.e. not set.
except ValueError:
raise ImportError("QT_SIP_API_HINT=%s must be a 1 or 2")
for api in ("QString",
"QVariant",
"QDate",
"QDateTime",
"QTextStream",
"QTime",
"QUrl"):
try:
sip.setapi(api, hint or 2)
except AttributeError:
raise ImportError("PyQt4 < 4.6 isn't supported by Qt.py")
except ValueError:
actual = sip.getapi(api)
if not hint:
raise ImportError("API version already set to %d" % actual)
else:
# Having provided a hint indicates a soft constraint, one
# that doesn't throw an exception.
sys.stderr.write(
"Warning: API '%s' has already been set to %d.\n"
% (api, actual)
)
import PyQt4 as module
extras = ["uic"]
try:
import sip
extras.append(sip.__name__)
except ImportError:
sip = None
_setup(module, extras)
if hasattr(Qt, "_sip"):
Qt.QtCompat.wrapInstance = _wrapinstance
Qt.QtCompat.getCppPointer = _getcpppointer
Qt.QtCompat.delete = sip.delete
if hasattr(Qt, "_uic"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtGui"):
setattr(Qt, "QtWidgets", _new_module("QtWidgets"))
setattr(Qt, "_QtWidgets", Qt._QtGui)
if hasattr(Qt._QtGui, "QX11Info"):
setattr(Qt, "QtX11Extras", _new_module("QtX11Extras"))
Qt.QtX11Extras.QX11Info = Qt._QtGui.QX11Info
Qt.QtCompat.setSectionResizeMode = \
Qt._QtGui.QHeaderView.setResizeMode
if hasattr(Qt, "_QtCore"):
Qt.__binding_version__ = Qt._QtCore.PYQT_VERSION_STR
Qt.__qt_version__ = Qt._QtCore.QT_VERSION_STR
Qt.QtCompat.dataChanged = (
lambda self, topleft, bottomright, roles=None:
self.dataChanged.emit(topleft, bottomright)
)
_reassign_misplaced_members("PyQt4")
# QFileDialog QtCompat decorator
def _standardizeQFileDialog(some_function):
"""Decorator that makes PyQt4 return conform to other bindings"""
def wrapper(*args, **kwargs):
        ret = some_function(*args, **kwargs)
# PyQt4 only returns the selected filename, force it to a
        # standard return of the selected filename, and an empty string
# for the selected filter
return ret, ''
wrapper.__doc__ = some_function.__doc__
wrapper.__name__ = some_function.__name__
return wrapper
decorators = {
"QFileDialog": {
"getOpenFileName": _standardizeQFileDialog,
"getOpenFileNames": _standardizeQFileDialog,
"getSaveFileName": _standardizeQFileDialog,
}
}
_build_compatibility_members('PyQt4', decorators)
def _none():
"""Internal option (used in installer)"""
Mock = type("Mock", (), {"__getattr__": lambda Qt, attr: None})
Qt.__binding__ = "None"
Qt.__qt_version__ = "0.0.0"
Qt.__binding_version__ = "0.0.0"
Qt.QtCompat.loadUi = lambda uifile, baseinstance=None: None
Qt.QtCompat.setSectionResizeMode = lambda *args, **kwargs: None
for submodule in _common_members.keys():
setattr(Qt, submodule, Mock())
setattr(Qt, "_" + submodule, Mock())
def _log(text):
if QT_VERBOSE:
sys.stdout.write(text + "\n")
def _convert(lines):
"""Convert compiled .ui file from PySide2 to Qt.py
Arguments:
lines (list): Each line of of .ui file
Usage:
>> with open("myui.py") as f:
.. lines = _convert(f.readlines())
"""
def parse(line):
line = line.replace("from PySide2 import", "from Qt import QtCompat,")
line = line.replace("QtWidgets.QApplication.translate",
"QtCompat.translate")
if "QtCore.SIGNAL" in line:
raise NotImplementedError("QtCore.SIGNAL is missing from PyQt5 "
"and so Qt.py does not support it: you "
"should avoid defining signals inside "
"your ui files.")
return line
parsed = list()
for line in lines:
line = parse(line)
parsed.append(line)
return parsed
def _cli(args):
"""Qt.py command-line interface"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--convert",
help="Path to compiled Python module, e.g. my_ui.py")
parser.add_argument("--compile",
help="Accept raw .ui file and compile with native "
"PySide2 compiler.")
parser.add_argument("--stdout",
help="Write to stdout instead of file",
action="store_true")
parser.add_argument("--stdin",
help="Read from stdin instead of file",
action="store_true")
args = parser.parse_args(args)
if args.stdout:
raise NotImplementedError("--stdout")
if args.stdin:
raise NotImplementedError("--stdin")
if args.compile:
raise NotImplementedError("--compile")
if args.convert:
sys.stdout.write("#\n"
"# WARNING: --convert is an ALPHA feature.\n#\n"
"# See https://github.com/mottosso/Qt.py/pull/132\n"
"# for details.\n"
"#\n")
#
# ------> Read
#
with open(args.convert) as f:
lines = _convert(f.readlines())
backup = "%s_backup%s" % os.path.splitext(args.convert)
sys.stdout.write("Creating \"%s\"..\n" % backup)
shutil.copy(args.convert, backup)
#
# <------ Write
#
with open(args.convert, "w") as f:
f.write("".join(lines))
sys.stdout.write("Successfully converted \"%s\"\n" % args.convert)
class MissingMember(object):
"""
A placeholder type for a missing Qt object not
included in Qt.py
Args:
name (str): The name of the missing type
details (str): An optional custom error message
"""
ERR_TMPL = ("{} is not a common object across PySide2 "
"and the other Qt bindings. It is not included "
"as a common member in the Qt.py layer")
def __init__(self, name, details=''):
self.__name = name
self.__err = self.ERR_TMPL.format(name)
if details:
self.__err = "{}: {}".format(self.__err, details)
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self.__name)
def __getattr__(self, name):
raise NotImplementedError(self.__err)
def __call__(self, *a, **kw):
raise NotImplementedError(self.__err)
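# Behaviour sketch: assuming the resolved binding is one where QMatrix is
# excluded (see _missing_members above), attribute access yields the
# placeholder and any use of it raises with the recorded details:
#
#   >> from Qt import QtGui
#   >> QtGui.QMatrix       # <MissingMember: QtGui.QMatrix>
#   >> QtGui.QMatrix()     # raises NotImplementedError (Deprecated in PyQt5)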
def _install():
# Default order (customise order and content via QT_PREFERRED_BINDING)
default_order = ("PySide2", "PyQt5", "PySide", "PyQt4")
preferred_order = list(
b for b in QT_PREFERRED_BINDING.split(os.pathsep) if b
)
order = preferred_order or default_order
available = {
"PySide2": _pyside2,
"PyQt5": _pyqt5,
"PySide": _pyside,
"PyQt4": _pyqt4,
"None": _none
}
_log("Order: '%s'" % "', '".join(order))
# Allow site-level customization of the available modules.
_apply_site_config()
found_binding = False
for name in order:
_log("Trying %s" % name)
try:
available[name]()
found_binding = True
break
except ImportError as e:
_log("ImportError: %s" % e)
except KeyError:
_log("ImportError: Preferred binding '%s' not found." % name)
if not found_binding:
        # If no binding was found, raise this error
        raise ImportError("No Qt binding was found.")
# Install individual members
for name, members in _common_members.items():
try:
their_submodule = getattr(Qt, "_%s" % name)
except AttributeError:
continue
our_submodule = getattr(Qt, name)
# Enable import *
__all__.append(name)
# Enable direct import of submodule,
# e.g. import Qt.QtCore
sys.modules[__name__ + "." + name] = our_submodule
for member in members:
# Accept that a submodule may miss certain members.
try:
their_member = getattr(their_submodule, member)
except AttributeError:
_log("'%s.%s' was missing." % (name, member))
continue
setattr(our_submodule, member, their_member)
# Install missing member placeholders
for name, members in _missing_members.items():
our_submodule = getattr(Qt, name)
for member in members:
# If the submodule already has this member installed,
# either by the common members, or the site config,
# then skip installing this one over it.
if hasattr(our_submodule, member):
continue
placeholder = MissingMember("{}.{}".format(name, member),
details=members[member])
setattr(our_submodule, member, placeholder)
# Enable direct import of QtCompat
sys.modules['Qt.QtCompat'] = Qt.QtCompat
# Backwards compatibility
if hasattr(Qt.QtCompat, 'loadUi'):
Qt.QtCompat.load_ui = Qt.QtCompat.loadUi
_install()
# Setup Binding Enum states
Qt.IsPySide2 = Qt.__binding__ == 'PySide2'
Qt.IsPyQt5 = Qt.__binding__ == 'PyQt5'
Qt.IsPySide = Qt.__binding__ == 'PySide'
Qt.IsPyQt4 = Qt.__binding__ == 'PyQt4'
"""Augment QtCompat
QtCompat contains wrappers and added functionality
to the original bindings, such as the CLI interface
and otherwise incompatible members between bindings,
such as `QHeaderView.setSectionResizeMode`.
"""
Qt.QtCompat._cli = _cli
Qt.QtCompat._convert = _convert
# Enable command-line interface
if __name__ == "__main__":
_cli(sys.argv[1:])
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 Marcus Ottosson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# In PySide(2), loadUi does not exist, so we implement it
#
# `_UiLoader` is adapted from the qtpy project, which was further influenced
# by qt-helpers which was released under a 3-clause BSD license which in turn
# is based on a solution at:
#
# - https://gist.github.com/cpbotha/1b42a20c8f3eb9bb7cb8
#
# The License for this code is as follows:
#
# qt-helpers - a common front-end to various Qt modules
#
# Copyright (c) 2015, Chris Beaumont and Thomas Robitaille
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Glue project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Which itself was based on the solution at
#
# https://gist.github.com/cpbotha/1b42a20c8f3eb9bb7cb8
#
# which was released under the MIT license:
#
# Copyright (c) 2011 Sebastian Wiesner <lunaryorn@gmail.com>
# Modifications by Charl Botha <cpbotha@vxlabs.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files
# (the "Software"),to deal in the Software without restriction,
# including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
pyblish/pyblish-qml
|
pyblish_qml/vendor/Qt.py
|
Python
|
lgpl-3.0
| 63,899
|
[
"VisIt"
] |
165fd34878e1ef4b8dd96c02fe911bc7401f141a4dc8fb3b2c6ca4c2198ce7d7
|
# fileloader.py ---
#
# Filename: fileloader.py
# Description:
# Author:
# Maintainer:
# Created: Mon Feb 25 15:59:54 2013 (+0530)
# Version:
# Last-Updated: Wed May 22 12:30:14 2013 (+0530)
# By: subha
# Update #: 106
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Dialog for loading model files
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import sys
from PyQt4 import QtGui,QtCore,Qt
import moose
import os
import posixpath
class LoaderDialog(QtGui.QFileDialog):
# Update ftypes to include new file types
ftypes='All Supported Files (*.cspace *.g *.xml *.p);; CSPACE (*.cspace);; GENESIS (*.g);; GENESIS Prototype (*.p);; NeuroML/SBML (*.xml)'
target_default = '' # The default target when loading a model
def __init__(self, *args):
self.modelpath = None
super(LoaderDialog, self).__init__(*args)
self.setNameFilter(self.tr(self.ftypes))
self.setNameFilterDetailsVisible(True)
self.setReadOnly(True)
self.setFileMode(self.ExistingFile)
# self.targetPanel = QtGui.QFrame()
# self.targetLabel = QtGui.QLabel('Model name')
# self.targetText = QtGui.QLineEdit(self.target_default)
# form = QtGui.QFormLayout()
# form.addRow(self.targetLabel, self.targetText)
# self.modelChoiceBox = QtGui.QGroupBox('Model name')
# self.replaceExistingButton = QtGui.QRadioButton('&Replace current model')
# self.mergeExistingButton = QtGui.QRadioButton('&Keep current model')
# self.replaceExistingButton.setChecked(True)
# vbox = QtGui.QVBoxLayout()
# vbox.addWidget(self.replaceExistingButton)
# vbox.addWidget(self.mergeExistingButton)
# self.modelChoiceBox.setLayout(vbox)
# self.targetPanel.setLayout(form)
# self.layout().addWidget(self.targetPanel)
# self.layout().addWidget(self.modelChoiceBox)
self.fileSelected.connect(self.fileSelectedSlot)
def fileSelectedSlot(self, fpath):
"""On selecting a file, this function will cause the target location to change to:
/model/filename_minus_extension
"""
self.modelpath = os.path.splitext(os.path.basename(str(fpath)))[0]
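        # e.g. selecting '/data/models/granule.p' sets modelpath to 'granule'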
# def isReplace(self):
# return self.replaceExistingButton.isChecked()
# def isMerge(self):
# return self.mergeExistingButton.isChecked()
def getTargetPath(self):
return self.modelpath
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
QtGui.qApp = app
mw = LoaderDialog()
mw.show()
# mw.exec_()
sys.exit(app.exec_())
#
# fileloader.py ends here
|
dilawar/moose-full
|
moose-gui/loaderdialog.py
|
Python
|
gpl-2.0
| 3,433
|
[
"MOOSE"
] |
83c6e321643710d634136b197e7322e1c53b50aa73b37aa457b07aaad8604c47
|
__RCSID__ = "$Id$"
""" FileCatalogFactory class to create file catalog client objects according to the
configuration description
"""
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getCatalogPath
from DIRAC.Resources.Catalog.FileCatalogProxyClient import FileCatalogProxyClient
from DIRAC.Core.Utilities import ObjectLoader
class FileCatalogFactory:
def __init__( self ):
self.log = gLogger.getSubLogger( 'FileCatalogFactory' )
def createCatalog( self, catalogName, useProxy = False ):
""" Create a file catalog object from its name and CS description
"""
if useProxy:
catalog = FileCatalogProxyClient( catalogName )
return S_OK( catalog )
# get the CS description first
catalogPath = getCatalogPath( catalogName )
catalogType = gConfig.getValue( catalogPath + '/CatalogType', catalogName )
catalogURL = gConfig.getValue( catalogPath + '/CatalogURL', "DataManagement/" + catalogType )
self.log.debug( 'Creating %s client' % catalogName )
objectLoader = ObjectLoader.ObjectLoader()
result = objectLoader.loadObject( 'Resources.Catalog.%sClient' % catalogType, catalogType + 'Client' )
if not result['OK']:
gLogger.error( 'Failed to load catalog object', '%s' % result['Message'] )
return result
catalogClass = result['Value']
try:
# FIXME: is it really needed? This is the factory, can't this be moved out?
if catalogType in [ 'LcgFileCatalogCombined', 'LcgFileCatalog' ]:
# The LFC special case
infoSys = gConfig.getValue( catalogPath + '/LcgGfalInfosys', '' )
host = gConfig.getValue( catalogPath + '/MasterHost', '' )
catalog = catalogClass( infoSys, host )
else:
if catalogURL:
catalog = catalogClass( url = catalogURL )
else:
catalog = catalogClass()
self.log.debug( 'Loaded module %sClient' % catalogType )
return S_OK( catalog )
    except Exception as x:
errStr = "Failed to instantiate %s()" % ( catalogType )
gLogger.exception( errStr, lException = x )
return S_ERROR( errStr )
# Catalog module was not loaded
return S_ERROR( 'No suitable client found for %s' % catalogName )
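# A minimal usage sketch (assumes an initialised DIRAC configuration with a
# catalog described in the CS; the catalog name 'FileCatalog' below is
# illustrative only):
#
#   factory = FileCatalogFactory()
#   result = factory.createCatalog( 'FileCatalog' )
#   if result['OK']:
#     catalog = result['Value']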
|
vmendez/DIRAC
|
Resources/Catalog/FileCatalogFactory.py
|
Python
|
gpl-3.0
| 2,272
|
[
"DIRAC"
] |
ab3b7958ed7102f0d33c6c9cb1456a0b044574606a6a919ae4f4c8d7d0b8fa69
|
from dirac.tests import *
class TestControlController(TestController):
def test_index(self):
response = self.app.get(url_for(controller='web/control'))
# Test response...
|
DIRACGrid/DIRACWeb
|
dirac/tests/functional/test_web_control.py
|
Python
|
gpl-3.0
| 193
|
[
"DIRAC"
] |
8e6fedead02a4367b54ed6810588b87f6560a91bb88090149beaac2725070911
|
from __future__ import print_function
import sys
from sympy import symbols,sin,cos
from galgebra.printer import Format,xpdf,Get_Program,Print_Function
from galgebra.ga import Ga
def Maxwells_Equations_in_Geom_Calculus():
Print_Function()
X = symbols('t x y z',real=True)
(st4d,g0,g1,g2,g3) = Ga.build('gamma*t|x|y|z',g=[1,-1,-1,-1],coords=X)
I = st4d.i
B = st4d.mv('B','vector',f=True)
E = st4d.mv('E','vector',f=True)
B.set_coef(1,0,0)
E.set_coef(1,0,0)
B *= g0
E *= g0
J = st4d.mv('J','vector',f=True)
F = E+I*B
print(r'\text{Pseudo Scalar\;\;}I =',I)
print('\\text{Magnetic Field Bi-Vector\\;\\;} B = \\bm{B\\gamma_{t}} =',B)
print('\\text{Electric Field Bi-Vector\\;\\;} E = \\bm{E\\gamma_{t}} =',E)
print('\\text{Electromagnetic Field Bi-Vector\\;\\;} F = E+IB =',F)
print('%\\text{Four Current Density\\;\\;} J =',J)
gradF = st4d.grad*F
    print('#Geom Derivative of Electromagnetic Field Bi-Vector')
gradF.Fmt(3,'grad*F')
print('#Maxwell Equations')
print('grad*F = J')
print('#Div $E$ and Curl $H$ Equations')
print((gradF.get_grade(1)-J).Fmt(3,'%\\grade{\\nabla F}_{1} -J = 0'))
print('#Curl $E$ and Div $B$ equations')
print((gradF.get_grade(3)).Fmt(3,'%\\grade{\\nabla F}_{3} = 0'))
return
def Dirac_Equation_in_Geom_Calculus():
Print_Function()
coords = symbols('t x y z',real=True)
(st4d,g0,g1,g2,g3) = Ga.build('gamma*t|x|y|z',g=[1,-1,-1,-1],coords=coords)
I = st4d.i
(m,e) = symbols('m e')
psi = st4d.mv('psi','spinor',f=True)
A = st4d.mv('A','vector',f=True)
sig_z = g3*g0
print('\\text{4-Vector Potential\\;\\;}\\bm{A} =',A)
print('\\text{8-component real spinor\\;\\;}\\bm{\\psi} =',psi)
dirac_eq = (st4d.grad*psi)*I*sig_z-e*A*psi-m*psi*g0
dirac_eq = dirac_eq.simplify()
print(dirac_eq.Fmt(3,r'%\text{Dirac Equation\;\;}\nabla \bm{\psi} I \sigma_{z}-e\bm{A}\bm{\psi}-m\bm{\psi}\gamma_{t} = 0'))
return
def Lorentz_Tranformation_in_Geog_Algebra():
Print_Function()
(alpha,beta,gamma) = symbols('alpha beta gamma')
(x,t,xp,tp) = symbols("x t x' t'",real=True)
(st2d,g0,g1) = Ga.build('gamma*t|x',g=[1,-1])
from sympy import sinh,cosh
R = cosh(alpha/2)+sinh(alpha/2)*(g0^g1)
X = t*g0+x*g1
Xp = tp*g0+xp*g1
print('R =',R)
print(r"#%t\bm{\gamma_{t}}+x\bm{\gamma_{x}} = t'\bm{\gamma'_{t}}+x'\bm{\gamma'_{x}} = R\lp t'\bm{\gamma_{t}}+x'\bm{\gamma_{x}}\rp R^{\dagger}")
Xpp = R*Xp*R.rev()
Xpp = Xpp.collect()
Xpp = Xpp.trigsimp()
print(r"%t\bm{\gamma_{t}}+x\bm{\gamma_{x}} =",Xpp)
Xpp = Xpp.subs({sinh(alpha):gamma*beta,cosh(alpha):gamma})
print(r'%\f{\sinh}{\alpha} = \gamma\beta')
print(r'%\f{\cosh}{\alpha} = \gamma')
print(r"%t\bm{\gamma_{t}}+x\bm{\gamma_{x}} =",Xpp.collect())
return
def General_Lorentz_Tranformation():
Print_Function()
(alpha,beta,gamma) = symbols('alpha beta gamma')
(x,y,z,t) = symbols("x y z t",real=True)
(st4d,g0,g1,g2,g3) = Ga.build('gamma*t|x|y|z',g=[1,-1,-1,-1])
B = (x*g1+y*g2+z*g3)^(t*g0)
print(B)
print(B.exp(hint='+'))
print(B.exp(hint='-'))
def Lie_Group():
Print_Function()
coords = symbols('t x y z',real=True)
(st4d,g0,g1,g2,g3) = Ga.build('gamma*t|x|y|z',g=[1,-1,-1,-1],coords=coords)
I = st4d.i
a = st4d.mv('a','vector')
B = st4d.mv('B','bivector')
print('a =',a)
print('B =',B)
print('a|B =', a|B)
print(((a|B)|B).simplify().Fmt(3,'(a|B)|B'))
print((((a|B)|B)|B).simplify().Fmt(3,'((a|B)|B)|B'))
return
def dummy():
return
def main():
Get_Program()
Format()
#Maxwells_Equations_in_Geom_Calculus()
#Dirac_Equation_in_Geom_Calculus()
#Lorentz_Tranformation_in_Geog_Algebra()
General_Lorentz_Tranformation()
#Lie_Group()
# xpdf()
xpdf(pdfprog=None)
return
if __name__ == "__main__":
main()
|
arsenovic/galgebra
|
examples/LaTeX/physics_check_latex.py
|
Python
|
bsd-3-clause
| 3,934
|
[
"DIRAC"
] |
32af54dbcbac48bf33b7e413165d531928df2e8b71747180fb89556dc73303f5
|
# This file is adapted from https://github.com/ray-project/ray/blob
# /master/examples/parameter_server/async_parameter_server.py
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import time
from zoo.examples.ray_on_spark.parameter_server import model
import ray
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca import OrcaContext
os.environ["LANG"] = "C.UTF-8"
parser = argparse.ArgumentParser(description="Run the asynchronous parameter "
"server example.")
parser.add_argument('--cluster_mode', type=str, default="local",
help='The mode for the Spark cluster. local or yarn.')
parser.add_argument("--num_workers", default=4, type=int,
help="The number of workers to use.")
parser.add_argument("--iterations", default=50, type=int,
help="Iteration time.")
parser.add_argument("--executor_cores", type=int, default=8,
help="The number of driver's cpu cores you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--executor_memory", type=str, default="10g",
help="The size of slave(executor)'s memory you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--driver_memory", type=str, default="2g",
help="The size of driver's memory you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--driver_cores", type=int, default=8,
help="The number of driver's cpu cores you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--extra_executor_memory_for_ray", type=str, default="20g",
help="The extra executor memory to store some data."
"You can change it depending on your own cluster setting.")
parser.add_argument("--object_store_memory", type=str, default="4g",
help="The memory to store data on local."
"You can change it depending on your own cluster setting.")
@ray.remote
class ParameterServer(object):
def __init__(self, keys, values):
# These values will be mutated, so we must create a copy that is not
# backed by the object store.
values = [value.copy() for value in values]
self.weights = dict(zip(keys, values))
def push(self, keys, values):
for key, value in zip(keys, values):
self.weights[key] += value
def pull(self, keys):
return [self.weights[key] for key in keys]
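    # Usage sketch (mirrors the driver code below):
    #   ps = ParameterServer.remote(keys, initial_values)
    #   weights = ray.get(ps.pull.remote(keys))  # fetch current parameters
    #   ps.push.remote(keys, gradients)          # send an additive update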
@ray.remote
def worker_task(ps, worker_index, batch_size=50):
# Download MNIST.
print("Worker " + str(worker_index))
mnist = model.download_mnist_retry(seed=worker_index)
# Initialize the model.
net = model.SimpleCNN()
keys = net.get_weights()[0]
while True:
# Get the current weights from the parameter server.
weights = ray.get(ps.pull.remote(keys))
net.set_weights(keys, weights)
# Compute an update and push it to the parameter server.
xs, ys = mnist.train.next_batch(batch_size)
gradients = net.compute_update(xs, ys)
ps.push.remote(keys, gradients)
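# Note: worker_task loops forever, so the workers run fully asynchronously
# (hence "asynchronous parameter server"); they are only torn down when the
# driver stops Ray at the end of the script.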
if __name__ == "__main__":
args = parser.parse_args()
cluster_mode = args.cluster_mode
if cluster_mode == "yarn":
sc = init_orca_context(cluster_mode=cluster_mode,
cores=args.executor_cores,
memory=args.executor_memory,
init_ray_on_spark=True,
num_executors=args.num_workers,
driver_memory=args.driver_memory,
driver_cores=args.driver_cores,
extra_executor_memory_for_ray=args.extra_executor_memory_for_ray,
object_store_memory=args.object_store_memory,
additional_archive="MNIST_data.zip#MNIST_data")
ray_ctx = OrcaContext.get_ray_context()
elif cluster_mode == "local":
sc = init_orca_context(cores=args.driver_cores)
ray_ctx = OrcaContext.get_ray_context()
    else:
        # Fail fast: otherwise sc and ray_ctx would be unbound below.
        raise ValueError(
            "init_orca_context failed. cluster_mode should be either 'local' "
            "or 'yarn' but got " + cluster_mode)
# Create a parameter server with some random weights.
net = model.SimpleCNN()
all_keys, all_values = net.get_weights()
ps = ParameterServer.remote(all_keys, all_values)
# Start some training tasks.
worker_tasks = [worker_task.remote(ps, i) for i in range(args.num_workers)]
# Download MNIST.
mnist = model.download_mnist_retry()
print("Begin iteration")
i = 0
while i < args.iterations:
# Get and evaluate the current model.
print("-----Iteration" + str(i) + "------")
current_weights = ray.get(ps.pull.remote(all_keys))
net.set_weights(all_keys, current_weights)
test_xs, test_ys = mnist.test.next_batch(1000)
accuracy = net.compute_accuracy(test_xs, test_ys)
print("Iteration {}: accuracy is {}".format(i, accuracy))
i += 1
time.sleep(1)
ray_ctx.stop()
stop_orca_context()
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/examples/ray_on_spark/parameter_server/async_parameter_server.py
|
Python
|
apache-2.0
| 6,132
|
[
"ORCA"
] |
43bef9a32c453d756377ad07b6e8c769d4bab8ab268bad9aff9402026d3fade1
|
"""Validate dependencies."""
import ast
from pathlib import Path
from typing import Dict, Set
from homeassistant.requirements import DISCOVERY_INTEGRATIONS
from .model import Integration
class ImportCollector(ast.NodeVisitor):
"""Collect all integrations referenced."""
def __init__(self, integration: Integration):
"""Initialize the import collector."""
self.integration = integration
self.referenced: Dict[Path, Set[str]] = {}
# Current file or dir we're inspecting
self._cur_fil_dir = None
def collect(self) -> None:
"""Collect imports from a source file."""
for fil in self.integration.path.glob("**/*.py"):
if not fil.is_file():
continue
self._cur_fil_dir = fil.relative_to(self.integration.path)
self.referenced[self._cur_fil_dir] = set()
self.visit(ast.parse(fil.read_text()))
self._cur_fil_dir = None
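    # ast.NodeVisitor.visit dispatches each parsed node to the matching
    # visit_Import / visit_ImportFrom / visit_Attribute method below.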
def _add_reference(self, reference_domain: str):
"""Add a reference."""
self.referenced[self._cur_fil_dir].add(reference_domain)
def visit_ImportFrom(self, node):
"""Visit ImportFrom node."""
if node.module is None:
return
if node.module.startswith("homeassistant.components."):
# from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME
# from homeassistant.components.logbook import bla
self._add_reference(node.module.split(".")[2])
elif node.module == "homeassistant.components":
# from homeassistant.components import sun
for name_node in node.names:
self._add_reference(name_node.name)
def visit_Import(self, node):
"""Visit Import node."""
# import homeassistant.components.hue as hue
for name_node in node.names:
if name_node.name.startswith("homeassistant.components."):
self._add_reference(name_node.name.split(".")[2])
def visit_Attribute(self, node):
"""Visit Attribute node."""
# hass.components.hue.async_create()
# Name(id=hass)
# .Attribute(attr=hue)
# .Attribute(attr=async_create)
# self.hass.components.hue.async_create()
# Name(id=self)
# .Attribute(attr=hass)
# .Attribute(attr=hue)
# .Attribute(attr=async_create)
if (
isinstance(node.value, ast.Attribute)
and node.value.attr == "components"
and (
(
isinstance(node.value.value, ast.Name)
and node.value.value.id == "hass"
)
or (
isinstance(node.value.value, ast.Attribute)
and node.value.value.attr == "hass"
)
)
):
self._add_reference(node.attr)
else:
# Have it visit other kids
self.generic_visit(node)
ALLOWED_USED_COMPONENTS = {
# This component will always be set up
"persistent_notification",
# These allow to register things without being set up
"conversation",
"frontend",
"hassio",
"system_health",
"websocket_api",
"automation",
"device_automation",
"zone",
"homeassistant",
"system_log",
"person",
# Discovery
"discovery",
# Other
"mjpeg", # base class, has no reqs or component to load.
"stream", # Stream cannot install on all systems, can be imported without reqs.
}
IGNORE_VIOLATIONS = {
# Has same requirement, gets defaults.
("sql", "recorder"),
# Sharing a base class
("openalpr_cloud", "openalpr_local"),
("lutron_caseta", "lutron"),
("ffmpeg_noise", "ffmpeg_motion"),
# Demo
("demo", "manual"),
("demo", "openalpr_local"),
# This should become a helper method that integrations can submit data to
("websocket_api", "lovelace"),
("websocket_api", "shopping_list"),
# Expose HA to external systems
"homekit",
"alexa",
"google_assistant",
"emulated_hue",
"prometheus",
"conversation",
"logbook",
"mobile_app",
# These should be extracted to external package
"pvoutput",
"dwd_weather_warnings",
# Should be rewritten to use own data fetcher
"scrape",
}
def calc_allowed_references(integration: Integration) -> Set[str]:
"""Return a set of allowed references."""
allowed_references = (
ALLOWED_USED_COMPONENTS
| set(integration.manifest["dependencies"])
| set(integration.manifest.get("after_dependencies", []))
)
# Discovery requirements are ok if referenced in manifest
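    # (e.g., an integration that lists "zeroconf" in its manifest may then
    # reference the zeroconf integration without declaring a dependency)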
for check_domain, to_check in DISCOVERY_INTEGRATIONS.items():
if any(check in integration.manifest for check in to_check):
allowed_references.add(check_domain)
return allowed_references
def find_non_referenced_integrations(
integrations: Dict[str, Integration],
integration: Integration,
references: Dict[Path, Set[str]],
):
"""Find intergrations that are not allowed to be referenced."""
allowed_references = calc_allowed_references(integration)
referenced = set()
for path, refs in references.items():
if len(path.parts) == 1:
# climate.py is stored as climate
cur_fil_dir = path.stem
else:
# climate/__init__.py is stored as climate
cur_fil_dir = path.parts[0]
is_platform_other_integration = cur_fil_dir in integrations
for ref in refs:
# We are always allowed to import from ourselves
if ref == integration.domain:
continue
# These references are approved based on the manifest
if ref in allowed_references:
continue
# Some violations are whitelisted
if (integration.domain, ref) in IGNORE_VIOLATIONS:
continue
# If it's a platform for another integration, the other integration is ok
if is_platform_other_integration and cur_fil_dir == ref:
continue
# These have a platform specified in this integration
if not is_platform_other_integration and (
(integration.path / f"{ref}.py").is_file()
# Platform dir
or (integration.path / ref).is_dir()
):
continue
referenced.add(ref)
return referenced
def validate_dependencies(
integrations: Dict[str, Integration], integration: Integration
):
"""Validate all dependencies."""
# Some integrations are allowed to have violations.
if integration.domain in IGNORE_VIOLATIONS:
return
# Find usage of hass.components
collector = ImportCollector(integration)
collector.collect()
for domain in sorted(
find_non_referenced_integrations(
integrations, integration, collector.referenced
)
):
integration.add_error(
"dependencies",
f"Using component {domain} but it's not in 'dependencies' "
"or 'after_dependencies'",
)
def validate(integrations: Dict[str, Integration], config):
"""Handle dependencies for integrations."""
# check for non-existing dependencies
for integration in integrations.values():
if not integration.manifest:
continue
validate_dependencies(integrations, integration)
# check that all referenced dependencies exist
for dep in integration.manifest["dependencies"]:
if dep not in integrations:
integration.add_error(
"dependencies", f"Dependency {dep} does not exist"
)
|
postlund/home-assistant
|
script/hassfest/dependencies.py
|
Python
|
apache-2.0
| 7,826
|
[
"VisIt"
] |
565929dc6c82434e6e84bf53c1c0d7cbea364fdb72ad742289b7d8d1c300ed61
|
"""
Helper Classes and Functions for docking fingerprint computation.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar and Jacob Durrant"
__license__ = "GNU General Public License"
import math
import os
import subprocess
import numpy as np
import deepchem.utils.rdkit_util as rdkit_util
def force_partial_charge_computation(mol):
"""Force computation of partial charges for molecule.
Parameters
----------
mol: Rdkit Mol
Molecule on which we compute partial charges.
"""
rdkit_util.compute_charges(mol)
def pdbqt_to_pdb(input_file, output_directory):
"""Convert pdbqt file to pdb file.
Parameters
----------
input_file: String
Path to input file.
output_directory: String
Path to desired output directory.
"""
print(input_file, output_directory)
raise ValueError("Not yet implemented")
def hydrogenate_and_compute_partial_charges(input_file,
input_format,
hyd_output=None,
pdbqt_output=None,
protein=True,
verbose=True):
"""Outputs a hydrogenated pdb and a pdbqt with partial charges.
Takes an input file in specified format. Generates two outputs:
-) A pdb file that contains a hydrogenated (at pH 7.4) version of
original compound.
-) A pdbqt file that has computed Gasteiger partial charges. This pdbqt
     file is built from the hydrogenated pdb.
TODO(rbharath): Can do a bit of refactoring between this function and
pdbqt_to_pdb.
Parameters
----------
input_file: String
Path to input file.
input_format: String
Name of input format.
"""
mol = rdkit_util.load_molecule(
input_file, add_hydrogens=True, calc_charges=True)[1]
if verbose:
print("Create pdb with hydrogens added")
rdkit_util.write_molecule(mol, str(hyd_output), is_protein=protein)
if verbose:
print("Create a pdbqt file from the hydrogenated pdb above.")
rdkit_util.write_molecule(mol, str(pdbqt_output), is_protein=protein)
  if protein:
    print("Removing ROOT/ENDROOT/TORSDOF")
    with open(pdbqt_output) as f:
      pdbqt_lines = f.readlines()
    filtered_lines = []
    for line in pdbqt_lines:
      # Drop the AutoDock torsion-tree records the message above refers to.
      if line.startswith(("ROOT", "ENDROOT", "TORSDOF")):
        continue
      filtered_lines.append(line)
    with open(pdbqt_output, "w") as f:
      f.writelines(filtered_lines)
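# Usage sketch (illustrative file names, not part of the library):
#   hydrogenate_and_compute_partial_charges(
#       "receptor.pdb", "pdb", hyd_output="receptor_hyd.pdb",
#       pdbqt_output="receptor_hyd.pdbqt", protein=True)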
class AromaticRing(object):
"""Holds information about an aromatic ring."""
def __init__(self, center, indices, plane_coeff, radius):
"""
Initializes an aromatic.
Parameters
----------
    center: Point
Center of the ring.
indices: list
List of the atom indices for ring atoms.
plane_coeff: list
A list of elements [a, b, c, d] that define a plane by equation
a x + b y + c z = d.
radius: float
Ring radius from center.
"""
self.center = center
self.indices = indices
    # a*x + b*y + c*z = d
self.plane_coeff = plane_coeff
self.radius = radius
def average_point(points):
  """Returns the point with averaged coordinates of arguments.
  Parameters
  ----------
  points: list
    List of Point objects.
  Returns
  -------
  pavg: Point object
    Has coordinates equal to the arithmetic mean of those of the input
    points (the origin if points is empty).
  """
  # Accumulate in float so fractional coordinates are not truncated to int.
  coords = np.zeros(3)
  for point in points:
    coords += point.as_array()
  if len(points) > 0:
    return Point(coords=coords / len(points))
  else:
    return Point(coords=coords)
class Point(object):
"""
Simple implementation for a point in 3-space.
"""
def __init__(self, x=None, y=None, z=None, coords=None):
"""
Inputs can be specified either by explicitly providing x, y, z coords
or by providing a numpy array of length 3.
Parameters
----------
x: float
X-coord.
y: float
Y-coord.
z: float
Z-coord.
coords: np.ndarray
Should be of length 3 in format np.array([x, y, z])
Raises
------
ValueError: If no arguments are provided.
"""
    if x is not None and y is not None and z is not None:
      # Explicit None checks so that zero-valued coordinates are accepted.
      self.coords = np.array([x, y, z])
    elif coords is not None:  # Implicit eval doesn't work on numpy arrays.
      self.coords = coords
    else:
      raise ValueError("Must specify coordinates for Point!")
# TODO(bramsundar): Should this be __copy__?
def copy_of(self):
"""Return a copy of this point."""
return Point(coords=np.copy(self.coords))
def dist_to(self, point):
"""Distance (in 2-norm) from this point to another."""
return np.linalg.norm(self.coords - point.coords)
def magnitude(self):
"""Magnitude of this point (in 2-norm)."""
return np.linalg.norm(self.coords)
#return self.dist_to(Point(coords=np.array([0, 0, 0])))
def as_array(self):
"""Return the coordinates of this point as array."""
#return np.array([self.x, self.y, self.z])
return self.coords
class Atom(object):
"""
Implements a container class for atoms. This class contains useful
annotations about the atom.
"""
def __init__(self,
atomname="",
residue="",
               coordinates=None,
element="",
pdb_index="",
line="",
atomtype="",
indices_of_atoms_connecting=None,
charge=0,
resid=0,
chain="",
structure="",
comment=""):
"""
Initializes an atom.
Assumes that atom is loaded from a PDB file.
Parameters
----------
atomname: string
      Name of atom. Note that atomname is not the same as element, since
      atomnames often have extra annotations (e.g., CG, NZ, etc).
    residue: string
Name of protein residue this atom belongs to.
element: string
Name of atom's element.
    coordinates: Point
A point object (x, y, z are in Angstroms).
pdb_index: string
Index of the atom in source PDB file.
line: string
The line in the PDB file which specifies this atom.
atomtype: string
Element of atom. This differs from atomname which typically has extra
annotations (e.g. CA, OA, HD, etc)
    indices_of_atoms_connecting: list
The indices (in a PDB object) of all atoms bonded to this one.
charge: float
Associated electrostatic charge.
resid: int
The residue number in the receptor (listing the protein as a chain from
N-Terminus to C-Terminus). Assumes this is a protein atom.
chain: string
Chain identifier for molecule. See PDB spec.
structure: string
One of ALPHA, BETA, or OTHER for the type of protein secondary
structure this atom resides in (assuming this is a receptor atom).
comment: string
Either LIGAND or RECEPTOR depending on whether this is a ligand or
receptor atom.
"""
self.atomname = atomname
self.residue = residue
    if coordinates is None:
      # Avoid a shared mutable default argument; the 99999s mark the
      # coordinates as "unset".
      coordinates = Point(coords=np.array([99999, 99999, 99999]))
    self.coordinates = coordinates
self.element = element
self.pdb_index = pdb_index
self.line = line
self.atomtype = atomtype
if indices_of_atoms_connecting is not None:
self.indices_of_atoms_connecting = indices_of_atoms_connecting
else:
self.indices_of_atoms_connecting = []
self.charge = charge
self.resid = resid
self.chain = chain
self.structure = structure
self.comment = comment
def copy_of(self):
"""Make a copy of this atom."""
theatom = Atom()
theatom.atomname = self.atomname
theatom.residue = self.residue
theatom.coordinates = self.coordinates.copy_of()
theatom.element = self.element
theatom.pdb_index = self.pdb_index
theatom.line = self.line
theatom.atomtype = self.atomtype
theatom.indices_of_atoms_connecting = self.indices_of_atoms_connecting[:]
theatom.charge = self.charge
theatom.resid = self.resid
theatom.chain = self.chain
theatom.structure = self.structure
theatom.comment = self.comment
return theatom
def create_pdb_line(self, index):
"""
Generates appropriate ATOM line for pdb file.
Parameters
----------
index: int
Index in associated PDB file.
"""
output = "ATOM "
output = (
output + str(index).rjust(6) + self.atomname.rjust(5) +
self.residue.rjust(4) + self.chain.rjust(2) + str(self.resid).rjust(4))
coords = self.coordinates.as_array() # [x, y, z]
output = output + ("%.3f" % coords[0]).rjust(12)
output = output + ("%.3f" % coords[1]).rjust(8)
output = output + ("%.3f" % coords[2]).rjust(8)
output = output + self.element.rjust(24)
return output
def number_of_neighbors(self):
"""Reports number of neighboring atoms."""
return len(self.indices_of_atoms_connecting)
def add_neighbor_atom_indices(self, indices):
"""
Adds atoms with provided PDB indices as neighbors.
Parameters
----------
index: list
List of indices of neighbors in PDB object.
"""
for index in indices:
if index not in self.indices_of_atoms_connecting:
self.indices_of_atoms_connecting.append(index)
def side_chain_or_backbone(self):
"""Determine whether receptor atom belongs to residue sidechain or backbone.
"""
# TODO(rbharath): Should this be an atom function?
if (self.atomname.strip() == "CA" or self.atomname.strip() == "C" or
self.atomname.strip() == "O" or self.atomname.strip() == "N"):
return "BACKBONE"
else:
return "SIDECHAIN"
def read_atom_pdb_line(self, line):
"""
TODO(rbharath): This method probably belongs in the PDB class, and not
in the Atom class.
Reads an ATOM or HETATM line from PDB and instantiates fields.
Atoms in PDBs are represented by ATOM or HETATM statements. ATOM and
HETATM statements follow the following record format:
(see ftp://ftp.wwpdb.org/pub/pdb/doc/format_descriptions/Format_v33_Letter.pdf)
COLUMNS DATA TYPE FIELD DEFINITION
-------------------------------------------------------------------------------------
1 - 6 Record name "ATOM "/"HETATM"
7 - 11 Integer serial Atom serial number.
13 - 16 Atom name Atom name.
17 Character altLoc Alternate location indicator.
18 - 20 Residue name resName Residue name.
22 Character chainID Chain identifier.
23 - 26 Integer resSeq Residue sequence number.
27 AChar iCode Code for insertion of residues.
31 - 38 Real(8.3) x Orthogonal coordinates for X in Angstroms.
39 - 46 Real(8.3) y Orthogonal coordinates for Y in Angstroms.
47 - 54 Real(8.3) z Orthogonal coordinates for Z in Angstroms.
55 - 60 Real(6.2) occupancy Occupancy.
61 - 66 Real(6.2) tempFactor Temperature factor.
77 - 78 LString(2) element Element symbol, right-justified.
79 - 80 LString(2) charge Charge on the atom.
"""
self.line = line
self.atomname = line[11:16].strip()
if len(self.atomname) == 1:
self.atomname = self.atomname + " "
elif len(self.atomname) == 2:
self.atomname = self.atomname + " "
elif len(self.atomname) == 3:
# This line is necessary for babel to work, though many PDBs in
# the PDB would have this line commented out
self.atomname = self.atomname + " "
self.coordinates = Point(coords=np.array(
[float(line[30:38]), float(line[38:46]), float(line[46:54])]))
# now atom type (for pdbqt)
if line[77:79].strip():
self.atomtype = line[77:79].strip().upper()
elif self.atomname:
# If atomtype is not specified, but atomname is, set atomtype to the
# first letter of atomname. This heuristic suffices for proteins,
# since no two-letter elements appear in standard amino acids.
self.atomtype = self.atomname[:1]
else:
self.atomtype = ""
if line[69:76].strip() != "":
self.charge = float(line[69:76])
else:
self.charge = 0.0
if self.element == "": # try to guess at element from name
two_letters = self.atomname[0:2].strip().upper()
valid_two_letters = [
"BR", "CL", "BI", "AS", "AG", "LI", "HG", "MG", "MN", "RH", "ZN", "FE"
]
if two_letters in valid_two_letters:
self.element = two_letters
      else:  # So, just assume it's the first letter.
        # Any number (or stray marker) needs to be removed from the element
        # name before taking the leading letter.
        self.element = self.atomname
        for char in "0123456789@":
          self.element = self.element.replace(char, "")
        self.element = self.element[0:1].strip().upper()
self.pdb_index = line[6:12].strip()
self.residue = line[16:20]
# this only uses the rightmost three characters, essentially
# removing unique rotamer identification
self.residue = " " + self.residue[-3:]
if line[23:26].strip() != "":
self.resid = int(line[23:26])
else:
self.resid = 1
self.chain = line[21:22]
if self.residue.strip() == "":
self.residue = " MOL"
class Charged(object):
"""
  A class that represents a charged atom.
"""
def __init__(self, coordinates, indices, positive):
"""
Parameters
----------
coordinates: point
Coordinates of atom.
    indices: list
      Indices (in the parent PDB object) of the atoms that make up this
      charged group.
positive: bool
Whether this atom is positive or negative.
"""
self.coordinates = coordinates
self.indices = indices
self.positive = positive
def vector_subtraction(point1, point2): # point1 - point2
"""Subtracts the coordinates of the provided points."""
return Point(coords=point1.as_array() - point2.as_array())
def cross_product(point1, point2): # never tested
"""Calculates the cross-product of provided points."""
return Point(coords=np.cross(point1.as_array(), point2.as_array()))
def vector_scalar_multiply(point, scalar):
"""Multiplies the provided point by scalar."""
return Point(coords=scalar * point.as_array())
def dot_product(point1, point2):
"""Dot product of points."""
return np.dot(point1.as_array(), point2.as_array())
def dihedral(point1, point2, point3, point4): # never tested
"""Compute dihedral angle between 4 points.
TODO(rbharath): Write a nontrivial test for this.
"""
b1 = vector_subtraction(point2, point1)
b2 = vector_subtraction(point3, point2)
b3 = vector_subtraction(point4, point3)
b2Xb3 = cross_product(b2, b3)
b1Xb2 = cross_product(b1, b2)
b1XMagb2 = vector_scalar_multiply(b1, b2.magnitude())
radians = math.atan2(dot_product(b1XMagb2, b2Xb3), dot_product(b1Xb2, b2Xb3))
return radians
def angle_between_three_points(point1, point2, point3):
"""Computes the angle (in radians) between the three provided points."""
return angle_between_points(
vector_subtraction(point1, point2), vector_subtraction(point3, point2))
def angle_between_points(point1, point2):
  """Computes the angle (in radians) between two vectors (given as Points)."""
  cos_theta = dot_product(point1, point2) / (
      point1.magnitude() * point2.magnitude())
  # Clamp to [-1, 1] to guard against floating-point round-off before acos.
  return math.acos(max(-1.0, min(1.0, cos_theta)))
def normalized_vector(point):
"""Normalize provided point."""
return Point(coords=point.as_array() / np.linalg.norm(point.as_array()))
def distance(point1, point2):
"""Computes distance between two points."""
return point1.dist_to(point2)
def project_point_onto_plane(point, plane_coefficients):
"""Finds nearest point on specified plane to given point.
Parameters
----------
point: Point
Given point
plane_coefficients: list
[a, b, c, d] where place equation is ax + by + cz = d
"""
# The normal vector to plane is n = [a, b, c]
offset = plane_coefficients[3]
normal = np.array(plane_coefficients[:3])
# We first shift by basepoint (a point on given plane) to make math
# simpler. basepoint is given by d/||n||^2 * n
basepoint = (offset / np.linalg.norm(normal)**2) * normal
diff = point.as_array() - basepoint
# The perpendicular component of diff to plane is
# (n^T diff / ||n||^2) * n
perp = (np.dot(normal, diff) / np.linalg.norm(normal)**2) * normal
closest = basepoint + (diff - perp)
return Point(coords=np.array(closest))
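# Quick sanity check (illustrative): projecting (1, 2, 5) onto the plane z = 1
# (coefficients [0, 0, 1, 1]) yields (1, 2, 1):
#   p = project_point_onto_plane(
#       Point(coords=np.array([1.0, 2.0, 5.0])), [0, 0, 1, 1])
#   p.as_array()  # -> array([1., 2., 1.])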
|
joegomes/deepchem
|
deepchem/feat/nnscore_utils.py
|
Python
|
mit
| 17,214
|
[
"RDKit"
] |
2287a9f326898ad09dd35ed190af88519891ad993895e83f025272791b8b7dae
|