""" Metrics for Symmetric Diffeomorphic Registration """
from __future__ import print_function
import abc
import numpy as np
from scipy import ndimage
from dipy.utils.six import with_metaclass
from dipy.align import vector_fields as vfu
from dipy.align import sumsqdiff as ssd
from dipy.align import crosscorr as cc
from dipy.align import expectmax as em
from dipy.align import floating
class SimilarityMetric(with_metaclass(abc.ABCMeta, object)):
def __init__(self, dim):
r""" Similarity Metric abstract class
A similarity metric is in charge of keeping track of the numerical
value of the similarity (or distance) between the two given images. It
also computes the update field for the forward and inverse displacement
fields to be used in a gradient-based optimization algorithm. Note that
this metric does not depend on any transformation (affine or
non-linear), so it assumes the static and moving images are already
warped.
Parameters
----------
dim : int (either 2 or 3)
the dimension of the image domain
"""
self.dim = dim
self.levels_above = None
self.levels_below = None
self.static_image = None
self.static_affine = None
self.static_spacing = None
self.static_direction = None
self.moving_image = None
self.moving_affine = None
self.moving_spacing = None
self.moving_direction = None
self.mask0 = False
def set_levels_below(self, levels):
r"""Informs the metric how many pyramid levels are below the current one
Informs this metric the number of pyramid levels below the current one.
The metric may change its behavior (e.g. number of inner iterations)
accordingly
Parameters
----------
levels : int
the number of levels below the current Gaussian Pyramid level
"""
self.levels_below = levels
def set_levels_above(self, levels):
r"""Informs the metric how many pyramid levels are above the current one
Informs this metric the number of pyramid levels above the current one.
The metric may change its behavior (e.g. number of inner iterations)
accordingly
Parameters
----------
levels : int
the number of levels above the current Gaussian Pyramid level
"""
self.levels_above = levels
def set_static_image(self, static_image, static_affine, static_spacing,
static_direction):
r"""Sets the static image being compared against the moving one.
Sets the static image. The default behavior (of this abstract class) is
simply to assign the reference to an attribute, but
generalizations of the metric may need to perform other operations
Parameters
----------
static_image : array, shape (R, C) or (S, R, C)
the static image
"""
self.static_image = static_image
self.static_affine = static_affine
self.static_spacing = static_spacing
self.static_direction = static_direction
def use_static_image_dynamics(self, original_static_image, transformation):
r"""This is called by the optimizer just after setting the static image.
This method allows the metric to compute any useful
information from knowing how the current static image was generated
(as the transformation of an original static image). This method is
called by the optimizer just after it sets the static image.
Transformation will be an instance of DiffeomorphicMap or None
if the original_static_image equals self.static_image.
Parameters
----------
original_static_image : array, shape (R, C) or (S, R, C)
original image from which the current static image was generated
transformation : DiffeomorphicMap object
the transformation that was applied to original image to generate
the current static image
"""
pass
def set_moving_image(self, moving_image, moving_affine, moving_spacing,
moving_direction):
r"""Sets the moving image being compared against the static one.
Sets the moving image. The default behavior (of this abstract class) is
simply to assign the reference to an attribute, but
generalizations of the metric may need to perform other operations
Parameters
----------
moving_image : array, shape (R, C) or (S, R, C)
the moving image
"""
self.moving_image = moving_image
self.moving_affine = moving_affine
self.moving_spacing = moving_spacing
self.moving_direction = moving_direction
def use_moving_image_dynamics(self, original_moving_image, transformation):
r"""This is called by the optimizer just after setting the moving image
This method allows the metric to compute any useful
information from knowing how the current moving image was generated
(as the transformation of an original moving image). This method is
called by the optimizer just after it sets the moving image.
Transformation will be an instance of DiffeomorphicMap or None if
the original_moving_image equals self.moving_image.
Parameters
----------
original_moving_image : array, shape (R, C) or (S, R, C)
original image from which the current moving image was generated
transformation : DiffeomorphicMap object
the transformation that was applied to original image to generate
the current moving image
"""
pass
@abc.abstractmethod
def initialize_iteration(self):
r"""Prepares the metric to compute one displacement field iteration.
This method will be called before any compute_forward or
compute_backward call; this allows the metric to pre-compute any useful
information for speeding up the update computations. This
initialization was needed in ANTS because the updates are computed once
per voxel. In Python that is impractical, so useful quantities are
instead pre-computed once per iteration here.
"""
@abc.abstractmethod
def free_iteration(self):
r"""Releases the resources no longer needed by the metric
This method is called by the RegistrationOptimizer after the required
iterations have been computed (forward and / or backward) so that the
SimilarityMetric can safely delete any data it computed as part of the
initialization
"""
@abc.abstractmethod
def compute_forward(self):
r"""Computes one step bringing the reference image towards the static.
Computes the forward update field to register the moving image towards
the static image in a gradient-based optimization algorithm
"""
@abc.abstractmethod
def compute_backward(self):
r"""Computes one step bringing the static image towards the moving.
Computes the backward update field to register the static image towards
the moving image in a gradient-based optimization algorithm
"""
@abc.abstractmethod
def get_energy(self):
r"""Numerical value assigned by this metric to the current image pair
Must return the numeric value of the similarity between the given
static and moving images
"""
class CCMetric(SimilarityMetric):
def __init__(self, dim, sigma_diff=2.0, radius=4):
r"""Normalized Cross-Correlation Similarity metric.
Parameters
----------
dim : int (either 2 or 3)
the dimension of the image domain
sigma_diff : float
the standard deviation of the Gaussian smoothing kernel to be applied
to the update field at each iteration
radius : int
the radius of the square (cubic in 3D) neighborhood at each voxel
over which the cross correlation is computed
"""
super(CCMetric, self).__init__(dim)
self.sigma_diff = sigma_diff
self.radius = radius
self._connect_functions()
def _connect_functions(self):
r"""Assign the methods to be called according to the image dimension
Assigns the appropriate functions to be called for precomputing the
cross-correlation factors according to the dimension of the input
images
"""
if self.dim == 2:
self.precompute_factors = cc.precompute_cc_factors_2d
self.compute_forward_step = cc.compute_cc_forward_step_2d
self.compute_backward_step = cc.compute_cc_backward_step_2d
self.reorient_vector_field = vfu.reorient_vector_field_2d
elif self.dim == 3:
self.precompute_factors = cc.precompute_cc_factors_3d
self.compute_forward_step = cc.compute_cc_forward_step_3d
self.compute_backward_step = cc.compute_cc_backward_step_3d
self.reorient_vector_field = vfu.reorient_vector_field_3d
else:
raise ValueError('CC Metric not defined for dim. %d' % (self.dim))
def initialize_iteration(self):
r"""Prepares the metric to compute one displacement field iteration.
Pre-computes the cross-correlation factors for efficient computation
of the gradient of the Cross Correlation w.r.t. the displacement field.
It also pre-computes the image gradients in the physical space by
re-orienting the gradients in the voxel space using the corresponding
affine transformations.
"""
self.factors = self.precompute_factors(self.static_image,
self.moving_image,
self.radius)
self.factors = np.array(self.factors)
self.gradient_moving = np.empty(
shape=(self.moving_image.shape)+(self.dim,), dtype=floating)
for i, grad in enumerate(np.gradient(self.moving_image)):
self.gradient_moving[..., i] = grad
# Convert moving image's gradient field from voxel to physical space
if self.moving_spacing is not None:
self.gradient_moving /= self.moving_spacing
if self.moving_direction is not None:
self.reorient_vector_field(self.gradient_moving,
self.moving_direction)
self.gradient_static = np.empty(
shape=(self.static_image.shape)+(self.dim,), dtype=floating)
for i, grad in enumerate(np.gradient(self.static_image)):
self.gradient_static[..., i] = grad
# Convert static image's gradient field from voxel to physical space
if self.static_spacing is not None:
self.gradient_static /= self.static_spacing
if self.static_direction is not None:
self.reorient_vector_field(self.gradient_static,
self.static_direction)
def free_iteration(self):
r"""Frees the resources allocated during initialization
"""
del self.factors
del self.gradient_moving
del self.gradient_static
def compute_forward(self):
r"""Computes one step bringing the moving image towards the static.
Computes the update displacement field to be used for registration of
the moving image towards the static image
"""
displacement, self.energy = self.compute_forward_step(
self.gradient_static, self.factors, self.radius)
displacement = np.array(displacement)
for i in range(self.dim):
displacement[..., i] = ndimage.gaussian_filter(
displacement[..., i], self.sigma_diff)
return displacement
def compute_backward(self):
r"""Computes one step bringing the static image towards the moving.
Computes the update displacement field to be used for registration of
the static image towards the moving image
"""
displacement, energy = self.compute_backward_step(self.gradient_moving,
self.factors,
self.radius)
displacement = np.array(displacement)
for i in range(self.dim):
displacement[..., i] = ndimage.gaussian_filter(
displacement[..., i], self.sigma_diff)
return displacement
def get_energy(self):
r"""Numerical value assigned by this metric to the current image pair
Returns the Cross Correlation (data term) energy computed at the
most recent iteration
"""
return self.energy
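# Illustrative usage sketch (added, not part of the original module): one
# CCMetric iteration on synthetic 2D images. None is passed for the affine,
# spacing and direction, so no physical-space rescaling or reorientation is
# applied (see initialize_iteration above).
def _example_cc_metric_step():
    static = np.random.rand(32, 32).astype(floating)
    moving = np.random.rand(32, 32).astype(floating)
    metric = CCMetric(2)
    metric.set_static_image(static, None, None, None)
    metric.set_moving_image(moving, None, None, None)
    metric.initialize_iteration()
    fwd = metric.compute_forward()   # update field, shape (32, 32, 2)
    bwd = metric.compute_backward()
    energy = metric.get_energy()
    metric.free_iteration()
    return fwd, bwd, energy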
class EMMetric(SimilarityMetric):
def __init__(self,
dim,
smooth=1.0,
inner_iter=5,
q_levels=256,
double_gradient=True,
step_type='gauss_newton'):
r"""Expectation-Maximization Metric
Similarity metric based on the Expectation-Maximization algorithm to
handle multi-modal images. The transfer function is modeled as a set of
hidden random variables that are estimated at each iteration of the
algorithm.
Parameters
----------
dim : int (either 2 or 3)
the dimension of the image domain
smooth : float
smoothness parameter, the larger the value the smoother the
deformation field
inner_iter : int
number of iterations to be performed at each level of the multi-
resolution Gauss-Seidel optimization algorithm (this is not the
number of steps per Gaussian Pyramid level, that parameter must
be set for the optimizer, not the metric)
q_levels : int
number of quantization levels (equal to the number of hidden
variables in the EM algorithm)
double_gradient : boolean
if True, the gradient of the expected static image under the moving
modality will be added to the gradient of the moving image,
similarly, the gradient of the expected moving image under the
static modality will be added to the gradient of the static image.
step_type : string ('gauss_newton', 'demons')
the displacement field step to be computed: 'gauss_newton' uses the
multi-resolution Gauss-Seidel solver, while 'demons' uses the EM
Demons step (the solver settings are not used in that case)
"""
super(EMMetric, self).__init__(dim)
self.smooth = smooth
self.inner_iter = inner_iter
self.q_levels = q_levels
self.use_double_gradient = double_gradient
self.step_type = step_type
self.static_image_mask = None
self.moving_image_mask = None
self.staticq_means_field = None
self.movingq_means_field = None
self.movingq_levels = None
self.staticq_levels = None
self._connect_functions()
def _connect_functions(self):
r"""Assign the methods to be called according to the image dimension
Assigns the appropriate functions to be called for image quantization,
statistics computation and multi-resolution iterations according to the
dimension of the input images
"""
if self.dim == 2:
self.quantize = em.quantize_positive_2d
self.compute_stats = em.compute_masked_class_stats_2d
self.reorient_vector_field = vfu.reorient_vector_field_2d
elif self.dim == 3:
self.quantize = em.quantize_positive_3d
self.compute_stats = em.compute_masked_class_stats_3d
self.reorient_vector_field = vfu.reorient_vector_field_3d
else:
raise ValueError('EM Metric not defined for dim. %d' % (self.dim))
if self.step_type == 'demons':
self.compute_step = self.compute_demons_step
elif self.step_type == 'gauss_newton':
self.compute_step = self.compute_gauss_newton_step
else:
raise ValueError('Opt. step %s not defined' % (self.step_type))
def initialize_iteration(self):
r"""Prepares the metric to compute one displacement field iteration.
Pre-computes the transfer functions (hidden random variables) and
variances of the estimators. Also pre-computes the gradient of both
input images. Note that once the images are transformed to the opposite
modality, the gradient of the transformed images can be used with the
gradient of the corresponding modality in the same fashion as
diff-demons does for mono-modality images. If the flag
self.use_double_gradient is True these gradients are averaged.
"""
sampling_mask = self.static_image_mask*self.moving_image_mask
self.sampling_mask = sampling_mask
staticq, self.staticq_levels, hist = self.quantize(self.static_image,
self.q_levels)
staticq = np.array(staticq, dtype=np.int32)
self.staticq_levels = np.array(self.staticq_levels)
staticq_means, staticq_vars = self.compute_stats(sampling_mask,
self.moving_image,
self.q_levels,
staticq)
staticq_means[0] = 0
self.staticq_means = np.array(staticq_means)
self.staticq_variances = np.array(staticq_vars)
self.staticq_sigma_sq_field = self.staticq_variances[staticq]
self.staticq_means_field = self.staticq_means[staticq]
self.gradient_moving = np.empty(
shape=(self.moving_image.shape)+(self.dim,), dtype=floating)
for i, grad in enumerate(np.gradient(self.moving_image)):
self.gradient_moving[..., i] = grad
# Convert moving image's gradient field from voxel to physical space
if self.moving_spacing is not None:
self.gradient_moving /= self.moving_spacing
if self.moving_direction is not None:
self.reorient_vector_field(self.gradient_moving,
self.moving_direction)
self.gradient_static = np.empty(
shape=(self.static_image.shape)+(self.dim,), dtype=floating)
for i, grad in enumerate(np.gradient(self.static_image)):
self.gradient_static[..., i] = grad
# Convert static image's gradient field from voxel to physical space
if self.static_spacing is not None:
self.gradient_static /= self.static_spacing
if self.static_direction is not None:
self.reorient_vector_field(self.gradient_static,
self.static_direction)
movingq, self.movingq_levels, hist = self.quantize(self.moving_image,
self.q_levels)
movingq = np.array(movingq, dtype=np.int32)
self.movingq_levels = np.array(self.movingq_levels)
movingq_means, movingq_variances = self.compute_stats(
sampling_mask, self.static_image, self.q_levels, movingq)
movingq_means[0] = 0
self.movingq_means = np.array(movingq_means)
self.movingq_variances = np.array(movingq_variances)
self.movingq_sigma_sq_field = self.movingq_variances[movingq]
self.movingq_means_field = self.movingq_means[movingq]
if self.use_double_gradient:
for i, grad in enumerate(np.gradient(self.staticq_means_field)):
self.gradient_moving[..., i] += grad
for i, grad in enumerate(np.gradient(self.movingq_means_field)):
self.gradient_static[..., i] += grad
def free_iteration(self):
r"""
Frees the resources allocated during initialization
"""
del self.sampling_mask
del self.staticq_levels
del self.movingq_levels
del self.staticq_sigma_sq_field
del self.staticq_means_field
del self.movingq_sigma_sq_field
del self.movingq_means_field
del self.gradient_moving
del self.gradient_static
def compute_forward(self):
"""Computes one step bringing the reference image towards the static.
Computes the forward update field to register the moving image towards
the static image in a gradient-based optimization algorithm
"""
return self.compute_step(True)
def compute_backward(self):
r"""Computes one step bringing the static image towards the moving.
Computes the update displacement field to be used for registration of
the static image towards the moving image
"""
return self.compute_step(False)
def compute_gauss_newton_step(self, forward_step=True):
r"""Computes the Gauss-Newton energy minimization step
Computes the Newton step to minimize this energy, i.e., minimizes the
linearized energy function with respect to the
regularized displacement field (this step does not require
post-smoothing, as opposed to the demons step, which does not include
regularization). To accelerate convergence we use the multi-grid
Gauss-Seidel algorithm proposed by Bruhn and Weickert [Bruhn05]
Parameters
----------
forward_step : boolean
if True, computes the Newton step in the forward direction
(warping the moving towards the static image). If False,
computes the backward step (warping the static image to the
moving image)
Returns
-------
displacement : array, shape (R, C, 2) or (S, R, C, 3)
the Newton step
References
----------
[Bruhn05] Andres Bruhn and Joachim Weickert, "Towards ultimate motion
estimation: combining highest accuracy with real-time
performance", 10th IEEE International Conference on Computer
Vision, 2005. ICCV 2005.
"""
reference_shape = self.static_image.shape
if forward_step:
gradient = self.gradient_static
delta = self.staticq_means_field - self.moving_image
sigma_sq_field = self.staticq_sigma_sq_field
else:
gradient = self.gradient_moving
delta = self.movingq_means_field - self.static_image
sigma_sq_field = self.movingq_sigma_sq_field
displacement = np.zeros(shape=(reference_shape)+(self.dim,),
dtype=floating)
if self.dim == 2:
self.energy = v_cycle_2d(self.levels_below,
self.inner_iter, delta,
sigma_sq_field,
gradient,
None,
self.smooth,
displacement)
else:
self.energy = v_cycle_3d(self.levels_below,
self.inner_iter, delta,
sigma_sq_field,
gradient,
None,
self.smooth,
displacement)
return displacement
def compute_demons_step(self, forward_step=True):
r"""Demons step for EM metric
Parameters
----------
forward_step : boolean
if True, computes the Demons step in the forward direction
(warping the moving towards the static image). If False,
computes the backward step (warping the static image to the
moving image)
Returns
-------
displacement : array, shape (R, C, 2) or (S, R, C, 3)
the Demons step
"""
sigma_reg_2 = np.sum(self.static_spacing**2)/self.dim
if forward_step:
gradient = self.gradient_static
delta_field = self.static_image - self.movingq_means_field
sigma_sq_field = self.movingq_sigma_sq_field
else:
gradient = self.gradient_moving
delta_field = self.moving_image - self.staticq_means_field
sigma_sq_field = self.staticq_sigma_sq_field
if self.dim == 2:
step, self.energy = em.compute_em_demons_step_2d(delta_field,
sigma_sq_field,
gradient,
sigma_reg_2,
None)
else:
step, self.energy = em.compute_em_demons_step_3d(delta_field,
sigma_sq_field,
gradient,
sigma_reg_2,
None)
for i in range(self.dim):
step[..., i] = ndimage.gaussian_filter(step[..., i],
self.smooth)
return step
def get_energy(self):
r"""The numerical value assigned by this metric to the current image pair
Returns the EM (data term) energy computed at the most recent
iteration
"""
return self.energy
def use_static_image_dynamics(self, original_static_image, transformation):
r"""This is called by the optimizer just after setting the static image.
EMMetric takes advantage of the image dynamics by computing the
current static image mask from the original_static_image mask (warped
by nearest neighbor interpolation)
Parameters
----------
original_static_image : array, shape (R, C) or (S, R, C)
the original static image from which the current static image was
generated, the current static image is the one that was provided
via 'set_static_image(...)', which may not be the same as the
original static image but a warped version of it (even the static
image changes during Symmetric Normalization, not only the moving
one).
transformation : DiffeomorphicMap object
the transformation that was applied to the original_static_image
to generate the current static image
"""
self.static_image_mask = (original_static_image > 0).astype(np.int32)
if transformation is None:
return
shape = np.array(self.static_image.shape, dtype=np.int32)
affine = self.static_affine
self.static_image_mask = transformation.transform(
self.static_image_mask, 'nearest', None, shape, affine)
def use_moving_image_dynamics(self, original_moving_image, transformation):
r"""This is called by the optimizer just after setting the moving image.
EMMetric takes advantage of the image dynamics by computing the
current moving image mask from the original_moving_image mask (warped
by nearest neighbor interpolation)
Parameters
----------
original_moving_image : array, shape (R, C) or (S, R, C)
the original moving image from which the current moving image was
generated, the current moving image is the one that was provided
via 'set_moving_image(...)', which may not be the same as the
original moving image but a warped version of it.
transformation : DiffeomorphicMap object
the transformation that was applied to the original_moving_image
to generate the current moving image
"""
self.moving_image_mask = (original_moving_image > 0).astype(np.int32)
if transformation is None:
return
shape = np.array(self.moving_image.shape, dtype=np.int32)
affine = self.moving_affine
self.moving_image_mask = transformation.transform(
self.moving_image_mask, 'nearest', None, shape, affine)
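# Illustrative numpy sketch (added, not part of the original module) of the
# transfer function that EMMetric.initialize_iteration estimates: quantize
# one image into q_levels bins and compute, per bin, the mean and variance
# of the other image over a sampling mask. This mirrors what
# em.quantize_positive_* and em.compute_masked_class_stats_* do, at a high
# level only.
def _example_em_transfer_function(static, moving, mask, q_levels=256):
    lo, hi = static.min(), static.max()
    scale = (q_levels - 1) / max(hi - lo, 1e-9)
    labels = ((static - lo) * scale).astype(np.int32)
    means = np.zeros(q_levels)
    variances = np.ones(q_levels)
    for q in range(q_levels):
        sel = (labels == q) & (mask > 0)
        if sel.any():
            means[q] = moving[sel].mean()
            variances[q] = moving[sel].var()
    # means[labels] is the expected static image under the moving modality
    return means[labels], variances[labels]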
class SSDMetric(SimilarityMetric):
def __init__(self, dim, smooth=4, inner_iter=10, step_type='demons'):
r"""Sum of Squared Differences (SSD) Metric
Similarity metric for (mono-modal) nonlinear image registration defined
by the sum of squared differences (SSD)
Parameters
----------
dim : int (either 2 or 3)
the dimension of the image domain
smooth : float
smoothness parameter, the larger the value the smoother the
deformation field
inner_iter : int
number of iterations to be performed at each level of the multi-
resolution Gauss-Seidel optimization algorithm (this is not the
number of steps per Gaussian Pyramid level, that parameter must
be set for the optimizer, not the metric)
step_type : string
the displacement field step to be computed when 'compute_forward'
and 'compute_backward' are called. Either 'demons' or
'gauss_newton'
"""
super(SSDMetric, self).__init__(dim)
self.smooth = smooth
self.inner_iter = inner_iter
self.step_type = step_type
self.levels_below = 0
self._connect_functions()
def _connect_functions(self):
r"""Assign the methods to be called according to the image dimension
Assigns the appropriate functions to be called for vector field
reorientation and displacement field steps according to the
dimension of the input images and the selected type of step (either
Demons or Gauss-Newton)
"""
if self.dim == 2:
self.reorient_vector_field = vfu.reorient_vector_field_2d
elif self.dim == 3:
self.reorient_vector_field = vfu.reorient_vector_field_3d
else:
raise ValueError('SSD Metric not defined for dim. %d' % (self.dim))
if self.step_type == 'gauss_newton':
self.compute_step = self.compute_gauss_newton_step
elif self.step_type == 'demons':
self.compute_step = self.compute_demons_step
else:
raise ValueError('Opt. step %s not defined' % (self.step_type))
def initialize_iteration(self):
r"""Prepares the metric to compute one displacement field iteration.
Pre-computes the gradient of the input images to be used in the
computation of the forward and backward steps.
"""
self.gradient_moving = np.empty(
shape=(self.moving_image.shape)+(self.dim,), dtype=floating)
for i, grad in enumerate(np.gradient(self.moving_image)):
self.gradient_moving[..., i] = grad
# Convert moving image's gradient field from voxel to physical space
if self.moving_spacing is not None:
self.gradient_moving /= self.moving_spacing
if self.moving_direction is not None:
self.reorient_vector_field(self.gradient_moving,
self.moving_direction)
self.gradient_static = np.empty(
shape=(self.static_image.shape)+(self.dim,), dtype=floating)
for i, grad in enumerate(np.gradient(self.static_image)):
self.gradient_static[..., i] = grad
# Convert static image's gradient field from voxel to physical space
if self.static_spacing is not None:
self.gradient_static /= self.static_spacing
if self.static_direction is not None:
self.reorient_vector_field(self.gradient_static,
self.static_direction)
def compute_forward(self):
r"""Computes one step bringing the reference image towards the static.
Computes the update displacement field to be used for registration of
the moving image towards the static image
"""
return self.compute_step(True)
def compute_backward(self):
r"""Computes one step bringing the static image towards the moving.
Computes the update displacement field to be used for registration of
the static image towards the moving image
"""
return self.compute_step(False)
def compute_gauss_newton_step(self, forward_step=True):
r"""Computes the Gauss-Newton energy minimization step
Minimizes the linearized energy function (Newton step) defined by the
sum of squared differences of corresponding pixels of the input images
with respect to the displacement field.
Parameters
----------
forward_step : boolean
if True, computes the Newton step in the forward direction
(warping the moving towards the static image). If False,
computes the backward step (warping the static image to the
moving image)
Returns
-------
displacement : array, shape (R, C, 2) or (S, R, C, 3)
if forward_step == True, the forward SSD Gauss-Newton step,
else, the backward step
"""
reference_shape = self.static_image.shape
if forward_step:
gradient = self.gradient_static
delta_field = self.static_image-self.moving_image
else:
gradient = self.gradient_moving
delta_field = self.moving_image - self.static_image
displacement = np.zeros(shape=(reference_shape)+(self.dim,),
dtype=floating)
if self.dim == 2:
self.energy = v_cycle_2d(self.levels_below, self.inner_iter,
delta_field, None, gradient, None,
self.smooth, displacement)
else:
self.energy = v_cycle_3d(self.levels_below, self.inner_iter,
delta_field, None, gradient, None,
self.smooth, displacement)
return displacement
def compute_demons_step(self, forward_step=True):
r"""Demons step for SSD metric
Computes the demons step proposed by Vercauteren et al. [Vercauteren09]
for the SSD metric.
Parameters
----------
forward_step : boolean
if True, computes the Demons step in the forward direction
(warping the moving towards the static image). If False,
computes the backward step (warping the static image to the
moving image)
Returns
-------
displacement : array, shape (R, C, 2) or (S, R, C, 3)
the Demons step
References
----------
[Vercauteren09] Tom Vercauteren, Xavier Pennec, Aymeric Perchant,
Nicholas Ayache, "Diffeomorphic Demons: Efficient
Non-parametric Image Registration", Neuroimage 2009
"""
sigma_reg_2 = np.sum(self.static_spacing**2)/self.dim
if forward_step:
gradient = self.gradient_static
delta_field = self.static_image - self.moving_image
else:
gradient = self.gradient_moving
delta_field = self.moving_image - self.static_image
if self.dim == 2:
step, self.energy = ssd.compute_ssd_demons_step_2d(delta_field,
gradient,
sigma_reg_2,
None)
else:
step, self.energy = ssd.compute_ssd_demons_step_3d(delta_field,
gradient,
sigma_reg_2,
None)
for i in range(self.dim):
step[..., i] = ndimage.gaussian_filter(step[..., i],
self.smooth)
return step
def get_energy(self):
r"""The numerical value assigned by this metric to the current image pair
Returns the Sum of Squared Differences (data term) energy computed at
the most recent iteration
"""
return self.energy
def free_iteration(self):
r"""
Nothing to free for the SSD metric
"""
pass
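# Illustrative numpy sketch (added, not part of the original module) of the
# per-voxel SSD Demons update that ssd.compute_ssd_demons_step_2d/3d
# implement following [Vercauteren09]:
#     step = delta * grad / (||grad||**2 + delta**2 / sigma_reg_2)
def _example_ssd_demons_step(delta_field, gradient_field, sigma_reg_2):
    sq_norm = np.sum(gradient_field ** 2, axis=-1)
    denom = sq_norm + delta_field ** 2 / sigma_reg_2
    denom[denom == 0] = 1  # where grad and delta are both zero the step is 0
    factor = delta_field / denom
    return gradient_field * factor[..., None]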
def v_cycle_2d(n, k, delta_field, sigma_sq_field, gradient_field, target,
lambda_param, displacement, depth=0):
r"""Multi-resolution Gauss-Seidel solver using V-type cycles
Multi-resolution Gauss-Seidel solver: solves the Gauss-Newton linear system
by first filtering (GS-iterate) the current level, then solves for the
residual at a coarser resolution and finally refines the solution at the
current resolution. This scheme corresponds to the V-cycle proposed by
Bruhn and Weickert [Bruhn05].
Parameters
----------
n : int
number of levels of the multi-resolution algorithm (it will be called
recursively until level n == 0)
k : int
the number of iterations at each multi-resolution level
delta_field : array, shape (R, C)
the difference between the static and moving image (the 'derivative
w.r.t. time' in the optical flow model)
sigma_sq_field : array, shape (R, C)
the variance of the gray level value at each voxel, according to the
EM model (for SSD, it is 1 for all voxels). Inf and 0 values
are processed specially to support infinite and zero variance.
gradient_field : array, shape (R, C, 2)
the gradient of the moving image
target : array, shape (R, C, 2)
right-hand side of the linear system to be solved in Weickert's
multi-resolution algorithm
lambda_param : float
smoothness parameter, the larger its value the smoother the
displacement field
displacement : array, shape (R, C, 2)
the displacement field to start the optimization from
Returns
-------
energy : the energy of the EM (or SSD if sigma_sq_field[...] == 1) metric at this
iteration
References
----------
[Bruhn05] Andres Bruhn and Joachim Weickert, "Towards ultimate motion
estimation: combining highest accuracy with real-time
performance", 10th IEEE International Conference on Computer
Vision, 2005. ICCV 2005.
"""
# pre-smoothing
for i in range(k):
ssd.iterate_residual_displacement_field_ssd_2d(delta_field,
sigma_sq_field,
gradient_field,
target,
lambda_param,
displacement)
if n == 0:
energy = ssd.compute_energy_ssd_2d(delta_field)
return energy
# solve at coarser grid
residual = ssd.compute_residual_displacement_field_ssd_2d(delta_field,
sigma_sq_field,
gradient_field,
target,
lambda_param,
displacement,
None)
sub_residual = np.array(vfu.downsample_displacement_field_2d(residual))
del residual
subsigma_sq_field = None
if sigma_sq_field is not None:
subsigma_sq_field = vfu.downsample_scalar_field_2d(sigma_sq_field)
subdelta_field = vfu.downsample_scalar_field_2d(delta_field)
subgradient_field = np.array(
vfu.downsample_displacement_field_2d(gradient_field))
shape = np.array(displacement.shape).astype(np.int32)
half_shape = ((shape[0] + 1) // 2, (shape[1] + 1) // 2, 2)
sub_displacement = np.zeros(shape=half_shape,
dtype=floating)
sublambda_param = lambda_param*0.25
v_cycle_2d(n-1, k, subdelta_field, subsigma_sq_field, subgradient_field,
sub_residual, sublambda_param, sub_displacement, depth+1)
displacement += vfu.resample_displacement_field_2d(sub_displacement,
np.array([0.5, 0.5]),
shape)
# post-smoothing
for i in range(k):
ssd.iterate_residual_displacement_field_ssd_2d(delta_field,
sigma_sq_field,
gradient_field,
target,
lambda_param,
displacement)
energy = ssd.compute_energy_ssd_2d(delta_field)
return energy
def v_cycle_3d(n, k, delta_field, sigma_sq_field, gradient_field, target,
lambda_param, displacement, depth=0):
r"""Multi-resolution Gauss-Seidel solver using V-type cycles
Multi-resolution Gauss-Seidel solver: solves the linear system by first
filtering (GS-iterate) the current level, then solves for the residual
at a coarser resolution and finally refines the solution at the current
resolution. This scheme corresponds to the V-cycle proposed by Bruhn and
Weickert [Bruhn05].
Parameters
----------
n : int
number of levels of the multi-resolution algorithm (it will be called
recursively until level n == 0)
k : int
the number of iterations at each multi-resolution level
delta_field : array, shape (S, R, C)
the difference between the static and moving image (the 'derivative
w.r.t. time' in the optical flow model)
sigma_sq_field : array, shape (S, R, C)
the variance of the gray level value at each voxel, according to the
EM model (for SSD, it is 1 for all voxels). Inf and 0 values
are processed specially to support infinite and zero variance.
gradient_field : array, shape (S, R, C, 3)
the gradient of the moving image
target : array, shape (S, R, C, 3)
right-hand side of the linear system to be solved in Weickert's
multi-resolution algorithm
lambda_param : float
smoothness parameter, the larger its value the smoother the
displacement field
displacement : array, shape (S, R, C, 3)
the displacement field to start the optimization from
Returns
-------
energy : the energy of the EM (or SSD if sigma_sq_field[...] == 1) metric at
this iteration
References
----------
[Bruhn05] Andres Bruhn and Joachim Weickert, "Towards ultimate motion
estimation: combining highest accuracy with real-time performance",
10th IEEE International Conference on Computer Vision, 2005. ICCV 2005.
"""
# pre-smoothing
for i in range(k):
ssd.iterate_residual_displacement_field_ssd_3d(delta_field,
sigma_sq_field,
gradient_field,
target,
lambda_param,
displacement)
if n == 0:
energy = ssd.compute_energy_ssd_3d(delta_field)
return energy
# solve at coarser grid
residual = ssd.compute_residual_displacement_field_ssd_3d(delta_field,
sigma_sq_field,
gradient_field,
target,
lambda_param,
displacement,
None)
sub_residual = np.array(vfu.downsample_displacement_field_3d(residual))
del residual
subsigma_sq_field = None
if sigma_sq_field is not None:
subsigma_sq_field = vfu.downsample_scalar_field_3d(sigma_sq_field)
subdelta_field = vfu.downsample_scalar_field_3d(delta_field)
subgradient_field = np.array(
vfu.downsample_displacement_field_3d(gradient_field))
shape = np.array(displacement.shape).astype(np.int32)
sub_displacement = np.zeros(
shape=((shape[0]+1)//2, (shape[1]+1)//2, (shape[2]+1)//2, 3),
dtype=floating)
sublambda_param = lambda_param*0.25
v_cycle_3d(n-1, k, subdelta_field, subsigma_sq_field, subgradient_field,
sub_residual, sublambda_param, sub_displacement, depth+1)
del subdelta_field
del subsigma_sq_field
del subgradient_field
del sub_residual
displacement += vfu.resample_displacement_field_3d(sub_displacement,
0.5 * np.ones(3),
shape)
del sub_displacement
# post-smoothing
for i in range(k):
ssd.iterate_residual_displacement_field_ssd_3d(delta_field,
sigma_sq_field,
gradient_field,
target,
lambda_param,
displacement)
energy = ssd.compute_energy_ssd_3d(delta_field)
return energy
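# Illustrative toy sketch (added, not part of the original module): the same
# V-cycle structure as v_cycle_2d/v_cycle_3d above, written for the 1D
# Poisson problem -u'' = f with zero boundary values. It shows the
# pre-smooth / restrict-residual / recurse / prolong / post-smooth pattern;
# a grid of size 2**m + 1 is assumed.
def _example_v_cycle_1d(n, k, f, u, h):
    def smooth():
        for _ in range(k):  # Gauss-Seidel sweeps
            for i in range(1, u.size - 1):
                u[i] = 0.5 * (u[i - 1] + u[i + 1] + h * h * f[i])
    smooth()
    if n > 0 and u.size >= 5:
        # residual of -u'' = f on the fine grid
        r = np.zeros_like(u)
        r[1:-1] = f[1:-1] - (2 * u[1:-1] - u[:-2] - u[2:]) / (h * h)
        # restrict the residual and solve the error equation on the coarse grid
        rc = r[::2].copy()
        ec = np.zeros_like(rc)
        _example_v_cycle_1d(n - 1, k, rc, ec, 2 * h)
        # prolong (linear interpolation) and apply the coarse correction
        u += np.interp(np.arange(u.size), np.arange(0, u.size, 2), ec)
    smooth()
    return u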
# === End of source file: dipy/align/metrics.py (repo: StongeEtienne/dipy, Python, bsd-3-clause) ===
"""
End-to-end test for cohorted courseware. This uses both Studio and LMS.
"""
import json
from nose.plugins.attrib import attr
from studio.base_studio_test import ContainerBase
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.fixtures import LMS_BASE_URL
from common.test.acceptance.pages.studio.component_editor import ComponentVisibilityEditorView
from common.test.acceptance.pages.lms.instructor_dashboard import InstructorDashboardPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage as LmsAutoAuthPage
from common.test.acceptance.tests.lms.test_lms_user_preview import verify_expected_problem_visibility
from bok_choy.promise import EmptyPromise
from bok_choy.page_object import XSS_INJECTION
@attr(shard=5)
class EndToEndCohortedCoursewareTest(ContainerBase):
"""
End-to-end of cohorted courseware.
"""
def setUp(self, is_staff=True):
super(EndToEndCohortedCoursewareTest, self).setUp(is_staff=is_staff)
self.staff_user = self.user
self.content_group_a = "Content Group A" + XSS_INJECTION
self.content_group_b = "Content Group B" + XSS_INJECTION
# Create a student who will be in "Cohort A"
self.cohort_a_student_username = "cohort_a_student"
self.cohort_a_student_email = "cohort_a_student@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
).visit()
# Create a student who will be in "Cohort B"
self.cohort_b_student_username = "cohort_b_student"
self.cohort_b_student_email = "cohort_b_student@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
).visit()
# Create a student who will end up in the default cohort group
self.cohort_default_student_username = "cohort_default_student"
self.cohort_default_student_email = "cohort_default_student@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_default_student_username,
email=self.cohort_default_student_email, no_login=True
).visit()
# Start logged in as the staff user.
StudioAutoAuthPage(
self.browser, username=self.staff_user["username"], email=self.staff_user["email"]
).visit()
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
self.group_a_problem = 'GROUP A CONTENT'
self.group_b_problem = 'GROUP B CONTENT'
self.group_a_and_b_problem = 'GROUP A AND B CONTENT'
self.visible_to_all_problem = 'VISIBLE TO ALL CONTENT'
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('problem', self.group_a_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_b_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_a_and_b_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.visible_to_all_problem, data='<problem></problem>')
)
)
)
)
def enable_cohorting(self, course_fixture):
"""
Enables cohorting for the current course.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/settings' # pylint: disable=protected-access
data = json.dumps({'is_cohorted': True})
response = course_fixture.session.patch(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to enable cohorts")
def create_content_groups(self):
"""
Creates two content groups in Studio Group Configurations Settings.
"""
group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_configurations_page.visit()
group_configurations_page.create_first_content_group()
config = group_configurations_page.content_groups[0]
config.name = self.content_group_a
config.save()
group_configurations_page.add_content_group()
config = group_configurations_page.content_groups[1]
config.name = self.content_group_b
config.save()
def link_problems_to_content_groups_and_publish(self):
"""
Updates 3 of the 4 existing problems to limit their visibility by content group.
Publishes the modified units.
"""
container_page = self.go_to_unit_page()
def set_visibility(problem_index, content_group, second_content_group=None):
problem = container_page.xblocks[problem_index]
problem.edit_visibility()
if second_content_group:
ComponentVisibilityEditorView(self.browser, problem.locator).select_option(
second_content_group, save=False
)
ComponentVisibilityEditorView(self.browser, problem.locator).select_option(content_group)
set_visibility(1, self.content_group_a)
set_visibility(2, self.content_group_b)
set_visibility(3, self.content_group_a, self.content_group_b)
container_page.publish_action.click()
def create_cohorts_and_assign_students(self):
"""
Adds 2 manual cohorts, linked to content groups, to the course.
Each cohort is assigned one student.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
cohort_management_page = instructor_dashboard_page.select_cohort_management()
def add_cohort_with_student(cohort_name, content_group, student):
cohort_management_page.add_cohort(cohort_name, content_group=content_group)
cohort_management_page.add_students_to_selected_cohort([student])
add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)
def view_cohorted_content_as_different_users(self):
"""
View content as staff, student in Cohort A, student in Cohort B, and student in Default Cohort.
"""
courseware_page = CoursewarePage(self.browser, self.course_id)
def login_and_verify_visible_problems(username, email, expected_problems):
LmsAutoAuthPage(
self.browser, username=username, email=email, course_id=self.course_id
).visit()
courseware_page.visit()
verify_expected_problem_visibility(self, courseware_page, expected_problems)
login_and_verify_visible_problems(
self.staff_user["username"], self.staff_user["email"],
[self.group_a_problem, self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_a_student_username, self.cohort_a_student_email,
[self.group_a_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_b_student_username, self.cohort_b_student_email,
[self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_default_student_username, self.cohort_default_student_email,
[self.visible_to_all_problem]
)
def test_cohorted_courseware(self):
"""
Scenario: Can create content that is only visible to students in particular cohorts
Given that I have a course with 4 problems, 1 staff member, and 3 students
When I enable cohorts in the course
And I create two content groups, Content Group A, and Content Group B, in the course
And I link one problem to Content Group A
And I link one problem to Content Group B
And I link one problem to both Content Group A and Content Group B
And one problem remains unlinked to any Content Group
And I create two manual cohorts, Cohort A and Cohort B,
linked to Content Group A and Content Group B, respectively
And I assign one student to each manual cohort
And one student remains in the default cohort
Then the staff member can see all 4 problems
And the student in Cohort A can see all the problems except the one linked to Content Group B
And the student in Cohort B can see all the problems except the one linked to Content Group A
And the student in the default cohort can only see the problem that is unlinked to any Content Group
"""
self.enable_cohorting(self.course_fixture)
self.create_content_groups()
self.link_problems_to_content_groups_and_publish()
self.create_cohorts_and_assign_students()
self.view_cohorted_content_as_different_users()
# === End of source file: common/test/acceptance/tests/test_cohorted_courseware.py (repo: synergeticsedx/deployment-wipro, Python, agpl-3.0) ===
#!/usr/bin/env python
"""
Loprop model implementation (J. Chem. Phys. 121, 4494 (2004))
"""
import os
import sys
import math
import numpy
from daltools import one, mol, dens, prop, lr, qr, sirifc
from daltools.util import full, blocked, subblocked, timing
#full.matrix.fmt = "%14.6f"
xtang = 0.5291772108
angtx = 1.0/xtang
mc = False
# Bragg-Slater radii (Å) converted from Angstrom to Bohr
rbs = numpy.array([0,
0.25, 0.25,
1.45, 1.05, 0.85, 0.70, 0.65, 0.60, 0.50, 0.45,
1.80, 1.50, 1.25, 1.10, 1.00, 1.00, 1.00, 1.00,
])*angtx
bond_co = { ( 1, 1 ) : 1.2,
( 1, 6 ) : 1.2,
( 1, 7 ) : 1.2,
( 1, 8 ) : 1.2,
( 1, 15 ) : 1.2,
( 6, 6 ) : 1.6,
( 6, 7 ) : 1.6,
( 6, 8 ) : 1.6,
( 6, 15 ) : 1.6,
( 7, 7 ) : 1.6,
( 7, 8 ) : 1.6,
( 7, 15 ) : 1.6,
( 8, 8 ) : 1.6,
( 8, 15 ) : 1.6,
}
# permute dict items in key pairs (list() guards against mutating the dict
# while iterating over it, which raises under Python 3)
for key1, key2 in list(bond_co.keys()):
bond_co[ ( key2, key1 ) ] = bond_co[ (key1, key2) ]
def symmetrize_first_beta( beta ):
# naive solution: transforms matrix B[ (x,y,z) ][ (xx, xy, xz, yy, yz, zz) ] into the
# symmetrized UT array B[ (xxx, xxy, xxz, xyy, xyz, xzz, yyy, yyz, yzz, zzz) ]
new = full.matrix( 10 )
new[0] = beta[0,0]
new[1] = (beta[0,1] + beta[1,0] ) /2
new[2] = (beta[0,2] + beta[2,0] ) /2
new[3] = (beta[0,3] + beta[1,1] ) /2
new[4] = (beta[0,4] + beta[1,2] + beta[2,1] ) /3
new[5] = (beta[0,5] + beta[2,2] ) /2
new[6] = beta[1,3]
new[7] = (beta[1,4] + beta[2,3] ) /2
new[8] = (beta[1,5] + beta[2,4] ) /2
new[9] = beta[2,5]
return new
def penalty_function(alpha=2):
"""Returns function object """
def pf(Za, Ra, Zb, Rb):
"""Inverse half of penalty function defined in Gagliardi"""
from math import exp
ra = rbs[int(round(Za))]
rb = rbs[int(round(Zb))]
xa, ya, za = Ra
xb, yb, zb = Rb
rab2 = (xa - xb)**2 + (ya - yb)**2 + (za - zb)**2
f = 0.5*exp(-alpha*(rab2/(ra+rb)**2))
return f
return pf
def pairs(n):
"""Generate index pairs for triangular packed matrices up to n """
ij = 0
for i in range(n):
for j in range(i+1):
yield (ij, i, j)
ij += 1
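# Usage note (added): pairs(3) yields the packed index together with the
# (row, column) pair of the lower triangle, in this order:
# (0, 0, 0), (1, 1, 0), (2, 1, 1), (3, 2, 0), (4, 2, 1), (5, 2, 2)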
def shift_function(*args):
"""Return value twice max value of F"""
F, = args
return 2*numpy.max(numpy.abs(F))
def header(string):
"""Pretty print header"""
border = '-'*len(string)
print "\n%s\n%s\n%s" % (border, string, border)
def output_beta(beta, dip=None, fmt="%12.6f"):
"""Repeated output format for b(x; yz)"""
print "Hyperpolarizability"
print "beta(:, xx xy xz yy yz zz)"
print "--------------------------"
print "beta(x, *) " + (6*fmt) % tuple(beta[0,:])
print "beta(y, *) " + (6*fmt) % tuple(beta[1,:])
print "beta(z, *) " + (6*fmt) % tuple(beta[2,:])
betakk = beta[:,0] + beta[:, 3] + beta[:, 5]
print "beta(:, kk)" + (3*fmt) % tuple(betakk)
if dip is not None:
betapar = 0.2*(betakk & dip)/dip.norm2()
print "beta//dip " + (fmt) % betapar
class MolFrag:
"""An instance of the MolFrag class is created and populated with
data from a Dalton runtime scratch directory"""
def __init__(
self, tmpdir, max_l=0, pol=0, freqs=None, pf=penalty_function, sf=shift_function, gc=None
):
"""Constructur of MolFrac class objects
input: tmpdir, scratch directory of Dalton calculation
"""
self.max_l = max_l
self.pol = pol
self.tmpdir = tmpdir
if freqs is None:
self.freqs = (0,)
self.nfreqs = 1
else:
self.freqs = freqs
self.nfreqs = len(freqs)
self.rfreqs = range(self.nfreqs)
self.pf = pf
self.sf = sf
self.gc = gc
#
# Dalton files
#
self.aooneint = os.path.join(tmpdir,'AOONEINT')
self.dalton_bas = os.path.join(tmpdir,'DALTON.BAS')
self.sirifc = os.path.join(tmpdir,'SIRIFC')
assert sirifc.sirifc(name=self.sirifc).nsym == 1
self._T = None
self._D = None
self._Dk = None
self._D2k = None
self.get_basis_info()
self.get_isordk()
self._x = None
self._Qab = None
self._Da = None
self._Dab = None
self._Dsym = None
self._QUab = None
self._QUa = None
self._QUsym = None
self._QUN = None
self._QUc = None
self._dQa = None
self._d2Qa = None
self._dQab = None
self._d2Qab = None
self._Fab = None
self._la = None
self._l2a = None
self._Aab = None
self._Bab = None
self._Am = None
self._Bm = None
self._dAab = None
self._dBab = None
#if maxl >= 0: self.charge()
#if maxl >= 1: self.dipole()
#if maxl >= 2: self.quadrupole()
#if pol: self.pol()
def get_basis_info(self, debug=False):
""" Obtain basis set info from DALTON.BAS """
molecule = mol.readin(self.dalton_bas)
self.cpa = mol.contracted_per_atom(molecule)
self.cpa_l = mol.contracted_per_atom_l(molecule)
self.opa = mol.occupied_per_atom(molecule)
self.noa = len(self.opa)
#
# Total number of basis functions and occupied orbitals
#
self.nbf = sum(self.cpa)
self.noc = 0
for o in self.opa:
self.noc += len(o)
if debug:
print "Orbitals/atom", self.cpa, "\nTotal", self.nbf
print "Occupied/atom", self.opa, "\nTotal", self.noc
def S(self):
"""
Get the overlap matrix from AOONEINT
"""
S = one.read("OVERLAP", self.aooneint)
return S.unpack().unblock()
def get_isordk(self):
"""
Get nuclear charges and coordinates from AOONEINT
"""
#
# Data from the ISORDK section in AOONEINT
#
isordk = one.readisordk(filename=self.aooneint)
#
# Number of nuclei
#
N = isordk["nucdep"]
#
# MXCENT, fixed dimension defined in nuclei.h
#
mxcent = len(isordk["chrn"])
#
# Nuclear charges
#
self.Z = full.matrix((N,))
self.Z[:] = isordk["chrn"][:N]
#
# Nuclear coordinates
#
R = full.matrix((mxcent*3,))
R[:] = isordk["cooo"][:]
self.R = R.reshape((mxcent, 3), order='F')[:N, :]
#
# Form Rc molecular gauge origin, default nuclear center of charge
#
if self.gc is None:
self.Rc = self.Z*self.R/self.Z.sum()
else:
self.Rc = numpy.array(self.gc).view(full.matrix)
#
# Bond center matrix and half bond vector
#
noa = self.noa
self.Rab = full.matrix((noa, noa, 3))
self.dRab = full.matrix((noa, noa, 3))
for a in range(noa):
for b in range(noa):
self.Rab[a, b, :] = (self.R[a, :] + self.R[b, :])/2
self.dRab[a, b, :] = (self.R[a, :] - self.R[b, :])/2
@property
def D(self):
"""
Density from SIRIFC in blocked loprop basis
"""
if self._D is not None:
return self._D
Di, Dv = dens.ifc(filename=self.sirifc)
D = Di + Dv
Ti = self.T.I
self._D = ( Ti * D * Ti.T ).subblocked(self.cpa, self.cpa)
return self._D
@property
def T(self, debug=False):
"""
Generate loprop transformation matrix according to the
following steps
Given atomic overlap matrix:
1. orthogonalize in each atomic block
2. a) Lowdin orthogonalize occupied subspace
b) Lowdin orthogonalize virtual subspace
3. project occupied out of virtual
4. Lowdin orthogonalize virtual
Input: overlap S (matrix)
contracted per atom (list)
occupied per atom (nested list)
Returns: transformation matrix T
such that T+ST = 1 (unit) """
if self._T is not None: return self._T
S = self.S()
cpa = self.cpa
opa = self.opa
#
# 1. orthogonalize in each atomic block
#
#t1=timing("step 1")
if debug:
print "Initial S", S
nbf = S.shape[0]
#
# obtain atomic blocking
#
#assert(len(cpa) == len(opa))
noa = len(opa)
nocc = 0
for at in range(noa):
nocc += len(opa[at])
if debug:
print "nocc", nocc
Satom = S.block(cpa, cpa)
Ubl = full.unit(nbf).subblocked((nbf,), cpa)
if debug:
print "Ubl", Ubl
#
# Diagonalize atomwise
#
GS = 1
if GS:
T1 = blocked.BlockDiagonalMatrix(cpa, cpa)
for at in range(noa):
T1.subblock[at] = Ubl.subblock[0][at].GST(S)
if debug:
print "T1", T1
T1 = T1.unblock()
else:
u, v = Satom.eigvec()
T1 = v.unblock()
if debug:
print "T1", T1
#
# Full transformation
#
S1 = T1.T * S * T1
if debug:
print "Overlap after step 1", S1
#t1.stop()
# 2. a) Lowdin orthogonalize occupied subspace
#
# Reorder basis (permute)
#
#t2=timing("step 2")
vpa = []
adim = []
for at in range(noa):
vpa.append(cpa[at]-len(opa[at]))
adim.append(len(opa[at]))
adim.append(vpa[at])
if debug:
print "Blocking: Ao Av Bo Bv...", adim
#
# dimensions for permuted basis
#
pdim = []
if debug:
print "opa", opa
for at in range(noa):
pdim.append(len(opa[at]))
for at in range(noa):
pdim.append(vpa[at])
if debug:
print "Blocking: Ao Bo... Av Bv...", pdim
#
# within atom permute occupied first
#
P1 = subblocked.matrix(cpa, cpa)
for at in range(noa):
P1.subblock[at][at][:, :] = full.permute(opa[at], cpa[at])
n = len(adim)
if debug:
print "P1", P1
P1 = P1.unblock()
P2 = subblocked.matrix(adim, pdim)
for i in range(0, len(adim), 2):
P2.subblock[i][i//2] = full.unit(adim[i])
for i in range(1, len(adim), 2):
P2.subblock[i][noa+(i-1)//2] = full.unit(adim[i])
if debug:
print "P2", P2
P2 = P2.unblock()
#
# new permutation scheme
#
P = P1*P2
if debug:
print "P", P
if not numpy.allclose(P.inv(), P.T):
print "P not unitary"
sys.exit(1)
S1P = P.T*S1*P
if debug:
print "Overlap in permuted basis", S1P
#invsq=lambda x: 1.0/math.sqrt(x)
occdim = (nocc, sum(vpa))
S1Pbl = S1P.block(occdim, occdim)
### SYM ### S1Pbl += S1Pbl.T; S1Pbl *= 0.5 ###SYM###
#T2bl=S1Pbl.func(invsq)
T2bl = S1Pbl.invsqrt()
T2 = T2bl.unblock()
S2 = T2.T*S1P*T2
if debug:
print "Overlap after step 2", S2
#t2.stop()
#
# Project occupied out of virtual
#
#t3=timing("step 3")
if 0:
T3 = full.unit(nbf).GST(S2)
else:
S2sb = S2.subblocked(occdim, occdim)
T3sb = full.unit(nbf).subblocked(occdim, occdim)
T3sb.subblock[0][1] = -S2sb.subblock[0][1]
T3 = T3sb.unblock()
S3 = T3.T*S2*T3
#
if debug:
print "T3", T3
print "Overlap after step 3", S3
#t3.stop()
#
# 4. Lowdin orthogonalize virtual
#
#t4=timing("step 4")
T4b = blocked.unit(occdim)
S3b = S3.block(occdim, occdim)
if debug:
print "S3b", S3b
print "T4b", T4b
### SYM ### S3b += S3b.T; S3b *= 0.5 ###SYM###
T4b.subblock[1] = S3b.subblock[1].invsqrt()
T4 = T4b.unblock()
S4 = T4.T*S3*T4
#S4=S3
if debug:
print "T4", T4
print "Overlap after step 4", S4
#t4.stop()
#
# permute back to original basis
#
S4 = P*S4*P.T
if debug:
print "Final overlap ", S4
#
# Return total transformation
#
T = T1*P*T2*T3*T4*P.T
#
# Test
#
if debug:
print "Transformation determinant", T.det()
print "original S", S, "final", T.T*S*T
self._T = T
return self._T
#T = property(fget=transformation)
#def charge(self, debug=False):
@property
def Qab(self):
""" set charge/atom property"""
if self._Qab is not None: return self._Qab
D = self.D
noa = self.noa
_Qab = full.matrix((noa, noa))
for a in range(noa):
_Qab[a, a] = - D.subblock[a][a].tr()
self._Qab = _Qab
return self._Qab
@property
def Qa(self):
return self.Qab.diagonal()
@property
def Dab(self):
"""Set dipole property"""
if self._Dab is not None: return self._Dab
x = self.x
D = self.D
Rab = self.Rab
Qab = self.Qab
noa = self.noa
_Dab = full.matrix((3, noa, noa))
for i in range(3):
for a in range(noa):
for b in range(noa):
_Dab[i, a, b] = -(
x[i].subblock[a][b]&D.subblock[a][b]
) \
-Qab[a, b]*Rab[a, b, i]
self._Dab = _Dab
return self._Dab
@property
def Da(self):
"""Sum up bonds contributions to atom"""
if self._Da is not None: return self._Da
Dab = self.Dab
self._Da = Dab.sum(axis=2).view(full.matrix)
return self._Da
@property
def Dsym(self):
"""Symmetrize density contributions from atom pairs """
if self._Dsym is not None: return self._Dsym
Dab = self.Dab
noa = self.noa
dsym = full.matrix((3, noa*(noa+1)//2))
ab = 0
for a in range(noa):
for b in range(a):
dsym[:, ab] = Dab[:, a, b] + Dab[:, b, a]
ab += 1
dsym[:, ab] = Dab[:, a, a]
ab += 1
self._Dsym = dsym
return self._Dsym
@property
def Dtot(self):
_Dtot = self.Da.sum(axis=1).view(full.matrix)
_Dtot += self.Qa*self.R - self.Qa.sum()*self.Rc
return _Dtot
@property
def QUab(self):
"""Quadrupole moment"""
if self._QUab is not None: return self._QUab
D = self.D
R = self.R
Rc = self.Rc
dRab = self.dRab
Qab = self.Qab
Dab = self.Dab
lab = ("XXSECMOM", "XYSECMOM", "XZSECMOM",
"YYSECMOM", "YZSECMOM",
"ZZSECMOM")
xy = self.getprop(*lab)
noa = self.noa
QUab = full.matrix((6, noa, noa))
rrab = full.matrix((6, noa, noa))
rRab = full.matrix((6, noa, noa))
RRab = full.matrix((6, noa, noa))
Rab = self.Rab
for a in range(noa):
for b in range(noa):
ij = 0
for i in range(3):
for j in range(i, 3):
rrab[ij, a, b] = -(
xy[ij].subblock[a][b]&D.subblock[a][b]
)
rRab[ij, a, b] = Dab[i, a, b]*Rab[a,b,j]+Dab[j, a, b]*Rab[a,b,i]
RRab[ij, a, b] = Rab[a,b,i]*Rab[a,b,j]*Qab[a, b]
ij += 1
QUab = rrab-rRab-RRab
self._QUab = QUab
#
# Addition term - gauge correction summing up bonds
#
dQUab = full.matrix(self.QUab.shape)
for a in range(noa):
for b in range(noa):
ij = 0
for i in range(3):
for j in range(i, 3):
dQUab[ij, a, b] = dRab[a, b, i]*Dab[j, a, b] \
+dRab[a, b, j]*Dab[i, a, b]
ij += 1
self.dQUab = - dQUab
return self._QUab
@property
def QUa(self):
"""Sum up quadrupole bond terms to atoms"""
if self._QUa is not None: return self._QUa
QUab = self.QUab + self.dQUab
noa = self.noa
self._QUa = QUab.sum(axis=2).view(full.matrix)
return self._QUa
@property
def QUsym(self):
"""Quadrupole moment symmetrized over atom pairs"""
if self._QUsym is not None: return self._QUsym
QUab = self.QUab
noa = self.noa
qusym = full.matrix((6, noa*(noa+1)//2))
ab = 0
for a in range(noa):
for b in range(a):
qusym[:, ab] = QUab[:, a, b] + QUab[:, b, a]
ab += 1
qusym[:, ab] = QUab[:, a, a]
ab += 1
self._QUsym = qusym
return self._QUsym
@property
def QUN(self):
"""Nuclear contribution to quadrupole"""
if self._QUN is not None: return self._QUN
qn = full.matrix(6)
Z = self.Z
R = self.R
Rc = self.Rc
for a in range(len(Z)):
ij = 0
for i in range(3):
for j in range(i, 3):
qn[ij] += Z[a]*(R[a, i]-Rc[i])*(R[a, j]-Rc[j])
ij += 1
self._QUN = qn
return self._QUN
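# Equivalent vectorized sketch of the nuclear quadrupole sum (assuming Z, R
# and Rc behave as plain numpy arrays of shapes (noa,), (noa, 3) and (3,)):
#
#     import numpy as np
#     d = np.asarray(R) - np.asarray(Rc)
#     pairs = [(i, j) for i in range(3) for j in range(i, 3)]
#     qn = np.array([(np.asarray(Z)*d[:, i]*d[:, j]).sum() for i, j in pairs])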
@property
def QUc(self):
if self._QUc is not None: return self._QUc
rRab = full.matrix((6, self.noa, self.noa))
RRab = full.matrix((6, self.noa, self.noa))
Rabc = 1.0*self.Rab
for a in range(self.noa):
for b in range(self.noa):
Rabc[a,b,:] -= self.Rc
for a in range(self.noa):
for b in range(self.noa):
ij = 0
for i in range(3):
for j in range(i,3):
rRab[ij, a, b] = self.Dab[i, a, b]*Rabc[a, b, j]\
+ self.Dab[j, a, b]*Rabc[a, b, i]
RRab[ij, a, b] = self.Qab[a, b]*(self.R[a, i] - self.Rc[i])*(self.R[b, j] - self.Rc[j])
ij += 1
QUcab = self.QUab + rRab + RRab
self._QUc = QUcab.sum(axis=2).sum(axis=1).view(full.matrix)
return self._QUc
@property
def Fab(self):
"""Penalty function"""
if self._Fab is not None: return self._Fab
Fab = full.matrix((self.noa, self.noa))
for a in range(self.noa):
Za = self.Z[a]
Ra = self.R[a]
for b in range(a):
Zb = self.Z[b]
Rb = self.R[b]
Fab[a, b] = self.pf(Za, Ra, Zb, Rb)
Fab[b, a] = Fab[a, b]
for a in range(self.noa):
Fab[a, a] += - Fab[a, :].sum()
self._Fab = Fab
return self._Fab
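# Structural note: with each diagonal element set to minus its off-diagonal
# row sum, Fab is the negative of a weighted graph Laplacian, so every row
# sums to zero. A quick sanity check (hypothetical MolFrag instance `mf`,
# assuming full.matrix supports numpy-style reductions):
#
#     assert abs(mf.Fab.sum(axis=1)).max() < 1e-12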
@property
def la(self):
"""Lagrangian for local poplarizabilities"""
#
# The shift should satisfy
# sum(a) sum(b) (F(a,b) + C)l(b) = sum(a) dq(a) = 0
# =>sum(a, b) F(a, b) + N*C*sum(b) l(b) = 0
# => C = -sum(a, b)F(a,b) / sum(b)l(b)
#
if self._la is not None: return self._la
#
dQa = self.dQa
Fab = self.Fab
Lab = Fab + self.sf(Fab)
self._la = [rhs/Lab for rhs in dQa]
return self._la
@property
def l2a(self):
"""Lagrangian for local poplarizabilities"""
#
# The shift should satisfy
# sum(a) sum(b) (F(a,b) + C)l(b) = sum(a) dq(a) = 0
# =>sum(a, b) F(a, b) + N*C*sum(b) l(b) = 0
# => C = -sum(a, b)F(a,b) / sum(b)l(b)
#
if self._l2a is not None: return self._l2a
#
d2Qa = self.d2Qa
Fab = self.Fab
Lab = Fab + self.sf(Fab)
self._l2a = [rhs/Lab for rhs in d2Qa]
return self._l2a
@property
def Dk(self):
"""Read perturbed densities"""
if self._Dk is not None:
return self._Dk
lab = ['XDIPLEN', "YDIPLEN", "ZDIPLEN"]
T = self.T
cpa = self.cpa
Dkao = lr.Dk(*lab, freqs=self.freqs, tmpdir=self.tmpdir)
_Dk = {lw:(T.I*Dkao[lw]*T.I.T).subblocked(cpa, cpa) for lw in Dkao}
self._Dk = _Dk
return self._Dk
@property
def D2k(self):
"""Read perturbed densities"""
if self._D2k is not None:
return self._D2k
lab = ['XDIPLEN ', "YDIPLEN ", "ZDIPLEN "]
qrlab = [lab[j]+lab[i] for i in range(3) for j in range(i,3)]
T = self.T
cpa = self.cpa
Dkao = qr.D2k(*qrlab, freqs=self.freqs, tmpdir=self.tmpdir)
#print "Dkao.keys", Dkao.keys()
_D2k = {lw:(T.I*Dkao[lw]*T.I.T).subblocked(cpa, cpa) for lw in Dkao}
self._D2k = _D2k
return self._D2k
@property
def x(self):
"""Read dipole matrices to blocked loprop basis"""
if self._x is not None:
return self._x
lab = ['XDIPLEN', "YDIPLEN", "ZDIPLEN"]
self._x = self.getprop(*lab)
return self._x
def getprop(self, *args):
"""Read general property matrices to blocked loprop basis"""
T = self.T
cpa = self.cpa
prp = os.path.join(self.tmpdir,"AOPROPER")
return [
(T.T*p*T).subblocked(cpa, cpa) for p in
prop.read(*args, filename=prp, unpack=True)
]
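# Usage sketch (labels are Dalton AOPROPER property labels; the MolFrag
# instance `mf` is hypothetical):
#
#     x, y, z = mf.getprop('XDIPLEN', 'YDIPLEN', 'ZDIPLEN')   # same as mf.x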
@property
def dQa(self):
"""Charge shift per atom"""
if self._dQa is not None: return self._dQa
T = self.T
cpa = self.cpa
noa = self.noa
Dk = self.Dk
labs = ('XDIPLEN', 'YDIPLEN', 'ZDIPLEN')
dQa = full.matrix((self.nfreqs, noa, 3))
for a in range(noa):
for il, l in enumerate(labs):
for iw, w in enumerate(self.freqs):
dQa[iw, a, il] = - Dk[(l,w)].subblock[a][a].tr()
self._dQa = dQa
return self._dQa
@property
def d2Qa(self):
"""Charge shift per atom"""
if self._d2Qa is not None: return self._d2Qa
T = self.T
cpa = self.cpa
noa = self.noa
D2k = self.D2k
# static
wb = wc = 0.0
d2Qa = full.matrix((1, noa, 6))
lab = ['XDIPLEN ', "YDIPLEN ", "ZDIPLEN "]
qrlab = [lab[j]+lab[i] for i in range(3) for j in range(i,3)]
for a in range(noa):
for il, l in enumerate(qrlab):
d2Qa[0, a, il] = - D2k[(l,wb,wc)].subblock[a][a].tr()
self._d2Qa = d2Qa
return self._d2Qa
@property
def dQab(self):
"""Charge transfer matrix"""
if self._dQab is not None: return self._dQab
dQa = self.dQa
la = self.la
noa = self.noa
dQab = full.matrix((self.nfreqs, noa, noa, 3))
for field in range(3):
for a in range(noa):
Za = self.Z[a]
Ra = self.R[a]
for b in range(a):
Zb = self.Z[b]
Rb = self.R[b]
for w in self.rfreqs:
dQab[w, a, b, field] = \
- (la[w][a, field]-la[w][b, field]) * \
self.pf(Za, Ra, Zb, Rb)
dQab[w, b, a, field] = -dQab[w, a, b, field]
self._dQab = dQab
return self._dQab
@property
def d2Qab(self):
"""Charge transfer matrix for double perturbation"""
if self._d2Qab is not None: return self._d2Qab
d2Qa = self.d2Qa
l2a = self.l2a
noa = self.noa
d2Qab = full.matrix((self.nfreqs, noa, noa, 6))
for field in range(6):
for a in range(noa):
Za = self.Z[a]
Ra = self.R[a]
for b in range(a):
Zb = self.Z[b]
Rb = self.R[b]
for w in self.rfreqs:
d2Qab[w, a, b, field] = \
- (l2a[w][a, field]-l2a[w][b, field]) * \
self.pf(Za, Ra, Zb, Rb)
d2Qab[w, b, a, field] = -d2Qab[w, a, b, field]
self._d2Qab = d2Qab
return self._d2Qab
@property
def Aab(self):
"""Localized polariziabilities:
Contribution from change in localized dipole moment
- d (r - R(AB)):D(AB) = - r:dD(AB) + dQ(A) R(A) \delta(A,B)
"""
if self._Aab is not None: return self._Aab
D = self.D
Dk = self.Dk
#T = self.T
cpa = self.cpa
Z = self.Z
Rab = self.Rab
Qab = self.Qab
dQa = self.dQa
x = self.x
noa = len(cpa)
labs = ('XDIPLEN', 'YDIPLEN', 'ZDIPLEN')
Aab = full.matrix((self.nfreqs, 3, 3, noa, noa))
# correction term for shifting origin from O to Rab
for i,li in enumerate(labs):
for j,lj in enumerate(labs):
for a in range(noa):
for b in range(noa):
for jw, w in enumerate(self.freqs):
Aab[jw, i, j, a, b] = (
-x[i].subblock[a][b]&Dk[(lj, w)].subblock[a][b]
)
for jw in self.rfreqs:
Aab[jw, i, j, a, a] -= dQa[jw, a, j]*Rab[a, a, i]
self._Aab = Aab
return self._Aab
@property
def dAab(self):
"""Charge transfer contribution to bond polarizability"""
if self._dAab is not None: return self._dAab
dQa = self.dQa
dQab = self.dQab
dRab = self.dRab
noa = self.noa
dAab = full.matrix((self.nfreqs, 3, 3, noa, noa))
for a in range(noa):
for b in range(noa):
for i in range(3):
for j in range(3):
dAab[:, i, j, a, b] = (
dRab[a, b, i]*dQab[:, a, b, j]+
dRab[a, b, j]*dQab[:, a, b, i]
)
self._dAab = dAab
return self._dAab
@property
def Am(self):
"""Molecular polarizability:
To reconstruct the molecular polarizability from localized
polarizabilties one has to reexpand in terms of an arbitrary but common
origin leading to the correction term below
d<-r> = - sum(A,B) (r-R(A,B))dD(A,B) + R(A) dQ(A) \delta(A,B)
"""
if self._Am is not None: return self._Am
dQa = self.dQa
Rab = self.Rab
Aab = self.Aab
dAab = self.dAab
noa = self.noa
self._Am = (Aab + 0.5*dAab).sum(axis=4).sum(axis=3).view(full.matrix)
return self._Am
@property
def Bab(self):
"""Localized hyperpolariziabilities"""
if self._Bab is not None: return self._Bab
D = self.D
D2k = self.D2k
#T = self.T
cpa = self.cpa
Z = self.Z
Rab = self.Rab
Qab = self.Qab
d2Qa = self.d2Qa
x = self.x
noa = len(cpa)
labs = ('XDIPLEN ', 'YDIPLEN ', 'ZDIPLEN ')
qlabs = [labs[i] + labs[j] for i in range(3) for j in range(i,3)]
Bab = full.matrix( (self.nfreqs, 3, 6, noa, noa) )
#pdb.set_trace()
#correction term for shifting origin from O to Rab
for i, li in enumerate(labs):
for jk,ljk in enumerate(qlabs):
#print i,jk, li, ljk
for a in range(noa):
for b in range(noa):
for iw, w in enumerate(self.freqs):
Bab[iw, i, jk, a, b] = (
-x[i].subblock[a][b] & D2k [(ljk, w, w)].subblock[a][b]
)
for iw in self.rfreqs:
Bab[iw, i, jk, a, a] -= d2Qa[iw, a, jk]*Rab[a, a, i]
self._Bab = Bab
return self._Bab
@property
def dBab(self):
"""Charge transfer contribution to bond hyperpolarizabilitypolarizability"""
if self._dBab is not None: return self._dBab
dQa = self.dQa
d2Qa = self.d2Qa
dQab = self.dQab
d2Qab = self.d2Qab
dRab = self.dRab
noa = self.noa
dBab = full.matrix((self.nfreqs, 3, 6, noa, noa))
for a in range(noa):
for b in range(noa):
for i in range(3):
for j in range(6):
dBab[:, i, j, a, b] = 2*dRab[a, b, i]*d2Qab[:, a, b, j]
self._dBab = dBab
return self._dBab
@property
def Bm(self):
"Molecular hyperpolarizability"
if self._Bm is not None: return self._Bm
d2Qa = self.d2Qa
Rab = self.Rab
Bab = self.Bab #+ 0.25 * self.dBab
dBab = self.dBab
noa = self.noa
self._Bm = (Bab + 0.5*dBab).sum(axis=4).sum(axis=3).view(full.matrix)
return self._Bm
def output_by_atom(self, fmt="%9.5f", max_l=0, pol=0, hyperpol=0, bond_centers=False, angstrom=False):
"""Print nfo"""
if max_l >= 0:
Qab = self.Qab
Qa = Qab.diagonal()
if max_l >= 1:
Dab = self.Dab
Da = self.Da
Dsym = self.Dsym
if max_l >= 2:
QUab = self.QUab
QUN = self.QUN
dQUab = self.dQUab
QUa = self.QUa
if pol:
Aab = self.Aab + self.dAab
if hyperpol:
Bab = self.Bab + self.dBab
if angstrom:
unit = "AA"
xconv = 0.5291772108
xconv3 = 0.5291772108**3
else:
unit = "AU"
xconv = 1
xconv3 = 1
Z = self.Z
R = self.R
Rc = self.Rc
noa = self.noa
#
# Form net atomic properties P(a) = sum(b) P(a,b)
#
if self._Aab is not None:
Aab = self.Aab + 0.5*self.dAab
Aa = Aab.sum(axis=4)
if self._Bab is not None:
Bab = self.Bab + 0.5*self.dBab
Ba = Bab.sum(axis=4)
if bond_centers:
for a in range(noa):
for b in range(a):
header("Bond %d %d" % (a+1, b+1))
print "Bond center: " + \
(3*fmt) % tuple(0.5*(R[a, :]+R[b, :])*xconv)
print "Electronic charge: "+fmt % Qab[a, b]
print "Total charge: "+fmt % Qab[a, b]
if self._Dab is not None:
print "Electronic dipole " + \
(3*fmt) % tuple(Dab[:, a, b]+Dab[:, b, a])
print "Electronic dipole norm" + \
fmt % (Dab[:, a, b]+Dab[:, b, a]).norm2()
if self._QUab is not None:
print "Electronic quadrupole" + \
(6*fmt) % tuple(QUab[:, a, b]+QUab[:, b, a])
if self._Aab is not None:
for iw, w in enumerate(self.freqs):
Asym = Aab[iw, :, :, a, b] + Aab[iw, :, :, b, a]
if pol > 0:
print "Isotropic polarizability (%g)" % w, fmt % (Asym.trace()/3*xconv3)
if pol > 1:
print "Polarizability (%g) " % w,
print (6*fmt) % tuple(Asym.pack().view(full.matrix)*xconv3)
if self._Bab is not None:
for iw, w in enumerate(self.freqs):
Bsym = Bab[iw, :, :, a, b] + Bab[iw, :, :, b, a]
output_beta(Bsym, self.Da[:, a])
header("Atom %d"%(a+1))
print "Atom center: " + \
(3*fmt) % tuple(R[a,:]*xconv)
print "Nuclear charge: "+fmt % Z[a]
print "Electronic charge: "+fmt % Qab[a, a]
print "Total charge: "+fmt % (Z[a]+Qab[a, a])
if self._Dab is not None:
print "Electronic dipole " + \
(3*fmt) % tuple(Dab[:, a, a])
print "Electronic dipole norm" + \
fmt % Dab[:, a, a].norm2()
if self._QUab is not None:
print "Electronic quadrupole" + \
(6*fmt) % tuple(QUab[:, a, a])
if self._Aab is not None:
for iw, w in enumerate(self.freqs):
Asym = Aab[iw, :, :, a, a]
if pol > 0:
print "Isotropic polarizability (%g)" % w, fmt % (Asym.trace()/3*xconv3)
if pol > 1:
print "Polarizability (%g) " % w, (6*fmt) % tuple(Asym.pack().view(full.matrix)*xconv3)
if self._Bab is not None:
for iw, w in enumerate(self.freqs):
Bsym = Bab[iw, :, :, a, a]
output_beta(Bsym, self.Da[:, a])
else:
for a in range(noa):
header("Atomic domain %d" % (a+1))
print "Domain center: "+(3*fmt) % tuple(R[a, :]*xconv)
line = " 0"
line += (3*"%17.10f") % tuple(xtang*R[a, :])
print "Nuclear charge: "+fmt % Z[a]
if self.max_l >= 0:
print "Electronic charge: "+fmt % Qa[a]
print "Total charge: "+fmt % (Z[a]+Qa[a])
if self._Dab is not None:
print "Electronic dipole "+(3*fmt) % tuple(self.Da[:, a])
print "Electronic dipole norm"+(fmt) % self.Da[:, a].view(full.matrix).norm2()
if self._QUab is not None:
#print "QUab", QUab
print "Electronic quadrupole"+(6*fmt) % tuple(QUa[:, a])
if self._Aab is not None:
for iw, w in enumerate(self.freqs):
Asym = Aa[iw, :, :, a].view(full.matrix)
print "Isotropic polarizablity (w=%g)" % w + fmt % (Aa[iw, :, :, a].trace()/3*xconv3)
print "Electronic polarizability (w=%g)" % w + \
(6*fmt) % tuple(Asym.pack().view(full.matrix)*xconv3)
if self._Bab is not None:
for iw, w in enumerate(self.freqs):
Bsym = Ba[iw, :, :, a].view(full.matrix)
output_beta(Bsym, self.Da[:, a])
#
# Total molecular properties
#
Ztot = Z.sum()
if self.max_l >= 0:
Qtot = Qa.sum()
if self.max_l >= 1:
Dm = self.Da.sum(axis=1).view(full.matrix)
Dc = Qa*(R-Rc)
DT = Dm+Dc
if self._QUab is not None:
QUm = self.QUc
QUT = QUm+QUN
if self._Bab is not None:
Dm = self.Da.sum(axis=1).view(full.matrix)
header("Molecular")
print "Domain center: "+(3*fmt) % tuple(Rc*xconv)
print "Nuclear charge: "+fmt % Ztot
if self.max_l >= 0:
print "Electronic charge: "+fmt % Qtot
print "Total charge: "+fmt % (Ztot+Qtot)
if self.max_l >= 1:
print "Electronic dipole "+(3*fmt) % tuple(Dm)
print "Gauge dipole "+(3*fmt) % tuple(Dc)
print "Total dipole "+(3*fmt) % tuple(DT)
if self._QUab is not None:
print "Electronic quadrupole"+(6*fmt) % tuple(QUm)
print "Nuclear quadrupole"+(6*fmt) % tuple(QUN)
print "Total quadrupole"+(6*fmt) % tuple(QUT)
if self._Aab is not None:
for iw, w in enumerate(self.freqs):
Am = self.Am[iw]
print "Polarizability av (%g) " % w, fmt % (Am.trace()/3*xconv3)
print "Polarizability (%g) " % w, (6*fmt) % tuple(Am.pack().view(full.matrix)*xconv3)
if self._Bab is not None:
for iw, w in enumerate(self.freqs):
Bm = self.Bm[iw]
output_beta(Bm, dip=Dm, fmt=fmt)
def output_template(self, maxl = 0, pol = 0, hyper = 0, template_full = False, decimal = 4, full_loc =0, freqs = None):
l_dict = { 0 : "charge", 1 : "dipole", 2 : "quadrupole",
}
#Upper triangular alpha
a_dict = { 0 : "", 2 : "alpha" }
#Upper triangular beta
b_dict = { 0 : "", 2 : "beta" }
fmt = "%."+"%df" %decimal
line = ""
if pol > 0:
Aab = self.Aab + 0.5 * self.dAab
if hyper > 0:
Bab = self.Bab + 0.5 * self.dBab
if maxl not in l_dict:
print "ERROR: called output_template with wrong argument range"
if pol not in a_dict:
print "ERROR: called output_template with wrong argument range"
if hyper not in b_dict:
print "ERROR: called output_template with wrong argument range"
elem_dict = {1:"H", 6:"C", 7: "N", 8 : "O", 16 : "S"}
if maxl >= 0:
if template_full:
# Put point dipole on center of charge
line += "( '%s%d', "%(elem_dict[self.Z[full_loc]] , full_loc + 1) + '"charge") : [ %s ],\n'%fmt %(self.Z.sum()+ self.Qab.sum())
else:
for a in range(self.noa):
line += "( '%s%d', "%(elem_dict[self.Z[a]] ,a+1) + '"charge") : [ %s ],\n'%fmt %(self.Z[a] + self.Qab[a, a])
if maxl >= 1:
if template_full:
Dm = self.Da.sum(axis=1).view(full.matrix)
Dc = self.Qab.diagonal()*(self.R-self.Rc)
DT = Dm+Dc
line += "( '%s%d', "%(elem_dict[self.Z[full_loc]] , full_loc + 1) + '"dipole") : [ %s, %s, %s ],\n'%tuple([fmt for i in range(3)]) %(tuple(DT))
else:
for a in range(self.noa):
line += "( '%s%d', "%(elem_dict[self.Z[a]] ,a+1) + '"dipole") : [ %s, %s, %s ],\n'%tuple([fmt for i in range(3)]) %(tuple(self.Dab.sum(axis=2)[:, a]))
if maxl >= 2:
if template_full:
line += "( '%s%d', "%(elem_dict[self.Z[full_loc]] , full_loc + 1) + '"quadrupole") : [ %s, %s, %s, %s, %s, %s ],\n'%tuple([fmt for i in range(6)]) %(tuple((self.QUab+self.dQUab).sum(axis=(1,2))[:]))
else:
for a in range(self.noa):
line += "( '%s%d', "%(elem_dict[self.Z[a]] ,a+1) + '"quadrupole") : [ %s, %s, %s, %s, %s, %s ],\n'%tuple([fmt for i in range(6)]) %(tuple((self.QUab+self.dQUab).sum(axis=2)[:, a]))
if pol >= 2:
if template_full:
Asym = Aab.sum(axis=(3,4))[0, :, :].view(full.matrix)
A = Asym.pack().view(full.matrix).copy()
A[2], A[3] = A[3], A[2]
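# assumption: swapping packed elements 2 and 3 converts the upper-triangular
# order (xx, xy, xz, yy, yz, zz) into the row-wise lower-triangular order
# (xx, xy, yy, xz, yz, zz) expected by the template consumer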
line += "( '%s%d', "%(elem_dict[self.Z[full_loc]] , full_loc + 1) + '"alpha") : [ %s, %s, %s, %s, %s, %s ],\n'%tuple([fmt for i in range(6)]) %(tuple(A))
else:
for a in range(self.noa):
# Only the first frequency is handled for now; TODO: generalize if needed
Asym = Aab.sum(axis=4)[0, :, :, a].view(full.matrix)
A = Asym.pack().view(full.matrix).copy()
A[2], A[3] = A[3], A[2]
line += "( '%s%d', "%(elem_dict[self.Z[a]] ,a+1) + '"alpha") : [ %s, %s, %s, %s, %s, %s ],\n'%tuple([fmt for i in range(6)]) %(tuple(A))
if hyper >= 2:
if template_full:
Bsym = symmetrize_first_beta( Bab.sum(axis=(3,4))[0, :, :].view(full.matrix) )
line += "( '%s%d', "%(elem_dict[self.Z[full_loc]] , full_loc + 1) + '"beta") : [ %s, %s, %s, %s, %s, %s, %s, %s, %s, %s ],\n' %tuple([fmt for i in range(len(Bsym))]) %(tuple(Bsym))
else:
for a in range(self.noa):
# Only the first frequency is handled for now; TODO: generalize if needed
Bsym = symmetrize_first_beta( Bab.sum(axis=4)[0, :, :, a].view(full.matrix) )
line += "( '%s%d', "%(elem_dict[self.Z[a]] ,a+1) + '"beta") : [ %s, %s, %s, %s, %s, %s, %s, %s, %s, %s ],\n' %(tuple([fmt for i in range(len(Bsym))])) %(tuple(Bsym))
return line
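# Example of the generated template format (illustrative values only; each
# line is a "(label, property) : [values]" entry for pasting into a dict):
#
#     ( 'O1', "charge") : [ -0.661 ],
#     ( 'H2', "dipole") : [ 0.158, 0.000, 0.116 ],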
def output_potential_file(
self, maxl, pol, hyper, bond_centers=False, angstrom=False, decimal = 3,
):
"""Output potential file"""
fmt = "%" + "%d." %(7 + decimal) + "%df" % decimal
lines = []
if angstrom:
unit = "AA"
xconv = 0.5291772108
xconv3 = 0.5291772108**3
else:
unit = "AU"
xconv = 1
xconv3 = 1
lines.append(unit)
noa = self.noa
#To get number of centers and bonding is on
bond_mat = numpy.zeros( (noa, noa,), dtype = int )
for a in range( noa ):
for b in range( a ):
r = numpy.sqrt( (( self.R[a] - self.R[b])**2 ).sum() )
if r < bond_co[ (int(self.Z[a]), int(self.Z[b])) ]/xtang:
bond_mat[ a, b ] = 1
bond_mat[ b, a ] = 1
if bond_centers:
#Where the number of bonds is the diagonal plus each entry with '1'
#in the upper triangular of bond_mat
noc = bond_mat.shape[0] + sum(len(numpy.where(row[i+1:] == 1)[0]) for i, row in enumerate(bond_mat))
#noc = noa*(noa + 1)/2
else:
noc = self.noa
lines.append("%d %d %d %d"%(noc, maxl, pol, 1))
if maxl >= 0: Qab = self.Qab
if maxl >= 1:
Dab = self.Dab
Dsym = self.Dsym
if maxl >= 2:
QUab = self.QUab
dQUab = self.dQUab
if pol > 0:
Aab = self.Aab + 0.5*self.dAab
if hyper > 0:
Bab = self.Bab + 0.5*self.dBab
if bond_centers:
ab = 0
for a in range(noa):
for b in range(a):
if bond_mat[ a, b]:
line = ("1" + 3*fmt) % tuple(self.Rab[a, b, :])
if maxl >= 0:
line += fmt % Qab[a, b]
if maxl >= 1:
line += (3*fmt) % tuple( Dab[:,b,a] + Dab[:, a, b] )
if maxl >= 2: line += (6*fmt) % \
tuple(QUab[:, a, b] +QUab[:, b, a])
if pol > 0:
Aab = self.Aab + 0.5*self.dAab
for iw, w in enumerate(self.freqs):
Asym = Aab[iw, :, :, a, b] + Aab[iw, :, :, b, a]
if pol == 1: line += fmt % (Asym.trace()*xconv3/3)
elif (pol%10) == 2:
out = Asym.pack().view(full.matrix)*xconv3
out[2:4] = out[3:1:-1]
line += (6*fmt)%tuple(out )
if hyper > 0:
for iw, w in enumerate(self.freqs):
Bsym = Bab[iw, :, :, a, b] + Bab[iw, :, :, b, a]
#if hyper == 1:
# dip = self.Da[:,a]
# betakk = Bsym[:,0] + Bsym[:, 3] + Bsym[:, 5]
# line += fmt % ( 0.2 * (betakk & dip) / dip.norm2() )
Btotsym = symmetrize_first_beta( Bsym )
line += 10*fmt % tuple( Btotsym )
lines.append(line)
#For atom a, nbond_pos holds the atoms that are not bonded to a;
#only non-bonded contributions enter the atomic properties here
nbond_pos = numpy.where( bond_mat[ a ] == 0 )[0]
line = ("1" + 3*fmt) % tuple(self.Rab[a, a, :])
if maxl >= 0:
line += fmt % (self.Z[a]+Qab[a, a])
if maxl >= 1:
line += (3*fmt) % tuple( reduce(lambda x,y: x+ Dab[:, a, y], nbond_pos, 0.0 ))
if maxl >= 2:
print "Bond quadrupoles not supported yet"
raise SystemExit
if pol > 0:
for iw, w in enumerate(self.freqs):
if pol %10 == 2:
out = reduce( lambda x,y: x + Aab[iw, :, :, a, y], nbond_pos, 0.0 ).pack().view(full.matrix)*xconv3
out[2:4] = out[3:1:-1]
line += (6*fmt) % tuple(out)
elif pol == 1:
out = reduce( lambda x,y: x + Aab[iw, :, :, a, y],nbond_pos, 0.0 ).view(full.matrix).trace()/3.0 * xconv3
line += fmt % out
if hyper > 0:
for iw, w in enumerate(self.freqs):
Bsym = reduce( lambda x,y: x + Bab[iw, :, :, a, y], nbond_pos, 0.0 ).view(full.matrix)
#if hyper == 1:
# dip = self.Da[:,a]
# betakk = Bsym[:,0] + Bsym[:, 3] + Bsym[:, 5]
# line += fmt % ( 0.2 * (betakk & dip) / dip.norm2() )
Btotsym = symmetrize_first_beta( Bsym )
line += 10*fmt % tuple( Btotsym )
ab += 1
lines.append(line)
else:
for a in range(noa):
line = ("1" + 3*fmt) % tuple(self.Rab[a, a, :]*xconv)
if maxl >= 0: line += fmt % (self.Z[a] + Qab[a, a])
if maxl >= 1: line += (3*fmt) % tuple(Dab.sum(axis=2)[:, a])
if maxl >= 2:
line += (6*fmt) % tuple((QUab+dQUab).sum(axis=2)[:, a])
if pol > 0:
for iw in range(self.nfreqs):
Asym = Aab.sum(axis=4)[iw, :, :, a].view(full.matrix)
if pol == 1:
line += fmt % (Asym.trace()/3*xconv3)
elif pol %10 == 2:
out = Asym.pack().view(full.matrix)
out[2:4] = out[3:1:-1]
line += (6*fmt) % tuple(out*xconv3)
if hyper > 0:
for iw in range(self.nfreqs):
Bsym = Bab.sum(axis=4)[iw, :, :, a].view(full.matrix)
if hyper == 1:
dip = self.Da[:,a]
betakk = Bsym[:,0] + Bsym[:, 3] + Bsym[:, 5]
line += fmt % ( 0.2 * (betakk & dip) / dip.norm2() )
if hyper == 2:
Btotsym = symmetrize_first_beta( Bsym )
line += 10*fmt % tuple( Btotsym )
lines.append(line)
return "\n".join(lines) + "\n"
def print_atom_domain(self, n, angstrom=False):
fmt = "%9.5f"
if angstrom:
xconv = 0.5291772108
else:
xconv = 1
retstr = """\
---------------
Atomic domain %d
---------------
Domain center: """ % (n+1,) + (3*fmt+"\n") % tuple(self.Rab[n, n, :]*xconv)
print "self.max_l", self.max_l
if self.max_l >= 0:
retstr += ("Nuclear charge: " + fmt + "\n") % self.Z[n]
retstr += ("Electronic charge: " + fmt + "\n") % self.Qab[n, n]
retstr += ("Total charge: " + fmt + "\n") % (self.Z[n] + self.Qab[n,n])
if self.max_l >= 1:
retstr += ("Electronic dipole " + 3*fmt + "\n") % tuple(self.Dab.sum(axis=2)[:, n])
if self.max_l >= 2:
retstr += ("Electronic quadrupole" + 6*fmt + "\n") % tuple((self.QUab+self.dQUab).sum(axis=2)[:, n])
if self.pol == 1:
for iw, w in enumerate(self.freqs):
retstr += ("Isotropic polarizablity (w=%g)" % w + fmt + "\n") % (
(self.Aab + 0.5*self.dAab).sum(axis=4)[iw, :, :, n].trace()/3
)
if self.pol == 2:
for iw, w in enumerate(self.freqs):
a_lower = (self.Aab + 0.5*self.dAab).sum(axis=4)[iw, :, :, n].view(full.matrix).pack()
retstr += ("Electronic polarizability (w=%g)" % w + 6*fmt + "\n") % tuple(
a_lower
)
return retstr
if __name__ == "__main__":
import optparse
OP = optparse.OptionParser()
OP.add_option(
'-d', '--debug',
dest='debug', action='store_true', default=False,
help='print for debugging [False]'
)
OP.add_option(
'-v', '--verbose',
dest='verbose', action='store_true', default=False,
help='print details [False]'
)
OP.add_option(
'-t','--tmpdir',
dest='tmpdir', default='/tmp',
help='scratch directory [/tmp]'
)
OP.add_option(
'-f','--daltgz',
dest='daltgz', default=None,
help='Dalton restart tar ball [None]'
)
OP.add_option(
'-p', '--potfile',
dest='potfile', default='LOPROP.POT',
help='Potential input file [LOPROP.POT]'
)
OP.add_option(
'-b','--bond',
dest='bc', action='store_true',default=False,
help='include bond centers [False]'
)
OP.add_option(
'-g','--gauge-center',
dest='gc', default=None,
help='gauge center'
)
OP.add_option(
'-l', '--angular-momentum',
dest='max_l', type='int', default=2,
help='Max angular momentum [2]'
)
OP.add_option(
'-A', '--Angstrom',
dest='angstrom', action='store_true', default=False,
help="Output in Angstrom"
)
OP.add_option(
'-w','--frequencies',
dest='freqs', default=None,
help='Dynamic polarizabilities (0.)'
)
OP.add_option(
'-a','--polarizabilities',
dest='pol', type='int', default=0,
help='Localized polarizabilities (1=isotropic, 2=full)'
)
OP.add_option(
'-B','--hyperpolarizabilities',
dest='beta', type='int', default=0,
help='Localized hyperpolarizabilities (1=isotropic, 2=full)'
)
OP.add_option(
'-s', '--screening',
dest='alpha', type='float', default=2.0,
help='Screening parameter alpha for penalty function [2.0]'
)
OP.add_option(
'--template',
action = 'store_true',
default= False,
help='Write atomic properties in templated format',
)
OP.add_option(
'--template_full',
action = 'store_true',
default= False,
help='Write atomic properties in templated format, centered on first atom',
)
OP.add_option(
'--decimal',
default= 3,
type = int,
help='Significant digits for template output.',
)
OP.add_option(
'--full_loc',
default= 0,
type = int,
help='Index of the atom on which the full template output is centered [0].',
)
o, a = OP.parse_args(sys.argv[1:])
#
# Check consistency: present Dalton files
#
if not os.path.isdir(o.tmpdir):
print "%s: Directory not found: %s" % (sys.argv[0], o.tmpdir)
raise SystemExit
import tarfile
if o.daltgz:
tgz = tarfile.open(o.daltgz, 'r:gz')
tgz.extractall(path=o.tmpdir)
if o.freqs:
freqs = map(float, o.freqs.split())
else:
freqs = (0.0, )
needed_files = ["AOONEINT", "DALTON.BAS", "SIRIFC", "AOPROPER", "RSPVEC"]
for file_ in needed_files:
df = os.path.join(o.tmpdir, file_)
if not os.path.isfile(df):
print "%s: %s does not exists" % (sys.argv[0], df)
print "Needed Dalton files to run loprop.py:"
print "\n".join(needed_files)
raise SystemExit
if o.gc is not None:
#Gauge center
try:
gc = [float(i) for i in o.gc.split()]
except ValueError:
sys.stderr.write("Gauge center incorrect:%s\n" % o.gc)
sys.exit(1)
else:
gc = None
t = timing.timing('Loprop')
molfrag = MolFrag(
o.tmpdir, o.max_l, pf=penalty_function(o.alpha), gc=gc, freqs=freqs
)
print molfrag.output_potential_file(
o.max_l, o.pol, o.beta, o.bc, o.angstrom, decimal = o.decimal
)
if o.template:
print molfrag.output_template(
o.max_l, o.pol, o.beta,
template_full = o.template_full,
decimal = o.decimal,
freqs = freqs,
full_loc = o.full_loc,
)
if o.verbose:
molfrag.output_by_atom(fmt="%12.5f", max_l=o.max_l, pol=o.pol, hyperpol=o.beta, bond_centers=o.bc, angstrom=o.angstrom)
print t
|
fishstamp82/loprop-1
|
loprop/loprop.py
|
Python
|
gpl-3.0
| 54,871
|
[
"Dalton"
] |
b81452e1660f4f3d701acb2b7900ab12b53f7e450239179da624782e1d42ab51
|
from ase import *
from hotbit import *
from pylab import *
atoms = Atoms('Au2',[(0,0,0),(2.2,0,0)])
atoms.center(vacuum=10)
traj = PickleTrajectory('dimer_curve.traj','w',atoms)
R = [2.2,2.4,2.54,2.8,3.0,3.2,3.4]
E = [-1.06,-2.08,-2.22,-1.99,-1.66,-1.31,-1.00]
class Calc:
def __init__(self):
pass
def set(self,e):
self.e = e
def get_potential_energy(self,atoms):
return self.e
def get_forces(self,atoms):
return None
def get_stress(self,atoms):
return None
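# Calc is a stub ASE-style calculator: it simply returns a preset energy, so
# the reference (R, E) points above can be pushed through the Atoms and
# trajectory machinery without any real electronic-structure call.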
calc = Calc()
atoms.set_calculator(calc)
for r,e in zip(R,E):
atoms[1].x=atoms[0].x+r
calc.set(e)
print atoms.get_potential_energy()
traj.write()
plot(R,E)
calc = Hotbit()
atoms.set_calculator(calc)
E2=[]
R=linspace(2.3,4,50)
for r in R:
atoms[1].x=atoms[0].x+r
E2.append( atoms.get_potential_energy() )
ylim(ymax=0.0)
plot(R,E2)
show()
|
pekkosk/hotbit
|
examples/CH_parametrization/Au2.py
|
Python
|
gpl-2.0
| 914
|
[
"ASE"
] |
1ce68245a42811b24cdd0f9b77e6aa407959e18fbcba663624ebcda052a1ee3d
|
#!/usr/bin/env python
#
# LSST Data Management System
# Copyright 2008-2015 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
#
import unittest
import lsst.utils.tests as utilsTests
import lsst.daf.persistence as dafPersist
from lsst.obs.decam import DecamMapper
class GetIdTestCase(unittest.TestCase):
"""Testing butler exposure id retrieval"""
def setUp(self):
self.bf = dafPersist.ButlerFactory(mapper=DecamMapper(root="."))
self.butler = self.bf.create()
def tearDown(self):
del self.butler
del self.bf
def testId(self):
"""Test retrieval of exposure ids"""
bits = self.butler.get("ccdExposureId_bits")
self.assertEqual(bits, 32)
id = self.butler.get("ccdExposureId", visit=229388, ccdnum=13, filter="z")
self.assertEqual(id, 22938813)
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def suite():
"""Returns a suite containing all the test cases in this module."""
utilsTests.init()
suites = []
suites += unittest.makeSuite(GetIdTestCase)
suites += unittest.makeSuite(utilsTests.MemoryTestCase)
return unittest.TestSuite(suites)
def run(shouldExit = False):
"""Run the tests"""
utilsTests.run(suite(), shouldExit)
if __name__ == "__main__":
run(True)
|
yalsayyad/obs_decam
|
tests/getId.py
|
Python
|
gpl-3.0
| 2,055
|
[
"VisIt"
] |
1fe926f1334116b08776f64e598126dcd2c72b06b7c57ed993466aead87e563d
|
"""
Visualizations (:mod:`skbio.draw`)
==================================
.. currentmodule:: skbio.draw
This module provides functionality for visualization of data.
Distribution visualizations
---------------------------
Functions
^^^^^^^^^
.. autosummary::
:toctree: generated/
boxplots
grouped_distributions
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.util import TestRunner
from ._distributions import boxplots, grouped_distributions
__all__ = ['boxplots', 'grouped_distributions']
test = TestRunner(__file__).test
|
Achuth17/scikit-bio
|
skbio/draw/__init__.py
|
Python
|
bsd-3-clause
| 864
|
[
"scikit-bio"
] |
fd2699ec440142bfe82db9a335a29a57e4c2dbc7ae0d7c54cf8cdbe965e6ed51
|
from copy import deepcopy
from warnings import warn
from itertools import chain
from ast import NodeTransformer
from six import iteritems
from .. import Reaction, Metabolite, Gene
from .delete import get_compiled_gene_reaction_rules
from ..core.Gene import ast2str
_renames = (
(".", "_DOT_"),
("(", "_LPAREN_"),
(")", "_RPAREN_"),
("-", "__"),
("[", "_LSQBKT"),
("]", "_RSQBKT"),
(",", "_COMMA_"),
(":", "_COLON_"),
(">", "_GT_"),
("<", "_LT"),
("/", "_FLASH"),
("\\", "_BSLASH"),
("+", "_PLUS_"),
("=", "_EQ_"),
(" ", "_SPACE_"),
("'", "_SQUOT_"),
('"', "_DQUOT_"),
)
def _escape_str_id(id_str):
"""make a single string id SBML compliant"""
for c in ("'", '"'):
if id_str.startswith(c) and id_str.endswith(c) \
and id_str.count(c) == 2:
id_str = id_str.strip(c)
for char, escaped_char in _renames:
id_str = id_str.replace(char, escaped_char)
return id_str
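# Doctest-style sketch of the escaping, derived from the _renames table above:
#
#     >>> _escape_str_id("glc(e)")
#     'glc_LPAREN_e_RPAREN_'
#     >>> _escape_str_id("'ac[c]'")   # surrounding quotes are stripped first
#     'ac_LSQBKTc_RSQBKT'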
class _GeneEscaper(NodeTransformer):
def visit_Name(self, node):
node.id = _escape_str_id(node.id)
return node
def escape_ID(cobra_model):
"""makes all ids SBML compliant"""
for x in chain([cobra_model],
cobra_model.metabolites,
cobra_model.reactions,
cobra_model.genes):
x.id = _escape_str_id(x.id)
cobra_model.repair()
gene_renamer = _GeneEscaper()
for rxn, rule in iteritems(get_compiled_gene_reaction_rules(cobra_model)):
if rule is not None:
rxn._gene_reaction_rule = ast2str(gene_renamer.visit(rule))
def rename_genes(cobra_model, rename_dict):
"""renames genes in a model from the rename_dict"""
recompute_reactions = set() # need to recomptue related genes
remove_genes = []
for old_name, new_name in iteritems(rename_dict):
# behavior is undefined if a value also matches a different key,
# because dicts are unordered
try:
gene_index = cobra_model.genes.index(old_name)
except ValueError:
gene_index = None
old_gene_present = gene_index is not None
new_gene_present = new_name in cobra_model.genes
if old_gene_present and new_gene_present:
old_gene = cobra_model.genes.get_by_id(old_name)
remove_genes.append(old_gene)
recompute_reactions.update(old_gene._reaction)
elif old_gene_present and not new_gene_present:
# rename old gene to new gene
gene = cobra_model.genes[gene_index]
# trick DictList into updating index
cobra_model.genes._dict.pop(gene.id) # ugh
gene.id = new_name
cobra_model.genes[gene_index] = gene
elif not old_gene_present and new_gene_present:
pass
else: # not old gene_present and not new_gene_present
# the new gene's _model will be set by repair
cobra_model.genes.append(Gene(new_name))
cobra_model.repair()
class Renamer(NodeTransformer):
def visit_Name(self, node):
node.id = rename_dict.get(node.id, node.id)
return node
gene_renamer = Renamer()
for rxn, rule in iteritems(get_compiled_gene_reaction_rules(cobra_model)):
if rule is not None:
rxn._gene_reaction_rule = ast2str(gene_renamer.visit(rule))
for rxn in recompute_reactions:
rxn.gene_reaction_rule = rxn._gene_reaction_rule
for i in remove_genes:
cobra_model.genes.remove(i)
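# Usage sketch (hypothetical model and gene ids):
#
#     rename_genes(model, {"b0001": "thrL", "b0002": "thrA"})
#     # old ids are gone and gene_reaction_rule strings are rewritten to match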
def initialize_growth_medium(cobra_model, the_medium='MgM',
external_boundary_compartment='e',
external_boundary_reactions=None,
reaction_lower_bound=0.,
reaction_upper_bound=1000.,
irreversible=False,
reactions_to_disable=None):
"""Sets all of the input fluxes to the model to zero and then will
initialize the input fluxes to the values specified in the_medium if
it is a dict or will see if the model has a composition dict and use
that to do the initialization.
cobra_model: A cobra.Model object.
the_medium: A string, or a dictionary.
If a string then the initialize_growth_medium function expects that
the_model has an attribute dictionary called media_compositions, which is a
dictionary of dictionaries for various medium compositions. Where a medium
composition is a dictionary of external boundary reaction ids for the
medium components and the external boundary fluxes for each medium
component.
external_boundary_compartment: None or a string.
If not None then it specifies the compartment in which to disable all of
the external systems boundaries.
external_boundary_reactions: None or a list of external_boundaries that are
to have their bounds reset. This acts in conjunction with
external_boundary_compartment.
reaction_lower_bound: Float. The default value to use for the lower
bound for the boundary reactions.
reaction_upper_bound: Float. The default value to use for the upper
bound for the boundary.
irreversible: Boolean. If the model is irreversible then the medium
composition is taken as the upper bound
reactions_to_disable: List of reactions for which the upper and lower
bounds are disabled. This is superseded by the contents of
media_composition
"""
# Zero all of the inputs to the model
if hasattr(the_medium, 'keys'):
medium_composition = the_medium
else:
if hasattr(cobra_model, 'media_compositions'):
if the_medium in cobra_model.media_compositions:
medium_composition = cobra_model.media_compositions[the_medium]
else:
raise Exception("%s is not in the model's media list" %
the_medium)
else:
raise Exception("the model doesn't have attribute "
"media_compositions and the medium is not a dict")
if external_boundary_reactions is not None:
if isinstance(external_boundary_reactions[0], str):
external_boundary_reactions = map(cobra_model.reactions.get_by_id,
external_boundary_reactions)
elif external_boundary_compartment is None:
warn("We are initializing the medium without first adjusting all"
"external boundary reactions")
# Select the system_boundary reactions to reset
if external_boundary_compartment is not None:
_system_boundaries = dict([(x, x.get_compartments())
for x in cobra_model.reactions
if x.boundary == 'system_boundary'])
[_system_boundaries.pop(k) for k, v in list(_system_boundaries.items())
if len(v) == 1 and external_boundary_compartment not in v]
if external_boundary_reactions is None:
external_boundary_reactions = _system_boundaries.keys()
else:
external_boundary_reactions += _system_boundaries.keys()
for the_reaction in external_boundary_reactions:
the_reaction.lower_bound = reaction_lower_bound
if the_reaction.upper_bound == 0:
the_reaction.upper_bound = reaction_upper_bound
# Disable specified reactions
if reactions_to_disable is not None:
if isinstance(reactions_to_disable[0], str):
reactions_to_disable = map(cobra_model.reactions.get_by_id,
reactions_to_disable)
for the_reaction in reactions_to_disable:
the_reaction.lower_bound = the_reaction.upper_bound = 0.
# Update the model inputs based on the_medium
for the_component in medium_composition.keys():
the_reaction = cobra_model.reactions.get_by_id(the_component)
if irreversible:
the_reaction.upper_bound = medium_composition[the_component]
else:
the_reaction.lower_bound = medium_composition[the_component]
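# Usage sketch (hypothetical exchange-reaction ids; each value becomes the
# reaction's lower bound in a reversible model, or its upper bound when
# irreversible=True):
#
#     initialize_growth_medium(model, {"EX_glc_e": -10.0, "EX_o2_e": -20.0})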
def convert_to_irreversible(cobra_model):
"""Split reversible reactions into two irreversible reactions
These two reactions will proceed in opposite directions. This
guarantees that all reactions in the model will only allow
positive flux values, which is useful for some modeling problems.
cobra_model: A Model object which will be modified in place.
"""
reactions_to_add = []
for reaction in cobra_model.reactions:
# If a reaction is reverse only, the forward reaction (which
# will be constrained to 0) will be left in the model.
if reaction.lower_bound < 0:
reverse_reaction = Reaction(reaction.id + "_reverse")
reverse_reaction.lower_bound = max(0, -reaction.upper_bound)
reverse_reaction.upper_bound = -reaction.lower_bound
reverse_reaction.objective_coefficient = \
reaction.objective_coefficient * -1
reaction.lower_bound = max(0, reaction.lower_bound)
reaction.upper_bound = max(0, reaction.upper_bound)
# Make the directions aware of each other
reaction.notes["reflection"] = reverse_reaction.id
reverse_reaction.notes["reflection"] = reaction.id
reaction_dict = {k: v * -1
for k, v in iteritems(reaction._metabolites)}
reverse_reaction.add_metabolites(reaction_dict)
reverse_reaction._model = reaction._model
reverse_reaction._genes = reaction._genes
for gene in reaction._genes:
gene._reaction.add(reverse_reaction)
reverse_reaction.subsystem = reaction.subsystem
reverse_reaction._gene_reaction_rule = reaction._gene_reaction_rule
reactions_to_add.append(reverse_reaction)
cobra_model.add_reactions(reactions_to_add)
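# Bounds bookkeeping example: a reversible reaction R with bounds
# (-1000, 1000) becomes
#     R          lower 0, upper 1000   (forward direction)
#     R_reverse  lower 0, upper 1000   (negated stoichiometry)
# and the two halves point at each other through notes["reflection"].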
def revert_to_reversible(cobra_model, update_solution=True):
"""This function will convert a reversible model made by
convert_to_irreversible into a reversible model.
cobra_model: A cobra.Model which will be modified in place.
"""
reverse_reactions = [x for x in cobra_model.reactions
if "reflection" in x.notes and
x.id.endswith('_reverse')]
# If there are no reverse reactions, then there is nothing to do
if len(reverse_reactions) == 0:
return
update_solution = update_solution and cobra_model.solution is not None \
and cobra_model.solution.status != "NA"
if update_solution:
x_dict = cobra_model.solution.x_dict
for reverse in reverse_reactions:
forward_id = reverse.notes.pop("reflection")
forward = cobra_model.reactions.get_by_id(forward_id)
forward.lower_bound = -reverse.upper_bound
if forward.upper_bound == 0:
forward.upper_bound = -reverse.lower_bound
# update the solution dict
if update_solution:
if reverse.id in x_dict:
x_dict[forward_id] -= x_dict.pop(reverse.id)
if "reflection" in forward.notes:
forward.notes.pop("reflection")
# Since the metabolites and genes are all still in
# use we can do this faster removal step. We can
# probably speed things up here.
cobra_model.remove_reactions(reverse_reactions)
# update the solution vector
if update_solution:
cobra_model.solution.x_dict = x_dict
cobra_model.solution.x = [x_dict[r.id] for r in cobra_model.reactions]
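# Round-trip sketch (hypothetical model `model`):
#
#     convert_to_irreversible(model)
#     # ... optimize, inspect the positive-only fluxes ...
#     revert_to_reversible(model)  # folds *_reverse fluxes back into x_dict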
def canonical_form(model, objective_sense='maximize',
already_irreversible=False, copy=True):
"""Return a model (problem in canonical_form).
Converts a minimization problem to a maximization, makes all variables
positive by making reactions irreversible, and converts all constraints to
<= constraints.
model: class:`~cobra.core.Model`. The model/problem to convert.
objective_sense: str. The objective sense of the starting problem, either
'maximize' or 'minimize'. A minimization problem will be converted to a
maximization.
already_irreversible: bool. If the model is already irreversible, then pass
True.
copy: bool. Copy the model before making any modifications.
"""
if copy:
model = model.copy()
if not already_irreversible:
convert_to_irreversible(model)
if objective_sense == "minimize":
# if converting min to max, reverse all the objective coefficients
for reaction in model.reactions:
reaction.objective_coefficient = - reaction.objective_coefficient
elif objective_sense != "maximize":
raise Exception("Invalid objective sense '%s'. "
"Must be 'minimize' or 'maximize'." % objective_sense)
# convert G and E constraints to L constraints
for metabolite in model.metabolites:
if metabolite._constraint_sense == "G":
metabolite._constraint_sense = "L"
metabolite._bound = - metabolite._bound
for reaction in metabolite.reactions:
coeff = reaction.get_coefficient(metabolite)
# flip the sign: add_metabolites() adds to the existing
# coefficient, so coeff + (-2*coeff) == -coeff
reaction.add_metabolites({metabolite: -2 * coeff})
elif metabolite._constraint_sense == "E":
# change existing constraint to L
metabolite._constraint_sense = "L"
# add new constraint
new_constr = Metabolite("%s__GE_constraint" % metabolite.id)
new_constr._constraint_sense = "L"
new_constr._bound = - metabolite._bound
for reaction in metabolite.reactions:
coeff = reaction.get_coefficient(metabolite)
reaction.add_metabolites({new_constr: -coeff})
# convert lower bounds to LE constraints
for reaction in model.reactions:
if reaction.lower_bound < 0:
raise Exception("Bounds of irreversible reactions should be >= 0,"
" for %s" % reaction.id)
elif reaction.lower_bound == 0:
continue
# new constraint for lower bound
lb_constr = Metabolite("%s__LB_constraint" % reaction.id)
lb_constr._constraint_sense = "L"
lb_constr._bound = - reaction.lower_bound
reaction.add_metabolites({lb_constr: -1})
reaction.lower_bound = 0
return model
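# Usage sketch (hypothetical cobra model `model`):
#
#     canon = canonical_form(model, objective_sense="maximize")
#     # canon is irreversible and maximization-form, with all constraints as
#     # <= rows and nonzero lower bounds re-expressed as "__LB_constraint" rows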
|
aebrahim/cobrapy
|
cobra/manipulation/modify.py
|
Python
|
lgpl-2.1
| 14,568
|
[
"VisIt"
] |
6eae24cda223de1b1625afd6b0553ed21791380a65c2102b2fe3cdffab058f57
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with Medline.
Classes:
Record A dictionary holding Medline data.
Functions:
read Reads one Medline record
parse Allows you to iterate over a bunch of Medline records
"""
class Record(dict):
"""A dictionary holding information from a Medline record.
All data are stored under the mnemonic appearing in the Medline
file. These mnemonics have the following interpretations:
Mnemonic Description
AB Abstract
CI Copyright Information
AD Affiliation
IRAD Investigator Affiliation
AID Article Identifier
AU Author
FAU Full Author
CN Corporate Author
DCOM Date Completed
DA Date Created
LR Date Last Revised
DEP Date of Electronic Publication
DP Date of Publication
EDAT Entrez Date
GS Gene Symbol
GN General Note
GR Grant Number
IR Investigator Name
FIR Full Investigator Name
IS ISSN
IP Issue
TA Journal Title Abbreviation
JT Journal Title
LA Language
LID Location Identifier
MID Manuscript Identifier
MHDA MeSH Date
MH MeSH Terms
JID NLM Unique ID
RF Number of References
OAB Other Abstract
OCI Other Copyright Information
OID Other ID
OT Other Term
OTO Other Term Owner
OWN Owner
PG Pagination
PS Personal Name as Subject
FPS Full Personal Name as Subject
PL Place of Publication
PHST Publication History Status
PST Publication Status
PT Publication Type
PUBM Publishing Model
PMC PubMed Central Identifier
PMID PubMed Unique Identifier
RN Registry Number/EC Number
NM Substance Name
SI Secondary Source ID
SO Source
SFM Space Flight Mission
STAT Status
SB Subset
TI Title
TT Transliterated Title
VI Volume
CON Comment on
CIN Comment in
EIN Erratum in
EFR Erratum for
CRI Corrected and Republished in
CRF Corrected and Republished from
PRIN Partial retraction in
PROF Partial retraction of
RPI Republished in
RPF Republished from
RIN Retraction in
ROF Retraction of
UIN Update in
UOF Update of
SPIN Summary for patients in
ORI Original report in
"""
def parse(handle):
"""Read Medline records one by one from the handle.
The handle is either a Medline file, a file-like object, or a list
of lines describing one or more Medline records.
Typical usage:
from Bio import Medline
handle = open("mymedlinefile")
records = Medline.parse(handle)
for record in records:
print record['TI']
"""
# These keys point to string values
textkeys = ("ID", "PMID", "SO", "RF", "NI", "JC", "TA", "IS", "CY", "TT",
"CA", "IP", "VI", "DP", "YR", "PG", "LID", "DA", "LR", "OWN",
"STAT", "DCOM", "PUBM", "DEP", "PL", "JID", "SB", "PMC",
"EDAT", "MHDA", "PST", "AB", "AD", "EA", "TI", "JT")
handle = iter(handle)
# First skip blank lines
for line in handle:
line = line.rstrip()
if line:
break
else:
return
record = Record()
finished = False
while not finished:
if line[:6] == "      ": # continuation line
record[key].append(line[6:])
elif line:
key = line[:4].rstrip()
if key not in record:
record[key] = []
record[key].append(line[6:])
try:
line = handle.next()
except StopIteration:
finished = True
else:
line = line.rstrip()
if line:
continue
# Join each list of strings into one string.
for key in textkeys:
if key in record:
record[key] = " ".join(record[key])
if record:
yield record
record = Record()
def read(handle):
"""Read a single Medline records from the handle.
The handle is either is a Medline file, a file-like object, or a list
of lines describing a Medline record.
Typical usage:
from Bio import Medline
handle = open("mymedlinefile")
record = Medline.read(handle)
print record['TI']
"""
records = parse(handle)
return records.next()
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Medline/__init__.py
|
Python
|
gpl-2.0
| 4,985
|
[
"Biopython"
] |
d4a6f4869a4a350c11aa6e724b23a67b6a6f0de177fb43f50f0544c9f9644226
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import platform
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import FormatStrFormatter
"""
This module provides classes for plotting Pourbaix objects.
"""
import six
from six.moves import map
from six.moves import zip
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Sai Jayaraman"
__email__ = "sjayaram@mit.edu"
__status__ = "Production"
__date__ = "Jan 26, 2012"
import numpy as np
import re
import collections
from pymatgen.analysis.pourbaix.analyzer import PourbaixAnalyzer
from pymatgen.analysis.pourbaix.maker import PREFAC
from pymatgen.analysis.pourbaix.entry import MultiEntry
from pymatgen.phasediagram.plotter import uniquelines
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.util.coord_utils import in_coord_list
class PourbaixPlotter(object):
"""
A plotter class for Pourbaix diagrams.
Args:
pourbaixdiagram: A PourbaixDiagram object.
show_unstable: Whether unstable phases will be plotted as well as
red crosses. Defaults to False.
"""
def __init__(self, pourbaixdiagram, show_unstable=False):
self._pd = pourbaixdiagram
self.lines = uniquelines(self._pd.facets)
self.show_unstable = show_unstable
@property
def pourbaix_hull_plot_data(self):
"""
Pourbaix diagram convex hull data.
Returns:
(lines, stable_entries, unstable_entries)
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
facetlines = self.lines
lines = list()
stable_entries = dict()
for line in facetlines:
entry1 = entries[line[0]]
entry2 = entries[line[1]]
x = [data[line[0]][0], data[line[1]][0]]
y = [data[line[0]][1], data[line[1]][1]]
z = [data[line[0]][2], data[line[1]][2]]
coord = [x, y, z]
lines.append(coord)
labelcoord = list(zip(*coord))
stable_entries[labelcoord[0]] = entry1
stable_entries[labelcoord[1]] = entry2
allentries = pd.all_entries
alldata = np.array(pd.qhull_data)
unstable_entries = dict()
stable = pd.stable_entries
for i in range(len(allentries)):
entry = allentries[i]
if entry not in stable:
x = [alldata[i][0], alldata[i][0]]
y = [alldata[i][1], alldata[i][1]]
z = [alldata[i][2], alldata[i][2]]
coord = [x, y, z]
labelcoord = list(zip(*coord))
unstable_entries[entry] = labelcoord[0]
return lines, stable_entries, unstable_entries
def show(self, label_stable=True, label_unstable=False, filename=""):
"""
Draws the convex hull diagram using Matplotlib and show it.
"""
plt = self._get_plot(label_stable=label_stable,
label_unstable=label_unstable)
if filename == "":
plt.show()
else:
plt.savefig(filename, bbox_inches=0)
def _get_plot(self, label_stable=True, label_unstable=False):
"""
Plot convex hull of Pourbaix Diagram entries
"""
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.font_manager import FontProperties
fig = plt.figure()
ax = p3.Axes3D(fig)
font = FontProperties()
font.set_weight("bold")
font.set_size(14)
(lines, labels, unstable) = self.pourbaix_hull_plot_data
count = 1
newlabels = list()
for x, y, z in lines:
ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
markerfacecolor="r", markersize=10)
for coords in sorted(labels.keys()):
entry = labels[coords]
label = self.print_name(entry)
if label_stable:
ax.text(coords[0], coords[1], coords[2], str(count))
newlabels.append("{} : {}".format(
count, latexify_ion(latexify(label))))
count += 1
if label_unstable:
for entry in unstable.keys():
label = self.print_name(entry)
coords = unstable[entry]
ax.plot([coords[0], coords[0]], [coords[1], coords[1]],
[coords[2], coords[2]], "bo", markerfacecolor="g",
markersize=10)
ax.text(coords[0], coords[1], coords[2], str(count))
newlabels.append("{} : {}".format(
count, latexify_ion(latexify(label))))
count += 1
plt.figtext(0.01, 0.01, "\n".join(newlabels))
plt.xlabel("pH")
plt.ylabel("V")
return plt
def plot_planes(self):
"""
Plot the free energy facets as a function of pH and V
"""
if self.show_unstable:
entries = self._pd._all_entries
else:
entries = self._pd.stable_entries
num_plots = len(entries)
import matplotlib.pyplot as plt
colormap = plt.cm.gist_ncar
fig = plt.figure().gca(projection='3d')
color_array = [colormap(i) for i in np.linspace(0, 0.9, num_plots)]
labels = []
color_index = -1
for entry in entries:
normal = np.array([-PREFAC * entry.npH, -entry.nPhi, +1])
d = entry.g0
color_index += 1
pH, V = np.meshgrid(np.linspace(-10, 28, 100),
np.linspace(-3, 3, 100))
g = (-normal[0] * pH - normal[1] * V + d) / normal[2]
lbl = latexify_ion(
latexify(entry._entry.composition.reduced_formula))
labels.append(lbl)
fig.plot_surface(pH, V, g, color=color_array[color_index],
label=lbl)
plt.legend(labels)
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.show()
def plot_chempot_range_map(self, limits=None, title="", filename=""):
self.plot_pourbaix(limits, title, filename)
def plot_pourbaix(self, limits=None, title="", filename="", label_domains=True):
plt = self.get_pourbaix_plot(limits=limits, title=title, label_domains=label_domains)
if filename == "":
plt.show()
else:
f = plt.gcf()
f.set_size_inches((11.5, 9))
plt.tight_layout(pad=1.09)
def pourbaix_plot_data(self, limits=None):
"""
Get data required to plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
Returns:
stable_entries, unstable_entries
stable_entries: dict of lines. The keys are Pourbaix Entries, and
lines are in the form of a list
unstable_entries: list of unstable entries
"""
analyzer = PourbaixAnalyzer(self._pd)
self._analyzer = analyzer
if limits:
analyzer.chempot_limits = limits
chempot_ranges = analyzer.get_chempot_range_map(limits)
self.chempot_ranges = chempot_ranges
stable_entries_list = collections.defaultdict(list)
for entry in chempot_ranges:
for line in chempot_ranges[entry]:
x = [line.coords[0][0], line.coords[1][0]]
y = [line.coords[0][1], line.coords[1][1]]
coords = [x, y]
stable_entries_list[entry].append(coords)
unstable_entries_list = [entry for entry in self._pd.all_entries
if entry not in self._pd.stable_entries]
return stable_entries_list, unstable_entries_list
def get_center(self, lines):
"""
Returns coordinates of center of a domain. Useful
for labeling a Pourbaix plot.
Args:
lines:
Lines corresponding to a domain
limits:
Limits of Pourbaix diagram
Returns:
center_x, center_y:
x,y coordinate of center of domain. If domain lies
outside limits, center will lie on the boundary.
"""
center_x = 0.0
center_y = 0.0
coords = []
count_center = 0.0
for line in lines:
for coord in np.array(line).T:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
cx = coord[0]
cy = coord[1]
center_x += cx
center_y += cy
count_center += 1.0
if count_center == 0.0:
count_center = 1.0
center_x /= count_center
center_y /= count_center
return center_x, center_y
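# Equivalent numpy sketch, up to the tolerance used by in_coord_list
# (assumes numpy >= 1.13 for unique(..., axis=0), with `lines` in the
# [x_pair, y_pair] form produced by pourbaix_plot_data):
#
#     pts = np.unique(np.vstack([np.array(line).T for line in lines]), axis=0)
#     center_x, center_y = pts.mean(axis=0)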
def get_distribution_corrected_center(self, lines, h2o_h_line=None, h2o_o_line=None, radius=None):
"""
Returns coordinates of the distribution-corrected center of a domain. Similar
to get_center(), but considers the distances to the surrounding lines, which
dominate the visual impression of the "center". If the H2O stability lines are
provided, this function also tries to avoid overlapping the text label with
them. Useful for labeling a Pourbaix plot.
Args:
lines:
Lines corresponding to a domain
limits:
Limits of Pourbaix diagram
h2o_h_line: Hydrogen line of H2O stability
h2o_o_line: Oxygen line of H2O stability
radius: Half height of the text label.
Returns:
center_x, center_y:
x,y coordinate of center of domain. If domain lies
outside limits, center will lie on the boundary.
"""
coords = []
pts_x = []
pts_y = []
for line in lines:
for coord in np.array(line).T:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
cx = coord[0]
cy = coord[1]
pts_x.append(cx)
pts_y.append(cy)
if len(pts_x) < 1:
return 0.0, 0.0
cx_1 = (max(pts_x) + min(pts_x)) / 2.0
cy_1 = (max(pts_y) + min(pts_y)) / 2.0
mid_x_list = []
mid_y_list = []
# move the center to the center of surrounding lines
for line in lines:
(x1, y1), (x2, y2) = np.array(line).T
if (x1 - cx_1) * (x2 - cx_1) <= 0.0:
# horizontal line
mid_y = ((y2 - y1) / (x2 - x1)) * (cx_1 - x1) + y1
assert (y2 - mid_y) * (y1 - mid_y) <= 0.0
mid_y_list.append(mid_y)
if (y1 - cy_1) * (y2 - cy_1) <= 0.0:
# vertical line
mid_x = ((x2 - x1) / (y2 - y1)) * (cy_1 - y1) + x1
assert (x2 - mid_x) * (x1 - mid_x) <= 0.0
mid_x_list.append(mid_x)
upper_y = sorted([y for y in mid_y_list if y >= cy_1])[0]
lower_y = sorted([y for y in mid_y_list if y < cy_1])[-1]
left_x = sorted([x for x in mid_x_list if x <= cx_1])[-1]
right_x = sorted([x for x in mid_x_list if x > cx_1])[0]
center_x = (left_x + right_x) / 2.0
center_y = (upper_y + lower_y) / 2.0
if h2o_h_line is not None:
(h2o_h_x1, h2o_h_y1), (h2o_h_x2, h2o_h_y2) = h2o_h_line.T
h_slope = (h2o_h_y2 - h2o_h_y1) / (h2o_h_x2 - h2o_h_x1)
(h2o_o_x1, h2o_o_y1), (h2o_o_x2, h2o_o_y2) = h2o_o_line.T
o_slope = (h2o_o_y2 - h2o_o_y1) / (h2o_o_x2 - h2o_o_x1)
h_y = h_slope * (cx_1 - h2o_h_x1) + h2o_h_y1
o_y = o_slope * (cx_1 - h2o_o_x1) + h2o_o_y1
h2o_y = None
if abs(center_y - h_y) < radius:
h2o_y = h_y
elif abs(center_y - o_y) < radius:
h2o_y = o_y
if h2o_y is not None:
if (upper_y - lower_y) / 2.0 > radius * 2.0:
# The space can hold the whole text (radius * 2.0)
if h2o_y > center_y:
center_y = h2o_y - radius
else:
center_y = h2o_y + radius
return center_x, center_y
def get_pourbaix_plot(self, limits=None, title="", label_domains=True):
"""
Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
Returns:
plt:
matplotlib plot object
"""
# plt = pretty_plot(24, 14.4)
plt = pretty_plot(16)
(stable, unstable) = self.pourbaix_plot_data(limits)
if limits:
xlim = limits[0]
ylim = limits[1]
else:
xlim = self._analyzer.chempot_limits[0]
ylim = self._analyzer.chempot_limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
for entry, lines in stable.items():
center_x = 0.0
center_y = 0.0
coords = []
count_center = 0.0
for line in lines:
(x, y) = line
plt.plot(x, y, "k-", linewidth=lw)
for coord in np.array(line).T:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
cx = coord[0]
cy = coord[1]
center_x += cx
center_y += cy
count_center += 1.0
if count_center == 0.0:
count_center = 1.0
center_x /= count_center
center_y /= count_center
if ((center_x <= xlim[0]) | (center_x >= xlim[1]) |
(center_y <= ylim[0]) | (center_y >= ylim[1])):
continue
xy = (center_x, center_y)
if label_domains:
plt.annotate(self.print_name(entry), xy, fontsize=20, color="b")
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt
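    # Hedged usage sketch: assuming `plotter` is an instance of this plotter
    # class built from a Pourbaix diagram:
    #     plt = plotter.get_pourbaix_plot(limits=[[-2, 16], [-4, 4]],
    #                                     title="Pourbaix diagram")
    #     plt.show()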
def print_name(self, entry):
"""
        Return a label for the entry: the formatted name for a single entry,
        else a combined name (or qhull index, if more than two species) for a
        MultiEntry.
"""
str_name = ""
if isinstance(entry, MultiEntry):
if len(entry.entrylist) > 2:
return str(self._pd.qhull_entries.index(entry))
for e in entry.entrylist:
str_name += latexify_ion(latexify(e.name)) + " + "
str_name = str_name[:-3]
return str_name
else:
return latexify_ion(latexify(entry.name))
def legend(self, label_unstable=False, legend_file=""):
if self._pd._multielement:
unprocessed_entries = self._pd.unprocessed_entries
set_of_entries = set()
list_of_entries = {}
for entry in self._pd.stable_entries:
index_ent = self._pd.qhull_entries.index(entry)
str_ename = ""
for e in entry.entrylist:
str_ename += e.name + " + "
for ent in unprocessed_entries:
if ent.name == e.name:
indx = unprocessed_entries.index(ent)
set_of_entries.add(indx)
continue
str_ename = str_ename[:-3]
list_of_entries[index_ent] = str_ename
if label_unstable:
for entry in [entry for entry in self._pd.all_entries
if entry not in self._pd.stable_entries]:
for e in entry.entrylist:
indx = unprocessed_entries.index(e)
set_of_entries.add(indx)
        str_labels = " Species: \n"
        for i in list_of_entries.keys():
            str_labels += str(i) + " : " + list_of_entries[i] + "\n"
        if legend_file:
            with open(legend_file, 'w') as f:
                f.write(str_labels)
        return str_labels
def write_image(self, plt, stream, image_format="svg"):
"""
Writes the phase diagram to an image in a stream.
Args:
plt:
matplotlib plot
stream:
stream to write to. Can be a file stream or a StringIO stream.
image_format
format for image. Can be any of matplotlib supported formats.
Defaults to svg for best results for vector graphics.
"""
f = plt.gcf()
f.set_size_inches((12, 10))
plt.tight_layout(pad=1.09)
plt.savefig(stream, format=image_format)
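    # Hedged usage sketch: per the docstring above, `stream` can be a file
    # stream or a StringIO stream:
    #     plt = plotter.get_pourbaix_plot()
    #     with open("pourbaix.svg", "w") as f:
    #         plotter.write_image(plt, f, image_format="svg")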
def domain_vertices(self, entry):
"""
Returns the vertices of the Pourbaix domain.
Args:
entry: Entry for which domain vertices are desired
Returns:
list of vertices
"""
if entry not in self._analyzer.pourbaix_domain_vertices.keys():
return []
return self._analyzer.pourbaix_domain_vertices[entry]
def get_pourbaix_plot_colorfill_by_element(self, limits=None, title="",
label_domains=True, element=None):
"""
Color domains by element
"""
from matplotlib.patches import Polygon
entry_dict_of_multientries = collections.defaultdict(list)
plt = pretty_plot(16)
optim_colors = ['#0000FF', '#FF0000', '#00FF00', '#FFFF00', '#FF00FF',
'#FF8080', '#DCDCDC', '#800000', '#FF8000']
optim_font_color = ['#FFFFA0', '#00FFFF', '#FF00FF', '#0000FF', '#00FF00',
'#007F7F', '#232323', '#7FFFFF', '#007FFF']
hatch = ['/', '\\', '|', '-', '+', 'o', '*']
(stable, unstable) = self.pourbaix_plot_data(limits)
num_of_overlaps = {key: 0 for key in stable.keys()}
for entry in stable:
if isinstance(entry, MultiEntry):
for e in entry.entrylist:
if element in e.composition.elements:
entry_dict_of_multientries[e.name].append(entry)
num_of_overlaps[entry] += 1
else:
entry_dict_of_multientries[entry.name].append(entry)
if limits:
xlim = limits[0]
ylim = limits[1]
else:
xlim = self._analyzer.chempot_limits[0]
ylim = self._analyzer.chempot_limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
from pymatgen import Composition, Element
from pymatgen.core.ion import Ion
def len_elts(entry):
if "(s)" in entry:
comp = Composition(entry[:-3])
else:
comp = Ion.from_formula(entry)
return len([el for el in comp.elements if el not in
[Element("H"), Element("O")]])
        sorted_entry = sorted(entry_dict_of_multientries.keys(), key=len_elts)
i = -1
        label_chr = list(map(chr, range(65, 91)))
for entry in sorted_entry:
color_indx = 0
x_coord = 0.0
y_coord = 0.0
npts = 0
i += 1
for e in entry_dict_of_multientries[entry]:
hc = 0
fc = 0
bc = 0
xy = self.domain_vertices(e)
c = self.get_center(stable[e])
x_coord += c[0]
y_coord += c[1]
npts += 1
color_indx = i
if "(s)" in entry:
comp = Composition(entry[:-3])
else:
comp = Ion.from_formula(entry)
if len([el for el in comp.elements if el not in
[Element("H"), Element("O")]]) == 1:
if color_indx >= len(optim_colors):
color_indx = color_indx -\
int(color_indx / len(optim_colors)) * len(optim_colors)
patch = Polygon(xy, facecolor=optim_colors[color_indx],
closed=True, lw=3.0, fill=True)
bc = optim_colors[color_indx]
else:
if color_indx >= len(hatch):
color_indx = color_indx - int(color_indx / len(hatch)) * len(hatch)
patch = Polygon(xy, hatch=hatch[color_indx], closed=True, lw=3.0, fill=False)
hc = hatch[color_indx]
ax.add_patch(patch)
xy_center = (x_coord / npts, y_coord / npts)
if label_domains:
if color_indx >= len(optim_colors):
color_indx = color_indx -\
int(color_indx / len(optim_colors)) * len(optim_colors)
fc = optim_font_color[color_indx]
if bc and not hc:
bbox = dict(boxstyle="round", fc=fc)
if hc and not bc:
bc = 'k'
fc = 'w'
bbox = dict(boxstyle="round", hatch=hc, fill=False)
if bc and hc:
bbox = dict(boxstyle="round", hatch=hc, fc=fc)
# bbox.set_path_effects([PathEffects.withSimplePatchShadow()])
plt.annotate(latexify_ion(latexify(entry)), xy_center,
color=bc, fontsize=30, bbox=bbox)
# plt.annotate(label_chr[i], xy_center,
# color=bc, fontsize=30, bbox=bbox)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt
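    # Hedged usage sketch: color the domains that contain a given element:
    #     from pymatgen import Element
    #     plt = plotter.get_pourbaix_plot_colorfill_by_element(
    #         element=Element("Fe"))
    #     plt.show()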
def get_pourbaix_plot_colorfill_by_domain_name(self, limits=None, title="",
label_domains=True, label_color='k', domain_color=None, domain_fontsize=None,
domain_edge_lw=0.5, bold_domains=None, cluster_domains=(),
add_h2o_stablity_line=True, add_center_line=False, h2o_lw=0.5):
"""
        Color domains with the colors specified by the domain_color dict
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
            label_domains (bool): whether to add text labels for domains
            label_color (str): color of domain labels, defaults to black
            domain_color (dict): colors of each domain, e.g. {"Al(s)": "#FF1100"}.
                If set to None, the default color set will be used.
            domain_fontsize (int): font size used in domain text labels.
            domain_edge_lw (int): line width for the boundaries between domains.
            bold_domains (list): list of domain names to render in bold text.
            cluster_domains (list): list of domain names in the cluster phase
            add_h2o_stablity_line (bool): whether to plot the H2O stability lines
            add_center_line (bool): whether to plot lines marking the center coordinates
            h2o_lw (int): line width for the H2O stability lines and center lines
"""
# helper functions
def len_elts(entry):
comp = Composition(entry[:-3]) if "(s)" in entry else Ion.from_formula(entry)
return len(set(comp.elements) - {Element("H"), Element("O")})
def special_lines(xlim, ylim):
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
return h_line, o_line, neutral_line, V0_line
from matplotlib.patches import Polygon
from pymatgen import Composition, Element
from pymatgen.core.ion import Ion
default_domain_font_size = 12
        default_solid_phase_color = '#b8f9e7'    # slightly darker than the MP scheme, to
        default_cluster_phase_color = '#d0fbef'  # avoid making the cluster phase too light
plt = pretty_plot(8, dpi=300)
(stable, unstable) = self.pourbaix_plot_data(limits)
num_of_overlaps = {key: 0 for key in stable.keys()}
entry_dict_of_multientries = collections.defaultdict(list)
for entry in stable:
if isinstance(entry, MultiEntry):
for e in entry.entrylist:
entry_dict_of_multientries[e.name].append(entry)
num_of_overlaps[entry] += 1
else:
entry_dict_of_multientries[entry.name].append(entry)
xlim, ylim = limits[:2] if limits else self._analyzer.chempot_limits[:2]
h_line, o_line, neutral_line, V0_line = special_lines(xlim, ylim)
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax.tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
sorted_entry = list(entry_dict_of_multientries.keys())
sorted_entry.sort(key=len_elts)
if domain_fontsize is None:
domain_fontsize = {en: default_domain_font_size for en in sorted_entry}
if domain_color is None:
domain_color = {en: default_solid_phase_color if '(s)' in en else
(default_cluster_phase_color if en in cluster_domains else 'w')
for i, en in enumerate(sorted_entry)}
if bold_domains is None:
bold_domains = [en for en in sorted_entry if '(s)' not in en]
for entry in sorted_entry:
x_coord, y_coord, npts = 0.0, 0.0, 0
for e in entry_dict_of_multientries[entry]:
xy = self.domain_vertices(e)
if add_h2o_stablity_line:
c = self.get_distribution_corrected_center(stable[e], h_line, o_line, 0.3)
else:
c = self.get_distribution_corrected_center(stable[e])
x_coord += c[0]
y_coord += c[1]
npts += 1
patch = Polygon(xy, facecolor=domain_color[entry],
closed=True, lw=domain_edge_lw, fill=True, antialiased=True)
ax.add_patch(patch)
xy_center = (x_coord / npts, y_coord / npts)
if label_domains:
if platform.system() == 'Darwin':
                    # Have to hard-code the font path to get the desired font on Mac OS X
if entry in bold_domains:
font = FontProperties(fname='/Library/Fonts/Times New Roman Bold.ttf',
size=domain_fontsize[entry])
else:
font = FontProperties(fname='/Library/Fonts/Times New Roman.ttf',
size=domain_fontsize[entry])
else:
if entry in bold_domains:
font = FontProperties(family='Times New Roman',
weight='bold',
size=domain_fontsize[entry])
else:
font = FontProperties(family='Times New Roman',
weight='regular',
size=domain_fontsize[entry])
plt.text(*xy_center, s=latexify_ion(latexify(entry)), fontproperties=font,
horizontalalignment="center", verticalalignment="center",
multialignment="center", color=label_color)
if add_h2o_stablity_line:
dashes = (3, 1.5)
line, = plt.plot(h_line[0], h_line[1], "k--", linewidth=h2o_lw, antialiased=True)
line.set_dashes(dashes)
line, = plt.plot(o_line[0], o_line[1], "k--", linewidth=h2o_lw, antialiased=True)
line.set_dashes(dashes)
if add_center_line:
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=h2o_lw, antialiased=False)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=h2o_lw, antialiased=False)
plt.xlabel("pH", fontname="Times New Roman", fontsize=18)
plt.ylabel("E (V)", fontname="Times New Roman", fontsize=18)
plt.xticks(fontname="Times New Roman", fontsize=16)
plt.yticks(fontname="Times New Roman", fontsize=16)
plt.title(title, fontsize=20, fontweight='bold', fontname="Times New Roman")
return plt
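    # Hedged usage sketch: override the default coloring per domain name (the
    # example color mapping comes from the docstring above):
    #     plt = plotter.get_pourbaix_plot_colorfill_by_domain_name(
    #         domain_color={"Al(s)": "#FF1100"}, add_h2o_stablity_line=True)
    #     plt.show()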
def get_pourbaix_mark_passive(self, limits=None, title="", label_domains=True, passive_entry=None):
"""
        Mark passivation behavior: highlight the passive_entry solid domain
        and other solid domains containing O or H.
"""
from matplotlib.patches import Polygon
from pymatgen import Element
from itertools import chain
import operator
plt = pretty_plot(16)
optim_colors = ['#0000FF', '#FF0000', '#00FF00', '#FFFF00', '#FF00FF',
'#FF8080', '#DCDCDC', '#800000', '#FF8000']
optim_font_colors = ['#FFC000', '#00FFFF', '#FF00FF', '#0000FF', '#00FF00',
'#007F7F', '#232323', '#7FFFFF', '#007FFF']
(stable, unstable) = self.pourbaix_plot_data(limits)
mark_passive = {key: 0 for key in stable.keys()}
if self._pd._elt_comp:
maxval = max(six.iteritems(self._pd._elt_comp), key=operator.itemgetter(1))[1]
key = [k for k, v in self._pd._elt_comp.items() if v == maxval]
passive_entry = key[0]
def list_elts(entry):
elts_list = set()
if isinstance(entry, MultiEntry):
for el in chain.from_iterable([[el for el in e.composition.elements]
for e in entry.entrylist]):
elts_list.add(el)
else:
elts_list = entry.composition.elements
return elts_list
for entry in stable:
if passive_entry + str("(s)") in entry.name:
mark_passive[entry] = 2
continue
if "(s)" not in entry.name:
continue
elif len(set([Element("O"), Element("H")]).intersection(set(list_elts(entry)))) > 0:
mark_passive[entry] = 1
if limits:
xlim = limits[0]
ylim = limits[1]
else:
xlim = self._analyzer.chempot_limits[0]
ylim = self._analyzer.chempot_limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
for e in stable.keys():
xy = self.domain_vertices(e)
c = self.get_center(stable[e])
if mark_passive[e] == 1:
color = optim_colors[0]
fontcolor = optim_font_colors[0]
colorfill = True
elif mark_passive[e] == 2:
color = optim_colors[1]
fontcolor = optim_font_colors[1]
colorfill = True
else:
color = "w"
colorfill = False
fontcolor = "k"
patch = Polygon(xy, facecolor=color, closed=True, lw=3.0, fill=colorfill)
ax.add_patch(patch)
if label_domains:
plt.annotate(self.print_name(e), c, color=fontcolor, fontsize=20)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt
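    # Hedged usage sketch: highlight the passivating solid domain for an
    # element (the entry name "Fe" here is an assumption for illustration;
    # note the method overrides passive_entry when self._pd._elt_comp is set):
    #     plt = plotter.get_pourbaix_mark_passive(passive_entry="Fe")
    #     plt.show()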
def latexify_ion(formula):
return re.sub(r"()\[([^)]*)\]", r"\1$^{\2}$", formula)
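# Example (hedged sketch): the substitution wraps a bracketed charge in a
# LaTeX superscript, e.g.
#     latexify_ion("Fe[2+]")  ->  "Fe$^{2+}$"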
|
tallakahath/pymatgen
|
pymatgen/analysis/pourbaix/plotter.py
|
Python
|
mit
| 34,827
|
[
"pymatgen"
] |
920511d1c2daa210a37465617ea621dc334ad5140e78252052ee7f6956ae7c1c
|
""" Code that looks for mayavi contributions on sys.path or other
standard places, making it easy for users to add contributions to load
on startup.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2008, Prabhu Ramachandran
# License: BSD Style.
import sys
from os.path import isdir, exists, join, basename
from os import listdir
from traits.api import (HasTraits, List, Str, Instance,
DelegatesTo, Button)
from traitsui.api import View, Item, SetEditor
################################################################################
# `ContribFinder` class.
################################################################################
class ContribFinder(HasTraits):
"""
This class helps find installed mayavi contributions.
"""
# The preference helper whose contrib_packages trait we contribute
# to.
preference_helper = Instance(HasTraits)
# The selected contributions.
contrib_packages = DelegatesTo('preference_helper')
# The found contrib packages.
found_contrib = List(Str, desc='the mayavi contribution '
'packages on the system')
# Search for contributions.
search = Button('Search for packages',
desc='search again for contributions')
########################################
# View related code.
view = View(Item('contrib_packages',
show_label=False,
editor=SetEditor(name='found_contrib',
left_column_title='Available '\
'contributions',
right_column_title='Selected '\
'contributions',
can_move_all=False),
resizable=True,
),
Item('search', show_label=False),
resizable=True
)
######################################################################
# `object` interface.
######################################################################
def __init__(self, **traits):
super(ContribFinder, self).__init__(**traits)
# Find the contributions by default.
self.find()
######################################################################
# `ContribFinder` interface.
######################################################################
def find(self):
"""Find the contrib directories from sys.path."""
found = []
for d in sys.path:
if isdir(d):
for s in listdir(d):
if exists(join(d, s, 'user_mayavi.py')):
found.append(s)
self.found_contrib = found
######################################################################
# Non-public interface.
######################################################################
def _preference_helper_default(self):
from preference_manager import preference_manager
return preference_manager.root
def _search_fired(self):
self.find()
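# Hedged usage sketch (assumes Mayavi's preference machinery is importable):
#     finder = ContribFinder()
#     print(finder.found_contrib)   # packages shipping a user_mayavi.py
#     finder.configure_traits()     # open the standard HasTraits dialog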
|
liulion/mayavi
|
mayavi/preferences/contrib_finder.py
|
Python
|
bsd-3-clause
| 3,216
|
[
"Mayavi"
] |
b77e6d3f5cdfc9213352420f5877cfd19f89b54fbc21554bba1e40676b9f8c8e
|
#!/usr/bin/env python
import os, sys
new_path = [ os.path.join( os.getcwd(), "lib" ) ]
new_path.extend( sys.path[1:] ) # remove scripts/ from the path
sys.path = new_path
from galaxy import eggs
import pkg_resources
pkg_resources.require( "SQLAlchemy >= 0.4" )
import time, ConfigParser, shutil
from datetime import datetime, timedelta
from time import strftime
from optparse import OptionParser
import galaxy.model.mapping
import sqlalchemy as sa
from galaxy.model.orm import and_, eagerload
assert sys.version_info[:2] >= ( 2, 4 )
def main():
parser = OptionParser()
parser.add_option( "-d", "--days", dest="days", action="store", type="int", help="number of days (60)", default=60 )
parser.add_option( "-r", "--remove_from_disk", action="store_true", dest="remove_from_disk", help="remove datasets from disk when purged", default=False )
parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
parser.add_option( "-f", "--force_retry", action="store_true", dest="force_retry", help="performs the requested actions, but ignores whether it might have been done before. Useful when -r wasn't used, but should have been", default=False )
parser.add_option( "-1", "--delete_userless_histories", action="store_true", dest="delete_userless_histories", default=False, help="delete userless histories and datasets" )
parser.add_option( "-2", "--purge_histories", action="store_true", dest="purge_histories", default=False, help="purge deleted histories" )
parser.add_option( "-3", "--purge_datasets", action="store_true", dest="purge_datasets", default=False, help="purge deleted datasets" )
parser.add_option( "-4", "--purge_libraries", action="store_true", dest="purge_libraries", default=False, help="purge deleted libraries" )
parser.add_option( "-5", "--purge_folders", action="store_true", dest="purge_folders", default=False, help="purge deleted library folders" )
parser.add_option( "-6", "--delete_datasets", action="store_true", dest="delete_datasets", default=False, help="mark deletable datasets as deleted and purge associated dataset instances" )
( options, args ) = parser.parse_args()
ini_file = args[0]
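    # Exactly one of the numbered action options may be used per run. Note
    # that a chained XOR only checks odd parity, so an odd number (>1) of
    # selected actions would also pass this guard.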
if not ( options.purge_folders ^ options.delete_userless_histories ^ \
options.purge_libraries ^ options.purge_histories ^ \
options.purge_datasets ^ options.delete_datasets ):
parser.print_help()
sys.exit(0)
if options.remove_from_disk and options.info_only:
parser.error( "remove_from_disk and info_only are mutually exclusive" )
conf_parser = ConfigParser.ConfigParser( {'here':os.getcwd()} )
conf_parser.read( ini_file )
configuration = {}
for key, value in conf_parser.items( "app:main" ):
configuration[key] = value
if 'database_connection' in configuration:
database_connection = configuration['database_connection']
else:
database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % configuration["database_file"]
file_path = configuration['file_path']
app = CleanupDatasetsApplication( database_connection=database_connection, file_path=file_path )
cutoff_time = datetime.utcnow() - timedelta( days=options.days )
now = strftime( "%Y-%m-%d %H:%M:%S" )
print "##########################################"
print "\n# %s - Handling stuff older than %i days" % ( now, options.days )
if options.info_only:
print "# Displaying info only ( --info_only )\n"
elif options.remove_from_disk:
print "Datasets will be removed from disk.\n"
else:
print "Datasets will NOT be removed from disk.\n"
if options.delete_userless_histories:
delete_userless_histories( app, cutoff_time, info_only = options.info_only, force_retry = options.force_retry )
elif options.purge_histories:
purge_histories( app, cutoff_time, options.remove_from_disk, info_only = options.info_only, force_retry = options.force_retry )
elif options.purge_datasets:
purge_datasets( app, cutoff_time, options.remove_from_disk, info_only = options.info_only, force_retry = options.force_retry )
elif options.purge_libraries:
purge_libraries( app, cutoff_time, options.remove_from_disk, info_only = options.info_only, force_retry = options.force_retry )
elif options.purge_folders:
purge_folders( app, cutoff_time, options.remove_from_disk, info_only = options.info_only, force_retry = options.force_retry )
elif options.delete_datasets:
delete_datasets( app, cutoff_time, options.remove_from_disk, info_only = options.info_only, force_retry = options.force_retry )
sys.exit(0)
def delete_userless_histories( app, cutoff_time, info_only = False, force_retry = False ):
# Deletes userless histories whose update_time value is older than the cutoff_time.
# The purge history script will handle marking DatasetInstances as deleted.
# Nothing is removed from disk yet.
history_count = 0
start = time.time()
if force_retry:
histories = app.sa_session.query( app.model.History ) \
.filter( and_( app.model.History.table.c.user_id==None,
app.model.History.table.c.update_time < cutoff_time ) )
else:
histories = app.sa_session.query( app.model.History ) \
.filter( and_( app.model.History.table.c.user_id==None,
app.model.History.table.c.deleted==False,
app.model.History.table.c.update_time < cutoff_time ) )
for history in histories:
if not info_only:
print "Deleting history id ", history.id
history.deleted = True
app.sa_session.add( history )
app.sa_session.flush()
history_count += 1
stop = time.time()
print "Deleted %d histories" % history_count
print "Elapsed time: ", stop - start
print "##########################################"
def purge_histories( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted histories whose update_time is older than the cutoff_time.
# The dataset associations of each history are also marked as deleted.
# The Purge Dataset method will purge each Dataset as necessary
# history.purged == True simply means that it can no longer be undeleted
# i.e. all associated datasets are marked as deleted
history_count = 0
start = time.time()
if force_retry:
histories = app.sa_session.query( app.model.History ) \
.filter( and_( app.model.History.table.c.deleted==True,
app.model.History.table.c.update_time < cutoff_time ) ) \
.options( eagerload( 'datasets' ) )
else:
histories = app.sa_session.query( app.model.History ) \
.filter( and_( app.model.History.table.c.deleted==True,
app.model.History.table.c.purged==False,
app.model.History.table.c.update_time < cutoff_time ) ) \
.options( eagerload( 'datasets' ) )
for history in histories:
for dataset_assoc in history.datasets:
_purge_dataset_instance( dataset_assoc, app, remove_from_disk, info_only = info_only ) #mark a DatasetInstance as deleted, clear associated files, and mark the Dataset as deleted if it is deletable
if not info_only:
# TODO: should the Delete DefaultHistoryPermissions be deleted here? This was incorrectly
# done in the _list_delete() method of the history controller, so copied it here. Not sure
# if we should ever delete info like this from the db though, so commented out for now...
#for dhp in history.default_permissions:
# dhp.delete()
print "Purging history id ", history.id
history.purged = True
app.sa_session.add( history )
app.sa_session.flush()
history_count += 1
stop = time.time()
print 'Purged %d histories.' % history_count
print "Elapsed time: ", stop - start
print "##########################################"
def purge_libraries( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted libraries whose update_time is older than the cutoff_time.
# The dataset associations of each library are also marked as deleted.
# The Purge Dataset method will purge each Dataset as necessary
# library.purged == True simply means that it can no longer be undeleted
# i.e. all associated LibraryDatasets/folders are marked as deleted
library_count = 0
start = time.time()
if force_retry:
libraries = app.sa_session.query( app.model.Library ) \
.filter( and_( app.model.Library.table.c.deleted==True,
app.model.Library.table.c.update_time < cutoff_time ) )
else:
libraries = app.sa_session.query( app.model.Library ) \
.filter( and_( app.model.Library.table.c.deleted==True,
app.model.Library.table.c.purged==False,
app.model.Library.table.c.update_time < cutoff_time ) )
for library in libraries:
_purge_folder( library.root_folder, app, remove_from_disk, info_only = info_only )
if not info_only:
print "Purging library id ", library.id
library.purged = True
app.sa_session.add( library )
app.sa_session.flush()
library_count += 1
stop = time.time()
    print '# Purged %d libraries.' % library_count
print "Elapsed time: ", stop - start
print "##########################################"
def purge_folders( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted folders whose update_time is older than the cutoff_time.
# The dataset associations of each folder are also marked as deleted.
# The Purge Dataset method will purge each Dataset as necessary
# libraryFolder.purged == True simply means that it can no longer be undeleted
# i.e. all associated LibraryDatasets/folders are marked as deleted
folder_count = 0
start = time.time()
if force_retry:
folders = app.sa_session.query( app.model.LibraryFolder ) \
.filter( and_( app.model.LibraryFolder.table.c.deleted==True,
app.model.LibraryFolder.table.c.update_time < cutoff_time ) )
else:
folders = app.sa_session.query( app.model.LibraryFolder ) \
.filter( and_( app.model.LibraryFolder.table.c.deleted==True,
app.model.LibraryFolder.table.c.purged==False,
app.model.LibraryFolder.table.c.update_time < cutoff_time ) )
for folder in folders:
_purge_folder( folder, app, remove_from_disk, info_only = info_only )
folder_count += 1
stop = time.time()
print '# Purged %d folders.' % folder_count
print "Elapsed time: ", stop - start
print "##########################################"
def delete_datasets( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Marks datasets as deleted if associated items are all deleted.
start = time.time()
if force_retry:
history_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
app.model.Dataset.table.c.state ),
whereclause = app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time,
from_obj = [ sa.outerjoin( app.model.Dataset.table,
app.model.HistoryDatasetAssociation.table ) ] )
library_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
app.model.Dataset.table.c.state ),
whereclause = app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time,
from_obj = [ sa.outerjoin( app.model.Dataset.table,
app.model.LibraryDatasetDatasetAssociation.table ) ] )
else:
# We really only need the id column here, but sqlalchemy barfs when trying to select only 1 column
history_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
app.model.Dataset.table.c.state ),
whereclause = sa.and_( app.model.Dataset.table.c.deleted == False,
app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time,
app.model.HistoryDatasetAssociation.table.c.deleted == True ),
from_obj = [ sa.outerjoin( app.model.Dataset.table,
app.model.HistoryDatasetAssociation.table ) ] )
library_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
app.model.Dataset.table.c.state ),
whereclause = sa.and_( app.model.Dataset.table.c.deleted == False,
app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time,
app.model.LibraryDatasetDatasetAssociation.table.c.deleted == True ),
from_obj = [ sa.outerjoin( app.model.Dataset.table,
app.model.LibraryDatasetDatasetAssociation.table ) ] )
history_dataset_ids = [ row.id for row in history_dataset_ids_query.execute() ]
library_dataset_ids = [ row.id for row in library_dataset_ids_query.execute() ]
dataset_ids = history_dataset_ids + library_dataset_ids
skip = []
deleted_dataset_count = 0
deleted_instance_count = 0
for dataset_id in dataset_ids:
print "######### Processing dataset id:", dataset_id
dataset = app.sa_session.query( app.model.Dataset ).get( dataset_id )
if dataset.id not in skip and _dataset_is_deletable( dataset ):
deleted_dataset_count += 1
for dataset_instance in dataset.history_associations + dataset.library_associations:
print "Associated Dataset instance: ", dataset_instance.__class__.__name__, dataset_instance.id
_purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children=True, info_only=info_only, is_deletable=True )
deleted_instance_count += 1
skip.append( dataset.id )
stop = time.time()
print "Examined %d datasets, marked %d as deleted and purged %d dataset instances" % ( len( skip ), deleted_dataset_count, deleted_instance_count )
print "Total elapsed time: ", stop - start
print "##########################################"
def purge_datasets( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted datasets whose update_time is older than cutoff_time. Files may or may
# not be removed from disk.
dataset_count = 0
disk_space = 0
start = time.time()
if force_retry:
datasets = app.sa_session.query( app.model.Dataset ) \
.filter( and_( app.model.Dataset.table.c.deleted==True,
app.model.Dataset.table.c.purgable==True,
app.model.Dataset.table.c.update_time < cutoff_time ) )
else:
datasets = app.sa_session.query( app.model.Dataset ) \
.filter( and_( app.model.Dataset.table.c.deleted==True,
app.model.Dataset.table.c.purgable==True,
app.model.Dataset.table.c.purged==False,
app.model.Dataset.table.c.update_time < cutoff_time ) )
for dataset in datasets:
file_size = dataset.file_size
_purge_dataset( app, dataset, remove_from_disk, info_only = info_only )
dataset_count += 1
try:
disk_space += file_size
except:
pass
stop = time.time()
print 'Purged %d datasets' % dataset_count
if remove_from_disk:
print 'Freed disk space: ', disk_space
print "Elapsed time: ", stop - start
print "##########################################"
def _purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children=True, info_only=False, is_deletable=False ):
    # A dataset_instance is either an HDA or an LDDA. Purging a dataset instance marks the instance as deleted,
    # and marks the associated dataset as deleted if it is not associated with another active DatasetInstance.
if not info_only:
print "Deleting dataset_instance ", str( dataset_instance ), " id ", dataset_instance.id
dataset_instance.mark_deleted( include_children = include_children )
dataset_instance.clear_associated_files()
app.sa_session.add( dataset_instance )
app.sa_session.flush()
app.sa_session.refresh( dataset_instance.dataset )
if is_deletable or _dataset_is_deletable( dataset_instance.dataset ):
# Calling methods may have already checked _dataset_is_deletable, if so, is_deletable should be True
_delete_dataset( dataset_instance.dataset, app, remove_from_disk, info_only=info_only, is_deletable=is_deletable )
#need to purge children here
if include_children:
for child in dataset_instance.children:
_purge_dataset_instance( child, app, remove_from_disk, include_children = include_children, info_only = info_only )
def _dataset_is_deletable( dataset ):
#a dataset is deletable when it no longer has any non-deleted associations
return not bool( dataset.active_history_associations or dataset.active_library_associations )
def _delete_dataset( dataset, app, remove_from_disk, info_only=False, is_deletable=False ):
    # Marks a base dataset as deleted; HDAs/LDDAs associated with the dataset can no longer be undeleted.
    # Metadata files attached to associated dataset instances are removed now.
if not is_deletable and not _dataset_is_deletable( dataset ):
print "This Dataset (%i) is not deletable, associated Metadata Files will not be removed.\n" % ( dataset.id )
else:
# Mark all associated MetadataFiles as deleted and purged and remove them from disk
metadata_files = []
#lets create a list of metadata files, then perform actions on them
for hda in dataset.history_associations:
for metadata_file in app.sa_session.query( app.model.MetadataFile ) \
.filter( app.model.MetadataFile.table.c.hda_id==hda.id ):
metadata_files.append( metadata_file )
for lda in dataset.library_associations:
for metadata_file in app.sa_session.query( app.model.MetadataFile ) \
.filter( app.model.MetadataFile.table.c.lda_id==lda.id ):
metadata_files.append( metadata_file )
for metadata_file in metadata_files:
print "The following metadata files attached to associations of Dataset '%s' have been purged:" % dataset.id
if not info_only:
if remove_from_disk:
try:
print "Removing disk file ", metadata_file.file_name
os.unlink( metadata_file.file_name )
except Exception, e:
print "Error, exception: %s caught attempting to purge metadata file %s\n" %( str( e ), metadata_file.file_name )
metadata_file.purged = True
app.sa_session.add( metadata_file )
app.sa_session.flush()
metadata_file.deleted = True
app.sa_session.add( metadata_file )
app.sa_session.flush()
print "%s" % metadata_file.file_name
print "Deleting dataset id", dataset.id
dataset.deleted = True
app.sa_session.add( dataset )
app.sa_session.flush()
def _purge_dataset( app, dataset, remove_from_disk, info_only = False ):
if dataset.deleted:
try:
if dataset.purgable and _dataset_is_deletable( dataset ):
if not info_only:
# Remove files from disk and update the database
if remove_from_disk:
# TODO: should permissions on the dataset be deleted here?
print "Removing disk, file ", dataset.file_name
os.unlink( dataset.file_name )
# Remove associated extra files from disk if they exist
if dataset.extra_files_path and os.path.exists( dataset.extra_files_path ):
shutil.rmtree( dataset.extra_files_path ) #we need to delete the directory and its contents; os.unlink would always fail on a directory
print "Purging dataset id", dataset.id
dataset.purged = True
app.sa_session.add( dataset )
app.sa_session.flush()
else:
print "This dataset (%i) is not purgable, the file (%s) will not be removed.\n" % ( dataset.id, dataset.file_name )
except OSError, exc:
print "Error, dataset file has already been removed: %s" % str( exc )
print "Purging dataset id", dataset.id
dataset.purged = True
app.sa_session.add( dataset )
app.sa_session.flush()
except Exception, exc:
print "Error attempting to purge data file: ", dataset.file_name, " error: ", str( exc )
else:
print "Error: '%s' has not previously been deleted, so it cannot be purged\n" % dataset.file_name
def _purge_folder( folder, app, remove_from_disk, info_only = False ):
"""Purges a folder and its contents, recursively"""
for ld in folder.datasets:
print "Deleting library dataset id ", ld.id
ld.deleted = True
for ldda in [ld.library_dataset_dataset_association] + ld.expired_datasets:
_purge_dataset_instance( ldda, app, remove_from_disk, info_only = info_only ) #mark a DatasetInstance as deleted, clear associated files, and mark the Dataset as deleted if it is deletable
for sub_folder in folder.folders:
_purge_folder( sub_folder, app, remove_from_disk, info_only = info_only )
if not info_only:
# TODO: should the folder permissions be deleted here?
print "Purging folder id ", folder.id
folder.purged = True
app.sa_session.add( folder )
app.sa_session.flush()
class CleanupDatasetsApplication( object ):
"""Encapsulates the state of a Universe application"""
def __init__( self, database_connection=None, file_path=None ):
if database_connection is None:
raise Exception( "CleanupDatasetsApplication requires a database_connection value" )
if file_path is None:
raise Exception( "CleanupDatasetsApplication requires a file_path value" )
self.database_connection = database_connection
self.file_path = file_path
# Setup the database engine and ORM
self.model = galaxy.model.mapping.init( self.file_path, self.database_connection, engine_options={}, create_tables=False )
@property
def sa_session( self ):
"""
Returns a SQLAlchemy session -- currently just gets the current
session from the threadlocal session context, but this is provided
to allow migration toward a more SQLAlchemy 0.4 style of use.
"""
return self.model.context.current
if __name__ == "__main__": main()
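# Hedged usage sketch (the ini filename below is an assumption; Galaxy
# configs were commonly named universe_wsgi.ini). Select exactly one action:
#     python cleanup_datasets.py universe_wsgi.ini -d 60 -1 --info_only
#     python cleanup_datasets.py universe_wsgi.ini -d 60 -3 -r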
|
volpino/Yeps-EURAC
|
scripts/cleanup_datasets/cleanup_datasets.py
|
Python
|
mit
| 25,034
|
[
"Galaxy"
] |
1da98f211f9430267e867d99ee0439bd1c3973604c111e2c048b544c03aacc3a
|
#
# Copyright (C) 2010-2020 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import os
import numpy as np
try:
import vtk
from vtk.util import numpy_support as VN
skipIfMissingPythonPackage = utx.no_skip
except ImportError:
skipIfMissingPythonPackage = ut.skip(
"Python module vtk not available, skipping test!")
import espressomd
import espressomd.lb
if espressomd.has_features('LB_BOUNDARIES'):
import espressomd.lbboundaries
import espressomd.shapes
class TestLBWrite:
system = espressomd.System(box_l=[10, 11, 12])
system.time_step = 0.01
system.cell_system.skin = 0.4
def tearDown(self):
self.system.actors.clear()
self.system.thermostat.turn_off()
def set_lbf(self):
# setup LB system
lbf = self.lb_class(
kT=1, agrid=1.0, dens=1.0, visc=1.0, tau=0.1, seed=42,
ext_force_density=[0, 0.03, 0])
self.system.actors.add(lbf)
if espressomd.has_features('LB_BOUNDARIES'):
self.system.lbboundaries.add(espressomd.lbboundaries.LBBoundary(
shape=espressomd.shapes.Wall(normal=[1, 0, 0], dist=1.5)))
self.system.lbboundaries.add(espressomd.lbboundaries.LBBoundary(
shape=espressomd.shapes.Wall(normal=[-1, 0, 0], dist=-10.5)))
return lbf
def parse_vtk(self, filepath, name, shape):
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName(filepath)
reader.ReadAllVectorsOn()
reader.ReadAllScalarsOn()
reader.Update()
data = reader.GetOutput()
points = data.GetPointData()
return VN.vtk_to_numpy(points.GetArray(name)).reshape(shape, order='F')
def test_vtk(self):
'''
Check VTK files.
'''
os.makedirs('vtk_out', exist_ok=True)
filepaths = ['vtk_out/boundary.vtk', 'vtk_out/velocity.vtk',
'vtk_out/velocity_bb.vtk']
# cleanup action
for filepath in filepaths:
if os.path.exists(filepath):
os.remove(filepath)
shape = [10, 11, 12]
lbf = self.set_lbf()
self.system.integrator.run(100)
# write VTK files
with self.assertRaises(RuntimeError):
lbf.write_vtk_velocity('non_existent_folder/file')
with self.assertRaises(RuntimeError):
lbf.write_vtk_boundary('non_existent_folder/file')
lbf.write_vtk_boundary('vtk_out/boundary.vtk')
lbf.write_vtk_velocity('vtk_out/velocity.vtk')
with self.assertRaises(ValueError):
lbf.write_vtk_velocity('vtk_out/delme', 3 * [0], None)
with self.assertRaises(ValueError):
lbf.write_vtk_velocity('vtk_out/delme', None, 3 * [0])
with self.assertRaises(RuntimeError):
lbf.write_vtk_velocity('vtk_out/delme', [-2, 1, 1], 3 * [1])
with self.assertRaises(RuntimeError):
lbf.write_vtk_velocity('vtk_out/delme', 3 * [0], [1, 2, 16])
with self.assertRaises(ValueError):
lbf.write_vtk_velocity('vtk_out/delme', [1, 1], 3 * [1])
with self.assertRaises(ValueError):
lbf.write_vtk_velocity('vtk_out/delme', 3 * [1], np.array([2, 3]))
bb1, bb2 = ([1, 2, 3], [9, 10, 11])
lbf.write_vtk_velocity('vtk_out/velocity_bb.vtk', bb1, bb2)
# check VTK files exist
for filepath in filepaths:
self.assertTrue(
os.path.exists(filepath),
f'VTK file "{filepath}" not written to disk')
# check VTK values match node values
node_velocity = np.zeros(shape + [3])
node_boundary = np.zeros(shape, dtype=int)
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
node = lbf[i, j, k]
node_velocity[i, j, k] = node.velocity
node_boundary[i, j, k] = node.boundary
node_velocity_bb = node_velocity[bb1[0]:bb2[0],
bb1[1]:bb2[1],
bb1[2]:bb2[2]]
vtk_velocity = self.parse_vtk('vtk_out/velocity.vtk', 'velocity',
node_velocity.shape)
np.testing.assert_allclose(vtk_velocity, node_velocity, atol=5e-7)
vtk_velocity_bb = self.parse_vtk('vtk_out/velocity_bb.vtk', 'velocity',
node_velocity_bb.shape)
np.testing.assert_allclose(
vtk_velocity_bb, node_velocity_bb, atol=5e-7)
vtk_boundary = self.parse_vtk(
'vtk_out/boundary.vtk', 'boundary', shape)
np.testing.assert_equal(vtk_boundary, node_boundary.astype(int))
def test_print(self):
'''
Check data files.
'''
os.makedirs('vtk_out', exist_ok=True)
filepaths = ['vtk_out/boundary.dat', 'vtk_out/velocity.dat']
# cleanup action
for filepath in filepaths:
if os.path.exists(filepath):
os.remove(filepath)
shape = [10, 11, 12]
lbf = self.set_lbf()
self.system.integrator.run(100)
# write data files
with self.assertRaises(RuntimeError):
lbf.write_velocity('non_existent_folder/file')
with self.assertRaises(RuntimeError):
lbf.write_boundary('non_existent_folder/file')
lbf.write_boundary('vtk_out/boundary.dat')
lbf.write_velocity('vtk_out/velocity.dat')
# check data files exist
for filepath in filepaths:
self.assertTrue(
os.path.exists(filepath),
f'data file "{filepath}" not written to disk')
# check data values match node values
node_velocity = np.zeros(shape + [3])
node_boundary = np.zeros(shape, dtype=int)
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
node = lbf[i, j, k]
node_velocity[i, j, k] = node.velocity
node_boundary[i, j, k] = node.boundary
ref_coord = np.array([
np.tile(np.arange(shape[0]), shape[1] * shape[2]),
np.tile(np.repeat(np.arange(shape[1]), shape[0]), shape[2]),
np.repeat(np.arange(shape[2]), shape[0] * shape[1])]).T
dat_velocity = np.loadtxt('vtk_out/velocity.dat')
dat_coord = (dat_velocity[:, 0:3] - 0.5).astype(int)
np.testing.assert_equal(dat_coord, ref_coord)
dat_vel = dat_velocity[:, 3:]
ref_vel = np.swapaxes(node_velocity, 0, 2).reshape((-1, 3))
np.testing.assert_allclose(dat_vel, ref_vel, atol=5e-7)
dat_boundary = np.loadtxt('vtk_out/boundary.dat')
dat_coord = (dat_boundary[:, 0:3] - 0.5).astype(int)
np.testing.assert_equal(dat_coord, ref_coord)
dat_bound = dat_boundary[:, 3].astype(int)
ref_bound = np.swapaxes(node_boundary, 0, 2).reshape(-1)
if isinstance(lbf, espressomd.lb.LBFluid):
ref_bound = (ref_bound != 0).astype(int)
np.testing.assert_equal(dat_bound, ref_bound)
@skipIfMissingPythonPackage
class TestLBWriteCPU(TestLBWrite, ut.TestCase):
def setUp(self):
self.lb_class = espressomd.lb.LBFluid
@utx.skipIfMissingGPU()
@skipIfMissingPythonPackage
class TestLBWriteGPU(TestLBWrite, ut.TestCase):
def setUp(self):
self.lb_class = espressomd.lb.LBFluidGPU
if __name__ == '__main__':
ut.main()
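# Hedged usage sketch: this is a plain unittest module; it is typically run
# through ESPResSo's Python interpreter wrapper (path from this repository):
#     pypresso testsuite/python/lb_vtk.py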
|
espressomd/espresso
|
testsuite/python/lb_vtk.py
|
Python
|
gpl-3.0
| 8,220
|
[
"ESPResSo",
"VTK"
] |
6697c02435dc081fceed6d51f1e593eab9ef1a48c8fe0b5a48b288898d5d79c9
|
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils.timezone import now
from ...models import Replay
from twython import Twython
import os
import random
import praw
class Command(BaseCommand):
help = "Send out the most exciting replay of the day to Twitter."
def handle(self, *args, **options):
replay = Replay.objects.filter(
timestamp__startswith=now().date()
).order_by('-excitement_factor')[:1]
if not replay:
return
replay = replay[0]
# Post to Twitter.
if (
'TWITTER_API_KEY' in os.environ and 'TWITTER_API_SECRET' in os.environ and
'TWITTER_ACCESS_TOKEN' in os.environ and 'TWITTER_ACCESS_SECRET' in os.environ
):
twitter = Twython(
os.environ['TWITTER_API_KEY'],
os.environ['TWITTER_API_SECRET'],
os.environ['TWITTER_ACCESS_TOKEN'],
os.environ['TWITTER_ACCESS_SECRET'],
)
status_strings = [
'The most exciting match today was a {size}v{size} on {map}. Take a look! {url} #RocketLeague',
]
twitter.update_status(status=random.choice(status_strings).format(
size=replay.team_sizes,
map=str(replay.map),
url='http://{base_url}{replay_url}'.format(
base_url=settings.SITE_DOMAIN,
replay_url=replay.get_absolute_url(),
)
))
# Post to reddit.
if 'REDDIT_USERNAME' in os.environ and 'REDDIT_PASSWORD' in os.environ:
reddit = praw.Reddit(user_agent='RocketLeagueReplays.com posting as /u/RocketLeagueReplays. Written by /u/danielsamuels')
reddit.login(os.environ['REDDIT_USERNAME'], os.environ['REDDIT_PASSWORD'])
reddit.submit(
'RocketLeagueReplays',
now().strftime('Match of the Day - %d/%m/%Y'),
url='http://{base_url}{replay_url}'.format(
base_url=settings.SITE_DOMAIN,
replay_url=replay.get_absolute_url(),
)
)
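# Hedged usage sketch: run as a Django management command, typically from a
# daily scheduler such as cron, with the credentials read above exported:
#     TWITTER_API_KEY=... TWITTER_API_SECRET=... \
#     TWITTER_ACCESS_TOKEN=... TWITTER_ACCESS_SECRET=... \
#     python manage.py social_post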
|
rocket-league-replays/rocket-league-replays
|
rocket_league/apps/replays/management/commands/social_post.py
|
Python
|
gpl-3.0
| 2,215
|
[
"exciting"
] |
dc98fd383d09b7d597b1e06f5cfc5b6a7a1ce8d2ec1d988b774f0c95b6c434ca
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import (
assert_,
)
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import (
HoomdXMLdata,
)
class TestHoomdXMLParser(ParserBase):
parser = mda.topology.HoomdXMLParser.HoomdXMLParser
filename = HoomdXMLdata
expected_attrs = ['types', 'masses', 'charges', 'radii',
'bonds', 'angles', 'dihedrals']
expected_n_atoms = 769
expected_n_residues = 1
expected_n_segments = 1
def test_attr_size(self):
assert_(len(self.top.types) == self.top.n_atoms)
assert_(len(self.top.charges) == self.top.n_atoms)
assert_(len(self.top.masses) == self.top.n_atoms)
def test_bonds(self):
assert_(len(self.top.bonds.values) == 704)
def test_angles(self):
assert_(len(self.top.angles.values) == 640)
def test_dihedrals(self):
assert_(len(self.top.dihedrals.values) == 576)
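# Hedged usage sketch: these ParserBase-derived tests are normally collected
# by the MDAnalysisTests suite; with a compatible runner one could try, e.g.:
#     pytest testsuite/MDAnalysisTests/topology/test_hoomdxml.py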
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/topology/test_hoomdxml.py
|
Python
|
gpl-2.0
| 1,991
|
[
"MDAnalysis"
] |
82f07a9562661eb7384c5cfa786c72e70490a56e7530e34e17bc97b377c54bf2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("digestus.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^updates/', include('updates.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permission Denied")}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
url(r'^500/$', default_views.server_error),
]
|
patpatpatpatpat/digestus
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,479
|
[
"VisIt"
] |
c294710045cc54c674ec648bc99ea5a171301a15f8b43eef061dc41b97e18241
|
#!/usr/bin/env python3
#
# Copyright (C) 2017, 2018 Jaguar Land Rover
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Authors: Shane Fagan - shane.fagan@collabora.com
#
# Authors:
# * Gustavo Noronha <gustavo.noronha@collabora.com>
# * Travis Reitter <travis.reitter@collabora.co.uk>
# * Shane Fagan <shane.fagan@collabora.com>
# * Luis Araujo <luis.araujo@collabora.co.uk>
# * Guillaume Tucker <guillaume.tucker@collabora.com>
import os
import unittest
from subprocess import Popen, PIPE, TimeoutExpired
import vsmlib.utils
import zmq
import ipc.zeromq
import ipc.stream
RULES_PATH = os.path.abspath(os.path.join('.', 'sample_rules'))
LOGS_PATH = os.path.abspath(os.path.join('.', 'sample_logs'))
SIGNAL_NUMBER_PATH = os.path.abspath(os.path.join('.', 'signal_number_maps'))
SIGNAL_FORMAT = '{},{},\'{}\'\n'
VSM_LOG_FILE = 'vsm-tests.log'
SIGNAL_NUM_FILE = 'samples.vsi'
SIGNUM_DEFAULT = "[SIGNUM]"
def format_ipc_input(data):
if not data:
return []
return [ (x.strip(), y.strip()) for x, y in \
[ elm.split('=') for elm in data.split('\n') ] ]
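# Example (hedged): format_ipc_input() turns "name = value" lines into pairs:
#     format_ipc_input('a = 1\nb = 2')  ->  [('a', '1'), ('b', '2')]
#     format_ipc_input('')              ->  []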
def _remove_timestamp(output_string):
# strip any prepended timestamp, if it exists
output = ''
for line in output_string.splitlines():
try:
timestamp, remainder = line.split(',', 1)
output += remainder
except ValueError:
output += line
# this re-adds a trailing newline
output += '\n'
return output
def _signal_format_safe(signal_to_num, signal, value):
string = ''
signum = None
if signal in signal_to_num:
signum = signal_to_num[signal]
elif signal != '':
signum = SIGNUM_DEFAULT
if signum:
string = SIGNAL_FORMAT.format(signal, signum, value)
return string
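# Example (hedged): given signal_to_num = {'damage': 5},
#     _signal_format_safe({'damage': 5}, 'damage', True)  ->  "damage,5,'True'\n"
#     _signal_format_safe({'damage': 5}, 'other', 1)      ->  "other,[SIGNUM],'1'\n"
#     _signal_format_safe({'damage': 5}, '', 1)           ->  ''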
class TestVSMDebug(object):
module = None
quit_command = "\nquit"
def close(self):
pass
def _run_vsm(self, cmd, input_data, sig_num_path, wait_time_ms):
data = (input_data + self.quit_command).encode('utf8')
timeout_s = 2
if wait_time_ms > 0:
timeout_s = wait_time_ms / 1000
process = Popen(cmd, stdin=PIPE, stdout=PIPE)
try:
output, _ = process.communicate(data, timeout_s)
except TimeoutExpired:
process.kill()
return None
cmd_output = output.decode()
return _remove_timestamp(cmd_output)
class NoneSignalIPC(ipc.stream.StdioIPC):
def receive(self):
return super(ipc.stream.StdioIPC, self).receive()
def _readline(self):
line = super(NoneSignalIPC, self)._readline()
if line == 'not-acceptable':
return None
return line
class TestVSMNoneSignal(TestVSMDebug):
module = 'tests.NoneSignalIPC'
quit_command = "\nquit=''"
class TestVSMZeroMQ(object):
module = 'ipc.zeromq.ZeromqIPC'
def __init__(self):
self._zmq_addr = ipc.zeromq.SOCKET_ADDR
context = zmq.Context()
self._zmq_socket = context.socket(zmq.PAIR)
self._zmq_socket.connect(self._zmq_addr)
# set maximum wait on receiving (in ms)
self._zmq_socket.RCVTIMEO = 200
def close(self):
self._zmq_socket.close()
def _send(self, signal, value):
self._zmq_socket.send_pyobj((signal, value))
def _receive(self):
return self._zmq_socket.recv_pyobj()
def _receive_all(self, signal_to_num):
process_output = ''
# keep receiving output, one line at a time, until empty (defined as
# a timeout of self._zmq_socket.RCVTIMEO ms -- see where that is set
# for more information)
while True:
try:
sig, val = self._receive()
process_output += _signal_format_safe(signal_to_num, sig, val)
except zmq.error.Again:
# timed out on receive (which happens when we've received
# all output)
break
return process_output
def _run_vsm(self, cmd, input_data, sig_num_path, wait_time_ms):
signal_to_num, _ = vsmlib.utils.parse_signal_num_file(sig_num_path)
process = Popen(cmd)
process_output = self._receive_all(signal_to_num)
for signal, value in format_ipc_input(input_data):
self._send(signal, value)
# Record sent signal directly from the test.
process_output += _signal_format_safe(signal_to_num, signal,
value)
# fetch any pending output so send and receive output maintain
# chronological ordering
process_output += self._receive_all(signal_to_num)
self._send('quit', '')
process.wait()
process_output += self._receive_all(signal_to_num)
return process_output
class TestVSM(unittest.TestCase):
ipc_class = None
def setUp(self):
self.ipc = self.ipc_class()
def tearDown(self):
self.ipc.close()
def run_vsm(self, name, input_data, expected_output, use_initial=True,
replay_case=None, wait_time_ms=0):
conf = os.path.join(RULES_PATH, name + '.yaml')
initial_state = os.path.join(RULES_PATH, name + '.initial.yaml')
cmd = ['./vsm.py' ]
sig_num_path = os.path.join(SIGNAL_NUMBER_PATH, SIGNAL_NUM_FILE)
cmd += [ '--signal-number-file={}'.format(sig_num_path) ]
# Direct verbose output (including state dumps) to log file so the tests
# can parse them.
cmd += [ '--log-file={}'.format(VSM_LOG_FILE) ]
if use_initial and os.path.exists(initial_state):
cmd += ['--initial-state={}'.format(initial_state)]
cmd += [conf]
if replay_case:
replay_file = os.path.join(LOGS_PATH, replay_case + '.log')
if os.path.exists(replay_file):
cmd += ['--replay-log-file={}'.format(replay_file)]
if self.ipc.module:
cmd += ['--ipc-modules={}'.format(self.ipc.module)]
process_output = self.ipc._run_vsm(cmd, input_data, sig_num_path,
wait_time_ms)
if process_output is None:
self.fail("VSM process failed")
# Read state dump from log file.
with open(VSM_LOG_FILE) as f:
state_output = f.read()
log_output = _remove_timestamp(state_output)
output_final = log_output + process_output
        self.assertEqual(output_final, expected_output)
class VSMTestCases(TestVSM):
def test_simple0(self):
input_data = 'transmission.gear = "reverse"'
expected_output = '''
transmission.gear,9,'reverse'
State = {
transmission.gear = reverse
}
condition: (transmission.gear == 'reverse') => True
car.backup,3,'True'
State = {
car.backup = True
transmission.gear = reverse
}
transmission.gear,9,'"reverse"'
car.backup,3,'True'
'''
self.run_vsm('simple0', input_data, expected_output.strip() + '\n')
def test_simple0_delayed(self):
input_data = 'transmission.gear = "reverse"'
expected_output = '''
transmission.gear,9,'reverse'
State = {
transmission.gear = reverse
}
condition: (transmission.gear == 'reverse') => True
car.backup,3,'True'
State = {
car.backup = True
transmission.gear = reverse
}
transmission.gear,9,'"reverse"'
car.backup,3,'True'
'''
self.run_vsm('simple0_delay', input_data, expected_output.strip() + '\n')
def test_simple0_uninteresting(self):
'''
A test case where conditions to emit another signal are never triggered
'''
input_data = 'phone.call = "inactive"'
expected_output = '''
phone.call,7,'inactive'
State = {
phone.call = inactive
}
condition: (phone.call == 'active') => False
phone.call,7,'"inactive"'
'''
self.run_vsm('simple0', input_data, expected_output.strip() + '\n')
def test_simple2_initial(self):
input_data = 'damage = True'
expected_output = '''
damage,5,True
State = {
damage = True
moving = False
}
condition: (moving != True and damage == True) => True
car.stop,4,'True'
State = {
car.stop = True
damage = True
moving = False
}
damage,5,'True'
car.stop,4,'True'
'''
self.run_vsm('simple2', input_data, expected_output.strip() + '\n')
def test_simple2_initial_uninteresting(self):
'''
A test case where conditions to emit another signal are never triggered
'''
input_data = 'moving = False'
expected_output = '''
moving,6,False
State = {
moving = False
}
moving,6,'False'
'''
self.run_vsm('simple2', input_data, expected_output.strip() + '\n')
def test_simple2_modify_uninteresting(self):
'''
A test case where conditions to emit another signal are never triggered
'''
input_data = 'moving = True\ndamage = True'
expected_output = '''
moving,6,True
State = {
moving = True
}
condition: (moving != True and damage == True) => False
damage,5,True
State = {
damage = True
moving = True
}
condition: (moving != True and damage == True) => False
moving,6,'True'
damage,5,'True'
'''
self.run_vsm('simple2', input_data, expected_output.strip() + '\n')
def test_simple2_multiple_signals(self):
input_data = 'moving = False\ndamage = True'
expected_output = '''
moving,6,False
State = {
moving = False
}
damage,5,True
State = {
damage = True
moving = False
}
condition: (moving != True and damage == True) => True
car.stop,4,'True'
State = {
car.stop = True
damage = True
moving = False
}
moving,6,'False'
damage,5,'True'
car.stop,4,'True'
'''
self.run_vsm('simple2', input_data, expected_output.strip() + '\n', False)
def test_simple0_log_replay(self):
'''
A test of the log replay functionality
'''
# replay output is not currently forwarded to IPC modules
if self.ipc.module:
self.skipTest("test not compatible with IPC module")
input_data = ''
expected_output = '''
phone.call,7,'active'
State = {
phone.call = active
}
car.stop,4,'True'
State = {
car.stop = True
phone.call = active
}
phone.call,7,'active'
car.stop,4,'True'
'''
self.run_vsm('simple0', input_data, expected_output.strip() + '\n',
replay_case='simple0-replay', wait_time_ms=5000)
def test_unconditional_emit_log_replay(self):
'''
Regression test to ensure we don't issue duplicate unconditional emits
when replaying.
'''
input_data = ''
expected_output = '''
lock.state,13,'true'
State = {
lock.state = true
}
lock.state,13,'true'
'''
self.run_vsm('unconditional_emit', input_data,
expected_output.strip() + '\n',
replay_case='unconditional_emit', wait_time_ms=500)
def test_simple3_xor_condition(self):
input_data = 'phone.call = "active"\nspeed.value = 5.0'
expected_output = '''
phone.call,7,'active'
State = {
phone.call = active
}
speed.value,8,5.0
State = {
phone.call = active
speed.value = 5.0
}
condition: (phone.call == 'active' ^^ speed.value > 50.90) => True
car.stop,4,'True'
State = {
car.stop = True
phone.call = active
speed.value = 5.0
}
phone.call,7,'"active"'
speed.value,8,'5.0'
car.stop,4,'True'
'''
self.run_vsm('simple3', input_data, expected_output.strip() + '\n')
def test_monitored_condition_satisfied(self):
'''
This test case sets up the monitor for the subcondition and
satisfies the subcondition before the 'stop' timeout (and thus omits the
error message in the expected output).
'''
input_data = 'transmission.gear = "forward"\n' \
'transmission.gear = "reverse"\n' \
'camera.backup.active = True'
expected_output = '''
transmission.gear,9,'reverse'
State = {
transmission.gear = reverse
}
transmission.gear,9,'forward'
State = {
transmission.gear = forward
}
condition: (transmission.gear == 'reverse') => False
transmission.gear,9,'reverse'
State = {
transmission.gear = reverse
}
condition: (transmission.gear == 'reverse') => True
lights.external.backup,14,'True'
State = {
lights.external.backup = True
transmission.gear = reverse
}
camera.backup.active,15,True
State = {
camera.backup.active = True
lights.external.backup = True
transmission.gear = reverse
}
parent condition: transmission.gear == reverse
condition: (camera.backup.active == True) => True
transmission.gear,9,'reverse'
transmission.gear,9,'"forward"'
transmission.gear,9,'"reverse"'
lights.external.backup,14,'True'
camera.backup.active,15,'True'
'''
self.run_vsm('monitored_condition', input_data,
expected_output.strip() + '\n', wait_time_ms=2500)
def test_monitored_condition_child_failure(self):
'''
This test case sets up the monitor for the subcondition and
intentionally allows it to fail by not satisfying the subcondition
before the 'stop' timeout.
'''
input_data = 'transmission.gear = "forward"\n' \
'transmission.gear = "reverse"'
expected_output = '''
transmission.gear,9,'reverse'
State = {
transmission.gear = reverse
}
transmission.gear,9,'forward'
State = {
transmission.gear = forward
}
condition: (transmission.gear == 'reverse') => False
transmission.gear,9,'reverse'
State = {
transmission.gear = reverse
}
condition: (transmission.gear == 'reverse') => True
lights.external.backup,14,'True'
State = {
lights.external.backup = True
transmission.gear = reverse
}
condition not met by 'start' time of 1000ms
transmission.gear,9,'reverse'
transmission.gear,9,'"forward"'
transmission.gear,9,'"reverse"'
lights.external.backup,14,'True'
'''
self.run_vsm('monitored_condition', input_data,
expected_output.strip() + '\n', wait_time_ms=1500)
def test_monitored_condition_parent_cancellation(self):
'''
This test case sets up the monitor for the subcondition and changes the
evaluation of the parent condition to cancel the monitor before the
'stop' timeout.
'''
input_data = 'transmission.gear = "forward"\n' \
'transmission.gear = "reverse" \n' \
'transmission.gear = "forward"'
expected_output = '''
transmission.gear,9,'reverse'
State = {
transmission.gear = reverse
}
transmission.gear,9,'forward'
State = {
transmission.gear = forward
}
condition: (transmission.gear == 'reverse') => False
transmission.gear,9,'reverse'
State = {
transmission.gear = reverse
}
condition: (transmission.gear == 'reverse') => True
lights.external.backup,14,'True'
State = {
lights.external.backup = True
transmission.gear = reverse
}
transmission.gear,9,'forward'
State = {
lights.external.backup = True
transmission.gear = forward
}
condition: (transmission.gear == 'reverse') => False
transmission.gear,9,'reverse'
transmission.gear,9,'"forward"'
transmission.gear,9,'"reverse"'
lights.external.backup,14,'True'
transmission.gear,9,'"forward"'
'''
self.run_vsm('monitored_condition', input_data,
expected_output.strip() + '\n', wait_time_ms=1500)
def test_nested_4_condition_satisfied(self):
'''
This test case triggers the parent monitored condition and satisfies its
three descendants to fully satisfy a 4-deep nesting of conditions.
'''
input_data = 'a = true\n' \
'b = true\n' \
'c = true\n' \
'd = true'
expected_output = '''
a,5040,True
State = {
a = True
}
condition: (a == True) => True
b,5041,True
State = {
a = True
b = True
}
parent condition: a == True
condition: (b == True) => True
c,5042,True
State = {
a = True
b = True
c = True
}
parent condition: b == True
parent condition: a == True
condition: (c == True) => True
d,5043,True
State = {
a = True
b = True
c = True
d = True
}
parent condition: c == True
parent condition: b == True
parent condition: a == True
condition: (d == True) => True
a,5040,'true'
b,5041,'true'
c,5042,'true'
d,5043,'true'
'''
self.run_vsm('nested_4', input_data,
expected_output.strip() + '\n', wait_time_ms=2200)
def test_nested_4_condition_child_failure(self):
'''
This test case triggers the parent monitored condition and lets one of
the middle conditions go unmet before its timeout.
'''
input_data = 'a = true\n' \
'b = true'
expected_output = '''
a,5040,True
State = {
a = True
}
condition: (a == True) => True
b,5041,True
State = {
a = True
b = True
}
parent condition: a == True
condition: (b == True) => True
condition not met by 'start' time of 1000ms
condition not met by 'start' time of 1500ms
a,5040,'true'
b,5041,'true'
'''
self.run_vsm('nested_4', input_data,
expected_output.strip() + '\n', wait_time_ms=2200)
def test_parallel(self):
input_data = 'transmission.gear = "reverse"\n'\
'wipers = True'
expected_output = '''
transmission.gear,9,'reverse'
State = {
transmission.gear = reverse
}
condition: (transmission.gear == 'reverse') => True
reverse,16,'True'
State = {
reverse = True
transmission.gear = reverse
}
wipers,17,True
State = {
reverse = True
transmission.gear = reverse
wipers = True
}
condition: (wipers == True) => True
lights,18,'on'
State = {
lights = on
reverse = True
transmission.gear = reverse
wipers = True
}
transmission.gear,9,'"reverse"'
reverse,16,'True'
wipers,17,'True'
lights,18,'on'
'''
self.run_vsm('parallel', input_data, expected_output.strip() + '\n',
False)
def test_sequence_in_order(self):
input_data = 'transmission.gear = "park"\n' \
'ignition = True'
expected_output = '''
transmission.gear,9,'park'
State = {
transmission.gear = park
}
condition: (transmission.gear == 'park') => True
parked,11,'True'
State = {
parked = True
transmission.gear = park
}
ignition,10,True
State = {
ignition = True
parked = True
transmission.gear = park
}
condition: (ignition == True) => True
ignited,12,'True'
State = {
ignited = True
ignition = True
parked = True
transmission.gear = park
}
transmission.gear,9,'"park"'
parked,11,'True'
ignition,10,'True'
ignited,12,'True'
'''
self.run_vsm('sequence', input_data, expected_output.strip() + '\n')
def test_sequence_out_then_in_order(self):
input_data = 'ignition = True\n' \
'transmission.gear = "park"\n' \
'ignition = True'
expected_output = '''
ignition,10,True
State = {
ignition = True
}
changed value for signal 'ignition' ignored because prior conditions in its sequence block have not been met
transmission.gear,9,'park'
State = {
ignition = True
transmission.gear = park
}
condition: (transmission.gear == 'park') => True
parked,11,'True'
State = {
ignition = True
parked = True
transmission.gear = park
}
ignition,10,True
State = {
ignition = True
parked = True
transmission.gear = park
}
condition: (ignition == True) => True
ignited,12,'True'
State = {
ignited = True
ignition = True
parked = True
transmission.gear = park
}
ignition,10,'True'
transmission.gear,9,'"park"'
parked,11,'True'
ignition,10,'True'
ignited,12,'True'
'''
self.run_vsm('sequence', input_data, expected_output.strip() + '\n')
def test_unconditional_emit(self):
input_data = ''
expected_output = '''
lock.state,13,'True'
State = {
lock.state = True
}
lock.state,13,'True'
'''
self.run_vsm('unconditional_emit', input_data,
expected_output.strip() + '\n')
def test_delay(self):
input_data = 'wipers.front.on = True'
expected_output = '''
wipers.front.on,5020,True
State = {
wipers.front.on = True
}
condition: (wipers.front.on == True) => True
lights.external.headlights,19,'True'
State = {
lights.external.headlights = True
wipers.front.on = True
}
wipers.front.on,5020,'True'
lights.external.headlights,19,'True'
'''
# NOTE: ideally, this would verify the delay in the output but, for
# simplicity, that is covered by a manual test case. This simply ensures
# the output is correct.
self.run_vsm('delay', input_data, expected_output.strip() + '\n', False,
wait_time_ms=2500)
def test_subclauses_arithmetic_booleans(self):
input_data = 'flux_capacitor.energy_generated = 1.1\nspeed.value = 140'
expected_output = '''
flux_capacitor.energy_generated,5030,1.1
State = {
flux_capacitor.energy_generated = 1.1
}
condition: (flux_capacitor.energy_generated >= 1.21 * 0.9 and not (flux_capacitor.energy_generated >= 1.21)
) => True
lights.external.time_travel_imminent,5032,'True'
State = {
flux_capacitor.energy_generated = 1.1
lights.external.time_travel_imminent = True
}
condition: (flux_capacitor.energy_generated >= 1.21 * 0.9 and not (flux_capacitor.energy_generated >= 1.21)
) => True
lights.external.time_travel_imminent,5032,'True'
State = {
flux_capacitor.energy_generated = 1.1
lights.external.time_travel_imminent = True
}
speed.value,8,140
State = {
flux_capacitor.energy_generated = 1.1
lights.external.time_travel_imminent = True
speed.value = 140
}
condition: (( speed.value >= (88 - 10) * 1.6 and speed.value < 88 * 1.6 ) or ( flux_capacitor.energy_generated >= 1.21 * 0.9 and flux_capacitor.energy_generated < 1.21 )
) => True
lights.internal.time_travel_imminent,5031,'True'
State = {
flux_capacitor.energy_generated = 1.1
lights.external.time_travel_imminent = True
lights.internal.time_travel_imminent = True
speed.value = 140
}
condition: (( speed.value >= (88 - 10) * 1.6 and speed.value < 88 * 1.6 ) or ( flux_capacitor.energy_generated >= 1.21 * 0.9 and flux_capacitor.energy_generated < 1.21 )
) => True
lights.internal.time_travel_imminent,5031,'True'
State = {
flux_capacitor.energy_generated = 1.1
lights.external.time_travel_imminent = True
lights.internal.time_travel_imminent = True
speed.value = 140
}
flux_capacitor.energy_generated,5030,'1.1'
lights.external.time_travel_imminent,5032,'True'
lights.external.time_travel_imminent,5032,'True'
speed.value,8,'140'
lights.internal.time_travel_imminent,5031,'True'
lights.internal.time_travel_imminent,5031,'True'
'''
self.run_vsm('subclauses_arithmetic_booleans', input_data,
expected_output.strip() + '\n', False)
def test_nested_child_before_parent(self):
'''
Ensure that we can safely set a nested condition before its parent.
Originally, this caused a crash.
'''
input_data = 'horn = true'
expected_output = '''
horn,20,True
State = {
horn = True
}
parent condition: parked == (unset)
parent condition: car.stop == (unset)
condition: (horn == True) => True
horn,20,'true'
'''
self.run_vsm('nested_simple', input_data,
expected_output.strip() + '\n', wait_time_ms=1500)
def test_start_0_child_unmet(self):
'''
Ensure that we can use a start time of zero and meet its parent
condition without crashing.
'''
input_data = 'parked = true'
expected_output = '''
parked,11,True
State = {
parked = True
}
condition not met by 'start' time of 0ms
condition: (parked == True) => True
parked,11,'true'
'''
self.run_vsm('start_0', input_data,
expected_output.strip() + '\n', wait_time_ms=1200)
def test_start_0_child_met(self):
'''
Ensure that we can use a start time of zero and meet the full chain of
conditions without crashing.
'''
input_data = 'horn = true\n' \
'parked = true'
expected_output = '''
horn,20,True
State = {
horn = True
}
parent condition: parked == (unset)
condition: (horn == True) => True
parked,11,True
State = {
horn = True
parked = True
}
condition: (parked == True) => True
horn,20,'true'
parked,11,'true'
'''
self.run_vsm('start_0', input_data,
expected_output.strip() + '\n', wait_time_ms=1200)
class VSMStdTests(VSMTestCases):
ipc_class = TestVSMDebug
class VSMZeroMQTests(VSMTestCases):
ipc_class = TestVSMZeroMQ
class VSMNoneSignalTests(TestVSM):
ipc_class = TestVSMNoneSignal
def test_none_signal(self):
input_data = 'transmission.gear = "reverse"\nnot-acceptable'
expected_output = '''
transmission.gear,9,'reverse'
State = {
transmission.gear = reverse
}
condition: (transmission.gear == 'reverse') => True
car.backup,3,'True'
State = {
car.backup = True
transmission.gear = reverse
}
skipping invalid message
car.backup=True
'''
self.run_vsm('simple0', input_data, expected_output.strip() + '\n')
if __name__ == '__main__':
for cls in [VSMStdTests, VSMZeroMQTests, VSMNoneSignalTests]:
suite = unittest.TestLoader().loadTestsFromTestCase(cls)
unittest.TextTestRunner(verbosity=2).run(suite)
|
GENIVI/vehicle_signal_manager
|
tests.py
|
Python
|
mpl-2.0
| 25,241
|
[
"Jaguar"
] |
90181fa61987f03de9f329028f438b8e2e2a63af8d06fec441e8385efe7a353a
|
#! /usr/bin/env python
#
# Copyright (C) 2016 Rich Lewis <rl403@cam.ac.uk>
# License: 3-clause BSD
import os
import zipfile
import logging
LOGGER = logging.getLogger(__name__)
import pandas as pd
import numpy as np
import skchem
from .base import Converter
from ... import standardizers
PATCHES = {
'820-75-7': r'NNC(=O)CNC(=O)C=[N+]=[N-]',
'2435-76-9': r'[N-]=[N+]=C1C=NC(=O)NC1=O',
'817-99-2': r'NC(=O)CNC(=O)\C=[N+]=[N-]',
'116539-70-9': r'CCCCN(CC(O)C1=C\C(=[N+]=[N-])\C(=O)C=C1)N=O',
'115-02-6': r'NC(COC(=O)\C=[N+]=[N-])C(=O)O',
'122341-55-3': r'NC(COC(=O)\C=[N+]=[N-])C(=O)O'
}
class MullerAmesConverter(Converter):
def __init__(self, directory, output_directory, output_filename='muller_ames.h5'):
"""
Args:
directory (str):
Directory in which input files reside.
output_directory (str):
Directory in which to save the converted dataset.
output_filename (str):
Name of the saved dataset. Defaults to `muller_ames.h5`.
Returns:
tuple of str:
Single-element tuple containing the path to the converted dataset.
"""
zip_path = os.path.join(directory, 'ci900161g_si_001.zip')
output_path = os.path.join(output_directory, output_filename)
with zipfile.ZipFile(zip_path) as f:
f.extractall()
# create dataframe
data = pd.read_csv(os.path.join(directory, 'smiles_cas_N6512.smi'),
delimiter='\t', index_col=1,
converters={1: lambda s: s.strip()},
header=None, names=['structure', 'id', 'is_mutagen'])
data = self.patch_data(data, PATCHES)
data['structure'] = data.structure.apply(skchem.Mol.from_smiles)
data = self.standardize(data)
data = self.optimize(data)
keep = self.filter(data)
ms, ys = keep.structure, keep.is_mutagen
indices = data.reset_index().index.difference(keep.reset_index().index)
train = self.parse_splits(os.path.join(directory, 'splits_train_N6512.csv'))
train = self.drop_indices(train, indices)
splits = self.create_split_dict(train, 'train')
test = self.parse_splits(os.path.join(directory, 'splits_test_N6512.csv'))
test = self.drop_indices(test, indices)
splits.update(self.create_split_dict(test, 'test'))
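# hand everything to the base Converter's run() (which presumably writes the
# HDF5 dataset at output_path, together with the train/test splits)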
self.run(ms, ys, output_path, splits=splits)
def patch_data(self, data, patches):
""" Patch smiles in a DataFrame with rewritten ones that specify diazo
groups in an RDKit-friendly way. """
LOGGER.info('Patching data...')
for cas, smiles in patches.items():
data.loc[cas, 'structure'] = smiles
return data
def parse_splits(self, f_path):
LOGGER.info('Parsing splits...')
with open(f_path) as f:
splits = f.read().strip().splitlines()
splits = [sorted(int(n) for n in split.strip().split(',')) for split in splits] # sorted ints
return [np.array(split) - 1 for split in splits] # zero-based indexing
def drop_indices(self, splits, indices):
LOGGER.info('Dropping failed compounds from split indices...')
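# Each split is an array of zero-based row indices into the full dataset.
# Shift every index down by the number of dropped (failed) compounds that
# precede it, then remove the dropped positions, so the splits stay aligned
# with the filtered data.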
for i, split in enumerate(splits):
split = split - sum(split > ix for ix in indices)
splits[i] = np.delete(split, indices)
return splits
def create_split_dict(self, splits, name):
return {'{}_{}'.format(name, i + 1): split \
for i, split in enumerate(splits)}
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
LOGGER.info('Converting Muller Ames Dataset...')
MullerAmesConverter.convert()
|
richlewis42/scikit-chem
|
skchem/data/converters/muller_ames.py
|
Python
|
bsd-3-clause
| 3,856
|
[
"RDKit"
] |
c8e9cc3049a1d9da63bc4090148c71ee8cf54f83e6b40e3bc5901f3966612c7e
|
from ase import *
from ase.lattice import bulk
from ase.dft.kpoints import monkhorst_pack
from gpaw import *
from gpaw.mpi import serial_comm
from gpaw.test import equal
from gpaw.xc.rpa import RPACorrelation
from gpaw.xc.fxc import FXCCorrelation
import numpy as np
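# Bulk Si regression test: plain RPA and FXCCorrelation with the 'RPA' kernel
# should agree, and the rALDA/rAPBE correlation energies are checked against
# reference values below.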
a0 = 5.43
cell = bulk('Si', 'fcc', a=a0).get_cell()
Si = Atoms('Si2', cell=cell, pbc=True,
scaled_positions=((0,0,0), (0.25,0.25,0.25)))
kpts = monkhorst_pack((2,2,2))
kpts += np.array([1/4., 1/4., 1/4.])
calc = GPAW(mode='pw',
kpts=kpts,
occupations=FermiDirac(0.001),
communicator=serial_comm)
Si.set_calculator(calc)
E = Si.get_potential_energy()
calc.diagonalize_full_hamiltonian(nbands=50)
rpa = RPACorrelation(calc)
E_rpa1 = rpa.calculate(ecut=[25, 50])
fxc = FXCCorrelation(calc, xc='RPA', nlambda=16)
E_rpa2 = fxc.calculate(ecut=[25, 50])
fxc = FXCCorrelation(calc, xc='rALDA', unit_cells=[1,1,2])
E_ralda = fxc.calculate(ecut=[25, 50])
fxc = FXCCorrelation(calc, xc='rAPBE', unit_cells=[1,1,2])
E_rapbe = fxc.calculate(ecut=[25, 50])
equal(E_rpa1[-1], E_rpa2[-1], 0.01)
equal(E_rpa2[-1], -12.6495, 0.001)
equal(E_ralda[-1], -11.3817, 0.001)
equal(E_rapbe[-1], -11.1640, 0.001)
|
robwarm/gpaw-symm
|
gpaw/test/ralda_energy_Si.py
|
Python
|
gpl-3.0
| 1,208
|
[
"ASE",
"GPAW"
] |
45499a6c9eab81e01b6da8a58b5ab1433fc2995ab318138f189db825251773b9
|
"""
DISPERSAO_AVANCADO.PY
Supporting material for the post series "Gráficos de dispersão complexos
no Python" (complex scatter plots in Python), on Programando Ciência.
* Author: Alexandre 'Jaguar' Fioravante de Siqueira
* Contact: http://www.programandociencia.com/sobre/
* Supporting material:
http://www.github.com/alexandrejaguar/programandociencia
* To cite this material, please use the reference below:
DE SIQUEIRA, Alexandre Fioravante. Gráficos de dispersão complexos no
Python [Parte I] - Obtendo dados e criando um gráfico preliminar.
Campinas: Programando Ciência, May 8, 2016. Available at:
http://www.programandociencia.com/2016/05/08/graficos-de-dispersao-complexos-no-python-parte-i-obtendo-dados-e-criando-um-grafico-preliminar/
Accessed on: <ACCESS DATE>.
Copyright (C) Alexandre Fioravante de Siqueira
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation (FSF); either version 3 of the License, or any
later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# importing the required packages.
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# reading the dados_ibge.xls file
dados_brasil = pd.read_excel('dados_ibge.xls', sheetname=2)
# 5-class Dark2 color palette, from ColorBrewer2: http://colorbrewer2.org/
cores = ['#1b9e77',
'#d95f02',
'#7570b3',
'#e7298a',
'#66a61e']
# the atribui_cor() function returns the color corresponding to each region.
def atribui_cor(regiao):
cores = {
'Norte': '#1b9e77',
'Nordeste': '#d95f02',
'Sudeste': '#7570b3',
'Sul': '#e7298a',
'CentroOeste': '#66a61e'
}
return cores.get(regiao, 'black')
# building the color vector.
cor_regiao = list()
qtde_estados = len(dados_brasil['Regiao'])
for estado in range(qtde_estados):
cor_regiao.append(atribui_cor(dados_brasil['Regiao'][estado]))
# generating the plot.
plt.scatter(x=dados_brasil['ExpecVida'],
y=dados_brasil['PIBperCapita'],
s=dados_brasil['PopX1000'],
c=cor_regiao,
alpha=0.6)
plt.title('Desenvolvimento do Brasil em 2013, por estado', fontsize=22)
plt.xlabel('Expectativa de vida (anos)', fontsize=22)
plt.ylabel('PIB per capita (R$)', fontsize=22)
plt.grid(True)
# adding each state's abbreviation to its circle.
for estado in range(len(dados_brasil['UF'])):
plt.text(x=dados_brasil['ExpecVida'][estado],
y=dados_brasil['PIBperCapita'][estado],
s=dados_brasil['UF'][estado],
fontsize=16)
# adding the legend; since the "normal" legend does not work here, the idea
# is to adapt a Line2D object with the colors we defined.
regioes = ['Norte',
'Nordeste',
'Sudeste',
'Sul',
'Centro-Oeste']
# legend 1
legend1_line2d = list()
for passo in range(len(cores)):
legend1_line2d.append(mlines.Line2D([0], [0],
linestyle="none",
marker="o",
alpha=0.6,
markersize=15,
markerfacecolor=cores[passo]))
legend1 = plt.legend(legend1_line2d,
regioes,
numpoints=1,
fontsize=22,
loc="best",
shadow=True)
# legend 2
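# scatter() sizes (s) are areas in points^2, while Line2D's markersize is a
# diameter in points, so sqrt() makes the legend markers comparable in size.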
legend2_line2d = list()
legend2_line2d.append(mlines.Line2D([0], [0],
linestyle="none",
marker="o",
alpha=0.6,
markersize=np.sqrt(100),
markerfacecolor='#D3D3D3'))
legend2_line2d.append(mlines.Line2D([0], [0],
linestyle="none",
marker="o",
alpha=0.6,
markersize=np.sqrt(1000),
markerfacecolor='#D3D3D3'))
legend2_line2d.append(mlines.Line2D([0], [0],
linestyle="none",
marker="o",
alpha=0.6,
markersize=np.sqrt(10000),
markerfacecolor='#D3D3D3'))
legend2 = plt.legend(legend2_line2d,
['1', '10', '100'],
title='População (em 100.000)',
numpoints=1,
fontsize=20,
loc="upper left",
frameon=False, # no frame
labelspacing=3, # increase spacing between labels
handlelength=5, # increase space between marker and text
borderpad=4) # increase the legend padding
plt.gca().add_artist(legend1)
plt.setp(legend2.get_title(), fontsize=22) # increase the title font size
plt.show()
|
alexandrejaguar/programandociencia
|
2016/0508-scatteradv/dispersao_avancado.py
|
Python
|
gpl-2.0
| 5,453
|
[
"Jaguar"
] |
7a35324eee765114d8ed6b879a06bbd2f3db3cb44294f9be54cc41ac89f3ab14
|
# encoding: utf-8
"""colors.py - select how to color the atoms in the GUI."""
import gtk
from gettext import gettext as _
from ase.gui.widgets import pack, cancel_apply_ok, oops, help
import ase
from ase.data.colors import jmol_colors
import numpy as np
import colorsys
named_colors = ('Green', 'Yellow', 'Blue', 'Red', 'Orange', 'Cyan',
'Magenta', 'Black', 'White', 'Grey', 'Violet', 'Brown',
'Navy')
class ColorWindow(gtk.Window):
"A window for selecting how to color the atoms."
def __init__(self, gui):
gtk.Window.__init__(self)
self.gui = gui
self.colormode = gui.colormode
self.actual_colordata = None
self.set_title(_("Colors"))
vbox = gtk.VBox()
self.add(vbox)
vbox.show()
# The main layout consists of two columns, the leftmost split into an upper and a lower part.
self.maintable = gtk.Table(2,2)
pack(vbox, self.maintable)
self.methodbox = gtk.VBox()
self.methodbox.show()
self.maintable.attach(self.methodbox, 0, 1, 0, 1)
self.scalebox = gtk.VBox()
self.scalebox.show()
self.maintable.attach(self.scalebox, 0, 1, 1, 2)
self.colorbox = gtk.Frame()
self.colorbox.show()
self.maintable.attach(self.colorbox, 1, 2, 0, 2, gtk.EXPAND)
# Upper left: Choose how the atoms are colored.
lbl = gtk.Label(_("Choose how the atoms are colored:"))
pack(self.methodbox, [lbl])
self.radio_jmol = gtk.RadioButton(None, _('By atomic number, default "jmol" colors'))
self.radio_atno = gtk.RadioButton(self.radio_jmol,
_('By atomic number, user specified'))
self.radio_tag = gtk.RadioButton(self.radio_jmol, _('By tag'))
self.radio_force = gtk.RadioButton(self.radio_jmol, _('By force'))
self.radio_velocity = gtk.RadioButton(self.radio_jmol, _('By velocity'))
self.radio_charge = gtk.RadioButton(self.radio_jmol, _('By charge'))
self.radio_coordination = gtk.RadioButton(
self.radio_jmol, _('By coordination'))
self.radio_manual = gtk.RadioButton(self.radio_jmol, _('Manually specified'))
self.radio_same = gtk.RadioButton(self.radio_jmol, _('All the same color'))
self.force_box = gtk.VBox()
self.velocity_box = gtk.VBox()
self.charge_box = gtk.VBox()
for widget in (self.radio_jmol, self.radio_atno, self.radio_tag,
self.radio_force, self.force_box, self.radio_velocity,
self.radio_charge, self.charge_box,
self.radio_coordination,
self.velocity_box, self.radio_manual, self.radio_same):
pack(self.methodbox, [widget])
if isinstance(widget, gtk.RadioButton):
widget.connect('toggled', self.method_radio_changed)
# Now fill in the box for additional information in case the force is used.
self.force_label = gtk.Label(_("This should not be displayed!"))
pack(self.force_box, [self.force_label])
self.force_min = gtk.Adjustment(0.0, 0.0, 100.0, 0.05)
self.force_max = gtk.Adjustment(0.0, 0.0, 100.0, 0.05)
self.force_steps = gtk.Adjustment(10, 2, 500, 1)
force_apply = gtk.Button(_('Update'))
force_apply.connect('clicked', self.set_force_colors)
pack(self.force_box, [gtk.Label(_('Min: ')),
gtk.SpinButton(self.force_min, 1.0, 2),
gtk.Label(_(' Max: ')),
gtk.SpinButton(self.force_max, 1.0, 2),
gtk.Label(_(' Steps: ')),
gtk.SpinButton(self.force_steps, 1, 0),
gtk.Label(' '),
force_apply])
self.force_box.hide()
# Now fill in the box for additional information in case the velocity is used.
self.velocity_label = gtk.Label(_("This should not be displayed!"))
pack(self.velocity_box, [self.velocity_label])
self.velocity_min = gtk.Adjustment(0.0, 0.0, 100.0, 0.005)
self.velocity_max = gtk.Adjustment(0.0, 0.0, 100.0, 0.005)
self.velocity_steps = gtk.Adjustment(10, 2, 500, 1)
velocity_apply = gtk.Button(_('Update'))
velocity_apply.connect('clicked', self.set_velocity_colors)
pack(self.velocity_box, [gtk.Label(_('Min: ')),
gtk.SpinButton(self.velocity_min, 1.0, 3),
gtk.Label(_(' Max: ')),
gtk.SpinButton(self.velocity_max, 1.0, 3),
gtk.Label(_(' Steps: ')),
gtk.SpinButton(self.velocity_steps, 1, 0),
gtk.Label(' '),
velocity_apply])
self.velocity_box.hide()
# Now fill in the box for additional information in case
# the charge is used.
self.charge_label = gtk.Label(_("This should not be displayed!"))
pack(self.charge_box, [self.charge_label])
self.charge_min = gtk.Adjustment(0.0, -100.0, 100.0, 0.05)
self.charge_max = gtk.Adjustment(0.0, -100.0, 100.0, 0.05)
self.charge_steps = gtk.Adjustment(10, 2, 500, 1)
charge_apply = gtk.Button(_('Update'))
charge_apply.connect('clicked', self.set_charge_colors)
pack(self.charge_box, [gtk.Label(_('Min: ')),
gtk.SpinButton(self.charge_min, 10.0, 2),
gtk.Label(_(' Max: ')),
gtk.SpinButton(self.charge_max, 10.0, 2),
gtk.Label(_(' Steps: ')),
gtk.SpinButton(self.charge_steps, 1, 0),
gtk.Label(' '),
charge_apply])
self.charge_box.hide()
# Lower left: Create a color scale
pack(self.scalebox, gtk.Label(""))
lbl = gtk.Label(_('Create a color scale:'))
pack(self.scalebox, [lbl])
color_scales = (
_('Black - white'),
_('Black - red - yellow - white'),
_('Black - green - white'),
_('Black - blue - cyan'),
_('Blue - white - red'),
_('Hue'),
_('Named colors')
)
self.scaletype_created = None
self.scaletype = gtk.combo_box_new_text()
for s in color_scales:
self.scaletype.append_text(s)
self.createscale = gtk.Button(_("Create"))
pack(self.scalebox, [self.scaletype, self.createscale])
self.createscale.connect('clicked', self.create_color_scale)
# The actual colors are specified in a box, possibly with scrollbars
self.colorwin = gtk.ScrolledWindow()
self.colorwin.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
self.colorwin.show()
self.colorbox.add(self.colorwin)
self.colorwin.add_with_viewport(gtk.VBox()) # Dummy contents
buts = cancel_apply_ok(cancel=lambda widget: self.destroy(),
apply=self.apply,
ok=self.ok)
pack(vbox, [buts], end=True, bottom=True)
# Make the initial setup of the colors
self.color_errors = {}
self.init_colors_from_gui()
self.show()
gui.register_vulnerable(self)
def notify_atoms_changed(self):
"Called by gui object when the atoms have changed."
self.destroy()
def init_colors_from_gui(self):
cm = self.gui.colormode
# Disallow methods if corresponding data is not available
if not self.gui.images.T.any():
self.radio_tag.set_sensitive(False)
if self.radio_tag.get_active() or cm == 'tag':
self.radio_jmol.set_active(True)
return
else:
self.radio_tag.set_sensitive(True)
if np.isnan(self.gui.images.F).any() or not self.gui.images.F.any():
self.radio_force.set_sensitive(False)
if self.radio_force.get_active() or cm == 'force':
self.radio_jmol.set_active(True)
return
else:
self.radio_force.set_sensitive(True)
if np.isnan(self.gui.images.V).any() or not self.gui.images.V.any():
self.radio_velocity.set_sensitive(False)
if self.radio_velocity.get_active() or cm == 'velocity':
self.radio_jmol.set_active(True)
return
else:
self.radio_velocity.set_sensitive(True)
if not self.gui.images.q.any():
self.radio_charge.set_sensitive(False)
else:
self.radio_charge.set_sensitive(True)
self.radio_manual.set_sensitive(self.gui.images.natoms <= 1000)
# Now check what the current color mode is
if cm == 'jmol':
self.radio_jmol.set_active(True)
self.set_jmol_colors()
elif cm == 'atno':
self.radio_atno.set_active(True)
elif cm == 'tags':
self.radio_tag.set_active(True)
elif cm == 'force':
self.radio_force.set_active(True)
elif cm == 'velocity':
self.radio_velocity.set_active(True)
elif cm == 'charge':
self.radio_charge.set_active(True)
elif cm == 'coordination':
self.radio_coordination.set_active(True)
elif cm == 'manual':
self.radio_manual.set_active(True)
elif cm == 'same':
self.radio_same.set_active(True)
def method_radio_changed(self, widget=None):
"Called when a radio button is changed."
self.scaletype_created = None
self.scaletype.set_active(-1)
if not widget.get_active():
# Ignore most events when a button is turned off.
if widget is self.radio_force:
self.force_box.hide()
if widget is self.radio_velocity:
self.velocity_box.hide()
return
if widget is self.radio_jmol:
self.set_jmol_colors()
elif widget is self.radio_atno:
self.set_atno_colors()
elif widget is self.radio_tag:
self.set_tag_colors()
elif widget is self.radio_force:
self.show_force_stuff()
self.set_force_colors()
elif widget is self.radio_velocity:
self.show_velocity_stuff()
self.set_velocity_colors()
elif widget is self.radio_charge:
self.show_charge_stuff()
self.set_charge_colors()
elif widget is self.radio_coordination:
self.set_coordination_colors()
elif widget is self.radio_manual:
self.set_manual_colors()
elif widget is self.radio_same:
self.set_same_color()
else:
raise RuntimeError('Unknown widget in method_radio_changed')
def make_jmol_colors(self):
"Set the colors to the default jmol colors"
self.colordata_z = []
hasfound = {}
for z in self.gui.images.Z:
if z not in hasfound:
hasfound[z] = True
self.colordata_z.append([z, jmol_colors[z]])
def set_jmol_colors(self):
"We use the immutable jmol colors."
self.make_jmol_colors()
self.set_atno_colors()
for entry in self.color_entries:
entry.set_sensitive(False)
self.colormode = 'jmol'
def set_atno_colors(self):
"We use user-specified per-element colors."
if not hasattr(self, 'colordata_z'):
# No initial colors. Use jmol colors
self.make_jmol_colors()
self.actual_colordata = self.colordata_z
self.color_labels = ["%i (%s):" % (z, ase.data.chemical_symbols[z])
for z, col in self.colordata_z]
self.make_colorwin()
self.colormode = 'atno'
def set_tag_colors(self):
"We use per-tag colors."
# Find which tags are in use
tags = self.gui.images.T
existingtags = range(tags.min(), tags.max()+1)
if not hasattr(self, 'colordata_tags') or len(self.colordata_tags) != len(existingtags):
colors = self.get_named_colors(len(existingtags))
self.colordata_tags = [[x, y] for x, y in
zip(existingtags, colors)]
self.actual_colordata = self.colordata_tags
self.color_labels = [str(x)+':' for x, y in self.colordata_tags]
self.make_colorwin()
self.colormode = 'tags'
def set_same_color(self):
"All atoms have the same color"
if not hasattr(self, 'colordata_same'):
try:
self.colordata_same = self.actual_colordata[0:1]
except AttributeError:
self.colordata_same = self.get_named_colors(1)
self.actual_colordata = self.colordata_same
self.actual_colordata[0][0] = 0
self.color_labels = ['all:']
self.make_colorwin()
self.colormode = 'same'
def set_force_colors(self, *args):
"Use the forces as basis for the colors."
borders = np.linspace(self.force_min.value,
self.force_max.value,
self.force_steps.value,
endpoint=False)
if self.scaletype_created is None:
colors = self.new_color_scale([[0, [1,1,1]],
[1, [0,0,1]]], len(borders))
elif (not hasattr(self, 'colordata_force') or
len(self.colordata_force) != len(borders)):
colors = self.get_color_scale(len(borders), self.scaletype_created)
else:
colors = [y for x, y in self.colordata_force]
self.colordata_force = [[x, y] for x, y in zip(borders, colors)]
self.actual_colordata = self.colordata_force
self.color_labels = ["%.2f:" % x for x, y in self.colordata_force]
self.make_colorwin()
self.colormode = 'force'
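# (fmin, factor) let the drawing code map a force magnitude F onto a color
# band index via int((F - fmin) * factor), clipped to the number of bands.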
fmin = self.force_min.value
fmax = self.force_max.value
factor = self.force_steps.value / (fmax -fmin)
self.colormode_force_data = (fmin, factor)
def set_velocity_colors(self, *args):
"Use the velocities as basis for the colors."
borders = np.linspace(self.velocity_min.value,
self.velocity_max.value,
self.velocity_steps.value,
endpoint=False)
if self.scaletype_created is None:
colors = self.new_color_scale([[0, [1,1,1]],
[1, [1,0,0]]], len(borders))
elif (not hasattr(self, 'colordata_velocity') or
len(self.colordata_velocity) != len(borders)):
colors = self.get_color_scale(len(borders), self.scaletype_created)
else:
colors = [y for x, y in self.colordata_velocity]
self.colordata_velocity = [[x, y] for x, y in zip(borders, colors)]
self.actual_colordata = self.colordata_velocity
self.color_labels = ["%.2f:" % x for x, y in self.colordata_velocity]
self.make_colorwin()
self.colormode = 'velocity'
vmin = self.velocity_min.value
vmax = self.velocity_max.value
factor = self.velocity_steps.value / (vmax -vmin)
self.colormode_velocity_data = (vmin, factor)
def set_charge_colors(self, *args):
"Use the charge as basis for the colors."
borders = np.linspace(self.charge_min.value,
self.charge_max.value,
self.charge_steps.value,
endpoint=False)
if self.scaletype_created is None:
colors = self.new_color_scale([[0, [1,1,1]],
[1, [0,0,1]]], len(borders))
elif (not hasattr(self, 'colordata_charge') or
len(self.colordata_charge) != len(borders)):
colors = self.get_color_scale(len(borders), self.scaletype_created)
else:
colors = [y for x, y in self.colordata_charge]
self.colordata_charge = [[x, y] for x, y in zip(borders, colors)]
self.actual_colordata = self.colordata_charge
self.color_labels = ["%.2f:" % x for x, y in self.colordata_charge]
self.make_colorwin()
self.colormode = 'charge'
qmin = self.charge_min.value
qmax = self.charge_max.value
factor = self.charge_steps.value / (qmax - qmin)
self.colormode_charge_data = (qmin, factor)
def set_coordination_colors(self, *args):
"Use coordination as basis for the colors."
if not hasattr(self.gui, 'coordination'):
self.gui.toggle_show_bonds(None)
coords = self.gui.coordination
existing = range(0, coords.max() + 1)
if not hasattr(self, 'colordata_coordination'):
colors = self.get_named_colors(len(named_colors))
self.colordata_coordination = [[x, y] for x, y in
enumerate(colors)]
self.actual_colordata = self.colordata_coordination
self.color_labels = [(str(x) + ':')
for x, y in self.colordata_coordination]
self.make_colorwin()
self.colormode = 'coordination'
def set_manual_colors(self):
"Set colors of all atoms from the last selection."
# We cannot directly make np.arrays of the colors, as they may
# be sequences of the same length, causing creation of a 2D
# array of characters/numbers instead of a 1D array of
# objects.
colors = np.array([None] * self.gui.images.natoms)
if self.colormode in ['atno', 'jmol', 'tags']:
maxval = max([x for x, y in self.actual_colordata])
oldcolors = np.array([None] * (maxval+1))
for x, y in self.actual_colordata:
oldcolors[x] = y
if self.colormode == 'tags':
colors[:] = oldcolors[self.gui.images.T[self.gui.frame]]
else:
colors[:] = oldcolors[self.gui.images.Z]
elif self.colormode == 'force':
oldcolors = np.array([None] * len(self.actual_colordata))
oldcolors[:] = [y for x, y in self.actual_colordata]
F = self.gui.images.F[self.gui.frame]
F = np.sqrt((F * F).sum(axis=-1))
nF = (F - self.colormode_force_data[0]) * self.colormode_force_data[1]
nF = np.clip(nF.astype(int), 0, len(oldcolors)-1)
colors[:] = oldcolors[nF]
elif self.colormode == 'velocity':
oldcolors = np.array([None] * len(self.actual_colordata))
oldcolors[:] = [y for x, y in self.actual_colordata]
V = self.gui.images.V[self.gui.frame]
V = np.sqrt((V * V).sum(axis=-1))
nV = (V - self.colormode_velocity_data[0]) * self.colormode_velocity_data[1]
nV = np.clip(nV.astype(int), 0, len(oldcolors)-1)
colors[:] = oldcolors[nV]
elif self.colormode == 'charge':
oldcolors = np.array([None] * len(self.actual_colordata))
oldcolors[:] = [y for x, y in self.actual_colordata]
q = self.gui.images.q[self.gui.frame]
nq = ((q - self.colormode_charge_data[0]) *
self.colormode_charge_data[1])
nq = np.clip(nq.astype(int), 0, len(oldcolors)-1)
## print "nq = ", nq
colors[:] = oldcolors[nq]
elif self.colormode == 'coordination':
oldcolors = np.array([None] * len(self.actual_colordata))
oldcolors[:] = [y for x, y in self.actual_colordata]
print self.gui.images.bonds
elif self.colormode == 'same':
oldcolor = self.actual_colordata[0][1]
if len(colors) == len(oldcolor):
# Direct assignment would be e.g. one letter per atom. :-(
colors[:] = [oldcolor] * len(colors)
else:
colors[:] = oldcolor
elif self.colormode == 'manual':
if self.actual_colordata is None: # import colors from gui, if they don't exist already
colors = [y for x,y in self.gui.colordata]
self.color_labels = ["%d:" % i for i in range(len(colors))]
self.actual_colordata = [[i, x] for i, x in enumerate(colors)]
self.make_colorwin()
self.colormode = 'manual'
def show_force_stuff(self):
"Show and update widgets needed for selecting the force scale."
self.force_box.show()
F = np.sqrt(((self.gui.images.F*self.gui.images.dynamic[:,np.newaxis])**2).sum(axis=-1))
fmax = F.max()
nimages = self.gui.images.nimages
assert len(F) == nimages
if nimages > 1:
fmax_frame = self.gui.images.F[self.gui.frame].max()
txt = _("Max force: %.2f (this frame), %.2f (all frames)") % (fmax_frame, fmax)
else:
txt = _("Max force: %.2f.") % (fmax,)
self.force_label.set_text(txt)
if self.force_max.value == 0.0:
self.force_max.value = fmax
def show_velocity_stuff(self):
"Show and update widgets needed for selecting the velocity scale."
self.velocity_box.show()
V = np.sqrt((self.gui.images.V * self.gui.images.V).sum(axis=-1))
vmax = V.max()
nimages = self.gui.images.nimages
assert len(V) == nimages
if nimages > 1:
vmax_frame = self.gui.images.V[self.gui.frame].max()
txt = _("Max velocity: %.2f (this frame), %.2f (all frames)") % (vmax_frame, vmax)
else:
txt = _("Max velocity: %.2f.") % (vmax,)
self.velocity_label.set_text(txt)
if self.velocity_max.value == 0.0:
self.velocity_max.value = vmax
def show_charge_stuff(self):
"Show and update widgets needed for selecting the charge scale."
self.charge_box.show()
qmin = self.gui.images.q.min()
qmax = self.gui.images.q.max()
nimages = self.gui.images.nimages
if nimages > 1:
qmin_frame = self.gui.images.q[self.gui.frame].min()
qmax_frame = self.gui.images.q[self.gui.frame].max()
txt = (_('Min, max charge: %.2f, %.2f (this frame),' +
'%.2f, %.2f (all frames)')
% (qmin_frame, qmax_frame, qmin, qmax))
else:
txt = _("Min, max charge: %.2f, %.2f.") % (qmin, qmax,)
self.charge_label.set_text(txt)
self.charge_max.value = qmax
self.charge_min.value = qmin
def make_colorwin(self):
"""Make the list of editable color entries.
Uses self.actual_colordata and self.color_labels. Produces self.color_entries.
"""
assert len(self.actual_colordata) == len(self.color_labels)
self.color_entries = []
old = self.colorwin.get_child()
self.colorwin.remove(old)
del old
table = gtk.Table(len(self.actual_colordata)+1, 4)
self.colorwin.add_with_viewport(table)
table.show()
self.color_display = []
for i in range(len(self.actual_colordata)):
lbl = gtk.Label(self.color_labels[i])
entry = gtk.Entry(max=20)
val = self.actual_colordata[i][1]
error = False
if not isinstance(val, str):
assert len(val) == 3
intval = tuple(np.round(65535*np.array(val)).astype(int))
val = "%.3f, %.3f, %.3f" % tuple(val)
clr = gtk.gdk.Color(*intval)
else:
try:
clr = gtk.gdk.color_parse(val)
except ValueError:
error = True
entry.set_text(val)
blob = gtk.EventBox()
space = gtk.Label(" ")
space.show()
blob.add(space)
if error:
space.set_text(_("ERROR"))
else:
blob.modify_bg(gtk.STATE_NORMAL, clr)
table.attach(lbl, 0, 1, i, i+1, yoptions=0)
table.attach(entry, 1, 2, i, i+1, yoptions=0)
table.attach(blob, 2, 3, i, i+1, yoptions=0)
lbl.show()
entry.show()
blob.show()
entry.connect('changed', self.entry_changed, i)
self.color_display.append(blob)
self.color_entries.append(entry)
def entry_changed(self, widget, index):
"""The user has changed a color."""
txt = widget.get_text()
txtfields = txt.split(',')
if len(txtfields) == 3:
self.actual_colordata[index][1] = [float(x) for x in txtfields]
val = tuple([int(65535*float(x)) for x in txtfields])
clr = gtk.gdk.Color(*val)
else:
self.actual_colordata[index][1] = txt
try:
clr = gtk.gdk.color_parse(txt)
except ValueError:
# Cannot parse the color
displ = self.color_display[index]
displ.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse('white'))
displ.get_child().set_text(_("ERR"))
self.color_errors[index] = (self.color_labels[index], txt)
return
self.color_display[index].get_child().set_text(" ") # Clear error message
self.color_errors.pop(index, None)
self.color_display[index].modify_bg(gtk.STATE_NORMAL, clr)
def create_color_scale(self, *args):
if self.radio_jmol.get_active():
self.radio_atno.set_active(1)
n = len(self.color_entries)
s = self.scaletype.get_active()
scale = self.get_color_scale(n, s)
self.scaletype_created = s
for i in range(n):
if isinstance(scale[i], str):
self.color_entries[i].set_text(scale[i])
else:
s = "%.3f, %.3f, %.3f" % tuple(scale[i])
self.color_entries[i].set_text(s)
self.color_entries[i].activate()
def get_color_scale(self, n, s):
if s == 0:
# Black - White
scale = self.new_color_scale([[0, [0,0,0]],
[1, [1,1,1]]], n)
elif s == 1:
# Black - Red - Yellow - White (STM colors)
scale = self.new_color_scale([[0, [0,0,0]],
[0.33, [1,0,0]],
[0.67, [1,1,0]],
[1, [1,1,1]]], n)
elif s == 2:
# Black - Green - White
scale = self.new_color_scale([[0, [0,0,0]],
[0.5, [0,0.9,0]],
[0.75, [0.2,1.0,0.2]],
[1, [1,1,1]]], n)
elif s == 3:
# Black - Blue - Cyan
scale = self.new_color_scale([[0, [0,0,0]],
[0.5, [0,0,1]],
[1, [0,1,1]]], n)
elif s == 4:
# Blue - White - Red
scale = self.new_color_scale([[0, [0,0,1]],
[0.5, [1,1,1]],
[2, [1,0,0]]], n)
elif s == 5:
# Hues
hues = np.linspace(0.0, 1.0, n, endpoint=False)
scale = ["%.3f, %.3f, %.3f" % colorsys.hls_to_rgb(h, 0.5, 1)
for h in hues]
elif s == 6:
# Named colors
scale = self.get_named_colors(n)
else:
scale = None
return scale
def new_color_scale(self, fixpoints, n):
"Create a homogeneous color scale."
x = np.array([a[0] for a in fixpoints], float)
y = np.array([a[1] for a in fixpoints], float)
assert y.shape[1] == 3
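# Interpolate linearly between the two fixpoints bracketing each of the n
# evenly spaced sample positions on [0, 1]; x holds the fixpoint positions
# and y the corresponding RGB triples.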
res = []
for a in np.linspace(0.0, 1.0, n, endpoint=True):
n = x.searchsorted(a)
if n == 0:
v = y[0] # Before the start
elif n == len(x):
v = y[-1] # After the end
else:
x0 = x[n-1]
x1 = x[n]
y0 = y[n-1]
y1 = y[n]
v = y0 + (y1 - y0) / (x1 - x0) * (a - x0)
res.append(v)
return res
def get_named_colors(self, n):
if n <= len(named_colors):
return named_colors[:n]
else:
return named_colors + ('Black',) * (n - len(named_colors))
def apply(self, *args):
#if self.colormode in ['atno', 'jmol', 'tags']:
# Color atoms according to an integer value number
if self.color_errors:
oops(_("Incorrect color specification"),
"%s: %s" % self.color_errors.values()[0])
return False
colordata = self.actual_colordata
if self.colormode == 'force':
# Use integers instead for border values
colordata = [[i, x[1]] for i, x in enumerate(self.actual_colordata)]
self.gui.colormode_force_data = self.colormode_force_data
elif self.colormode == 'velocity':
# Use integers instead for border values
colordata = [[i, x[1]] for i, x in enumerate(self.actual_colordata)]
self.gui.colormode_velocity_data = self.colormode_velocity_data
elif self.colormode == 'charge':
# Use integers instead for border values
colordata = [[i, x[1]] for i, x in enumerate(self.actual_colordata)]
self.gui.colormode_charge_data = self.colormode_charge_data
maxval = max([x for x, y in colordata])
self.gui.colors = [None] * (maxval + 1)
new = self.gui.drawing_area.window.new_gc
alloc = self.gui.colormap.alloc_color
for z, val in colordata:
if isinstance(val, str):
self.gui.colors[z] = new(alloc(val))
else:
clr = tuple([int(65535*x) for x in val])
assert len(clr) == 3
self.gui.colors[z] = new(alloc(*clr))
self.gui.colormode = self.colormode
self.gui.colordata = colordata
self.gui.draw()
return True
def cancel(self, *args):
self.destroy()
def ok(self, *args):
if self.apply():
self.destroy()
|
askhl/ase
|
ase/gui/colors.py
|
Python
|
gpl-2.0
| 30,815
|
[
"ASE",
"Jmol"
] |
522c46b17a642df1f9502c49b3e5ea7615ae668b93f177aa9904a8885429f97f
|
import argparse
import pysam
from collections import Counter
from pomoxis.summary_from_stats import qscore
from pomoxis.stats_from_bam import stats_from_aligned_read
def get_errors(aln, ref_seq=None):
seq = aln.query_sequence
errors = {}
insertions = ''
pairs = aln.get_aligned_pairs(with_seq=True)
if pairs[0][0] is None or pairs[0][1] is None:
raise ValueError('It does not look like bam is trimmed to a common alignment window')
for qp, rp, rb in pairs[::-1]: # process pairs in reverse to easily accumulate insertions
if qp is None: # deletion
errors[rp] = (rb, '-')
elif rp is None: # insertion
insertions += seq[qp]
# if we reach here, qp is not None and rp is not None
elif len(insertions) > 0:
# this also includes cases where the ref and query don't agree
# e.g. ref A-TGC
# query GTTGC would emit, A -> GT
errors[rp] = (rb.upper(), seq[qp] + insertions[::-1])
insertions = ''
elif seq[qp] != rb: # mismatch
errors[rp] = (rb.upper(), seq[qp])
if ref_seq is not None and rp is not None:
assert ref_seq[rp] == rb.upper()
if ref_seq is not None:
# check we can scuff up reference using errors and get back our query
scuffed = scuff_ref(ref_seq, errors)
if not scuffed == aln.query_sequence:
raise ValueError('Scuffing up reference with errors did not recreate query')
return errors
def count_errors(errors):
counts = Counter()
for rp, (ref, query) in errors.items():
if query == '-':
counts['del'] += 1
elif ref != query[0]:
counts['sub'] += 1
if len(query) > 1:
counts['ins'] += len(query) - 1
return counts
def scuff_ref(ref_seq, errors):
orig_seq = list(ref_seq)
for rp, (rb, alt) in errors.items():
assert orig_seq[rp] == rb
if alt == '-':
orig_seq[rp] = ''
else:
orig_seq[rp] = alt
return ''.join(orig_seq)
def get_qscores(counts, ref_len):
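# Turn error counts over the common window into rates and convert them with
# qscore() from pomoxis.summary_from_stats (presumably a Phred-style
# -10*log10(rate) transform).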
return {
'Q(acc)': qscore(sum(counts.values()) / ref_len),
'Q(iden)': qscore(counts['sub'] / (ref_len - counts['del'])),
'Q(ins)': qscore(counts['ins'] / ref_len),
'Q(del)': qscore(counts['del'] / ref_len),
}
def main():
parser = argparse.ArgumentParser(
prog='common_errors_from_bam',
description='Get errors common to multiple assemblies aligned to ref.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bam', help='input bam file containing assemblies trimmed to a common alignment window')
parser.add_argument('ref_fasta', help='reference fasta file of the reference over that alignment window')
parser.add_argument('-o', '--output_prefix', default='common_errors',
help='Prefix for outputs.')
args = parser.parse_args()
bam = pysam.AlignmentFile(args.bam)
if len(bam.references) > 1:
raise ValueError('Bam should have just one reference')
ref_lengths = dict(zip(bam.references, bam.lengths))
ref_seq = pysam.FastaFile(args.ref_fasta).fetch(bam.references[0])
# reads should already be trimmed to a common alignment start and end point
reads = [r for r in bam]
ref_end, ref_start = reads[0].reference_end, reads[0].reference_start
ref_len = ref_end - ref_start
if not (all([r.reference_end == ref_end for r in reads]) and
all([r.reference_start == ref_start for r in reads])):
raise ValueError('Alignments have not been trimmed to a common overlap window, try trim_alignments')
# get errors in each read
data = {}
qscores = []
for aln in reads:
errors = get_errors(aln, ref_seq)
counts = count_errors(errors)
# check we got the same error counts as stats_from_aligned_read
stats = stats_from_aligned_read(aln, list(ref_lengths.keys()), list(ref_lengths.values()))
for k in counts.keys():
if stats[k] != counts[k]:
msg = "Error counts {} don't match those from the CIGAR str {}."
raise ValueError(msg.format(counts, {k: stats[k] for k in counts.keys()}))
qscores.append((aln.query_name, get_qscores(counts, ref_len)))
data[aln.query_name] = errors
# get intersection of errors
names = list(data.keys())
common_errors = set(data[names[0]].keys()) # set of reference positions
for name in names[1:]:
common_errors = common_errors.intersection(set(data[name].keys()))
remaining_errors = {}
# loop through common errors, checking ref is the same and retaining the
# error with the shortest edit distance
for rp in common_errors:
ref = data[names[0]][rp][0]
assert all([d[rp][0] == ref for d in data.values()]) # refs should be same
alts = [d[rp][1] for d in data.values()]
if len(set([len(alt) for alt in alts])) > 1:
# we should take the best one
alts = sorted(alts, key=lambda x: len(x))
shortest = alts[0]
others = alts[1:]
if shortest == '-' and any([alt[0] == ref for alt in others]):
# the alt with the insertion contained the ref,
# and the alt with the deletion did not contain the insertion
# so two wrongs make a right!
continue
else:
remaining_errors[rp] = (ref, shortest)
else: # pick most common, arbitrary for equal numbers
remaining_errors[rp] = (ref, Counter(alts).most_common()[0][0])
# write fasta of ref scuffed with just common errors
ref_scuffed = scuff_ref(ref_seq, remaining_errors)
with open('{}.fasta'.format(args.output_prefix), 'w') as fh:
fh.write('>{}\n'.format(args.output_prefix))
fh.write(ref_scuffed)
remaining_counts = count_errors(remaining_errors)
qscores.append(('common_errors', get_qscores(remaining_counts, ref_len)))
# print qscores of individual reads and overlapping errors
cols = ['Q(acc)', 'Q(iden)', 'Q(del)', 'Q(ins)']
with open('{}.txt'.format(args.output_prefix), 'w') as fh:
fh.write('\t'.join(['name'] + cols) + '\n')
for name, d in qscores:
fh.write('\t'.join([name] + [str(d[c]) for c in cols]) + '\n')
if __name__ == '__main__':
main()
|
nanoporetech/pomoxis
|
pomoxis/common_errors_from_bam.py
|
Python
|
mpl-2.0
| 6,482
|
[
"pysam"
] |
31f8af26d3c61dd8b65f8c2b902b04a189574573ec2bb7feba8557334703ad4e
|
#!/usr/bin/env python
"""
Runs Ben's simulation.
usage: %prog [options]
-i, --input=i: Input genome (FASTA format)
-g, --genome=g: If built-in, the genome being used
-l, --read_len=l: Read length
-c, --avg_coverage=c: Average coverage
-e, --error_rate=e: Error rate (0-1)
-n, --num_sims=n: Number of simulations to run
-p, --polymorphism=p: Frequency/ies for minor allele (comma-separated list of 0-1)
-d, --detection_thresh=d: Detection thresholds (comma-separated list of 0-1)
-p, --output_png=p: Plot output
-s, --summary_out=s: Whether or not to output a file with summary of all simulations
-m, --output_summary=m: File name for output summary of all simulations
-f, --new_file_path=f: Directory for summary output files
"""
# removed output of all simulation results on request (not working)
# -r, --sim_results=r: Output all tabular simulation results (number of polymorphisms times number of detection thresholds)
# -o, --output=o: Base name for summary output for each run
from rpy import *
import os
import random, sys, tempfile
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.cookbook import doc_optparse
def stop_err( msg ):
sys.stderr.write( '%s\n' % msg )
sys.exit()
def __main__():
#Parse Command Line
options, args = doc_optparse.parse( __doc__ )
# validate parameters
error = ''
try:
read_len = int( options.read_len )
if read_len <= 0:
raise Exception, ' greater than 0'
except Exception, e:
error = ': %s' % str( e )
if error:
stop_err( 'Make sure your read length is an integer value%s' % error )
error = ''
try:
avg_coverage = int( options.avg_coverage )
if avg_coverage <= 0:
raise Exception, ' greater than 0'
except Exception, e:
error = ': %s' % str( e )
if error:
stop_err( 'Make sure your average coverage is an integer value%s' % error )
error = ''
try:
error_rate = float( options.error_rate )
if error_rate >= 1.0:
error_rate = 10 ** ( -error_rate / 10.0 )
elif error_rate < 0:
raise Exception, ' between 0 and 1'
except Exception, e:
error = ': %s' % str( e )
if error:
stop_err( 'Make sure the error rate is a decimal value%s or the quality score is at least 1' % error )
try:
num_sims = int( options.num_sims )
except Exception, e:
stop_err( 'Make sure the number of simulations is an integer value: %s' % str( e ) )
if options.polymorphism != 'None':
polymorphisms = [ float( p ) for p in options.polymorphism.split( ',' ) ]
else:
stop_err( 'Select at least one polymorphism value to use' )
if options.detection_thresh != 'None':
detection_threshes = [ float( dt ) for dt in options.detection_thresh.split( ',' ) ]
else:
stop_err( 'Select at least one detection threshold to use' )
# mutation dictionaries
hp_dict = { 'A':'G', 'G':'A', 'C':'T', 'T':'C', 'N':'N' } # heteroplasmy dictionary
mt_dict = { 'A':'C', 'C':'A', 'G':'T', 'T':'G', 'N':'N'} # misread dictionary
# read fasta file to seq string
all_lines = open( options.input, 'rb' ).readlines()
seq = ''
for line in all_lines:
line = line.rstrip()
if line.startswith('>'):
pass
else:
seq += line.upper()
seq_len = len( seq )
# output file name template
# removed output of all simulation results on request (not working)
# if options.sim_results == "true":
# out_name_template = os.path.join( options.new_file_path, 'primary_output%s_' + options.output + '_visible_tabular' )
# else:
# out_name_template = tempfile.NamedTemporaryFile().name + '_%s'
out_name_template = tempfile.NamedTemporaryFile().name + '_%s'
print 'out_name_template:', out_name_template
# set up output files
outputs = {}
i = 1
for p in polymorphisms:
outputs[ p ] = {}
for d in detection_threshes:
outputs[ p ][ d ] = out_name_template % i
i += 1
# run sims
for polymorphism in polymorphisms:
for detection_thresh in detection_threshes:
output = open( outputs[ polymorphism ][ detection_thresh ], 'wb' )
output.write( 'FP\tFN\tGENOMESIZE=%s\n' % seq_len )
sim_count = 0
while sim_count < num_sims:
# randomly pick heteroplasmic base index
hbase = random.choice( range( 0, seq_len ) )
#hbase = seq_len/2#random.randrange( 0, seq_len )
# create 2D quasispecies list
qspec = map( lambda x: [], [0] * seq_len )
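# qspec[j] collects every simulated base call covering reference position j;
# reads that overshoot either end wrap around, i.e. the genome is treated as
# circular.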
# simulate read indices and assign to quasispecies
i = 0
while i < ( avg_coverage * ( seq_len / read_len ) ): # number of reads (approximates coverage)
start = random.choice( range( 0, seq_len ) )
#start = seq_len/2#random.randrange( 0, seq_len ) # assign read start
if random.random() < 0.5: # positive sense read
end = start + read_len # assign read end
if end > seq_len: # overshooting origin
read = range( start, seq_len ) + range( 0, ( end - seq_len ) )
else: # regular read
read = range( start, end )
else: # negative sense read
end = start - read_len # assign read end
if end < -1: # overshooting origin
read = range( start, -1, -1) + range( ( seq_len - 1 ), ( seq_len + end ), -1 )
else: # regular read
read = range( start, end, -1 )
# assign read to quasispecies list by index
for j in read:
if j == hbase and random.random() < polymorphism: # heteroplasmic base is variant with p = het
ref = hp_dict[ seq[ j ] ]
else: # ref is the verbatim reference nucleotide (all positions)
ref = seq[ j ]
if random.random() < error_rate: # base in read is misread with p = err
qspec[ j ].append( mt_dict[ ref ] )
else: # otherwise we carry ref through to the end
qspec[ j ].append(ref)
# last but not least
i += 1
bases, fpos, fneg = {}, 0, 0 # last two will be outputted to summary file later
for i, nuc in enumerate( seq ):
cov = len( qspec[ i ] )
bases[ 'A' ] = qspec[ i ].count( 'A' )
bases[ 'C' ] = qspec[ i ].count( 'C' )
bases[ 'G' ] = qspec[ i ].count( 'G' )
bases[ 'T' ] = qspec[ i ].count( 'T' )
# calculate max NON-REF deviation
del bases[ nuc ]
maxdev = float( max( bases.values() ) ) / cov
# deal with non-het sites
if i != hbase:
if maxdev >= detection_thresh: # greater than detection threshold = false positive
fpos += 1
# deal with het sites
if i == hbase:
hnuc = hp_dict[ nuc ] # let's recover het variant
if ( float( bases[ hnuc ] ) / cov ) < detection_thresh: # less than detection threshold = false negative
fneg += 1
del bases[ hnuc ] # ignore het variant
maxdev = float( max( bases.values() ) ) / cov # check other non-ref bases at het site
if maxdev >= detection_thresh: # greater than detection threshold = false positive (possible)
fpos += 1
# output error sums and genome size to summary file
output.write( '%d\t%d\n' % ( fpos, fneg ) )
sim_count += 1
# close output up
output.close()
# Parameters (heteroplasmy, error threshold, colours)
r( '''
het=c(%s)
err=c(%s)
grade = (0:32)/32
hues = rev(gray(grade))
''' % ( ','.join( [ str( p ) for p in polymorphisms ] ), ','.join( [ str( d ) for d in detection_threshes ] ) ) )
# Suppress warnings
r( 'options(warn=-1)' )
# Create allsum (for FP) and allneg (for FN) objects
r( 'allsum <- data.frame()' )
for polymorphism in polymorphisms:
for detection_thresh in detection_threshes:
output = outputs[ polymorphism ][ detection_thresh ]
cmd = '''
ngsum = read.delim('%s', header=T)
ngsum$fprate <- ngsum$FP/%s
ngsum$hetcol <- %s
ngsum$errcol <- %s
allsum <- rbind(allsum, ngsum)
''' % ( output, seq_len, polymorphism, detection_thresh )
r( cmd )
if os.path.getsize( output ) == 0:
for p in outputs.keys():
for d in outputs[ p ].keys():
sys.stderr.write(outputs[ p ][ d ] + ' '+str( os.path.getsize( outputs[ p ][ d ] ) )+'\n')
if options.summary_out == "true":
r( 'write.table(summary(ngsum), file="%s", quote=FALSE, sep="\t", row.names=FALSE)' % options.output_summary )
# Summary objects (these could be printed)
r( '''
tr_pos <- tapply(allsum$fprate,list(allsum$hetcol,allsum$errcol), mean)
tr_neg <- tapply(allsum$FN,list(allsum$hetcol,allsum$errcol), mean)
cat('\nFalse Positive Rate Summary\n\t', file='%s', append=T, sep='\t')
write.table(format(tr_pos, digits=4), file='%s', append=T, quote=F, sep='\t')
cat('\nFalse Negative Rate Summary\n\t', file='%s', append=T, sep='\t')
write.table(format(tr_neg, digits=4), file='%s', append=T, quote=F, sep='\t')
''' % tuple( [ options.output_summary ] * 4 ) )
# Setup graphs
#pdf(paste(prefix,'_jointgraph.pdf',sep=''), 15, 10)
r( '''
png('%s', width=800, height=500, units='px', res=250)
layout(matrix(data=c(1,2,1,3,1,4), nrow=2, ncol=3), widths=c(4,6,2), heights=c(1,10,10))
''' % options.output_png )
# Main title
genome = ''
if options.genome:
genome = '%s: ' % options.genome
r( '''
par(mar=c(0,0,0,0))
plot(1, type='n', axes=F, xlab='', ylab='')
text(1,1,paste('%sVariation in False Positives and Negatives (', %s, ' simulations, coverage ', %s,')', sep=''), font=2, family='sans', cex=0.7)
''' % ( genome, options.num_sims, options.avg_coverage ) )
# False positive boxplot
r( '''
par(mar=c(5,4,2,2), las=1, cex=0.35)
boxplot(allsum$fprate ~ allsum$errcol, horizontal=T, ylim=rev(range(allsum$fprate)), cex.axis=0.85)
title(main='False Positives', xlab='false positive rate', ylab='')
''' )
# False negative heatmap (note zlim command!)
num_polys = len( polymorphisms )
num_dets = len( detection_threshes )
r( '''
par(mar=c(5,4,2,1), las=1, cex=0.35)
image(1:%s, 1:%s, tr_neg, zlim=c(0,1), col=hues, xlab='', ylab='', axes=F, border=1)
axis(1, at=1:%s, labels=rownames(tr_neg), lwd=1, cex.axis=0.85, axs='i')
axis(2, at=1:%s, labels=colnames(tr_neg), lwd=1, cex.axis=0.85)
title(main='False Negatives', xlab='minor allele frequency', ylab='detection threshold')
''' % ( num_polys, num_dets, num_polys, num_dets ) )
# Scale alongside
r( '''
par(mar=c(2,2,2,3), las=1)
image(1, grade, matrix(grade, ncol=length(grade), nrow=1), col=hues, xlab='', ylab='', xaxt='n', las=1, cex.axis=0.85)
title(main='Key', cex=0.35)
mtext('false negative rate', side=1, cex=0.35)
''' )
# Close graphics
r( '''
layout(1)
dev.off()
''' )
# Tidy up
# r( 'rm(folder,prefix,sim,cov,het,err,grade,hues,i,j,ngsum)' )
if __name__ == "__main__" : __main__()
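# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original Galaxy tool): the simulation
# loop above treats the reference sequence as circular, so read indices wrap
# around the origin on both strands. The helper below is a minimal standalone
# sketch of that same wrap-around logic in the same Python 2 style; the name
# simulate_read_indices is an illustrative assumption, not an existing function.
def simulate_read_indices( start, read_len, seq_len, positive_sense ):
    """Return the list of reference indices covered by one simulated read."""
    if positive_sense:
        end = start + read_len
        if end > seq_len: # read overshoots the origin, wrap to the start
            return range( start, seq_len ) + range( 0, end - seq_len )
        return range( start, end )
    else:
        end = start - read_len
        if end < -1: # read overshoots position 0, wrap to the end
            return range( start, -1, -1 ) + range( seq_len - 1, seq_len + end, -1 )
        return range( start, end, -1 )
# Example: simulate_read_indices( 8, 5, 10, True ) gives [8, 9, 0, 1, 2].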
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/tools/ngs_simulation/ngs_simulation.py
|
Python
|
gpl-3.0
| 12,156
|
[
"Galaxy"
] |
903641305a9457a6feafd080358c9393ba6f7b026ce2a304f303cbf017d26caf
|
########################################################################
# $HeadURL$
# File : Watchdog.py
# Author: Stuart Paterson
########################################################################
""" The Watchdog class is used by the Job Wrapper to resolve and monitor
the system resource consumption. The Watchdog can determine if
a running job is stalled and indicate this to the Job Wrapper.
Furthermore, the Watchdog will identify when the Job CPU limit has been
exceeded and fail jobs meaningfully.
Information is returned to the WMS via the heart-beat mechanism. This
also interprets control signals from the WMS e.g. to kill a running
job.
- Still to implement:
- CPU normalization for correct comparison with job limit
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities import Time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemInstance
from DIRAC.Core.Utilities.ProcessMonitor import ProcessMonitor
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.TimeLeft.TimeLeft import TimeLeft
import os, time
class Watchdog:
#############################################################################
def __init__( self, pid, exeThread, spObject, jobCPUtime, memoryLimit = 0, systemFlag = 'linux2.4' ):
""" Constructor, takes system flag as argument.
"""
self.log = gLogger.getSubLogger( "Watchdog" )
self.systemFlag = systemFlag
self.exeThread = exeThread
self.wrapperPID = pid
self.appPID = self.exeThread.getCurrentPID()
self.spObject = spObject
self.jobCPUtime = jobCPUtime
self.memoryLimit = memoryLimit
self.calibration = 0
self.initialValues = {}
self.parameters = {}
self.peekFailCount = 0
self.peekRetry = 5
self.processMonitor = ProcessMonitor()
self.checkError = ''
self.currentStats = {}
self.initialized = False
self.count = 0
#############################################################################
def initialize( self, loops = 0 ):
""" Watchdog initialization.
"""
if self.initialized:
self.log.info( 'Watchdog already initialized' )
return S_OK()
else:
self.initialized = True
setup = gConfig.getValue( '/DIRAC/Setup', '' )
if not setup:
return S_ERROR( 'Can not get the DIRAC Setup value' )
wms_instance = getSystemInstance( "WorkloadManagement" )
if not wms_instance:
return S_ERROR( 'Can not get the WorkloadManagement system instance' )
self.section = '/Systems/WorkloadManagement/%s/JobWrapper' % wms_instance
self.maxcount = loops
self.log.verbose( 'Watchdog initialization' )
self.log.info( 'Attempting to Initialize Watchdog for: %s' % ( self.systemFlag ) )
#Test control flags
self.testWallClock = gConfig.getValue( self.section + '/CheckWallClockFlag', 1 )
self.testDiskSpace = gConfig.getValue( self.section + '/CheckDiskSpaceFlag', 1 )
self.testLoadAvg = gConfig.getValue( self.section + '/CheckLoadAvgFlag', 1 )
self.testCPUConsumed = gConfig.getValue( self.section + '/CheckCPUConsumedFlag', 1 )
self.testCPULimit = gConfig.getValue( self.section + '/CheckCPULimitFlag', 0 )
self.testMemoryLimit = gConfig.getValue( self.section + '/CheckMemoryLimitFlag', 0 )
self.testTimeLeft = gConfig.getValue( self.section + '/CheckTimeLeftFlag', 1 )
#Other parameters
self.pollingTime = gConfig.getValue( self.section + '/PollingTime', 10 ) # 10 seconds
self.checkingTime = gConfig.getValue( self.section + '/CheckingTime', 30 * 60 ) #30 minute period
self.minCheckingTime = gConfig.getValue( self.section + '/MinCheckingTime', 20 * 60 ) # 20 mins
self.maxWallClockTime = gConfig.getValue( self.section + '/MaxWallClockTime', 3 * 24 * 60 * 60 ) # e.g. 3 days
self.jobPeekFlag = gConfig.getValue( self.section + '/JobPeekFlag', 1 ) # on / off
self.minDiskSpace = gConfig.getValue( self.section + '/MinDiskSpace', 10 ) #MB
self.loadAvgLimit = gConfig.getValue( self.section + '/LoadAverageLimit', 1000 ) # > 1000 and jobs killed
self.sampleCPUTime = gConfig.getValue( self.section + '/CPUSampleTime', 30 * 60 ) # 30 minute CPU sample window by default
self.jobCPUMargin = gConfig.getValue( self.section + '/JobCPULimitMargin', 20 ) # %age buffer before killing job
self.minCPUWallClockRatio = gConfig.getValue( self.section + '/MinCPUWallClockRatio', 5 ) #ratio %age
self.nullCPULimit = gConfig.getValue( self.section + '/NullCPUCountLimit', 5 ) # kill the job after 5 sample periods returning null CPU consumption
self.checkCount = 0
self.nullCPUCount = 0
if self.checkingTime < self.minCheckingTime:
self.log.info( 'Requested CheckingTime of %s setting to %s seconds (minimum)' % ( self.checkingTime, self.minCheckingTime ) )
self.checkingTime = self.minCheckingTime
# The time left is returned in seconds @ 250 SI00 = 1 HS06,
# the self.checkingTime and self.pollingTime are in seconds,
# thus they need to be multiplied by a large enough factor
self.grossTimeLeftLimit = 10 * self.checkingTime
self.fineTimeLeftLimit = gConfig.getValue( self.section + '/TimeLeftLimit', 150 * self.pollingTime )
self.timeLeftUtil = TimeLeft()
self.timeLeft = 0
self.littleTimeLeft = False
return S_OK()
def run( self ):
""" The main watchdog execution method
"""
result = self.initialize()
if not result['OK']:
gLogger.always( 'Can not start watchdog for the following reason' )
gLogger.always( result['Message'] )
return result
try:
while True:
gLogger.debug( 'Starting watchdog loop # %d' % self.count )
start_cycle_time = time.time()
result = self.execute()
exec_cycle_time = time.time() - start_cycle_time
if not result[ 'OK' ]:
gLogger.error( "Watchdog error during execution", result[ 'Message' ] )
break
elif result['Value'] == "Ended":
break
self.count += 1
if exec_cycle_time < self.pollingTime:
time.sleep( self.pollingTime - exec_cycle_time )
return S_OK()
except Exception:
gLogger.exception()
return S_ERROR( 'Exception' )
#############################################################################
def execute( self ):
""" The main agent execution method of the Watchdog.
"""
if not self.exeThread.isAlive():
#print self.parameters
self.__getUsageSummary()
self.log.info( 'Process to monitor has completed, Watchdog will exit.' )
return S_OK( "Ended" )
if self.littleTimeLeft:
# if we have gone over enough iterations query again
if self.littleTimeLeftCount == 0 and self.__timeLeft() == -1:
self.checkError = 'Job has reached the CPU limit of the queue'
self.log.error( self.checkError, self.timeLeft )
self.__killRunningThread()
return S_OK()
else:
self.littleTimeLeftCount -= 1
#Note: need to poll regularly to see if the thread is alive
# but only perform checks with a certain frequency
if ( time.time() - self.initialValues['StartTime'] ) > self.checkingTime * self.checkCount:
self.checkCount += 1
result = self.__performChecks()
if not result['OK']:
self.log.warn( 'Problem during recent checks' )
self.log.warn( result['Message'] )
return S_OK()
else:
#self.log.debug('Application thread is alive: checking count is %s' %(self.checkCount))
return S_OK()
#############################################################################
def __performChecks( self ):
"""The Watchdog checks are performed at a different period to the checking of the
application thread and correspond to the checkingTime.
"""
self.log.verbose( '------------------------------------' )
self.log.verbose( 'Checking loop starts for Watchdog' )
heartBeatDict = {}
msg = ''
result = self.getLoadAverage()
msg += 'LoadAvg: %s ' % ( result['Value'] )
heartBeatDict['LoadAverage'] = result['Value']
if not self.parameters.has_key( 'LoadAverage' ):
self.parameters['LoadAverage'] = []
self.parameters['LoadAverage'].append( result['Value'] )
result = self.getMemoryUsed()
msg += 'MemUsed: %.1f kb ' % ( result['Value'] )
heartBeatDict['MemoryUsed'] = result['Value']
if not self.parameters.has_key( 'MemoryUsed' ):
self.parameters['MemoryUsed'] = []
self.parameters['MemoryUsed'].append( result['Value'] )
result = self.processMonitor.getMemoryConsumed( self.wrapperPID )
if result['OK']:
vsize = result['Value']['Vsize']/1024.
rss = result['Value']['RSS']/1024.
heartBeatDict['Vsize'] = vsize
heartBeatDict['RSS'] = rss
self.parameters.setdefault( 'Vsize', [] )
self.parameters['Vsize'].append( vsize )
self.parameters.setdefault( 'RSS', [] )
self.parameters['RSS'].append( rss )
msg += "Job Vsize: %.1f kb " % vsize
msg += "Job RSS: %.1f kb " % rss
result = self.getDiskSpace()
msg += 'DiskSpace: %.1f MB ' % ( result['Value'] )
if not self.parameters.has_key( 'DiskSpace' ):
self.parameters['DiskSpace'] = []
self.parameters['DiskSpace'].append( result['Value'] )
heartBeatDict['AvailableDiskSpace'] = result['Value']
result = self.__getCPU()
msg += 'CPU: %s (h:m:s) ' % ( result['Value'] )
if not self.parameters.has_key( 'CPUConsumed' ):
self.parameters['CPUConsumed'] = []
self.parameters['CPUConsumed'].append( result['Value'] )
hmsCPU = result['Value']
rawCPU = self.__convertCPUTime( hmsCPU )
if rawCPU['OK']:
heartBeatDict['CPUConsumed'] = rawCPU['Value']
result = self.__getWallClockTime()
msg += 'WallClock: %.2f s ' % ( result['Value'] )
self.parameters['WallClockTime'].append( result['Value'] )
heartBeatDict['WallClockTime'] = result['Value']
self.log.info( msg )
result = self.__checkProgress()
if not result['OK']:
self.checkError = result['Message']
self.log.warn( self.checkError )
if self.jobPeekFlag:
result = self.__peek()
if result['OK']:
outputList = result['Value']
size = len( outputList )
self.log.info( 'Last %s lines of available application output:' % ( size ) )
self.log.info( '================START================' )
for line in outputList:
self.log.info( line )
self.log.info( '=================END=================' )
self.__killRunningThread()
return S_OK()
recentStdOut = 'None'
if self.jobPeekFlag:
result = self.__peek()
if result['OK']:
outputList = result['Value']
size = len( outputList )
recentStdOut = 'Last %s lines of application output from Watchdog on %s [UTC]:' % ( size, Time.dateTime() )
border = '=' * len( recentStdOut )
cpuTotal = 'Last reported CPU consumed for job is %s (h:m:s)' % ( hmsCPU )
if self.timeLeft:
cpuTotal += ', Batch Queue Time Left %s (s @ HS06)' % self.timeLeft
recentStdOut = '\n%s\n%s\n%s\n%s\n' % ( border, recentStdOut, cpuTotal, border )
self.log.info( recentStdOut )
for line in outputList:
self.log.info( line )
recentStdOut += line + '\n'
else:
recentStdOut = 'Watchdog is initializing and will attempt to obtain standard output from application thread'
self.log.info( recentStdOut )
self.peekFailCount += 1
if self.peekFailCount > self.peekRetry:
self.jobPeekFlag = 0
self.log.warn( 'Turning off job peeking for remainder of execution' )
if not os.environ.has_key( 'JOBID' ):
self.log.info( 'Running without JOBID so parameters will not be reported' )
return S_OK()
jobID = os.environ['JOBID']
staticParamDict = {'StandardOutput':recentStdOut}
self.__sendSignOfLife( int( jobID ), heartBeatDict, staticParamDict )
return S_OK( 'Watchdog checking cycle complete' )
#############################################################################
def __getCPU( self ):
"""Uses os.times() to get CPU time and returns HH:MM:SS after conversion.
"""
cpuTime = '00:00:00'
try:
cpuTime = self.processMonitor.getCPUConsumed( self.wrapperPID )
except Exception:
self.log.warn( 'Could not determine CPU time consumed with exception' )
self.log.exception()
return S_OK( cpuTime ) #just return null CPU
if not cpuTime['OK']:
self.log.warn( 'Problem while checking consumed CPU' )
self.log.warn( cpuTime )
return S_OK( '00:00:00' ) #again return null CPU in this case
cpuTime = cpuTime['Value']
self.log.verbose( "Raw CPU time consumed (s) = %s" % ( cpuTime ) )
result = self.__getCPUHMS( cpuTime )
return result
#############################################################################
def __getCPUHMS( self, cpuTime ):
mins, secs = divmod( cpuTime, 60 )
hours, mins = divmod( mins, 60 )
humanTime = '%02d:%02d:%02d' % ( hours, mins, secs )
self.log.verbose( 'Human readable CPU time is: %s' % humanTime )
return S_OK( humanTime )
#############################################################################
def __interpretControlSignal( self, signalDict ):
"""This method is called whenever a signal is sent via the result of
sending a sign of life.
"""
self.log.info( 'Received control signal' )
if type( signalDict ) == type( {} ):
if signalDict.has_key( 'Kill' ):
self.log.info( 'Received Kill signal, stopping job via control signal' )
self.checkError = 'Received Kill signal'
self.__killRunningThread()
else:
self.log.info( 'The following control signal was sent but not understood by the watchdog:' )
self.log.info( signalDict )
else:
self.log.info( 'Expected dictionary for control signal, received:\n%s' % ( signalDict ) )
return S_OK()
#############################################################################
def __checkProgress( self ):
"""This method calls specific tests to determine whether the job execution
is proceeding normally. CS flags can easily be added to add or remove
tests via central configuration.
"""
report = ''
if self.testWallClock:
result = self.__checkWallClockTime()
report += 'WallClock: OK, '
if not result['OK']:
self.log.warn( result['Message'] )
return result
else:
report += 'WallClock: NA,'
if self.testDiskSpace:
result = self.__checkDiskSpace()
report += 'DiskSpace: OK, '
if not result['OK']:
self.log.warn( result['Message'] )
return result
else:
report += 'DiskSpace: NA,'
if self.testLoadAvg:
result = self.__checkLoadAverage()
report += 'LoadAverage: OK, '
if not result['OK']:
self.log.warn( result['Message'] )
return result
else:
report += 'LoadAverage: NA,'
if self.testCPUConsumed:
result = self.__checkCPUConsumed()
report += 'CPUConsumed: OK, '
if not result['OK']:
return result
else:
report += 'CPUConsumed: NA, '
if self.testCPULimit:
result = self.__checkCPULimit()
report += 'CPULimit OK, '
if not result['OK']:
self.log.warn( result['Message'] )
return result
else:
report += 'CPULimit: NA, '
if self.testTimeLeft:
self.__timeLeft()
if self.timeLeft:
report += 'TimeLeft: OK'
else:
report += 'TimeLeft: NA'
if self.testMemoryLimit:
result = self.__checkMemoryLimit()
report += 'MemoryLimit OK, '
if not result['OK']:
self.log.warn( result['Message'] )
return result
else:
report += 'MemoryLimit: NA, '
self.log.info( report )
return S_OK( 'All enabled checks passed' )
#############################################################################
def __checkCPUConsumed( self ):
""" Checks whether the CPU consumed by application process is reasonable. This
method will report stalled jobs to be killed.
"""
self.log.info( "Checking CPU Consumed" )
if 'WallClockTime' not in self.parameters:
return S_ERROR( 'Missing WallClockTime info' )
if 'CPUConsumed' not in self.parameters:
return S_ERROR( 'Missing CPUConsumed info' )
wallClockTime = self.parameters['WallClockTime'][-1]
if wallClockTime < self.sampleCPUTime:
self.log.info( "Stopping check, wallclock time (%s) is still smalled than sample time (%s)" % ( wallClockTime,
self.sampleCPUTime ) )
return S_OK()
intervals = max( 1, int( self.sampleCPUTime / self.checkingTime ) )
if len( self.parameters['CPUConsumed'] ) < intervals + 1:
self.log.info( "Not enough snapshots to calculate, there are %s and we need %s" % ( len( self.parameters['CPUConsumed'] ),
intervals + 1 ) )
return S_OK()
wallClockTime = self.parameters['WallClockTime'][-1] - self.parameters['WallClockTime'][-1 - intervals ]
try:
cpuTime = self.__convertCPUTime( self.parameters['CPUConsumed'][-1] )['Value']
# For some reason, sometimes the CPU consumed estimation returns 0
# if cpuTime == 0:
# return S_OK()
cpuTime -= self.__convertCPUTime( self.parameters['CPUConsumed'][-1 - intervals ] )['Value']
ratio = ( cpuTime / wallClockTime ) * 100.
self.log.info( "CPU/Wallclock ratio is %.2f%%" % ratio )
# in case of error cpuTime might be 0, exclude this
if wallClockTime and ratio < self.minCPUWallClockRatio:
if os.path.exists( 'DISABLE_WATCHDOG_CPU_WALLCLOCK_CHECK' ):
self.log.info( 'N.B. job would be declared as stalled but CPU / WallClock check is disabled by payload' )
return S_OK()
self.log.info( "Job is stalled!" )
return S_ERROR( 'Watchdog identified this job as stalled' )
except Exception, e:
self.log.error( "Cannot convert CPU consumed from string to int", "%s" % str( e ) )
return S_OK()
#############################################################################
def __convertCPUTime( self, cputime ):
""" Method to convert the CPU time as returned from the Watchdog
instances to the equivalent DIRAC normalized CPU time to be compared
to the Job CPU requirement.
"""
cpuValue = 0
cpuHMS = cputime.split( ':' )
# for i in xrange( len( cpuHMS ) ):
# cpuHMS[i] = cpuHMS[i].replace( '00', '0' )
try:
hours = float( cpuHMS[0] ) * 60 * 60
mins = float( cpuHMS[1] ) * 60
secs = float( cpuHMS[2] )
cpuValue = float( hours + mins + secs )
except Exception, x:
self.log.warn( str( x ) )
return S_ERROR( 'Could not calculate CPU time' )
#Normalization to be implemented
normalizedCPUValue = cpuValue
result = S_OK()
result['Value'] = normalizedCPUValue
self.log.debug( 'CPU value %s converted to %s' % ( cputime, normalizedCPUValue ) )
return result
#############################################################################
def __checkCPULimit( self ):
""" Checks that the job has consumed more than the job CPU requirement
(plus a configurable margin) and kills them as necessary.
"""
consumedCPU = 0
if self.parameters.has_key( 'CPUConsumed' ):
consumedCPU = self.parameters['CPUConsumed'][-1]
consumedCPUDict = self.__convertCPUTime( consumedCPU )
if consumedCPUDict['OK']:
currentCPU = consumedCPUDict['Value']
else:
return S_OK( 'Not possible to determine current CPU consumed' )
if consumedCPU:
limit = self.jobCPUtime + self.jobCPUtime * ( self.jobCPUMargin / 100. ) # float division so the margin is not lost to Python 2 integer division
cpuConsumed = float( currentCPU )
if cpuConsumed > limit:
self.log.info( 'Job has consumed more than the specified CPU limit with an additional %s%% margin' % ( self.jobCPUMargin ) )
return S_ERROR( 'Job has exceeded maximum CPU time limit' )
else:
return S_OK( 'Job within CPU limit' )
elif not currentCPU:
self.log.verbose( 'Both initial and current CPU consumed are null' )
return S_OK( 'CPU consumed is not measurable yet' )
else:
return S_OK( 'Not possible to determine CPU consumed' )
def __checkMemoryLimit( self ):
""" Checks that the job memory consumption is within a limit
"""
if self.parameters.has_key( 'Vsize' ):
vsize = self.parameters['Vsize'][-1]
if vsize and self.memoryLimit:
if vsize > self.memoryLimit:
# Just a warning for the moment
self.log.warn( "Job has consumed %.2f KB of memory with the limit of %.2f KB" % ( vsize, self.memoryLimit ) )
return S_OK()
#############################################################################
def __checkDiskSpace( self ):
"""Checks whether the CS defined minimum disk space is available.
"""
if self.parameters.has_key( 'DiskSpace' ):
availSpace = self.parameters['DiskSpace'][-1]
if availSpace >= 0 and availSpace < self.minDiskSpace:
self.log.info( 'Not enough local disk space for job to continue, defined in CS as %s MB' % ( self.minDiskSpace ) )
return S_ERROR( 'Job has insufficient disk space to continue' )
else:
return S_OK( 'Job has enough disk space available' )
else:
return S_ERROR( 'Available disk space could not be established' )
#############################################################################
def __checkWallClockTime( self ):
"""Checks whether the job has been running for the CS defined maximum
wall clock time.
"""
if self.initialValues.has_key( 'StartTime' ):
startTime = self.initialValues['StartTime']
if time.time() - startTime > self.maxWallClockTime:
self.log.info( 'Job has exceeded maximum wall clock time of %s seconds' % ( self.maxWallClockTime ) )
return S_ERROR( 'Job has exceeded maximum wall clock time' )
else:
return S_OK( 'Job within maximum wall clock time' )
else:
return S_ERROR( 'Job start time could not be established' )
#############################################################################
def __checkLoadAverage( self ):
"""Checks whether the CS defined maximum load average is exceeded.
"""
if self.parameters.has_key( 'LoadAverage' ):
loadAvg = self.parameters['LoadAverage'][-1]
if loadAvg > float( self.loadAvgLimit ):
self.log.info( 'Maximum load average exceeded, defined in CS as %s ' % ( self.loadAvgLimit ) )
return S_ERROR( 'Job exceeded maximum load average' )
else:
return S_OK( 'Job running with normal load average' )
else:
return S_ERROR( 'Job load average not established' )
#############################################################################
def __peek( self ):
""" Uses ExecutionThread.getOutput() method to obtain standard output
from running thread via subprocess callback function.
"""
result = self.exeThread.getOutput()
if not result['OK']:
self.log.warn( 'Could not obtain output from running application thread' )
self.log.warn( result['Message'] )
return result
#############################################################################
def calibrate( self ):
""" The calibrate method obtains the initial values for system memory and load
and calculates the margin for error for the rest of the Watchdog cycle.
"""
self.__getWallClockTime()
self.parameters['WallClockTime'] = []
initialCPU = 0.0
result = self.__getCPU()
self.log.verbose( 'CPU consumed %s' % ( result ) )
if not result['OK']:
msg = 'Could not establish CPU consumed'
self.log.warn( msg )
# result = S_ERROR(msg)
# return result
initialCPU = result['Value']
self.initialValues['CPUConsumed'] = initialCPU
self.parameters['CPUConsumed'] = []
result = self.getLoadAverage()
self.log.verbose( 'LoadAverage: %s' % ( result ) )
if not result['OK']:
msg = 'Could not establish LoadAverage'
self.log.warn( msg )
# result = S_ERROR(msg)
# return result
self.initialValues['LoadAverage'] = result['Value']
self.parameters['LoadAverage'] = []
result = self.getMemoryUsed()
self.log.verbose( 'MemUsed: %s' % ( result ) )
if not result['OK']:
msg = 'Could not establish MemoryUsed'
self.log.warn( msg )
# result = S_ERROR(msg)
# return result
self.initialValues['MemoryUsed'] = result['Value']
self.parameters['MemoryUsed'] = []
result = self.processMonitor.getMemoryConsumed( self.wrapperPID )
self.log.verbose( 'Job Memory: %s' % ( result['Value'] ) )
if not result['OK']:
self.log.warn( 'Could not get job memory usage' )
self.initialValues['Vsize'] = result['Value']['Vsize']/1024.
self.initialValues['RSS'] = result['Value']['RSS']/1024.
self.parameters['Vsize'] = []
self.parameters['RSS'] = []
result = self.getDiskSpace()
self.log.verbose( 'DiskSpace: %s' % ( result ) )
if not result['OK']:
msg = 'Could not establish DiskSpace'
self.log.warn( msg )
# result = S_ERROR(msg)
# return result
self.initialValues['DiskSpace'] = result['Value']
self.parameters['DiskSpace'] = []
result = self.getNodeInformation()
self.log.verbose( 'NodeInfo: %s' % ( result ) )
if not result['OK']:
msg = 'Could not establish static system information'
self.log.warn( msg )
# result = S_ERROR(msg)
# return result
if os.environ.has_key( 'LSB_JOBID' ):
result['LocalJobID'] = os.environ['LSB_JOBID']
if os.environ.has_key( 'PBS_JOBID' ):
result['LocalJobID'] = os.environ['PBS_JOBID']
if os.environ.has_key( 'QSUB_REQNAME' ):
result['LocalJobID'] = os.environ['QSUB_REQNAME']
if os.environ.has_key( 'JOB_ID' ):
result['LocalJobID'] = os.environ['JOB_ID']
self.__reportParameters( result, 'NodeInformation', True )
self.__reportParameters( self.initialValues, 'InitialValues' )
return S_OK()
def __timeLeft( self ):
"""
Return the normalized CPU time left in the batch system, or 0 if it is not
available, and update self.timeLeft and self.littleTimeLeft accordingly.
"""
# Get CPU time left in the batch system
result = self.timeLeftUtil.getTimeLeft( 0.0 )
if not result['OK']:
# Could not get CPU time left, we might need to wait for the first loop
# or the Utility is not working properly for this batch system
# or we are not running in a batch system at all
timeLeft = 0
else:
timeLeft = result['Value']
self.timeLeft = timeLeft
if not self.littleTimeLeft:
if timeLeft and timeLeft < self.grossTimeLeftLimit:
self.log.info( 'TimeLeft below %s, now checking with higher frequency' % timeLeft )
self.littleTimeLeft = True
# TODO: better configurable way of doing this to be coded
self.littleTimeLeftCount = 15
else:
if self.timeLeft and self.timeLeft < self.fineTimeLeftLimit:
timeLeft = -1
return timeLeft
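# Hedged worked example (numbers are just the defaults above, for
# illustration): with checkingTime = 1800 s and pollingTime = 10 s,
# grossTimeLeftLimit = 10 * 1800 = 18000 s and fineTimeLeftLimit =
# 150 * 10 = 1500 s. Once the batch system reports less than 18000 s left,
# littleTimeLeft is set and the query is repeated after littleTimeLeftCount
# further cycles; once less than 1500 s is left, -1 is returned and
# execute() kills the running thread.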
#############################################################################
def __getUsageSummary( self ):
""" Returns average load, memory etc. over execution of job thread
"""
summary = {}
#CPUConsumed
if self.parameters.has_key( 'CPUConsumed' ):
cpuList = self.parameters['CPUConsumed']
if cpuList:
hmsCPU = cpuList[-1]
rawCPU = self.__convertCPUTime( hmsCPU )
if rawCPU['OK']:
summary['LastUpdateCPU(s)'] = rawCPU['Value']
else:
summary['LastUpdateCPU(s)'] = 'Could not be estimated'
#DiskSpace
if self.parameters.has_key( 'DiskSpace' ):
space = self.parameters['DiskSpace']
if space:
value = abs( float( space[-1] ) - float( self.initialValues['DiskSpace'] ) )
if value < 0:
value = 0
summary['DiskSpace(MB)'] = value
else:
summary['DiskSpace(MB)'] = 'Could not be estimated'
#MemoryUsed
if self.parameters.has_key( 'MemoryUsed' ):
memory = self.parameters['MemoryUsed']
if memory:
summary['MemoryUsed(kb)'] = abs( float( memory[-1] ) - float( self.initialValues['MemoryUsed'] ) )
else:
summary['MemoryUsed(kb)'] = 'Could not be estimated'
#LoadAverage
if self.parameters.has_key( 'LoadAverage' ):
laList = self.parameters['LoadAverage']
if laList:
summary['LoadAverage'] = float( sum( laList ) ) / float( len( laList ) )
else:
summary['LoadAverage'] = 'Could not be estimated'
result = self.__getWallClockTime()
wallClock = result['Value']
summary['WallClockTime(s)'] = wallClock
self.__reportParameters( summary, 'UsageSummary', True )
self.currentStats = summary
#############################################################################
def __reportParameters( self, params, title = None, report = False ):
"""Will report parameters for job.
"""
try:
parameters = []
self.log.info( '==========================================================' )
if title:
self.log.info( 'Watchdog will report %s' % ( title ) )
else:
self.log.info( 'Watchdog will report parameters' )
self.log.info( '==========================================================' )
vals = params
if params.has_key( 'Value' ):
if vals['Value']:
vals = params['Value']
for k, v in vals.items():
if v:
self.log.info( str( k ) + ' = ' + str( v ) )
parameters.append( ( k, v ) )
if report:
self.__setJobParamList( parameters )
self.log.info( '==========================================================' )
except Exception, x:
self.log.warn( 'Problem while reporting parameters' )
self.log.warn( str( x ) )
#############################################################################
def __getWallClockTime( self ):
""" Establishes the Wall Clock time spent since the Watchdog initialization"""
result = S_OK()
if self.initialValues.has_key( 'StartTime' ):
currentTime = time.time()
wallClock = currentTime - self.initialValues['StartTime']
result['Value'] = wallClock
else:
self.initialValues['StartTime'] = time.time()
result['Value'] = 0.0
return result
#############################################################################
def __killRunningThread( self ):
""" Will kill the running thread process and any child processes."""
self.log.info( 'Sending kill signal to application PID %s' % ( self.spObject.getChildPID() ) )
result = self.spObject.killChild()
self.applicationKilled = True
self.log.info( 'Subprocess.killChild() returned:%s ' % ( result ) )
return S_OK( 'Thread killed' )
#############################################################################
def __sendSignOfLife( self, jobID, heartBeatDict, staticParamDict ):
""" Sends sign of life 'heartbeat' signal and triggers control signal
interpretation.
"""
jobReport = RPCClient( 'WorkloadManagement/JobStateUpdate', timeout = 120 )
result = jobReport.sendHeartBeat( jobID, heartBeatDict, staticParamDict )
if not result['OK']:
self.log.warn( 'Problem sending sign of life' )
self.log.warn( result )
if result['OK'] and result['Value']:
self.__interpretControlSignal( result['Value'] )
return result
#############################################################################
def __setJobParamList( self, value ):
"""Wraps around setJobParameters of state update client
"""
#job wrapper template sets the jobID variable
if not os.environ.has_key( 'JOBID' ):
self.log.info( 'Running without JOBID so parameters will not be reported' )
return S_OK()
jobID = os.environ['JOBID']
jobReport = RPCClient( 'WorkloadManagement/JobStateUpdate', timeout = 120 )
jobParam = jobReport.setJobParameters( int( jobID ), value )
self.log.verbose( 'setJobParameters(%s,%s)' % ( jobID, value ) )
if not jobParam['OK']:
self.log.warn( jobParam['Message'] )
return jobParam
#############################################################################
def getNodeInformation( self ):
""" Attempts to retrieve all static system information, should be overridden in a subclass"""
methodName = 'getNodeInformation'
self.log.warn( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
return S_ERROR( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
#############################################################################
def getLoadAverage( self ):
""" Attempts to get the load average, should be overridden in a subclass"""
methodName = 'getLoadAverage'
self.log.warn( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
return S_ERROR( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
#############################################################################
def getMemoryUsed( self ):
""" Attempts to get the memory used, should be overridden in a subclass"""
methodName = 'getMemoryUsed'
self.log.warn( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
return S_ERROR( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
#############################################################################
def getDiskSpace( self ):
""" Attempts to get the available disk space, should be overridden in a subclass"""
methodName = 'getDiskSpace'
self.log.warn( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
return S_ERROR( 'Watchdog: ' + methodName + ' method should be implemented in a subclass' )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
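# ---------------------------------------------------------------------------
# Hedged sketch (not the DIRAC implementation): the four probe methods above
# are explicitly meant to be overridden per platform. A minimal Linux-flavoured
# subclass could look like the following; the class name WatchdogLinuxSketch
# and the use of /proc and os.statvfs are illustrative assumptions only.
class WatchdogLinuxSketch( Watchdog ):

  def getLoadAverage( self ):
    """ 1-minute load average as reported by the kernel """
    return S_OK( os.getloadavg()[0] )

  def getMemoryUsed( self ):
    """ Memory in use (kB), derived from /proc/meminfo """
    memInfo = {}
    with open( '/proc/meminfo' ) as memFile:
      for line in memFile:
        fields = line.split()
        memInfo[ fields[0].rstrip( ':' ) ] = float( fields[1] )
    return S_OK( memInfo['MemTotal'] - memInfo['MemFree'] )

  def getDiskSpace( self ):
    """ Available disk space (MB) on the current working directory """
    stats = os.statvfs( '.' )
    return S_OK( stats.f_bavail * stats.f_frsize / ( 1024. * 1024. ) )

  def getNodeInformation( self ):
    """ Minimal static description of the node """
    unameFields = os.uname()
    return S_OK( { 'HostName': unameFields[1], 'Release': unameFields[2] } )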
|
fibbo/DIRAC
|
WorkloadManagementSystem/JobWrapper/Watchdog.py
|
Python
|
gpl-3.0
| 34,697
|
[
"DIRAC"
] |
0b71f86ec5450db69d2a6f08d6debde4395a320fca0c23f1c50a1c03812e619b
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
from builtins import str
from singa import tensor
from singa import singa_wrap as singa
from singa import device
from singa import autograd
import numpy as np
autograd.training = True
CTensor = singa.Tensor
gpu_dev = device.create_cuda_gpu()
cpu_dev = device.get_default_device()
dy = CTensor([2, 1, 2, 2])
singa.Gaussian(0.0, 1.0, dy)
def _tuple_to_string(t):
lt = [str(x) for x in t]
return '(' + ', '.join(lt) + ')'
def prepare_inputs_targets_for_rnn_test():
x_0 = np.random.random((2, 3)).astype(np.float32)
x_1 = np.random.random((2, 3)).astype(np.float32)
x_2 = np.random.random((2, 3)).astype(np.float32)
h_0 = np.zeros((2, 2)).astype(
np.float32)
t_0 = np.random.random((2, 2)).astype(np.float32)
t_1 = np.random.random((2, 2)).astype(np.float32)
t_2 = np.random.random((2, 2)).astype(np.float32)
x0 = tensor.Tensor(device=gpu_dev, data=x_0)
x1 = tensor.Tensor(device=gpu_dev, data=x_1)
x2 = tensor.Tensor(device=gpu_dev, data=x_2)
h0 = tensor.Tensor(device=gpu_dev, data=h_0)
t0 = tensor.Tensor(device=gpu_dev, data=t_0)
t1 = tensor.Tensor(device=gpu_dev, data=t_1)
t2 = tensor.Tensor(device=gpu_dev, data=t_2)
inputs = [x0, x1, x2]
targets = [t0, t1, t2]
return inputs, targets, h0
class TestPythonOperation(unittest.TestCase):
def check_shape(self, actual, expect):
self.assertEqual(actual, expect, 'shape mismatch, actual shape is %s'
' expected is %s' % (_tuple_to_string(actual),
_tuple_to_string(expect))
)
def test_conv2d_gpu(self):
# (in_channels, out_channels, kernel_size)
conv_0 = autograd.Conv2d(3, 1, 2)
conv_without_bias_0 = autograd.Conv2d(3, 1, 2, bias=False)
gpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
gpu_input_tensor.gaussian(0.0, 1.0)
y = conv_0(gpu_input_tensor) # PyTensor
dx, dW, db = y.creator.backward(dy) # CTensor
self.check_shape(y.shape, (2, 1, 2, 2))
self.check_shape(dx.shape(), (2, 3, 3, 3))
self.check_shape(dW.shape(), (1, 3, 2, 2))
self.check_shape(db.shape(), (1,))
# forward without bias
y_without_bias = conv_without_bias_0(gpu_input_tensor)
self.check_shape(y_without_bias.shape, (2, 1, 2, 2))
def test_conv2d_cpu(self):
# (in_channels, out_channels, kernel_size)
conv_1 = autograd.Conv2d(3, 1, 2)
conv_without_bias_1 = autograd.Conv2d(3, 1, 2, bias=False)
cpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=cpu_dev)
cpu_input_tensor.gaussian(0.0, 1.0)
y = conv_1(cpu_input_tensor) # PyTensor
dx, dW, db = y.creator.backward(dy) # CTensor
self.check_shape(y.shape, (2, 1, 2, 2))
self.check_shape(dx.shape(), (2, 3, 3, 3))
self.check_shape(dW.shape(), (1, 3, 2, 2))
self.check_shape(db.shape(), (1,))
# forward without bias
y_without_bias = conv_without_bias_1(cpu_input_tensor)
self.check_shape(y_without_bias.shape, (2, 1, 2, 2))
def test_SeparableConv2d_gpu(self):
separ_conv=autograd.SeparableConv2d(8, 16, 3, padding=1)
x=np.random.random((10,8,28,28)).astype(np.float32)
x=tensor.Tensor(device=gpu_dev, data=x)
#y = separ_conv(x)
y1 = separ_conv.spacial_conv(x)
y2 = separ_conv.depth_conv(y1)
dy1, dW_depth, _ = y2.creator.backward(y2.data)
dx, dW_spacial, _ = y1.creator.backward(dy1)
self.check_shape(y2.shape, (10, 16, 28, 28))
self.check_shape(dy1.shape(), (10, 8, 28, 28))
self.check_shape(dW_depth.shape(), (16, 8, 1, 1))
self.check_shape(dx.shape(), (10, 8, 28, 28))
self.check_shape(dW_spacial.shape(), (8, 1, 3, 3))
def test_batchnorm2d_gpu(self):
batchnorm_0 = autograd.BatchNorm2d(3)
gpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
gpu_input_tensor.gaussian(0.0, 1.0)
dy = CTensor([2, 3, 3, 3])
singa.Gaussian(0.0, 1.0, dy)
y = batchnorm_0(gpu_input_tensor)
dx, ds, db = y.creator.backward(dy)
self.check_shape(y.shape, (2, 3, 3, 3))
self.check_shape(dx.shape(), (2, 3, 3, 3))
self.check_shape(ds.shape(), (3,))
self.check_shape(db.shape(), (3,))
def test_vanillaRNN_gpu_tiny_ops_shape_check(self):
# gradients shape check.
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
rnn = autograd.RNN(3, 2)
hs, _ = rnn(inputs, h0)
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
# d=autograd.infer_dependency(loss.creator)
# print(d)
for t, dt in autograd.backward(loss):
self.check_shape(t.shape, dt.shape)
def test_LSTM_gpu_tiny_ops_shape_check(self):
# gradients shape check.
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
c_0 = np.random.random((2, 1)).astype(np.float32)
c0 = tensor.Tensor(device=gpu_dev, data=c_0)
rnn = autograd.LSTM(3, 2)
hs, _, _ = rnn(inputs, (h0, c0))
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
# d=autograd.infer_dependency(loss.creator)
# print(d)
for t, dt in autograd.backward(loss):
self.check_shape(t.shape, dt.shape)
def gradients_check(self, func, param, autograds, h=0.0005, df=1):
# param: PyTensor
# autograds: numpy_tensor
p = tensor.to_numpy(param)
it = np.nditer(p, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
diff = np.zeros_like(p)
diff[idx] += h
diff = tensor.from_numpy(diff)
diff.to_device(gpu_dev)
param += diff
pos = func()
pos = tensor.to_numpy(pos)
param -= diff
param -= diff
neg = func()
neg = tensor.to_numpy(neg)
numerical_grad = np.sum((pos - neg) * df) / (2 * h)
#print((autograds[idx] - numerical_grad)/numerical_grad)
# threshold set as -5% to +5%
#self.assertAlmostEqual((autograds[idx] - numerical_grad)/(numerical_grad+0.0000001), 0., places=1)
self.assertAlmostEqual(
autograds[idx] - numerical_grad, 0., places=2)
it.iternext()
def test_numerical_gradients_check_for_vallina_rnn(self):
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
rnn = autograd.RNN(3, 2)
def valinna_rnn_forward():
hs, _ = rnn(inputs, h0)
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
#grads = autograd.gradients(loss)
return loss
loss1 = valinna_rnn_forward()
auto_grads = autograd.gradients(loss1)
for param in rnn.params:
auto_grad = tensor.to_numpy(auto_grads[param])
self.gradients_check(valinna_rnn_forward, param, auto_grad)
def test_numerical_gradients_check_for_lstm(self):
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
c_0 = np.zeros((2, 2)).astype(np.float32)
c0 = tensor.Tensor(device=gpu_dev, data=c_0)
rnn = autograd.LSTM(3, 2)
def lstm_forward():
hs, _, _ = rnn(inputs, (h0, c0))
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
return loss
loss1 = lstm_forward()
auto_grads = autograd.gradients(loss1)
for param in rnn.params:
auto_grad = tensor.to_numpy(auto_grads[param])
self.gradients_check(lstm_forward, param, auto_grad)
def test_MeanSquareError(self):
X=np.array([4.3,5.4,3.3,3.6,5.7,6.0]).reshape(3,2).astype(np.float32)
T=np.array([4.4,5.3,3.2,3.7,5.4,6.3]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
t=tensor.from_numpy(T)
x.to_device(gpu_dev)
t.to_device(gpu_dev)
loss= autograd.mse_loss(x,t)
dx=loss.creator.backward()[0]
loss_np=tensor.to_numpy(loss)
self.assertAlmostEqual(loss_np, 0.0366666, places=4)
self.check_shape(dx.shape(), (3, 2))
def test_Abs(self):
X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32)
XT=np.array([0.8,1.2,3.3,3.6,0.5,0.5]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
x.to_device(gpu_dev)
result=autograd.abs(x)
Err=XT-result
dx=result.creator.backward()[0]
for ii in Err.flatten():
self.assertAlmostEquals(ii,0., places=3)
self.check_shape(dx.shape(), (3, 2))
def test_Exp(self):
X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32)
XT=np.array([2.2255409,0.22313017,27.112638,0.02732372,0.60653067,1.6487212]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
x.to_device(gpu_dev)
result=autograd.exp(x)
Err=XT-result
dx=result.creator.backward()[0]
for ii in Err.flatten():
self.assertAlmostEquals(ii,0., places=3)
self.check_shape(dx.shape(), (3, 2))
def test_LeakyRelu(self):
X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32)
XT=np.array([0.8,-0.012,3.3,-0.036,-0.005,0.5]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
x.to_device(gpu_dev)
result=autograd.LeakyRelu(x)
Err=XT-result
dx=result.creator.backward()[0]
for ii in Err.flatten():
self.assertAlmostEquals(ii,0., places=3)
self.check_shape(dx.shape(), (3, 2))
if __name__ == '__main__':
unittest.main()
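# ---------------------------------------------------------------------------
# Hedged aside (not part of the test suite): gradients_check above is a
# central-difference check, comparing (f(p + h) - f(p - h)) / (2 * h) against
# the autograd value for every element of a parameter. The plain-NumPy helper
# below shows the same idea for an arbitrary scalar-valued function; the name
# numerical_gradient is an illustrative assumption.
def numerical_gradient(f, p, h=1e-4):
    """Estimate df/dp element-wise with central differences."""
    grad = np.zeros_like(p)
    it = np.nditer(p, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = p[idx]
        p[idx] = orig + h
        pos = f(p)
        p[idx] = orig - h
        neg = f(p)
        p[idx] = orig  # restore the parameter before moving on
        grad[idx] = (pos - neg) / (2 * h)
        it.iternext()
    return grad
# Example: numerical_gradient(lambda v: (v ** 2).sum(), np.array([1.0, 2.0]))
# is approximately [2.0, 4.0].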
|
ijingo/incubator-singa
|
test/python/test_operation.py
|
Python
|
apache-2.0
| 11,386
|
[
"Gaussian"
] |
0f4cf03e119ce7cae635d1155b1fde8451f3c46b167af0683c367465e804da54
|
# -*- coding: utf-8 -*-
import os
import glob
import inspect
import fnmatch
import datetime
from warnings import warn
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset as ncfile
import copy
import tempfile
from altimetry.tools.nctools import load_ncVar, load_ncVar_v2, nc as ncobj, dimStr,attrStr,\
ncStr, varStr
try:
import seawater.csiro as csw
except ImportError:
warn("[WARNING:%s] Module seawater doesn't exists. not loading it" % __name__)
pass # module doesn't exist, deal with it.
#import alti_tools as atools
try : from scipy import interpolate
except ImportError :
warn("[WARNING:%s] module scipy not found" % __name__)
from altimetry.tools import recale_limits, in_limits, cumulative_distance, calcul_distance, \
where_list, \
cnes_convert, \
plot_map, \
get_caller
from collections import OrderedDict
class hydro_data(object):
'''
A base object dedicated to handling oceanographic data (in-situ or remote sensing) with upper-level processing methods.
.. note:: This object SHOULD NOT be called directly but through a subclass inheriting from it (eg. :class:`altimetry.data.alti_data`)
'''
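# -----------------------------------------------------------------------
# Hedged illustration (not shipped with this module): as the note above says,
# hydro_data is meant to be used through a subclass that provides a read()
# method returning a data structure with a '_dimensions' entry plus one entry
# per variable. A minimal sketch of such a subclass, using the hypothetical
# name my_insitu_data, could look like:
#
#   class my_insitu_data(hydro_data):
#       def read(self, filename, **kwargs):
#           nc = ncfile(filename)
#           outStr = {'_dimensions': {'_ndims': 1, 'time': len(nc.dimensions['time'])}}
#           for v in ('lon', 'lat', 'time'):
#               outStr[v] = np.ma.masked_invalid(nc.variables[v][:])
#           nc.close()
#           self.update_fid_list(filename, outStr['_dimensions']['time'])
#           return outStr
#
# __init__ (below) then globs the files, calls read() on each and passes the
# result to update_dataset() before check_variables() validates the content.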
def __init__(self,file_pattern,limit=None,verbose=1,round=True,zero_2pi=True,output_is_dict=True,flatten=False,**kwargs):
'''
Returns the object filled with the data loaded from a single file or a concatenated set of files
:parameter file_pattern: a pattern of files to be globbed (:func:`glob.glob`) or a list of file names.
:keyword limit: the limits of the domain to handle ([latmin,lonmin,latmax,lonmax]).
:keyword verbose: verbosity level on a scale of 0 (silent) to 4 (max verbosity)
:keyword round: round limits (cf. :func:`altimetry.tools.in_limits`)
:keyword zero_2pi: limits goes from 0 to 360 degrees (not -180/180).
:keyword output_is_dict: data structures are dictionaries (eg. my_hydro_data.variable['data']). If false, uses an object with attributes (eg. my_hydro_data.variable.data).
.. note:: This method initializes all the attributes, then loads the data from files (:meth:`altimetry.data.hydro_data.read`) and appends it to the object (:meth:`altimetry.data.hydro_data.update_dataset`) before checking its content (:meth:`altimetry.data.hydro_data.check_variables`).
.. note:: The method :meth:`altimetry.data.hydro_data.read` MUST be defined (typically by overloading it). This method must return a data structure.
'''
#Init system variables
# if limit is None : limit=[-90.,0.,90.,360.]
self.zero_2pi=zero_2pi
self.limit_set=False
if limit is None :
limit=[-90.,0.,90.,360.]
else : self.limit_set = True
self.limit = np.array(recale_limits(limit, zero_2pi=self.zero_2pi))
'''
limits of the domain : [latmin,lonmin,latmax,lonmax] (default = [-90.,0.,90.,360.])
.. note:: limits are automatically reset using :func:`altimetry.tools.recale_limits`
'''
self.verbose = verbose
'''
verbosity level on a scale of 0 (silent) to 4 (max verbosity)
'''
self.fileid = np.array([])
'''
array of file IDs
'''
self.count=0
'''
number of files loaded
'''
self.size=0
'''
length of the dataset
'''
#Setup file list
if isinstance(file_pattern, str) : ls=glob.glob(file_pattern)
else :
ls = file_pattern.tolist() if not isinstance(file_pattern,list) else file_pattern
file_pattern=file_pattern[0] if len(file_pattern) > 0 else []
if len(ls) == 0 :
if isinstance(file_pattern, str) :
self.Error('File pattern not matched '+file_pattern)
if isinstance(file_pattern, list) :
self.warning(2, 'Empty file list - will return empty object')
self._filename=None
'''
Name of the file currently used
'''
self._ncfile=None
'''
Name of the file currently used
'''
self.__tempfile=[]
'''
Temporary buffer where to unzip data file if asked for
'''
self.filelist=[os.path.basename(j) for i,j in enumerate(ls)]
#~ self.filelist=[j for i,j in enumerate(ls)]
'''
list of files being loaded
'''
self.filelist_count = [0]*len(self.filelist)
'''
number of counted values by files
'''
enum = list(enumerate(ls))
enum = zip(*enum)
self.fid_list=np.array(enum[0]) if len(enum) > 0 else np.array([])
self.dirname=os.getcwd()
'''
Directory name of the file pattern being globbed (:func:`glob.glob`). Defaulted to current directory
'''
if isinstance(file_pattern,str) : self.dirname=os.path.dirname(os.path.abspath(file_pattern))
elif len(file_pattern) > 0 : self.dirname=os.path.dirname(os.path.abspath(file_pattern[0]))
self.par_list=np.array([])
'''
array of parameters
'''
self.dim_list=np.array([])
'''
array containing the dimensions of each parameter
'''
self._dimensions=dimStr()
'''
dimensional structure
'''
#Loop over data files
#####################
for i in np.arange(len(self.fid_list)) :
#Read data file
###############
filename = enum[1][i]
self.message(1,"Loading "+os.path.basename(filename))
#Dezip if required
filename=self.dezip(filename)
if filename != enum[1][i]: self.filelist[i]=os.path.basename(filename)
res=self.read(filename,output_is_dict=output_is_dict,**kwargs) #read() function is specific of each class
self.update_dataset(res,flatten=flatten) #update class with loaded data
self.check_variables()
if not self.limit_set : self.limit=self.extension(round=round)
if self.count == 0 : self.warning(1,'Empty object!')
def update_dataset(self,dataStr,flatten=False):
'''
update class with a data structure.
:keyword flatten: use this to automatically flatten variables (squeeze dimensions)
'''
#Load keys and dimensions
#########################
dataDim = dataStr.pop('_dimensions',{})
attrStr = dataStr.pop('_attributes',{})
ndims = dataDim.pop('_ndims',0)
dimensions = [dataDim.keys(),dataDim.values()]
keys = dataStr.keys()
if len(keys) == 0:
self.warning(2, 'No data loaded')
return
self.message(2, 'Loaded variables : '+str(keys))
#Check what is the current variable type
isStructure = True if isinstance(dataStr[keys[0]],dict) else False
# datalen = [np.size(dataStr[key]) for key in keys]
datalen = [list(np.shape(dataStr[key]['data'])[::-1]) for key in keys] if isStructure else [list(np.shape(dataStr[key])[::-1]) for key in keys] #Shape is inverted with respect to the order of dimensions to be consistent with check_variables
if isStructure :
varDim = [list(dataStr[key]['_dimensions'])[1:] for key in keys]
ind = [where_list(vDim,dimensions[0]) for vDim in varDim] #Dimensions indices from actual variables' dimensions
#Check dimension lengths
# dimOk = np.array([enum[1][0] == dimensions[1][ind[enum[0]][0]] for enum in enumerate(datalen)])
dimOk = [any([enum[1][ii] == dimensions[1][jj] for ii,jj in enumerate(ind[enum[0]])]) for enum in enumerate(datalen)]
if any([not d for d in dimOk]) :
notOk = np.where(~np.array(dimOk))[0]
print datalen
self.Error('Problem with {0} variables : {1}'.format(len(notOk),','.join(np.array(dataStr.keys())[notOk])))
else :
ind = [where_list(dlen,dimensions[1]) for dlen in datalen] #Dimensions indices from variable length
if (np.array(ind).sum() == -1)!= 0 : self.Error('At least one variable have not been properly defined')
dimname = [np.array(dimensions[0])[i].tolist() for i in ind] #Get correspondance between data structure dimensions and variables
curDim, nself=self.get_currentDim()
createDim=np.array([np.array([w == -1 for w in where_list(j, curDim[0])]) for i,j in enumerate(dimname) ])
createDim=np.squeeze(createDim)
# curInd = atools.where_list(dimname_reduced,curDim[0]) #Get correspondance between data structure dimensions and object dimensions
# createDim = (np.array(curInd) == -1) #Get dimensions to be created
toCreate = np.array([not self.__dict__.has_key(key) for key in keys])
updateDim=[]
self.message(2, 'Updating object with '+str(['{0}({1}:{2})'.format(i[0],i[1],i[2]) for i in zip(*(keys,dimname,datalen))]))
#Update variables available in files
for enum in enumerate(keys) :
ind=enum[0]
key=enum[1]
#Load variable
##############
# var=dataStr.get(key)
dum=dataStr.get(key).pop('data') if isStructure else copy.deepcopy(dataStr.get(key))
if flatten :
if isinstance(dum,dict) :dum['data']=dum['data'].flatten()
else : dum=dum.flatten()
if not isStructure :
dum={'_dimensions':dum._dimensions if hasattr(dum,'_dimensions') else {},
'_attributes':dum._attributes if hasattr(dum,'_attributes') else {},
'data':dum}
else :
dumStr=dataStr.get(key)
dumStr.update({'data':dum})
dum=dumStr
dumDim=dimStr(dimname[ind],datalen[ind])
# if dataStr[key].has_key('_attributes'):
# dum.update(dataStr[key]['_attributes'])
# if isinstance(dum,np.ma.masked_array) :
# #Get associated dimensions
# ##################################
# datalen = datalen[ind]#[len(dataStr[key]) for key in keys]
# ind = atools.where_list([datalen],dimensions[1])[0]
# if (ind == -1) : self.Error('Dimensions of current variable ('+key+') have not been properly defined')
# dimname = dimensions[
#Initialize variable if required
# if toCreate :
# updateDim.append(self.create_Variable(key, dum, dimensions={dimname[ind]:datalen[ind]},toCreate=toCreate[ind],createDim=createDim[ind]))
updateDim.append(self.create_Variable(key, dum, dimensions=dumDim,toCreate=toCreate[ind],createDim=createDim[ind]))
#Extend missing variables
# missing__keys = list(set(self.par_list).difference(keys))
# for enum in enumerate(missing__keys) :
# ind=enum[0]
# key=enum[1]
# updateDim.append(self.create_Variable(key, np.ma.repeat(self.dist_to_coast_leuliette.fill_value), dimensions=dumDim,toCreate=False,createDim=False))
#Final sequence
zipped_upd=zip(*(np.hstack(dimname)[~np.hstack(createDim)],np.hstack(datalen)[~np.hstack(createDim)]))
updateDim_List = np.array(list(set(tuple(i) for i in np.array(zipped_upd,dtype='|S16').tolist()))) #2D unique
# updateDim_List = np.unique(np.array(zipped_upd,dtype='|S16')) #[str(i) for i in datalen]
# if updateDim_List.size > 0 : updateDim_List.resize((2,updateDim_List.size/2))
# updateDim_List = np.unique(zip(*(np.array(dimname)[~createDim],np.array(datalen)[~createDim]))) #[str(i) for i in datalen]
zipped_dims=zip(*(np.hstack(dimname)[np.hstack(createDim)],np.hstack(datalen)[np.hstack(createDim)]))
createDim_list = np.array(list(set(tuple(i) for i in np.array(zipped_dims,dtype='|S16').tolist()))) #2D unique
# clist, inv = np.unique(np.array(zipped_dims,dtype='|S16'),return_inverse=True) #RQ : THIS WILL FAIL IF NUMBERS HAVE MORE THAN 16 DIGITS #[str(i) for i in datalen]
# if createDim_list.size > 0 : createDim_list.resize((2,createDim_list.size/2))
# createDim_list = np.unique(zip(*(np.array(dimname)[createDim],np.array(datalen)[createDim]))) #[str(i) for i in datalen]
for dname,dim in createDim_list :
self.create_Dim(dname, np.int(dim))
for dname,dim in updateDim_List:
self.update_Dim(dname, np.int(dim))
def check_variables(self):
"""
Forces variables to respect dimensions
"""
self.count = len(self.fileid)
self.size = np.size([np.size(self.__dict__.get(par)) for par in self.par_list])
infos = zip(*(self.par_list.tolist(),self.dim_list.tolist()))
# curDim, nself = self.get_currentDim()
for enum in enumerate(infos):
varSize = np.size(self.__dict__.get(enum[1][0]))
varShape = np.shape(self.__dict__.get(enum[1][0]))[::-1] #Data and Netcdf dimensions are inverted (not always?)
dimSize = None
if hasattr(self.__dict__.get(enum[1][0]),'_dimensions') :
dumDim = self.__dict__.get(enum[1][0])._dimensions
dumDim.pop('_ndims')
dimSize = tuple(dumDim.values())
elif isinstance(self.__dict__.get(enum[1][0]),dict):
if self.__dict__.get(enum[1][0]).has_key('_dimensions'):
dumDim = self.__dict__.get(enum[1][0])['_dimensions']
dumDim.pop('_ndims')
dimSize = tuple(dumDim.values())
if dimSize is None :
if isinstance(self._dimensions,dimStr) :
dimSize = (self._dimensions).get(enum[1][1])
else :
dimSize = tuple([(self._dimensions).get(d) for d in enum[1][1]])
masked = isinstance(self.__dict__.get(enum[1][0]), np.ma.masked_array)
#Check mask consistency (mask length should be the same as data)
if masked :
if (self.__dict__.get(enum[1][0]).mask.size != self.__dict__.get(enum[1][0]).data.size): raise np.ma.core.MaskError("Mask length is not consistent with data")
#Check dimensions
self.message(4, 'checking variables -> {0}(N={1}) - {2}:{3}'.format(enum[1][0],varSize,enum[1][1],dimSize))
for n,sh in enumerate(varShape) :
if (sh > dimSize[n]) :
self.Error('Object variable {0} greater than corresponding dimension ({1})'.format(enum[1][0],enum[1][1]))
elif (sh < dimSize[n]):
self.message(3, 'Variable {0}(N={1}) being extended to match dimension {2}:{3}'.format(enum[1][0],varSize,enum[1][1],dimSize))
# self.__dict__[enum[1][0]] = np.ma.concatenate((self.__dict__[enum[1][0]], np.ma.masked_array(np.repeat(np.nan,dimSize - varSize),mask=np.zeros(dimSize - varSize,dtype='bool'))))
#Save additional attributes
attrStr={}
for a in set(self.__dict__[enum[1][0]].__dict__.keys()).difference(np.ma.empty(0,dtype=bool).__dict__.keys()):
attrStr.update({a:self.__dict__[enum[1][0]].__dict__[a]})
self.__dict__[enum[1][0]] = np.ma.masked_array( np.append(self.__dict__[enum[1][0]].data,np.repeat(self.__dict__[enum[1][0]].fill_value if hasattr(self.__dict__[enum[1][0]],'fill_value') else np.NaN,dimSize[n] - varSize)),
mask=np.append(self.__dict__[enum[1][0]].mask,np.ones(dimSize[n] - varSize,dtype='bool')) )
self.__dict__[enum[1][0]].__dict__.update(attrStr)
def create_Dim(self, name,value):
'''
Adds a dimension to class.
:parameter name: dimension name
:parameter value: dimension value
'''
if not self._dimensions.has_key(name) :
self.message(3, 'Create dimension {0}:{1}'.format(name,value))
self._dimensions[name]=value
self._dimensions['_ndims']=len(self._dimensions) - 1
else :
self.message(3, 'Dimension {0} already exists'.format(name))
def update_Dim(self,name,value):
'''
update a dimension by appending the number of added elements to the dimension ::
<updated dimension> = <old dimension> + <number of added elements along this dimension>
'''
oldVal=self._dimensions[name]
self._dimensions[name] += value
self.message(2, 'Updating dimension {0} (from {1} to {2})'.format(name,oldVal,self._dimensions[name]))
#Update dimensions within all variables
for p in self.par_list:
if self.__dict__[p].__dict__.has_key('_dimensions'):
for d in self.__dict__[p]._dimensions.keys():
self.__dict__[p]._dimensions.update({d:self._dimensions[d]})
def update_fid_list(self,filename,N):
'''
update file indices attribute `altimetry.data.hydro_data.fileid`
'''
self.filelist_count[self.filelist.index(filename)] = N
fid=self.fid_list.compress([enum[1][0] == os.path.basename(filename) for enum in enumerate(zip(*(self.filelist,self.fid_list)))])
self.__dict__.update({'fileid' :np.append(self.fileid,np.repeat(fid,N))})
def delete_Variable(self,name):
'''
pops a variable from class and delete it from parameter list
:parameter name: name of the parameter to delete
'''
self.message(1,'Deleting variable {0}'.format(name))
self.par_list=self.par_list[self.par_list != name]
return self.__dict__.pop(name)
def create_Variable(self,name,value,dimensions,toCreate=None,createDim=None,extend=True):
"""
create_Variable : This function adds data to :class:`altimetry.data.hydro_data`
:parameter name: name of the parameter to create
:parameter value: values associated to the variable. Must be a numpy masked_array or a data structure.
:parameter dimensions: dimensional structure (cf. notes).
.. _structures:
.. note:: altimetry tools package handles the NetCDF data using specific structures.
NetCDF data is structured this way:
.. code-block:: python
:emphasize-lines: 1,3
NetCDF_data = {'_dimensions':dimension_structure, #File dimensions (COMPULSORY)
'_attributes':attribute_structure, #Global attributes
'dimension_1':data_structure, #Data associated to the dimensions. (COMPULSORY)
...,
'variable_1':data_structure, #Variables
...
}
In standard NetCDF files, dimensions are always associated to a variable.
If it is not the case, an array of indices the length of the dimension is generated and a warning is issued.
Moreover, dimensions MUST be defined to be accepted by :class:`altimetry.tools.nctools.nc` (empty NetCDF files would fail).
* a dimensional structure should be of the form :
.. code-block:: python
dimension_structure = {'_ndims':N, #Attribute setting the number of dimensions.
'dims':{'dim_A':A, #Structure containing the name
'dim_B':B, #of the dimensions and their size.
...,
'dim_N':N
}
}
* an attribute structure is a very simple structure containing the attribute names and values:
.. code-block:: python
data_structure = {'attribute_1':attribute_1,
...,
'attribute_N':attribute_N}
* a data structure should be of the form :
.. code-block:: python
:emphasize-lines: 1-2
data_structure = {'_dimensions':dimension_structure, #dimensions of the variable (COMPULSORY)
'data':data, #data associated to the variable (COMPULSORY)
'long_name':long_name, #Variable attributes
'units':units,
...
}
DATA and _DIMENSIONS fields are compulsory.
Other fields are optional and will be treated as attributes.
Furthermore, the code handles **scale**, **scale_factor** and **add_offset** specially while reading and writing data, and **_FillValue** and **missing_value** while reading (_FillValue being automatically set by :class:`NetCDF4.Dataset` when writing).
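:example: a minimal sketch of such a structure (the variable name ``sla`` and the ``time`` dimension below are purely illustrative, not taken from a real file) :
.. code-block:: python
import numpy as np
#Hypothetical 1D variable defined along a 'time' dimension of length 100
data_structure = {'_dimensions':{'_ndims':1,'time':100}, #dimensions of the variable (COMPULSORY)
'data':np.ma.masked_array(np.zeros(100)), #data associated to the variable (COMPULSORY)
'long_name':'sea level anomaly', #optional attributes
'units':'m'}
self.create_Variable('sla', data_structure, dimensions={'time':100})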
"""
#Check variable name
####################
#This allows removing invalid characters from variable names
#!!!! This is not a good solution
name=name.replace('.','_')
#Check if data is structured or not
isStructure = True if isinstance(value,dict) else False
#Get dimensions
dimName = np.array(dimensions.keys())
dimVal = np.array(dimensions.values())
keys=np.array(self._dimensions.keys())
# if createDim is None : createDim = self._dimensions.has_key(dimName[0])
createDim = np.array([not self._dimensions.has_key(dim) for dim in dimName]) if createDim is None else np.array(createDim)
if toCreate is None : toCreate = np.sum(self.par_list == name) == 0
self.message(3,'Loading {0} ({1}:{2}) from {3}'.format(name,dimName,dimVal,os.path.basename(self._filename)))
#Cast variable into masked array first
######################################
if (not isinstance(value['data'],np.ma.core.MaskedArray) if isStructure else not isinstance(value,np.ma.core.MaskedArray)) :
value['data'] = np.ma.masked_array(value['data'],mask=np.zeros(tuple(dimVal),dtype='bool')) if isStructure else np.ma.masked_array(value,mask=np.zeros(tuple(dimVal),dtype='bool'))
self.message(4,'Casting variable to np.ma.MaskedArray')
#Restructure dataset if structure
if isStructure :
dumvalue=value.pop('data')
if value.has_key('_attributes'):
for a in value['_attributes'].keys():
self.message(4, "copying attribute %s" % a)
dumvalue.__setattr__(a,value['_attributes'][a])
value=copy.deepcopy(dumvalue)
curDim, nself=self.get_currentDim()
curInd=np.array(where_list(dimName,curDim[0]))
curDimVal=np.array(where_list(dimVal,curDim[1]))
existDims= (curInd != -1)
createDim = (curInd == -1)
createInd = np.where(createDim)[0]
appendDim=existDims & (curDimVal == -1)
appendInd=curInd[appendDim]
# curInd = set(atools.where_list(dimVal,curDim[1])).intersection(set(atools.where_list(dimName,curDim[0])))
#Get dims to be created
#######################
#Choose case between all different solutions :
##############################################
# 1: create a new variable with at least 1 new dimension
# 2: extend -> create a new variable using existing dimensions
# 3: append existing variable with data
# 4: impossible case ?
#1) Create variable
if createDim.any() & toCreate :
#Create Variable
self.message(4,'Create variable '+name)
# self.__setattr__(name,value)
# cmd='self.'+name+'=value'
#Append variable infos to object
self.par_list=np.append(self.par_list,name)
dimlist_cp=self.dim_list.tolist()
dimlist_cp.append(dimName.tolist())
self.dim_list=np.array(dimlist_cp) #np.append(self.dim_list,dimName.tolist())
updateDim=False
#2) Extend
elif (not createDim.any()) & toCreate :
#extend variable
if extend :
dumvalue = np.ma.masked_array(np.append(np.zeros(curDim[1][curInd]),value.data),mask=np.append(np.ones(curDim[1][curInd],dtype='bool'),value.mask))
for a in set(value.__dict__.keys()).difference(dumvalue.__dict__.keys()) :
dumvalue.__setattr__(a,value.__dict__[a] if hasattr(value, a) else self.__getattribute__(name).__getattribute__(a))
value=copy.deepcopy(dumvalue)
self.message(4,'Extend variable '+name)
# self.__setattr__(name,value)
# cmd='self.'+name+'=value'
# self.message(4,'exec : '+cmd)
#Append variable infos to object
self.par_list=np.append(self.par_list,name)
dimlist_cp=self.dim_list.tolist()
dimlist_cp.append(dimName.tolist())
self.dim_list=np.array(dimlist_cp)
# self.dim_list=np.append(self.dim_list,dimName)
updateDim=True
#3) Append
elif (not createDim.any()) & (not toCreate) :
#append variable
self.message(4,'Append data to variable '+name)
dumvalue = np.ma.masked_array(np.append(self.__getattribute__(name).data,value.data),mask=np.append(self.__getattribute__(name).mask,value.mask))
#We gather a list of attributes :
# - already in data structure,
# - in current data file
# - and not in output structure
attributes=set(self.__getattribute__(name).__dict__.keys())
attributes=attributes.union(value.__dict__.keys())
# attributes=attributes.difference(self.__getattribute__(name).__dict__.keys())
attributes=attributes.difference(dumvalue.__dict__.keys())
#Then :
# - we add attributes of current file not in data structure
# - we keep attributes of current data structure if they exist
for a in attributes :
dumvalue.__setattr__(a,value.__dict__[a] if hasattr(value, a) else self.__getattribute__(name).__getattribute__(a))
value=copy.deepcopy(dumvalue)
updateDim=True
elif createDim.any() & (not toCreate) :
#Impossible case ?
self.Error('Impossible case : create dimensions and variable {0} already existing'.format(name))
#Append dimensions to variable
if not dimensions.has_key('_ndims') :
dumDim=dimStr(dimensions)
dimensions=dumDim.copy()
#Update variable dimensions
if updateDim :
for k in dimensions.keys(): dimensions.update({k:self._dimensions[k]})
value.__setattr__('_dimensions',dimensions)
try : self.__setattr__(name,value)
except np.ma.core.MaskError : raise np.ma.core.MaskError('Mask error while setting variable '+name)
#
# try : exec(cmd)
# except np.ma.core.MaskError :
# raise 'mask error'
# exec(cmd)
return updateDim
def get_currentDim(self):
'''
returns the current dimensions of the object
'''
selfDim = self._dimensions.copy()
if not isinstance(selfDim,dimStr):
if selfDim.has_key('_ndims') : nself = selfDim.pop('_ndims')
else :
self.warning(1, 'self._dimensions does not have the _ndims key')
nself = len(selfDim)
else : nself = selfDim['_ndims']
curDim = [[key for key in selfDim.keys()],[selfDim[key] for key in selfDim.keys()]]
return curDim, nself
def update(self,*args,**kwargs):
'''
Wrapper to :func:`altimetry.data.hydro_data.update_with_slice`.
'''
self.update_with_slice(*args,**kwargs)
def message(self,MSG_LEVEL,str):
"""
print function wrapper. Print a message depending on the verbose level
:parameter {in}{required}{type=int} MSG_LEVEL: level of the message to be compared with self.verbose
:example: To write a message
.. code-block:: python
self.message(0,'This message will be shown for any verbose level')
"""
caller=get_caller()
if MSG_LEVEL <= self.verbose : print('[{0}.{1}()] {2}'.format(__name__,caller.co_name,str))
def warning(self,MSG_LEVEL,str):
"""
Wrapper to :func:`warnings.warn`. Issues a warning when the verbose level is not 0.
:param MSG_LEVEL: level of the message to be compared with self.verbose
:example: To issue a warning
.. code-block:: python
self.warning(1,'Warning being issued')
"""
if self.verbose >= 1 : warn(str)
def Error(self,ErrorMsg):
'''
raises an exception
'''
raise Exception(ErrorMsg)
def updated_copy(self,flag,deep=True):
'''
Returns a sliced (updated) copy of current data object
:summary: This has the same effect as `obj.copy();obj.update(flag)` but is much less memory consuming.
.. note:: TypeError could arise if some object attributes are set outside the :func:`__init__` function (eg. for data objects derived from :class:`hydro_data`). If this is the case, initialise these attributes within their respective :func:`__init__`.
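:example: a minimal sketch (assuming ``obj`` is an already-loaded :class:`hydro_data` instance with a ``temp`` parameter) :
.. code-block:: python
flag = obj.slice('temp', [10., 20.]) #boolean flag along the time dimension
sub = obj.updated_copy(flag) #sliced copy ; obj itself is left untouched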
'''
emptyObj=self.__class__([])
if deep : func_copy = copy.deepcopy
else : func_copy = copy.copy
#Get object attributes from self
for a in set(self.__dict__.keys()).intersection(emptyObj.__dict__.keys()):
self.message(4, 'updating attribute %s to returned object' % a)
emptyObj.__dict__[a] = func_copy(self.__dict__[a])
#Get slices
for p in set(self.__dict__.keys()).difference(emptyObj.__dict__.keys()):
self.message(4, 'updating parameter %s to returned object' % p)
try :
#Save additionnal attributes
attrlist=list(set(dir(self.__dict__[p])).difference(dir(np.ma.array(0))))
attrvalues=[self.__dict__[p].__dict__[l] for l in attrlist]
attr=attrStr(attrlist,attrvalues)
emptyObj.__dict__[p] = func_copy(self.__dict__[p][flag])
for a in attr.keys(): setattr(emptyObj.__dict__[p], a, attr[a])
except TypeError:
self.warning(1,'Could not slice %s - check if this attribute is set in self.__init__' % p)
#update dimensions
N=flag.sum()
emptyObj._dimensions['time']=N
emptyObj.count=N
# self.message(4, "Checking variables consistency")
# emptyObj.check_variables()
return emptyObj
def copy(self,*args,**kwargs):
'''
Returns a copy of the current data object
:param flag: if an argument is provided, this returns an updated copy of the current object (ie. equivalent to `obj.copy();obj.update(flag)`), optimising memory usage (cf. :meth:`altimetry.data.hydro_data.updated_copy`).
:keyword True deep: deep copies the object (object data will be copied as well).
'''
deep=kwargs.get('deep',True)
if len(args) > 0:
return self.updated_copy(*args)
else :
return copy.deepcopy(self) if deep else copy.copy(self)
def slice(self,param,range,surf=False):
'''
get a flag for indexing based on values (range or fixed value) ; see the example below.
:parameter param: variable name
:parameter range: numpy array defining the range of the values :
* if size(range) == 2, the flag is computed between the min and max values of range
* if size(range) == 1, the flag is computed based on equality to the range value.
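:example: a minimal sketch (assuming ``obj`` is a loaded :class:`hydro_data` instance with ``temp`` and ``id`` parameters ; the values below are purely illustrative) :
.. code-block:: python
fg_range = obj.slice('temp', [13.0, 14.0]) #True where 13.0 <= temp < 14.0
fg_value = obj.slice('id', 12345) #True where id == 12345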
'''
if np.size(range) == 2 :
if not surf : fg = (self.__dict__[param] >= np.min(range)) & (self.__dict__[param] < np.max(range))
else : fg = (self.__dict__[param+'_surf'] >= np.min(range)) & (self.__dict__[param+'_surf'] < np.max(range))
elif np.size(range) == 1 :
if not surf : fg = (self.__dict__[param] == range)
else : fg = (self.__dict__[param+'_surf'] == range)
else : self.Error('Range array must have 1 or 2 elements max')
return fg
def time_slice(self,timerange,surf=False):
'''
slice object given a time range
:parameter timerange: time range to be used.
'''
if isinstance(timerange[0],str): trange = cnes_convert(timerange)[0]
else: trange=timerange
return self.slice('date',trange,surf=surf)
def update_with_slice(self,flag):
'''
update object with a given time slice flag
:parameter (boolean array) flag: a flag for indexing data along the ''time'' dimension
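:example: a minimal sketch (assuming ``obj`` is a loaded :class:`hydro_data` instance ; the latitude band is purely illustrative) :
.. code-block:: python
flag = obj.slice('lat', [30., 45.]) #keep records between 30N and 45N
obj.update_with_slice(flag) #obj is modified in place ; obj.count and the time dimension are updated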
'''
N=flag.sum()
#Get object attributes to update
varSize = np.array([np.size(self.__dict__[k]) for k in self.__dict__.keys()])
par_list=np.array(self.__dict__.keys())[varSize == self._dimensions['time']].tolist()
self._dimensions['time']=N
self.count=N
for par in par_list :
# self.__setattr__(par,self.__dict__[par][flag])
# inVar=copy.deepcopy(self.__dict__[par])
if isinstance(self.__dict__[par],np.ma.masked_array) :
tmpVar=self.__dict__.pop(par)
dumVar=copy.deepcopy(tmpVar)
tmpVar=tmpVar.compress(flag)
for a in set(dumVar.__dict__.keys()).difference(tmpVar.__dict__.keys()) :
tmpVar.__dict__[a]=dumVar.__dict__[a]
self.__dict__[par] = tmpVar
elif isinstance(self.__dict__[par],np.ndarray):
self.__dict__[par] = self.__dict__[par].compress(flag)
elif isinstance(self.__dict__[par],list):
self.__dict__[par] = (np.array(self.__dict__[par])[flag]).tolist()
if (hasattr(self.__dict__[par], '_dimensions')) : self.__dict__[par]._dimensions['time']=self._dimensions['time']
def get_file(self,pattern):
'''
returns a flag array of the data loaded from a given file pattern
:parameter pattern: pattern to match in the file list.
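:example: a minimal sketch (the file pattern below is purely illustrative) :
.. code-block:: python
flag = obj.get_file('*2012*.nc') #True for records loaded from files matching the pattern
sub = obj.copy(flag) #sliced copy restricted to these files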
'''
flag=[fnmatch.fnmatch(l,pattern) for l in self.filelist]
id=self.fid_list.compress(flag)
flag = self.fileid == id
return flag
def dezip(self,filename):
fname,extzip = os.path.splitext(filename)
__tempfile = [k for k in self.__dict__.keys() if k.endswith("__tempfile")][0]
if [".gz"].count(extzip) == 0: return filename
_,extension =os.path.splitext(fname)
self.__tempfile += [tempfile.mktemp(extension,__tempfile,'/tmp')]
self.message(2,"Unzipping %s to %s" % (os.path.basename(filename),os.path.basename(self.__tempfile[-1])))
if extzip == ".gz":
self.message(2,"gunzip -c %s > %s" % (filename,self.__tempfile[-1]))
os.system("gunzip -c %s > %s" % (filename,self.__tempfile[-1]))
return self.__tempfile[-1]
def time_range(self,flag=None):
'''
time range of the current dataset
:keyword flag: use a flag array to know the time range of an indexed slice of the object
'''
if self.count==0: return [[None,None],[None,None]]
if flag is None : return cnes_convert([self.date.min(),self.date.max()])
else : return cnes_convert([self.date.compress(flag).min(),self.date.compress(flag).max()])
def extension(self,flag=None,round=True):
'''
returns the limits of the dataset.
:keyword flag: an indexation flag array
:keyword round: round the limits to the south-west and north-east.
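:example: a minimal sketch showing the returned limits, ordered [latmin, lonmin, latmax, lonmax] (the values in the comment are purely illustrative) :
.. code-block:: python
limit = obj.extension() #eg. [38.0, 3.0, 44.0, 9.0]
zoomed = obj.extension(flag=obj.slice('id', 12345), round=False) #limits of a single platform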
'''
if flag is None : limit = [self.lat.min(),self.lon.min(),self.lat.max(),self.lon.max()] \
if (self.__dict__.has_key('lat') and self.__dict__.has_key('lon')) \
else [-90,0,90,360]
else : limit = [self.lat.compress(flag).min(),self.lon.compress(flag).min(),self.lat.compress(flag).max(),self.lon.compress(flag).max()] \
if (self.__dict__.has_key('lat') and self.__dict__.has_key('lon')) \
else [-90,0,90,360]
if round :
limit[0]=np.floor(limit[0])
limit[2]=np.floor(limit[2])
limit[1]=np.ceil(limit[1])
limit[3]=np.ceil(limit[3])
return limit
def get_id_list(self,flag=None):
if flag is None : return np.unique(self.id)
else : return np.unique(self.id.compress(flag))
def get(self,name):
'''
returns a variable
'''
return self.__dict__[name]
def pop(self,*args,**kwargs):
'''
This is a wrapper to :meth:`altimetry.data.hydro_data.delete_Variable`
'''
return self.delete_Variable(*args,**kwargs)
def get_timestats(self,flag,par_list=None,full=True,bins=None):
'''
get temporal statistics about a part of the dataset
:keyword par_list: List of parameters
:keyword True full: cf. :return: section
:return: If full is False, returns par_stats only (dict of dicts), else returns (par_list, valid, per_valid, fname, trange, extent, N, avail_par, avail_par_per, par_stats)
:example: par_list, valid, per_valid, fname, trange, extent, N, avail_par, avail_par_per, par_stats = self.stats(flag)
'''
if par_list is None : par_list=self.par_list.compress([(not par.endswith('_surf')) & ( par != 'id') for par in self.par_list])
else :
if not isinstance(par_list,np.ma.masked_array) : par_list=np.ma.array(par_list,mask=np.zeros(len(par_list),dtype=bool))
valid= np.array([len(self.__dict__[par].compress(flag).compressed()) for par in par_list])
per_valid = (100.0* valid) /float(np.sum(flag))
fname = [self.filelist[i] for i in np.unique(self.fileid.compress(flag)).astype('i')]
trange = self.time_range(flag)
extent = self.extension(flag)
N = np.sum(flag)
avail_par = par_list.compress(per_valid > 0)
avail_par_per = per_valid.compress(per_valid > 0)
if bins is None and trange is not None: bins=(max(trange[1])-min(trange[1])).days + 1
else: bins=()
par_stats = OrderedDict()
for p in avail_par :
var = self.__dict__[p].compress(flag).compressed()
N,t_edges=np.histogram(self.date,bins=bins)
H,_=np.histogram(self.date,bins=bins,weights=var)
t_edges=t_edges[:-1]+(t_edges[1]-t_edges[0])/2.
par_stats.update({p:{'nb':N,
'mean':H/N,
't_axis':t_edges
}
})
if full : out = par_list, valid, per_valid, fname, trange, extent, N, avail_par, avail_par_per, par_stats
else : out = par_stats
return out
def get_stats(self,flag,par_list=None,full=True):
'''
get some statistics about a part of the dataset
:keyword par_list: List of parameters
:keyword True full: cf. :return: section
:return: If full is False, returns par_stats only (dict of dicts), else returns (par_list, valid, per_valid, fname, trange, extent, N, avail_par, avail_par_per, par_stats)
:example: par_list, valid, per_valid, fname, trange, extent, N, avail_par, avail_par_per, par_stats = self.stats(flag)
'''
if par_list is None : par_list=self.par_list.compress([(not par.endswith('_surf')) & ( par != 'id') for par in self.par_list])
else :
if not isinstance(par_list,np.ma.masked_array) : par_list=np.ma.array(par_list,mask=np.zeros(len(par_list),dtype=bool))
valid= np.array([len(self.__dict__[par].compress(flag).compressed()) for par in par_list])
per_valid = (100.0* valid) /float(np.sum(flag))
fname = [self.filelist[i] for i in np.unique(self.fileid.compress(flag)).astype('i')]
trange = self.time_range(flag)
extent = self.extension(flag)
N = np.sum(flag)
avail_par = par_list.compress(per_valid > 0)
avail_par_per = per_valid.compress(per_valid > 0)
par_stats = OrderedDict()
for p in avail_par :
var = self.__dict__[p].compress(flag).compressed()
par_stats.update({p:{'nb':N,
'mean':np.mean(var),
'median':np.median(var),
'std':np.std(var),
'min':np.min(var),
'max':np.max(var), #
'ptp':np.ptp(var)} #peak-to-peak
})
if full : out = par_list, valid, per_valid, fname, trange, extent, N, avail_par, avail_par_per, par_stats
else : out = par_stats
return out
def get_platform_stats(self,id,functype='overall'):
'''
get statistics based on `altimetry.data.hydro_data.id`
:param functype: set to 'overall' to get variables moments or 'time'|'temporal' to get temporal variability
'''
if functype == 'overall': get_stats = self.get_stats
elif functype == 'time' or functype == 'temporal': get_stats = self.get_timestats
else: get_stats = self.get_stats
par_list, valid, per_valid, fname, trange, extent, N, avail_par, avail_par_per, par_stats = get_stats(self.id == id)
return (fname,trange,extent,N,avail_par,avail_par_per, par_stats)
def get_object_stats(self,functype='overall',**kwargs):
'''
get some statistics about the whole dataset.
:param functype: set to 'overall' to get variables moments or 'time' to get temporal variability
:return: par_list, valid, per_valid, fname, trange, extent, N, avail_par, avail_par_per, par_stats
:example: par_list, valid, per_valid, fname, trange, extent, N, avail_par, avail_par_per, par_stats = self.get_object_stats()
'''
if functype == 'overall': get_stats = self.get_stats
elif functype == 'time': get_stats = self.get_timestats
return get_stats(np.ones(self.count,dtype='bool'),**kwargs)
def platform_summary(self,id,col='.k',functype='overall'):
'''
outputs a summary of the statistics for a given platform
'''
stats = self.get_platform_stats(id,functype=functype)
self.message(0, '\tPlatform {0} : '.format(id))
self.message(0, '\t-> file : {0}'.format(stats[0]))
self.message(0, '\t-> from : '+' - '.join(map(str,stats[1][0])))
self.message(0, '\t-> extent : ['+', '.join(['{0:.1f}'.format(x) for x in stats[2]])+']')
self.message(0, '\t-> size : {0} pts'.format(stats[3]))
self.message(0, '\t-> variables : [%s]' % ', '.join(['{0}({1:.0f} %)'.format(i[0],i[1]) for i in zip(*(stats[4],stats[5]))]))
if functype == 'time' or functype == 'temporal':
nsub=len(stats[6].keys())
nx,ny = ((nsub-1)/2) + 1, 2
plt.suptitle('Platform : %s' % id)
for i,item in enumerate(stats[6].iteritems()):
s,v=item[0],copy.deepcopy(item[1])
t=v.pop('t_axis')
ax1=plt.subplot(nx,ny,i+1)
ax1.plot(t,v['nb'],'-k')
plt.title(s)
if i == nsub-1: ax1.set_xlabel('time')
if i%2 == 0: ax1.set_ylabel('#')
ax2 = ax1.twinx()
ax2.set_ylabel('mean')
if i%2 == 1: ax1.set_ylabel('#')
ax2.plot(t,v['mean'],'-r')
plt.show()
else:
for p in stats[6].keys() :
self.message(0,'\t\to %s : {%s}' % (p,','.join(["%s:%f" % (s,stats[6][p][s]) for s in stats[6][p].keys()])))
def map(self, flag=None, fname=None, zoom=False, pmap=None, show=True, bathy=False, **kwargs):
'''
display (or not) a map based on a :class:`altimetry.tools.plot_map` object.
:keyword show: set to False not to show (and neither apply :meth:`altimetry.tools.plot_map.setup_map`)
.. note:: This function creates a :class:`altimetry.tools.plot_map` instance, plots a portion of the dataset using :meth:`altimetry.data.hydro_data.plot_track` and displays it if asked to.
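:example: a minimal sketch (assuming ``obj`` is a loaded :class:`hydro_data` instance ; the platform id below is purely illustrative) :
.. code-block:: python
p, pmap = obj.map(zoom=True, col='.k') #plot the whole dataset, zoomed on its extension
p, pmap = obj.map(flag=obj.slice('id', 12345), show=False) #plot a single platform without displaying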
'''
if zoom : limit = self.extension(flag)
else : limit = self.limit
if pmap is None : pmap=plot_map(0,0,0,limit=limit,bathy=bathy)
p,=self.plot_track(pmap, flag,**kwargs)
if show :
pmap.setup_map(bathy=bathy)
pmap.show()
return p,pmap
def summary(self,all=False,fig=None,col='.k',legend=None,bathy=False,functype='overall',**kwargs):
"""
outputs a summary of the whole current dataset
:param functype: set to 'overall' to get variables moments or 'time' to get temporal variability
"""
par_list, valid, per_valid, fname, trange, extent, N, avail_par, avail_par_per, par_stats = self.get_object_stats()
#Print parameter list
self.message(0, '\n\t DATA SUMMARY')
self.message(0, '\tObject type <class \'{0}\'> ({1})'.format(self.__class__.__name__,','.join(map(str,self.__class__.__bases__))))
self.message(0, '\tLoaded from : '+self.dirname)#', '.join(map(str,[os.path.basename(file) for file in self.filelist])))
self.message(0, '\tLimits = '+', '.join(map(str,self.limit)))
self.message(0, '\tDate range : '+' - '.join(map(str,trange[0])))
self.message(0, '\tSpatial extension: ['+', '.join(['{0:.1f}'.format(x) for x in extent])+']')
self.message(0, '\tRecords : [{0}]'.format(N))
self.message(0, '\tAvailable parameters : '+', '.join(map(str,self.par_list)))
self.message(0, '\tParam summary : [%s]' % ', '.join(['{0}({1:.0f} %)'.format(i[0],i[1]) for i in zip(*(avail_par,avail_par_per))]))
for p in avail_par :
self.message(0,'\t\to %s : {%s}' % (p,','.join(["%s:%s" % (s,par_stats[p][s]) for s in par_stats[p].keys()])))
if isinstance(fig,str) :
ffig = fig+'dataset.png'
show=False
else :
ffig = fig
show=True
if fig is not None :
if self.__dict__.has_key('id_surf') : n=len(self.id_surf)
else : n=N
# p,pmap=self.map(np.ones(n,dtype='bool'),ffig,col=col,show=show,**kwargs)
p,pmap=self.map(col=col,show=show,bathy=bathy,**kwargs)
pmap.setup_map()
if legend is not None : plt.legend([p],[legend])
if not show :
pmap.savefig(ffig)
plt.clf()
if all is not True :
return
self.message(0, '\t##### Details by float ID')
#Get all platform ID's
id_list=self.get_id_list().astype('a')
for id in id_list :
self.platform_summary(id,functype=functype)
if isinstance(fig,str) : ffig = fig+'{0}.png'.format(str(id))
else : ffig = None
if fig is not None :
if self.__dict__.has_key('id_surf') :
flag=self.id_surf == id
n=len(self.id_surf.compress(flag))
else :
flag=self.id == id
n=len(self.id.compress(flag))
p,pmap=self.map(flag=flag,col=col,show=show,**kwargs)
pmap.setup_map()
if legend is not None : plt.legend([p],['#{0}'.format(id)])
if show is not True :
plt.savefig(ffig)
plt.clf()
def in_limits(self,limit=None):
'''
wrapper to :func:`altimetry.tools.in_limits` based on dataset limits.
'''
if limit is None : limit = self.limit
flag=in_limits(self.lon, self.lat, limit)
return flag
def plot_track(self,pmap,flag=None,col='.k',endpoint='*r',endpoint_size=None,title=None,fontsize=8,textcolor='b',ms=5,linewidth=1,**kwargs):
'''
plot trajectories based on platform IDs
:parameter pmap: a :class:`altimetry.tools.plot_map` instance
:parameter col: color to be used along the trajectory. If this is an array of values, calls :func:`altimetry.tools.plot_map.scatter` instead of :func:`altimetry.tools.plot_map.plot`
.. note:: This method loops on data IDs. Then it calls :func:`altimetry.tools.plot_map.plot` or :func:`altimetry.tools.plot_map.scatter` to plot the trajectory and then labels the trajectory using :func:`altimetry.tools.plot_map.text`
'''
if self.__dict__.has_key('lon_surf') :
if flag is None : flag=np.ones(self.lon_surf.size,dtype='bool')
lon=self.lon_surf.compress(flag)
lat=self.lat_surf.compress(flag)
dat=self.date_surf.compress(flag)
id=self.id_surf.compress(flag)
else :
if flag is None : flag=np.ones(self.lon.size,dtype='bool')
lon=self.lon.compress(flag)
lat=self.lat.compress(flag)
dat=self.date.compress(flag)
id=self.id.compress(flag)
if endpoint_size is None : endpoint_size=2*ms
id_list=np.unique(id)
cnt=np.size(id_list)
# #Go to next iteration when no data
# if cnt == 0 :
# continue
for j in id_list :
dumflag = (id == j)
dumdat=dat.compress(dumflag)
sort = dumdat.argsort()
dumlon=lon.compress(dumflag)[sort]
dumlat=lat.compress(dumflag)[sort]
dumid=id.compress(dumflag)[sort]
dumcnt=np.size(dumid)
dumvalid=(~dumlon.mask & ~dumlat.mask & ~dumid.mask).sum()
if dumvalid > 0:
if isinstance(col,str) : p=pmap.plot(dumlon,dumlat,col,ms=ms,linewidth=linewidth,**kwargs)
else : p=pmap.scatter(dumlon,dumlat,col.compress(dumflag)[sort],s=ms,linewidth=linewidth,**kwargs)
pmap.plot(dumlon[dumcnt-1],dumlat[dumcnt-1],endpoint,ms=endpoint_size)
pmap.text(dumlon[dumcnt-1],dumlat[dumcnt-1],'{0}'.format(j),fontsize=fontsize,color=textcolor)
if title is not None : pmap.title(title)
# print j
# pmap.show()
try : return p
finally : return pmap.plot(-100,-370,'') #this is a hack to plot data outside the globe...
def plot_track_old(self,*args,**kwargs):
'''
plot a surface map of sampling track
.. warning:: DEPRECATED method!
'''
#Get map object (plot_map or Basemap)
flag_list=[inspect.ismodule(type(i)) | inspect.ismodule(type(i)) for i in args]
if (np.array(flag_list).max()) :
map_obj = args[0]
obj_plot=map_obj
args.pop(0)
else :
obj_plot=plt
#Retrieve other params
lon=args[0]
lat=args[1]
#override if passed directly through 3rd argument
if len(args) == 3 :
var = args[2]
scatter=True
else :
if 'c' in kwargs :
c = kwargs['c']
del kwargs['c']
else : c = '.k'
scatter=False
if scatter :obj_plot.scatter(lon,lat,var,**kwargs)
else : obj_plot.plot(lon,lat,**kwargs)
def plot_transect(self, x, z, var,
xrange=None, zrange=None,
vmin=None,vmax=None, #colormap limits
xstep=1,zstep=10, #Interpolation step
s=10, edgecolor='none',
**kwargs):
'''
shows a 2D space-depth section by plotting points (using :func:`matplotlib.pyplot.scatter`)
:example: plot a temperature section along a glider transect
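(a minimal sketch ; ``dist``, ``deph`` and ``temp`` are assumed to be available parameters of a loaded :class:`glider_data` instance, and the colour limits are purely illustrative)
.. code-block:: python
sc = obj.plot_transect(obj.dist, obj.deph, obj.temp, vmin=13., vmax=14.)
plt.colorbar(sc)
plt.show()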
'''
ax=plt.gca()
if isinstance(var,np.ma.masked_array) :
flag=np.reshape(var.mask ,var.size)
values=var.compress(~flag)
x=x.compress(~flag)
z=z.compress(~flag)
else : values = var
return ax.scatter(x,z,c=values,s=s,edgecolor=edgecolor,vmin=vmin,vmax=vmax)
# plt.show()
pass
def contour_transect(self, x, z, var, xrange=None,zrange=None, xstep=1,zstep=10,vmin=None,vmax=None,marker='.k', **kwargs):
'''
shows a 2d space-depth section by interpolating the data along the section.
.. note:: This method interpolates using :func:`scipy.interpolate.griddata` and plots using :func:`matplotlib.pyplot.pcolormesh`
'''
if xrange is None : xrange=(x.min(),x.max())
if zrange is None : zrange=(z.min(),z.max())
gx=np.arange(xrange[0],xrange[1],xstep)
gz=np.arange(zrange[0],zrange[1],zstep)
grid_x, grid_y = np.meshgrid(gx,gz)
#Remove masks
flag=np.reshape(var.mask ,var.size)
values=var.compress(~flag)
x=x.compress(~flag)
z=z.compress(~flag)
npts=values.size
# plt.tricontourf(x,z,values) #VERY long!!
points = zip(*(np.reshape(x,npts),np.reshape(z,npts)))
Z = interpolate.griddata(points, values, (grid_x, grid_y), method='cubic')
# plt.imshow(gdist,gdepth,Z,vmin=13.1,vmax=13.6)
plt.pcolormesh(gx,gz,Z,vmin=vmin,vmax=vmax)
if marker is not None : plt.plot(x,z,marker,ms=1)
# plt.scatter(dist2d,depth,s=10,c=values,edgecolor='none',vmin=13.1,vmax=13.6)
# plt.show()
return Z, gx, gz
def read_ArgoNC(self,filename,params=None,force=False,dephrange=None,timerange=None,**kwargs):
"""
An Argo network NetCDF reader
:return outStr: Output data structure (dict) containing all recorded parameters as specified by the NetCDF file PARAMETER list.
:author: Renaud Dussurget
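:example: a minimal sketch of the returned structure (the file name is purely illustrative and the available parameters depend on the file content) :
.. code-block:: python
outStr = self.read_ArgoNC('argo_profiles.nc')
print(outStr['_dimensions']) #eg. {'_ndims':2, 'N_PROF':50, 'N_LEVELS':100}
temp = outStr['temp']['data'] #each parameter is a {'_dimensions':..., 'data':...} structure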
"""
#Open file
self._filename = filename
self._ncfile = ncfile(self._filename, "r")
#Get list of recorded parameters:
dum=self._ncfile.variables['STATION_PARAMETERS'][0,:]
nparam=np.shape(dum)[0]
par_list=np.array([''.join(self._ncfile.variables['STATION_PARAMETERS'][0,i,:].compressed()) for i in np.arange(nparam)])
#remove empty items and update nparam
par_list=par_list.compress([len(par) != 0 for par in par_list])
nparam=par_list.size
if params is not None :
if force : par_list=[i.upper() for i in params]
else :par_list=list(set(params).intersection(par_list))
else : par_list=par_list.tolist()
self.message(1,'Recorded parameters : '+str(nparam)+' -> '+str(par_list))
lon = self.load_ncVar('LONGITUDE',**kwargs)
lat = self.load_ncVar('LATITUDE',**kwargs)
date = self.load_ncVar('JULD',**kwargs)
# lon = self._ncfile.variables['LONGITUDE'][:]
# lat = self._ncfile.variables['LATITUDE'][:]
#Extract within limits
ind, flag = in_limits(lon['data'],lat['data'],limit=self.limit)
if timerange is not None :
dflag = (date['data'] >= np.min(timerange)) & (date['data'] < np.max(timerange))
flag = flag & dflag
ind = np.where(flag)[0]
dim_lon = lon['_dimensions']
dim_date = date['_dimensions']
lat['data'] = lat['data'].compress(flag)
lon['data'] = lon['data'].compress(flag)
date['data'] = date['data'].compress(flag)
#Update dimensions
lat['_dimensions']['N_PROF']=flag.sum()
lon['_dimensions']['N_PROF']=flag.sum()
date['_dimensions']['N_PROF']=flag.sum()
# dist=cumulative_distance(lat['data'], lon['data'])
sz=np.shape(lon)
ndims=np.size(sz)
# if dephrange is not None :
# pres = self.load_ncVar('PRES',N_PROF=ind,**kwargs)
# deph=gsw.z_from_p(pres['data'],lat[0])
# depind=(np.abs(tutu-(-np.max(dephrange)))).argmin(1)
# if timerange is not None :deph=gsw.z_from_p(pres['data'],lat[0])
date = self.load_ncVar('JULD',N_PROF=ind,**kwargs)
dimStr = date['_dimensions']
# date=date['data']
# #Create dimension structure
# curDim = [str(dimname) for dimname in dimStr.keys()[1:]] #[str(dimname) for dimname in self._ncfile.variables['LONGITUDE'].dimensions]
# curDimval = [dimStr[dim] for dim in curDim] #[len(self._ncfile.dimensions[dimname]) for dimname in curDim]
#
# outStr={'_dimensions':{'_ndims':ndims,'nbpoints':sz[0]},'lon':lon,'lat':lat,'date':date}
outStr={'_dimensions':dimStr,'lon':lon,'lat':lat,'date':date}
for param in par_list :
dumVar = self.load_ncVar(param,N_PROF=ind,**kwargs) #Load variables
dimStr=dumVar.get('_dimensions')
# dimStr=dumVar.pop('_dimensions')
# dumVar['data'].__setattr__('_dimensions',dimStr)
#update current object dimensions with missing variable dimensions
curDim = [str(dimname) for dimname in dimStr.keys()[1:]] #[str(dimname) for dimname in self._ncfile.variables['LONGITUDE'].dimensions]
curDimval = [dimStr[dim] for dim in curDim] #[len(self._ncfile.dimensions[dimname]) for dimname in curDim]
# curDim = [str(dimname) for dimname in self._ncfile.variables[param].dimensions]
# curDimval = [len(self._ncfile.dimensions[dimname]) for dimname in curDim]
flag = [(np.array(dimname) == outStr['_dimensions'].keys()).sum() == 0 for dimname in curDim] #find dimensions to update
dimUpdate = np.array(curDim).compress(flag)
for enum in enumerate(dimUpdate) :
self.message(2, 'Appending dimensions {0}:{1} to dataStructure'.format(enum[1],np.array(curDimval).compress(flag)[enum[0]]))
outStr['_dimensions'].update({enum[1]:np.array(curDimval).compress(flag)[enum[0]]}) #Append new dimension
outStr['_dimensions']['_ndims']+=1 #update dimension counts
self.message(4, 'Loading {0} ({1})'.format(param.lower(),','.join(str(dimStr[key]) for key in dimStr.keys()[1:])))
dumStr=dumVar.copy()
# locals()[param.lower()] = {param.lower():dumVar['data']}
# cmd = 'dumStr = {\''+param.lower()+'\':dumVar[\'data\']}'
# self.message(4, 'exec : '+cmd)
# exec(cmd)
outStr.update({param.lower():dumStr})
# id=np.array([''.join([num for num in j.compressed()]) for j in self._ncfile.variables['PLATFORM_NUMBER'][ind]]) #get platform id into array
pid=self.load_ncVar('PLATFORM_NUMBER',N_PROF=ind,**kwargs)
pid_var=np.ma.MaskedArray([''.join([num for num in j.compressed()]) for j in pid['data']],mask=lon['data'].mask)
pid['data']=pid_var.copy()
for key in pid['_dimensions'].keys() :
if key.startswith('STRING') :
pid['_dimensions'].pop(key)
pid['_dimensions']['_ndims']-=1
outStr.update({'id':pid})
self._ncfile.close()
return outStr
def load_ncVar(self,varName,nc=None,**kwargs):
return load_ncVar_v2(varName, nc=self._ncfile, **kwargs)
def ncstruct(self,**kwargs):
'''
returns a data structure (dict) of the dataset.
:keyword params: Add this keyword to provide a list of variables to export. Default : all variables contained in self.par_list
'''
par_list = kwargs.get('params',self.par_list.tolist())
dimStr=self._dimensions
dimlist = dimStr.keys()
#outStr = OrderedDict({'_dimensions':dimStr})
outStr=ncStr()
outStr['_dimensions'].update(dimStr)
if (np.array(par_list) == 'sat').sum() : par_list.pop(par_list.index('sat')) #Remove satellite info
varlist=np.append(np.array(dimlist[1:]),np.array(par_list))
for d in varlist :
self.message(2, 'Updating output structure with {0}'.format(d))
curDim=getattr(self.__getattribute__(d),'_dimensions',None)
attributes=[a for a in self.__getattribute__(d).__dict__.keys() if not a.startswith('_')]
attr=attrStr(keys=attributes,
values=[self.__getattribute__(d).__getattribute__(a) for a in attributes])
data=self.__getattribute__(d)
outStr[d]=varStr(dimensions=curDim if curDim is not None else self._dimensions,
attributes=attr,
data=data)
return outStr
def write_nc(self,filename,clobber=False,**kwargs):
'''
write a NetCDF file from current dataset
:keyword kwargs: additional arguments are passed to :meth:`altimetry.tools.nctools.nc.write`
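:example: a minimal sketch (the output path below is purely illustrative) :
.. code-block:: python
obj.write_nc('/tmp/dataset_dump.nc', clobber=True) #dump the whole dataset to a NetCDF file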
'''
obj=ncobj(verbose=kwargs.pop('verbose',self.verbose),limit=self.limit,use_local_dims=True)
ncalti=self.ncstruct() #Get an netcdf structure from data
return obj.write(ncalti,filename,clobber=clobber,**kwargs) #Save processed dataset
def push_nc(self,*args,**kwargs):
'''
append a data structure to an existing netcdf file
'''
obj=ncobj(verbose=self.verbose,limit=self.limit,use_local_dims=True)
res=obj.push(*args,**kwargs)
def __len__(self): return self.count
def __del__(self):
for f in self.__tempfile : os.unlink(f)
class buoy_data(hydro_data):
def __init__(self,file_pattern,**kwargs):
hydro_data.__init__(self,file_pattern,**kwargs)
# self.count=np.size(self.lon)
def read(self,filename,**kwargs):
fname,extension = os.path.splitext(filename)
# if extension == '.nc' : outStr=self.read_ArgoNC(filename,N_LEVELS=0,**kwargs)
if extension == '.nc' : outStr=self.read_ArgoNC(filename,**kwargs)
elif extension == '.dat' : outStr = self.read_txt(filename)
elif extension == '.asc' :
kwread={}
if kwargs.has_key('lon_name') : kwread['lon_name']=kwargs['lon_name']
if kwargs.has_key('lat_name') : kwread['lat_name']=kwargs['lat_name']
outStr = self.read_asc(filename,**kwread) #CLS dumps
else : self.Error('Unknown formatting')
#Rework dimensions
##################
# Dimensions may be 2D (which is not compliant with the current code)
# Data object only accepts flattened (1D) variables
dimStr=outStr.pop('_dimensions')
# if dimStr['_ndims'] != 2 : self.Error('Variables with {0} dimensions can not be used as glider profiles (2 dimensions)'.format(dimStr['_ndims']))
if not dimStr.has_key('N_LEVELS') or not dimStr.has_key('N_PROF') :
self.Error('Data structure must contain dimensions N_LEVELS and N_PROF (only contains {0})'.format(dimStr.keys()[1:]))
datalen = [np.size(outStr[key]) for key in outStr.keys()] #[np.shape(outStr[key]) for key in outStr.keys()]
nblevels = dimStr['N_LEVELS']
nbprofiles = dimStr['N_PROF']
nbpoints = nblevels*nbprofiles
#Change dimension names towards new ones
for key in outStr.keys() :
if outStr[key].has_key('_dimensions'):
outStr[key]['_dimensions'].pop('N_LEVELS',None)
outStr[key]['_dimensions'].pop('N_PROF',None)
# if outStr[key]['_dimensions'].has_key('N_PROF') : outStr[key]['_dimensions'].update({'nbprofiles':outStr[key]['_dimensions'].pop('N_PROF')})
outStr[key]['_dimensions'].update({'nbpoints':nbpoints})
#Reform 2D variables from 1D
############################
twoD_flag = []
for key in outStr.keys() : twoD_flag.append(len(np.shape(outStr[key]['data'])) == 2) if isinstance(outStr[key],dict) else twoD_flag.append(len(np.shape(outStr[key])) == 2)
twoD_flag = np.array(twoD_flag)
oneD_vars = np.array(outStr.keys()).compress(~twoD_flag)
twoD_vars = np.array(outStr.keys()).compress(twoD_flag)
#Transform 2D variables into 1D
# -> For each record (N_PROF), we only keep the first valid depth bin (along N_LEVELS)
for key in twoD_vars:
if isinstance(outStr[key],dict) : outStr[key]['data']=outStr[key]['data'][:,outStr[key]['data'].mask.argmin(1)].diagonal()
else : outStr[key]=outStr[key][:,outStr[key].mask.argmin(1)].diagonal()
#Flatten variables (force flattening in case...)
for key in outStr.keys():
if isinstance(outStr[key],dict) : outStr[key]['data']=outStr[key]['data'].flatten()
else : outStr[key]=outStr[key].flatten()
#Additional variables
dst=outStr['lat'].copy()
if isinstance(dst,dict) :
dst['data']=calcul_distance(outStr['lat']['data'],outStr['lon']['data'])
outStr.update({'dist':dst})
else : outStr.update({'dist':calcul_distance(outStr['lat'],outStr['lon'])})
#Update dimensions
newDim = {'_dimensions' : {'_ndims':1,'nbprofiles':nbprofiles}}
outStr.update(newDim)
self.update_fid_list(os.path.basename(filename),outStr['_dimensions']['nbprofiles'])
return outStr
def read_txt(self,filename):
#Open file
self._filename = filename
data=np.genfromtxt(filename,skip_header=1)
#Convert to numpy arrays
lon=np.ma.masked_array(data[:,3])
lat=np.ma.masked_array(data[:,2])
import datetime
date=np.ma.masked_array(data[:,1])-datetime.date(1951,1,2).toordinal()
id=np.ma.masked_array(data[:,0])
sz=np.shape(lon)
ndims=np.size(sz)
#Get SST, U & V data if available
basename=os.path.basename(filename)
dirname=os.path.dirname(filename)
sstuv_name=dirname+'/tempuv_'+basename[basename.find('_')+1:len(basename)]
data=np.genfromtxt(sstuv_name,skip_header=1)
temp=np.ma.masked_array(data[:,0])
temp=np.ma.masked_equal(temp, 9999.)
u=np.ma.masked_array(data[:,1])
u=np.ma.masked_equal(u, 9999.)
v=np.ma.masked_array(data[:,2])
v=np.ma.masked_equal(v, 9999.)
return {'_dimensions':{'_ndims':ndims,'N_PROF':sz[0],'N_LEVELS':1},'lon':lon,'lat':lat,'date':date,'id':id,'temp':temp,'u':u,'v':v}
def read_asc(self,filename,lon_name='LON_FILTRE',lat_name='LAT_FILTRE'):
#Open file
self._filename = filename
asc=open(self._filename)
#get header properties
l=0
par_list=[]
par_dv=[]
for line in asc.readlines():
if not line.startswith('//') : break
else :
if line.startswith('//\tClip') :
par_list.append(line.split(' ')[-1].replace('\n',''))
if line.startswith('//\tDefaut') :
par_dv.append(line.split('=')[-1].replace('\n',''))
l+=1
nPar=len(par_list)
col_id=np.arange(nPar)+2
#Get lon & lat
par_list[par_list.index(lon_name)]='lon'
par_list[par_list.index(lat_name)]='lat'
data=np.genfromtxt(filename,skip_header=l)
sz=data.shape[0]
mask=np.zeros(sz,dtype=bool)
#Construct output structure
dimStr={'_dimensions':{'_ndims':2,'N_PROF':sz,'N_LEVELS':1}}
outStr = OrderedDict()
outStr.update(dimStr)
outStr.update({'N_PROF':{'_dimensions':{'_ndims':1,'N_PROF':sz},'data':np.ma.array(np.arange(sz),mask=mask.copy())}})
outStr.update({'N_LEVELS':{'_dimensions':{'_ndims':1,'N_PROF':sz},'data':np.ma.array(np.repeat(25.,sz),mask=mask.copy())}})
#1st column is assigned to float ID, 2nd to date (julian seconds)
outStr.update({'id':{'_dimensions':{'_ndims':1,'N_PROF':sz},'data':np.ma.masked_array(data[:,0],mask=mask.copy())}})
outStr.update({'date':{'_dimensions':{'_ndims':1,'N_PROF':sz},'data':np.ma.masked_array(data[:,1]/86400.,mask=mask.copy())}})
for i,p in enumerate(par_list):
dumVar=data[:,col_id[i]]
mask=dumVar==eval('np.{0}'.format(dumVar.dtype))(par_dv[i])
outStr.update({p:{'_dimensions':{'_ndims':1,'N_PROF':sz},'data':np.ma.masked_array(dumVar,mask=mask.copy())}})
#recompute mask based on lon,lat,date validity
mask=outStr['lon']['data'].mask.copy() \
| outStr['lat']['data'].mask.copy() \
| outStr['date']['data'].mask.copy()
#Reapply mask (we may check dimensions?)
outStr['date']['data'].mask=mask.copy()
for k in par_list :
if outStr[k].has_key('data') : outStr[k]['data'].mask=mask.copy()
return outStr
# def plot_track(self,pmap,date):
#
# bflag=abs(self.date - date) <= 0.5
# blon=self.lon.compress(bflag)
# blat=self.lat.compress(bflag)
# bdat=self.date.compress(bflag)
# bid=self.id.compress(bflag)
#
# bid_list=np.unique(bid)
# cnt=np.size(bid_list)
#
## #Go to next iteration when no data
## if cnt == 0 :
## continue
#
# for j in bid_list :
# dumflag = (bid == j)
# dumdat=bdat.compress(dumflag)
# id = dumdat.argsort()
# dumlon=blon.compress(dumflag)[id]
# dumlat=blat.compress(dumflag)[id]
# dumid=bid.compress(dumflag)[id]
# dumcnt=np.size(dumid)
# pmap.plot(dumlon,dumlat,'.k',ms=5)
# pmap.plot(dumlon[dumcnt-1],dumlat[dumcnt-1],'*r',ms=10)
# pmap.text(dumlon[dumcnt-1],dumlat[dumcnt-1],str(int(j)),fontsize=8,color='b')
## pmap.title('IMEDIA day '+str(i))
## # plt.text(, y, s, fontsize=12)
## pmap.savefig(FIG_DIR+"/IMEDIA_alti_buoys_"+str(i)+".png")
class argo_trajectory(hydro_data):
def __init__(self,file_pattern,**kwargs):
hydro_data.__init__(self,file_pattern,**kwargs)
# self.count=np.size(self.lon)
def read(self,filename,**kwargs):
fname,extension = os.path.splitext(filename)
# if extension == '.nc' : outStr=self.read_ArgoNC(filename,N_LEVELS=0,**kwargs)
if extension == '.nc' : outStr=self.read_ArgoNC(filename,**kwargs)
elif extension == '.dat' : outStr = self.read_txt(filename)
else : self.Error('Unknown formatting')
#Rework dimensions
##################
# Dimensions may be 2D (which is not compliant with the current code)
# Data object only accepts flattened (1D) variables
dimStr=outStr.pop('_dimensions')
# if dimStr['_ndims'] != 2 : self.Error('Variables with {0} dimensions can not be used as glider profiles (2 dimensions)'.format(dimStr['_ndims']))
if not dimStr.has_key('N_PROF') :
self.Error('Data structure must contain dimensions N_LEVELS and N_PROF (only contains {0})'.format(dimStr.keys()[1:]))
datalen = [np.size(outStr[key]) for key in outStr.keys()] #[np.shape(outStr[key]) for key in outStr.keys()]
nbprofiles = dimStr['N_PROF']
nbpoints = nbprofiles
#Reform 2D variables from 1D
############################
twoD_flag = np.array([len(np.shape(outStr[key])) for key in outStr.keys()]) == 2
oneD_vars = np.array(outStr.keys()).compress(~twoD_flag)
twoD_vars = np.array(outStr.keys()).compress(twoD_flag)
#Transform 2D variables into 1D
# -> For each record (N_PROF), we only keep the first valid depth bin (along N_LEVELS)
for key in twoD_vars: outStr.update({key:outStr[key][:,outStr[key].mask.argmin(1)].diagonal()})
#Flatten variables (force flattening in case...)
for key in outStr.keys(): outStr.update({key:outStr[key].flatten()})
#Additional variables
outStr.update({'dist':calcul_distance(outStr['lat'],outStr['lon'])})
#Update dimensions
newDim = {'_dimensions' : {'_ndims':1,'nbpoints':nbprofiles}}
outStr.update(newDim)
self.update_fid_list(os.path.basename(filename),outStr['_dimensions']['nbpoints'])
return outStr
class glider_data(hydro_data):
def __init__(self,file_pattern,**kwargs):
#Init hydro_data class (calling glider_data.read() function)
hydro_data.__init__(self,file_pattern,**kwargs)
def read(self,filename,**kwargs):
fname,extension = os.path.splitext(filename)
outStr=self.read_ArgoNC(filename,**kwargs)
#Rework dimensions
##################
# Dimensions may be 2D (which is not compliant with the current code)
# Data object only accepts flattened (1D) variables
dimStr=outStr.pop('_dimensions')
if dimStr['_ndims'] != 2 : self.Error('Variables with {0} dimensions can not be used as glider profiles (2 dimensions)'.format(dimStr['_ndims']))
if not dimStr.has_key('N_LEVELS') or not dimStr.has_key('N_PROF') :
self.Error('Data structure must contain dimensions N_LEVELS and N_PROF (only contains {0})'.format(dimStr.keys()[1:]))
# datalen = [np.size(outStr[key]) for key in outStr.keys()] #[np.shape(outStr[key]) for key in outStr.keys()]
nblevels = dimStr['N_LEVELS']
# nblevels = nblevels.astype(nblevels.dtype)
nbprofiles = dimStr['N_PROF']
nbpoints = nblevels*nbprofiles
#Change dimension names towards new ones
for key in outStr.keys() :
if outStr[key].has_key('_dimensions'):
outStr[key]['_dimensions'].pop('N_LEVELS',None)
outStr[key]['_dimensions'].pop('N_PROF',None)
# if outStr[key]['_dimensions'].has_key('N_PROF') : outStr[key]['_dimensions'].update({'nbprofiles':outStr[key]['_dimensions'].pop('N_PROF')})
outStr[key]['_dimensions'].update({'nbpoints':nbpoints})
#Reform 2D variables from 1D
############################
twoD_flag = []
for key in outStr.keys() : twoD_flag.append(len(np.shape(outStr[key]['data'])) == 2) if isinstance(outStr[key],dict) else twoD_flag.append(len(np.shape(outStr[key])) == 2)
twoD_flag = np.array(twoD_flag)
oneD_vars = np.array(outStr.keys()).compress(~twoD_flag)
twoD_vars = np.array(outStr.keys()).compress(twoD_flag)
#Transform 1D variables into 2D
for key in oneD_vars:
if isinstance(outStr[key],dict) : outStr[key]['data'] = np.reshape(np.repeat(outStr[key]['data'],nblevels),[nbprofiles,nblevels])
else : outStr[key]=np.reshape(np.repeat(outStr[key],nblevels),[nbprofiles,nblevels])
# [{key:np.shape(outStr[key])} for key in outStr.keys()] #Check variables
#Load surface data
surfStr=self.read_ArgoNC(filename,N_LEVELS=0) #read surface data
surfStr.pop('_dimensions')
#Change dimension names towards new ones
for key in surfStr.keys() :
if surfStr[key].has_key('_dimensions'):
surfStr[key]['_dimensions'].pop('N_LEVELS',None)
if surfStr[key]['_dimensions'].has_key('N_PROF') : surfStr[key]['_dimensions'].update({'nbprofiles':surfStr[key]['_dimensions'].pop('N_PROF')})
#Flatten surface variables
for key in surfStr.keys():
if isinstance(surfStr[key],dict) : surfStr[key]['data'] = surfStr[key]['data'].flatten()
else : surfStr[key] = surfStr[key].flatten()
outStr.update({key+'_surf':surfStr[key]})
#Flatten 2D variables
for key in outStr.keys():
if isinstance(outStr[key],dict) : outStr[key]['data']=outStr[key]['data'].flatten()
else : outStr[key]=outStr[key].flatten()
#Additional variables
dst_surf=outStr['lat_surf'].copy()
dst=outStr['lat'].copy()
if isinstance(dst_surf,dict) :
dst_surf['data']=calcul_distance(outStr['lat_surf']['data'],outStr['lon_surf']['data'])
dst['data']=np.repeat(dst_surf['data'],nblevels)
outStr.update({'dist_surf':dst_surf})
outStr.update({'dist':dst})
else :
outStr.update({'dist_surf':calcul_distance(outStr['lat_surf'],outStr['lon_surf'])})
outStr.update({'dist':np.repeat(outStr['dist_surf'],nblevels)})
###TODO : Could be simplified? (if has_key(var) --> must have key var_surf
if outStr.has_key('pres') :
deph = outStr['pres'].copy()
deph_surf = outStr['pres_surf'].copy()
try:
deph['data']=gsw.z_from_p(outStr['pres']['data'],outStr['lat']['data'])
deph_surf['data']=gsw.z_from_p(outStr['pres_surf']['data'],outStr['lat_surf']['data'])
except :
deph['data'][:]=deph['data'].fill_value
deph_surf['data'][:]=deph['data'].fill_value
outStr.update({'deph' : deph})
outStr.update({'deph_surf' : deph_surf})
if outStr.has_key('psal') and outStr.has_key('temp') and outStr.has_key('pres') :
rho=outStr['psal'].copy()
rho_surf=outStr['psal_surf'].copy()
try:
rho['data']=gsw.rho(outStr['psal']['data'],outStr['temp']['data'],outStr['pres']['data'])
rho_surf['data']=gsw.rho(outStr['psal_surf']['data'],outStr['temp_surf']['data'],outStr['pres_surf']['data'])
except :
rho['data'][:]=deph['data'].fill_value
rho_surf['data'][:]=deph['data'].fill_value
outStr.update({'rho' : rho})
outStr.update({'rho_surf' : rho_surf})
#Update dimensions
newDim = {'_dimensions' : {'_ndims':2,'nbpoints':nbpoints,'nbprofiles':nbprofiles}}
outStr.update(newDim)
#Update fid
self.update_fid_list(os.path.basename(filename),outStr['_dimensions']['nbpoints'])
return outStr
class TSG_data(hydro_data) :
def __init__(self,file_pattern,**kwargs):
hydro_data.__init__(self,file_pattern,**kwargs)
def read(self,filename):
fname,extension = os.path.splitext(filename)
if extension == '.nc' :
outStr=self.read_ArgoNC(filename,params=['TEM2','PSAL'])
outStr.update({'temp':outStr.pop('tem2')}) #Use TEM2 as temperature field
# outStr.update({'depth':outStr.pop('deph')}) #Use DEPH as depth field
return outStr
elif extension == '.dat' :
return self.read_txt(filename)
else : self.Error('Unknown formatting')
def read_txt(self,filename):
#Open file
self._filename = filename
data=np.genfromtxt(filename,skip_header=1)
#Convert to numpy arrays
lon=np.ma.masked_array(data[:,2])
lat=np.ma.masked_array(data[:,1])
import datetime
date=np.ma.masked_array(data[:,0])+datetime.date(2012,1,1).toordinal()-datetime.date(1950,1,2).toordinal()
temp=np.ma.masked_array(data[:,3])
temp=np.ma.masked_greater(temp, 99.)
psal=np.ma.masked_array(data[:,4])
psal=np.ma.masked_less(psal, 35.)
fluo=np.ma.masked_array(data[:,5])
fluo=np.ma.masked_where((fluo == 0.0) | (fluo >= 99.),fluo)
id=np.repeat('IMEDIA_TSG',len(psal))
sz=np.shape(lon)
ndims=np.size(sz)
return {'_dimensions':{'_ndims':ndims,'nbpoints':sz[0]},'lon':lon,'lat':lat,'date':date,'id':id,'temp':temp,'psal':psal,'fluo':fluo}
class CTD_data(hydro_data):
def __init__(self,file_pattern,**kwargs):
#Init hydro_data class (calling CTD_data.read() function)
hydro_data.__init__(self,file_pattern,**kwargs)
def read(self,filename,**kwargs):
fname,extension = os.path.splitext(filename)
if extension == '.nc' :
outStr=self.read_ArgoNC(filename,params=['TEM2','PSAL'])
outStr.update({'temp':outStr.pop('tem2')}) #Use TEM2 as temperature field
# outStr.update({'depth':outStr.pop('deph')}) #Use DEPH as depth field
return outStr
elif extension == '.dat' :
return self.read_txt(filename)
#Seabird CTD data
elif extension == '.cnv' :
return self.read_cnv(filename,**kwargs)
elif extension == '.asc' :
return self.read_asc(filename,**kwargs)
else : self.Error('Unknown formatting')
def read_asc(self,filename,**kwargs):
self._filename = filename
#Check file length
nlines=0
for line in open(filename): nlines+=1
if nlines > 1 :
#Open file
data=np.genfromtxt(filename,skip_header=1)
#Convert to numpy arrays
lon=kwargs['lon']
lat=kwargs['lat']
import datetime
date=np.ma.masked_array(data[:,0])+datetime.date(2012,1,1).toordinal()-datetime.date(1950,1,2).toordinal()
pres=np.ma.masked_array(data[:,1])
temp=np.ma.masked_array(data[:,2])
cond=np.ma.masked_array(data[:,3])
obs1=np.ma.masked_array(data[:,4])
obs2=np.ma.masked_array(data[:,5])
descend_rate=np.ma.masked_array(data[:,6])
scan=np.ma.masked_array(data[:,7])
fluoro=np.ma.masked_array(data[:,8])
depth=np.ma.masked_array(data[:,9])
potemp=np.ma.masked_array(data[:,10])
psal=np.ma.masked_array(data[:,11])
dens=np.ma.masked_array(data[:,12])
svCM=np.ma.masked_array(data[:,13])
flag=np.ma.masked_array(data[:,14])
reclen=len(pres)
date.mask=np.zeros(reclen,dtype='bool')
pres.mask=np.zeros(reclen,dtype='bool')
temp.mask=np.zeros(reclen,dtype='bool')
cond.mask=np.zeros(reclen,dtype='bool')
obs1.mask=np.zeros(reclen,dtype='bool')
obs2.mask=np.zeros(reclen,dtype='bool')
descend_rate.mask=np.zeros(reclen,dtype='bool')
scan.mask=np.zeros(reclen,dtype='bool')
fluoro.mask=np.zeros(reclen,dtype='bool')
depth.mask=np.zeros(reclen,dtype='bool')
potemp.mask=np.zeros(reclen,dtype='bool')
psal.mask=np.zeros(reclen,dtype='bool')
dens.mask=np.zeros(reclen,dtype='bool')
svCM.mask=np.zeros(reclen,dtype='bool')
flag.mask=np.zeros(reclen,dtype='bool')
id=np.repeat('{0}'.format(kwargs['stationid']),reclen)
lon=np.ma.masked_array(np.repeat(lon,reclen),mask=np.zeros(reclen,dtype='bool'))
lat=np.ma.masked_array(np.repeat(lat,reclen),mask=np.zeros(reclen,dtype='bool'))
# psal=csw.salt(cond/gsw.cte.C3515, temp, pres)
# depth=gsw.z_from_p(pres,lat)
# dens= gsw.rho(psal,temp,pres)
sz=np.shape(lon)
ndims=np.size(sz)
else :
ndims = 1.
sz=(1,1)
lon=np.array(np.NaN)
lat=np.array(np.NaN)
date=np.array(np.NaN)
id=np.array(np.NaN)
depth=np.array(np.NaN)
pres=np.array(np.NaN)
temp=np.array(np.NaN)
psal=np.array(np.NaN)
fluoro=np.array(np.NaN)
dens=np.array(np.NaN)
potemp=np.array(np.NaN)
cond=np.array(np.NaN)
obs1=np.array(np.NaN)
obs2=np.array(np.NaN)
svCM=np.array(np.NaN)
descend_rate=np.array(np.NaN)
flag=np.array(np.NaN)
return {'_dimensions':{'_ndims':ndims,'nbpoints':sz[0]},'lon':lon,'lat':lat,'date':date,'id':id,'depth':depth,'pres':pres,'temp':temp, \
'psal':psal,'fluoro':fluoro,'dens':dens,'potemp':potemp,'cond':cond,'obs1':obs1,'obs2':obs2,'svCM':svCM,'descend_rate':descend_rate,'flag':flag}
def read_cnv(self,filename,**kwargs):
#Open file
data=np.genfromtxt(filename,skip_header=328)
#Convert to numpy arrays
lon=kwargs['lon']
lat=kwargs['lat']
import datetime
date=np.ma.masked_array(data[:,0])+datetime.date(2012,1,1).toordinal()-datetime.date(1950,1,2).toordinal()
pres=np.ma.masked_array(data[:,1])
temp=np.ma.masked_array(data[:,2])
cond=np.ma.masked_array(data[:,3])
obs1=np.ma.masked_array(data[:,6])
obs2=np.ma.masked_array(data[:,7])
        fluoro=np.ma.masked_array(data[:,8]) #NB: fluorometer column index assumed, following the read_asc layout
pres=np.ma.masked_greater(pres, 99999.)
temp=np.ma.masked_greater(temp, 99.)
cond=np.ma.masked_greater(cond, 99.)
#fluo=np.ma.masked_where((fluo == 0.0) | (fluo >= 99.),fluo)
reclen=len(pres)
id=np.repeat('{0}'.format(kwargs['stationid']),reclen)
lon=np.ma.masked_array(np.repeat(lon,reclen))
lat=np.ma.masked_array(np.repeat(lat,reclen))
try :
psal=csw.salt(cond/gsw.cte.C3515, temp, pres)
depth=gsw.z_from_p(pres,lat)
dens= gsw.rho(psal,temp,pres)
        except Exception :
psal=np.ma.masked_array(np.repeat(lon.fill_value,reclen),mask=True)
depth=np.ma.masked_array(np.repeat(lon.fill_value,reclen),mask=True)
dens=np.ma.masked_array(np.repeat(lon.fill_value,reclen),mask=True)
sz=np.shape(lon)
ndims=np.size(sz)
return {'_dimensions':{'_ndims':ndims,'nbpoints':sz[0]},'lon':lon,'lat':lat,'date':date,'id':id,'depth':depth,'pres':pres,'temp':temp,'psal':psal}
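# Minimal usage sketch (hypothetical file name and station metadata; the kwargs
# are forwarded to read_asc/read_cnv):
#   ctd = CTD_data('/data/ctd/station42.asc', lon=5.2, lat=43.1, stationid='ST42')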
| rdussurget/py-altimetry | altimetry/data/hydro.py | Python | lgpl-3.0 | 91,403 | ["NetCDF"] | c8bd7ba9152f732c180cef4cbe3b1208ff773c21094e80b2e5dfe6cea99594c7 |
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# INPUTS: {BrainProtonDensitySlice.png}
# OUTPUTS: {ResampleImageFilterOutput1.png}
# 0
# INPUTS: {BrainProtonDensitySlice.png}
# OUTPUTS: {ResampleImageFilterOutput2.png}
# 1
# INPUTS: {BrainProtonDensitySlice.png}
# OUTPUTS: {ResampleImageFilterOutput3.png}
# 2
# INPUTS: {BrainProtonDensitySlice.png}
# OUTPUTS: {ResampleImageFilterOutput4.png}
# 3
from __future__ import print_function
import itk
from sys import argv, stderr, exit
itk.auto_progress(2)
# if( len(argv) < 3 ):
# print("""Missing Parameters
# Usage: ResampleImageFilter.py inputImageFile outputImageFile
# [exampleAction={0,1,2,3}]""", file=stderr)
# exit(1)
dim = 2
SOType = itk.SpatialObject[dim]
InternalImageType = itk.Image[itk.F, dim]
OutputPixelType = itk.UC
OutputImageType = itk.Image[OutputPixelType, dim]
ellipse = itk.EllipseSpatialObject[dim].New(Radius=[10, 5])
ellipse.GetObjectToParentTransform().SetOffset([20, 20])
ellipse.ComputeObjectToWorldTransform()
box = itk.BoxSpatialObject[dim].New(Size=20)
box.GetObjectToParentTransform().SetOffset([20, 40])
box.ComputeObjectToWorldTransform()
gaussian = itk.GaussianSpatialObject[dim].New(Radius=100)
gaussian.GetObjectToParentTransform().SetOffset([60, 60])
gaussian.GetObjectToParentTransform().SetScale(10)
gaussian.ComputeObjectToWorldTransform()
group = itk.GroupSpatialObject[dim].New()
group.AddSpatialObject(ellipse)
group.AddSpatialObject(box)
group.AddSpatialObject(gaussian)
filter = itk.SpatialObjectToImageFilter[SOType, InternalImageType].New(
group, Size=[100, 100], UseObjectValue=True)
filter.Update() # required ?!
rescale = itk.RescaleIntensityImageFilter[
InternalImageType,
OutputImageType].New(
filter,
OutputMinimum=itk.NumericTraits[OutputPixelType].NonpositiveMin(),
OutputMaximum=itk.NumericTraits[OutputPixelType].max())
itk.imwrite(rescale, argv[1])
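# Usage sketch: the output file name is taken from the command line, e.g.
#   python SpatialObjectTest.py spatial_objects.png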
| stnava/ITK | Wrapping/Generators/Python/Tests/SpatialObjectTest.py | Python | apache-2.0 | 2,675 | ["Gaussian"] | 655e25ea2e680c46d74f766149625374510489491406a53423be047c6cc658ff |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PCR-GLOBWB (PCRaster Global Water Balance) Global Hydrological Model
#
# Copyright (C) 2016, Ludovicus P. H. (Rens) van Beek, Edwin H. Sutanudjaja, Yoshihide Wada,
# Joyce H. C. Bosmans, Niels Drost, Inge E. M. de Graaf, Kor de Jong, Patricia Lopez Lopez,
# Stefanie Pessenteiner, Oliver Schmitz, Menno W. Straatsma, Niko Wanders, Dominik Wisser,
# and Marc F. P. Bierkens,
# Faculty of Geosciences, Utrecht University, Utrecht, The Netherlands
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
from wflow.wf_DynamicFramework import configget
from wflow.wf_DynamicFramework import configsection
from . import landCover as lc
from . import parameterSoilAndTopo as parSoilAndTopo
from .ncConverter import *
logger = logging.getLogger("wflow_pcrglobwb")
class LandSurface(object):
def getState(self):
result = {}
if self.numberOfSoilLayers == 2:
for coverType in self.coverTypes:
result[coverType] = {}
result[coverType]["interceptStor"] = self.landCoverObj[
coverType
].interceptStor
result[coverType]["snowCoverSWE"] = self.landCoverObj[
coverType
].snowCoverSWE
result[coverType]["snowFreeWater"] = self.landCoverObj[
coverType
].snowFreeWater
result[coverType]["topWaterLayer"] = self.landCoverObj[
coverType
].topWaterLayer
result[coverType]["storUpp"] = self.landCoverObj[coverType].storUpp
result[coverType]["storLow"] = self.landCoverObj[coverType].storLow
result[coverType]["interflow"] = self.landCoverObj[coverType].interflow
if self.numberOfSoilLayers == 3:
for coverType in self.coverTypes:
result[coverType] = {}
result[coverType]["interceptStor"] = self.landCoverObj[
coverType
].interceptStor
result[coverType]["snowCoverSWE"] = self.landCoverObj[
coverType
].snowCoverSWE
result[coverType]["snowFreeWater"] = self.landCoverObj[
coverType
].snowFreeWater
result[coverType]["topWaterLayer"] = self.landCoverObj[
coverType
].topWaterLayer
result[coverType]["storUpp000005"] = self.landCoverObj[
coverType
].storUpp000005
result[coverType]["storUpp005030"] = self.landCoverObj[
coverType
].storUpp005030
result[coverType]["storLow030150"] = self.landCoverObj[
coverType
].storLow030150
result[coverType]["interflow"] = self.landCoverObj[coverType].interflow
return result
def getPseudoState(self):
result = {}
if self.numberOfSoilLayers == 2:
result["interceptStor"] = self.interceptStor
result["snowCoverSWE"] = self.snowCoverSWE
result["snowFreeWater"] = self.snowFreeWater
result["topWaterLayer"] = self.topWaterLayer
result["storUpp"] = self.storUpp
result["storLow"] = self.storLow
if self.numberOfSoilLayers == 3:
result["interceptStor"] = self.interceptStor
result["snowCoverSWE"] = self.snowCoverSWE
result["snowFreeWater"] = self.snowFreeWater
result["topWaterLayer"] = self.topWaterLayer
result["storUpp000005"] = self.storUpp000005
result["storUpp005030"] = self.storUpp005030
result["storLow030150"] = self.storLow030150
return result
def __init__(
self,
iniItems,
landmask,
Dir,
staticmaps,
cloneMap,
startTime,
initialState=None,
):
object.__init__(self)
# clone map, temporary directory, absolute path of input directory, and landmask
self.cloneMap = cloneMap # iniItems.cloneMap
self.tmpDir = os.path.join(os.path.abspath(Dir), "tmp") # iniItems.tmpDir
self.inputDir = os.path.join(
os.path.abspath(Dir), staticmaps
) # iniItems.globalOptions['inputDir']
self.stateDir = os.path.join(os.path.abspath(Dir), "instate")
self.landmask = landmask
self.startTime = startTime
# cellArea (unit: m2)
self.cellArea = vos.readPCRmapClone(
iniItems.get(
"routingOptions", "cellAreaMap"
), # iniItems.routingOptions['cellAreaMap'], \
self.cloneMap,
self.tmpDir,
self.inputDir,
)
self.cellArea = pcr.ifthen(self.landmask, self.cellArea)
# number of soil layers:
self.numberOfSoilLayers = int(
configget(iniItems, "landSurfaceOptions", "numberOfUpperSoilLayers", "2")
) # int(iniItems.landSurfaceOptions['numberOfUpperSoilLayers'])
# list of aggregated variables that MUST be defined in the module:
# - aggregated from landCover modules
# - some are needed for water balance checking
# - some are needed in other modules (e.g. routing, groundwater)
# - some are needed for initialConditions
#
# main state variables (unit: m)
self.mainStates = [
"interceptStor",
"snowCoverSWE",
"snowFreeWater",
"topWaterLayer",
]
#
# state variables (unit: m)
self.stateVars = [
"storUppTotal",
"storLowTotal",
"satDegUppTotal",
"satDegLowTotal",
]
#
# flux variables (unit: m/day)
self.fluxVars = [
"infiltration",
"gwRecharge",
"netLqWaterToSoil",
"totalPotET",
"actualET",
"interceptEvap",
"openWaterEvap",
"actSnowFreeWaterEvap",
"actBareSoilEvap",
"actTranspiUppTotal",
"actTranspiLowTotal",
"actTranspiTotal",
"directRunoff",
"interflow",
"interflowTotal",
"irrGrossDemand",
"nonIrrGrossDemand",
"totalPotentialGrossDemand",
"actSurfaceWaterAbstract",
"allocSurfaceWaterAbstract",
"desalinationAbstraction",
"desalinationAllocation",
"nonFossilGroundwaterAbs",
"allocNonFossilGroundwater",
"fossilGroundwaterAbstr",
"fossilGroundwaterAlloc",
"landSurfaceRunoff",
"satExcess",
"snowMelt",
"totalGroundwaterAbstraction",
"totalGroundwaterAllocation",
"totalPotentialMaximumGrossDemand",
"totalPotentialMaximumIrrGrossDemand",
"totalPotentialMaximumIrrGrossDemandPaddy",
"totalPotentialMaximumIrrGrossDemandNonPaddy",
"totalPotentialMaximumNonIrrGrossDemand",
"irrGrossDemandPaddy",
"irrGrossDemandNonPaddy",
"domesticWaterWithdrawal",
"industryWaterWithdrawal",
"livestockWaterWithdrawal",
"nonIrrReturnFlow",
"irrigationTranspirationDeficit",
]
#
# specific variables for 2 and 3 layer soil models:
#
if self.numberOfSoilLayers == 2:
self.mainStates += ["storUpp", "storLow"]
self.stateVars += self.mainStates
self.fluxVars += ["actTranspiUpp", "actTranspiLow", "netPercUpp"]
#
if self.numberOfSoilLayers == 3:
self.mainStates += ["storUpp000005", "storUpp005030", "storLow030150"]
self.stateVars += self.mainStates
self.fluxVars += [
"actTranspiUpp000005",
"actTranspiUpp005030",
"actTranspiLow030150",
"netPercUpp000005",
"netPercUpp005030",
"interflowUpp005030",
]
# list of all variables that will be calculated/reported in landSurface.py
self.aggrVars = self.stateVars + self.fluxVars
if self.numberOfSoilLayers == 2:
self.aggrVars += ["satDegUpp", "satDegLow"]
if self.numberOfSoilLayers == 3:
self.aggrVars += ["satDegUpp000005", "satDegUpp005030", "satDegLow030150"]
self.debugWaterBalance = iniItems.get(
"landSurfaceOptions", "debugWaterBalance"
) # iniItems.landSurfaceOptions['debugWaterBalance']
        # TODO: Perform water balance checks for aggregated values (from values of each land cover type).
# limitAbstraction
self.limitAbstraction = False
# if iniItems.landSurfaceOptions['limitAbstraction'] == "True": self.limitAbstraction = True
if (
configget(iniItems, "landSurfaceOptions", "limitAbstraction", "False")
== "True"
):
self.limitAbstraction = True
# landCover types included in the simulation:
self.coverTypes = ["forest", "grassland"]
#
self.includeIrrigation = False
# if iniItems.landSurfaceOptions['includeIrrigation'] == "True":
if (
configget(iniItems, "landSurfaceOptions", "includeIrrigation", "False")
== "True"
):
self.includeIrrigation = True
self.coverTypes += ["irrPaddy", "irrNonPaddy"]
logger.info("Irrigation is included/considered in this run.")
else:
logger.info("Irrigation is NOT included/considered in this run.")
# if user define their land cover types:
if "landCoverTypes" in configsection(
iniItems, "landSurfaceOptions"
): # iniItems.landSurfaceOptions.keys():
self.coverTypes = iniItems.get(
"landSurfaceOptions", "landCoverTypes"
).split(
","
) # iniItems.landSurfaceOptions['landCoverTypes'].split(",")
# water demand options: irrigation efficiency, non irrigation water demand, and desalination supply
self.waterDemandOptions(iniItems)
        # TODO: Make an option so that users can easily perform natural runs (without water use, without reservoirs).
# pre-defined surface water source fraction for satisfying irrigation and livestock water demand
self.swAbstractionFractionData = None
self.swAbstractionFractionDataQuality = None
if "irrigationSurfaceWaterAbstractionFractionData" in configsection(
iniItems, "landSurfaceOptions"
) and "irrigationSurfaceWaterAbstractionFractionDataQuality" in configsection(
iniItems, "landSurfaceOptions"
):
if configget(
iniItems,
"landSurfaceOptions",
"irrigationSurfaceWaterAbstractionFractionData",
"None",
) not in ["None", "False"] or configget(
iniItems,
"landSurfaceOptions",
"irrigationSurfaceWaterAbstractionFractionDataQuality",
"None",
) not in [
"None",
"False",
]:
# iniItems.landSurfaceOptions['irrigationSurfaceWaterAbstractionFractionDataQuality'] not in ["None", "False"]:
logger.info(
"Using/incorporating the predefined surface water source of Siebert et al. (2010) for satisfying irrigation and livestock demand."
)
self.swAbstractionFractionData = pcr.cover(
# vos.readPCRmapClone(iniItems.landSurfaceOptions['irrigationSurfaceWaterAbstractionFractionData'],\
vos.readPCRmapClone(
configget(
iniItems,
"landSurfaceOptions",
"irrigationSurfaceWaterAbstractionFractionData",
"None",
),
self.cloneMap,
self.tmpDir,
self.inputDir,
),
0.0,
)
self.swAbstractionFractionData = pcr.ifthen(
self.swAbstractionFractionData >= 0.0,
self.swAbstractionFractionData,
)
self.swAbstractionFractionDataQuality = pcr.cover(
# vos.readPCRmapClone(iniItems.landSurfaceOptions['irrigationSurfaceWaterAbstractionFractionDataQuality'],\
vos.readPCRmapClone(
configget(
iniItems,
"landSurfaceOptions",
"irrigationSurfaceWaterAbstractionFractionDataQuality",
"None",
),
self.cloneMap,
self.tmpDir,
self.inputDir,
),
0.0,
)
                # ignore values with quality above 5 (very bad)
                # - Note: The resulting map has values only in cells with the data quality <= 5.0
self.swAbstractionFractionData = pcr.ifthen(
self.swAbstractionFractionDataQuality <= 5.0,
self.swAbstractionFractionData,
)
# maximum pre-defined surface water source fraction for satisfying industrial and domestic water demand:
# - if not defined (default), set it to the maximum
self.maximumNonIrrigationSurfaceWaterAbstractionFractionData = pcr.scalar(1.0)
# - based on the map of McDonald et al. (2014)
if "maximumNonIrrigationSurfaceWaterAbstractionFractionData" in configsection(
iniItems, "landSurfaceOptions"
):
if (
configget(
iniItems,
"landSurfaceOptions",
"maximumNonIrrigationSurfaceWaterAbstractionFractionData",
"None",
)
!= "None"
or configget(
iniItems,
"landSurfaceOptions",
"maximumNonIrrigationSurfaceWaterAbstractionFractionData",
"False",
)
!= "False"
):
logger.info(
"Using/incorporating the predefined surface water source of McDonald et al. (2014) for satisfying domestic and industrial demand."
)
self.maximumNonIrrigationSurfaceWaterAbstractionFractionData = pcr.min(
1.0,
pcr.cover(
# vos.readPCRmapClone(iniItems.landSurfaceOptions['maximumNonIrrigationSurfaceWaterAbstractionFractionData'],\
vos.readPCRmapClone(
configget(
iniItems,
"landSurfaceOptions",
"maximumNonIrrigationSurfaceWaterAbstractionFractionData",
"None",
),
self.cloneMap,
self.tmpDir,
self.inputDir,
),
1.0,
),
)
# threshold values defining the preference for irrigation water source (unit: fraction/percentage)
self.treshold_to_maximize_irrigation_surface_water = vos.readPCRmapClone(
configget(
iniItems,
"landSurfaceOptions",
"treshold_to_maximize_irrigation_surface_water",
"1.0",
),
# vos.readPCRmapClone(iniItems.landSurfaceOptions['treshold_to_maximize_irrigation_surface_water'],\
self.cloneMap,
self.tmpDir,
self.inputDir,
)
self.treshold_to_minimize_fossil_groundwater_irrigation = vos.readPCRmapClone(
configget(
iniItems,
"landSurfaceOptions",
"treshold_to_minimize_fossil_groundwater_irrigation",
"1.0",
),
# vos.readPCRmapClone(iniItems.landSurfaceOptions['treshold_to_minimize_fossil_groundwater_irrigation'],\
self.cloneMap,
self.tmpDir,
self.inputDir,
)
# assign the topography and soil parameters
self.soil_topo_parameters = {}
# - default values used for all land cover types
self.soil_topo_parameters["default"] = parSoilAndTopo.SoilAndTopoParameters(
iniItems, self.landmask, self.inputDir, self.cloneMap, self.tmpDir
)
self.soil_topo_parameters["default"].read(iniItems)
# - specific soil and topography parameter (per land cover type)
for coverType in self.coverTypes:
name_of_section_given_in_ini_file = str(coverType) + "Options"
dictionary_of_land_cover_settings = iniItems._sections[
name_of_section_given_in_ini_file
] # __getattribute__(name_of_section_given_in_ini_file)
if "usingSpecificSoilTopo" not in list(
dictionary_of_land_cover_settings.keys()
):
dictionary_of_land_cover_settings["usingSpecificSoilTopo"] = "False"
if dictionary_of_land_cover_settings["usingSpecificSoilTopo"] == "True":
msg = "Using a specific set of soil and topo parameters "
msg += (
"as defined in the "
+ name_of_section_given_in_ini_file
+ " of the ini/configuration file."
)
self.soil_topo_parameters[
coverType
] = parSoilAndTopo.SoilAndTopoParameters(
iniItems, self.landmask, self.inputDir, self.cloneMap, self.tmpDir
)
self.soil_topo_parameters[coverType].read(
iniItems, dictionary_of_land_cover_settings
)
else:
msg = "Using the default set of soil and topo parameters "
msg += "as defined in the landSurfaceOptions of the ini/configuration file."
self.soil_topo_parameters[coverType] = self.soil_topo_parameters[
"default"
]
logger.info(msg)
# instantiate self.landCoverObj[coverType]
self.landCoverObj = {}
for coverType in self.coverTypes:
self.landCoverObj[coverType] = lc.LandCover(
iniItems,
str(coverType) + "Options",
self.soil_topo_parameters[coverType],
self.landmask,
self.irrigationEfficiency,
self.cloneMap,
self.inputDir,
self.tmpDir,
self.stateDir,
self.usingAllocSegments,
)
# rescale landCover Fractions
# - by default, the land cover fraction will always be corrected (to ensure the total of all fractions = 1.0)
self.noLandCoverFractionCorrection = False
if "noLandCoverFractionCorrection" in configsection(
iniItems, "landSurfaceOptions"
): # iniItems.landSurfaceOptions.keys():
            # if iniItems.landSurfaceOptions["noLandCoverFractionCorrection"] == "True": self.noLandCoverFractionCorrection = True
if (
configget(
iniItems,
"landSurfaceOptions",
"noLandCoverFractionCorrection",
"False",
)
== "True"
):
self.noLandCoverFractionCorrection = True
# - rescaling land cover fractions
if self.noLandCoverFractionCorrection == False:
self.scaleNaturalLandCoverFractions()
if self.includeIrrigation:
self.scaleModifiedLandCoverFractions()
# an option to introduce changes of land cover parameters (not only fracVegCover)
self.noAnnualChangesInLandCoverParameter = True
if "annualChangesInLandCoverParameters" in configsection(
iniItems, "landSurfaceOptions"
):
# if iniItems.landSurfaceOptions['annualChangesInLandCoverParameters'] == "True": self.noAnnualChangesInLandCoverParameter = False
if (
configget(
iniItems,
"landSurfaceOptions",
"annualChangesInLandCoverParameters",
"False",
)
== "True"
):
self.noAnnualChangesInLandCoverParameter = False
# Note that "dynamicIrrigationArea" CANNOT be combined with "noLandCoverFractionCorrection"
if self.noLandCoverFractionCorrection:
self.dynamicIrrigationArea = False
# Also note that "noAnnualChangesInLandCoverParameter = False" must be followed by "noLandCoverFractionCorrection"
if (
self.noAnnualChangesInLandCoverParameter == False
and self.noLandCoverFractionCorrection == False
):
self.noLandCoverFractionCorrection = True
msg = "WARNING! No land cover fraction correction will be performed. Please make sure that the 'total' of all fracVegCover adds to one."
logger.warning(msg)
logger.warning(msg)
logger.warning(msg)
logger.warning(msg)
logger.warning(msg)
#########################################################################################################################################################################################
# 29 July 2014:
#
# If using historical/dynamic irrigation file (changing every year), we have to get fraction over irrigation area
# (in order to calculate irrigation area for each irrigation type)
#
# Note that: totalIrrAreaFrac = fraction irrigated areas (e.g. paddy + nonPaddy) over the entire cell area (dimensionless) ; this value changes (if self.dynamicIrrigationArea = True)
# irrTypeFracOverIrr = fraction each land cover type (paddy or nonPaddy) over the irrigation area (dimensionless) ; this value is constant for the entire simulation
#
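        # Worked example (illustrative numbers): if fracVegCover = 0.1 for irrPaddy
        # and 0.3 for irrNonPaddy, then totalIrrAreaFrac = 0.4 and
        # irrTypeFracOverIrr = 0.25 (paddy) and 0.75 (nonPaddy).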
if self.dynamicIrrigationArea:
logger.info("Determining fraction of total irrigated areas over each cell")
# Note that this is needed ONLY if historical irrigation areas are used (if self.dynamicIrrigationArea = True).
# total irrigated area fraction (over the entire cell)
totalIrrAreaFrac = 0.0
for coverType in self.coverTypes:
if coverType.startswith("irr"):
totalIrrAreaFrac += self.landCoverObj[coverType].fracVegCover
# fraction over irrigation area
for coverType in self.coverTypes:
if coverType.startswith("irr"):
self.landCoverObj[coverType].irrTypeFracOverIrr = vos.getValDivZero(
self.landCoverObj[coverType].fracVegCover,
totalIrrAreaFrac,
vos.smallNumber,
)
# get the initial conditions (for every land cover type)
self.getInitialConditions(iniItems, initialState)
        # initiate old style reporting (this is useful for debugging)
self.initiate_old_style_land_surface_reporting(iniItems)
# make iniItems available for the other methods/functions:
self.iniItems = iniItems
def initiate_old_style_land_surface_reporting(self, iniItems):
self.report = True
try:
self.outDailyTotNC = iniItems.get(
"landSurfaceOptions", "outDailyTotNC"
).split(",")
self.outMonthTotNC = iniItems.get(
"landSurfaceOptions", "outMonthTotNC"
).split(",")
self.outMonthAvgNC = iniItems.get(
"landSurfaceOptions", "outMonthAvgNC"
).split(",")
self.outMonthEndNC = iniItems.get(
"landSurfaceOptions", "outMonthEndNC"
).split(",")
self.outAnnuaTotNC = iniItems.get(
"landSurfaceOptions", "outAnnuaTotNC"
).split(",")
self.outAnnuaAvgNC = iniItems.get(
"landSurfaceOptions", "outAnnuaAvgNC"
).split(",")
self.outAnnuaEndNC = iniItems.get(
"landSurfaceOptions", "outAnnuaEndNC"
).split(",")
        except Exception:
self.report = False
if self.report == True:
            # include self.outNCDir in wflow_pcrglobwb?
self.outNCDir = vos.getFullPath(
"netcdf/", iniItems.get("globalOptions", "outputDir")
) # iniItems.outNCDir
self.netcdfObj = PCR2netCDF(iniItems)
#
# daily output in netCDF files:
if self.outDailyTotNC[0] != "None":
for var in self.outDailyTotNC:
# creating the netCDF files:
self.netcdfObj.createNetCDF(
str(self.outNCDir) + "/" + str(var) + "_dailyTot.nc",
var,
"undefined",
)
# MONTHly output in netCDF files:
            # - cumulative
if self.outMonthTotNC[0] != "None":
for var in self.outMonthTotNC:
# initiating monthlyVarTot (accumulator variable):
vars(self)[var + "MonthTot"] = None
# creating the netCDF files:
self.netcdfObj.createNetCDF(
str(self.outNCDir) + "/" + str(var) + "_monthTot.nc",
var,
"undefined",
)
# - average
if self.outMonthAvgNC[0] != "None":
for var in self.outMonthAvgNC:
# initiating monthlyTotAvg (accumulator variable)
vars(self)[var + "MonthTot"] = None
# initiating monthlyVarAvg:
vars(self)[var + "MonthAvg"] = None
# creating the netCDF files:
self.netcdfObj.createNetCDF(
str(self.outNCDir) + "/" + str(var) + "_monthAvg.nc",
var,
"undefined",
)
# - last day of the month
if self.outMonthEndNC[0] != "None":
for var in self.outMonthEndNC:
# creating the netCDF files:
self.netcdfObj.createNetCDF(
str(self.outNCDir) + "/" + str(var) + "_monthEnd.nc",
var,
"undefined",
)
# YEARly output in netCDF files:
            # - cumulative
if self.outAnnuaTotNC[0] != "None":
for var in self.outAnnuaTotNC:
# initiating yearly accumulator variable:
vars(self)[var + "AnnuaTot"] = None
# creating the netCDF files:
self.netcdfObj.createNetCDF(
str(self.outNCDir) + "/" + str(var) + "_annuaTot.nc",
var,
"undefined",
)
# - average
if self.outAnnuaAvgNC[0] != "None":
for var in self.outAnnuaAvgNC:
                    # initiating annualVarAvg:
vars(self)[var + "AnnuaAvg"] = None
                    # initiating annualTotAvg (accumulator variable)
vars(self)[var + "AnnuaTot"] = None
# creating the netCDF files:
self.netcdfObj.createNetCDF(
str(self.outNCDir) + "/" + str(var) + "_annuaAvg.nc",
var,
"undefined",
)
# - last day of the year
if self.outAnnuaEndNC[0] != "None":
for var in self.outAnnuaEndNC:
# creating the netCDF files:
self.netcdfObj.createNetCDF(
str(self.outNCDir) + "/" + str(var) + "_annuaEnd.nc",
var,
"undefined",
)
def getInitialConditions(self, iniItems, iniConditions=None):
# starting year in integer
starting_year = (
self.startTime.timetuple().tm_year
) # int(iniItems.get("run","starttime")[0:4])
#
# check if the run start at the first day of the year:
start_on_1_Jan = False
# if iniItems.get("run","starttime")[-5:] == "01-01": start_on_1_Jan = True:
if self.startTime.timetuple().tm_yday == 1 and self.startTime.month == 1:
start_on_1_Jan = True
# condition to consider previous year land cover fraction
consider_previous_year_land_cover_fraction = False
#######################################################################################################################################
# obtaining initial land cover fractions for runs with dynamicIrrigationArea
#
# For non spin-up runs that start at the first day of the year (1 January),
# - we have to consider the previous year land cover fractions, specifically if we consider the dynamic/expansion of irrigation areas
#
if (
iniConditions == None
and start_on_1_Jan == True
and self.dynamicIrrigationArea
and self.noLandCoverFractionCorrection == False
):
# obtain the previous year land cover fractions:
self.scaleDynamicIrrigation(
starting_year - 1
) # the previous year land cover fractions
consider_previous_year_land_cover_fraction = True
#
# For spin-up runs or for runs that start after 1 January,
# - we do not have to consider the previous year land cover fractions
#
if (
consider_previous_year_land_cover_fraction == False
and self.dynamicIrrigationArea
and self.noLandCoverFractionCorrection == False
):
# just using the current year land cover fractions:
self.scaleDynamicIrrigation(
starting_year
) # the current year land cover fractions
#
#################################################################################################################################
#######################################################################################################################################
# obtaining initial land cover fractions for runs with noLandCoverFractionCorrection and annualChangesInLandCoverParameters
#
# For non spin-up runs that start at the first day of the year (1 January),
# - we have to consider the previous year land cover fractions
#
if (
iniConditions == None
and start_on_1_Jan == True
and self.noLandCoverFractionCorrection
and self.noAnnualChangesInLandCoverParameter == False
):
# obtain the previous year land cover fractions:
previous_year = starting_year - 1
one_january_prev_year = str(previous_year) + "-01-01"
for coverType in self.coverTypes:
self.landCoverObj[coverType].previousFracVegCover = self.landCoverObj[
coverType
].get_land_cover_parameters(
date_in_string=one_january_prev_year, get_only_fracVegCover=True
)
####################################################################################################################################################################
# correcting land cover fractions
total_fractions = pcr.scalar(0.0)
for coverType in self.coverTypes:
total_fractions += self.landCoverObj[coverType].previousFracVegCover
if "grassland" in list(self.landCoverObj.keys()):
self.landCoverObj["grassland"].previousFracVegCover = pcr.ifthenelse(
total_fractions > 0.1,
self.landCoverObj["grassland"].previousFracVegCover,
1.0,
)
if "short_natural" in list(self.landCoverObj.keys()):
self.landCoverObj[
"short_natural"
].previousFracVegCover = pcr.ifthenelse(
total_fractions > 0.1,
self.landCoverObj["short_natural"].previousFracVegCover,
1.0,
)
total_fractions = pcr.scalar(0.0)
for coverType in self.coverTypes:
total_fractions += self.landCoverObj[coverType].previousFracVegCover
for coverType in self.coverTypes:
self.landCoverObj[coverType].previousFracVegCover = (
self.landCoverObj[coverType].previousFracVegCover / total_fractions
)
####################################################################################################################################################################
consider_previous_year_land_cover_fraction = True
# For spin-up runs or for runs that start after 1 January,
# - we do not have to consider the previous year land cover fractions
#
if (
consider_previous_year_land_cover_fraction == False
and self.noLandCoverFractionCorrection
and self.noAnnualChangesInLandCoverParameter == False
):
# just using the current year land cover fractions:
one_january_this_year = str(starting_year) + "-01-01"
for coverType in self.coverTypes:
self.landCoverObj[coverType].previousFracVegCover = self.landCoverObj[
coverType
].get_land_cover_parameters(
date_in_string=one_january_this_year, get_only_fracVegCover=True
)
####################################################################################################################################################################
# correcting land cover fractions
total_fractions = pcr.scalar(0.0)
for coverType in self.coverTypes:
total_fractions += self.landCoverObj[coverType].previousFracVegCover
if "grassland" in list(self.landCoverObj.keys()):
self.landCoverObj["grassland"].previousFracVegCover = pcr.ifthenelse(
total_fractions > 0.1,
self.landCoverObj["grassland"].previousFracVegCover,
1.0,
)
if "short_natural" in list(self.landCoverObj.keys()):
self.landCoverObj[
"short_natural"
].previousFracVegCover = pcr.ifthenelse(
total_fractions > 0.1,
self.landCoverObj["short_natural"].previousFracVegCover,
1.0,
)
total_fractions = pcr.scalar(0.0)
for coverType in self.coverTypes:
total_fractions += self.landCoverObj[coverType].previousFracVegCover
for coverType in self.coverTypes:
self.landCoverObj[coverType].previousFracVegCover = (
self.landCoverObj[coverType].previousFracVegCover / total_fractions
)
####################################################################################################################################################################
# get initial conditions
# - first, we set all aggregated states to zero (only the ones in mainStates):
for var in self.mainStates:
vars(self)[var] = pcr.scalar(0.0)
# - then we initiate them in the following loop of land cover types:
for coverType in self.coverTypes:
if iniConditions != None:
self.landCoverObj[coverType].getICsLC(
iniItems, iniConditions["landSurface"][coverType]
)
else:
self.landCoverObj[coverType].getICsLC(iniItems)
# summarize/aggregate the initial states/storages (using the initial land cover fractions: previousFracVegCover)
for var in self.mainStates:
# - initial land cover fractions (dimensionless)
if isinstance(
self.landCoverObj[coverType].previousFracVegCover, type(None)
):
self.landCoverObj[
coverType
].previousFracVegCover = self.landCoverObj[coverType].fracVegCover
land_cover_fraction = self.landCoverObj[coverType].previousFracVegCover
# - initial land cover states (unit: m)
land_cover_states = vars(self.landCoverObj[coverType])[var]
vars(self)[var] += land_cover_states * land_cover_fraction
def waterDemandOptions(self, iniItems):
# domestic water demand (unit: m/day)
#
self.domesticWaterDemandOption = False
if (
configget(
iniItems, "landSurfaceOptions", "includeDomesticWaterDemand", "False"
)
== "True"
):
logger.info("Domestic water demand is included in the calculation.")
self.domesticWaterDemandOption = True
else:
logger.info("Domestic water demand is NOT included in the calculation.")
#
if self.domesticWaterDemandOption:
self.domesticWaterDemandFile = vos.getFullPath(
configget(
iniItems, "landSurfaceOptions", "domesticWaterDemandFile", "None"
),
self.inputDir,
False,
)
# industry water demand (unit: m/day)
#
self.industryWaterDemandOption = False
if (
configget(
iniItems, "landSurfaceOptions", "includeIndustryWaterDemand", "False"
)
== "True"
):
logger.info("Industry water demand is included in the calculation.")
self.industryWaterDemandOption = True
else:
logger.info("Industry water demand is NOT included in the calculation.")
#
if self.industryWaterDemandOption:
self.industryWaterDemandFile = vos.getFullPath(
configget(
iniItems, "landSurfaceOptions", "industryWaterDemandFile", "None"
),
self.inputDir,
False,
)
# livestock water demand (unit: m/day)
self.livestockWaterDemandOption = False
if (
configget(
iniItems, "landSurfaceOptions", "includeLivestockWaterDemand", "False"
)
== "True"
):
logger.info("Livestock water demand is included in the calculation.")
self.livestockWaterDemandOption = True
else:
logger.info("Livestock water demand is NOT included in the calculation.")
#
if self.livestockWaterDemandOption:
self.livestockWaterDemandFile = vos.getFullPath(
configget(
iniItems, "landSurfaceOptions", "livestockWaterDemandFile", "None"
),
self.inputDir,
False,
)
        # historical irrigation area (unit: hectare)
self.dynamicIrrigationArea = False
if (
configget(
iniItems, "landSurfaceOptions", "historicalIrrigationArea", "None"
)
!= "None"
):
logger.info(
"Using the dynamicIrrigationArea option. Extent of irrigation areas is based on the file provided in the 'historicalIrrigationArea'."
)
self.dynamicIrrigationArea = True
#
if self.dynamicIrrigationArea:
self.dynamicIrrigationAreaFile = vos.getFullPath(
configget(
iniItems, "landSurfaceOptions", "historicalIrrigationArea", "None"
),
self.inputDir,
False,
)
# irrigation efficiency map (in percentage) # TODO: Using the time series of efficiency (considering historical technological development).
self.irrigationEfficiency = vos.readPCRmapClone(
configget(iniItems, "landSurfaceOptions", "irrigationEfficiency", "1.00"),
self.cloneMap,
self.tmpDir,
self.inputDir,
)
# extrapolate efficiency map: # TODO: Make a better extrapolation algorithm (considering cell size, etc.).
window_size = 1.25 * pcr.clone().cellSize()
window_size = pcr.min(
window_size,
pcr.min(pcr.scalar(pcr.clone().nrRows()), pcr.scalar(pcr.clone().nrCols()))
* pcr.clone().cellSize(),
)
        try:
            # iteratively fill cells with missing efficiency values using
            # neighbourhood averages: five passes at the base window size,
            # followed by increasingly coarse windows
            for window in [window_size] * 5 + [0.75, 1.00, 1.50]:
                self.irrigationEfficiency = pcr.cover(
                    self.irrigationEfficiency,
                    pcr.windowaverage(self.irrigationEfficiency, window),
                )
        except Exception:
            pass
# ~ self.irrigationEfficiency = pcr.ifthen(self.landmask, self.irrigationEfficiency)
self.irrigationEfficiency = pcr.cover(self.irrigationEfficiency, 1.0)
self.irrigationEfficiency = pcr.max(0.1, self.irrigationEfficiency)
self.irrigationEfficiency = pcr.ifthen(self.landmask, self.irrigationEfficiency)
# desalination water supply option
self.includeDesalination = False
if configget(
iniItems, "landSurfaceOptions", "desalinationWater", "False"
) not in ["None", "False"]:
logger.info("Monthly desalination water is included.")
self.includeDesalination = True
self.desalinationWaterFile = vos.getFullPath(
configget(iniItems, "landSurfaceOptions", "desalinationWater", "None"),
self.inputDir,
)
else:
logger.info("Monthly desalination water is NOT included.")
# zones at which water allocation (surface and groundwater allocation) is determined
self.usingAllocSegments = False
self.allocSegments = None
if (
configget(
iniItems,
"landSurfaceOptions",
"allocationSegmentsForGroundSurfaceWater",
"None",
)
!= "None"
):
self.usingAllocSegments = True
self.allocSegments = vos.readPCRmapClone(
configget(
iniItems,
"landSurfaceOptions",
"allocationSegmentsForGroundSurfaceWater",
"None",
),
self.cloneMap,
self.tmpDir,
self.inputDir,
isLddMap=False,
cover=None,
isNomMap=True,
)
self.allocSegments = pcr.ifthen(self.landmask, self.allocSegments)
cellArea = vos.readPCRmapClone(
iniItems.get("routingOptions", "cellAreaMap"),
self.cloneMap,
self.tmpDir,
self.inputDir,
)
cellArea = pcr.ifthen(self.landmask, cellArea)
self.segmentArea = pcr.areatotal(
pcr.cover(cellArea, 0.0), self.allocSegments
)
self.segmentArea = pcr.ifthen(self.landmask, self.segmentArea)
else:
logger.info(
"If there is any, water demand is satisfied by local source only."
)
def scaleNaturalLandCoverFractions(self):
""" rescales natural land cover fractions (make sure the total = 1)"""
# total land cover fractions
pristineAreaFrac = 0.0
numb_of_lc_types = 0.0
for coverType in self.coverTypes:
if not coverType.startswith("irr"):
pristineAreaFrac += pcr.cover(
self.landCoverObj[coverType].fracVegCover, 0.0
)
numb_of_lc_types += 1.0
        # Fill cells where pristineAreaFrac < 0.0, using window-average values within 0.5 and 1.5 degree windows
for coverType in self.coverTypes:
if not coverType.startswith("irr"):
filled_fractions = pcr.windowaverage(
self.landCoverObj[coverType].fracVegCover, 0.5
)
filled_fractions = pcr.cover(
filled_fractions,
pcr.windowaverage(self.landCoverObj[coverType].fracVegCover, 1.5),
)
filled_fractions = pcr.max(0.0, filled_fractions)
filled_fractions = pcr.min(1.0, filled_fractions)
self.landCoverObj[coverType].fracVegCover = pcr.ifthen(
pristineAreaFrac >= 0.0, self.landCoverObj[coverType].fracVegCover
)
self.landCoverObj[coverType].fracVegCover = pcr.cover(
self.landCoverObj[coverType].fracVegCover, filled_fractions
)
self.landCoverObj[coverType].fracVegCover = pcr.ifthen(
self.landmask, self.landCoverObj[coverType].fracVegCover
)
# re-check total land cover fractions
pristineAreaFrac = 0.0
numb_of_lc_types = 0.0
for coverType in self.coverTypes:
if not coverType.startswith("irr"):
pristineAreaFrac += pcr.cover(
self.landCoverObj[coverType].fracVegCover, 0.0
)
numb_of_lc_types += 1.0
# Fill cells with pristineAreaFrac = 0.0:
self.landCoverObj["forest"].fracVegCover = pcr.ifthenelse(
pristineAreaFrac > 0.0, self.landCoverObj["forest"].fracVegCover, 0.0
)
self.landCoverObj["forest"].fracVegCover = pcr.min(
1.0, self.landCoverObj["forest"].fracVegCover
)
self.landCoverObj["grassland"].fracVegCover = (
1.0 - self.landCoverObj["forest"].fracVegCover
)
# recalculate total land cover fractions
pristineAreaFrac = 0.0
for coverType in self.coverTypes:
if not coverType.startswith("irr"):
pristineAreaFrac += pcr.cover(
self.landCoverObj[coverType].fracVegCover, 0.0
)
# correcting
for coverType in self.coverTypes:
if not coverType.startswith("irr"):
self.landCoverObj[coverType].fracVegCover = (
self.landCoverObj[coverType].fracVegCover / pristineAreaFrac
)
pristineAreaFrac = 0.0 # reset
#
# checking pristineAreaFrac (must be equal to 1)
for coverType in self.coverTypes:
if not coverType.startswith("irr"):
pristineAreaFrac += self.landCoverObj[coverType].fracVegCover
self.landCoverObj[coverType].naturalFracVegCover = self.landCoverObj[
coverType
].fracVegCover
#
# check and make sure that totalArea = 1.0 for all cells
totalArea = pristineAreaFrac
totalArea = pcr.ifthen(self.landmask, totalArea)
totalArea = pcr.cover(totalArea, 1.0)
check_map = totalArea - pcr.scalar(1.0)
a, b, c = vos.getMinMaxMean(check_map)
threshold = 1e-4
if abs(a) > threshold or abs(b) > threshold:
logger.error(
"total of 'Natural Area' fractions is not equal to 1.0 ... Min %f Max %f Mean %f"
% (a, b, c)
)
def scaleModifiedLandCoverFractions(self):
""" rescales the land cover fractions with irrigation areas"""
# calculate irrigatedAreaFrac (fraction of irrigation areas)
irrigatedAreaFrac = pcr.spatial(pcr.scalar(0.0))
for coverType in self.coverTypes:
if coverType.startswith("irr"):
irrigatedAreaFrac = (
irrigatedAreaFrac + self.landCoverObj[coverType].fracVegCover
)
# correcting/scaling fracVegCover of irrigation if irrigatedAreaFrac > 1
for coverType in self.coverTypes:
if coverType.startswith("irr"):
self.landCoverObj[coverType].fracVegCover = pcr.ifthenelse(
irrigatedAreaFrac > 1.0,
self.landCoverObj[coverType].fracVegCover / irrigatedAreaFrac,
self.landCoverObj[coverType].fracVegCover,
)
# the corrected irrigated area fraction
irrigatedAreaFrac = pcr.spatial(pcr.scalar(0.0))
for coverType in self.coverTypes:
if coverType.startswith("irr"):
irrigatedAreaFrac += self.landCoverObj[coverType].fracVegCover
totalArea = pcr.spatial(pcr.scalar(0.0))
totalArea += irrigatedAreaFrac
# correction factor for forest and grassland (pristine Areas)
lcFrac = pcr.max(0.0, 1.0 - totalArea)
pristineAreaFrac = pcr.spatial(pcr.scalar(0.0))
for coverType in self.coverTypes:
if not coverType.startswith("irr"):
self.landCoverObj[coverType].fracVegCover = 0.0
self.landCoverObj[coverType].fracVegCover = (
self.landCoverObj[coverType].naturalFracVegCover * lcFrac
)
pristineAreaFrac += pcr.cover(
self.landCoverObj[coverType].fracVegCover, 0.0
)
# check and make sure that totalArea = 1.0 for all cells
totalArea += pristineAreaFrac
totalArea = pcr.ifthen(self.landmask, totalArea)
totalArea = pcr.cover(totalArea, 1.0)
totalArea = pcr.ifthen(self.landmask, totalArea)
a, b, c = vos.getMinMaxMean(totalArea - pcr.scalar(1.0))
threshold = 1e-4
if abs(a) > threshold or abs(b) > threshold:
logger.error(
"fraction total (from all land cover types) is not equal to 1.0 ... Min %f Max %f Mean %f"
% (a, b, c)
)
def obtainNonIrrWaterDemand(self, routing, currTimeStep):
# get NON-Irrigation GROSS water demand and its return flow fraction
# domestic water demand
if currTimeStep.timeStepPCR == 1 or currTimeStep.day == 1:
if self.domesticWaterDemandOption:
#
if self.domesticWaterDemandFile.endswith(vos.netcdf_suffixes):
#
self.domesticGrossDemand = pcr.max(
0.0,
pcr.cover(
vos.netcdf2PCRobjClone(
self.domesticWaterDemandFile,
"domesticGrossDemand",
currTimeStep.fulldate,
useDoy="monthly",
cloneMapFileName=self.cloneMap,
),
0.0,
),
)
#
self.domesticNettoDemand = pcr.max(
0.0,
pcr.cover(
vos.netcdf2PCRobjClone(
self.domesticWaterDemandFile,
"domesticNettoDemand",
currTimeStep.fulldate,
useDoy="monthly",
cloneMapFileName=self.cloneMap,
),
0.0,
),
)
else:
string_month = str(currTimeStep.month)
if currTimeStep.month < 10:
string_month = "0" + str(currTimeStep.month)
grossFileName = (
self.domesticWaterDemandFile
+ "w"
+ str(currTimeStep.year)
+ ".0"
+ string_month
)
self.domesticGrossDemand = pcr.max(
pcr.cover(
vos.readPCRmapClone(
grossFileName, self.cloneMap, self.tmpDir
),
0.0,
),
0.0,
)
nettoFileName = (
self.domesticWaterDemandFile
+ "n"
+ str(currTimeStep.year)
+ ".0"
+ string_month
)
self.domesticNettoDemand = pcr.max(
pcr.cover(
vos.readPCRmapClone(
nettoFileName, self.cloneMap, self.tmpDir
),
0.0,
),
0.0,
)
else:
self.domesticGrossDemand = pcr.scalar(0.0)
self.domesticNettoDemand = pcr.scalar(0.0)
logger.debug("Domestic water demand is NOT included.")
# gross and netto domestic water demand in m/day
self.domesticGrossDemand = pcr.cover(self.domesticGrossDemand, 0.0)
self.domesticNettoDemand = pcr.cover(self.domesticNettoDemand, 0.0)
self.domesticNettoDemand = pcr.min(
self.domesticGrossDemand, self.domesticNettoDemand
)
# industry water demand
if currTimeStep.timeStepPCR == 1 or currTimeStep.day == 1:
if self.industryWaterDemandOption:
#
if self.industryWaterDemandFile.endswith(vos.netcdf_suffixes):
#
self.industryGrossDemand = pcr.max(
0.0,
pcr.cover(
vos.netcdf2PCRobjClone(
self.industryWaterDemandFile,
"industryGrossDemand",
currTimeStep.fulldate,
useDoy="monthly",
cloneMapFileName=self.cloneMap,
),
0.0,
),
)
#
self.industryNettoDemand = pcr.max(
0.0,
pcr.cover(
vos.netcdf2PCRobjClone(
self.industryWaterDemandFile,
"industryNettoDemand",
currTimeStep.fulldate,
useDoy="monthly",
cloneMapFileName=self.cloneMap,
),
0.0,
),
)
else:
grossFileName = (
self.industryWaterDemandFile
+ "w"
+ str(currTimeStep.year)
+ ".map"
)
self.industryGrossDemand = pcr.max(
0.0,
pcr.cover(
vos.readPCRmapClone(
grossFileName, self.cloneMap, self.tmpDir
),
0.0,
),
)
nettoFileName = (
self.industryWaterDemandFile
+ "n"
+ str(currTimeStep.year)
+ ".map"
)
self.industryNettoDemand = pcr.max(
0.0,
pcr.cover(
vos.readPCRmapClone(
nettoFileName, self.cloneMap, self.tmpDir
),
0.0,
),
)
else:
self.industryGrossDemand = pcr.scalar(0.0)
self.industryNettoDemand = pcr.scalar(0.0)
logger.debug("Industry water demand is NOT included.")
# gross and netto industrial water demand in m/day
self.industryGrossDemand = pcr.cover(self.industryGrossDemand, 0.0)
self.industryNettoDemand = pcr.cover(self.industryNettoDemand, 0.0)
self.industryNettoDemand = pcr.min(
self.industryGrossDemand, self.industryNettoDemand
)
# livestock water demand
if currTimeStep.timeStepPCR == 1 or currTimeStep.day == 1:
if self.livestockWaterDemandOption:
#
if self.livestockWaterDemandFile.endswith(vos.netcdf_suffixes):
#
self.livestockGrossDemand = pcr.max(
0.0,
pcr.cover(
vos.netcdf2PCRobjClone(
self.livestockWaterDemandFile,
"livestockGrossDemand",
currTimeStep.fulldate,
useDoy="monthly",
cloneMapFileName=self.cloneMap,
),
0.0,
),
)
#
self.livestockNettoDemand = pcr.max(
0.0,
pcr.cover(
vos.netcdf2PCRobjClone(
self.livestockWaterDemandFile,
"livestockNettoDemand",
currTimeStep.fulldate,
useDoy="monthly",
cloneMapFileName=self.cloneMap,
),
0.0,
),
)
else:
string_month = str(currTimeStep.month)
if currTimeStep.month < 10:
string_month = "0" + str(currTimeStep.month)
grossFileName = (
self.livestockWaterDemandFile
+ "w"
+ str(currTimeStep.year)
+ ".0"
+ string_month
)
self.livestockGrossDemand = pcr.max(
pcr.cover(
vos.readPCRmapClone(
grossFileName, self.cloneMap, self.tmpDir
),
0.0,
),
0.0,
)
nettoFileName = (
self.livestockWaterDemandFile
+ "n"
+ str(currTimeStep.year)
+ ".0"
+ string_month
)
self.livestockNettoDemand = pcr.max(
pcr.cover(
vos.readPCRmapClone(
nettoFileName, self.cloneMap, self.tmpDir
),
0.0,
),
0.0,
)
else:
self.livestockGrossDemand = pcr.scalar(0.0)
self.livestockNettoDemand = pcr.scalar(0.0)
logger.debug("Livestock water demand is NOT included.")
# gross and netto livestock water demand in m/day
self.livestockGrossDemand = pcr.cover(self.livestockGrossDemand, 0.0)
self.livestockNettoDemand = pcr.cover(self.livestockNettoDemand, 0.0)
self.livestockNettoDemand = pcr.min(
self.livestockGrossDemand, self.livestockNettoDemand
)
# GROSS domestic, industrial and livestock water demands (unit: m/day)
self.domesticGrossDemand = pcr.ifthen(self.landmask, self.domesticGrossDemand)
self.domesticNettoDemand = pcr.ifthen(self.landmask, self.domesticNettoDemand)
self.industryGrossDemand = pcr.ifthen(self.landmask, self.industryGrossDemand)
self.industryNettoDemand = pcr.ifthen(self.landmask, self.industryNettoDemand)
self.livestockGrossDemand = pcr.ifthen(self.landmask, self.livestockGrossDemand)
self.livestockNettoDemand = pcr.ifthen(self.landmask, self.livestockNettoDemand)
# RETURN FLOW fractions for domestic, industrial and livestock water demands (unit: fraction/percentage)
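        # Worked example: a domestic gross demand of 0.002 m/day with a netto
        # (consumptive) demand of 0.0005 m/day gives a return flow fraction of
        # 1.0 - 0.0005/0.002 = 0.75.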
self.domesticReturnFlowFraction = pcr.min(
1.0,
pcr.max(
0.0,
1.0
- vos.getValDivZero(self.domesticNettoDemand, self.domesticGrossDemand),
),
)
self.industryReturnFlowFraction = pcr.min(
1.0,
pcr.max(
0.0,
1.0
- vos.getValDivZero(self.industryNettoDemand, self.industryGrossDemand),
),
)
self.livestockReturnFlowFraction = pcr.min(
1.0,
pcr.max(
0.0,
1.0
- vos.getValDivZero(
self.livestockNettoDemand, self.livestockGrossDemand
),
),
)
# make a dictionary summarizing potential demand (potential withdrawal) and its return flow fraction
nonIrrigationWaterDemandDict = {}
nonIrrigationWaterDemandDict["potential_demand"] = {}
nonIrrigationWaterDemandDict["potential_demand"][
"domestic"
] = self.domesticGrossDemand
nonIrrigationWaterDemandDict["potential_demand"][
"industry"
] = self.industryGrossDemand
nonIrrigationWaterDemandDict["potential_demand"][
"livestock"
] = self.livestockGrossDemand
nonIrrigationWaterDemandDict["return_flow_fraction"] = {}
nonIrrigationWaterDemandDict["return_flow_fraction"]["domestic"] = pcr.cover(
pcr.min(
1.0, pcr.roundup(self.domesticReturnFlowFraction * 1000.0) / 1000.0
),
1.0,
)
nonIrrigationWaterDemandDict["return_flow_fraction"]["industry"] = pcr.cover(
pcr.min(
1.0, pcr.roundup(self.industryReturnFlowFraction * 1000.0) / 1000.0
),
1.0,
)
nonIrrigationWaterDemandDict["return_flow_fraction"]["livestock"] = pcr.cover(
pcr.min(
1.0, pcr.roundup(self.livestockReturnFlowFraction * 1000.0) / 1000.0
),
1.0,
)
return nonIrrigationWaterDemandDict
def calculateCapRiseFrac(self, groundwater, routing, currTimeStep):
# calculate cell fraction influenced by capillary rise:
# relative groundwater head (m) above the minimum elevation within a grid cell
if groundwater.useMODFLOW == True:
dzGroundwater = groundwater.relativeGroundwaterHead
# update dzGroundwater from file, from modflow calculation, using the previous time step
# - assumption that it will be updated once every month
if currTimeStep.day == 1 and currTimeStep.timeStepPCR > 1:
# for online coupling, we will read files from pcraster maps
# directory = self.iniItems.main_output_directory + "/modflow/transient/maps/"
                directory = (
                    self.iniItems.get("globalOptions", "outputDir")
                    + "/modflow/transient/maps/"
                )
# - relative groundwater head from MODFLOW
yesterday = str(currTimeStep.yesterday())
filename = (
directory + "relativeGroundwaterHead_" + str(yesterday) + ".map"
)
dzGroundwater = pcr.ifthen(
self.landmask,
pcr.cover(
vos.readPCRmapClone(filename, self.cloneMap, self.tmpDir), 0.0
),
)
else:
dzGroundwater = groundwater.storGroundwater / groundwater.specificYield
# add some tolerance/influence level (unit: m)
dzGroundwater += self.soil_topo_parameters["default"].maxGWCapRise
# set minimum value to zero (zero relativeGroundwaterHead indicate no capRiseFrac)
dzGroundwater = pcr.max(0.0, dzGroundwater)
# approximate cell fraction under influence of capillary rise
FRACWAT = pcr.scalar(0.0)
if currTimeStep.timeStepPCR > 1:
FRACWAT = pcr.cover(routing.WaterBodies.fracWat, 0.0)
else:
if routing.includeWaterBodies:
if routing.WaterBodies.useNetCDF:
routing.WaterBodies.fracWat = vos.netcdf2PCRobjClone(
routing.WaterBodies.ncFileInp,
"fracWaterInp",
currTimeStep.fulldate,
useDoy="yearly",
cloneMapFileName=self.cloneMap,
)
else:
routing.WaterBodies.fracWat = vos.readPCRmapClone(
routing.WaterBodies.fracWaterInp
+ str(currTimeStep.year)
+ ".map",
self.cloneMap,
self.tmpDir,
self.inputDir,
)
FRACWAT = pcr.cover(FRACWAT, 0.0)
# zero fracwat assumption used for debugging against version 1.0
if routing.zeroFracWatAllAndAlways:
FRACWAT = pcr.scalar(0.0)
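        # The block below linearly interpolates CRFRAC between the relative
        # elevation percentile maps (dzRel0001 ... dzRel0100): e.g. a relative
        # groundwater head between dzRel0040 and dzRel0050 yields a capillary
        # rise fraction between 0.4 and 0.5.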
CRFRAC = pcr.min(
1.0,
1.0
- (self.soil_topo_parameters["default"].dzRel0100 - dzGroundwater)
* 0.1
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0100
- self.soil_topo_parameters["default"].dzRel0090,
),
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0090,
0.9
- (self.soil_topo_parameters["default"].dzRel0090 - dzGroundwater)
* 0.1
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0090
- self.soil_topo_parameters["default"].dzRel0080,
),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0080,
0.8
- (self.soil_topo_parameters["default"].dzRel0080 - dzGroundwater)
* 0.1
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0080
- self.soil_topo_parameters["default"].dzRel0070,
),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0070,
0.7
- (self.soil_topo_parameters["default"].dzRel0070 - dzGroundwater)
* 0.1
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0070
- self.soil_topo_parameters["default"].dzRel0060,
),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0060,
0.6
- (self.soil_topo_parameters["default"].dzRel0060 - dzGroundwater)
* 0.1
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0060
- self.soil_topo_parameters["default"].dzRel0050,
),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0050,
0.5
- (self.soil_topo_parameters["default"].dzRel0050 - dzGroundwater)
* 0.1
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0050
- self.soil_topo_parameters["default"].dzRel0040,
),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0040,
0.4
- (self.soil_topo_parameters["default"].dzRel0040 - dzGroundwater)
* 0.1
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0040
- self.soil_topo_parameters["default"].dzRel0030,
),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0030,
0.3
- (self.soil_topo_parameters["default"].dzRel0030 - dzGroundwater)
* 0.1
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0030
- self.soil_topo_parameters["default"].dzRel0020,
),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0020,
0.2
- (self.soil_topo_parameters["default"].dzRel0020 - dzGroundwater)
* 0.1
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0020
- self.soil_topo_parameters["default"].dzRel0010,
),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0010,
0.1
- (self.soil_topo_parameters["default"].dzRel0010 - dzGroundwater)
* 0.05
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0010
- self.soil_topo_parameters["default"].dzRel0005,
),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0005,
0.05
- (self.soil_topo_parameters["default"].dzRel0005 - dzGroundwater)
* 0.04
/ pcr.max(
0.001,
self.soil_topo_parameters["default"].dzRel0005
- self.soil_topo_parameters["default"].dzRel0001,
),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
dzGroundwater < self.soil_topo_parameters["default"].dzRel0001,
0.01
- (self.soil_topo_parameters["default"].dzRel0001 - dzGroundwater)
* 0.01
/ pcr.max(0.001, self.soil_topo_parameters["default"].dzRel0001),
CRFRAC,
)
CRFRAC = pcr.ifthenelse(
FRACWAT < 1.0, pcr.max(0.0, CRFRAC - FRACWAT) / (1.0 - FRACWAT), 0.0
)
capRiseFrac = pcr.max(0.0, pcr.min(1.0, CRFRAC))
# ~ capRiseFrac = 0.0
return capRiseFrac
def partitioningGroundSurfaceAbstraction(self, groundwater, routing):
# partitioning abstraction sources: groundwater and surface water
# de Graaf et al., 2014 principle: partitioning based on local average baseflow (m3/s) and upstream average discharge (m3/s)
# - estimates of fractions of groundwater and surface water abstractions
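        # Worked example (illustrative numbers): with avgBaseflow = 2 m3/s and an
        # upstream avgDischarge of 8 m3/s, swAbstractionFraction = 8/(8+2) = 0.8,
        # i.e. 80% of the withdrawal is assigned to surface water and 20% to
        # groundwater.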
averageBaseflowInput = routing.avgBaseflow
averageUpstreamInput = pcr.max(
routing.avgDischarge,
pcr.cover(pcr.upstream(routing.lddMap, routing.avgDischarge), 0.0),
)
if self.usingAllocSegments:
averageBaseflowInput = pcr.max(
0.0, pcr.ifthen(self.landmask, averageBaseflowInput)
)
averageUpstreamInput = pcr.max(
0.0, pcr.ifthen(self.landmask, averageUpstreamInput)
)
averageBaseflowInput = pcr.cover(
pcr.areaaverage(averageBaseflowInput, self.allocSegments), 0.0
)
averageUpstreamInput = pcr.cover(
pcr.areamaximum(averageUpstreamInput, self.allocSegments), 0.0
)
else:
logger.debug("Water demand can only be satisfied by local source.")
swAbstractionFraction = vos.getValDivZero(
averageUpstreamInput,
averageUpstreamInput + averageBaseflowInput,
vos.smallNumber,
)
swAbstractionFraction = pcr.roundup(swAbstractionFraction * 100.0) / 100.0
swAbstractionFraction = pcr.max(0.0, swAbstractionFraction)
swAbstractionFraction = pcr.min(1.0, swAbstractionFraction)
if self.usingAllocSegments:
swAbstractionFraction = pcr.areamaximum(
swAbstractionFraction, self.allocSegments
)
swAbstractionFraction = pcr.cover(swAbstractionFraction, 1.0)
swAbstractionFraction = pcr.ifthen(self.landmask, swAbstractionFraction)
# making a dictionary containing the surface water fraction for various purpose
swAbstractionFractionDict = {}
# - the default estimate (based on de Graaf et al., 2014)
swAbstractionFractionDict["estimate"] = swAbstractionFraction
# - for irrigation and livestock purpose
swAbstractionFractionDict["irrigation"] = swAbstractionFraction
# - for industrial and domestic purpose
swAbstractionFractionDict["max_for_non_irrigation"] = swAbstractionFraction
#
        # - a threshold fraction value to optimize/maximize surface water withdrawal for irrigation
        #   Principle: areas with swAbstractionFractionDict['irrigation'] above this threshold will prioritize surface water use for irrigation purposes.
        #              A zero threshold value disables this principle.
swAbstractionFractionDict[
"treshold_to_maximize_irrigation_surface_water"
] = self.treshold_to_maximize_irrigation_surface_water
#
        # - a threshold fraction value to minimize fossil groundwater withdrawal, particularly to remove the unrealistic areas of fossil groundwater abstraction
        #   Principle: areas with swAbstractionFractionDict['irrigation'] above this threshold will not extract fossil groundwater.
swAbstractionFractionDict[
"treshold_to_minimize_fossil_groundwater_irrigation"
] = self.treshold_to_minimize_fossil_groundwater_irrigation
# if defined, incorporating the pre-defined fraction of surface water sources (e.g. based on Siebert et al., 2014 and McDonald et al., 2014)
if not isinstance(self.swAbstractionFractionData, type(None)):
logger.debug(
"Using/incorporating the predefined fractions of surface water source."
)
swAbstractionFractionDict["estimate"] = swAbstractionFraction
swAbstractionFractionDict[
"irrigation"
] = self.partitioningGroundSurfaceAbstractionForIrrigation(
swAbstractionFraction,
self.swAbstractionFractionData,
self.swAbstractionFractionDataQuality,
)
swAbstractionFractionDict[
"max_for_non_irrigation"
] = self.maximumNonIrrigationSurfaceWaterAbstractionFractionData
else:
logger.debug(
"NOT using/incorporating the predefined fractions of surface water source."
)
return swAbstractionFractionDict
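    # Editor's note -- illustrative sketch, not part of the original model code.
    # Per cell, the de Graaf et al. (2014) partitioning above reduces to the
    # scalar rule below (math.ceil mimics pcr.roundup to two decimals):
    def surface_water_fraction_sketch(avg_upstream_discharge, avg_baseflow):
        import math  # local import keeps the sketch self-contained
        total = avg_upstream_discharge + avg_baseflow
        frac = avg_upstream_discharge / total if total > 0.0 else 0.0
        frac = math.ceil(frac * 100.0) / 100.0  # round up to two decimals
        return min(1.0, max(0.0, frac))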
def partitioningGroundSurfaceAbstractionForIrrigation(
self,
swAbstractionFractionEstimate,
swAbstractionFractionData,
swAbstractionFractionDataQuality,
):
# surface water source fraction based on Stefan Siebert's map:
factor = (
0.5
) # using this factor, the minimum value for the following 'data_weight_value' is 0.75 (for swAbstractionFractionDataQuality == 5)
data_weight_value = (
pcr.scalar(1.0)
- (pcr.min(5.0, pcr.max(0.0, swAbstractionFractionDataQuality)) / 10.0)
* factor
)
swAbstractionFractionForIrrigation = (
data_weight_value * swAbstractionFractionData
+ (1.0 - data_weight_value) * swAbstractionFractionEstimate
)
swAbstractionFractionForIrrigation = pcr.cover(
swAbstractionFractionForIrrigation, swAbstractionFractionEstimate
)
swAbstractionFractionForIrrigation = pcr.cover(
swAbstractionFractionForIrrigation, 1.0
)
swAbstractionFractionForIrrigation = pcr.ifthen(
self.landmask, swAbstractionFractionForIrrigation
)
return swAbstractionFractionForIrrigation
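    # Editor's note -- illustrative sketch, not part of the original model code.
    # The weighting above blends the mapped fraction with the model estimate;
    # with factor = 0.5 and quality codes 0..5, the data weight spans 1.0
    # (code 0) down to 0.75 (code 5):
    def blended_irrigation_fraction_sketch(estimate, data, quality, factor=0.5):
        weight = 1.0 - (min(5.0, max(0.0, quality)) / 10.0) * factor
        return weight * data + (1.0 - weight) * estimate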
def scaleDynamicIrrigation(self, yearInInteger):
        # This method updates fracVegCover of landCover for historical irrigation areas (done on a yearly basis).
# ~ # Available datasets are only from 1960 to 2010 (status on 24 September 2010)
# ~ yearInInteger = int(yearInInteger)
# ~ if float(yearInInteger) < 1960. or float(yearInInteger) > 2010.:
# ~ msg = 'Dataset for the year '+str(yearInInteger)+" is not available. Dataset of historical irrigation areas is only available from 1960 to 2010."
# ~ logger.warning(msg)
# ~ yearInInteger = min(2010, max(1960, yearInInteger))
#
        # TODO: Generally, the aforementioned lines are not needed, as the functions "findLastYearInNCTime" and "findFirstYearInNCTime" are defined in the module virtualOS.py.
        #       However, Niko still needs them for his DA scheme, as his DA scheme somehow cannot handle the netcdf file of historical irrigation areas (and therefore pcraster map files have to be used).
yearInString = str(yearInInteger)
# read historical irrigation areas
if self.dynamicIrrigationAreaFile.endswith((".nc4", ".nc")):
fulldateInString = yearInString + "-01" + "-01"
self.irrigationArea = 10000.0 * pcr.cover(
vos.netcdf2PCRobjClone(
self.dynamicIrrigationAreaFile,
"irrigationArea",
fulldateInString,
useDoy="yearly",
cloneMapFileName=self.cloneMap,
),
0.0,
) # unit: m2 (input file is in hectare)
else:
irrigation_pcraster_file = (
self.dynamicIrrigationAreaFile + yearInString + ".map"
)
logger.debug(
"reading irrigation area map from : " + irrigation_pcraster_file
)
self.irrigationArea = 10000.0 * pcr.cover(
vos.readPCRmapClone(
irrigation_pcraster_file, self.cloneMap, self.tmpDir
),
0.0,
) # unit: m2 (input file is in hectare)
        # TODO: Convert the input file from hectare to percentage.
        # This avoids errors if somebody uses 30 min input to run a 5 min model.
# area of irrigation is limited by cellArea
self.irrigationArea = pcr.max(self.irrigationArea, 0.0)
self.irrigationArea = pcr.min(
self.irrigationArea, self.cellArea
) # limited by cellArea
# calculate fracVegCover (for irrigation only)
for coverType in self.coverTypes:
if coverType.startswith("irr"):
self.landCoverObj[coverType].fractionArea = 0.0 # reset
self.landCoverObj[coverType].fractionArea = (
self.landCoverObj[coverType].irrTypeFracOverIrr
* self.irrigationArea
) # unit: m2
self.landCoverObj[coverType].fracVegCover = pcr.min(
1.0, self.landCoverObj[coverType].fractionArea / self.cellArea
)
# avoid small values
self.landCoverObj[coverType].fracVegCover = (
pcr.rounddown(self.landCoverObj[coverType].fracVegCover * 1000.0)
/ 1000.0
)
# rescale land cover fractions (for all land cover types):
self.scaleModifiedLandCoverFractions()
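    # Editor's note -- illustrative sketch, not part of the original model code.
    # Per irrigated cover type, the fraction above is the type's share of the
    # (cell-area-capped) irrigated area, floored to three decimals:
    def irrigation_fraction_sketch(irr_type_share, irrigation_area_m2, cell_area_m2):
        area_m2 = irr_type_share * min(max(irrigation_area_m2, 0.0), cell_area_m2)
        frac_veg_cover = min(1.0, area_m2 / cell_area_m2)
        return int(frac_veg_cover * 1000.0) / 1000.0  # analogue of pcr.rounddown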
def update(self, meteo, groundwater, routing, currTimeStep, wflow_logger):
        # updating the regional groundwater abstraction limit (at the beginning of the year or at the beginning of the simulation)
if groundwater.limitRegionalAnnualGroundwaterAbstraction:
# logger.debug('Total groundwater abstraction is limited by regional annual pumping capacity.')
if currTimeStep.doy == 1 or currTimeStep.timeStepPCR == 1:
self.groundwater_pumping_region_ids = vos.netcdf2PCRobjClone(
groundwater.pumpingCapacityNC,
"region_ids",
currTimeStep.fulldate,
useDoy="yearly",
cloneMapFileName=self.cloneMap,
)
other_ids = (
pcr.mapmaximum(self.groundwater_pumping_region_ids)
+ pcr.scalar(1000.0)
+ pcr.uniqueid(self.landmask)
)
self.groundwater_pumping_region_ids = pcr.cover(
self.groundwater_pumping_region_ids, other_ids
)
self.groundwater_pumping_region_ids = pcr.ifthen(
self.landmask, pcr.nominal(self.groundwater_pumping_region_ids)
)
self.regionalAnnualGroundwaterAbstractionLimit = pcr.ifthen(
self.landmask,
pcr.cover(
vos.netcdf2PCRobjClone(
groundwater.pumpingCapacityNC,
"regional_pumping_limit",
currTimeStep.fulldate,
useDoy="yearly",
cloneMapFileName=self.cloneMap,
),
0.0,
),
)
self.regionalAnnualGroundwaterAbstractionLimit = pcr.areamaximum(
self.regionalAnnualGroundwaterAbstractionLimit,
self.groundwater_pumping_region_ids,
)
self.regionalAnnualGroundwaterAbstractionLimit *= (
1000.0 * 1000.0 * 1000.0
) # unit: m3/year
self.regionalAnnualGroundwaterAbstractionLimit = pcr.ifthen(
self.landmask, self.regionalAnnualGroundwaterAbstractionLimit
)
# minimum value (unit: m3/year at the regional scale)
minimum_value = 1000.0
self.regionalAnnualGroundwaterAbstractionLimit = pcr.max(
minimum_value, self.regionalAnnualGroundwaterAbstractionLimit
)
else:
# logger.debug('Total groundwater abstraction is NOT limited by regional annual pumping capacity.')
self.groundwater_pumping_region_ids = None
self.regionalAnnualGroundwaterAbstractionLimit = None
# updating fracVegCover of each landCover (landCover fraction)
# - if considering dynamic/historical irrigation areas (expansion/reduction of irrigated areas)
        # - done on a yearly basis, at the beginning of each year, and also at the beginning of the simulation
#
if (
self.dynamicIrrigationArea
and self.includeIrrigation
and (currTimeStep.timeStepPCR == 1 or currTimeStep.doy == 1)
and self.noLandCoverFractionCorrection == False
):
# scale land cover fraction (due to expansion/reduction of irrigated areas)
self.scaleDynamicIrrigation(currTimeStep.year)
####################################################################################################################################################################
# correcting land cover fractions
total_fractions = pcr.scalar(0.0)
for coverType in self.coverTypes:
total_fractions += self.landCoverObj[coverType].fracVegCover
if "grassland" in list(self.landCoverObj.keys()):
self.landCoverObj["grassland"].fracVegCover = pcr.ifthenelse(
total_fractions > 0.1,
self.landCoverObj["grassland"].fracVegCover,
1.0,
)
if "short_natural" in list(self.landCoverObj.keys()):
self.landCoverObj["short_natural"].fracVegCover = pcr.ifthenelse(
total_fractions > 0.1,
self.landCoverObj["short_natural"].fracVegCover,
1.0,
)
total_fractions = pcr.scalar(0.0)
for coverType in self.coverTypes:
total_fractions += self.landCoverObj[coverType].fracVegCover
for coverType in self.coverTypes:
self.landCoverObj[coverType].fracVegCover = (
self.landCoverObj[coverType].fracVegCover / total_fractions
)
####################################################################################################################################################################
# read land cover fractions from netcdf files
# - assumption: annual resolution
if (
self.noAnnualChangesInLandCoverParameter == False
and self.dynamicIrrigationArea == False
and (currTimeStep.timeStepPCR == 1 or currTimeStep.doy == 1)
):
msg = "Read land cover fractions based on the given netcdf file."
# logger.debug(msg)
for coverType in self.coverTypes:
self.landCoverObj[coverType].fracVegCover = self.landCoverObj[
coverType
].get_land_cover_parameters(
date_in_string=str(currTimeStep.fulldate),
get_only_fracVegCover=True,
)
####################################################################################################################################################################
# correcting land cover fractions
total_fractions = pcr.scalar(0.0)
for coverType in self.coverTypes:
total_fractions += self.landCoverObj[coverType].fracVegCover
if "grassland" in list(self.landCoverObj.keys()):
self.landCoverObj["grassland"].fracVegCover = pcr.ifthenelse(
total_fractions > 0.1,
self.landCoverObj["grassland"].fracVegCover,
1.0,
)
if "short_natural" in list(self.landCoverObj.keys()):
self.landCoverObj["short_natural"].fracVegCover = pcr.ifthenelse(
total_fractions > 0.1,
self.landCoverObj["short_natural"].fracVegCover,
1.0,
)
total_fractions = pcr.scalar(0.0)
for coverType in self.coverTypes:
total_fractions += self.landCoverObj[coverType].fracVegCover
for coverType in self.coverTypes:
self.landCoverObj[coverType].fracVegCover = (
self.landCoverObj[coverType].fracVegCover / total_fractions
)
####################################################################################################################################################################
# transfer some states, due to changes/dynamics in land cover conditions
# - if considering dynamic/historical irrigation areas (expansion/reduction of irrigated areas)
        # - done on a yearly basis, at the beginning of each year
# - note that this must be done at the beginning of each year, including for the first time step (timeStepPCR == 1)
#
if (
(self.dynamicIrrigationArea and self.includeIrrigation)
or self.noAnnualChangesInLandCoverParameter == False
) and currTimeStep.doy == 1:
#
# loop for all main states:
for var in self.mainStates:
# logger.info("Transfering states for the variable "+str(var))
moving_fraction = pcr.scalar(
0.0
) # total land cover fractions that will be transferred
moving_states = pcr.scalar(0.0) # total states that will be transferred
for coverType in self.coverTypes:
old_fraction = self.landCoverObj[coverType].previousFracVegCover
new_fraction = self.landCoverObj[coverType].fracVegCover
moving_fraction += pcr.max(0.0, old_fraction - new_fraction)
moving_states += (
pcr.max(0.0, old_fraction - new_fraction)
* vars(self.landCoverObj[coverType])[var]
)
previous_state = pcr.scalar(0.0)
rescaled_state = pcr.scalar(0.0)
# correcting states
for coverType in self.coverTypes:
old_states = vars(self.landCoverObj[coverType])[var]
old_fraction = self.landCoverObj[coverType].previousFracVegCover
new_fraction = self.landCoverObj[coverType].fracVegCover
correction = moving_states * vos.getValDivZero(
pcr.max(0.0, new_fraction - old_fraction),
moving_fraction,
vos.smallNumber,
)
new_states = pcr.ifthenelse(
new_fraction > old_fraction,
vos.getValDivZero(
old_states * old_fraction + correction,
new_fraction,
vos.smallNumber,
),
old_states,
)
new_states = pcr.ifthenelse(
new_fraction > 0.0, new_states, pcr.scalar(0.0)
)
vars(self.landCoverObj[coverType])[var] = new_states
previous_state += old_fraction * old_states
rescaled_state += new_fraction * new_states
# check and make sure that previous_state == rescaled_state
check_map = previous_state - rescaled_state
a, b, c = vos.getMinMaxMean(check_map)
threshold = 1e-5
            # if abs(a) > threshold or abs(b) > threshold:
            #     logger.warning("Error in transferring states (due to dynamics in land cover fractions) ... Min %f Max %f Mean %f" % (a, b, c))
            # else:
            #     logger.info("Successfully transferred states (after a change in land cover fractions) ... Min %f Max %f Mean %f" % (a, b, c))
            # (a standalone sketch of this mass-conserving transfer is given after this method)
        # on the last day of the year, save the current land cover fractions as the "previous" fractions (to be used in the next time step)
if (
self.dynamicIrrigationArea
and self.includeIrrigation
and currTimeStep.isLastDayOfYear
):
# save the current state of fracVegCover
for coverType in self.coverTypes:
self.landCoverObj[coverType].previousFracVegCover = self.landCoverObj[
coverType
].fracVegCover
# calculate cell fraction influenced by capillary rise:
self.capRiseFrac = self.calculateCapRiseFrac(groundwater, routing, currTimeStep)
# get a dictionary containing livestock, domestic and industrial water demand, including their return flow fractions
self.nonIrrigationWaterDemandDict = self.obtainNonIrrWaterDemand(
routing, currTimeStep
)
        # get a dictionary containing the partitioning of withdrawal/abstraction sources (groundwater and surface water)
self.swAbstractionFractionDict = self.partitioningGroundSurfaceAbstraction(
groundwater, routing
)
        # get desalination water use (m/day); this is assumed to be a potential supply
if self.includeDesalination:
# logger.debug("Monthly desalination water use is included.")
if currTimeStep.timeStepPCR == 1 or currTimeStep.day == 1:
desalinationWaterUse = pcr.ifthen(
self.landmask,
pcr.cover(
vos.netcdf2PCRobjClone(
self.desalinationWaterFile,
"desalination_water_use",
currTimeStep.fulldate,
useDoy="monthly",
cloneMapFileName=self.cloneMap,
),
0.0,
),
)
self.desalinationWaterUse = pcr.max(0.0, desalinationWaterUse)
else:
# logger.debug("Monthly desalination water use is NOT included.")
self.desalinationWaterUse = pcr.scalar(0.0)
        # update (loop over each land cover type):
wflow_logger.info("start landsurface landcover")
for coverType in self.coverTypes:
# logger.info("Updating land cover: "+str(coverType))
self.landCoverObj[coverType].updateLC(
meteo,
groundwater,
routing,
self.capRiseFrac,
self.nonIrrigationWaterDemandDict,
self.swAbstractionFractionDict,
currTimeStep,
self.allocSegments,
self.desalinationWaterUse,
self.groundwater_pumping_region_ids,
self.regionalAnnualGroundwaterAbstractionLimit,
wflow_logger,
)
wflow_logger.info("end landsurface landcover")
# first, we set all aggregated values/variables to zero:
for var in self.aggrVars:
vars(self)[var] = pcr.scalar(0.0)
#
        # get or calculate all aggregated values/variables
for coverType in self.coverTypes:
            # calculate the aggregated or global landSurface values:
for var in self.aggrVars:
vars(self)[var] += (
self.landCoverObj[coverType].fracVegCover
* vars(self.landCoverObj[coverType])[var]
)
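        # Editor's note -- illustrative worked example, not part of the original
        # model code: the aggregation above is a fraction-weighted sum over the
        # cover types, aggregate_var = sum_i(fracVegCover_i * var_i); e.g.
        # fractions [0.6, 0.4] with per-type values [2.0, 5.0] give a grid-cell
        # value of 0.6 * 2.0 + 0.4 * 5.0 = 3.2.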
        # total storages (unit: m, water slice) in the entire landSurface module
if self.numberOfSoilLayers == 2:
self.totalSto = (
self.snowCoverSWE
+ self.snowFreeWater
+ self.interceptStor
+ self.topWaterLayer
+ self.storUpp
+ self.storLow
)
#
if self.numberOfSoilLayers == 3:
self.totalSto = (
self.snowCoverSWE
+ self.snowFreeWater
+ self.interceptStor
+ self.topWaterLayer
+ self.storUpp000005
+ self.storUpp005030
+ self.storLow030150
)
# old-style reporting (this is useful for debugging)
# self.old_style_land_surface_reporting(currTimeStep)
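    # Editor's note -- illustrative sketch, not part of the original model code.
    # The state transfer in update() moves water from shrinking cover fractions
    # to growing ones so that sum(fraction * state) is conserved; a list-based
    # scalar version:
    def transfer_states_sketch(old_frac, new_frac, old_state, small=1e-20):
        moving_frac = sum(max(0.0, o - n) for o, n in zip(old_frac, new_frac))
        moving_stat = sum(max(0.0, o - n) * s
                          for o, n, s in zip(old_frac, new_frac, old_state))
        new_state = []
        for o, n, s in zip(old_frac, new_frac, old_state):
            if n > o:  # a growing class receives its share of the moving water
                s = (s * o + moving_stat * (n - o) / max(small, moving_frac)) / max(small, n)
            new_state.append(s if n > 0.0 else 0.0)
        return new_state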
def old_style_land_surface_reporting(self, currTimeStep):
if self.report == True:
timeStamp = datetime.datetime(
currTimeStep.year, currTimeStep.month, currTimeStep.day, 0
)
# writing daily output to netcdf files
timestepPCR = currTimeStep.timeStepPCR
if self.outDailyTotNC[0] != "None":
for var in self.outDailyTotNC:
self.netcdfObj.data2NetCDF(
str(self.outNCDir) + "/" + str(var) + "_dailyTot.nc",
var,
pcr.pcr2numpy(self.__getattribute__(var), vos.MV),
timeStamp,
timestepPCR - 1,
)
# writing monthly output to netcdf files
            # - cumulative
if self.outMonthTotNC[0] != "None":
for var in self.outMonthTotNC:
# introduce variables at the beginning of simulation or
# reset variables at the beginning of the month
if currTimeStep.timeStepPCR == 1 or currTimeStep.day == 1:
vars(self)[var + "MonthTot"] = pcr.scalar(0.0)
# accumulating
vars(self)[var + "MonthTot"] += vars(self)[var]
# reporting at the end of the month:
if currTimeStep.endMonth == True:
self.netcdfObj.data2NetCDF(
str(self.outNCDir) + "/" + str(var) + "_monthTot.nc",
var,
pcr.pcr2numpy(
self.__getattribute__(var + "MonthTot"), vos.MV
),
timeStamp,
currTimeStep.monthIdx - 1,
)
# -average
if self.outMonthAvgNC[0] != "None":
for var in self.outMonthAvgNC:
                    # only if an accumulator variable has not been defined:
if var not in self.outMonthTotNC:
# introduce accumulator at the beginning of simulation or
# reset accumulator at the beginning of the month
if currTimeStep.timeStepPCR == 1 or currTimeStep.day == 1:
vars(self)[var + "MonthTot"] = pcr.scalar(0.0)
# accumulating
vars(self)[var + "MonthTot"] += vars(self)[var]
# calculating average & reporting at the end of the month:
if currTimeStep.endMonth == True:
vars(self)[var + "MonthAvg"] = (
vars(self)[var + "MonthTot"] / currTimeStep.day
)
self.netcdfObj.data2NetCDF(
str(self.outNCDir) + "/" + str(var) + "_monthAvg.nc",
var,
pcr.pcr2numpy(
self.__getattribute__(var + "MonthAvg"), vos.MV
),
timeStamp,
currTimeStep.monthIdx - 1,
)
#
# -last day of the month
if self.outMonthEndNC[0] != "None":
for var in self.outMonthEndNC:
# reporting at the end of the month:
if currTimeStep.endMonth == True:
self.netcdfObj.data2NetCDF(
str(self.outNCDir) + "/" + str(var) + "_monthEnd.nc",
var,
pcr.pcr2numpy(self.__getattribute__(var), vos.MV),
timeStamp,
currTimeStep.monthIdx - 1,
)
# writing yearly output to netcdf files
            # - cumulative
if self.outAnnuaTotNC[0] != "None":
for var in self.outAnnuaTotNC:
                    # introduce variables at the beginning of simulation or
                    # reset variables at the beginning of the year
if currTimeStep.timeStepPCR == 1 or currTimeStep.doy == 1:
vars(self)[var + "AnnuaTot"] = pcr.scalar(0.0)
# accumulating
vars(self)[var + "AnnuaTot"] += vars(self)[var]
# reporting at the end of the year:
if currTimeStep.endYear == True:
self.netcdfObj.data2NetCDF(
str(self.outNCDir) + "/" + str(var) + "_annuaTot.nc",
var,
pcr.pcr2numpy(
self.__getattribute__(var + "AnnuaTot"), vos.MV
),
timeStamp,
currTimeStep.annuaIdx - 1,
)
# -average
if self.outAnnuaAvgNC[0] != "None":
for var in self.outAnnuaAvgNC:
                    # only if an accumulator variable has not been defined:
if var not in self.outAnnuaTotNC:
# introduce accumulator at the beginning of simulation or
# reset accumulator at the beginning of the year
if currTimeStep.timeStepPCR == 1 or currTimeStep.doy == 1:
vars(self)[var + "AnnuaTot"] = pcr.scalar(0.0)
# accumulating
vars(self)[var + "AnnuaTot"] += vars(self)[var]
#
# calculating average & reporting at the end of the year:
if currTimeStep.endYear == True:
vars(self)[var + "AnnuaAvg"] = (
vars(self)[var + "AnnuaTot"] / currTimeStep.doy
)
self.netcdfObj.data2NetCDF(
str(self.outNCDir) + "/" + str(var) + "_annuaAvg.nc",
var,
pcr.pcr2numpy(
self.__getattribute__(var + "AnnuaAvg"), vos.MV
),
timeStamp,
currTimeStep.annuaIdx - 1,
)
#
# -last day of the year
if self.outAnnuaEndNC[0] != "None":
for var in self.outAnnuaEndNC:
# reporting at the end of the year:
if currTimeStep.endYear == True:
self.netcdfObj.data2NetCDF(
str(self.outNCDir) + "/" + str(var) + "_annuaEnd.nc",
var,
pcr.pcr2numpy(self.__getattribute__(var), vos.MV),
timeStamp,
currTimeStep.annuaIdx - 1,
)
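    # Editor's note -- illustrative sketch, not part of the original model code.
    # Every reporting branch above follows the same accumulator pattern: reset
    # on the first day of the period, add the daily value, and report the total
    # (or total / elapsed days for the average) on the period's last day:
    def period_total_and_average_sketch(daily_values):
        total = 0.0
        for day, value in enumerate(daily_values, start=1):
            if day == 1:
                total = 0.0  # reset at the beginning of the period
            total += value   # accumulate every time step
        return total, total / day  # assumes a non-empty period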
|
openstreams/wflow
|
wflow/pcrglobwb/landSurface.py
|
Python
|
gpl-3.0
| 106,368
|
[
"NetCDF"
] |
199992e3f5106a1591794ae25d5e2f0cd671416821a522c3081957c70e000bb8
|
# -*- coding: utf-8 -*-
"""Release data for the IPython project."""
#-----------------------------------------------------------------------------
# Copyright (c) 2008, IPython Development Team.
# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# IPython version information. An empty _version_extra corresponds to a full
# release. 'dev' as a _version_extra string means this is a development
# version
_version_major = 0
_version_minor = 13
_version_micro = 1 # use '' for first of series, number for 1 and above
# _version_extra = 'dev'
_version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
version = __version__ # backwards compatibility name
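# Editor's note -- illustrative check, not part of the original file. With the
# values above (major 0, minor 13, micro 1, empty extra), the pieces join to:
assert __version__ == '0.13.1'
# Setting _version_extra = 'dev' would instead yield '0.13.1.dev'.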
description = "IPython: Productive Interactive Computing"
long_description = \
"""
IPython provides a rich toolkit to help you make the most out of using Python
interactively. Its main components are:
* Powerful interactive Python shells (terminal- and Qt-based).
* A web-based interactive notebook environment with all shell features plus
support for embedded figures, animations and rich media.
* Support for interactive data visualization and use of GUI toolkits.
* Flexible, embeddable interpreters to load into your own projects.
* A high-performance library for high level and interactive parallel computing
that works in multicore systems, clusters, supercomputing and cloud scenarios.
The enhanced interactive Python shells have the following main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Extensible tab completion, with support by default for completion of python
variables and keywords, filenames and function keywords.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* A rich configuration system with easy switching between different setups
(simpler than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs and GUIs.
* Integrated access to the pdb debugger and the Python profiler.
The parallel computing architecture has the following main features:
* Quickly parallelize Python code from an interactive Python/IPython session.
* A flexible and dynamic process model that can be deployed on anything from
multicore workstations to supercomputers.
* An architecture that supports many different styles of parallelism, from
message passing to task farming.
* Both blocking and fully asynchronous interfaces.
* High level APIs that enable many things to be parallelized in a few lines
of code.
* Share live parallel jobs with other users securely.
* Dynamically load balanced task farming system.
* Robust error handling in parallel code.
The latest development version is always available from IPython's `GitHub
site <http://github.com/ipython>`_.
"""
license = 'BSD'
authors = {'Fernando' : ('Fernando Perez','fperez.net@gmail.com'),
'Janko' : ('Janko Hauser','jhauser@zscout.de'),
'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
'Ville' : ('Ville Vainio','vivainio@gmail.com'),
'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'),
'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com'),
'Thomas' : ('Thomas A. Kluyver', 'takowl@gmail.com'),
'Jorgen' : ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'),
'Matthias' : ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
}
author = 'The IPython Development Team'
author_email = 'ipython-dev@scipy.org'
url = 'http://ipython.org'
download_url = 'https://github.com/ipython/ipython/downloads'
platforms = ['Linux','Mac OSX','Windows XP/2000/NT/Vista/7']
keywords = ['Interactive','Interpreter','Shell','Parallel','Distributed',
'Web-based computing', 'Qt console', 'Embedding']
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Shells'
]
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/core/release.py
|
Python
|
lgpl-3.0
| 5,422
|
[
"Brian"
] |
08da7187f78650c99c89703d38a46ab4de43d45fb1d4c91b9d241eff56e71d72
|
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MVN with covariance parameterized by a diagonal and a low rank update."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import mvn_low_rank_update_linear_operator_covariance
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
__all__ = [
'MultivariateNormalDiagPlusLowRankCovariance',
]
class MultivariateNormalDiagPlusLowRankCovariance(
mvn_low_rank_update_linear_operator_covariance
.MultivariateNormalLowRankUpdateLinearOperatorCovariance):
"""The multivariate normal distribution on `R^k`.
This Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (the mean) and a (batch of) `k x k`
`covariance` matrix.
The covariance matrix for this particular Normal is a (typically low rank)
perturbation of a diagonal matrix.
Compare to `MultivariateNormalDiagPlusLowRank` which perturbs the *scale*
rather than covariance.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, covariance) = exp(-0.5 y^T @ inv(covariance) @ y) / Z,
y := x - loc
Z := (2 pi)**(0.5 k) |det(covariance)|**0.5,
```
  where `^T` denotes matrix transpose and `@` matrix multiplication.
The MultivariateNormal distribution can also be parameterized as a
[location-scale family](https://en.wikipedia.org/wiki/Location-scale_family),
i.e., it can be constructed using a matrix `scale` such that
`covariance = scale @ scale^T`, and then
```none
X ~ MultivariateNormal(loc=0, scale=I) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
tfd = tfp.distributions
# Initialize a single 2-variate Gaussian.
# The covariance is a rank 1 update of a diagonal matrix.
loc = [1., 2.]
cov_diag_factor = [1., 1.]
  cov_perturb_factor = tf.ones((2, 1)) * np.sqrt(2)  # A norm-2 vector
mvn = MultivariateNormalDiagPlusLowRankCovariance(
loc,
cov_diag_factor,
cov_perturb_factor)
# Covariance agrees with
# tf.linalg.matrix_diag(cov_diag_factor)
# + cov_perturb_factor @ cov_perturb_factor.T
mvn.covariance()
  # ==> [[ 3., 2.],
  #      [ 2., 3.]]
  # Compute the pdf of an `R^2` observation; return a scalar.
mvn.prob([-1., 0]) # shape: []
# Initialize a 2-batch of 2-variate Gaussians.
mu = [[1., 2],
[11, 22]] # shape: [2, 2]
cov_diag_factor = [[1., 2],
[0.5, 1]] # shape: [2, 2]
cov_perturb_factor = tf.ones((2, 1)) * np.sqrt(2) # Broadcasts!
mvn = MultivariateNormalDiagPlusLowRankCovariance(
      mu,
cov_diag_factor,
cov_perturb_factor)
# Compute the pdf of two `R^2` observations; return a length-2 vector.
x = [[-0.9, 0],
[-10, 0]] # shape: [2, 2]
mvn.prob(x) # shape: [2]
```
"""
def __init__(
self,
loc=None,
cov_diag_factor=None,
cov_perturb_factor=None,
validate_args=False,
allow_nan_stats=True,
name='MultivariateNormalDiagPlusLowRankCovariance'):
"""Construct Multivariate Normal distribution on `R^k`.
The covariance matrix is constructed as an efficient implementation of:
```
update = cov_perturb_factor @ cov_perturb_factor^T
covariance = tf.linalg.matrix_diag(cov_diag_factor) + update
```
The `batch_shape` is the broadcast shape between `loc` and covariance args.
The `event_shape` is given by last dimension of the matrix implied by the
covariance. The last dimension of `loc` (if provided) must broadcast with
this.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
cov_diag_factor: `Tensor` of same dtype as `loc` and broadcastable
shape. Should have positive entries.
cov_perturb_factor: `Tensor` of same dtype as `loc` and shape that
broadcasts with `loc.shape + [M]`, where if `M < k` this is a low rank
update.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if either of `cov_diag_factor` or
`cov_perturb_factor` is unspecified.
"""
parameters = dict(locals())
if cov_diag_factor is None:
raise ValueError('Missing required `cov_diag_factor` parameter.')
if cov_perturb_factor is None:
raise ValueError(
'Missing required `cov_perturb_factor` parameter.')
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype(
[loc, cov_diag_factor, cov_perturb_factor],
dtype_hint=tf.float32)
cov_diag_factor = tensor_util.convert_nonref_to_tensor(
cov_diag_factor, dtype=dtype, name='cov_diag_factor')
cov_perturb_factor = tensor_util.convert_nonref_to_tensor(
cov_perturb_factor,
dtype=dtype,
name='cov_perturb_factor')
loc = tensor_util.convert_nonref_to_tensor(loc, dtype=dtype, name='loc')
cov_operator = tf.linalg.LinearOperatorLowRankUpdate(
base_operator=tf.linalg.LinearOperatorDiag(
cov_diag_factor,
# The user is required to provide a positive
# cov_diag_factor. If they don't, then unexpected behavior
# will happen, and may not be caught unless validate_args=True.
is_positive_definite=True,
),
u=cov_perturb_factor,
# If cov_diag_factor > 0, then cov_operator is SPD since
# it is of the form D + UU^T.
is_positive_definite=True)
super(MultivariateNormalDiagPlusLowRankCovariance, self).__init__(
loc=loc,
cov_operator=cov_operator,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
self._cov_diag_factor = cov_diag_factor
self._cov_perturb_factor = cov_perturb_factor
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
loc=parameter_properties.ParameterProperties(event_ndims=1),
cov_diag_factor=parameter_properties.ParameterProperties(
event_ndims=1,
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
cov_perturb_factor=parameter_properties.ParameterProperties(
event_ndims=2,
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED),
)
@property
def cov_diag_factor(self):
"""The diagonal term in the covariance."""
return self._cov_diag_factor
@property
def cov_perturb_factor(self):
"""The (probably low rank) update term in the covariance."""
return self._cov_perturb_factor
_composite_tensor_nonshape_params = (
'loc', 'cov_diag_factor', 'cov_perturb_factor')
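# Editor's note -- illustrative check, not part of the original file. The
# covariance assembled in __init__ is diag(cov_diag_factor) plus the rank-k
# update cov_perturb_factor @ cov_perturb_factor^T; with plain numpy:
#
#   import numpy as np
#   d = np.array([1., 1.])
#   u = np.ones((2, 1)) * np.sqrt(2.)
#   np.diag(d) + u @ u.T  # -> [[3., 2.], [2., 3.]], matching mvn.covariance()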
|
tensorflow/probability
|
tensorflow_probability/python/distributions/mvn_diag_plus_low_rank_covariance.py
|
Python
|
apache-2.0
| 8,348
|
[
"Gaussian"
] |
392a907488ad0a45a80dd870c06581cfa7a2f81f7e1d4443567c5f6e84896f9e
|
# encoding: utf-8
from __future__ import unicode_literals
import mimetypes
import os
import re
from tempfile import NamedTemporaryFile
import requests
from twitter import TwitterError
TLDS = [
"ac", "ad", "ae", "af", "ag", "ai", "al", "am", "an", "ao", "aq", "ar",
"as", "at", "au", "aw", "ax", "az", "ba", "bb", "bd", "be", "bf", "bg",
"bh", "bi", "bj", "bl", "bm", "bn", "bo", "bq", "br", "bs", "bt", "bv",
"bw", "by", "bz", "ca", "cc", "cd", "cf", "cg", "ch", "ci", "ck", "cl",
"cm", "cn", "co", "cr", "cu", "cv", "cw", "cx", "cy", "cz", "de", "dj",
"dk", "dm", "do", "dz", "ec", "ee", "eg", "eh", "er", "es", "et", "eu",
"fi", "fj", "fk", "fm", "fo", "fr", "ga", "gb", "gd", "ge", "gf", "gg",
"gh", "gi", "gl", "gm", "gn", "gp", "gq", "gr", "gs", "gt", "gu", "gw",
"gy", "hk", "hm", "hn", "hr", "ht", "hu", "id", "ie", "il", "im", "in",
"io", "iq", "ir", "is", "it", "je", "jm", "jo", "jp", "ke", "kg", "kh",
"ki", "km", "kn", "kp", "kr", "kw", "ky", "kz", "la", "lb", "lc", "li",
"lk", "lr", "ls", "lt", "lu", "lv", "ly", "ma", "mc", "md", "me", "mf",
"mg", "mh", "mk", "ml", "mm", "mn", "mo", "mp", "mq", "mr", "ms", "mt",
"mu", "mv", "mw", "mx", "my", "mz", "na", "nc", "ne", "nf", "ng", "ni",
"nl", "no", "np", "nr", "nu", "nz", "om", "pa", "pe", "pf", "pg", "ph",
"pk", "pl", "pm", "pn", "pr", "ps", "pt", "pw", "py", "qa", "re", "ro",
"rs", "ru", "rw", "sa", "sb", "sc", "sd", "se", "sg", "sh", "si", "sj",
"sk", "sl", "sm", "sn", "so", "sr", "ss", "st", "su", "sv", "sx", "sy",
"sz", "tc", "td", "tf", "tg", "th", "tj", "tk", "tl", "tm", "tn", "to",
"tp", "tr", "tt", "tv", "tw", "tz", "ua", "ug", "uk", "um", "us", "uy",
"uz", "va", "vc", "ve", "vg", "vi", "vn", "vu", "wf", "ws", "ye", "yt",
"za", "zm", "zw", "ελ", "бел", "мкд", "мон", "рф", "срб", "укр", "қаз",
"հայ", "الاردن", "الجزائر", "السعودية", "المغرب", "امارات", "ایران", "بھارت",
"تونس", "سودان", "سورية", "عراق", "عمان", "فلسطين", "قطر", "مصر",
"مليسيا", "پاکستان", "भारत", "বাংলা", "ভারত", "ਭਾਰਤ", "ભારત",
"இந்தியா", "இலங்கை", "சிங்கப்பூர்", "భారత్", "ලංකා", "ไทย",
"გე", "中国", "中國", "台湾", "台灣", "新加坡", "澳門", "香港", "한국", "neric:",
"abb", "abbott", "abogado", "academy", "accenture", "accountant",
"accountants", "aco", "active", "actor", "ads", "adult", "aeg", "aero",
"afl", "agency", "aig", "airforce", "airtel", "allfinanz", "alsace",
"amsterdam", "android", "apartments", "app", "aquarelle", "archi", "army",
"arpa", "asia", "associates", "attorney", "auction", "audio", "auto",
"autos", "axa", "azure", "band", "bank", "bar", "barcelona", "barclaycard",
"barclays", "bargains", "bauhaus", "bayern", "bbc", "bbva", "bcn", "beer",
"bentley", "berlin", "best", "bet", "bharti", "bible", "bid", "bike",
"bing", "bingo", "bio", "biz", "black", "blackfriday", "bloomberg", "blue",
"bmw", "bnl", "bnpparibas", "boats", "bond", "boo", "boots", "boutique",
"bradesco", "bridgestone", "broker", "brother", "brussels", "budapest",
"build", "builders", "business", "buzz", "bzh", "cab", "cafe", "cal",
"camera", "camp", "cancerresearch", "canon", "capetown", "capital",
"caravan", "cards", "care", "career", "careers", "cars", "cartier",
"casa", "cash", "casino", "cat", "catering", "cba", "cbn", "ceb", "center",
"ceo", "cern", "cfa", "cfd", "chanel", "channel", "chat", "cheap",
"chloe", "christmas", "chrome", "church", "cisco", "citic", "city",
"claims", "cleaning", "click", "clinic", "clothing", "cloud", "club",
"coach", "codes", "coffee", "college", "cologne", "com", "commbank",
"community", "company", "computer", "condos", "construction", "consulting",
"contractors", "cooking", "cool", "coop", "corsica", "country", "coupons",
"courses", "credit", "creditcard", "cricket", "crown", "crs", "cruises",
"cuisinella", "cymru", "cyou", "dabur", "dad", "dance", "date", "dating",
"datsun", "day", "dclk", "deals", "degree", "delivery", "delta",
"democrat", "dental", "dentist", "desi", "design", "dev", "diamonds",
"diet", "digital", "direct", "directory", "discount", "dnp", "docs",
"dog", "doha", "domains", "doosan", "download", "drive", "durban", "dvag",
"earth", "eat", "edu", "education", "email", "emerck", "energy",
"engineer", "engineering", "enterprises", "epson", "equipment", "erni",
"esq", "estate", "eurovision", "eus", "events", "everbank", "exchange",
"expert", "exposed", "express", "fage", "fail", "faith", "family", "fan",
"fans", "farm", "fashion", "feedback", "film", "finance", "financial",
"firmdale", "fish", "fishing", "fit", "fitness", "flights", "florist",
"flowers", "flsmidth", "fly", "foo", "football", "forex", "forsale",
"forum", "foundation", "frl", "frogans", "fund", "furniture", "futbol",
"fyi", "gal", "gallery", "game", "garden", "gbiz", "gdn", "gent",
"genting", "ggee", "gift", "gifts", "gives", "giving", "glass", "gle",
"global", "globo", "gmail", "gmo", "gmx", "gold", "goldpoint", "golf",
"goo", "goog", "google", "gop", "gov", "graphics", "gratis", "green",
"gripe", "group", "guge", "guide", "guitars", "guru", "hamburg", "hangout",
"haus", "healthcare", "help", "here", "hermes", "hiphop", "hitachi", "hiv",
"hockey", "holdings", "holiday", "homedepot", "homes", "honda", "horse",
"host", "hosting", "hoteles", "hotmail", "house", "how", "hsbc", "ibm",
"icbc", "ice", "icu", "ifm", "iinet", "immo", "immobilien", "industries",
"infiniti", "info", "ing", "ink", "institute", "insure", "int",
"international", "investments", "ipiranga", "irish", "ist", "istanbul",
"itau", "iwc", "java", "jcb", "jetzt", "jewelry", "jlc", "jll", "jobs",
"joburg", "jprs", "juegos", "kaufen", "kddi", "kim", "kitchen", "kiwi",
"koeln", "komatsu", "krd", "kred", "kyoto", "lacaixa", "lancaster", "land",
"lasalle", "lat", "latrobe", "law", "lawyer", "lds", "lease", "leclerc",
"legal", "lexus", "lgbt", "liaison", "lidl", "life", "lighting", "limited",
"limo", "link", "live", "lixil", "loan", "loans", "lol", "london", "lotte",
"lotto", "love", "ltda", "lupin", "luxe", "luxury", "madrid", "maif",
"maison", "man", "management", "mango", "market", "marketing", "markets",
"marriott", "mba", "media", "meet", "melbourne", "meme", "memorial", "men",
"menu", "miami", "microsoft", "mil", "mini", "mma", "mobi", "moda", "moe",
"mom", "monash", "money", "montblanc", "mormon", "mortgage", "moscow",
"motorcycles", "mov", "movie", "movistar", "mtn", "mtpc", "museum",
"nadex", "nagoya", "name", "navy", "nec", "net", "netbank", "network",
"neustar", "new", "news", "nexus", "ngo", "nhk", "nico", "ninja", "nissan",
"nokia", "nra", "nrw", "ntt", "nyc", "office", "okinawa", "omega", "one",
"ong", "onl", "online", "ooo", "oracle", "orange", "org", "organic",
"osaka", "otsuka", "ovh", "page", "panerai", "paris", "partners", "parts",
"party", "pet", "pharmacy", "philips", "photo", "photography", "photos",
"physio", "piaget", "pics", "pictet", "pictures", "pink", "pizza", "place",
"play", "plumbing", "plus", "pohl", "poker", "porn", "post", "praxi",
"press", "pro", "prod", "productions", "prof", "properties", "property",
"pub", "qpon", "quebec", "racing", "realtor", "realty", "recipes", "red",
"redstone", "rehab", "reise", "reisen", "reit", "ren", "rent", "rentals",
"repair", "report", "republican", "rest", "restaurant", "review",
"reviews", "rich", "ricoh", "rio", "rip", "rocks", "rodeo", "rsvp", "ruhr",
"run", "ryukyu", "saarland", "sakura", "sale", "samsung", "sandvik",
"sandvikcoromant", "sanofi", "sap", "sarl", "saxo", "sca", "scb",
"schmidt", "scholarships", "school", "schule", "schwarz", "science",
"scor", "scot", "seat", "seek", "sener", "services", "sew", "sex", "sexy",
"shiksha", "shoes", "show", "shriram", "singles", "site", "ski", "sky",
"skype", "sncf", "soccer", "social", "software", "sohu", "solar",
"solutions", "sony", "soy", "space", "spiegel", "spreadbetting", "srl",
"starhub", "statoil", "studio", "study", "style", "sucks", "supplies",
"supply", "support", "surf", "surgery", "suzuki", "swatch", "swiss",
"sydney", "systems", "taipei", "tatamotors", "tatar", "tattoo", "tax",
"taxi", "team", "tech", "technology", "tel", "telefonica", "temasek",
"tennis", "thd", "theater", "tickets", "tienda", "tips", "tires", "tirol",
"today", "tokyo", "tools", "top", "toray", "toshiba", "tours", "town",
"toyota", "toys", "trade", "trading", "training", "travel", "trust", "tui",
"ubs", "university", "uno", "uol", "vacations", "vegas", "ventures",
"vermögensberater", "vermögensberatung", "versicherung", "vet", "viajes",
"video", "villas", "vin", "vision", "vista", "vistaprint", "vlaanderen",
"vodka", "vote", "voting", "voto", "voyage", "wales", "walter", "wang",
"watch", "webcam", "website", "wed", "wedding", "weir", "whoswho", "wien",
"wiki", "williamhill", "win", "windows", "wine", "wme", "work", "works",
"world", "wtc", "wtf", "xbox", "xerox", "xin", "xperia", "xxx", "xyz",
"yachts", "yandex", "yodobashi", "yoga", "yokohama", "youtube", "zip",
"zone", "zuerich", "дети", "ком", "москва", "онлайн", "орг", "рус", "сайт",
"קום", "بازار", "شبكة", "كوم", "موقع", "कॉम", "नेट", "संगठन", "คอม",
"みんな", "グーグル", "コム", "世界", "中信", "中文网", "企业", "佛山", "信息",
"健康", "八卦", "公司", "公益", "商城", "商店", "商标", "在线", "大拿", "娱乐",
"工行", "广东", "慈善", "我爱你", "手机", "政务", "政府", "新闻", "时尚", "机构",
"淡马锡", "游戏", "点看", "移动", "组织机构", "网址", "网店", "网络", "谷歌", "集团",
"飞利浦", "餐厅", "닷넷", "닷컴", "삼성", "onion"]
URL_REGEXP = re.compile((
r'('
r'^(?!(https?://|www\.)?\.|ftps?://|([0-9]+\.){{1,3}}\d+)' # exclude urls that start with "."
r'(?:https?://|www\.)*^(?!.*@)(?:[\w+-_]+[.])' # beginning of url
r'(?:{0}\b|' # all tlds
r'(?:[:0-9]))' # port numbers & close off TLDs
r'(?:[\w+\/]?[a-z0-9!\*\'\(\);:&=\+\$/%#\[\]\-_\.,~?])*' # path/query params
r')').format(r'\b|'.join(TLDS)), re.U | re.I | re.X)
def calc_expected_status_length(status, short_url_length=23):
""" Calculates the length of a tweet, taking into account Twitter's
replacement of URLs with https://t.co links.
Args:
status: text of the status message to be posted.
        short_url_length: the current length of published https://t.co links.
Returns:
Expected length of the status message as an integer.
"""
status_length = 0
for word in re.split(r'\s', status):
if is_url(word):
status_length += short_url_length
else:
status_length += len(word)
status_length += len(re.findall(r'\s', status))
return status_length
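# Editor's note -- illustrative usage, not part of the original file (assuming
# is_url() below recognizes the example link):
#
#   calc_expected_status_length("check https://example.com/a/long/path now")
#   # -> 5 ("check") + 23 (t.co-wrapped URL) + 3 ("now") + 2 (spaces) = 33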
def is_url(text):
""" Checks to see if a bit of text is a URL.
Args:
text: text to check.
Returns:
Boolean of whether the text should be treated as a URL or not.
"""
return bool(re.findall(URL_REGEXP, text))
def http_to_file(http):
data_file = NamedTemporaryFile()
req = requests.get(http, stream=True)
data_file.write(req.raw.data)
return data_file
def parse_media_file(passed_media):
""" Parses a media file and attempts to return a file-like object and
information about the media file.
Args:
passed_media: media file which to parse.
Returns:
file-like object, the filename of the media file, the file size, and
the type of media.
"""
img_formats = ['image/jpeg',
'image/png',
'image/gif',
'image/bmp',
'image/webp']
video_formats = ['video/mp4',
'video/quicktime']
# If passed_media is a string, check if it points to a URL, otherwise,
# it should point to local file. Create a reference to a file obj for
# each case such that data_file ends up with a read() method.
if not hasattr(passed_media, 'read'):
if passed_media.startswith('http'):
data_file = http_to_file(passed_media)
filename = os.path.basename(passed_media)
else:
data_file = open(os.path.realpath(passed_media), 'rb')
filename = os.path.basename(passed_media)
# Otherwise, if a file object was passed in the first place,
    # create the standard reference to it (i.e., assign it to data_file).
else:
if passed_media.mode != 'rb':
raise TwitterError({'message': 'File mode must be "rb".'})
filename = os.path.basename(passed_media.name)
data_file = passed_media
data_file.seek(0, 2)
file_size = data_file.tell()
try:
data_file.seek(0)
    except Exception:
        # some file-like objects (e.g. raw HTTP response streams) are not seekable
        pass
media_type = mimetypes.guess_type(os.path.basename(filename))[0]
if media_type is not None:
if media_type in img_formats and file_size > 5 * 1048576:
raise TwitterError({'message': 'Images must be less than 5MB.'})
elif media_type in video_formats and file_size > 15 * 1048576:
raise TwitterError({'message': 'Videos must be less than 15MB.'})
elif media_type not in img_formats and media_type not in video_formats:
raise TwitterError({'message': 'Media type could not be determined.'})
return data_file, filename, file_size, media_type
def enf_type(field, _type, val):
""" Checks to see if a given val for a field (i.e., the name of the field)
is of the proper _type. If it is not, raises a TwitterError with a brief
explanation.
Args:
field:
Name of the field you are checking.
_type:
Type that the value should be returned as.
val:
Value to convert to _type.
Returns:
val converted to type _type.
"""
try:
return _type(val)
except ValueError:
raise TwitterError({
'message': '"{0}" must be type {1}'.format(field, _type.__name__)
})
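# Editor's note -- illustrative usage, not part of the original file:
#
#   enf_type('count', int, '20')   # -> 20
#   enf_type('count', int, 'abc')  # raises TwitterError: '"count" must be type int'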
|
IKholopov/HackUPC2017
|
hackupc/env/lib/python3.5/site-packages/twitter/twitter_utils.py
|
Python
|
apache-2.0
| 14,700
|
[
"CASINO",
"MOE"
] |
f7d04e676185e3a707ea2129cbc2b3f178b6692404e7510e7415ccf8e2f51611
|
""" Test the FilenamePlugin class"""
import unittest
from DIRAC.Resources.Catalog.ConditionPlugins.FilenamePlugin import FilenamePlugin
class TestfilenamePlugin(unittest.TestCase):
"""Test the FilenamePlugin class"""
def setUp(self):
self.lfns = ["/lhcb/lfn1", "/lhcb/anotherlfn", "/otherVo/name"]
def test_01_endswith(self):
"""Testing endswith (method with argument"""
fnp = FilenamePlugin("endswith('n')")
self.assertTrue(not fnp.eval(lfn="/lhcb/lfn1"))
self.assertTrue(fnp.eval(lfn="/lhcb/lfn"))
def test_02_find(self):
"""Testing special case of find"""
fnp = FilenamePlugin("find('lfn')")
self.assertTrue(fnp.eval(lfn="/lhcb/lfn1"))
self.assertTrue(not fnp.eval(lfn="/lhcb/l0f0n"))
def test_03_isalnum(self):
"""Testing isalnum (method without argument"""
fnp = FilenamePlugin("isalnum()")
self.assertTrue(fnp.eval(lfn="lhcblfn1"))
self.assertTrue(not fnp.eval(lfn="/lhcb/lf_n"))
def test_04_nonExisting(self):
"""Testing non existing string method"""
fnp = FilenamePlugin("nonexisting()")
self.assertTrue(not fnp.eval(lfn="lhcblfn1"))
self.assertTrue(not fnp.eval(lfn="/lhcb/lf_n"))
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestfilenamePlugin)
unittest.TextTestRunner(verbosity=2).run(suite)
|
DIRACGrid/DIRAC
|
src/DIRAC/Resources/Catalog/ConditionPlugins/test/Test_FilenamePlugin.py
|
Python
|
gpl-3.0
| 1,428
|
[
"DIRAC"
] |
1a629c6f11289641a1d5783d2faadd2c4876ac9e9ec711127ae0fcc7e09abb9b
|
#-*-coding:utf-8-*-
"""
Copyright (c) 2012 wong2 <wonderfuly@gmail.com>
Copyright (c) 2012 hupili <hpl1989@gmail.com>
Original Author:
Wong2 <wonderfuly@gmail.com>
Changes Statement:
Changes made by Pili Hu <hpl1989@gmail.com> on
Jan 10 2013:
Support captcha.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Wrappers for assorted RenRen (renren.com) APIs
import requests
import json
import re
import random
from pyquery import PyQuery
from encrypt import encryptString
import os
class RenRen:
def __init__(self, email=None, pwd=None):
self.session = requests.Session()
self.token = {}
if email and pwd:
self.login(email, pwd)
def _loginByCookie(self, cookie_str):
cookie_dict = dict([v.split('=', 1) for v in cookie_str.strip().split(';')])
self.session.cookies = requests.utils.cookiejar_from_dict(cookie_dict)
self.getToken()
def loginByCookie(self, cookie_path):
with open(cookie_path) as fp:
cookie_str = fp.read()
self._loginByCookie(cookie_str)
def saveCookie(self, cookie_path):
with open(cookie_path, 'w') as fp:
cookie_dict = requests.utils.dict_from_cookiejar(self.session.cookies)
cookie_str = '; '.join([k + '=' + v for k, v in cookie_dict.iteritems()])
fp.write(cookie_str)
def login(self, email, pwd):
key = self.getEncryptKey()
if self.getShowCaptcha(email) == 1:
fn = 'icode.%s.jpg' % os.getpid()
self.getICode(fn)
print "Please input the code in file '%s':" % fn
icode = raw_input().strip()
os.remove(fn)
else:
icode = ''
data = {
'email': email,
'origURL': 'http://www.renren.com/home',
'icode': icode,
'domain': 'renren.com',
'key_id': 1,
'captcha_type': 'web_login',
'password': encryptString(key['e'], key['n'], pwd) if key['isEncrypt'] else pwd,
'rkey': key['rkey']
}
print "login data: %s" % data
url = 'http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=%f' % random.random()
r = self.post(url, data)
result = r.json()
if result['code']:
print 'login successfully'
self.email = email
r = self.get(result['homeUrl'])
self.getToken(r.text)
else:
print 'login error', r.text
def getICode(self, fn):
r = self.get("http://icode.renren.com/getcode.do?t=web_login&rnd=%s" % random.random())
if r.status_code == 200 and r.raw.headers['content-type'] == 'image/jpeg':
with open(fn, 'wb') as f:
for chunk in r.iter_content():
f.write(chunk)
else:
print "get icode failure"
def getShowCaptcha(self, email=None):
r = self.post('http://www.renren.com/ajax/ShowCaptcha', data={'email': email})
return r.json()
def getEncryptKey(self):
r = requests.get('http://login.renren.com/ajax/getEncryptKey')
return r.json()
def getToken(self, html=''):
p = re.compile("get_check:'(.*)',get_check_x:'(.*)',env")
if not html:
r = self.get('http://www.renren.com')
html = r.text
result = p.search(html)
self.token = {
'requestToken': result.group(1),
'_rtk': result.group(2)
}
def request(self, url, method, data={}):
if data:
data.update(self.token)
if method == 'get':
return self.session.get(url, data=data)
elif method == 'post':
return self.session.post(url, data=data)
def get(self, url, data={}):
return self.request(url, 'get', data)
def post(self, url, data={}):
return self.request(url, 'post', data)
def getUserInfo(self):
r = self.get('http://notify.renren.com/wpi/getonlinecount.do')
return r.json()
def getNotifications(self):
url = 'http://notify.renren.com/rmessage/get?getbybigtype=1&bigtype=1&limit=50&begin=0&view=17'
r = self.get(url)
try:
result = json.loads(r.text, strict=False)
except Exception, e:
print 'error', e
result = []
return result
def removeNotification(self, notify_id):
self.get('http://notify.renren.com/rmessage/remove?nl=' + str(notify_id))
def getDoings(self, uid, page=0):
url = 'http://status.renren.com/GetSomeomeDoingList.do?userId=%s&curpage=%d' % (str(uid), page)
r = self.get(url)
return r.json().get('doingArray', [])
def getDoingById(self, owner_id, doing_id):
doings = self.getDoings(owner_id)
doing = filter(lambda doing: doing['id'] == doing_id, doings)
return doing[0] if doing else None
def getDoingComments(self, owner_id, doing_id):
url = 'http://status.renren.com/feedcommentretrieve.do'
r = self.post(url, {
'doingId': doing_id,
'source': doing_id,
'owner': owner_id,
't': 3
})
return r.json()['replyList']
def getCommentById(self, owner_id, doing_id, comment_id):
comments = self.getDoingComments(owner_id, doing_id)
comment = filter(lambda comment: comment['id'] == int(comment_id), comments)
return comment[0] if comment else None
def addComment(self, data):
return {
'status': self.addStatusComment,
'album' : self.addAlbumComment,
'photo' : self.addPhotoComment,
'blog' : self.addBlogComment,
'share' : self.addShareComment,
'gossip': self.addGossip
}[data['type']](data)
def sendComment(self, url, payloads):
r = self.post(url, payloads)
r.raise_for_status()
try:
return r.json()
except:
return { 'code': 0 }
    # comment on a status update
def addStatusComment(self, data):
url = 'http://status.renren.com/feedcommentreply.do'
payloads = {
't': 3,
'rpLayer': 0,
'source': data['source_id'],
'owner': data['owner_id'],
'c': data['message']
}
if data.get('reply_id', None):
payloads.update({
'rpLayer': 1,
'replyTo': data['author_id'],
'replyName': data['author_name'],
'secondaryReplyId': data['reply_id'],
'c': '回复%s:%s' % (data['author_name'].encode('utf-8'), data['message'])
})
return self.sendComment(url, payloads)
    # reply to a message-board ("gossip") entry
def addGossip(self, data):
url = 'http://gossip.renren.com/gossip.do'
payloads = {
'id': data['owner_id'],
'only_to_me': 1,
'mode': 'conversation',
'cc': data['author_id'],
'body': data['message'],
'ref':'http://gossip.renren.com/getgossiplist.do'
}
return self.sendComment(url, payloads)
    # reply to a share
def addShareComment(self, data):
url = 'http://share.renren.com/share/addComment.do'
if data.get('reply_id', None):
            body = '回复%s:%s' % (data['author_name'].encode('utf-8'), data['message'])
else:
body = data['message']
payloads = {
'comment': body,
'shareId' : data['source_id'],
'shareOwner': data['owner_id'],
'replyToCommentId': data.get('reply_id', 0),
'repetNo' : data.get('author_id', 0)
}
return self.sendComment(url, payloads)
    # reply to a blog post
def addBlogComment(self, data):
url = 'http://blog.renren.com/PostComment.do'
payloads = {
'body': '回复%s:%s' % (data['author_name'].encode('utf-8'), data['message']),
'feedComment': 'true',
'guestName': '小黄鸡',
'id' : data['source_id'],
'only_to_me': 0,
'owner': data['owner_id'],
'replyCommentId': data['reply_id'],
'to': data['author_id']
}
return self.sendComment(url, payloads)
    # reply to an album
def addAlbumComment(self, data):
url = 'http://photo.renren.com/photo/%d/album-%d/comment' % (data['owner_id'], data['source_id'])
payloads = {
'id': data['source_id'],
'only_to_me' : 'false',
'body': '回复%s:%s' % (data['author_name'].encode('utf-8'), data['message']),
'feedComment' : 'true',
'owner' : data['owner_id'],
'replyCommentId' : data['reply_id'],
'to' : data['author_id']
}
return self.sendComment(url, payloads)
def addPhotoComment(self, data):
url = 'http://photo.renren.com/photo/%d/photo-%d/comment' % (data['owner_id'], data['source_id'])
if 'author_name' in data:
            body = '回复%s:%s' % (data['author_name'].encode('utf-8'), data['message'])
else:
body = data['message']
payloads = {
'guestName': '小黄鸡',
'feedComment' : 'true',
'body': body,
'owner' : data['owner_id'],
'realWhisper':'false',
'replyCommentId' : data.get('reply_id', 0),
'to' : data.get('author_id', 0)
}
return self.sendComment(url, payloads)
    # visit someone's profile page
def visit(self, uid):
self.get('http://www.renren.com/' + str(uid) + '/profile')
    # search the latest status updates by keyword (site-wide)
def searchStatus(self, keyword, max_length=20):
url = 'http://browse.renren.com/s/status?offset=0&sort=1&range=0&q=%s&l=%d' % (keyword, max_length)
r = self.session.get(url, timeout=5)
status_elements = PyQuery(r.text)('.list_status .status_content')
id_pattern = re.compile("forwardDoing\('(\d+)','(\d+)'\)")
results = []
for index, _ in enumerate(status_elements):
status_element = status_elements.eq(index)
            # skip reposted statuses
if status_element('.status_root_msg'):
continue
status_element = status_element('.status_content_footer')
status_time = status_element('span').text()
m = id_pattern.search(status_element('.share_status').attr('onclick'))
status_id, user_id = m.groups()
results.append( (int(user_id), int(status_id), status_time) )
return results
if __name__ == '__main__':
renren = RenRen()
renren.login('email', 'password')
info = renren.getUserInfo()
    print('hello %s' % info['hostname'])
    print(renren.searchStatus('么么哒'))
|
BigRocky/xiaohuangji
|
renren.py
|
Python
|
mit
| 11,873
|
[
"VisIt"
] |
1573638c38a8ff64727830960bdebcf658f4a7f7e92120160c6992fd023385c1
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
import pyro
import pyro.distributions as dist
from pyro.contrib.gp.likelihoods.likelihood import Likelihood
class Poisson(Likelihood):
"""
Implementation of Poisson likelihood, which is used for count data.
Poisson likelihood uses the :class:`~pyro.distributions.Poisson`
distribution, so the output of ``response_function`` should be positive.
By default, we use :func:`torch.exp` as response function, corresponding
to a log-Gaussian Cox process.
:param callable response_function: A mapping to positive real numbers.
"""
def __init__(self, response_function=None):
super().__init__()
self.response_function = (
torch.exp if response_function is None else response_function
)
def forward(self, f_loc, f_var, y=None):
r"""
Samples :math:`y` given :math:`f_{loc}`, :math:`f_{var}` according to
        .. math:: f & \sim \text{Normal}(f_{loc}, f_{var}),\\
            y & \sim \text{Poisson}(\exp(f)).
.. note:: The log likelihood is estimated using Monte Carlo with 1 sample of
:math:`f`.
:param torch.Tensor f_loc: Mean of latent function output.
:param torch.Tensor f_var: Variance of latent function output.
:param torch.Tensor y: Training output tensor.
:returns: a tensor sampled from likelihood
:rtype: torch.Tensor
"""
# calculates Monte Carlo estimate for E_q(f) [logp(y | f)]
f = dist.Normal(f_loc, f_var.sqrt())()
f_res = self.response_function(f)
y_dist = dist.Poisson(f_res)
if y is not None:
y_dist = y_dist.expand_by(y.shape[: -f_res.dim()]).to_event(y.dim())
return pyro.sample(self._pyro_get_fullname("y"), y_dist, obs=y)
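# A minimal usage sketch (an addition, assuming the default response function
# and illustrative tensor shapes; Likelihood inherits from nn.Module, so
# calling the instance dispatches to forward()):
#
#     lik = Poisson()
#     f_loc, f_var = torch.zeros(5), torch.ones(5)
#     y = lik(f_loc, f_var)  # one Monte Carlo draw of counts, shape (5,)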
|
uber/pyro
|
pyro/contrib/gp/likelihoods/poisson.py
|
Python
|
apache-2.0
| 1,884
|
[
"Gaussian"
] |
5c130a00cc22c50d705e8dd63334290b7f9febec39edec190dbf3bbce85371df
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawLoops(myscreen, loops, loopcolor):
nloop = 0
for lop in loops:
n = 0
N = len(lop)
first_point=ocl.Point(-1,-1,5)
previous=ocl.Point(-1,-1,5)
for p in lop:
if n==0: # don't draw anything on the first iteration
previous=p
first_point = p
elif n== (N-1): # the last point
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) ) # the normal line
# and a line from p to the first point
myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=loopcolor) )
else:
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) )
previous=p
n=n+1
print("rendered loop ",nloop, " with ", len(lop), " points")
nloop = nloop+1
def getWaterline(s, cutter, zh, sampling):
wl = ocl.Waterline()
#wl.setThreads(1) # single thread for easier debug
wl.setSTL(s)
wl.setCutter(cutter)
wl.setZ(zh)
wl.setSampling(sampling)
wl.run()
loops = wl.getLoops()
return loops
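# Usage sketch (an assumption, mirroring the calls in __main__ below):
#   loops = getWaterline(stl_surf, cutter, z_height, sampling)
# returns a list of closed loops, each a list of ocl.Point lying at z_height.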
def getPathsY(s,cutter,sampling,y):
#apdc = ocl.PathDropCutter()
apdc = ocl.AdaptivePathDropCutter()
apdc.setSTL(s)
apdc.setCutter(cutter)
apdc.setZ( -20 )
apdc.setSampling(sampling)
apdc.setMinSampling(sampling/700)
path = ocl.Path()
p1 = ocl.Point(-1.52*cutter.getDiameter() , y,-111) # start-point of line
p2 = ocl.Point(+1.52*cutter.getDiameter(), y,-111) # end-point of line
l = ocl.Line(p1,p2) # line-object
path.append( l )
apdc.setPath( path )
apdc.run()
return apdc.getCLPoints()
def getPathsX(s,cutter,sampling,x):
#apdc = ocl.PathDropCutter()
apdc = ocl.AdaptivePathDropCutter()
apdc.setSTL(s)
apdc.setCutter(cutter)
apdc.setZ( -20 )
apdc.setSampling(sampling)
apdc.setMinSampling(sampling/700)
path = ocl.Path()
p1 = ocl.Point(x, -1.52*cutter.getDiameter() , -111) # start-point of line
p2 = ocl.Point(x, +1.52*cutter.getDiameter(), -111) # end-point of line
l = ocl.Line(p1,p2) # line-object
path.append( l )
apdc.setPath( path )
apdc.run()
return apdc.getCLPoints()
if __name__ == "__main__":
print(ocl.version()) # revision()
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../stl/demo.stl")
#stl = camvtk.STLSurf("../stl/30sphere.stl")
#myscreen.addActor(stl)
base=0.1
tip=10
a=ocl.Point(base,0,-tip)
myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)));
b=ocl.Point(-base,0,-tip)
myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)));
c=ocl.Point(0,0,0)
myscreen.addActor( camvtk.Point(center=(c.x,c.y,c.z), color=(1,0,1)));
#myscreen.addActor( camvtk.Line(p1=(1,0,0),p2=(0,0,0.3)) )
#myscreen.addActor( camvtk.Line(p1=(0,0,0.3),p2=(0,1,0)) )
#myscreen.addActor( camvtk.Line(p1=(1,0,0),p2=(0,1,0)) )
t = ocl.Triangle(a,b,c)
s = ocl.STLSurf()
s.addTriangle(t)
print("STL surface read,", s.size(), "triangles")
Nwaterlines = 40
zh=[-0.15*x for x in range(Nwaterlines)]
#zh=[15]
diam = 3.01
length = 50
loops = []
sampling = 0.1
#cutter = ocl.CylCutter( diam , length )
#cutter = ocl.BallCutter( diam , length )
#cutter = ocl.BullCutter( diam , diam/5, length )
#cutter = ocl.ConeCutter(diam, math.pi/3, length)
#cutter = ocl.CylConeCutter(diam/float(3),diam,math.pi/float(9))
#cutter = ocl.BallConeCutter(diam/float(2.3),diam,math.pi/float(5))
#cutter = ocl.BullConeCutter(diam/1.5, diam/10, diam, math.pi/10)
cutter = ocl.ConeConeCutter(diam/2,math.pi/3,diam,math.pi/6)
print(cutter)
#raw_input("Press Enter to terminate")
ptsy_all = []
ptsx_all = []
yvals=[]
Nmax=15
for i in range(Nmax):
yvals.append( diam* float(i)/float(Nmax) )
yvals.append( -diam* float(i)/float(Nmax) )
for y in yvals: #[diam*0.4, diam*0.2, 0, -diam*0.2,diam*(-0.4)]:
ptsy = getPathsY(s,cutter,sampling, y)
ptsx = getPathsX(s,cutter,sampling, y)
ptsy_all.append(ptsy)
ptsx_all.append(ptsx)
#print " got ",len(pts)," cl-points"
#for p in pts:
# print(p.x," ",p.y," ",p.z)
#exit()
loops = []
for z in zh:
z_loops = getWaterline(s, cutter, z, sampling)
for l in z_loops:
loops.append(l)
#for l in line:
#drawLoops(myscreen, line, camvtk.cyan)
#for l in cutter_loops:
# loops.append(l)
print("All waterlines done. Got", len(loops)," loops in total.")
# draw the loops
drawLoops(myscreen, loops, camvtk.cyan)
drawLoops(myscreen, ptsy_all, camvtk.pink)
drawLoops(myscreen, ptsx_all, camvtk.lblue)
print("done.")
myscreen.camera.SetPosition(15, 13, 7)
myscreen.camera.SetFocalPoint(5, 5, 0)
camvtk.drawArrows(myscreen,center=(0,0,3))
camvtk.drawOCLtext(myscreen)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
aewallin/opencamlib
|
examples/python/cutter_shapes.py
|
Python
|
lgpl-2.1
| 5,396
|
[
"VTK"
] |
a5c9181a59f0c5d3ffdd29e242d3ca6c55b6ba9b0d7f248abb7ad667315c0a88
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .stationary import Stationary
from .psi_comp import PSICOMP_RBF, PSICOMP_RBF_GPU
from ...core import Param
from paramz.transformations import Logexp
class RBF(Stationary):
"""
Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel:
.. math::
k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg)
"""
_support_GPU = True
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf', useGPU=False, inv_l=False):
super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=useGPU)
if self.useGPU:
self.psicomp = PSICOMP_RBF_GPU()
else:
self.psicomp = PSICOMP_RBF()
self.use_invLengthscale = inv_l
if inv_l:
self.unlink_parameter(self.lengthscale)
self.inv_l = Param('inv_lengthscale',1./self.lengthscale**2, Logexp())
self.link_parameter(self.inv_l)
def K_of_r(self, r):
return self.variance * np.exp(-0.5 * r**2)
def dK_dr(self, r):
return -r*self.K_of_r(r)
def dK2_drdr(self, r):
return (r**2-1)*self.K_of_r(r)
def dK2_drdr_diag(self):
return -self.variance # as the diagonal of r is always filled with zeros
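    # Sanity check of the derivatives above: with k(r) = sigma^2 * exp(-r^2/2),
    # dk/dr = -r*k(r) and d^2k/dr^2 = (r^2 - 1)*k(r); at r = 0 the latter is
    # -sigma^2, which is exactly what dK2_drdr_diag returns.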
def __getstate__(self):
dc = super(RBF, self).__getstate__()
if self.useGPU:
dc['psicomp'] = PSICOMP_RBF()
dc['useGPU'] = False
return dc
def __setstate__(self, state):
self.use_invLengthscale = False
return super(RBF, self).__setstate__(state)
def spectrum(self, omega):
assert self.input_dim == 1 #TODO: higher dim spectra?
        # S(w) = sigma^2 * sqrt(2*pi) * l * exp(-l^2 * w^2 / 2)
        return self.variance*np.sqrt(2*np.pi)*self.lengthscale*np.exp(-self.lengthscale**2*omega**2/2)
def parameters_changed(self):
if self.use_invLengthscale: self.lengthscale[:] = 1./np.sqrt(self.inv_l+1e-200)
super(RBF,self).parameters_changed()
#---------------------------------------#
# PSI statistics #
#---------------------------------------#
def psi0(self, Z, variational_posterior):
return self.psicomp.psicomputations(self, Z, variational_posterior)[0]
def psi1(self, Z, variational_posterior):
return self.psicomp.psicomputations(self, Z, variational_posterior)[1]
def psi2(self, Z, variational_posterior):
return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=False)[2]
def psi2n(self, Z, variational_posterior):
return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=True)[2]
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
dL_dvar, dL_dlengscale = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[:2]
self.variance.gradient = dL_dvar
self.lengthscale.gradient = dL_dlengscale
if self.use_invLengthscale:
self.inv_l.gradient = dL_dlengscale*(self.lengthscale**3/-2.)
def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2]
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[3:]
def update_gradients_diag(self, dL_dKdiag, X):
super(RBF,self).update_gradients_diag(dL_dKdiag, X)
if self.use_invLengthscale: self.inv_l.gradient =self.lengthscale.gradient*(self.lengthscale**3/-2.)
def update_gradients_full(self, dL_dK, X, X2=None):
super(RBF,self).update_gradients_full(dL_dK, X, X2)
if self.use_invLengthscale: self.inv_l.gradient =self.lengthscale.gradient*(self.lengthscale**3/-2.)
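# A minimal usage sketch (an addition, not from the GPy sources): build an RBF
# kernel and evaluate its Gram matrix on random inputs.
#
#     import numpy as np
#     import GPy
#     X = np.random.randn(10, 2)
#     k = GPy.kern.RBF(input_dim=2, variance=1.0, lengthscale=0.5)
#     K = k.K(X)  # (10, 10) covariance matrix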
|
avehtari/GPy
|
GPy/kern/src/rbf.py
|
Python
|
bsd-3-clause
| 4,138
|
[
"Gaussian"
] |
da4ab5bfc343709f73640806767602d5510520456bcafcd71146d2d37e2100ff
|
import analysis.event
import analysis.beamline
import analysis.pixel_detector
import analysis.hitfinding
import utils.cxiwriter
import simulation.ptycho
import ipc.mpi
import numpy as np
import os,sys
import time
import h5py
# Physical constants (from http://physics.nist.gov/)
h = 6.626070040e-34 # [J s]
c = np.double(2.99792458e8) # [m/s]
hev = np.double(4.135667662e-15) # [eV s]
ev2J = np.double(1.60217657e-19) # [J/eV]
# Simulate a simple ptychography experiment
photon_energy_keV = 6 # keV
photon_energy_J = np.double(photon_energy_keV * 1000.) * ev2J # J
wavelength = (hev*c) / np.double(photon_energy_keV * 1000.) # m
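# For 6 keV photons this evaluates to roughly 2.07e-10 m (about 2.07 Angstrom).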
focus_diameter = 500e-9 # m
pulse_energy = 4e-3 # J
transmission = 1e-8
det_pixelsize = 110e-6 # m
det_distance = 2.4 # m
det_sidelength = 20 # px
det_aduphoton = 30 # Nr. of ADUs per photon
scan_exposure = 1 # Shots (exposures) per position
scan_x, scan_y = (30,30) # px
scan_step = 400e-9 # m
sample_size = 80e-6 # m
sample_thickness = 200e-9 # m
sample_material = 'gold'
corner_position = [det_sidelength/2 * det_pixelsize, det_sidelength/2 * det_pixelsize, det_distance]
sim = simulation.ptycho.Simulation()
sim.setSource(wavelength=wavelength, focus_diameter=focus_diameter,
pulse_energy=pulse_energy, transmission=transmission)
sim.setDetector(pixelsize=det_pixelsize, nx=det_sidelength,
distance=det_distance, adus_per_photon=det_aduphoton)
sim.setScan(nperpos=scan_exposure, scanx=scan_x, scany=scan_y,
step=scan_step, start=(0, 0))
sim.setObject(sample='logo', size=sample_size,
thickness=sample_thickness, material=sample_material)
sim.setIllumination(shape='gaussian')
print "Simulating a scanning experiment, this might take a few seconds..."
sim.start()
state = {
'Facility': 'Dummy',
'Dummy': {
'Repetition Rate' : 100,
'Data Sources': {
'CCD': {
'data': lambda: sim.get_nextframe(),
'unit': 'ADU',
'type': 'photonPixelDetectors'
},
'position_x': {
'data': lambda: sim.get_position_x(),
'unit': 'm',
'type': 'simulation'
},
'position_y': {
'data': lambda: sim.get_position_y(),
'unit': 'm',
'type': 'simulation'
},
'position_z': {
'data': lambda: 0.,
'unit': 'm',
'type': 'simulation'
},
'end':{
'data': lambda: sim.get_end_of_scan(),
'unit':'',
'type': 'simulation'
}
}
}
}
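# Each entry under 'Data Sources' supplies a zero-argument callable that the
# backend evaluates once per event (a reading of the Dummy-facility convention
# used here, not stated explicitly in this file).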
if ipc.mpi.is_worker():
# Open a CXI file
filename = "test.cxi"
W = utils.cxiwriter.CXIWriter(filename, chunksize=100)
# This is the backbone we are going to use to extend the CXI file with data frames and translation vectors
extend_dict= {'entry_1':{'instrument_1':{'detector_1':{}},
'sample_1':{'geometry_1':{}}}}
def onEvent(evt):
    # Processing rate [Hz]
analysis.event.printProcessingRate()
# Translation vector
x = evt['simulation']['position_x'].data
y = evt['simulation']['position_y'].data
z = evt['simulation']['position_z'].data
translations = np.array([x,y,z])
# Add data frames to CXI file
D = extend_dict.copy()
D["entry_1"]["instrument_1"]["detector_1"]["data"] = evt['photonPixelDetectors']['CCD'].data
D["entry_1"]["sample_1"]["geometry_1"]["translation"] = translations
W.write_slice(D)
# Stop running at the end of the scan
if evt['simulation']['end'].data:
print "Reached the end of the scan"
end_of_run()
def end_of_run():
# Close CXI file
W.close()
if ipc.mpi.is_main_worker():
        # Reopen the CXI file to append information necessary
        # for ptychography datasets, see http://www.cxidb.org/cxi.html
f = h5py.File(filename, "r+")
# Already existing fields
entry_1 = f['entry_1']
instrument_1 = f['entry_1']['instrument_1']
detector_1 = f['entry_1']['instrument_1']['detector_1']
sample_1 = f['entry_1']['sample_1']
geometry_1 = f['entry_1']['sample_1']['geometry_1']
# Add new data fields
f.create_dataset("cxi_version",data=140)
source_1 = instrument_1.create_group("source_1")
source_1.create_dataset("energy", data=photon_energy_J) # in J
detector_1.create_dataset("distance", data=det_distance)
detector_1.create_dataset("x_pixel_size", data=det_pixelsize)
detector_1.create_dataset("y_pixel_size", data=det_pixelsize)
detector_1["translation"] = h5py.SoftLink('/entry_1/sample_1/geometry_1/translation')
detector_1.create_dataset("corner_position", data=corner_position)
data_1 = entry_1.create_group("data_1")
data_1["data"] = h5py.SoftLink('/entry_1/instrument_1/detector_1/data')
data_1["translation"] = h5py.SoftLink('/entry_1/sample_1/geometry_1/translation')
# These are optional data that should be provided (if known)
# ----------------------------------------------------------
source_1.create_dataset("illumination", data=sim.get_illumination())
#detector_1.create_dataset("Fillumination_mask", data=illumination_intensities_mask)
#detector_1.create_dataset("solution", data=sim.obj)
#detector_1.create_dataset("initial_image",data=initial_image)
# Close CXI file and exit
f.close()
|
SPIhub/hummingbird
|
examples/ptychography/write2cxi.py
|
Python
|
bsd-2-clause
| 5,705
|
[
"Gaussian"
] |
2289342823136496158ce99b5b4139695dc1753f9ceedbf66d878ebbf42e1cde
|
"""
Command Line Parameters for creating the Replication transformations Script
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOMSVOForGroup
class Params(object):
"""Parameter Object"""
def __init__(self):
self.targetSE = []
self.sourceSE = ""
self.groupSize = 1
self.groupName = None
self.extraname = ""
self.flavour = "Replication"
self.plugin = "Broadcast"
self.metaValues = []
self.metaKey = None
self.extraData = {}
self.errorMessages = []
self.enable = False
def setMetaValues(self, values):
if isinstance(values, list):
self.metaValues = values
else:
            self.metaValues = values.split(",")
return S_OK()
def setMetaKey(self, key):
self.metaKey = key
return S_OK()
def setMetadata(self, metadata):
for pair in metadata.split(","):
splitPair = pair.strip().split(":")
if len(splitPair) == 2:
self.extraData[splitPair[0]] = splitPair[1].strip()
return S_OK()
def setSourceSE(self, sourceSE):
self.sourceSE = [sSE.strip() for sSE in sourceSE.split(",")]
return S_OK()
def setTransFlavour(self, flavour):
self.flavour = flavour
return S_OK()
def setTargetSE(self, targetSE):
self.targetSE = [tSE.strip() for tSE in targetSE.split(",")]
return S_OK()
def setExtraname(self, extraname):
self.extraname = extraname
return S_OK()
def setGroupSize(self, size):
try:
self.groupSize = int(size)
except ValueError:
return S_ERROR("Expected integer for groupsize")
return S_OK()
def setGroupName(self, name):
self.groupName = name
return S_OK()
def setPlugin(self, plugin):
self.plugin = plugin
return S_OK()
def setEnable(self, _):
self.enable = True
return S_OK()
def registerSwitches(self, script):
"""register command line arguments
        :param script: DIRAC.Core.Base Script class
:type script: DIRAC.Core.Base.Script
"""
script.registerSwitch("G:", "GroupSize=", "Number of Files per transformation task", self.setGroupSize)
script.registerSwitch("R:", "GroupName=", "TransformationGroup Name", self.setGroupName)
script.registerSwitch("S:", "SourceSEs=", "SourceSE(s) to use, comma separated list", self.setSourceSE)
script.registerSwitch("N:", "Extraname=", "String to append to transformation name", self.setExtraname)
script.registerSwitch("P:", "Plugin=", "Plugin to use for transformation", self.setPlugin)
script.registerSwitch("T:", "Flavour=", "Flavour to create: Replication or Moving", self.setTransFlavour)
script.registerSwitch("K:", "MetaKey=", "Meta Key to use: TransformationID", self.setMetaKey)
script.registerSwitch("M:", "MetaData=", "MetaData to use Key/Value Pairs: 'DataType:REC,'", self.setMetadata)
script.registerSwitch("x", "Enable", "Enable the transformation creation, otherwise dry-run", self.setEnable)
useMessage = []
useMessage.append("Create one replication transformation for each MetaValue given")
useMessage.append("Is running in dry-run mode, unless enabled with -x")
useMessage.append("MetaValue and TargetSEs can be comma separated lists")
useMessage.append("Usage:")
useMessage.append(
" %s <MetaValue1[,val2,val3]> <TargetSEs> [-G<Files>] [-S<SourceSEs>]"
"[-N<ExtraName>] [-T<Type>] [-M<Key>] [-K...] -x" % script.scriptName
)
script.setUsageMessage("\n".join(useMessage))
def checkSettings(self, script, checkArguments=True):
"""check if all required parameters are set, print error message and return S_ERROR if not
:param script: The script object
:type script: DIRAC.Core.Base.Script
:param bool checkArguments: if false do not check for the correct number of arguments, should only be
changed if using derived class
"""
if checkArguments:
args = script.getPositionalArgs()
if len(args) == 2:
self.setMetaValues(args[0])
self.setTargetSE(args[1])
else:
self.errorMessages.append("ERROR: Wrong number of arguments")
self._checkProxy()
# get default metadata key:
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
if self.metaKey is None:
self.metaKey = Operations().getValue("Transformations/TransfIDMeta", "TransformationID")
if not self.errorMessages:
return S_OK()
gLogger.error("\n".join(self.errorMessages))
script.showHelp()
return S_ERROR()
def _checkProxy(self):
"""checks if the proxy has the ProductionManagement property and belongs to a VO"""
proxyInfo = getProxyInfo()
if not proxyInfo["OK"]:
self.errorMessages.append("ERROR: No Proxy present")
return False
proxyValues = proxyInfo.get("Value", {})
group = proxyValues.get("group", "")
vomsvo = getVOMSVOForGroup(group)
if not vomsvo:
self.errorMessages.append("ERROR: ProxyGroup not associated to VOMS VO, get a different proxy")
return False
groupProperties = proxyValues.get("groupProperties", [])
if groupProperties:
if "ProductionManagement" not in groupProperties:
self.errorMessages.append(
"ERROR: Not allowed to create production, you need a ProductionManagement proxy."
)
return False
else:
self.errorMessages.append("ERROR: Could not determine Proxy properties, you do not have the right proxy.")
return False
return True
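# Illustrative wiring (a sketch, not part of this module): a command-line
# script would typically use this class roughly as follows.
#
#     from DIRAC.Core.Base import Script
#     params = Params()
#     params.registerSwitches(Script)
#     Script.parseCommandLine()
#     if not params.checkSettings(Script)['OK']:
#         ...  # checkSettings already printed the errors and the help text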
|
ic-hep/DIRAC
|
src/DIRAC/TransformationSystem/Utilities/ReplicationCLIParameters.py
|
Python
|
gpl-3.0
| 6,238
|
[
"DIRAC"
] |
7e945bd48a336ad9b5f70b059e60d010eb305af41431c71c8c1644840d8e9599
|
# coding: utf-8
from __future__ import unicode_literals, division
import unittest
import os
import shutil
import glob
from monty.tempfile import ScratchDir
from monty.os import cd
import multiprocessing
from custodian.vasp.jobs import VaspJob, VaspNEBJob, GenerateVaspInputJob
from pymatgen.io.vasp import Incar, Kpoints, Poscar
import pymatgen
"""
Created on Jun 1, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jun 1, 2012"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
pymatgen.SETTINGS["PMG_VASP_PSP_DIR"] = os.path.abspath(test_dir)
class VaspJobTest(unittest.TestCase):
def test_to_from_dict(self):
v = VaspJob("hello")
v2 = VaspJob.from_dict(v.as_dict())
self.assertEqual(type(v2), type(v))
self.assertEqual(v2.vasp_cmd, "hello")
def test_setup(self):
with cd(test_dir):
with ScratchDir('.', copy_from_current_on_enter=True) as d:
v = VaspJob("hello")
v.setup()
incar = Incar.from_file("INCAR")
count = multiprocessing.cpu_count()
                # NPAR is only set above 1 when more than 3 CPUs are available
if count > 3:
self.assertGreater(incar["NPAR"], 1)
def test_postprocess(self):
with cd(os.path.join(test_dir, 'postprocess')):
with ScratchDir('.', copy_from_current_on_enter=True) as d:
shutil.copy('INCAR', 'INCAR.backup')
v = VaspJob("hello", final=False, suffix=".test", copy_magmom=True)
v.postprocess()
incar = Incar.from_file("INCAR")
incar_prev = Incar.from_file("INCAR.test")
for f in ['INCAR', 'KPOINTS', 'CONTCAR', 'OSZICAR', 'OUTCAR',
'POSCAR', 'vasprun.xml']:
self.assertTrue(os.path.isfile('{}.test'.format(f)))
os.remove('{}.test'.format(f))
shutil.move('INCAR.backup', 'INCAR')
self.assertAlmostEqual(incar['MAGMOM'], [3.007, 1.397, -0.189, -0.189])
self.assertAlmostEqual(incar_prev["MAGMOM"], [5, -5, 0.6, 0.6])
def test_continue(self):
# Test the continuation functionality
with cd(os.path.join(test_dir, 'postprocess')):
# Test default functionality
with ScratchDir('.', copy_from_current_on_enter=True) as d:
v = VaspJob("hello", auto_continue=True)
v.setup()
self.assertTrue(os.path.exists("continue.json"), "continue.json not created")
v.setup()
self.assertEqual(Poscar.from_file("CONTCAR").structure,
Poscar.from_file("POSCAR").structure)
self.assertEqual(Incar.from_file('INCAR')['ISTART'], 1)
v.postprocess()
self.assertFalse(os.path.exists("continue.json"),
"continue.json not deleted after postprocessing")
# Test explicit action functionality
with ScratchDir('.', copy_from_current_on_enter=True) as d:
v = VaspJob("hello", auto_continue=[{"dict": "INCAR",
"action": {"_set": {"ISTART": 1}}}])
v.setup()
v.setup()
self.assertNotEqual(Poscar.from_file("CONTCAR").structure,
Poscar.from_file("POSCAR").structure)
self.assertEqual(Incar.from_file('INCAR')['ISTART'], 1)
v.postprocess()
def test_static(self):
# Just a basic test of init.
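        # double_relaxation_run is expected to build a list of two chained
        # relaxation VaspJobs; here we only check that construction succeeds.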
VaspJob.double_relaxation_run(["vasp"])
class VaspNEBJobTest(unittest.TestCase):
def test_to_from_dict(self):
v = VaspNEBJob("hello")
v2 = VaspNEBJob.from_dict(v.as_dict())
self.assertEqual(type(v2), type(v))
self.assertEqual(v2.vasp_cmd, "hello")
def test_setup(self):
with cd(os.path.join(test_dir, 'setup_neb')):
with ScratchDir('.', copy_from_current_on_enter=True) as d:
v = VaspNEBJob("hello", half_kpts=True)
v.setup()
incar = Incar.from_file("INCAR")
count = multiprocessing.cpu_count()
if count > 3:
self.assertGreater(incar["NPAR"], 1)
kpt = Kpoints.from_file("KPOINTS")
kpt_pre = Kpoints.from_file("KPOINTS.orig")
self.assertEqual(kpt_pre.style.name, "Monkhorst")
self.assertEqual(kpt.style.name, "Gamma")
def test_postprocess(self):
neb_outputs = ['INCAR', 'KPOINTS', 'POTCAR', 'vasprun.xml']
neb_sub_outputs = ['CHG', 'CHGCAR', 'CONTCAR', 'DOSCAR',
'EIGENVAL', 'IBZKPT', 'PCDAT', 'POSCAR',
'REPORT', 'PROCAR', 'OSZICAR', 'OUTCAR',
'WAVECAR', 'XDATCAR']
with cd(os.path.join(test_dir, 'postprocess_neb')):
postprocess_neb = os.path.abspath(".")
v = VaspNEBJob("hello", final=False, suffix=".test")
v.postprocess()
for f in neb_outputs:
self.assertTrue(os.path.isfile('{}.test'.format(f)))
os.remove('{}.test'.format(f))
sub_folders = glob.glob("[0-9][0-9]")
for sf in sub_folders:
os.chdir(os.path.join(postprocess_neb, sf))
for f in neb_sub_outputs:
if os.path.exists(f):
self.assertTrue(os.path.isfile('{}.test'.format(f)))
os.remove('{}.test'.format(f))
class GenerateVaspInputJobTest(unittest.TestCase):
def test_run(self):
with ScratchDir(".") as d:
for f in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
shutil.copy(os.path.join('..', test_dir, f), f)
oldincar = Incar.from_file("INCAR")
v = GenerateVaspInputJob("pymatgen.io.vasp.sets.MPNonSCFSet",
contcar_only=False)
v.run()
incar = Incar.from_file("INCAR")
self.assertEqual(incar["ICHARG"], 11)
self.assertEqual(oldincar["ICHARG"], 1)
kpoints = Kpoints.from_file("KPOINTS")
self.assertEqual(str(kpoints.style), "Reciprocal")
if __name__ == "__main__":
unittest.main()
|
specter119/custodian
|
custodian/vasp/tests/test_jobs.py
|
Python
|
mit
| 6,616
|
[
"VASP",
"pymatgen"
] |
8f6fdc54392cb5df44032514cd27d8ed752477da500a5f0f71f76bdb63fc356d
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides functions to interface with scipy.sparse."""
from __future__ import absolute_import
from future.utils import iteritems
from functools import reduce
import itertools
import numpy
import numpy.linalg
import scipy
import scipy.sparse
import scipy.sparse.linalg
from openfermion.config import EQ_TOLERANCE
from openfermion.ops import (FermionOperator, QuadraticHamiltonian,
QubitOperator, BosonOperator,
QuadOperator, up_index, down_index)
from openfermion.utils import (count_qubits, gaussian_state_preparation_circuit,
is_hermitian,
slater_determinant_preparation_circuit)
# Make global definitions.
identity_csc = scipy.sparse.identity(2, format='csc', dtype=complex)
pauli_x_csc = scipy.sparse.csc_matrix([[0., 1.], [1., 0.]], dtype=complex)
pauli_y_csc = scipy.sparse.csc_matrix([[0., -1.j], [1.j, 0.]], dtype=complex)
pauli_z_csc = scipy.sparse.csc_matrix([[1., 0.], [0., -1.]], dtype=complex)
q_raise_csc = (pauli_x_csc - 1.j * pauli_y_csc) / 2.
q_lower_csc = (pauli_x_csc + 1.j * pauli_y_csc) / 2.
pauli_matrix_map = {'I': identity_csc, 'X': pauli_x_csc,
'Y': pauli_y_csc, 'Z': pauli_z_csc}
def wrapped_kronecker(operator_1, operator_2):
"""Return the Kronecker product of two sparse.csc_matrix operators."""
return scipy.sparse.kron(operator_1, operator_2, 'csc')
def kronecker_operators(*args):
"""Return the Kronecker product of multiple sparse.csc_matrix operators."""
return reduce(wrapped_kronecker, *args)
def jordan_wigner_ladder_sparse(n_qubits, tensor_factor, ladder_type):
r"""Make a matrix representation of a fermion ladder operator.
Operators are mapped as follows:
a_j^\dagger -> Z_0 .. Z_{j-1} (X_j - iY_j) / 2
a_j -> Z_0 .. Z_{j-1} (X_j + iY_j) / 2
    Args:
        n_qubits(int): Number of qubits in the system Hilbert space.
        tensor_factor(int): The orbital index on which the ladder
            operator acts.
        ladder_type(int): 1 for a raising operator, 0 for a lowering one.
Returns:
The corresponding Scipy sparse matrix.
"""
parities = tensor_factor * [pauli_z_csc]
identities = [scipy.sparse.identity(
2 ** (n_qubits - tensor_factor - 1), dtype=complex, format='csc')]
if ladder_type:
operator = kronecker_operators(parities + [q_raise_csc] + identities)
else:
operator = kronecker_operators(parities + [q_lower_csc] + identities)
return operator
def jordan_wigner_sparse(fermion_operator, n_qubits=None):
r"""Initialize a Scipy sparse matrix from a FermionOperator.
Operators are mapped as follows:
a_j^\dagger -> Z_0 .. Z_{j-1} (X_j - iY_j) / 2
a_j -> Z_0 .. Z_{j-1} (X_j + iY_j) / 2
Args:
fermion_operator(FermionOperator): instance of the FermionOperator
class.
n_qubits(int): Number of qubits.
Returns:
The corresponding Scipy sparse matrix.
"""
if n_qubits is None:
n_qubits = count_qubits(fermion_operator)
# Create a list of raising and lowering operators for each orbital.
jw_operators = []
for tensor_factor in range(n_qubits):
jw_operators += [(jordan_wigner_ladder_sparse(n_qubits,
tensor_factor,
0),
jordan_wigner_ladder_sparse(n_qubits,
tensor_factor,
1))]
# Construct the Scipy sparse matrix.
n_hilbert = 2 ** n_qubits
values_list = [[]]
row_list = [[]]
column_list = [[]]
for term in fermion_operator.terms:
coefficient = fermion_operator.terms[term]
sparse_matrix = coefficient * scipy.sparse.identity(
2 ** n_qubits, dtype=complex, format='csc')
for ladder_operator in term:
sparse_matrix = sparse_matrix * jw_operators[
ladder_operator[0]][ladder_operator[1]]
if coefficient:
# Extract triplets from sparse_term.
sparse_matrix = sparse_matrix.tocoo(copy=False)
values_list.append(sparse_matrix.data)
(row, column) = sparse_matrix.nonzero()
row_list.append(row)
column_list.append(column)
values_list = numpy.concatenate(values_list)
row_list = numpy.concatenate(row_list)
column_list = numpy.concatenate(column_list)
sparse_operator = scipy.sparse.coo_matrix((
values_list, (row_list, column_list)),
shape=(n_hilbert, n_hilbert)).tocsc(copy=False)
sparse_operator.eliminate_zeros()
return sparse_operator
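# Quick check (an addition): the number operator a^dagger_0 a_0 on a single
# mode maps to (I - Z_0)/2 under Jordan-Wigner.
#
#     op = jordan_wigner_sparse(FermionOperator('0^ 0'), n_qubits=1)
#     # op.toarray() -> [[0, 0], [0, 1]]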
def qubit_operator_sparse(qubit_operator, n_qubits=None):
"""Initialize a Scipy sparse matrix from a QubitOperator.
Args:
qubit_operator(QubitOperator): instance of the QubitOperator class.
n_qubits (int): Number of qubits.
Returns:
The corresponding Scipy sparse matrix.
"""
if n_qubits is None:
n_qubits = count_qubits(qubit_operator)
if n_qubits < count_qubits(qubit_operator):
raise ValueError('Invalid number of qubits specified.')
# Construct the Scipy sparse matrix.
n_hilbert = 2 ** n_qubits
values_list = [[]]
row_list = [[]]
column_list = [[]]
# Loop through the terms.
for qubit_term in qubit_operator.terms:
tensor_factor = 0
coefficient = qubit_operator.terms[qubit_term]
sparse_operators = [coefficient]
for pauli_operator in qubit_term:
# Grow space for missing identity operators.
if pauli_operator[0] > tensor_factor:
identity_qubits = pauli_operator[0] - tensor_factor
identity = scipy.sparse.identity(
2 ** identity_qubits, dtype=complex, format='csc')
sparse_operators += [identity]
# Add actual operator to the list.
sparse_operators += [pauli_matrix_map[pauli_operator[1]]]
tensor_factor = pauli_operator[0] + 1
# Grow space at end of string unless operator acted on final qubit.
if tensor_factor < n_qubits or not qubit_term:
identity_qubits = n_qubits - tensor_factor
identity = scipy.sparse.identity(
2 ** identity_qubits, dtype=complex, format='csc')
sparse_operators += [identity]
# Extract triplets from sparse_term.
sparse_matrix = kronecker_operators(sparse_operators)
values_list.append(sparse_matrix.tocoo(copy=False).data)
        # scipy's nonzero() returns (row_indices, column_indices)
        (row, column) = sparse_matrix.nonzero()
column_list.append(column)
row_list.append(row)
# Create sparse operator.
values_list = numpy.concatenate(values_list)
row_list = numpy.concatenate(row_list)
column_list = numpy.concatenate(column_list)
sparse_operator = scipy.sparse.coo_matrix((
values_list, (row_list, column_list)),
shape=(n_hilbert, n_hilbert)).tocsc(copy=False)
sparse_operator.eliminate_zeros()
return sparse_operator
def get_linear_qubit_operator_diagonal(qubit_operator, n_qubits=None):
""" Return a linear operator's diagonal elements.
The main motivation is to use it for Davidson's algorithm, to find out the
lowest n eigenvalues and associated eigenvectors.
Qubit terms with X or Y operators will contribute nothing to the diagonal
elements, while I or Z will contribute a factor of 1 or -1 together with
the coefficient.
    Args:
        qubit_operator(QubitOperator): A qubit operator.
        n_qubits(int): Number of qubits; defaults to the qubit count of
            the operator.
Returns:
linear_operator_diagonal(numpy.ndarray): The diagonal elements for
LinearQubitOperator(qubit_operator).
"""
if n_qubits is None:
n_qubits = count_qubits(qubit_operator)
if n_qubits < count_qubits(qubit_operator):
raise ValueError('Invalid number of qubits specified.')
n_hilbert = 2 ** n_qubits
zeros_diagonal = numpy.zeros(n_hilbert)
ones_diagonal = numpy.ones(n_hilbert)
linear_operator_diagonal = zeros_diagonal
# Loop through the terms.
for qubit_term in qubit_operator.terms:
is_zero = False
tensor_factor = 0
vecs = [ones_diagonal]
for pauli_operator in qubit_term:
op = pauli_operator[1]
if op in ['X', 'Y']:
is_zero = True
break
# Split vector by half and half for each bit.
if pauli_operator[0] > tensor_factor:
vecs = [v for iter_v in vecs for v in numpy.split(
iter_v, 2 ** (pauli_operator[0] - tensor_factor))]
vec_pairs = [numpy.split(v, 2) for v in vecs]
vecs = [v for vp in vec_pairs for v in (vp[0], -vp[1])]
tensor_factor = pauli_operator[0] + 1
if not is_zero:
linear_operator_diagonal += (qubit_operator.terms[qubit_term] *
numpy.concatenate(vecs))
return linear_operator_diagonal
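# Worked example (an addition): for QubitOperator('Z0') on one qubit the loop
# above yields the diagonal [1., -1.]; any term containing X or Y is skipped.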
def jw_configuration_state(occupied_orbitals, n_qubits):
"""Function to produce a basis state in the occupation number basis.
Args:
occupied_orbitals(list): A list of integers representing the indices
of the occupied orbitals in the desired basis state
n_qubits(int): The total number of qubits
Returns:
basis_vector(sparse): The basis state as a sparse matrix
"""
one_index = sum(2 ** (n_qubits - 1 - i) for i in occupied_orbitals)
basis_vector = numpy.zeros(2 ** n_qubits, dtype=float)
basis_vector[one_index] = 1
return basis_vector
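# Example (an addition): occupied_orbitals=[0, 2] with n_qubits=4 sets bits of
# weight 2**3 and 2**1, so the single nonzero amplitude sits at index 10.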
def jw_hartree_fock_state(n_electrons, n_orbitals):
"""Function to produce Hartree-Fock state in JW representation."""
hartree_fock_state = jw_configuration_state(range(n_electrons),
n_orbitals)
return hartree_fock_state
def jw_number_indices(n_electrons, n_qubits):
"""Return the indices for n_electrons in n_qubits under JW encoding
Calculates the indices for all possible arrangements of n-electrons
within n-qubit orbitals when a Jordan-Wigner encoding is used.
Useful for restricting generic operators or vectors to a particular
particle number space when desired
Args:
n_electrons(int): Number of particles to restrict the operator to
n_qubits(int): Number of qubits defining the total state
Returns:
indices(list): List of indices in a 2^n length array that indicate
the indices of constant particle number within n_qubits
in a Jordan-Wigner encoding.
"""
occupations = itertools.combinations(range(n_qubits), n_electrons)
indices = [sum([2 ** n for n in occupation])
for occupation in occupations]
return indices
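# Example (an addition): jw_number_indices(1, 2) enumerates the one-particle
# occupations (0,) and (1,), giving indices [1, 2].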
def jw_sz_indices(sz_value, n_qubits, n_electrons=None,
up_index=up_index, down_index=down_index):
r"""Return the indices of basis vectors with fixed Sz under JW encoding.
The returned indices label computational basis vectors which lie within
the corresponding eigenspace of the Sz operator,
.. math::
\begin{align}
S^{z} = \frac{1}{2}\sum_{i = 1}^{n}(n_{i, \alpha} - n_{i, \beta})
\end{align}
Args:
sz_value(float): Desired Sz value. Should be an integer or
half-integer.
n_qubits(int): Number of qubits defining the total state
n_electrons(int, optional): Number of particles to restrict the
operator to, if such a restriction is desired
up_index (Callable, optional): Function that maps a spatial index
to the index of the corresponding up site
down_index (Callable, optional): Function that maps a spatial index
to the index of the corresponding down site
Returns:
indices(list): The list of indices
"""
if n_qubits % 2 != 0:
raise ValueError('Number of qubits must be even')
if not (2. * sz_value).is_integer():
raise ValueError('Sz value must be an integer or half-integer')
n_sites = n_qubits // 2
sz_integer = int(2. * sz_value)
indices = []
if n_electrons is not None:
# Particle number is fixed, so the number of spin-up electrons
# (as well as the number of spin-down electrons) is fixed
if ((n_electrons + sz_integer) % 2 != 0 or
n_electrons < abs(sz_integer)):
raise ValueError('The specified particle number and sz value are '
'incompatible.')
num_up = (n_electrons + sz_integer) // 2
num_down = n_electrons - num_up
up_occupations = itertools.combinations(range(n_sites), num_up)
down_occupations = list(
itertools.combinations(range(n_sites), num_down))
# Each arrangement of up spins can be paired with an arrangement
# of down spins
for up_occupation in up_occupations:
up_occupation = [up_index(index) for index in up_occupation]
for down_occupation in down_occupations:
down_occupation = [down_index(index)
for index in down_occupation]
occupation = up_occupation + down_occupation
indices.append(sum(2 ** (n_qubits - 1 - k)
for k in occupation))
else:
# Particle number is not fixed
if sz_integer < 0:
# There are more down spins than up spins
more_map = down_index
less_map = up_index
else:
# There are at least as many up spins as down spins
more_map = up_index
less_map = down_index
for n in range(abs(sz_integer), n_sites + 1):
# Choose n of the 'more' spin and n - abs(sz_integer) of the
# 'less' spin
more_occupations = itertools.combinations(range(n_sites), n)
less_occupations = list(itertools.combinations(
range(n_sites), n - abs(sz_integer)))
# Each arrangement of the 'more' spins can be paired with an
# arrangement of the 'less' spin
for more_occupation in more_occupations:
more_occupation = [more_map(index)
for index in more_occupation]
for less_occupation in less_occupations:
less_occupation = [less_map(index)
for index in less_occupation]
occupation = more_occupation + less_occupation
indices.append(sum(2 ** (n_qubits - 1 - k)
for k in occupation))
return indices
def jw_number_restrict_operator(operator, n_electrons, n_qubits=None):
"""Restrict a Jordan-Wigner encoded operator to a given particle number
Args:
        operator(ndarray or sparse): Numpy operator acting on
            the space of n_qubits.
n_electrons(int): Number of particles to restrict the operator to
n_qubits(int): Number of qubits defining the total state
Returns:
new_operator(ndarray or sparse): Numpy operator restricted to
acting on states with the same particle number.
"""
if n_qubits is None:
n_qubits = int(numpy.log2(operator.shape[0]))
select_indices = jw_number_indices(n_electrons, n_qubits)
return operator[numpy.ix_(select_indices, select_indices)]
def jw_sz_restrict_operator(operator, sz_value,
n_electrons=None,
n_qubits=None,
up_index=up_index,
down_index=down_index):
"""Restrict a Jordan-Wigner encoded operator to a given Sz value
Args:
operator(ndarray or sparse): Numpy operator acting on
the space of n_qubits.
sz_value(float): Desired Sz value. Should be an integer or
half-integer.
n_electrons(int, optional): Number of particles to restrict the
operator to, if such a restriction is desired.
n_qubits(int, optional): Number of qubits defining the total state
up_index (Callable, optional): Function that maps a spatial index
to the index of the corresponding up site
down_index (Callable, optional): Function that maps a spatial index
to the index of the corresponding down site
Returns:
new_operator(ndarray or sparse): Numpy operator restricted to
acting on states with the desired Sz value.
"""
if n_qubits is None:
n_qubits = int(numpy.log2(operator.shape[0]))
select_indices = jw_sz_indices(
sz_value, n_qubits,
n_electrons=n_electrons,
up_index=up_index,
down_index=down_index)
return operator[numpy.ix_(select_indices, select_indices)]
def jw_number_restrict_state(state, n_electrons, n_qubits=None):
"""Restrict a Jordan-Wigner encoded state to a given particle number
Args:
state(ndarray or sparse): Numpy vector in
the space of n_qubits.
n_electrons(int): Number of particles to restrict the state to
n_qubits(int): Number of qubits defining the total state
Returns:
new_operator(ndarray or sparse): Numpy vector restricted to
states with the same particle number. May not be normalized.
"""
if n_qubits is None:
n_qubits = int(numpy.log2(state.shape[0]))
select_indices = jw_number_indices(n_electrons, n_qubits)
return state[select_indices]
def jw_sz_restrict_state(state, sz_value,
n_electrons=None,
n_qubits=None,
up_index=up_index,
down_index=down_index):
"""Restrict a Jordan-Wigner encoded state to a given Sz value
Args:
state(ndarray or sparse): Numpy vector in
the space of n_qubits.
sz_value(float): Desired Sz value. Should be an integer or
half-integer.
n_electrons(int, optional): Number of particles to restrict the
operator to, if such a restriction is desired.
n_qubits(int, optional): Number of qubits defining the total state
up_index (Callable, optional): Function that maps a spatial index
to the index of the corresponding up site
down_index (Callable, optional): Function that maps a spatial index
to the index of the corresponding down site
Returns:
new_operator(ndarray or sparse): Numpy vector restricted to
states with the desired Sz value. May not be normalized.
"""
if n_qubits is None:
n_qubits = int(numpy.log2(state.shape[0]))
select_indices = jw_sz_indices(
sz_value, n_qubits,
n_electrons=n_electrons,
up_index=up_index,
down_index=down_index)
return state[select_indices]
def jw_get_ground_state_at_particle_number(sparse_operator, particle_number):
"""Compute ground energy and state at a specified particle number.
Assumes the Jordan-Wigner transform. The input operator should be Hermitian
and particle-number-conserving.
Args:
sparse_operator(sparse): A Jordan-Wigner encoded sparse matrix.
particle_number(int): The particle number at which to compute the ground
energy and states
Returns:
ground_energy(float): The lowest eigenvalue of sparse_operator within
the eigenspace of the number operator corresponding to
particle_number.
ground_state(ndarray): The ground state at the particle number
"""
n_qubits = int(numpy.log2(sparse_operator.shape[0]))
# Get the operator restricted to the subspace of the desired particle number
restricted_operator = jw_number_restrict_operator(sparse_operator,
particle_number,
n_qubits)
# Compute eigenvalues and eigenvectors
if restricted_operator.shape[0] - 1 <= 1:
# Restricted operator too small for sparse eigensolver
dense_restricted_operator = restricted_operator.toarray()
eigvals, eigvecs = numpy.linalg.eigh(dense_restricted_operator)
else:
eigvals, eigvecs = scipy.sparse.linalg.eigsh(restricted_operator,
k=1,
which='SA')
# Expand the state
state = eigvecs[:, 0]
expanded_state = numpy.zeros(2 ** n_qubits, dtype=complex)
expanded_state[jw_number_indices(particle_number, n_qubits)] = state
return eigvals[0], expanded_state
def jw_get_gaussian_state(quadratic_hamiltonian, occupied_orbitals=None):
"""Compute an eigenvalue and eigenstate of a quadratic Hamiltonian.
Eigenstates of a quadratic Hamiltonian are also known as fermionic
Gaussian states.
Args:
quadratic_hamiltonian(QuadraticHamiltonian):
The Hamiltonian whose eigenstate is desired.
occupied_orbitals(list):
A list of integers representing the indices of the occupied
orbitals in the desired Gaussian state. If this is None
(the default), then it is assumed that the ground state is
desired, i.e., the orbitals with negative energies are filled.
Returns
-------
energy (float):
The eigenvalue.
state (sparse):
The eigenstate in scipy.sparse csc format.
"""
if not isinstance(quadratic_hamiltonian, QuadraticHamiltonian):
raise ValueError('Input must be an instance of QuadraticHamiltonian.')
n_qubits = quadratic_hamiltonian.n_qubits
# Compute the energy
orbital_energies, constant = quadratic_hamiltonian.orbital_energies()
if occupied_orbitals is None:
# The ground energy is desired
if quadratic_hamiltonian.conserves_particle_number:
num_negative_energies = numpy.count_nonzero(
orbital_energies < -EQ_TOLERANCE)
occupied_orbitals = range(num_negative_energies)
else:
occupied_orbitals = []
energy = numpy.sum(orbital_energies[occupied_orbitals]) + constant
# Obtain the circuit that prepares the Gaussian state
circuit_description, start_orbitals = gaussian_state_preparation_circuit(
quadratic_hamiltonian, occupied_orbitals)
# Initialize the starting state
state = jw_configuration_state(start_orbitals, n_qubits)
# Apply the circuit
if not quadratic_hamiltonian.conserves_particle_number:
particle_hole_transformation = (
jw_sparse_particle_hole_transformation_last_mode(n_qubits))
for parallel_ops in circuit_description:
for op in parallel_ops:
if op == 'pht':
state = particle_hole_transformation.dot(state)
else:
i, j, theta, phi = op
state = jw_sparse_givens_rotation(
i, j, theta, phi, n_qubits).dot(state)
return energy, state
def jw_slater_determinant(slater_determinant_matrix):
r"""Obtain a Slater determinant.
The input is an :math:`N_f \times N` matrix :math:`Q` with orthonormal
rows. Such a matrix describes the Slater determinant
.. math::
b^\dagger_1 \cdots b^\dagger_{N_f} \lvert \text{vac} \rangle,
where
.. math::
b^\dagger_j = \sum_{k = 1}^N Q_{jk} a^\dagger_k.
Args:
slater_determinant_matrix: The matrix :math:`Q` which describes the
Slater determinant to be prepared.
Returns:
The Slater determinant as a sparse matrix.
"""
circuit_description = slater_determinant_preparation_circuit(
slater_determinant_matrix)
start_orbitals = range(slater_determinant_matrix.shape[0])
n_qubits = slater_determinant_matrix.shape[1]
# Initialize the starting state
state = jw_configuration_state(start_orbitals, n_qubits)
# Apply the circuit
for parallel_ops in circuit_description:
for op in parallel_ops:
i, j, theta, phi = op
state = jw_sparse_givens_rotation(
i, j, theta, phi, n_qubits).dot(state)
return state
def jw_sparse_givens_rotation(i, j, theta, phi, n_qubits):
"""Return the matrix (acting on a full wavefunction) that performs a
Givens rotation of modes i and j in the Jordan-Wigner encoding."""
if j != i + 1:
raise ValueError('Only adjacent modes can be rotated.')
if j > n_qubits - 1:
raise ValueError('Too few qubits requested.')
cosine = numpy.cos(theta)
sine = numpy.sin(theta)
phase = numpy.exp(1.j * phi)
# Create the two-qubit rotation matrix
rotation_matrix = scipy.sparse.csc_matrix(
([1., phase * cosine, -phase * sine, sine, cosine, phase],
((0, 1, 1, 2, 2, 3), (0, 1, 2, 1, 2, 3))),
shape=(4, 4))
# Initialize identity operators
left_eye = scipy.sparse.eye(2 ** i, format='csc')
right_eye = scipy.sparse.eye(2 ** (n_qubits - 1 - j), format='csc')
# Construct the matrix and return
givens_matrix = kronecker_operators([left_eye, rotation_matrix, right_eye])
return givens_matrix
def jw_sparse_particle_hole_transformation_last_mode(n_qubits):
"""Return the matrix (acting on a full wavefunction) that performs a
particle-hole transformation on the last mode in the Jordan-Wigner
encoding.
"""
left_eye = scipy.sparse.eye(2 ** (n_qubits - 1), format='csc')
return kronecker_operators([left_eye, pauli_matrix_map['X']])
def get_density_matrix(states, probabilities):
n_qubits = states[0].shape[0]
density_matrix = scipy.sparse.csc_matrix(
(n_qubits, n_qubits), dtype=complex)
for state, probability in zip(states, probabilities):
state = scipy.sparse.csc_matrix(state.reshape((len(state), 1)))
density_matrix = density_matrix + probability * state * state.getH()
return density_matrix
def get_ground_state(sparse_operator, initial_guess=None):
"""Compute lowest eigenvalue and eigenstate.
Args:
sparse_operator (LinearOperator): Operator to find the ground state of.
initial_guess (ndarray): Initial guess for ground state. A good
guess dramatically reduces the cost required to converge.
Returns
-------
eigenvalue:
The lowest eigenvalue, a float.
eigenstate:
The lowest eigenstate in scipy.sparse csc format.
"""
values, vectors = scipy.sparse.linalg.eigsh(
sparse_operator, k=1, v0=initial_guess, which='SA', maxiter=1e7)
order = numpy.argsort(values)
values = values[order]
vectors = vectors[:, order]
eigenvalue = values[0]
eigenstate = vectors[:, 0]
return eigenvalue, eigenstate.T
def sparse_eigenspectrum(sparse_operator):
"""Perform a dense diagonalization.
Returns:
eigenspectrum: The lowest eigenvalues in a numpy array.
"""
dense_operator = sparse_operator.todense()
if is_hermitian(sparse_operator):
eigenspectrum = numpy.linalg.eigvalsh(dense_operator)
else:
eigenspectrum = numpy.linalg.eigvals(dense_operator)
return numpy.sort(eigenspectrum)
def expectation(operator, state):
"""Compute the expectation value of an operator with a state.
Args:
operator(scipy.sparse.spmatrix or scipy.sparse.linalg.LinearOperator):
The operator whose expectation value is desired.
state(numpy.ndarray or scipy.sparse.spmatrix): A numpy array
representing a pure state or a sparse matrix representing a density
matrix. If `operator` is a LinearOperator, then this must be a
numpy array.
Returns:
A complex number giving the expectation value.
Raises:
ValueError: Input state has invalid format.
"""
if isinstance(state, scipy.sparse.spmatrix):
# Handle density matrix.
if isinstance(operator, scipy.sparse.linalg.LinearOperator):
raise ValueError('Taking the expectation of a LinearOperator with '
'a density matrix is not supported.')
product = state * operator
expectation = numpy.sum(product.diagonal())
elif isinstance(state, numpy.ndarray):
# Handle state vector.
if len(state.shape) == 1:
# Row vector
expectation = numpy.dot(numpy.conjugate(state), operator * state)
else:
# Column vector
expectation = numpy.dot(numpy.conjugate(state.T),
operator * state)[0, 0]
else:
# Handle exception.
raise ValueError(
'Input state must be a numpy array or a sparse matrix.')
# Return.
return expectation
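# Minimal sketch (an addition): the expectation of Z on |0> is +1.
#
#     z = qubit_operator_sparse(QubitOperator('Z0'))
#     state = numpy.array([1., 0.], dtype=complex)
#     assert abs(expectation(z, state) - 1.0) < 1e-12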
def variance(operator, state):
"""Compute variance of operator with a state.
Args:
operator(scipy.sparse.spmatrix or scipy.sparse.linalg.LinearOperator):
The operator whose expectation value is desired.
state(numpy.ndarray or scipy.sparse.spmatrix): A numpy array
representing a pure state or a sparse matrix representing a density
matrix.
Returns:
A complex number giving the variance.
Raises:
ValueError: Input state has invalid format.
"""
return (expectation(operator ** 2, state) -
expectation(operator, state) ** 2)
def expectation_computational_basis_state(operator, computational_basis_state):
"""Compute expectation value of operator with a state.
Args:
operator: Qubit or FermionOperator to evaluate expectation value of.
If operator is a FermionOperator, it must be normal-ordered.
computational_basis_state (scipy.sparse vector / list): normalized
computational basis state (if scipy.sparse vector), or list of
occupied orbitals.
Returns:
A real float giving expectation value.
Raises:
TypeError: Incorrect operator or state type.
"""
if isinstance(operator, QubitOperator):
raise NotImplementedError('Not yet implemented for QubitOperators.')
if not isinstance(operator, FermionOperator):
raise TypeError('operator must be a FermionOperator.')
occupied_orbitals = computational_basis_state
if not isinstance(occupied_orbitals, list):
computational_basis_state_index = (
occupied_orbitals.nonzero()[0][0])
occupied_orbitals = [digit == '1' for digit in
bin(computational_basis_state_index)[2:]][::-1]
expectation_value = operator.terms.get((), 0.0)
for i in range(len(occupied_orbitals)):
if occupied_orbitals[i]:
expectation_value += operator.terms.get(
((i, 1), (i, 0)), 0.0)
for j in range(i + 1, len(occupied_orbitals)):
expectation_value -= operator.terms.get(
((j, 1), (i, 1), (j, 0), (i, 0)), 0.0)
return expectation_value
def expectation_db_operator_with_pw_basis_state(
operator, plane_wave_occ_orbitals, n_spatial_orbitals, grid,
spinless):
"""Compute expectation value of a dual basis operator with a plane
wave computational basis state.
Args:
operator: Dual-basis representation of FermionOperator to evaluate
expectation value of. Can have at most 3-body terms.
plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
n_spatial_orbitals (int): Number of spatial orbitals.
grid (openfermion.utils.Grid): The grid used for discretization.
spinless (bool): Whether the system is spinless.
Returns:
A real float giving the expectation value.
"""
expectation_value = operator.terms.get((), 0.0)
for single_action, coefficient in iteritems(operator.terms):
if len(single_action) == 2:
expectation_value += coefficient * (
expectation_one_body_db_operator_computational_basis_state(
single_action, plane_wave_occ_orbitals, grid, spinless) /
n_spatial_orbitals)
elif len(single_action) == 4:
expectation_value += coefficient * (
expectation_two_body_db_operator_computational_basis_state(
single_action, plane_wave_occ_orbitals, grid, spinless) /
n_spatial_orbitals ** 2)
elif len(single_action) == 6:
expectation_value += coefficient * (
expectation_three_body_db_operator_computational_basis_state(
single_action, plane_wave_occ_orbitals, grid, spinless) /
n_spatial_orbitals ** 3)
return expectation_value
def expectation_one_body_db_operator_computational_basis_state(
dual_basis_action, plane_wave_occ_orbitals, grid, spinless):
"""Compute expectation value of a 1-body dual-basis operator with a
plane wave computational basis state.
Args:
dual_basis_action: Dual-basis action of FermionOperator to
evaluate expectation value of.
plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
grid (openfermion.utils.Grid): The grid used for discretization.
spinless (bool): Whether the system is spinless.
Returns:
A real float giving the expectation value.
"""
expectation_value = 0.0
r_p = grid.position_vector(grid.grid_indices(dual_basis_action[0][0],
spinless))
r_q = grid.position_vector(grid.grid_indices(dual_basis_action[1][0],
spinless))
for orbital in plane_wave_occ_orbitals:
# If there's spin, p and q have to have the same parity (spin),
# and the new orbital has to have the same spin as these.
k_orbital = grid.momentum_vector(grid.grid_indices(orbital,
spinless))
# The Fourier transform is spin-conserving. This means that p, q,
# and the new orbital all have to have the same spin (parity).
if spinless or (dual_basis_action[0][0] % 2 ==
dual_basis_action[1][0] % 2 == orbital % 2):
expectation_value += numpy.exp(-1j * k_orbital.dot(r_p - r_q))
return expectation_value
def expectation_two_body_db_operator_computational_basis_state(
dual_basis_action, plane_wave_occ_orbitals, grid, spinless):
"""Compute expectation value of a 2-body dual-basis operator with a
plane wave computational basis state.
Args:
dual_basis_action: Dual-basis action of FermionOperator to
evaluate expectation value of.
plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
grid (openfermion.utils.Grid): The grid used for discretization.
spinless (bool): Whether the system is spinless.
Returns:
A float giving the expectation value.
"""
expectation_value = 0.0
r = {}
for i in range(4):
r[i] = grid.position_vector(grid.grid_indices(dual_basis_action[i][0],
spinless))
rr = {}
k_map = {}
for i in range(2):
rr[i] = {}
k_map[i] = {}
for j in range(2, 4):
rr[i][j] = r[i] - r[j]
k_map[i][j] = {}
# Pre-computations.
for o in plane_wave_occ_orbitals:
k = grid.momentum_vector(grid.grid_indices(o, spinless))
for i in range(2):
for j in range(2, 4):
k_map[i][j][o] = k.dot(rr[i][j])
for orbital1 in plane_wave_occ_orbitals:
k1ac = k_map[0][2][orbital1]
k1ad = k_map[0][3][orbital1]
for orbital2 in plane_wave_occ_orbitals:
if orbital1 != orbital2:
k2bc = k_map[1][2][orbital2]
k2bd = k_map[1][3][orbital2]
# The Fourier transform is spin-conserving. This means that
# the parity of the orbitals involved in the transition must
# be the same.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[3][0] % 2 == orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[2][0] % 2 == orbital2 % 2)):
value = numpy.exp(-1j * (k1ad + k2bc))
# Add because it came from two anti-commutations.
expectation_value += value
# The Fourier transform is spin-conserving. This means that
# the parity of the orbitals involved in the transition must
# be the same.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[2][0] % 2 == orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[3][0] % 2 == orbital2 % 2)):
value = numpy.exp(-1j * (k1ac + k2bd))
# Subtract because it came from a single anti-commutation.
expectation_value -= value
return expectation_value
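# In closed form, the double loop above evaluates
#     <a^dag_a a^dag_b a_c a_d> = sum_{k1 != k2 in occ}
#         [ exp(-i (k1.(r_a - r_d) + k2.(r_b - r_c)))      (two anti-commutations)
#         - exp(-i (k1.(r_a - r_c) + k2.(r_b - r_d))) ]    (one anti-commutation)
# with each pairing restricted to spin-conserving index combinations when
# spinless is False.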
def expectation_three_body_db_operator_computational_basis_state(
dual_basis_action, plane_wave_occ_orbitals, grid, spinless):
"""Compute expectation value of a 3-body dual-basis operator with a
plane wave computational basis state.
Args:
dual_basis_action: Dual-basis action of FermionOperator to
evaluate expectation value of.
plane_wave_occ_orbitals (list): list of occupied plane-wave orbitals.
grid (openfermion.utils.Grid): The grid used for discretization.
spinless (bool): Whether the system is spinless.
Returns:
A float giving the expectation value.
"""
expectation_value = 0.0
r = {}
for i in range(6):
r[i] = grid.position_vector(grid.grid_indices(dual_basis_action[i][0],
spinless))
rr = {}
k_map = {}
for i in range(3):
rr[i] = {}
k_map[i] = {}
for j in range(3, 6):
rr[i][j] = r[i] - r[j]
k_map[i][j] = {}
# Pre-computations.
for o in plane_wave_occ_orbitals:
k = grid.momentum_vector(grid.grid_indices(o, spinless))
for i in range(3):
for j in range(3, 6):
k_map[i][j][o] = k.dot(rr[i][j])
for orbital1 in plane_wave_occ_orbitals:
k1ad = k_map[0][3][orbital1]
k1ae = k_map[0][4][orbital1]
k1af = k_map[0][5][orbital1]
for orbital2 in plane_wave_occ_orbitals:
if orbital1 != orbital2:
k2bd = k_map[1][3][orbital2]
k2be = k_map[1][4][orbital2]
k2bf = k_map[1][5][orbital2]
for orbital3 in plane_wave_occ_orbitals:
if orbital1 != orbital3 and orbital2 != orbital3:
k3cd = k_map[2][3][orbital3]
k3ce = k_map[2][4][orbital3]
k3cf = k_map[2][5][orbital3]
# Handle \delta_{ad} \delta_{bf} \delta_{ce} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital3 % 2)):
expectation_value += numpy.exp(-1j * (
k1ad + k2bf + k3ce))
# Handle -\delta_{ad} \delta_{be} \delta_{cf} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital3 % 2)):
expectation_value -= numpy.exp(-1j * (
k1ad + k2be + k3cf))
# Handle -\delta_{ae} \delta_{bf} \delta_{cd} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital3 % 2)):
expectation_value -= numpy.exp(-1j * (
k1ae + k2bf + k3cd))
# Handle \delta_{ae} \delta_{bd} \delta_{cf} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital3 % 2)):
expectation_value += numpy.exp(-1j * (
k1ae + k2bd + k3cf))
# Handle \delta_{af} \delta_{be} \delta_{cd} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital3 % 2)):
expectation_value += numpy.exp(-1j * (
k1af + k2be + k3cd))
# Handle -\delta_{af} \delta_{bd} \delta_{ce} after FT.
# The Fourier transform is spin-conserving.
if spinless or (
(dual_basis_action[0][0] % 2 ==
dual_basis_action[5][0] % 2 ==
orbital1 % 2) and
(dual_basis_action[1][0] % 2 ==
dual_basis_action[3][0] % 2 ==
orbital2 % 2) and
(dual_basis_action[2][0] % 2 ==
dual_basis_action[4][0] % 2 ==
orbital3 % 2)):
expectation_value -= numpy.exp(-1j * (
k1af + k2bd + k3ce))
return expectation_value
def get_gap(sparse_operator, initial_guess=None):
"""Compute gap between lowest eigenvalue and first excited state.
Args:
sparse_operator (LinearOperator): Operator to find the ground state of.
initial_guess (ndarray): Initial guess for eigenspace. A good
guess dramatically reduces the cost required to converge.
Returns: A real float giving eigenvalue gap.
"""
if not is_hermitian(sparse_operator):
raise ValueError('sparse_operator must be Hermitian.')
values, _ = scipy.sparse.linalg.eigsh(
sparse_operator, k=2, v0=initial_guess, which='SA', maxiter=1e7)
gap = abs(values[1] - values[0])
return gap
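# A minimal usage sketch (hypothetical toy operator; assumes scipy.sparse is
# available as in the rest of this module):
#
#     toy = scipy.sparse.diags([0., 0.5, 2., 3.]).tocsc()
#     get_gap(toy)  # -> 0.5, the spacing between the two lowest eigenvalues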
def inner_product(state_1, state_2):
"""Compute inner product of two states."""
return numpy.dot(state_1.conjugate(), state_2)
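# A tiny sanity check (hypothetical vectors): a normalized state has unit
# self-inner-product, e.g.
#     inner_product(numpy.array([1j, 0.]), numpy.array([1j, 0.]))  # -> (1+0j)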
def boson_ladder_sparse(n_modes, mode, ladder_type, trunc):
r"""Make a matrix representation of a singular bosonic ladder operator
in the Fock space.
Since the bosonic operator lies in an infinite Fock space,
a truncation value needs to be provide so that a sparse matrix
of finite size can be returned.
Args:
n_modes (int): Number of modes in the system Hilbert space.
mode (int): The mode the ladder operator targets.
        ladder_type (int): Either 0 or 1; 0 indicates a lowering
            operator and 1 a raising operator.
trunc (int): The size at which the Fock space should be truncated
when returning the matrix representing the ladder operator.
Returns:
The corresponding trunc x trunc Scipy sparse matrix.
"""
if trunc < 1 or not isinstance(trunc, int):
raise ValueError("Fock space truncation must be a positive integer.")
if ladder_type:
lop = scipy.sparse.spdiags(numpy.sqrt(range(1, trunc)),
-1, trunc, trunc, format='csc')
else:
lop = scipy.sparse.spdiags(numpy.sqrt(range(trunc)),
1, trunc, trunc, format='csc')
Id = [scipy.sparse.identity(trunc, format='csc', dtype=complex)]
operator_list = Id*mode + [lop] + Id*(n_modes - mode - 1)
operator = kronecker_operators(operator_list)
return operator
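# For reference, with trunc=3 the lowering operator (ladder_type=0) is
#
#     [[0, 1,       0],
#      [0, 0, sqrt(2)],
#      [0, 0,       0]]
#
# and the raising operator (ladder_type=1) is its transpose.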
def single_quad_op_sparse(n_modes, mode, quadrature, hbar, trunc):
r"""Make a matrix representation of a singular quadrature
operator in the Fock space.
Since the bosonic operators lie in an infinite Fock space,
a truncation value needs to be provide so that a sparse matrix
of finite size can be returned.
Args:
n_modes (int): Number of modes in the system Hilbert space.
mode (int): The mode the ladder operator targets.
quadrature (str): 'q' for the canonical position operator,
            'p' for the canonical momentum operator.
hbar (float): the value of hbar to use in the definition of the
canonical commutation relation [q_i, p_j] = \delta_{ij} i hbar.
trunc (int): The size at which the Fock space should be truncated
when returning the matrix representing the ladder operator.
Returns:
The corresponding trunc x trunc Scipy sparse matrix.
"""
if trunc < 1 or not isinstance(trunc, int):
raise ValueError("Fock space truncation must be a positive integer.")
b = boson_ladder_sparse(1, 0, 0, trunc)
if quadrature == 'q':
op = numpy.sqrt(hbar/2) * (b + b.conj().T)
    elif quadrature == 'p':
        op = -1j*numpy.sqrt(hbar/2) * (b - b.conj().T)
    else:
        raise ValueError("quadrature must be either 'q' or 'p'.")
Id = [scipy.sparse.identity(trunc, dtype=complex, format='csc')]
operator_list = Id*mode + [op] + Id*(n_modes - mode - 1)
operator = kronecker_operators(operator_list)
return operator
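# A quick sanity check one might run (hypothetical; the truncation only
# spoils the commutator in the highest Fock level):
#
#     q = single_quad_op_sparse(1, 0, 'q', 1., 10)
#     p = single_quad_op_sparse(1, 0, 'p', 1., 10)
#     comm = (q.dot(p) - p.dot(q)).toarray()
#     # comm is approximately 1j * hbar * identity, except at entry (9, 9)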
def boson_operator_sparse(operator, trunc, hbar=1.):
r"""Initialize a Scipy sparse matrix in the Fock space
from a bosonic operator.
Since the bosonic operators lie in an infinite Fock space,
a truncation value needs to be provide so that a sparse matrix
of finite size can be returned.
Args:
operator: One of either BosonOperator or QuadOperator.
trunc (int): The size at which the Fock space should be truncated
when returning the matrix representing the ladder operator.
hbar (float): the value of hbar to use in the definition of the
canonical commutation relation [q_i, p_j] = \delta_{ij} i hbar.
            This only applies if calculating the sparse representation of
a quadrature operator.
Returns:
The corresponding Scipy sparse matrix of size [trunc, trunc].
"""
if isinstance(operator, QuadOperator):
from openfermion.transforms._conversion import get_boson_operator
boson_operator = get_boson_operator(operator, hbar)
elif isinstance(operator, BosonOperator):
boson_operator = operator
else:
raise ValueError("Only BosonOperator and QuadOperator are supported.")
if trunc < 1 or not isinstance(trunc, int):
raise ValueError("Fock space truncation must be a positive integer.")
# count the number of modes
n_modes = 0
for term in boson_operator.terms:
for ladder_operator in term:
if ladder_operator[0] + 1 > n_modes:
n_modes = ladder_operator[0] + 1
# Construct the Scipy sparse matrix.
n_hilbert = trunc ** n_modes
values_list = [[]]
row_list = [[]]
column_list = [[]]
# Loop through the terms.
for term in boson_operator.terms:
coefficient = boson_operator.terms[term]
term_operator = coefficient*scipy.sparse.identity(
n_hilbert, dtype=complex, format='csc')
for ladder_op in term:
# Add actual operator to the list.
b = boson_ladder_sparse(n_modes, ladder_op[0], ladder_op[1], trunc)
term_operator = term_operator.dot(b)
# Extract triplets from sparse_term.
values_list.append(term_operator.tocoo(copy=False).data)
(row, column) = term_operator.nonzero()
column_list.append(column)
row_list.append(row)
# Create sparse operator.
values_list = numpy.concatenate(values_list)
row_list = numpy.concatenate(row_list)
column_list = numpy.concatenate(column_list)
sparse_operator = scipy.sparse.coo_matrix((
values_list, (row_list, column_list)),
shape=(n_hilbert, n_hilbert)).tocsc(copy=False)
sparse_operator.eliminate_zeros()
return sparse_operator
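# A minimal sketch (BosonOperator is imported at the top of the full module):
# the number operator b^dag b on a single mode, truncated at 4 Fock levels.
#
#     n_op = boson_operator_sparse(BosonOperator('0^ 0'), 4)
#     # n_op.diagonal() -> array([0., 1., 2., 3.])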
|
jarrodmcc/OpenFermion
|
src/openfermion/utils/_sparse_tools.py
|
Python
|
apache-2.0
| 51,858
|
[
"Gaussian"
] |
1112e74d65c0ececd263c8f7a9c7bd785e13077443c9d212fad4eae2c3409b19
|
#! /usr/bin/env python
#-*- coding: utf-8 -*-
"""
Integrated Python GUI for spectral analysis.
Designed functionalities:
> Plot spectra (x,y) file
> Fit spectral lines to common lineshape functions (Gaussian/Lorentzian/Voigt)
> Baseline removal
> Peak selection
> Catalog simulation (JPL/CDMS)
Package Requirements:
> numpy 1.8+
> scipy 0.16+
> PyQt5
> matplotlib
Written by Luyao Zou @ https://github.com/luyaozou
"""
__author__ = 'Luyao Zou, zouluyao@hotmail.com'
__version__ = 'Beta 0.1'
__date__ = 'Date: 07/22/2016'
from PyQt5 import QtWidgets, QtCore, QtGui
import sys
import os
import numpy as np
import matplotlib as mpl
mpl.use('Qt5Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backend_bases import key_press_handler
# custom module
import sflib
class FitParameter:
''' Store Fit Parameters '''
def __init__(self, peak):
self.ftype = 0 # function type
self.der = 0 # order of derivative (up to 4)
self.peak = peak # number of peaks
self.par_per_peak = 3 # number of parameters per peak
self.boxwin = 1 # boxcar smooth window
self.rescale = 1 # y intensity rescaler
self.deg = 0 # degree of polynomial baseline
self.par_name = self.get_par_name(0) # parameter name list
self.par = np.empty(self.par_per_peak*peak) # parameter vector
self.smooth_edge = False
def get_par_name(self, ftype):
if not ftype: # Gaussian type
return ['mu', 'sigma', 'A']
elif ftype == 1: # Lorentzian type
return ['mu', 'gamma', 'A']
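    # For example, a two-peak Gaussian model is parameterized as
    # [mu1, sigma1, A1, mu2, sigma2, A2], i.e. par_per_peak * peak entries
    # laid out peak by peak.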
def get_function(self):
return sflib.Function(self.ftype, self.der, self.peak)
class FitStatus:
''' Store Fit Status '''
def __init__(self):
self.stat = 2
self.input_valid = True
self.stat_dict = {0:'Fit successful',
1:'Fit failed',
2:'File not found',
3:'Unsupported file format',
4:'Baseline removal failed',
5:'Input Invalid'}
self.file_idx = 0
def print_stat(self):
return self.stat_dict[self.stat]
class FitMainGui(QtWidgets.QMainWindow):
# define main GUI window of the simulator
def __init__(self, parent=None): # initialize GUI
super().__init__()
# initialize fit parameter & fit status instance
self.fit_par = FitParameter(1)
self.fit_stat = FitStatus()
# initialize input validity tracker
self.fit_stat.input_valid = True
# get log directory (local directory of the script)
self.log_dir = os.getcwd()
# get file directory (read last directory from .cfg file)
self.current_dir = self.get_file_dir()
# add aborted and successful file list
self.list_aborted_file = []
self.list_success_file = []
# add menubar
openAction = QtWidgets.QAction('Open', self)
openAction.setShortcut('Ctrl+O')
openAction.setStatusTip('Open Spectra')
openAction.triggered.connect(self.open_file)
self.menu = self.menuBar()
self.menu.setNativeMenuBar(False)
self.menu.addAction(openAction)
# add status bar
self.statusbar = self.statusBar()
# set GUI layout
self.set_main_grid()
self.widget_main = QtWidgets.QWidget()
self.widget_main.setLayout(self.layout_main)
self.setCentralWidget(self.widget_main)
# set program title
self.setWindowTitle('Fit Spectra!')
# show program window
self.show()
def set_main_grid(self):
self.layout_main = QtWidgets.QGridLayout()
self.layout_main.setSpacing(6)
# add current_file label
self.label_current_file = QtWidgets.QLabel()
self.layout_main.addWidget(QtWidgets.QLabel('Current File:'), 0, 0)
self.layout_main.addWidget(self.label_current_file, 0, 1, 1, 2)
# add matplotlib canvas
self.fig = plt.figure()
self.canvas = FigureCanvas(self.fig)
self.canvas.setFocus()
self.mpl_toolbar = NavigationToolbar(self.canvas, self)
self.click_counter = 0 # initialize click counter
# connect the canvas to matplotlib standard key press events
self.canvas.mpl_connect('key_press_event', self.mpl_key_press)
# connect the canvas to mouse click events
self.canvas.mpl_connect('button_press_event', self.mpl_click)
self.layout_main.addWidget(self.mpl_toolbar, 1, 0, 1, 3)
self.layout_main.addWidget(self.canvas, 2, 0, 1, 3)
# add fit option layout
self.layout_setting = QtWidgets.QGridLayout()
# select lineshape
self.combo_ftype = QtWidgets.QComboBox()
self.combo_ftype.addItems(['Gaussian', 'Lorentzian'])
# select number of derivatives
self.combo_der = QtWidgets.QComboBox()
self.combo_der.addItems(['0', '1', '2', '3', '4'])
self.check_boxcar = QtWidgets.QCheckBox('Boxcar Smooth?')
self.check_rescale = QtWidgets.QCheckBox('Rescale Intensity?')
self.edit_boxcar = QtWidgets.QLineEdit('1')
self.edit_rescale = QtWidgets.QLineEdit('1')
self.edit_deg = QtWidgets.QLineEdit('0')
self.edit_num_peak = QtWidgets.QLineEdit('1')
self.layout_setting.addWidget(QtWidgets.QLabel('Lineshape Function'), 0, 0)
self.layout_setting.addWidget(self.combo_ftype, 1, 0)
self.layout_setting.addWidget(QtWidgets.QLabel('Derivative'), 0, 1)
self.layout_setting.addWidget(self.combo_der, 1, 1)
self.layout_setting.addWidget(self.check_boxcar, 2, 0)
self.layout_setting.addWidget(self.edit_boxcar, 2, 1)
self.layout_setting.addWidget(self.check_rescale, 3, 0)
self.layout_setting.addWidget(self.edit_rescale, 3, 1)
self.layout_setting.addWidget(QtWidgets.QLabel('PolyBaseline Degree'), 4, 0)
self.layout_setting.addWidget(self.edit_deg, 4, 1)
self.layout_setting.addWidget(QtWidgets.QLabel('Number of Peaks'), 5, 0)
self.layout_setting.addWidget(self.edit_num_peak, 5, 1)
self.layout_setting.addWidget(QtWidgets.QLabel('<<< Initial Guess >>>'), 6, 0, 2, 2)
# connect signals
# select combo box items
self.combo_ftype.currentIndexChanged.connect(self.get_ftype)
self.combo_der.currentIndexChanged.connect(self.get_der)
# display/hide checked edit box
self.edit_boxcar.hide()
self.edit_rescale.hide()
self.check_boxcar.stateChanged.connect(self.show_boxcar)
self.check_rescale.stateChanged.connect(self.show_rescale)
# check input validity
self.edit_boxcar.textChanged.connect(self.check_int_validity)
self.edit_rescale.textChanged.connect(self.check_double_validity)
self.edit_deg.textChanged.connect(self.check_int_validity)
self.edit_num_peak.textChanged.connect(self.set_par_layout)
# add fit parameter layout for initial guess
self.widget_par = QtWidgets.QWidget()
self.layout_par = QtWidgets.QGridLayout()
self.edit_par = []
self.set_par_layout()
self.widget_par.setLayout(self.layout_par)
self.scroll_par = QtWidgets.QScrollArea()
self.scroll_par.setWidget(self.widget_par)
self.scroll_par.setWidgetResizable(True)
self.scroll_par.setMaximumHeight(600)
self.layout_setting.addWidget(self.scroll_par, 8, 0, 1, 2)
self.layout_main.addLayout(self.layout_setting, 2, 3)
# add fit & Quit button
btn_fit = QtWidgets.QPushButton('Fit Spectrum', self)
btn_quit = QtWidgets.QPushButton('Quit', self)
btn_quit.setShortcut('Ctrl+Q')
self.layout_main.addWidget(btn_fit, 0, 3)
self.layout_main.addWidget(btn_quit, 3, 3)
btn_fit.clicked.connect(self.fit_routine)
btn_quit.clicked.connect(self.close)
def set_par_layout(self):
text = self.edit_num_peak.text()
try:
self.fit_par.peak = abs(int(text))
green = '#00A352'
self.edit_num_peak.setStyleSheet('border: 3px solid %s' % green)
except ValueError:
red = '#D63333'
self.edit_num_peak.setStyleSheet('border: 3px solid %s' % red)
self.fit_par.peak = 0
# set initial guess layout
# clear previous parameters
self.fit_par.par = np.zeros(self.fit_par.peak * self.fit_par.par_per_peak)
self.edit_par = [] # clear previous widgets
self.clear_item(self.layout_par) # clear layout
self.click_counter = 0 # reset click counter
# add widgets
for i in range(self.fit_par.par_per_peak * self.fit_par.peak):
peak_index = i // self.fit_par.par_per_peak + 1
par_index = i % self.fit_par.par_per_peak
self.edit_par.append(QtWidgets.QLineEdit())
            if not par_index: # start of a new peak
self.layout_par.addWidget(QtWidgets.QLabel(
'--- Peak {:d} ---'.format(peak_index)),
4*(peak_index-1), 0, 1, 2)
self.layout_par.addWidget(QtWidgets.QLabel(
self.fit_par.par_name[par_index]), i+peak_index, 0)
self.layout_par.addWidget(self.edit_par[i], i+peak_index, 1)
self.edit_par[i].setText('0.5') # set default value
self.edit_par[i].textChanged.connect(self.check_double_validity)
# --------- get all fitting options ---------
def get_ftype(self, ftype):
self.fit_par.ftype = ftype
self.fit_par.par_name = self.fit_par.get_par_name(ftype)
# refresh parameter layout
self.set_par_layout()
def get_der(self, der):
self.fit_par.der = der
def get_par(self):
# if input is valid
if self.fit_stat.input_valid == 2:
for i in range(self.fit_par.par_per_peak * self.fit_par.peak):
self.fit_par.par[i] = float(self.edit_par[i].text())
self.fit_par.boxwin = abs(int(self.edit_boxcar.text()))
self.fit_par.rescale = abs(float(self.edit_rescale.text()))
self.fit_par.deg = abs(int(self.edit_deg.text()))
else:
self.fit_stat.stat = 5
# --------- fit routine ---------
def fit_routine(self):
# if data loaded successfully
if not self.fit_stat.stat:
data_table, popt, uncertainty, ppoly = self.fit_try()
# if fit failed, print information
if self.fit_stat.stat:
failure = QtWidgets.QMessageBox.information(self, 'Failure',
self.fit_stat.print_stat(), QtWidgets.QMessageBox.Retry |
QtWidgets.QMessageBox.Abort, QtWidgets.QMessageBox.Retry)
# choose retry or ignore
if failure == QtWidgets.QMessageBox.Retry:
pass
elif failure == QtWidgets.QMessageBox.Abort:
self.pass_file()
# if fit successful, ask user for save|retry option
else:
success = QtWidgets.QMessageBox.question(self, 'Save?',
'Save the fit if it looks good. \n ' +
'Otherwise retry a fit or abort this file ',
QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.Retry |
QtWidgets.QMessageBox.Abort, QtWidgets.QMessageBox.Save)
if success == QtWidgets.QMessageBox.Save:
# save file
self.save_file(data_table, popt, uncertainty, ppoly)
# go to next spectrum
self.next_file()
elif success == QtWidgets.QMessageBox.Retry:
pass
elif success == QtWidgets.QMessageBox.Abort:
self.pass_file()
def fit_try(self):
# get fitting parameters
self.get_par()
if not self.fit_stat.stat:
if self.fit_par.peak:
# get fit function
f = self.fit_par.get_function()
# re-load data with boxcar win and rescale
xdata, ydata = self.load_data()
popt, uncertainty, noise, ppoly, self.fit_stat.stat = sflib.fit_spectrum(f,
xdata, ydata, self.fit_par.par, self.fit_par.deg, self.fit_par.smooth_edge)
else: # if no peak, fit baseline
xdata, ydata = self.load_data()
popt, uncertainty, noise, ppoly, self.fit_stat.stat = sflib.fit_baseline(xdata, ydata, self.fit_par.deg)
# if fit successful, plot fit
if not self.fit_stat.stat and self.fit_par.peak:
# Make plot for successful fit
fit = f.get_func()(xdata, *popt)
baseline = np.polyval(ppoly, xdata - np.median(xdata))
residual = ydata - fit - baseline
self.statusbar.showMessage('Noise {:.4f}'.format(noise))
self.plot_spect(xdata, ydata, fit, baseline)
# concatenate data table
data_table = np.column_stack((xdata, ydata, fit, baseline))
return data_table, popt, uncertainty, ppoly
elif not self.fit_stat.stat:
baseline = np.polyval(ppoly, xdata - np.median(xdata))
residual = ydata - baseline
fit = np.zeros_like(ydata)
self.statusbar.showMessage('Noise {:.4f}'.format(noise))
self.plot_spect(xdata, ydata, fit, baseline)
data_table = np.column_stack((xdata, ydata, fit, baseline))
return data_table, popt, uncertainty, ppoly
else:
return None, None, None, None
def load_data(self): # load data
# check if there is a file name
try:
filename = '/'.join([self.current_dir, self.current_file])
except AttributeError:
select_file = QtWidgets.QMessageBox.warning(self, 'No File!',
'No spectrum file has been selected. Do you want to select now?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.Yes)
if select_file == QtWidgets.QMessageBox.Yes:
self.open_file()
else:
self.fit_stat.stat = 2 # exception: file not found
return None, None
# try load data
xdata, ydata, self.fit_stat.stat = sflib.read_file(filename,
self.fit_par.boxwin, self.fit_par.rescale)
# if file is readable, plot raw data and return xy data
if not self.fit_stat.stat:
self.plot_data(xdata, ydata)
return xdata, ydata
else:
return None, None
def plot_data(self, xdata, ydata): # plot raw data file before fit
self.fig.clear()
ax = self.fig.add_subplot(111)
ax.plot(xdata, ydata, 'k-')
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('Intensity (a.u.)')
self.canvas.draw()
def plot_spect(self, xdata, ydata, fit, baseline): # plot fitted spectra
self.fig.clear()
ax = self.fig.add_subplot(111)
ax.plot(xdata, ydata, 'k-', xdata, fit+baseline, 'r-',
xdata, baseline, 'b--')
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('Intensity (a.u.)')
self.canvas.draw()
# --------- file handling ---------
def open_file(self):
# get all file names
QFileHandle = QtWidgets.QFileDialog()
QFileHandle.setDirectory(self.current_dir)
filelist = QFileHandle.getOpenFileNames(self, 'Open Spectra')[0]
# if list is not empty, proceed
if filelist:
# sort file name
filelist.sort()
            # separate directory name and file name
self.list_dir, self.list_file = sflib.separate_dir(filelist)
# get the first directory and file name
self.current_dir = self.list_dir[0]
self.current_file = self.list_file[0]
self.fit_stat.file_idx = 0
# update label
self.label_current_file.setText(self.current_file)
# launch fit routine
self.load_data()
else:
self.fit_stat.stat = 2
def pass_file(self):
try:
self.list_aborted_file.append('/'.join([self.current_dir, self.current_file]))
except AttributeError:
pass
self.next_file()
def save_file(self, data_table, popt, uncertainty, ppoly):
default_fitname = sflib.out_name_gen(self.current_file) + '.csv'
default_logname = sflib.out_name_gen(self.current_file) + '.log'
fitname = QtWidgets.QFileDialog.getSaveFileName(self,
'Save Current Fit Spectrum', '/'.join([self.current_dir, default_fitname]))[0]
if fitname:
sflib.save_fit(fitname, data_table, popt,
self.fit_par.ftype, self.fit_par.der, self.fit_par.peak)
logname = QtWidgets.QFileDialog.getSaveFileName(self,
'Save Current Fit Log', '/'.join([self.current_dir, default_logname]))[0]
if logname:
sflib.save_log(logname, popt, uncertainty, ppoly,
self.fit_par.ftype, self.fit_par.der,
self.fit_par.peak, self.fit_par.par_name)
self.list_success_file.append('/'.join([self.current_dir, self.current_file]))
def next_file(self):
# refresh current file index, fit status and click counter
self.fit_stat.file_idx += 1
self.fit_stat.stat = 0
self.click_counter = 0
try:
self.current_file = self.list_file[self.fit_stat.file_idx]
self.current_dir = self.list_dir[self.fit_stat.file_idx]
# update label text
self.label_current_file.setText(self.current_file)
# repeat fit routine
self.load_data()
except (IndexError, AttributeError):
eof = QtWidgets.QMessageBox.information(self, 'End of File',
'No more files to fit. Do you want to select new files?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Close,
QtWidgets.QMessageBox.Yes)
if eof == QtWidgets.QMessageBox.Yes:
self.open_file()
else:
self.close()
def get_file_dir(self):
try:
f = open('.prev_dir.log', 'r')
last_dir = f.readline()
f.close()
return last_dir
except FileNotFoundError:
return self.log_dir
def save_log(self):
log = QtWidgets.QFileDialog()
log.setNameFilter('Log files (*.log)')
logname = log.getSaveFileName(self, 'Save Fit Log',
'/'.join([self.current_dir, 'FitJob.log']))[0]
# if name is not empty
if logname:
pass
else:
logname = '/'.join([self.log_dir, 'FitJob.log'])
with open(logname, 'w') as a_log:
for file_name in self.list_success_file:
a_log.write('Successful --- {0:s}\n'.format(file_name))
for file_name in self.list_aborted_file:
a_log.write('Aborted --- {0:s}\n'.format(file_name))
def save_last_dir(self):
with open('.prev_dir.log', 'w') as a_file:
a_file.write(self.current_dir)
# --------- some useful little tools -----------
def show_boxcar(self, state):
if state == QtCore.Qt.Checked:
self.edit_boxcar.show()
else:
# make sure no boxcar (in case)
self.edit_boxcar.setText('1')
self.edit_boxcar.hide()
def show_rescale(self, state):
if state == QtCore.Qt.Checked:
self.edit_rescale.show()
else:
# no rescale
self.edit_rescale.setText('1')
self.edit_rescale.hide()
def check_double_validity(self, *args):
sender = self.sender()
validator = QtGui.QDoubleValidator()
state = validator.validate(sender.text(), 0)[0]
if state == QtGui.QValidator.Acceptable and sender.text():
color = '#00A352' # green
self.fit_stat.input_valid = 2 # valid entry
elif not sender.text():
color = '#FF9933' # yellow
self.fit_stat.input_valid = 1 # empty entry
else:
color = '#D63333' # red
self.fit_stat.input_valid = 0 # invalid entry
sender.setStyleSheet('border: 3px solid %s' % color)
def check_int_validity(self, *args):
sender = self.sender()
validator = QtGui.QIntValidator()
state = validator.validate(sender.text(), 0)[0]
if state == QtGui.QValidator.Acceptable and sender.text():
color = '#00A352' # green
self.fit_stat.input_valid = 2 # valid entry
elif not sender.text():
color = '#FF9933' # yellow
self.fit_stat.input_valid = 1 # empty entry
else:
color = '#D63333' # red
self.fit_stat.input_valid = 0 # invalid entry
sender.setStyleSheet('border: 3px solid %s' % color)
def clear_item(self, layout): # clears all elements in the layout
if layout is not None:
while layout.count():
item = layout.takeAt(0)
w = item.widget()
if w is not None:
w.deleteLater()
else:
self.clear_item(item.layout())
def mpl_key_press(self, event):
# matplotlib standard key press event
key_press_handler(event, self.canvas, self.mpl_toolbar)
def mpl_click(self, event):
# if can still pick peak position
if (self.click_counter < self.fit_par.peak) and (self.click_counter >= 0):
# update counter
self.click_counter += 1
            # retrieve coordinate upon mouse click
mu = event.xdata # peak center
a = event.ydata*0.1 # peak intensity
# locate parameter index in the parameter list
mu_idx = self.fit_par.par_per_peak * (self.click_counter-1)
a_idx = mu_idx + self.fit_par.par_per_peak - 1
self.edit_par[mu_idx].setText(str(mu))
self.edit_par[a_idx].setText(str(a))
elif self.click_counter >= self.fit_par.peak:
            # click count exceeded; ask the user whether to reset
reset = QtWidgets.QMessageBox.question(self, 'Reset?',
'Do you want to reset clicks to override peak selection?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if reset == QtWidgets.QMessageBox.Yes:
self.click_counter = 0
elif reset == QtWidgets.QMessageBox.No:
self.click_counter = -1 # no longer bother in this session
else:
event.ignore()
def closeEvent(self, event): # exit warning
quit_confirm = QtWidgets.QMessageBox.question(self, 'Quit?',
            'Are you sure you want to quit?', QtWidgets.QMessageBox.Yes |
QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes)
if quit_confirm == QtWidgets.QMessageBox.Yes:
#try:
# save fit job log file
self.save_log()
self.save_last_dir()
#except:
# pass
event.accept()
else:
event.ignore()
def keyPressEvent(self, event): # press ESC to exit
if event.key() == QtCore.Qt.Key_Escape:
self.close()
# ------ run script ------
if __name__ == '__main__':
args = sys.argv
# get around the gtk error on linux systems (sacrifice the gui appearance)
#if sys.platform == 'linux':
# args.append('-style')
# args.append('Cleanlooks')
app = QtWidgets.QApplication(args)
launch = FitMainGui()
sys.exit(app.exec_())
|
luyaozou/PySpec
|
PySpec.py
|
Python
|
gpl-3.0
| 24,448
|
[
"Gaussian"
] |
fd48a1105a029febed8044af352c93f67eda863f69c4780dfc2bfd43bda35858
|
#!/usr/bin/env python
"""
C.7 Mathematical Formulas (p187)
"""
from Arrays import Array
from plasTeX import Command, Environment, sourceChildren, Macro
from plasTeX import DimenCommand, GlueCommand
from plasTeX.Logging import getLogger
from plasTeX.DOM import Node
#
# C.7.1
#
class MathEnvironment(Environment):
mathMode = True
class ThinSpace(Command):
macroName = '.'
class NegativeThinSpace(Command):
macroName = '!'
class MediumSpace(Command):
macroName = ':'
class ThickSpace(Command):
macroName = ';'
class ThinSpace_(Command):
macroName = '/'
# Need \newcommand\({\begin{math}} and \newcommand\){\end{math}}
class math(MathEnvironment):
@property
def source(self):
if self.hasChildNodes():
return '$%s$' % sourceChildren(self)
return '$'
class displaymath(MathEnvironment):
blockType = True
@property
def source(self):
if self.hasChildNodes():
return r'\[ %s \]' % sourceChildren(self)
if self.macroMode == Command.MODE_END:
return r'\]'
return r'\['
class BeginDisplayMath(Command):
macroName = '['
def invoke(self, tex):
o = self.ownerDocument.createElement('displaymath')
o.macroMode = Command.MODE_BEGIN
self.ownerDocument.context.push(o)
return [o]
class EndDisplayMath(Command):
macroName = ']'
def invoke(self, tex):
o = self.ownerDocument.createElement('displaymath')
o.macroMode = Command.MODE_END
self.ownerDocument.context.pop(o)
return [o]
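# BeginDisplayMath/EndDisplayMath above (and BeginMath/EndMath below)
# implement \[ \] and \( \) by pushing or popping a displaymath/math
# environment on the context stack, so each environment's source property
# can round-trip the original markup.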
class BeginMath(Command):
macroName = '('
def invoke(self, tex):
o = self.ownerDocument.createElement('math')
o.macroMode = Command.MODE_BEGIN
self.ownerDocument.context.push(o)
return [o]
class EndMath(Command):
macroName = ')'
def invoke(self, tex):
o = self.ownerDocument.createElement('math')
o.macroMode = Command.MODE_END
self.ownerDocument.context.pop(o)
return [o]
class ensuremath(Command):
args = 'self'
class equation(MathEnvironment):
blockType = True
counter = 'equation'
class EqnarrayStar(Array):
macroName = 'eqnarray*'
blockType = True
mathMode = True
class lefteqn(Command):
args = 'self'
def digest(self, tokens):
res = Command.digest(self, tokens)
obj = self.parentNode
while obj is not None and not isinstance(obj, Array.ArrayCell):
obj = obj.parentNode
if obj is not None:
obj.attributes['colspan'] = 3
obj.style['text-align'] = 'left'
return res
class ArrayCell(Array.ArrayCell):
@property
def source(self):
return '$\\displaystyle %s $' % sourceChildren(self, par=False)
class eqnarray(EqnarrayStar):
macroName = None
counter = 'equation'
class EndRow(Array.EndRow):
""" End of a row """
counter = 'equation'
def invoke(self, tex):
res = Array.EndRow.invoke(self, tex)
res[1].ref = self.ref
self.ownerDocument.context.currentlabel = res[1]
return res
def invoke(self, tex):
res = EqnarrayStar.invoke(self, tex)
if self.macroMode == self.MODE_END:
return res
res[1].ref = self.ref
return res
class nonumber(Command):
def invoke(self, tex):
self.ownerDocument.context.counters['equation'].addtocounter(-1)
def digest(self, tokens):
row = self.parentNode
while not isinstance(row, Array.ArrayRow):
row = row.parentNode
row.ref = None
class lefteqn(Command):
args = 'self'
#
# Style Parameters
#
class jot(DimenCommand):
value = DimenCommand.new(0)
class mathindent(DimenCommand):
value = DimenCommand.new(0)
class abovedisplayskip(GlueCommand):
value = GlueCommand.new(0)
class belowdisplayskip(GlueCommand):
value = GlueCommand.new(0)
class abovedisplayshortskip(GlueCommand):
value = GlueCommand.new(0)
class belowdisplayshortskip(GlueCommand):
value = GlueCommand.new(0)
#
# C.7.2 Common Structures
#
# _
# ^
# '
class frac(Command):
args = 'numer denom'
class sqrt(Command):
args = '[ n ] self'
class ldots(Command):
pass
class cdots(Command):
pass
class vdots(Command):
pass
class ddots(Command):
pass
#
# C.7.3 Mathematical Symbols
#
#
# Table 3.3: Greek Letters
#
class MathSymbol(Command):
pass
# Lowercase
class alpha(MathSymbol): pass
class beta(MathSymbol): pass
class gamma(MathSymbol): pass
class delta(MathSymbol): pass
class epsilon(MathSymbol): pass
class varepsilon(MathSymbol): pass
class zeta(MathSymbol): pass
class eta(MathSymbol): pass
class theta(MathSymbol): pass
class vartheta(MathSymbol): pass
class iota(MathSymbol): pass
class kappa(MathSymbol): pass
class GreekLambda(MathSymbol):
macroName = 'lambda'
class mu(MathSymbol): pass
class nu(MathSymbol): pass
class xi(MathSymbol): pass
class pi(MathSymbol): pass
class varpi(MathSymbol): pass
class rho(MathSymbol): pass
class varrho(MathSymbol): pass
class sigma(MathSymbol): pass
class varsigma(MathSymbol): pass
class tau(MathSymbol): pass
class upsilon(MathSymbol): pass
class phi(MathSymbol): pass
class varphi(MathSymbol): pass
class chi(MathSymbol): pass
class psi(MathSymbol): pass
class omega(MathSymbol): pass
# Uppercase
class Gamma(MathSymbol): pass
class Delta(MathSymbol): pass
class Theta(MathSymbol): pass
class Lambda(MathSymbol): pass
class Xi(MathSymbol): pass
class Pi(MathSymbol): pass
class Sigma(MathSymbol): pass
class Upsilon(MathSymbol): pass
class Phi(MathSymbol): pass
class Psi(MathSymbol): pass
class Omega(MathSymbol): pass
#
# Table 3.4: Binary Operation Symbols
#
class pm(MathSymbol): pass
class mp(MathSymbol): pass
class times(MathSymbol): pass
class div(MathSymbol): pass
class ast(MathSymbol): pass
class star(MathSymbol): pass
class circ(MathSymbol): pass
class bullet(MathSymbol): pass
class cdot(MathSymbol): pass
class cap(MathSymbol): pass
class cup(MathSymbol): pass
class uplus(MathSymbol): pass
class sqcap(MathSymbol): pass
class sqcup(MathSymbol): pass
class vee(MathSymbol): pass
class wedge(MathSymbol): pass
class setminus(MathSymbol): pass
class wr(MathSymbol): pass
class diamond(MathSymbol): pass
class bigtriangleup(MathSymbol): pass
class bigtriangledown(MathSymbol): pass
class triangleleft(MathSymbol): pass
class triangleright(MathSymbol): pass
class lhd(MathSymbol): pass
class rhd(MathSymbol): pass
class unlhd(MathSymbol): pass
class unrhd(MathSymbol): pass
class oplus(MathSymbol): pass
class ominus(MathSymbol): pass
class otimes(MathSymbol): pass
class oslash(MathSymbol): pass
class odot(MathSymbol): pass
class bigcirc(MathSymbol): pass
class dagger(MathSymbol): pass
class ddagger(MathSymbol): pass
class amalg(MathSymbol): pass
#
# Table 3.5: Relation Symbols
#
class Not(MathSymbol):
macroName = 'not'
args = 'symbol'
class leq(MathSymbol): pass
class le(MathSymbol): pass
class prec(MathSymbol): pass
class preceq(MathSymbol): pass
class ll(MathSymbol): pass
class subset(MathSymbol): pass
class subseteq(MathSymbol): pass
class sqsubseteq(MathSymbol): pass
class In(MathSymbol):
macroName = 'in'
class vdash(MathSymbol): pass
class geq(MathSymbol): pass
class ge(MathSymbol): pass
class succ(MathSymbol): pass
class succeq(MathSymbol): pass
class gg(MathSymbol): pass
class supset(MathSymbol): pass
class supseteq(MathSymbol): pass
class sqsupset(MathSymbol): pass
class sqsupseteq(MathSymbol): pass
class ni(MathSymbol): pass
class dashv(MathSymbol): pass
class equiv(MathSymbol): pass
class sim(MathSymbol): pass
class simeq(MathSymbol): pass
class asymp(MathSymbol): pass
class approx(MathSymbol): pass
class cong(MathSymbol): pass
class neq(MathSymbol): pass
class ne(MathSymbol): pass
class doteq(MathSymbol): pass
class notin(MathSymbol): pass
class models(MathSymbol): pass
class perp(MathSymbol): pass
class mid(MathSymbol): pass
class parallel(MathSymbol): pass
class bowtie(MathSymbol): pass
class Join(MathSymbol): pass
class smile(MathSymbol): pass
class frown(MathSymbol): pass
class propto(MathSymbol): pass
#
# Table 3.6: Arrow Symbols
#
class leftarrow(MathSymbol): pass
class Leftarrow(MathSymbol): pass
class rightarrow(MathSymbol): pass
class Rightarrow(MathSymbol): pass
class leftrightarrow(MathSymbol): pass
class Leftrightarrow(MathSymbol): pass
class mapsto(MathSymbol): pass
class hookleftarrow(MathSymbol): pass
class leftharpoonup(MathSymbol): pass
class leftharpoondown(MathSymbol): pass
class rightleftharpoons(MathSymbol): pass
class longleftarrow(MathSymbol): pass
class Longleftarrow(MathSymbol): pass
class longrightarrow(MathSymbol): pass
class Longrightarrow(MathSymbol): pass
class longleftrightarrow(MathSymbol): pass
class Longleftrightarrow(MathSymbol): pass
class longmapsto(MathSymbol): pass
class hookrightarrow(MathSymbol): pass
class rightharpoonup(MathSymbol): pass
class rightharpoondown(MathSymbol): pass
class leadsto(MathSymbol): pass
class uparrow(MathSymbol): pass
class Uparrow(MathSymbol): pass
class downarrow(MathSymbol): pass
class Downarrow(MathSymbol): pass
class updownarrow(MathSymbol): pass
class Updownarrow(MathSymbol): pass
class nearrow(MathSymbol): pass
class searrow(MathSymbol): pass
class swarrow(MathSymbol): pass
class nwarrow(MathSymbol): pass
#
# Table 3.7: Miscellaneous Symbols
#
class aleph(MathSymbol): pass
class hbar(MathSymbol): pass
class imath(MathSymbol): pass
class jmath(MathSymbol): pass
class ell(MathSymbol): pass
class wp(MathSymbol): pass
class Re(MathSymbol): pass
class Im(MathSymbol): pass
class mho(MathSymbol): pass
class prime(MathSymbol): pass
class emptyset(MathSymbol): pass
class nabla(MathSymbol): pass
class surd(MathSymbol): pass
class top(MathSymbol): pass
class bot(MathSymbol): pass
class VerticalBar(MathSymbol):
macroName = '|'
class forall(MathSymbol): pass
class exists(MathSymbol): pass
class neg(MathSymbol): pass
class flat(MathSymbol): pass
class natural(MathSymbol): pass
class sharp(MathSymbol): pass
class backslash(MathSymbol): pass
class partial(MathSymbol): pass
class infty(MathSymbol): pass
class Box(MathSymbol): pass
class Diamond(MathSymbol): pass
class triangle(MathSymbol): pass
class clubsuit(MathSymbol): pass
class diamondsuit(MathSymbol): pass
class heartsuit(MathSymbol): pass
class spadesuit(MathSymbol): pass
#
# Table 3.8: Variable-sized Symbols
#
class sum(MathSymbol): pass
class prod(MathSymbol): pass
class coprod(MathSymbol): pass
class int(MathSymbol): pass
class oint(MathSymbol): pass
class bigcap(MathSymbol): pass
class bigcup(MathSymbol): pass
class bigsqcup(MathSymbol): pass
class bigvee(MathSymbol): pass
class bigwedge(MathSymbol): pass
class bigodot(MathSymbol): pass
class bigotimes(MathSymbol): pass
class bigoplus(MathSymbol): pass
class biguplus(MathSymbol): pass
#
# Table 3.9: Log-like Functions
#
class Logarithm(MathSymbol):
macroName = 'log'
class bmod(MathSymbol): pass
class pmod(MathSymbol):
args = 'self'
class arccos(MathSymbol): pass
class arcsin(MathSymbol): pass
class arctan(MathSymbol): pass
class arg(MathSymbol): pass
class cos(MathSymbol): pass
class cosh(MathSymbol): pass
class cot(MathSymbol): pass
class coth(MathSymbol): pass
class csc(MathSymbol): pass
class deg(MathSymbol): pass
class det(MathSymbol): pass
class dim(MathSymbol): pass
class exp(MathSymbol): pass
class gcd(MathSymbol): pass
class hom(MathSymbol): pass
class inf(MathSymbol): pass
class ker(MathSymbol): pass
class lg(MathSymbol): pass
class lim(MathSymbol): pass
class liminf(MathSymbol): pass
class limsup(MathSymbol): pass
class ln(MathSymbol): pass
class log(MathSymbol): pass
class max(MathSymbol): pass
class min(MathSymbol): pass
class Pr(MathSymbol): pass
class sec(MathSymbol): pass
class sin(MathSymbol): pass
class sinh(MathSymbol): pass
class sup(MathSymbol): pass
class tan(MathSymbol): pass
class tanh(MathSymbol): pass
#
# C.7.4 Arrays (see Arrays.py)
#
#
# C.7.5 Delimiters
#
class delim(MathEnvironment):
args = 'bchar'
def invoke(self, tex):
if self.macroMode == self.MODE_END:
echar = tex.readArgument(expanded=True)
self.attributes['echar'] = echar[0]
res = MathEnvironment.invoke(self, tex)
return res
def digest(self, tokens):
if self.macroMode == Macro.MODE_END:
return
dopars = self.forcePars
for item in tokens:
# Make sure that we know to group paragraphs if one is found
if item.level == Node.PAR_LEVEL:
self.appendChild(item)
dopars = True
continue
# Don't absorb objects with a higher precedence
if item.level < self.level:
tokens.push(item)
break
# Absorb macros until the end of this environment is found
if item.nodeType == Node.ELEMENT_NODE:
if item.macroMode == Macro.MODE_END and type(item) is type(self):
self.attributes['echar'] = item.attributes['echar']
break
item.parentNode = self
item.digest(tokens)
# Stay within our context depth
if self.level > Node.DOCUMENT_LEVEL and \
item.contextDepth < self.contextDepth:
tokens.push(item)
break
# print 'APPEND', type(item)
self.appendChild(item)
# print 'DONE', type(self)
if dopars:
self.paragraphs()
class left(Command):
def invoke(self, tex):
obj = self.ownerDocument.createElement('delim')
obj.macroMode = Command.MODE_BEGIN
obj.parentNode = self.parentNode
# Return the output of the instantiated macro in
# place of self
out = obj.invoke(tex)
if out is None:
return [obj]
return out
class right(Command):
def invoke(self, tex):
obj = self.ownerDocument.createElement('delim')
obj.macroMode = Command.MODE_END
obj.parentNode = self.parentNode
# Return the output of the instantiated macro in
# place of self
out = obj.invoke(tex)
if out is None:
return [obj]
return out
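# Taken together, "\left( x \right)" parses into a single delim environment:
# \left pushes a delim node whose 'bchar' argument captures '(', and \right
# ends it, with ')' stored in the 'echar' attribute during digestion.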
# Table 3.10: Delimiters and TeXbook (p359)
class Delimiter(Command):
pass
class langle(Delimiter): pass
class rangle(Delimiter): pass
class lbrace(Delimiter): pass
class rbrace(Delimiter): pass
class lceil(Delimiter): pass
class rceil(Delimiter): pass
class lfloor(Delimiter): pass
class rfloor(Delimiter): pass
class lgroup(Delimiter): pass
class rgroup(Delimiter): pass
class lmoustache(Delimiter): pass
class rmoustache(Delimiter): pass
class uparrow(Delimiter): pass
class Uparrow(Delimiter): pass
class downarrow(Delimiter): pass
class Downarrow(Delimiter): pass
class updownarrow(Delimiter): pass
class Updownarrow(Delimiter): pass
class arrowvert(Delimiter): pass
class Arrowvert(Delimiter): pass
class vert(Delimiter): pass
class Vert(Delimiter): pass
class backslash(Delimiter): pass
class bracevert(Delimiter): pass
class bigl(Delimiter): pass
class bigm(Delimiter): pass
class bigr(Delimiter): pass
class Bigl(Delimiter): pass
class Bigm(Delimiter): pass
class Bigr(Delimiter): pass
class biggl(Delimiter): pass
class biggr(Delimiter): pass
class Biggl(Delimiter): pass
class Biggr(Delimiter): pass
class biggm(Delimiter): pass
class Biggm(Delimiter): pass
class Big(Delimiter):
args = 'char'
class bigg(Delimiter):
args = 'char'
class Bigg(Delimiter):
args = 'char'
class choose(Command):
pass
class brack(Command):
pass
class brace(Command):
pass
class sqrt(Command):
pass
#
# C.7.6 Putting One Thing Above Another
#
class overline(Command):
args = 'self'
class underline(Command):
args = 'self'
class overbrace(Command):
args = 'self'
class underbrace(Command):
args = 'self'
# Accents
class MathAccent(Command):
args = 'self'
class hat(MathAccent): pass
class check(MathAccent): pass
class breve(MathAccent): pass
class acute(MathAccent): pass
class grave(MathAccent): pass
class tilde(MathAccent): pass
class bar(MathAccent): pass
class vec(MathAccent): pass
class dot(MathAccent): pass
class ddot(MathAccent): pass
class widehat(MathAccent): pass
class widetilde(MathAccent): pass
class imath(MathAccent): pass
class jmath(MathAccent): pass
class stackrel(MathAccent):
args = 'top bottom'
#
# C.7.7 Spacing
#
# These are nested inside the MathEnvironment
#
# C.7.8 Changing Style
#
# Type Style
class mathrm(Command):
args = 'self'
class mathit(Command):
args = 'self'
class mathbf(Command):
args = 'self'
class mathsf(Command):
args = 'self'
class mathtt(Command):
args = 'self'
class mathcal(Command):
args = 'self'
class boldmath(Command):
pass
class unboldmath(Command):
pass
# Math Style
class displaystyle(Command):
pass
class textstyle(Command):
pass
class scriptstyle(Command):
pass
class scriptscriptstyle(Command):
pass
|
hao-han/LaTex2Docx
|
plasTeX/Base/LaTeX/Math.py
|
Python
|
mit
| 17,215
|
[
"Bowtie"
] |
9f04d52d4be623e81152a689b0fefd662bf268b746e95f5af17716e5530116d8
|
#!/usr/bin/env python
import math, numpy as np
#from enthought.mayavi import mlab
import matplotlib.pyplot as pp
import matplotlib.cm as cm
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import tf
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_lib.matplotlib_util as mpu
import pickle
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray
from m3skin_ros.msg import TaxelArray as TaxelArray_Meka
from hrl_msgs.msg import FloatArrayBare
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
def compute_contact_regions(arr_2d, threshold):
mask = arr_2d > threshold
label_im, nb_labels = ni.label(mask)
return label_im, nb_labels
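# A minimal sketch of compute_contact_regions on toy data (hypothetical
# values, not from the robot):
#
#     arr = np.array([[0.0, 0.5, 0.0],
#                     [0.0, 0.6, 0.0]])
#     label_im, nb = compute_contact_regions(arr, 0.1)
#     # label_im -> [[0, 1, 0], [0, 1, 0]] and nb -> 1: one connected region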
def compute_obj_labels(label_im, nb_labels):
    # Count how many taxels carry each label (labels run from 1 to nb_labels).
    index = np.array([np.sum(label_im == lbl)
                      for lbl in range(1, nb_labels + 1)], dtype=float)
    # The largest connected region is the first object; zero it out and take
    # the next largest as the second object. Ties resolve to the last label,
    # matching the original scan order.
    max_index_1 = np.max(index)
    local_nb_label_first_obj = int(np.where(index == max_index_1)[0][-1]) + 1
    index[local_nb_label_first_obj - 1] = 0
    max_index_2 = np.max(index)
    local_nb_label_second_obj = int(np.where(index == max_index_2)[0][-1]) + 1
    return local_nb_label_first_obj, local_nb_label_second_obj, max_index_1, max_index_2
def compute_resultant_force_magnitudes(force_arr, label_im, nb_label):
total_force = ni.sum(force_arr, label_im, nb_label)
return total_force
def compute_max_force(force_arr, label_im, nb_label):
max_force = ni.maximum(force_arr, label_im, nb_label)
return max_force
def compute_center_of_pressure(cx_arr, cy_arr, cz_arr, label_im,
nb_label):
cx = ni.mean(cx_arr, label_im, nb_label)
cy = ni.mean(cy_arr, label_im, nb_label)
cz = ni.mean(cz_arr, label_im, nb_label)
contact_vector = np.column_stack([cx, cy, cz])
return contact_vector
def callback(data, callback_args):
rospy.loginfo('Getting data!')
tf_lstnr = callback_args
sc = SkinContact()
sc.header.frame_id = '/torso_lift_link' # has to be this and no other coord frame.
sc.header.stamp = data.header.stamp
t1, q1 = tf_lstnr.lookupTransform(sc.header.frame_id,
data.header.frame_id,
rospy.Time(0))
t1 = np.matrix(t1).reshape(3,1)
r1 = tr.quaternion_to_matrix(q1)
force_vectors = np.row_stack([data.forces_x, data.forces_y, data.forces_z])
contact_vectors = np.row_stack([data.centers_x, data.centers_y, data.centers_z]).reshape((3,16,24))
fmags = ut.norm(force_vectors)
force_arr = fmags.reshape((16,24))
cx_arr = contact_vectors[0]
cy_arr = contact_vectors[1]
cz_arr = contact_vectors[2]
label_im, nb_labels = compute_contact_regions(force_arr, 0.1)
nb_label_first_obj, nb_label_second_obj, total_contact_first_obj, total_contact_second_obj = compute_obj_labels(label_im,nb_labels)
total_force_first_obj = compute_resultant_force_magnitudes(force_arr,label_im,nb_label_first_obj)
total_force_second_obj = compute_resultant_force_magnitudes(force_arr,label_im,nb_label_second_obj)
max_force_first_obj = compute_max_force(force_arr,label_im,nb_label_first_obj)
max_force_second_obj = compute_max_force(force_arr,label_im,nb_label_second_obj)
mean_force_first_obj = total_force_first_obj/total_contact_first_obj
mean_force_second_obj = total_force_second_obj/total_contact_second_obj
cop_local_first_obj = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_first_obj)
cop_global_first_obj = r1*(cop_local_first_obj.T) + t1
cop_local_second_obj = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_second_obj)
cop_global_second_obj = r1*(cop_local_second_obj.T) + t1
global time
time = time + 0.01
time_instant_data_first_obj = [time,total_force_first_obj,mean_force_first_obj,max_force_first_obj,total_contact_first_obj,cop_global_first_obj[0],cop_global_first_obj[1],cop_global_first_obj[2]]
time_instant_data_second_obj = [time,total_force_second_obj,mean_force_second_obj,max_force_second_obj,total_contact_second_obj,cop_global_second_obj[0],cop_global_second_obj[1],cop_global_second_obj[2]]
global time_varying_data_first_obj
time_varying_data_first_obj = np.row_stack([time_varying_data_first_obj, time_instant_data_first_obj])
global time_varying_data_second_obj
time_varying_data_second_obj = np.row_stack([time_varying_data_second_obj, time_instant_data_second_obj])
def tracking_point():
rospy.loginfo('Tracking Distance!')
ta1 = time_varying_data_first_obj
ta2 = time_varying_data_second_obj
k = 0
for i in ta1[:,0]:
if i != ta1[-1,0]:
instant_dist_first_obj = math.sqrt((ta1[k+1,5]-ta1[1,5])**2 + (ta1[k+1,6]-ta1[1,6])**2 + (ta1[k+1,7]-ta1[1,7])**2)
time_instant_tracker_first_obj = [ta1[k+1,0], instant_dist_first_obj]
instant_dist_second_obj = math.sqrt((ta2[k+1,5]-ta2[1,5])**2 + (ta2[k+1,6]-ta2[1,6])**2 + (ta2[k+1,7]-ta2[1,7])**2)
time_instant_tracker_second_obj = [ta2[k+1,0], instant_dist_second_obj]
global time_varying_tracker_first_obj
time_varying_tracker_first_obj = np.row_stack([time_varying_tracker_first_obj, time_instant_tracker_first_obj])
global time_varying_tracker_second_obj
time_varying_tracker_second_obj = np.row_stack([time_varying_tracker_second_obj, time_instant_tracker_second_obj])
k=k+1
def savedata():
rospy.loginfo('Saving data!')
global time_varying_data_first_obj
ut.save_pickle(time_varying_data_first_obj, '/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/First_Object/time_varying_data_first_object_trial_3.pkl')
global time_varying_data_second_obj
ut.save_pickle(time_varying_data_second_obj, '/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/Second_Object/time_varying_data_second_object_trial_3.pkl')
global time_varying_tracker_first_obj
ut.save_pickle(time_varying_tracker_first_obj, '/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/First_Object/time_varying_tracking_data_first_object_trial_3.pkl')
global time_varying_tracker_second_obj
ut.save_pickle(time_varying_tracker_second_obj, '/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/Second_Object/time_varying_tracking_data_second_object_trial_3.pkl')
def plotdata():
rospy.loginfo('Plotting data!')
data1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/First_Object/time_varying_data_first_object_trial_3.pkl')
data2 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/Second_Object/time_varying_data_second_object_trial_3.pkl')
tracking_data1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/First_Object/time_varying_tracking_data_first_object_trial_3.pkl')
tracking_data2 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/Second_Object/time_varying_tracking_data_second_object_trial_3.pkl')
mpu.figure(1)
pp.title('Time-Varying Force (1st Object)')
pp.xlabel('Time (s)')
pp.ylabel('Total Force')
pp.plot(data1[:,0], data1[:,1])
mpu.figure(2)
pp.title('Time-Varying Force (1st Object)')
pp.xlabel('Time (s)')
pp.ylabel('Mean Force')
pp.plot(data1[:,0], data1[:,2])
mpu.figure(3)
pp.title('Time-Varying Force (1st Object)')
pp.xlabel('Time (s)')
pp.ylabel('Max Force')
pp.plot(data1[:,0], data1[:,3])
mpu.figure(4)
pp.title('Time-Varying Contact (1st Object)')
pp.xlabel('Time (s)')
pp.ylabel('No. of Contact Regions')
pp.plot(data1[:,0], data1[:,4])
mpu.figure(5)
pp.title('Point Tracker (1st Object)')
pp.xlabel('Time (s)')
pp.ylabel('Contact Point Distance')
pp.plot(tracking_data1[:,0], tracking_data1[:,1])
    pp.grid(True)
mpu.figure(6)
pp.title('Time-Varying Force (2nd Object)')
pp.xlabel('Time (s)')
pp.ylabel('Total Force')
pp.plot(data2[:,0], data2[:,1])
mpu.figure(7)
pp.title('Time-Varying Force (2nd Object)')
pp.xlabel('Time (s)')
pp.ylabel('Mean Force')
pp.plot(data2[:,0], data2[:,2])
mpu.figure(8)
pp.title('Time-Varying Force (2nd Object)')
pp.xlabel('Time (s)')
pp.ylabel('Max Force')
pp.plot(data2[:,0], data2[:,3])
mpu.figure(9)
pp.title('Time-Varying Contact (2nd Object)')
pp.xlabel('Time (s)')
pp.ylabel('No. of Contact Regions')
pp.plot(data2[:,0], data2[:,4])
mpu.figure(10)
pp.title('Point Tracker (2nd Object)')
pp.xlabel('Time (s)')
pp.ylabel('Contact Point Distance')
pp.plot(tracking_data2[:,0], tracking_data2[:,1])
    pp.grid(True)
def getdata():
rospy.init_node('time_varying_data_two_objects', anonymous=True)
tf_lstnr = tf.TransformListener()
rospy.Subscriber("/skin_patch_forearm_right/taxels/forces", TaxelArray_Meka, callback, callback_args = (tf_lstnr))
rospy.spin()
if __name__ == '__main__':
time = 0
time_varying_data_first_obj = [0,0,0,0,0,0,0,0]
time_varying_data_second_obj = [0,0,0,0,0,0,0,0]
time_varying_tracker_first_obj = [0,0]
time_varying_tracker_second_obj = [0,0]
getdata()
tracking_point()
savedata()
plotdata()
pp.show()
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/skin_related/Cody_Data/time_varying_data_two_objects.py
|
Python
|
mit
| 10,020
|
[
"Mayavi"
] |
911a36a4dd5bc4689433918988607efbdc71a2a27803805d8c4611df7d96fa00
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from PyQt4 import QtGui
from openlp.core.lib import translate
from openlp.core.lib.ui import create_button_box
class Ui_TopicsDialog(object):
"""
The user interface for the topics dialog.
"""
def setupUi(self, topics_dialog):
"""
Set up the user interface for the topics dialog.
"""
topics_dialog.setObjectName('topics_dialog')
topics_dialog.resize(300, 10)
self.dialog_layout = QtGui.QVBoxLayout(topics_dialog)
self.dialog_layout.setObjectName('dialog_layout')
self.name_layout = QtGui.QFormLayout()
self.name_layout.setObjectName('name_layout')
self.name_label = QtGui.QLabel(topics_dialog)
self.name_label.setObjectName('name_label')
self.name_edit = QtGui.QLineEdit(topics_dialog)
self.name_edit.setObjectName('name_edit')
self.name_label.setBuddy(self.name_edit)
self.name_layout.addRow(self.name_label, self.name_edit)
self.dialog_layout.addLayout(self.name_layout)
self.button_box = create_button_box(topics_dialog, 'button_box', ['cancel', 'save'])
self.dialog_layout.addWidget(self.button_box)
self.retranslateUi(topics_dialog)
topics_dialog.setMaximumHeight(topics_dialog.sizeHint().height())
def retranslateUi(self, topics_dialog):
"""
Translate the UI on the fly.
"""
topics_dialog.setWindowTitle(translate('SongsPlugin.TopicsForm', 'Topic Maintenance'))
self.name_label.setText(translate('SongsPlugin.TopicsForm', 'Topic name:'))
|
marmyshev/item_title
|
openlp/plugins/songs/forms/topicsdialog.py
|
Python
|
gpl-2.0
| 3,674
|
[
"Brian"
] |
213d1703ec55bbcb7745cf279e3a08dd8cbb8fc4fd21926bd83e0e78620cb6ac
|
# This script processes MIMIC-III dataset and builds longitudinal diagnosis records for patients with at least two visits.
# The output data are cPickled, and suitable for training Doctor AI or RETAIN
# Written by Edward Choi (mp2893@gatech.edu)
# Usage: Put this script in the folder where the MIMIC-III CSV files are located. Then execute the below command.
# python process_mimic.py ADMISSIONS.csv DIAGNOSES_ICD.csv <output file>
# Output files
# <output file>.pids: List of unique Patient IDs. Used for intermediate processing
# <output file>.dates: List of List of Python datetime objects. The outer List is for each patient. The inner List is for each visit made by each patient
# <output file>.seqs: List of List of List of integer diagnosis codes. The outer List is for each patient. The middle List contains visits made by each patient. The inner List contains the integer diagnosis codes that occurred in each visit
# <output file>.types: Python dictionary that maps string diagnosis codes to integer diagnosis codes.
import sys
import cPickle as pickle
from datetime import datetime
def convert_to_icd9(dxStr):
if dxStr.startswith('E'):
if len(dxStr) > 4: return dxStr[:4] + '.' + dxStr[4:]
else: return dxStr
else:
if len(dxStr) > 3: return dxStr[:3] + '.' + dxStr[3:]
else: return dxStr
def convert_to_3digit_icd9(dxStr):
if dxStr.startswith('E'):
if len(dxStr) > 4: return dxStr[:4]
else: return dxStr
else:
if len(dxStr) > 3: return dxStr[:3]
else: return dxStr
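# Illustrative sketch (added; not part of the original script): expected behavior of
# the two converters on sample code strings as stored in MIMIC-III (no decimal point).
def _example_icd9_conversion():
    assert convert_to_icd9('4019') == '401.9'        # dot reinserted after 3 digits
    assert convert_to_icd9('E9350') == 'E935.0'      # E-codes keep 4 leading characters
    assert convert_to_3digit_icd9('4019') == '401'   # truncated to category level
    assert convert_to_3digit_icd9('E9350') == 'E935'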
if __name__ == '__main__':
admissionFile = sys.argv[1]
diagnosisFile = sys.argv[2]
outFile = sys.argv[3]
print 'Building pid-admission mapping, admission-date mapping'
pidAdmMap = {}
admDateMap = {}
infd = open(admissionFile, 'r')
infd.readline()
for line in infd:
tokens = line.strip().split(',')
pid = int(tokens[1])
admId = int(tokens[2])
admTime = datetime.strptime(tokens[3], '%Y-%m-%d %H:%M:%S')
admDateMap[admId] = admTime
if pid in pidAdmMap: pidAdmMap[pid].append(admId)
else: pidAdmMap[pid] = [admId]
infd.close()
print 'Building admission-dxList mapping'
admDxMap = {}
infd = open(diagnosisFile, 'r')
infd.readline()
for line in infd:
tokens = line.strip().split(',')
admId = int(tokens[2])
		dxStr = 'D_' + convert_to_icd9(tokens[4][1:-1]) ############## Comment this line and uncomment the line below, if you want to use 3-digit ICD9 codes instead of the entire ICD9 digits.
#dxStr = 'D_' + convert_to_3digit_icd9(tokens[4][1:-1])
if admId in admDxMap: admDxMap[admId].append(dxStr)
else: admDxMap[admId] = [dxStr]
infd.close()
print 'Building pid-sortedVisits mapping'
pidSeqMap = {}
for pid, admIdList in pidAdmMap.iteritems():
if len(admIdList) < 2: continue
sortedList = sorted([(admDateMap[admId], admDxMap[admId]) for admId in admIdList])
pidSeqMap[pid] = sortedList
print 'Building pids, dates, strSeqs'
pids = []
dates = []
seqs = []
for pid, visits in pidSeqMap.iteritems():
pids.append(pid)
seq = []
date = []
for visit in visits:
date.append(visit[0])
seq.append(visit[1])
dates.append(date)
seqs.append(seq)
print 'Converting strSeqs to intSeqs, and making types'
types = {}
newSeqs = []
for patient in seqs:
newPatient = []
for visit in patient:
newVisit = []
for code in visit:
if code in types:
newVisit.append(types[code])
else:
types[code] = len(types)
newVisit.append(types[code])
newPatient.append(newVisit)
newSeqs.append(newPatient)
pickle.dump(pids, open(outFile+'.pids', 'wb'), -1)
pickle.dump(dates, open(outFile+'.dates', 'wb'), -1)
pickle.dump(newSeqs, open(outFile+'.seqs', 'wb'), -1)
pickle.dump(types, open(outFile+'.types', 'wb'), -1)
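# Illustrative sketch (added; not part of the original script): loading the cPickled
# outputs back, e.g. as input for training Doctor AI or RETAIN.
def _example_load_outputs(out_prefix):
    seqs = pickle.load(open(out_prefix + '.seqs', 'rb'))
    types = pickle.load(open(out_prefix + '.types', 'rb'))
    rev_types = dict((v, k) for k, v in types.iteritems())  # int code -> 'D_xxx.x' string
    return seqs, types, rev_types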
|
mp2893/doctorai
|
process_mimic.py
|
Python
|
bsd-3-clause
| 3,673
|
[
"VisIt"
] |
c32064e72e81471d282765725d36bfeab725e2f00542a0129773e242d069c3b0
|
"""
Photovoltaic
"""
import os
import time
from itertools import repeat
from math import *
from multiprocessing import Pool
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame as gdf
from scipy import interpolate
import cea.config
import cea.inputlocator
import cea.utilities.parallel
from cea.analysis.costs.equations import calc_capex_annualized
from cea.constants import HOURS_IN_YEAR
from cea.technologies.solar import constants
from cea.utilities import epwreader
from cea.utilities import solar_equations
from cea.utilities.standardize_coordinates import get_lat_lon_projected_shapefile
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca, Shanshan Hsieh"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def calc_PV(locator, config, latitude, longitude, weather_data, datetime_local, building_name):
"""
This function first determines the surface area with sufficient solar radiation, and then calculates the optimal
tilt angles of panels at each surface location. The panels are categorized into groups by their surface azimuths,
    tilt angles, and global irradiation. Lastly, the electricity generation from PV panels of each group is calculated.
:param locator: An InputLocator to locate input files
:type locator: cea.inputlocator.InputLocator
    :param radiation_path: solar insolation data on all surfaces of each building (path)
    :type radiation_path: String
    :param metadata_csv: data of sensor points measuring solar insolation of each building
:type metadata_csv: .csv
:param latitude: latitude of the case study location
:type latitude: float
:param longitude: longitude of the case study location
:type longitude: float
:param weather_path: path to the weather data file of the case study location
:type weather_path: .epw
    :param building_name: name of the building to be evaluated
    :type building_name: str
:return: Building_PV.csv with PV generation potential of each building, Building_sensors.csv with sensor data of
each PV panel.
"""
t0 = time.perf_counter()
radiation_path = locator.get_radiation_building_sensors(building_name)
metadata_csv_path = locator.get_radiation_metadata(building_name)
# solar properties
solar_properties = solar_equations.calc_sun_properties(latitude, longitude, weather_data, datetime_local, config)
print('calculating solar properties done')
# calculate properties of PV panel
panel_properties_PV = calc_properties_PV_db(locator.get_database_conversion_systems(), config)
print('gathering properties of PV panel')
# select sensor point with sufficient solar radiation
max_annual_radiation, annual_radiation_threshold, sensors_rad_clean, sensors_metadata_clean = \
solar_equations.filter_low_potential(radiation_path, metadata_csv_path, config)
print('filtering low potential sensor points done')
# set the maximum roof coverage
if config.solar.custom_roof_coverage:
max_roof_coverage = config.solar.max_roof_coverage
else:
max_roof_coverage = 1.0
if not sensors_metadata_clean.empty:
if not config.solar.custom_tilt_angle:
# calculate optimal angle and tilt for panels
sensors_metadata_cat = solar_equations.optimal_angle_and_tilt(sensors_metadata_clean, latitude,
solar_properties,
max_annual_radiation, panel_properties_PV,
max_roof_coverage)
print('calculating optimal tilt angle and separation done')
else:
# calculate spacing required by user-supplied tilt angle for panels
sensors_metadata_cat = solar_equations.calc_spacing_custom_angle(sensors_metadata_clean, solar_properties,
max_annual_radiation, panel_properties_PV,
config.solar.panel_tilt_angle,
max_roof_coverage)
print('calculating separation for custom tilt angle done')
# group the sensors with the same tilt, surface azimuth, and total radiation
sensor_groups = solar_equations.calc_groups(sensors_rad_clean, sensors_metadata_cat)
print('generating groups of sensor points done')
final = calc_pv_generation(sensor_groups, weather_data, datetime_local, solar_properties, latitude,
panel_properties_PV)
final.to_csv(locator.PV_results(building=building_name), index=True,
float_format='%.2f') # print PV generation potential
sensors_metadata_cat.to_csv(locator.PV_metadata_results(building=building_name), index=True,
index_label='SURFACE',
float_format='%.2f',
na_rep='nan') # print selected metadata of the selected sensors
print(building_name, 'done - time elapsed: %.2f seconds' % (time.perf_counter() - t0))
    else:  # this branch is reached when a building does not have sufficient solar potential
final = pd.DataFrame(
{'Date': datetime_local, 'PV_walls_north_E_kWh': 0, 'PV_walls_north_m2': 0, 'PV_walls_south_E_kWh': 0,
'PV_walls_south_m2': 0,
'PV_walls_east_E_kWh': 0, 'PV_walls_east_m2': 0, 'PV_walls_west_E_kWh': 0, 'PV_walls_west_m2': 0,
'PV_roofs_top_E_kWh': 0, 'PV_roofs_top_m2': 0,
'E_PV_gen_kWh': 0, 'Area_PV_m2': 0, 'radiation_kWh': 0}, index=range(HOURS_IN_YEAR))
final.to_csv(locator.PV_results(building=building_name), index=False, float_format='%.2f', na_rep='nan')
sensors_metadata_cat = pd.DataFrame(
{'SURFACE': 0, 'AREA_m2': 0, 'BUILDING': 0, 'TYPE': 0, 'Xcoor': 0, 'Xdir': 0, 'Ycoor': 0, 'Ydir': 0,
'Zcoor': 0, 'Zdir': 0, 'orientation': 0, 'total_rad_Whm2': 0, 'tilt_deg': 0, 'B_deg': 0,
'array_spacing_m': 0, 'surface_azimuth_deg': 0, 'area_installed_module_m2': 0,
'CATteta_z': 0, 'CATB': 0, 'CATGB': 0, 'type_orientation': 0}, index=range(2))
sensors_metadata_cat.to_csv(locator.PV_metadata_results(building=building_name), index=False,
float_format='%.2f', na_rep='nan')
# =========================
# PV electricity generation
# =========================
def calc_pv_generation(sensor_groups, weather_data, date_local, solar_properties, latitude, panel_properties_PV):
"""
To calculate the electricity generated from PV panels.
:param hourly_radiation: mean hourly radiation of sensors in each group [Wh/m2]
:type hourly_radiation: dataframe
:param number_groups: number of groups of sensor points
:type number_groups: float
:param number_points: number of sensor points in each group
:type number_points: float
:param prop_observers: mean values of sensor properties of each group of sensors
:type prop_observers: dataframe
:param weather_data: weather data read from the epw file
:type weather_data: dataframe
:param g: declination
:type g: float
:param Sz: zenith angle
:type Sz: float
:param Az: solar azimuth
:param ha: hour angle
:param latitude: latitude of the case study location
:return:
"""
# local variables
number_groups = sensor_groups['number_groups'] # number of groups of sensor points
prop_observers = sensor_groups['prop_observers'] # mean values of sensor properties of each group of sensors
hourly_radiation = sensor_groups['hourlydata_groups'] # mean hourly radiation of sensors in each group [Wh/m2]
# convert degree to radians
lat = radians(latitude)
g_rad = np.radians(solar_properties.g)
ha_rad = np.radians(solar_properties.ha)
Sz_rad = np.radians(solar_properties.Sz)
Az_rad = np.radians(solar_properties.Az)
# empty list to store results
list_groups_area = [0 for i in range(number_groups)]
total_el_output_PV_kWh = [0 for i in range(number_groups)]
total_radiation_kWh = [0 for i in range(number_groups)]
potential = pd.DataFrame(index=range(HOURS_IN_YEAR))
panel_orientations = ['walls_south', 'walls_north', 'roofs_top', 'walls_east', 'walls_west']
for panel_orientation in panel_orientations:
potential['PV_' + panel_orientation + '_E_kWh'] = 0
potential['PV_' + panel_orientation + '_m2'] = 0
eff_nom = panel_properties_PV['PV_n']
Bref = panel_properties_PV['PV_Bref']
misc_losses = panel_properties_PV['misc_losses'] # cabling, resistances etc..
for group in prop_observers.index.values:
# calculate radiation types (direct/diffuse) in group
radiation_Wperm2 = solar_equations.cal_radiation_type(group, hourly_radiation, weather_data)
# read panel properties of each group
teta_z_deg = prop_observers.loc[group, 'surface_azimuth_deg']
tot_module_area_m2 = prop_observers.loc[group, 'area_installed_module_m2']
tilt_angle_deg = prop_observers.loc[group, 'B_deg'] # tilt angle of panels
# degree to radians
tilt_rad = radians(tilt_angle_deg) # tilt angle
        teta_z_rad = radians(teta_z_deg)  # surface azimuth
        # calculate effective incident angles necessary
        teta_rad = np.vectorize(solar_equations.calc_angle_of_incidence)(g_rad, lat, ha_rad, tilt_rad, teta_z_rad)
teta_ed_rad, teta_eg_rad = calc_diffuseground_comp(tilt_rad)
absorbed_radiation_Wperm2 = np.vectorize(calc_absorbed_radiation_PV)(radiation_Wperm2.I_sol,
radiation_Wperm2.I_direct,
radiation_Wperm2.I_diffuse, tilt_rad,
Sz_rad, teta_rad, teta_ed_rad,
teta_eg_rad, panel_properties_PV)
T_cell_C = np.vectorize(calc_cell_temperature)(absorbed_radiation_Wperm2, weather_data.drybulb_C,
panel_properties_PV)
el_output_PV_kW = np.vectorize(calc_PV_power)(absorbed_radiation_Wperm2, T_cell_C, eff_nom, tot_module_area_m2,
Bref, misc_losses)
# write results from each group
panel_orientation = prop_observers.loc[group, 'type_orientation']
potential['PV_' + panel_orientation + '_E_kWh'] = potential[
'PV_' + panel_orientation + '_E_kWh'] + el_output_PV_kW
potential['PV_' + panel_orientation + '_m2'] = potential['PV_' + panel_orientation + '_m2'] + tot_module_area_m2
# aggregate results from all modules
list_groups_area[group] = tot_module_area_m2
total_el_output_PV_kWh[group] = el_output_PV_kW
total_radiation_kWh[group] = (radiation_Wperm2['I_sol'] * tot_module_area_m2 / 1000) # kWh
    # check for missing groups and assign 0 as el_output_PV_kW
# panel_orientations = ['walls_south', 'walls_north', 'roofs_top', 'walls_east', 'walls_west']
# for panel_orientation in panel_orientations:
# if panel_orientation not in prop_observers['type_orientation'].values:
# potential['PV_' + panel_orientation + '_E_kWh'] = 0
# potential['PV_' + panel_orientation + '_m2'] = 0
potential['E_PV_gen_kWh'] = sum(total_el_output_PV_kWh)
potential['radiation_kWh'] = sum(total_radiation_kWh).values
potential['Area_PV_m2'] = sum(list_groups_area)
potential['Date'] = date_local
potential = potential.set_index('Date')
return potential
def calc_cell_temperature(absorbed_radiation_Wperm2, T_external_C, panel_properties_PV):
"""
calculates cell temperatures based on the absorbed radiation
:param absorbed_radiation_Wperm2: absorbed radiation on panel
:type absorbed_radiation_Wperm2: np.array
:param T_external_C: drybulb temperature from the weather file
:type T_external_C: series
:param panel_properties_PV: panel property from the supply system database
:type panel_properties_PV: dataframe
    :return T_cell_C: cell temperature of PV panels
:rtype T_cell_C: series
"""
NOCT = panel_properties_PV['PV_noct']
# temperature of cell
T_cell_C = T_external_C + absorbed_radiation_Wperm2 * (NOCT - 20) / (
800) # assuming linear temperature rise vs radiation according to NOCT condition
return T_cell_C
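# Illustrative check (added sketch; not part of the original module): by definition of
# NOCT, at 800 W/m2 absorbed radiation and 20 degC ambient the cell sits exactly at
# NOCT. The panel properties dict below is a hypothetical stand-in.
def _example_cell_temperature():
    panel = {'PV_noct': 45.0}  # hypothetical NOCT value [degC]
    t_cell = calc_cell_temperature(800.0, 20.0, panel)
    assert abs(t_cell - 45.0) < 1e-9  # 20 + 800 * (45 - 20) / 800 = 45 degC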
def calc_angle_of_incidence(g, lat, ha, tilt, teta_z):
"""
To calculate angle of incidence from solar vector and surface normal vector.
    (Validated with Sandia pvlib.irradiance.aoi)
    :param lat: latitude of the location of the case study [radians]
:param g: declination of the solar position [radians]
:param ha: hour angle [radians]
:param tilt: panel surface tilt angle [radians]
:param teta_z: panel surface azimuth angle [radians]
:type lat: float
:type g: float
:type ha: float
:type tilt: float
:type teta_z: float
:return teta_B: angle of incidence [radians]
:rtype teta_B: float
    .. [Sproul, A.B., 2007] Sproul, A.B. (2007). Derivation of the solar geometric relationships using vector analysis.
       Renewable Energy, 32(7), 1187-1205.
"""
# surface normal vector
n_E = sin(tilt) * sin(teta_z)
n_N = sin(tilt) * cos(teta_z)
n_Z = cos(tilt)
# solar vector
s_E = -cos(g) * sin(ha)
s_N = sin(g) * cos(lat) - cos(g) * sin(lat) * cos(ha)
s_Z = cos(g) * cos(lat) * cos(ha) + sin(g) * sin(lat)
# angle of incidence
teta_B = acos(n_E * s_E + n_N * s_N + n_Z * s_Z)
return teta_B
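# Illustrative check (added sketch; not part of the original module): for a horizontal
# surface (tilt = 0) the angle of incidence must equal the solar zenith angle, since
# cos(Sz) = cos(lat)cos(g)cos(ha) + sin(lat)sin(g). Solar position values are hypothetical.
def _example_angle_of_incidence():
    g, lat, ha = radians(10), radians(47), radians(15)
    sz = acos(cos(lat) * cos(g) * cos(ha) + sin(lat) * sin(g))
    teta = calc_angle_of_incidence(g, lat, ha, tilt=0.0, teta_z=0.0)
    assert abs(teta - sz) < 1e-9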
def calc_diffuseground_comp(tilt_radians):
"""
To calculate reflected radiation and diffuse radiation.
:param tilt_radians: surface tilt angle [rad]
:type tilt_radians: float
:return teta_ed: effective incidence angle from diffuse radiation [rad]
:return teta_eg: effective incidence angle from ground-reflected radiation [rad]
:rtype teta_ed: float
:rtype teta_eg: float
:References: Duffie, J. A. and Beckman, W. A. (2013) Radiation Transmission through Glazing: Absorbed Radiation, in
Solar Engineering of Thermal Processes, Fourth Edition, John Wiley & Sons, Inc., Hoboken, NJ, USA.
doi: 10.1002/9781118671603.ch5
"""
tilt = degrees(tilt_radians)
teta_ed = 59.68 - 0.1388 * tilt + 0.001497 * tilt ** 2 # [degrees] (5.4.2)
teta_eG = 90 - 0.5788 * tilt + 0.002693 * tilt ** 2 # [degrees] (5.4.1)
return radians(teta_ed), radians(teta_eG)
def calc_absorbed_radiation_PV(I_sol, I_direct, I_diffuse, tilt, Sz, teta, tetaed, tetaeg, panel_properties_PV):
"""
:param I_sol: total solar radiation [Wh/m2]
:param I_direct: direct solar radiation [Wh/m2]
:param I_diffuse: diffuse solar radiation [Wh/m2]
:param tilt: solar panel tilt angle [rad]
:param Sz: solar zenith angle [rad]
:param teta: angle of incidence [rad]
:param tetaed: effective incidence angle from diffuse radiation [rad]
:param tetaeg: effective incidence angle from ground-reflected radiation [rad]
:type I_sol: float
:type I_direct: float
:type I_diffuse: float
:type tilt: float
:type Sz: float
:type teta: float
:type tetaed: float
:type tetaeg: float
:param panel_properties_PV: properties of the PV panel
:type panel_properties_PV: dataframe
:return:
:References: Duffie, J. A. and Beckman, W. A. (2013) Radiation Transmission through Glazing: Absorbed Radiation, in
Solar Engineering of Thermal Processes, Fourth Edition, John Wiley & Sons, Inc., Hoboken, NJ, USA.
doi: 10.1002/9781118671603.ch5
"""
# read variables
n = constants.n # refractive index of glass
Pg = constants.Pg # ground reflectance
K = constants.K # glazing extinction coefficient
NOCT = panel_properties_PV['PV_noct']
a0 = panel_properties_PV['PV_a0']
a1 = panel_properties_PV['PV_a1']
a2 = panel_properties_PV['PV_a2']
a3 = panel_properties_PV['PV_a3']
a4 = panel_properties_PV['PV_a4']
L = panel_properties_PV['PV_th']
    # calculate ratio of beam radiation on a tilted plane
    # to avoid non-convergence when I_sol = 0
lim1 = radians(0)
lim2 = radians(90)
lim3 = radians(89.999)
if teta < lim1:
teta = min(lim3, abs(teta))
if teta >= lim2:
teta = lim3
if Sz < lim1:
Sz = min(lim3, abs(Sz))
if Sz >= lim2:
Sz = lim3
# Rb: ratio of beam radiation of tilted surface to that on horizontal surface
if Sz <= radians(85): # Sz is Zenith angle # TODO: FIND REFERENCE
Rb = cos(teta) / cos(Sz)
else:
Rb = 0 # Assume there is no direct radiation when the sun is close to the horizon.
# calculate air mass modifier
m = 1 / cos(Sz) # air mass
M = a0 + a1 * m + a2 * m ** 2 + a3 * m ** 3 + a4 * m ** 4 # air mass modifier
# incidence angle modifier for direct (beam) radiation
    teta_r = asin(sin(teta) / n)  # refraction angle in radians (approximation according to Soteris A.) (5.1.4)
Ta_n = exp(-K * L) * (1 - ((n - 1) / (n + 1)) ** 2)
if teta < radians(90): # 90 degrees in radians
part1 = teta_r + teta
part2 = teta_r - teta
Ta_B = exp((-K * L) / cos(teta_r)) * (
1 - 0.5 * ((sin(part2) ** 2) / (sin(part1) ** 2) + (tan(part2) ** 2) / (tan(part1) ** 2)))
kteta_B = Ta_B / Ta_n
else:
kteta_B = 0
# incidence angle modifier for diffuse radiation
teta_r = asin(sin(tetaed) / n) # refraction angle for diffuse radiation [rad]
part1 = teta_r + tetaed
part2 = teta_r - tetaed
Ta_D = exp((-K * L) / cos(teta_r)) * (
1 - 0.5 * ((sin(part2) ** 2) / (sin(part1) ** 2) + (tan(part2) ** 2) / (tan(part1) ** 2)))
kteta_D = Ta_D / Ta_n
# incidence angle modifier for ground-reflected radiation
teta_r = asin(sin(tetaeg) / n) # refraction angle for ground-reflected radiation [rad]
part1 = teta_r + tetaeg
part2 = teta_r - tetaeg
Ta_eG = exp((-K * L) / cos(teta_r)) * (
1 - 0.5 * ((sin(part2) ** 2) / (sin(part1) ** 2) + (tan(part2) ** 2) / (tan(part1) ** 2)))
kteta_eG = Ta_eG / Ta_n
# absorbed solar radiation
absorbed_radiation_Wperm2 = M * Ta_n * (
kteta_B * I_direct * Rb + kteta_D * I_diffuse * (1 + cos(tilt)) / 2 + kteta_eG * I_sol * Pg * (
1 - cos(tilt)) / 2) # [W/m2] (5.12.1)
if absorbed_radiation_Wperm2 < 0.0: # when points are 0 and too much losses
# print ('the absorbed radiation', absorbed_radiation_Wperm2 ,'is negative, please check calc_absorbed_radiation_PVT')
absorbed_radiation_Wperm2 = 0.0
return absorbed_radiation_Wperm2
def calc_PV_power(absorbed_radiation_Wperm2, T_cell_C, eff_nom, tot_module_area_m2, Bref_perC, misc_losses):
"""
To calculate the power production of PV panels.
:param absorbed_radiation_Wperm2: absorbed radiation [W/m2]
:type absorbed_radiation_Wperm2: float
:param T_cell_C: cell temperature [degree]
:param eff_nom: nominal efficiency of PV module [-]
:type eff_nom: float
:param tot_module_area_m2: total PV module area [m2]
:type tot_module_area_m2: float
:param Bref_perC: cell maximum power temperature coefficient [degree C^(-1)]
:type Bref_perC: float
:param misc_losses: expected system loss [-]
:type misc_losses: float
:return el_output_PV_kW: Power production [kW]
:rtype el_output_PV_kW: float
..[Osterwald, C. R., 1986] Osterwald, C. R. (1986). Translation of device performance measurements to
reference conditions. Solar Cells, 18, 269-279.
"""
T_standard_C = 25.0 # temperature at the standard testing condition
el_output_PV_kW = eff_nom * tot_module_area_m2 * absorbed_radiation_Wperm2 * \
(1 - Bref_perC * (T_cell_C - T_standard_C)) * (1 - misc_losses) / 1000
return el_output_PV_kW
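# Illustrative check (added sketch; not part of the original module): at standard test
# conditions (cell at 25 degC) the temperature correction vanishes, so the Osterwald
# formula reduces to eff_nom * area * G * (1 - misc_losses) / 1000. Numbers are hypothetical.
def _example_pv_power():
    kW = calc_PV_power(absorbed_radiation_Wperm2=1000.0, T_cell_C=25.0, eff_nom=0.16,
                       tot_module_area_m2=10.0, Bref_perC=0.0045, misc_losses=0.1)
    assert abs(kW - 0.16 * 10.0 * 1000.0 * 0.9 / 1000) < 1e-9  # 1.44 kW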
# ============================
# Optimal angle and tilt
# ============================
def optimal_angle_and_tilt(sensors_metadata_clean, latitude, worst_sh, worst_Az, transmissivity,
Max_Isol, module_length):
"""
This function first determines the optimal tilt angle, row spacing and surface azimuth of panels installed at each
sensor point. Secondly, the installed PV module areas at each sensor point are calculated. Lastly, all the modules
    are categorized by their surface azimuth, tilt angle, and yearly radiation. The output will then be used to
calculate the absorbed radiation.
    :param sensors_metadata_clean: data of filtered sensor points measuring solar insolation of each building
:type sensors_metadata_clean: dataframe
:param latitude: latitude of the case study location
:type latitude: float
:param worst_sh: solar elevation at the worst hour [degree]
:type worst_sh: float
:param worst_Az: solar azimuth at the worst hour [degree]
:type worst_Az: float
    :param transmissivity: clearness index [-]
:type transmissivity: float
:param module_length: length of the PV module [m]
:type module_length: float
:param Max_Isol: max radiation potential (equals to global horizontal radiation) [Wh/m2/year]
:type Max_Isol: float
:returns sensors_metadata_clean: data of filtered sensor points categorized with module tilt angle, array spacing,
surface azimuth, installed PV module area of each sensor point and the categories
:rtype sensors_metadata_clean: dataframe
:Assumptions:
1) Tilt angle: If the sensor is on tilted roof, the panel will have the same tilt as the roof. If the sensor is on
    a wall, the tilt angle is 90 degrees. Tilt angles for flat roofs are determined using the method from Quinn et al.
2) Row spacing: Determine the row spacing by minimizing the shadow according to the solar elevation and azimuth at
the worst hour of the year. The worst hour is a global variable defined by users.
3) Surface azimuth (orientation) of panels: If the sensor is on a tilted roof, the orientation of the panel is the
same as the roof. Sensors on flat roofs are all south facing.
"""
# calculate panel tilt angle (B) for flat roofs (tilt < 5 degrees), slope roofs and walls.
optimal_angle_flat = calc_optimal_angle(180, latitude,
transmissivity) # assume surface azimuth = 180 (N,E), south facing
sensors_metadata_clean['tilt'] = np.vectorize(acos)(sensors_metadata_clean['Zdir']) # surface tilt angle in rad
sensors_metadata_clean['tilt'] = np.vectorize(degrees)(
sensors_metadata_clean['tilt']) # surface tilt angle in degrees
sensors_metadata_clean['B'] = np.where(sensors_metadata_clean['tilt'] >= 5, sensors_metadata_clean['tilt'],
degrees(optimal_angle_flat)) # panel tilt angle in degrees
# calculate spacing and surface azimuth of the panels for flat roofs
optimal_spacing_flat = calc_optimal_spacing(worst_sh, worst_Az, optimal_angle_flat, module_length)
sensors_metadata_clean['array_s'] = np.where(sensors_metadata_clean['tilt'] >= 5, 0, optimal_spacing_flat)
sensors_metadata_clean['surface_azimuth'] = np.vectorize(calc_surface_azimuth)(sensors_metadata_clean['Xdir'],
sensors_metadata_clean['Ydir'],
sensors_metadata_clean[
'B']) # degrees
# calculate the surface area required to install one pv panel on flat roofs with defined tilt angle and array spacing
    surface_area_flat = module_length * (
            sensors_metadata_clean.array_s / 2 + module_length * cos(optimal_angle_flat))
# calculate the pv module area within the area of each sensor point
sensors_metadata_clean['area_module'] = np.where(sensors_metadata_clean['tilt'] >= 5,
sensors_metadata_clean.AREA_m2,
module_length ** 2 * (
sensors_metadata_clean.AREA_m2 / surface_area_flat))
# categorize the sensors by surface_azimuth, B, GB
result = np.vectorize(solar_equations.calc_categoriesroof)(sensors_metadata_clean.surface_azimuth,
sensors_metadata_clean.B,
sensors_metadata_clean.total_rad_Whm2, Max_Isol)
sensors_metadata_clean['CATteta_z'] = result[0]
sensors_metadata_clean['CATB'] = result[1]
sensors_metadata_clean['CATGB'] = result[2]
return sensors_metadata_clean
def calc_optimal_angle(teta_z, latitude, transmissivity):
"""
To calculate the optimal tilt angle of the solar panels.
:param teta_z: surface azimuth, 0 degree south (east negative) or 0 degree north (east positive)
:type teta_z: float
:param latitude: latitude of the case study site
:type latitude: float
:param transmissivity: clearness index [-]
:type transmissivity: float
:return abs(b): optimal tilt angle [radians]
:rtype abs(b): float
..[Quinn et al., 2013] S.W.Quinn, B.Lehman.A simple formula for estimating the optimum tilt angles of photovoltaic
panels. 2013 IEEE 14th Work Control Model Electron, Jun, 2013, pp.1-8
"""
if transmissivity <= 0.15:
gKt = 0.977
elif 0.15 < transmissivity <= 0.7:
gKt = 1.237 - 1.361 * transmissivity
else:
gKt = 0.273
Tad = 0.98 # transmittance-absorptance product of the diffuse radiation
Tar = 0.97 # transmittance-absorptance product of the reflected radiation
Pg = 0.2 # ground reflectance of 0.2
l = radians(latitude)
a = radians(teta_z)
b = atan((cos(a) * tan(l)) * (1 / (1 + ((Tad * gKt - Tar * Pg) / (2 * (1 - gKt)))))) # eq.(11)
return abs(b)
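# Illustrative usage (added sketch; not part of the original module): optimal tilt for
# a panel with teta_z = 180 at a hypothetical mid-latitude site with clearness index
# 0.5. The Quinn et al. formula returns the tilt angle in radians (roughly 0.65 rad here).
def _example_optimal_angle():
    b = calc_optimal_angle(teta_z=180, latitude=47.0, transmissivity=0.5)
    assert 0 < b < radians(90)  # a plausible tilt between horizontal and vertical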
def calc_optimal_spacing(Sh, Az, tilt_angle, module_length):
"""
To calculate the optimal spacing between each panel to avoid shading.
:param Sh: Solar elevation at the worst hour [degree]
:type Sh: float
:param Az: Solar Azimuth [degree]
:type Az: float
    :param tilt_angle: optimal tilt angle for panels on flat surfaces [radians]
:type tilt_angle: float
:param module_length: [m]
:type module_length: float
:return D: optimal distance in [m]
:rtype D: float
"""
h = module_length * sin(tilt_angle)
D1 = h / tan(radians(Sh))
D = max(D1 * cos(radians(180 - Az)), D1 * cos(radians(Az - 180)))
return D
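# Illustrative check (added sketch; not part of the original module): with the sun due
# south (Az = 180) the spacing is simply the shadow length h / tan(Sh), where h is the
# raised edge of the tilted module. All numbers below are hypothetical.
def _example_optimal_spacing():
    D = calc_optimal_spacing(Sh=20.0, Az=180.0, tilt_angle=radians(30), module_length=1.0)
    h = 1.0 * sin(radians(30))  # 0.5 m raised edge
    assert abs(D - h / tan(radians(20.0))) < 1e-9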
# def calc_categoriesroof(teta_z, B, GB, Max_Isol):
# """
# To categorize solar panels by the surface azimuth, tilt angle and yearly radiation.
# :param teta_z: surface azimuth [degree], 0 degree north (east positive, west negative)
# :type teta_z: float
# :param B: solar panel tile angle [degree]
# :type B: float
# :param GB: yearly radiation of sensors [Wh/m2/year]
# :type GB: float
# :param Max_Isol: yearly global horizontal radiation [Wh/m2/year]
# :type Max_Isol: float
# :return CATteta_z: category of surface azimuth
# :rtype CATteta_z: float
# :return CATB: category of tilt angle
# :rtype CATB: float
# :return CATBG: category of yearly radiation
# :rtype CATBG: float
# """
# if -122.5 < teta_z <= -67:
# CATteta_z = 1
# elif -67.0 < teta_z <= -22.5:
# CATteta_z = 3
# elif -22.5 < teta_z <= 22.5:
# CATteta_z = 5
# elif 22.5 < teta_z <= 67:
# CATteta_z = 4
# elif 67.0 <= teta_z <= 122.5:
# CATteta_z = 2
# else:
# CATteta_z = 6
# B = degrees(B)
# if 0 < B <= 5:
# CATB = 1 # flat roof
# elif 5 < B <= 15:
# CATB = 2 # tilted 5-15 degrees
# elif 15 < B <= 25:
# CATB = 3 # tilted 15-25 degrees
# elif 25 < B <= 40:
# CATB = 4 # tilted 25-40 degrees
# elif 40 < B <= 60:
# CATB = 5 # tilted 40-60 degrees
# elif B > 60:
# CATB = 6 # tilted >60 degrees
# else:
# CATB = None
# print('B not in expected range')
#
# GB_percent = GB / Max_Isol
# if 0 < GB_percent <= 0.25:
# CATGB = 1
# elif 0.25 < GB_percent <= 0.50:
# CATGB = 2
# elif 0.50 < GB_percent <= 0.75:
# CATGB = 3
# elif 0.75 < GB_percent <= 0.90:
# CATGB = 4
# elif 0.90 < GB_percent:
# CATGB = 5
# else:
# CATGB = None
# print('GB not in expected range')
#
# return CATteta_z, CATB, CATGB
def calc_surface_azimuth(xdir, ydir, B):
"""
Calculate surface azimuth from the surface normal vector (x,y,z) and tilt angle (B).
Following the geological sign convention, an azimuth of 0 and 360 degree represents north, 90 degree is east.
:param xdir: surface normal vector x in (x,y,z) representing east-west direction
:param ydir: surface normal vector y in (x,y,z) representing north-south direction
:param B: surface tilt angle in degree
:type xdir: float
:type ydir: float
:type B: float
:returns surface azimuth: the azimuth of the surface of a solar panel in degree
:rtype surface_azimuth: float
"""
B = radians(B)
teta_z = degrees(asin(xdir / sin(B)))
    # set the surface azimuth based on the sign convention (E,N)=(+,+)
if xdir < 0:
if ydir < 0:
surface_azimuth = 180 + teta_z # (xdir,ydir) = (-,-)
else:
surface_azimuth = 360 + teta_z # (xdir,ydir) = (-,+)
elif ydir < 0:
surface_azimuth = 180 + teta_z # (xdir,ydir) = (+,-)
else:
surface_azimuth = teta_z # (xdir,ydir) = (+,+)
return surface_azimuth # degree
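# Illustrative check (added sketch; not part of the original module): a vertical wall
# (B = 90) whose outward normal points due east, (xdir, ydir) = (1, 0), should report
# a surface azimuth of 90 degrees under the geological sign convention above.
def _example_surface_azimuth():
    assert abs(calc_surface_azimuth(xdir=1.0, ydir=0.0, B=90.0) - 90.0) < 1e-9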
# ============================
# properties of module
# ============================
# TODO: Delete when done
def calc_properties_PV_db(database_path, config):
"""
To assign PV module properties according to panel types.
:param type_PVpanel: type of PV panel used
:type type_PVpanel: string
:return: dict with Properties of the panel taken form the database
"""
type_PVpanel = config.solar.type_PVpanel
data = pd.read_excel(database_path, sheet_name="PV")
panel_properties = data[data['code'] == type_PVpanel].reset_index().T.to_dict()[0]
return panel_properties
# investment and maintenance costs
# FIXME: it looks like this function is never used!!! (REMOVE)
def calc_Cinv_pv(total_module_area_m2, locator, technology=0):
"""
To calculate capital cost of PV modules, assuming 20 year system lifetime.
    :param total_module_area_m2: total installed PV module area [m2]
    :return: Capex_a_PV_USD, Opex_fixed_PV_USD, Capex_PV_USD and P_nominal_W of the installed PV modules
"""
PV_cost_data = pd.read_excel(locator.get_database_conversion_systems(), sheet_name="PV")
technology_code = list(set(PV_cost_data['code']))
PV_cost_data = PV_cost_data[PV_cost_data['code'] == technology_code[technology]]
nominal_efficiency = PV_cost_data[PV_cost_data['code'] == technology_code[technology]]['PV_n'].max()
P_nominal_W = total_module_area_m2 * (constants.STC_RADIATION_Wperm2 * nominal_efficiency)
# if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least
# capacity for the corresponding technology from the database
if P_nominal_W < PV_cost_data['cap_min'].values[0]:
P_nominal_W = PV_cost_data['cap_min'].values[0]
PV_cost_data = PV_cost_data[
(PV_cost_data['cap_min'] <= P_nominal_W) & (PV_cost_data['cap_max'] > P_nominal_W)]
Inv_a = PV_cost_data.iloc[0]['a']
Inv_b = PV_cost_data.iloc[0]['b']
Inv_c = PV_cost_data.iloc[0]['c']
Inv_d = PV_cost_data.iloc[0]['d']
Inv_e = PV_cost_data.iloc[0]['e']
Inv_IR = PV_cost_data.iloc[0]['IR_%']
Inv_LT = PV_cost_data.iloc[0]['LT_yr']
Inv_OM = PV_cost_data.iloc[0]['O&M_%'] / 100
InvC = Inv_a + Inv_b * (P_nominal_W) ** Inv_c + (Inv_d + Inv_e * P_nominal_W) * log(P_nominal_W)
Capex_a_PV_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)
Opex_fixed_PV_USD = InvC * Inv_OM
Capex_PV_USD = InvC
return Capex_a_PV_USD, Opex_fixed_PV_USD, Capex_PV_USD, P_nominal_W
# remuneration scheme
def calc_Crem_pv(E_nom):
"""
Calculates KEV (Kostendeckende Einspeise - Verguetung) for solar PV and PVT.
Therefore, input the nominal capacity of EACH installation and get the according KEV as return in Rp/kWh
    :param E_nom: Nominal Capacity of solar panels (PV or PVT) [W]
:type E_nom: float
:return KEV_obtained_in_RpPerkWh: KEV remuneration [Rp/kWh]
:rtype KEV_obtained_in_RpPerkWh: float
"""
# TODO: change input argument to area_installed and then calculate the nominal capacity within this function, see calc_Cinv_pv
KEV_regime = [0, 0, 20.4, 20.4, 20.4, 20.4, 20.4, 20.4, 19.7, 19.3, 19, 18.9, 18.7, 18.6, 18.5, 18.1, 17.9, 17.8,
17.8, 17.7, 17.7, 17.7, 17.6, 17.6]
P_installed_in_kW = [0, 9.99, 10, 12, 15, 20, 29, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 750, 1000,
1500, 2000, 1000000]
KEV_interpolated_kW = interpolate.interp1d(P_installed_in_kW, KEV_regime, kind="linear")
KEV_obtained_in_RpPerkWh = 0
if (E_nom / 1000) > P_installed_in_kW[-1]:
number_of_installations = int(ceil(E_nom / P_installed_in_kW[-1]))
E_nom_per_chiller = E_nom / number_of_installations
for i in range(number_of_installations):
KEV_obtained_in_RpPerkWh = KEV_obtained_in_RpPerkWh + KEV_interpolated_kW(E_nom_per_chiller / 1000.0)
else:
KEV_obtained_in_RpPerkWh = KEV_obtained_in_RpPerkWh + KEV_interpolated_kW(E_nom / 1000.0)
return KEV_obtained_in_RpPerkWh
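# Illustrative usage (added sketch; not part of the original module): a hypothetical
# 50 kW installation (E_nom in W) falls inside the tabulated range, so the KEV rate is
# read directly off the interpolation table: 19.0 Rp/kWh at 50 kW installed.
def _example_crem_pv():
    assert abs(calc_Crem_pv(50 * 1000.0) - 19.0) < 1e-6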
def aggregate_results(locator, building_names):
aggregated_hourly_results_df = pd.DataFrame()
aggregated_annual_results = pd.DataFrame()
for i, building in enumerate(building_names):
hourly_results_per_building = pd.read_csv(locator.PV_results(building)).set_index('Date')
if i == 0:
aggregated_hourly_results_df = hourly_results_per_building
else:
aggregated_hourly_results_df = aggregated_hourly_results_df + hourly_results_per_building
annual_energy_production = hourly_results_per_building.filter(like='_kWh').sum()
panel_area_per_building = hourly_results_per_building.filter(like='_m2').iloc[0]
building_annual_results = annual_energy_production.append(panel_area_per_building)
aggregated_annual_results[building] = building_annual_results
return aggregated_hourly_results_df, aggregated_annual_results
def aggregate_results_func(args):
return aggregate_results(args[0], args[1])
def write_aggregate_results(locator, building_names, num_process=1):
aggregated_hourly_results_df = pd.DataFrame()
aggregated_annual_results = pd.DataFrame()
pool = Pool(processes=num_process)
args = [(locator, x) for x in np.array_split(building_names, num_process) if x.size != 0]
for i, x in enumerate(pool.map(aggregate_results_func, args)):
hourly_results_df, annual_results = x
if i == 0:
aggregated_hourly_results_df = hourly_results_df
aggregated_annual_results = annual_results
else:
aggregated_hourly_results_df = aggregated_hourly_results_df + hourly_results_df
aggregated_annual_results = pd.concat([aggregated_annual_results, annual_results], axis=1, sort=False)
# save hourly results
aggregated_hourly_results_df.to_csv(locator.PV_totals(), index=True, float_format='%.2f', na_rep='nan')
# save annual results
aggregated_annual_results_df = pd.DataFrame(aggregated_annual_results).T
aggregated_annual_results_df.to_csv(locator.PV_total_buildings(), index=True, index_label="Name",
float_format='%.2f', na_rep='nan')
def main(config):
assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
print('Running photovoltaic with scenario = %s' % config.scenario)
print('Running photovoltaic with annual-radiation-threshold-kWh/m2 = %s' % config.solar.annual_radiation_threshold)
print('Running photovoltaic with panel-on-roof = %s' % config.solar.panel_on_roof)
print('Running photovoltaic with panel-on-wall = %s' % config.solar.panel_on_wall)
print('Running photovoltaic with solar-window-solstice = %s' % config.solar.solar_window_solstice)
print('Running photovoltaic with type-pvpanel = %s' % config.solar.type_pvpanel)
if config.solar.custom_tilt_angle:
print('Running photovoltaic with custom-tilt-angle = %s and panel-tilt-angle = %s' %
(config.solar.custom_tilt_angle, config.solar.panel_tilt_angle))
else:
print('Running photovoltaic with custom-tilt-angle = %s' % config.solar.custom_tilt_angle)
if config.solar.custom_roof_coverage:
print('Running photovoltaic with custom-roof-coverage = %s and max-roof-coverage = %s' %
(config.solar.custom_roof_coverage, config.solar.max_roof_coverage))
else:
print('Running photovoltaic with custom-roof-coverage = %s' % config.solar.custom_roof_coverage)
building_names = locator.get_zone_building_names()
zone_geometry_df = gdf.from_file(locator.get_zone_geometry())
latitude, longitude = get_lat_lon_projected_shapefile(zone_geometry_df)
# list_buildings_names =['B026', 'B036', 'B039', 'B043', 'B050'] for missing buildings
weather_data = epwreader.epw_reader(locator.get_weather_file())
date_local = solar_equations.calc_datetime_local_from_weather_file(weather_data, latitude, longitude)
num_process = config.get_number_of_processes()
n = len(building_names)
cea.utilities.parallel.vectorize(calc_PV, num_process)(repeat(locator, n),
repeat(config, n),
repeat(latitude, n),
repeat(longitude, n),
repeat(weather_data, n),
repeat(date_local, n),
building_names)
# aggregate results from all buildings
write_aggregate_results(locator, building_names, num_process)
if __name__ == '__main__':
main(cea.config.Configuration())
|
architecture-building-systems/CEAforArcGIS
|
cea/technologies/solar/photovoltaic.py
|
Python
|
mit
| 39,532
|
[
"EPW"
] |
e4c9b386a0e10f3d4c25e8a9a44522e97a7a90e2ae98c0bf38251d9ed86cd8bf
|
from rdkit.Chem import PandasTools
from rdkit import Chem
from rdkit.Chem import AllChem
import cirpy
import numpy as np  # needed below for np.setdiff1d
import pandas as pd
import bs4
def parse_page(soup):
    # initialize fields so pages missing an entry return None instead of raising NameError
    cas = density = temperature = weight = None
    for x in soup.find_all("td"):
        t = x.get("class")
        if t is not None and "term2TD" in t:
            if "CAS No." in x.next:
                cas = x.next.next.text[1:]
            if "Density" in x.next:
                density = x.next.next.text
            if "TDENL" in x.next:
                temperature = x.next.next.text
            if "Molecular Wt." in x.next:
                weight = x.next.next.text
    if density == "NA":
        density = None
    smiles = cirpy.resolve(cas, "smiles")
    return (cas, density, temperature, weight, smiles)
data = []
for i in range(1, 2000):
f = open("./pages/page%d.html" % i).read()
soup = bs4.BeautifulSoup(f)
CAS, density, temperature, weight, smiles = parse_page(soup)
data.append([i, CAS, density, temperature, weight, smiles])
data = pd.DataFrame(data, columns=["Index", "CAS", "MoleDensity", "Temperature", "weight", "smiles"])
data.set_index("Index")
data.dropna(inplace=True)
data.MoleDensity = data.MoleDensity.astype('float')
data.weight = data.weight.astype('float')
data.Temperature = data.Temperature.astype('float')
data["density"] = data.weight * data.MoleDensity
PandasTools.AddMoleculeColumnToFrame(data, smilesCol='smiles', molCol='molecule', includeFingerprints=False)
# g-mol/cm^3 units
has_other = {}
num_atoms = {}
for k, m in data.molecule.iteritems():
try:
m = Chem.AddHs(m)
AllChem.EmbedMolecule(m)
AllChem.UFFOptimizeMolecule(m)
atoms = pd.Series([a.GetSymbol() for a in m.GetAtoms()])
atom_counts = atoms.value_counts()
has_other[k] = len(np.setdiff1d(atom_counts.index.values, ["C", "N", "H", "O"])) > 0
data["molecule"][k] = m
num_atoms[k] = m.GetNumAtoms()
except:
pass
data["has_other"] = pd.Series(has_other)
data["num_atoms"] = pd.Series(num_atoms)
data.to_hdf("./alldata.h5", "data")
data = data[data.has_other == False]
data = data[data.num_atoms <= 25]
data = data[(data.Temperature >= 273) & (data.Temperature <= 320)]
data.to_hdf("./data.h5", "data")
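# Illustrative note (added; not part of the original script): the saved frames can be
# read back with pandas, e.g.
#
#   df = pd.read_hdf("./data.h5", "data")  # C/N/H/O-only molecules, <= 25 atoms, 273-320 K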
|
choderalab/open-forcefield-group
|
densities/CHERIC/parse.py
|
Python
|
gpl-2.0
| 2,515
|
[
"RDKit"
] |
0b38ed730a97fe84d81b4bdc9e31b674e619e37798eacdd1476aa778dda5ded8
|
import argparse
import ast
import symtable
import sys
from template import T
def lambda_function(arguments_to_values, prettyprinted=False):
# arguments_to_values :: {argument_i: value_i}
# :: string
if prettyprinted:
raise NotImplementedError
else:
return T('(lambda {}: {})({})').format(
T(', ').join(arguments_to_values.keys()),
T('{}'),
T(', ').join(arguments_to_values.values()))
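# Illustrative note (added; assumes T behaves as a deferred str.format template): with
# a mapping like {'x': '1', 'y': '2'}, the template built here renders to
# "(lambda x, y: {})(1, 2)", leaving one hole for the body; a later .format('x + y')
# fills it in, giving "(lambda x, y: x + y)(1, 2)".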
def provide(body, **subs):
body = T('{}').format(body)
needed = set(body.free()).intersection(subs)
if needed:
return lambda_function({k: subs[k] for k in needed}).format(
body.format(**{k: k for k in needed}))
else:
return body
def get_init_code(tree, table):
# Calculate the helper variables that we will need, wrap the output
# code in a definition of those variables.
# TODO: Short-circuit to something far simpler if the program has but one
# print statement.
output = Namespace(table).many_to_one(tree.body)
doc = ast.get_docstring(tree, clean=False)
if doc is not None:
output = assignment_component(output, T("{__g}['__doc__']"), repr(doc))
output = provide(
output.format(__l=T('{__g}')),
__print=T("__import__('__builtin__').__dict__['print']"),
__exec="__import__('trace').Trace(count=False,"
" trace=False).runctx",
__y="(lambda f: (lambda x: x(x))(lambda y:"
" f(lambda: y(y)())))",
__g=T("globals()"),
__sys="__import__('sys')",
__types="__import__('types')")
return output.close()
boolop_code = {
ast.And: ' and ',
ast.Or: ' or ',
}
operator_code = {
ast.Add: '+',
ast.Sub: '-',
ast.Mult: '*',
ast.Div: '/',
ast.Mod: '%',
ast.Pow: '**',
ast.LShift: '<<',
ast.RShift: '>>',
ast.BitOr: '|',
ast.BitXor: '^',
ast.BitAnd: '&',
ast.FloorDiv: '//',
}
unaryop_code = {
ast.Invert: '~',
ast.Not: 'not ',
ast.UAdd: '+',
ast.USub: '-',
}
cmpop_code = {
ast.Eq: ' == ',
ast.NotEq: ' != ',
ast.Lt: ' < ',
ast.LtE: ' <= ',
ast.Gt: ' > ',
ast.GtE: ' >= ',
ast.Is: ' is ',
ast.IsNot: ' is not ',
ast.In: ' in ',
ast.NotIn: ' not in ',
}
def assignment_component(after, targets, value):
# return T('(lambda {}: {})({})').format(targets, after, value)
return T('[{} for {} in [({})]][0]').format(after, targets, value)
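# Illustrative note (added): the emitted pattern simulates an assignment inside an
# expression with a one-element list comprehension. For targets 'x', value '42' and
# continuation 'x + 1', the generated code reads
#
#   [x + 1 for x in [(42)]][0]
#
# which binds x to 42 and evaluates to 43; tuple targets unpack the same way.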
class Namespace(ast.NodeVisitor):
def __init__(self, table, private=''):
self.table = table
self.subtables = iter(table.get_children())
self.private = '_' + table.get_name() if table.get_type() == 'class' \
else private
def next_child(self):
return Namespace(next(self.subtables), private=self.private)
def mangle(self, name):
return self.private + name if name.startswith('__') and \
not name.endswith('__') else name
def var(self, name):
name = self.mangle(name)
sym = self.table.lookup(name)
if sym.is_global() or (self.table.get_type() == 'module' and sym.is_local()):
return T('{}').format(name)
elif sym.is_local():
return T('{__l}[{!r}]').format(name)
elif sym.is_free():
return T('{___f_' + name + '}()').format(name)
else:
raise SyntaxError('confusing symbol {!r}'.format(name))
def store_var(self, name):
name = self.mangle(name)
sym = self.table.lookup(name)
if sym.is_global():
return T('{__g}[{!r}]').format(name)
elif sym.is_local():
return T('{__l}[{!r}]').format(name)
elif sym.is_free():
raise SyntaxError('storing free variable {!r}'.format(name))
else:
raise SyntaxError('confusing symbol {!r}'.format(name))
def delete_var(self, name):
name = self.mangle(name)
sym = self.table.lookup(name)
if sym.is_global():
return T('{__g}.pop({!r})').format(name)
elif sym.is_local():
return T('{__l}.pop({!r})').format(name)
elif sym.is_free():
raise SyntaxError('deleting free variable {!r}'.format(name))
else:
raise SyntaxError('confusing symbol {!r}'.format(name))
def close(self, ns, local, body, **subs):
if self.table.get_type() == 'function':
subs = dict(subs, **{'___f_' + v: T('lambda: {}').format(self.var(v))
for v in self.table.get_locals()})
return provide(body, __l=local, **subs)
def many_to_one(self, trees, after='None'):
# trees :: [Tree]
# return :: string
return reduce(
lambda ctx, tree: ctx.format(after=self.visit(tree)),
trees,
T('{after}')).format(after=after)
def slice_repr(self, slice):
if type(slice) is ast.Ellipsis:
return T('Ellipsis')
elif type(slice) is ast.Slice:
return T('slice({}, {}, {})').format(
'None' if slice.lower is None else self.visit(slice.lower),
'None' if slice.upper is None else self.visit(slice.upper),
'None' if slice.step is None else self.visit(slice.step))
elif type(slice) is ast.ExtSlice:
return T('({})').format(T(', ').join(map(self.slice_repr, slice.dims)) +
','*(len(slice.dims) == 1))
elif type(slice) is ast.Index:
return self.visit(slice.value)
else:
raise NotImplementedError('Case not caught: %s' % str(type(slice)))
def delete_code(self, target):
if type(target) is ast.Attribute:
return [T('delattr({}, {!r})').format(self.visit(target.value), target.attr)]
elif type(target) is ast.Subscript:
if type(target.slice) is ast.Slice and target.slice.step is None:
return [T("(lambda o, **t: type('translator', (), {{t[m]: "
"staticmethod(object.__getattribute__(d[m], '__get__'"
")(o, type(o))) for d in [object.__getattribute__("
"type(o), '__dict__')] for m in t if m in d}})())({},"
" __delitem__='__getitem__', __delslice__="
"'__getslice__', __len__='__len__')[{}]").format(
self.visit(target.value),
self.visit(target.slice))]
else:
return [T("(lambda o: object.__getattribute__("
"object.__getattribute__(type(o), '__dict__')"
"['__delitem__'], '__get__')(o, type(o)))"
"({})({})").format(
self.visit(target.value),
self.slice_repr(target.slice))]
elif type(target) is ast.Name:
return [self.delete_var(target.id)]
elif type(target) in (ast.List, ast.Tuple):
return [c for elt in target.elts for c in self.delete_code(elt)]
else:
raise NotImplementedError('Case not caught: %s' % str(type(target)))
def visit_Assert(self, tree):
return T('({after} if {} else ([] for [] in []).throw(AssertionError{}))').format(
self.visit(tree.test),
'' if tree.msg is None else T('({})').format(self.visit(tree.msg)))
def visit_Assign(self, tree):
targets = [self.visit(target) for target in tree.targets]
value = self.visit(tree.value)
targets = T(', ').join(targets)
return assignment_component(T('{after}'), targets,
value if len(tree.targets) == 1
else T('[{}]*{}').format(value, len(tree.targets)))
def visit_Attribute(self, tree):
return T('{}.{}').format(self.visit(tree.value), tree.attr)
def visit_AugAssign(self, tree):
if type(tree.target) is ast.Attribute:
target_params = ['__target']
target_args = [self.visit(tree.target.value)]
target_value = T('__target.{}').format(tree.target.attr)
elif type(tree.target) is ast.Subscript:
if type(tree.target.slice) is ast.Slice and tree.target.slice.step is None:
target_params = ['__target']
target_args = [self.visit(tree.target.value)]
if tree.target.slice.lower is not None:
target_params.append('__lower')
target_args.append(self.visit(tree.target.slice.lower))
if tree.target.slice.upper is not None:
target_params.append('__upper')
target_args.append(self.visit(tree.target.slice.upper))
target_value = T('__target[{}:{}]').format(
'' if tree.target.slice.lower is None else '__lower',
'' if tree.target.slice.upper is None else '__upper')
else:
target_params = ['__target', '__slice']
target_args = [self.visit(tree.target.value), self.slice_repr(tree.target.slice)]
target_value = '__target[__slice]'
elif type(tree.target) is ast.Name:
target_params = []
target_args = []
target_value = self.store_var(tree.target.id)
else:
raise SyntaxError('illegal expression for augmented assignment')
op = operator_code[type(tree.op)]
iop = type(tree.op).__name__.lower()
if iop.startswith('bit'):
iop = iop[len('bit'):]
iop = '__i%s__' % iop
value = self.visit(tree.value)
assign = assignment_component(
T('{after}'), target_value,
T("(lambda o, v: (lambda r: o {} v if r is NotImplemented else r)("
"object.__getattribute__(object.__getattribute__(type(o), "
"'__dict__').get({!r}, lambda self, other: NotImplemented), "
"'__get__')(o, type(o))(v)))({}, {})").format(
op, iop, target_value, value))
if target_params:
assign = T('(lambda {}: {})({})').format(
T(', ').join(target_params),
assign,
T(', ').join(target_args))
return assign
def visit_BinOp(self, tree):
return T('({} {} {})').format(self.visit(tree.left), operator_code[type(tree.op)], self.visit(tree.right))
def visit_BoolOp(self, tree):
return T('({})').format(T(boolop_code[type(tree.op)]).join(map(self.visit, tree.values)))
def visit_Break(self, tree):
return T('{__break}()')
def visit_Call(self, tree):
func = self.visit(tree.func)
args = [self.visit(arg) for arg in tree.args]
keywords = [self.visit(kw) for kw in tree.keywords]
if tree.starargs is None:
starargs = []
else:
starargs = ["*" + self.visit(tree.starargs)]
if tree.kwargs is None:
kwargs = []
else:
kwargs = ["**" + self.visit(tree.kwargs)]
elems = args + keywords + starargs + kwargs
comma_sep_elems = T(', ').join(elems)
return T('{}({})').format(func, comma_sep_elems)
def visit_ClassDef(self, tree):
bases = (T(', ').join(map(self.visit, tree.bases)) +
','*(len(tree.bases) == 1))
decoration = T('{}')
for decorator in tree.decorator_list:
decoration = decoration.format(T('{}({})').format(self.visit(decorator), T('{}')))
ns = self.next_child()
body = ns.many_to_one(tree.body, after=T('{__l}'))
doc = ast.get_docstring(tree, clean=False)
body = self.close(ns, "{{'__module__': __name__{}}}".format(
'' if doc is None else ", '__doc__': {!r}".format(doc)), body)
if tree.bases:
class_code = T("(lambda b, d: d.get('__metaclass__', getattr(b[0], "
"'__class__', type(b[0])))({!r}, b, d))(({}), "
"{})").format(tree.name, bases, body)
else:
class_code = T("(lambda d: d.get('__metaclass__', {__g}.get("
"'__metaclass__', {__types}.ClassType))({!r}, (), "
"d))({})").format(tree.name, body)
class_code = decoration.format(class_code)
return assignment_component(T('{after}'), self.store_var(tree.name), class_code)
def visit_Compare(self, tree):
assert len(tree.ops) == len(tree.comparators)
return T('({})').format(self.visit(tree.left) + T('').join(
[cmpop_code[type(tree.ops[i])] + self.visit(tree.comparators[i])
for i in range(len(tree.ops))]))
def visit_comprehension(self, tree):
return (T('for {} in {}').format(self.visit(tree.target), self.visit(tree.iter)) +
T('').join(' if ' + self.visit(i) for i in tree.ifs))
def comprehension_code(self, generators, wrap):
iter0 = self.visit(generators[0].iter)
ns = self.next_child()
return self.close(
ns, '{}',
wrap(ns, T(' ').join(
[T('for {} in {__iter}').format(ns.visit(generators[0].target))] +
['if ' + ns.visit(i) for i in generators[0].ifs] +
map(ns.visit, generators[1:]))),
__iter=iter0)
def visit_Continue(self, tree):
return T('{__continue}()')
def visit_Delete(self, tree):
cs = [c for target in tree.targets for c in self.delete_code(target)]
if cs:
return T('({}, {after})[-1]').format(T(', ').join(cs))
else:
return T('{after}')
def visit_Dict(self, tree):
return T('{{{}}}').format(T(', ').join(
T('{}: {}').format(k, v)
for k, v in zip(map(self.visit, tree.keys), map(self.visit, tree.values))))
def visit_DictComp(self, tree):
return self.comprehension_code(
tree.generators,
lambda ns, g: T('{{{}: {} {}}}').format(
T('{}'), ns.visit(tree.value), g).format(ns.visit(tree.key)))
def visit_Ellipsis(self, tree):
return T('...')
def visit_ExceptHandler(self, tree):
raise NotImplementedError('Open problem: except')
def visit_Exec(self, tree):
body = self.visit(tree.body)
if tree.globals is None:
exec_code = T('{__exec}({}, {__g}, {__l})').format(body)
elif tree.locals is None:
exec_code = T(
'(lambda b, g: {__exec}(b, {__g} if g is None else g, '
'{__l} if g is None else g))({}, {})').format(
body, self.visit(tree.globals))
else:
exec_code = T(
'(lambda b, g, l: {__exec}(b, {__g} if g is None else g, '
'({__l} if g is None else g) if l is None else l))({}, {}, {})').format(
body, self.visit(tree.globals), self.visit(tree.locals))
return T('({}, {after})[1]').format(exec_code)
def visit_Expr(self, tree):
return T('({}, {after})[1]').format(self.visit(tree.value))
def visit_Expression(self, tree):
return self.visit(tree.body)
def visit_ExtSlice(self, tree):
return (T(', ').join(map(self.visit, tree.dims)) +
','*(len(tree.dims) == 1))
def visit_For(self, tree):
item = self.visit(tree.target)
items = self.visit(tree.iter)
body = self.many_to_one(tree.body, after='__this()')
orelse = self.many_to_one(tree.orelse, after='__after()')
return lambda_function({'__items': T('iter({})').format(items), '__sentinel':
'[]', '__after': T('lambda: {after}')}).format(
T('{__y}(lambda __this: lambda: {})()').format(
lambda_function({'__i': 'next(__items, __sentinel)'}).format(
T('{} if __i is not __sentinel else {}').format(
provide(
assignment_component(body, item, '__i'),
__break='__after', __continue='__this'),
orelse))))
def visit_FunctionDef(self, tree):
# self.visit() returns something of the form
# ('lambda x, y, z=5, *args: ', ['x', 'y', 'z', 'args'])
args, arg_names = self.visit(tree.args)
decoration = T('{}')
for decorator in tree.decorator_list:
decoration = decoration.format(T('{}({})').format(self.visit(decorator), T('{}')))
ns = self.next_child()
body = ns.many_to_one(tree.body)
if arg_names:
body = assignment_component(body,
T(', ').join(ns.var(name) for name in arg_names),
T(', ').join(arg_names))
body = self.close(ns, '{}', body)
function_code = args + body
doc = ast.get_docstring(tree, clean=False)
if tree.decorator_list:
return assignment_component(
T('{after}'),
self.store_var(tree.name),
decoration.format(assignment_component(
'__func',
'__func, __func.__name__' + ('' if doc is None else ', __func.__doc__'),
T('{}, {!r}' + ('' if doc is None else ', {!r}')).format(
function_code, tree.name, doc))))
else:
return assignment_component(
T('{after}'),
T('{}, {}.__name__' + ('' if doc is None else ', {}.__doc__')).format(
self.store_var(tree.name), self.var(tree.name), self.var(tree.name)),
T('{}, {!r}' + ('' if doc is None else ', {!r}')).format(
function_code, tree.name, doc))
def visit_arguments(self, tree):
# this should return something of the form
# ('lambda x, y, z=5, *args: ', ['x', 'y', 'z', 'args'])
padded_defaults = [None] * (len(tree.args) -
len(tree.defaults)) + tree.defaults
arg_names = [arg.id for arg in tree.args]
args = zip(padded_defaults, tree.args)
args = [a.id if d is None else a.id + "=" + self.visit(d) for (d, a) in args]
if tree.vararg is not None:
args += ["*" + tree.vararg]
arg_names += [tree.vararg]
if tree.kwarg is not None:
args += ["**" + tree.kwarg]
arg_names += [tree.kwarg]
args = T(', ').join(args)
return (T('lambda {}: ').format(args), arg_names)
def visit_GeneratorExp(self, tree):
return self.comprehension_code(
tree.generators,
lambda ns, g: T('({} {})').format(ns.visit(tree.elt), g))
def visit_Global(self, tree):
return T('{after}')
def visit_If(self, tree):
test = self.visit(tree.test)
body = self.many_to_one(tree.body, after='__after()')
orelse = self.many_to_one(tree.orelse, after='__after()')
return T('(lambda __after: {} if {} else {})(lambda: {after})').format(
body, test, orelse)
def visit_IfExp(self, tree):
test = self.visit(tree.test)
body = self.visit(tree.body)
orelse = self.visit(tree.orelse)
return T('({} if {} else {})').format(body, test, orelse)
def visit_Import(self, tree):
after = T('{after}')
for alias in tree.names:
ids = alias.name.split('.')
if alias.asname is None:
after = assignment_component(after, self.store_var(ids[0]),
T('__import__({!r}, {__g}, {__l})').format(alias.name))
else:
after = assignment_component(after, self.store_var(alias.asname),
T('.').join([T('__import__({!r}, {__g}, {__l})').format(
alias.name)] + ids[1:]))
return after
def visit_ImportFrom(self, tree):
return T('(lambda __mod: {})(__import__({!r}, {__g}, {__l},'
' {!r}, {!r}))').format(
assignment_component(
T('{after}'),
T(', ').join(self.store_var(alias.name if alias.asname is None
else alias.asname) for alias in tree.names),
T(', ').join('__mod.' + alias.name for alias in tree.names)),
'' if tree.module is None else tree.module,
tuple(alias.name for alias in tree.names),
tree.level)
def visit_Index(self, tree):
return self.visit(tree.value)
def visit_keyword(self, tree):
return T('{}={}').format(tree.arg, self.visit(tree.value))
def visit_Lambda(self, tree):
args, arg_names = self.visit(tree.args)
ns = self.next_child()
body = ns.visit(tree.body)
if arg_names:
body = assignment_component(body, T(', ').join(ns.store_var(name)
for name in arg_names), T(', ').join(arg_names))
body = self.close(ns, '{}', body)
return '(' + args + body + ')'
def visit_List(self, tree):
elts = [self.visit(elt) for elt in tree.elts]
return T('[{}]').format(T(', ').join(elts))
def visit_ListComp(self, tree):
return T('[{}]').format(T(' ').join([self.visit(tree.elt)] +
map(self.visit, tree.generators)))
def visit_Name(self, tree):
if isinstance(tree.ctx, (ast.Store, ast.AugStore)):
return self.store_var(tree.id)
else:
return self.var(tree.id)
def visit_Num(self, tree):
return T('{!r}').format(tree.n)
def visit_Pass(self, tree):
return T('{after}')
def visit_Print(self, tree):
to_print = T('{}')
if tree.dest is not None:
# Abuse varargs to get the right evaluation order
to_print = T('file={}, *[{}]').format(self.visit(tree.dest), to_print)
to_print = to_print.format(T(', ').join(self.visit(x) for x in tree.values))
if not tree.nl:
# TODO: This is apparently good enough for 2to3, but gets
# many cases wrong (tests/unimplemented/softspace.py).
to_print += ", end=' '"
return T('({__print}({}), {after})[1]').format(to_print)
def visit_Raise(self, tree):
if tree.type is None:
return T('([] for [] in []).throw(*{__sys}.exc_info())')
else:
return T('([] for [] in []).throw({}{}{})').format(
self.visit(tree.type),
'' if tree.inst is None else ', ' + self.visit(tree.inst),
'' if tree.tback is None else ', ' + self.visit(tree.tback))
def visit_Repr(self, tree):
return T('`{}`').format(self.visit(tree.value))
def visit_Return(self, tree):
return self.visit(tree.value)
def visit_Set(self, tree):
assert tree.elts, '{} is a dict'
return T('{{{}}}').format(T(', ').join(self.visit(elt) for elt in tree.elts))
def visit_SetComp(self, tree):
return self.comprehension_code(
tree.generators,
lambda ns, g: T('{{{} {}}}').format(ns.visit(tree.elt), g))
def visit_Slice(self, tree):
return T('{}:{}{}').format(
'' if tree.lower is None else self.visit(tree.lower),
'' if tree.upper is None else self.visit(tree.upper),
'' if tree.step is None else ':' + self.visit(tree.step))
def visit_Str(self, tree):
return T('{!r}').format(tree.s)
def visit_Subscript(self, tree):
return T('{}[{}]').format(self.visit(tree.value), self.visit(tree.slice))
def visit_TryExcept(self, tree):
body = self.many_to_one(tree.body, after=T('{after}'))
self.many_to_one(tree.orelse)
for handler in tree.handlers:
if handler.type is not None:
self.visit(handler.type)
if handler.name is not None:
self.visit(handler.name)
self.many_to_one(handler.body)
# TODO: Don't ignore the except handlers, else, and finally clause.
return body
def visit_TryFinally(self, tree):
raise NotImplementedError('Open problem: try-finally')
def visit_Tuple(self, tree):
return T('({})').format(T(', ').join(map(self.visit, tree.elts)) +
','*(len(tree.elts) == 1))
def visit_UnaryOp(self, tree):
return T('({}{})').format(unaryop_code[type(tree.op)], self.visit(tree.operand))
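    # visit_While compiles the loop through a fixed-point combinator: __y ties
    # the knot so the loop body can re-enter itself via __this (continue) and
    # fall through to __after (break / normal exit of the loop).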
def visit_While(self, tree):
test = self.visit(tree.test)
body = self.many_to_one(tree.body, after='__this()')
orelse = self.many_to_one(tree.orelse, after='__after()')
return lambda_function({'__after': T('lambda: {after}')}).format(
T('{__y}(lambda __this: lambda: {} if {} else {})()').format(
provide(body, __break='__after', __continue='__this'),
test, orelse))
def visit_With(self, tree):
raise NotImplementedError('Open problem: with')
def visit_Yield(self, tree):
raise NotImplementedError('Open problem: yield')
def generic_visit(self, tree):
raise NotImplementedError('Case not caught: %s' % str(type(tree)))
# The entry point for everything.
def to_one_line(original):
# original :: string
# :: string
t = ast.parse(original)
table = symtable.symtable(original, '<string>', 'exec')
original = original.strip()
# If there's only one line anyways, be lazy
if len(original.splitlines()) == 1 and \
len(t.body) == 1 and \
type(t.body[0]) in (ast.Delete, ast.Assign, ast.AugAssign, ast.Print,
ast.Raise, ast.Assert, ast.Import, ast.ImportFrom,
ast.Exec, ast.Global, ast.Expr, ast.Pass):
return original
return get_init_code(t, table)
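# A minimal usage sketch (hypothetical input; the exact one-lined output
# depends on the visitor rules above):
#   print to_one_line("x = 1\nwhile x:\n    x -= 1")
#   # -> a semantically equivalent single-line program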
if __name__ == '__main__':
usage = ['python main.py --help',
'python main.py [--debug] [infile.py [outfile.py]]',
]
parser = argparse.ArgumentParser(usage='\n '.join(usage),
description=("if infile is given and outfile is not, outfile will be "
"infile_ol.py"))
parser.add_argument('infile', nargs='?')
parser.add_argument('outfile', nargs='?')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
original = None
if args.infile is None:
# I have gotten no arguments. Look at sys.stdin
original = sys.stdin.read()
outfilename = None
elif args.outfile is None:
        # I have gotten one argument: the input file. Derive the output
        # filename from it.
if args.infile.endswith('.py'):
outfilename = '_ol.py'.join(args.infile.rsplit(".py", 1))
else:
outfilename = args.infile + '_ol.py'
else:
outfilename = args.outfile
if original is None:
infile = open(args.infile)
original = infile.read().strip()
infile.close()
onelined = to_one_line(original)
if outfilename is None:
print onelined
else:
outfi = open(outfilename, 'w')
outfi.write(onelined + '\n')
outfi.close()
if args.debug:
if outfilename is None:
# redirect to sys.stderr if I'm writing outfile to sys.stdout
sys.stdout = sys.stderr
print '--- ORIGINAL ---------------------------------'
print original
print '----------------------------------------------'
scope = {}
try:
exec(original, scope)
except Exception as e:
print e
print '--- ONELINED ---------------------------------'
print onelined
print '----------------------------------------------'
scope = {}
try:
exec(onelined, scope)
except Exception as e:
print e
|
lizhuoli1126/MarkdownScript
|
Oneliner/main.py
|
Python
|
mit
| 28,019
|
[
"VisIt"
] |
248d284a008cd7062c4595e98fd317550ba1d0f72c6dbe7346adbe62618183ce
|
#!/usr/bin/env python3
# Copyright (C) 2016-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# #
# ESPResSo++ Python script for tabulated GROMACS simulation #
# #
###########################################################################
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import logging
from espressopp import Real3D, Int3D
from espressopp.tools import gromacs
from espressopp.tools import decomp
from espressopp.tools import timers
# This example reads in a gromacs water system (SPC/Fw) treated with reaction field. See the corresponding gromacs grompp.mdp parameter file.
# The output of the gromacs energies and the esp energies should be the same.
# simulation parameters (nvt = False is nve)
steps = 10000
check = steps/100
rc = 0.9 # Verlet list cutoff
skin = 0.03
timestep = 0.0005
# parameters to convert GROMACS tabulated potential file
sigma = 1.0
epsilon = 1.0
c6 = 1.0
c12 = 1.0
# GROMACS setup files
grofile = "conf.gro"
topfile = "topol.top"
# this calls the gromacs parser for processing the top file (and included files) and the conf file
# The names of the variables to unpack (defaults, types, etc.) can be found by calling
# gromacs.read(grofile, topfile) without return values: the parser then prints out the variables to be unpacked.
defaults, types, atomtypes, masses, charges, atomtypeparameters, bondtypes, bondtypeparams, angletypes, angletypeparams, exclusions, x, y, z, vx, vy, vz, resname, resid, Lx, Ly, Lz = gromacs.read(grofile, topfile)
######################################################################
## IT SHOULD BE UNNECESSARY TO MAKE MODIFICATIONS BELOW THIS LINE ##
######################################################################
#types, bonds, angles, dihedrals, x, y, z, vx, vy, vz, Lx, Ly, Lz = gromacs.read(grofile,topfile)
#defaults, types, masses, charges, atomtypeparameters, bondtypes, bondtypeparams, angletypes, angletypeparams, exclusions, x, y, z, vx, vy, vz, Lx, Ly, Lz = gromacs.read(grofile,topfile)
num_particles = len(x)
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
print(size)
sys.stdout.write('Setting up simulation ...\n')
system = espressopp.System()
system.rng = espressopp.esutil.RNG()
system.bc = espressopp.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size, size, rc, skin)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
# setting up GROMACS interaction stuff
# create a force capped Lennard-Jones interaction that uses a verlet list
verletlist = espressopp.VerletList(system, rc)
interaction = espressopp.interaction.VerletListLennardJonesGromacs(verletlist)
# add particles to the system and then decompose
props = ['id', 'pos', 'v', 'type', 'mass', 'q']
allParticles = []
for pid in range(num_particles):
part = [pid + 1, Real3D(x[pid], y[pid], z[pid]),
Real3D(vx[pid], vy[pid], vz[pid]), types[pid], masses[pid], charges[pid]]
allParticles.append(part)
system.storage.addParticles(allParticles, *props)
system.storage.decompose()
# set up LJ interaction according to the parameters read from the .top file
ljinteraction=gromacs.setLennardJonesInteractions(system, defaults, atomtypeparameters, verletlist,rc)
# set up angle interactions according to the parameters read from the .top file
angleinteractions=gromacs.setAngleInteractions(system, angletypes, angletypeparams)
# set up coulomb interactions according to the parameters read from the .top file
# !! Warning: this only works for reaction-field now!
qq_interactions=gromacs.setCoulombInteractions(system, verletlist, rc, types, epsilon1=1, epsilon2=80, kappa=0)
# set up bonded interactions according to the parameters read from the .top file
bondedinteractions=gromacs.setBondedInteractions(system, bondtypes, bondtypeparams)
# exclusions, i.e. pairs of atoms not considered for the non-bonded part. These are defined either by bonds, which automatically generate an exclusion, or by the nrexcl variable.
verletlist.exclude(exclusions)
# langevin thermostat
langevin = espressopp.integrator.LangevinThermostat(system)
langevin.gamma = 2.0
langevin.temperature = 2.4942 # kT in gromacs units
integrator = espressopp.integrator.VelocityVerlet(system)
integrator.addExtension(langevin)
integrator.dt = timestep
# print simulation parameters
print('')
print('number of particles =', num_particles)
print('density = %.4f' % (density))
print('rc =', rc)
print('dt =', integrator.dt)
print('skin =', system.skin)
print('steps =', steps)
print('NodeGrid = %s' % (nodeGrid,))
print('CellGrid = %s' % (cellGrid,))
print('')
# analysis
configurations = espressopp.analysis.Configurations(system)
configurations.gather()
temperature = espressopp.analysis.Temperature(system)
pressure = espressopp.analysis.Pressure(system)
pressureTensor = espressopp.analysis.PressureTensor(system)
print("i*timestep,Eb, EAng, ELj, EQQ, Ek, Etotal")
fmt='%5.5f %15.8g %15.8g %15.8g %15.8g %15.8g %15.8f\n'
outfile = open("esp.dat", "w")
start_time = time.process_time()
for i in range(int(check)):
T = temperature.compute()
P = pressure.compute()
Eb = 0
EAng = 0
for bd in list(bondedinteractions.values()): Eb+=bd.computeEnergy()
for ang in list(angleinteractions.values()): EAng+=ang.computeEnergy()
    ELj = ljinteraction.computeEnergy()
    EQQ = qq_interactions.computeEnergy()
T = temperature.compute()
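    # equipartition: Ek = (3/2) N kT, with T here being kT in GROMACS units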
Ek = 0.5 * T * (3 * num_particles)
Etotal = Ek+Eb+EAng+EQQ+ELj
outfile.write(fmt%(i*steps/check*timestep,Eb, EAng, ELj, EQQ, Ek, Etotal))
print((fmt%(i*steps/check*timestep,Eb, EAng, ELj, EQQ, Ek, Etotal)), end='')
#espressopp.tools.pdb.pdbwrite("traj.pdb", system, append=True)
integrator.run(int(steps/check)) # print out every steps/check steps
#system.storage.decompose()
# print timings and neighbor list information
end_time = time.process_time()
timers.show(integrator.getTimers(), precision=2)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
|
espressopp/espressopp
|
examples/water_gromacs/water.py
|
Python
|
gpl-3.0
| 7,049
|
[
"ESPResSo",
"Gromacs"
] |
ff33c549d299f76b507d7d7fb641a2aa9e5c482c51cd73d8ca21ecf4bdb69d30
|
"""Acceptances tests using py.test fixtures.
All fixtures from ../conftest.py and :mod:`pytest_splinter.plugin` are
available.
The test case structure should follow the If-When-Then pattern.
"""
#########
# Tests #
#########
def test_user_want_to_explore_news(browser):
# import ipdb; ipdb.set_trace() # python interactive debugger
visit_page(browser, 'the-project')
input_in_search_box_and_press_enter(browser, 'Plenar Meeting')
is_on_page(browser, 'Search')
is_in_listing(browser, '2nd Plenary Meeting')
###########################
# Common helper functions #
###########################
def visit_page(browser, url):
    browser.visit('http://policycompass.eu')
    browser.browser.click_link_by_partial_href(url)
def input_in_search_box_and_press_enter(browser, text):
    search_box = browser.browser.find_by_id('s').first
    search_box.fill(text + '\r')
def is_on_page(browser, partial_url):
assert partial_url in browser.browser.url
def is_in_listing(browser, heading):
assert browser.browser.is_text_present(heading)
|
FabiApfelkern/policycompass
|
tests-acceptance-frontend/test_example_story.py
|
Python
|
agpl-3.0
| 1,073
|
[
"VisIt"
] |
3f9578de9092261c19fd4dd4b26e69ab127996357e9f691a3f07a96f9ab22613
|
# -*- coding: utf-8 -*-
"""
This module contains a custom streamlining class derived from the MayaVi2
streamlining class, modified to accept an array of seed points for visualisation
using mayavi.
.. warning::
    The documentation for this class cannot be built on Read The Docs, but it is possible to build it locally.
You can use this class thus:
Create a new Streamline instance and add it to a pipeline
>>> from pysac.plot.mayavi_seed_streamline import SeedStreamline
>>> field_lines = SeedStreamline(seed_points = np.array(seeds))
>>> myvectorfield.add_child(field_lines)
"""
import numpy as np
from tvtk.api import tvtk
from traits.api import Instance, TraitPrefixList, Trait, Array
import mayavi
from mayavi.modules.streamline import Streamline
from distutils.version import LooseVersion
__all__ = ['SeedStreamline']
class SeedStreamline44(Streamline):
"""
This class is a modification of the mayavi Streamline class that accepts
    an array of seed points as an input rather than a widget.
Examples
--------
Create a new Streamline instance and add it to a pipeline
>>> from pysac.plot.mayavi_seed_streamline import SeedStreamline
>>> field_lines = SeedStreamline(seed_points = np.array(seeds))
>>> myvectorfield.add_child(field_lines)
"""
seed_points = Array(allow_none=False)
seed = Instance(tvtk.PolyData, args=())
update_mode = Trait('interactive', TraitPrefixList(['interactive',
'semi-interactive',
'non-interactive']),
desc='the speed at which the poly data is updated')
def setup_pipeline(self):
"""Override this method so that it *creates* the tvtk
pipeline.
This method is invoked when the object is initialized via
`__init__`. Note that at the time this method is called, the
tvtk data pipeline will *not* yet be setup. So upstream data
will not be available. The idea is that you simply create the
basic objects and setup those parts of the pipeline not
dependent on upstream sources and filters. You should also
set the `actors` attribute up at this point.
"""
# Create and setup the default objects.
self.seed = tvtk.PolyData(points=self.seed_points)
self.stream_tracer = tvtk.StreamTracer(maximum_propagation=2000,
integration_direction='backward',
compute_vorticity=False,
integrator_type='runge_kutta4',
)
self.ribbon_filter = tvtk.RibbonFilter()
self.tube_filter = tvtk.TubeFilter()
self.clean_filter = tvtk.CleanPolyData()
self.actor = mayavi.components.actor.Actor()
# Setup the actor suitably for this module.
self.actor.property.line_width = 2.0
def update_pipeline(self):
"""Override this method so that it *updates* the tvtk pipeline
when data upstream is known to have changed.
This method is invoked (automatically) when any of the inputs
sends a `pipeline_changed` event.
"""
mm = self.module_manager
if mm is None:
return
src = mm.source
self.configure_connection(self.stream_tracer, src)
#self.seed.inputs = [src]
# Setup the radius/width of the tube/ribbon filters based on
# given input.
if self._first:
b = src.outputs[0].bounds
l = [(b[1]-b[0]), (b[3]-b[2]), (b[5]-b[4])]
length = np.sqrt(l[0]*l[0] + l[1]*l[1] + l[2]*l[2])
self.ribbon_filter.width = length*0.0075
self.tube_filter.radius = length*0.0075
self._first = False
self._streamline_type_changed(self.streamline_type)
# Set the LUT for the mapper.
self.actor.set_lut(mm.scalar_lut_manager.lut)
self.pipeline_changed = True
def _seed_points_changed(self, old, new):
self.seed = tvtk.PolyData(points=self.seed_points)
def _stream_tracer_changed(self, old, new):
if old is not None:
old.on_trait_change(self.render, remove=True)
seed = self.seed
if seed is not None:
self.configure_source_data(new, seed)
new.on_trait_change(self.render)
mm = self.module_manager
if mm is not None:
src = mm.source
self.configure_connection(new, src)
# A default output so there are no pipeline errors. The
# update_pipeline call corrects this if needed.
self.outputs = [new.output]
self.update_pipeline()
def _seed_changed(self, old, new):
st = self.stream_tracer
if st is not None:
self.configure_connection(st, new)
#self._change_components(old, new)
class SeedStreamline43(SeedStreamline44):
"""
This class is a modification of the mayavi Streamline class that accepts
    an array of seed points as an input rather than a widget.
Notes
-----
    This is the fallback version for older MayaVi releases (selected below when mayavi.__version__ <= 4.4).
Examples
--------
Create a new Streamline instance and add it to a pipeline
>>> from pysac.plot.mayavi_seed_streamline import SeedStreamline
>>> field_lines = SeedStreamline(seed_points = np.array(seeds))
>>> myvectorfield.add_child(field_lines)
"""
def update_pipeline(self):
"""Override this method so that it *updates* the tvtk pipeline
when data upstream is known to have changed.
This method is invoked (automatically) when any of the inputs
sends a `pipeline_changed` event.
"""
mm = self.module_manager
if mm is None:
return
src = mm.source
self.stream_tracer.input = src.outputs[0]
#self.seed.inputs = [src]
# Setup the radius/width of the tube/ribbon filters based on
# given input.
if self._first:
b = src.outputs[0].bounds
l = [(b[1]-b[0]), (b[3]-b[2]), (b[5]-b[4])]
length = np.sqrt(l[0]*l[0] + l[1]*l[1] + l[2]*l[2])
self.ribbon_filter.width = length*0.0075
self.tube_filter.radius = length*0.0075
self._first = False
self._streamline_type_changed(self.streamline_type)
# Set the LUT for the mapper.
self.actor.set_lut(mm.scalar_lut_manager.lut)
self.pipeline_changed = True
def _stream_tracer_changed(self, old, new):
if old is not None:
old.on_trait_change(self.render, remove=True)
seed = self.seed
if seed is not None:
new.source = seed
new.on_trait_change(self.render)
mm = self.module_manager
if mm is not None:
new.input = mm.source.outputs[0]
# A default output so there are no pipeline errors. The
# update_pipeline call corrects this if needed.
self.outputs = [new.output]
self.update_pipeline()
def _seed_changed(self, old, new):
st = self.stream_tracer
if st is not None:
st.source = new#.poly_data
#self._change_components(old, new)
if LooseVersion(mayavi.__version__) > LooseVersion('4.4'):
SeedStreamline = SeedStreamline44
else:
SeedStreamline = SeedStreamline43
|
SWAT-Sheffield/pysac
|
pysac/plot/mayavi_seed_streamlines.py
|
Python
|
bsd-2-clause
| 7,479
|
[
"Mayavi"
] |
b1ccba6db26dc5c3cc73421175c456e381e0ef9688bbdc2afa4cfaaa40f23f98
|
""" What's this...?
"""
__RCSID__ = "$Id"
import datetime
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.Client.JobState.JobManifest import JobManifest
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.WorkloadManagementSystem.Service.JobPolicy import RIGHT_GET_INFO, RIGHT_RESCHEDULE
from DIRAC.WorkloadManagementSystem.Service.JobPolicy import RIGHT_RESET, RIGHT_CHANGE_STATUS
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import singleValueDefFields, multiValueDefFields
class JobState(object):
class DBHold:
def __init__(self):
self.checked = False
self.reset()
def reset(self):
self.job = None
self.log = None
self.tq = None
__db = DBHold()
_sDisableLocal = False
class RemoteMethod(object):
def __init__(self, functor):
self.__functor = functor
def __get__(self, obj, oType=None):
return self.__class__(self.__functor.__get__(obj, oType))
def __call__(self, *args, **kwargs):
funcSelf = self.__functor.__self__
if not funcSelf.localAccess:
rpc = funcSelf._getStoreClient()
if kwargs:
fArgs = (args, kwargs)
else:
fArgs = (args, )
return getattr(rpc, self.__functor.__name__)(funcSelf.jid, fArgs)
return self.__functor(*args, **kwargs)
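  # RemoteMethod makes each decorated accessor location-transparent: when the
  # local DB objects are available the wrapped functor runs in-process;
  # otherwise the call is forwarded to the WorkloadManagement/JobStateSync
  # service with the arguments packed as (args,) or (args, kwargs).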
def __init__(self, jid, forceLocal=False, getRPCFunctor=False, source="Unknown"):
self.__jid = jid
self.__source = str(source)
self.__forceLocal = forceLocal
if getRPCFunctor:
self.__getRPCFunctor = getRPCFunctor
else:
self.__getRPCFunctor = RPCClient
self.checkDBAccess()
@classmethod
def checkDBAccess(cls):
# Init DB if there
if not JobState.__db.checked:
JobState.__db.checked = True
for varName, dbName in (('job', 'JobDB'), ('log', 'JobLoggingDB'),
('tq', 'TaskQueueDB')):
try:
dbImp = "DIRAC.WorkloadManagementSystem.DB.%s" % dbName
dbMod = __import__(dbImp, fromlist=[dbImp])
dbClass = getattr(dbMod, dbName)
dbInstance = dbClass()
setattr(JobState.__db, varName, dbInstance)
result = dbInstance._getConnection()
if not result['OK']:
gLogger.warn("Could not connect to %s (%s). Resorting to RPC" % (dbName, result['Message']))
JobState.__db.reset()
break
else:
result['Value'].close()
except RuntimeError:
JobState.__db.reset()
break
except ImportError:
JobState.__db.reset()
break
@property
def jid(self):
return self.__jid
def setSource(self, source):
self.__source = source
@property
def localAccess(self):
if JobState._sDisableLocal:
return False
if JobState.__db.job or self.__forceLocal:
return True
return False
def __getDB(self):
return JobState.__db.job
def _getStoreClient(self):
return self.__getRPCFunctor("WorkloadManagement/JobStateSync")
def getManifest(self, rawData=False):
if self.localAccess:
result = self.__getDB().getJobJDL(self.__jid)
else:
result = self._getStoreClient().getManifest(self.__jid)
if not result['OK'] or rawData:
return result
if not result['Value']:
return S_ERROR("No manifest for job %s" % self.__jid)
manifest = JobManifest()
result = manifest.loadJDL(result['Value'])
if not result['OK']:
return result
return S_OK(manifest)
def setManifest(self, manifest):
if not isinstance(manifest, JobManifest):
manifestStr = manifest
manifest = JobManifest()
result = manifest.load(manifestStr)
if not result['OK']:
return result
manifestJDL = manifest.dumpAsJDL()
if self.localAccess:
return self.__retryFunction(5, self.__getDB().setJobJDL, (self.__jid, manifestJDL))
return self._getStoreClient().setManifest(self.__jid, manifestJDL)
# Execute traces
def __retryFunction(self, retries, functor, args=False, kwargs=False):
retries = max(1, retries)
if not args:
args = tuple()
if not kwargs:
kwargs = {}
while retries:
retries -= 1
result = functor(*args, **kwargs)
if result['OK']:
return result
if retries == 0:
return result
return S_ERROR("No more retries")
right_commitCache = RIGHT_GET_INFO
@RemoteMethod
def commitCache(self, initialState, cache, jobLog):
try:
self.__checkType(initialState, dict)
self.__checkType(cache, dict)
self.__checkType(jobLog, (list, tuple))
except TypeError as excp:
return S_ERROR(str(excp))
result = self.getAttributes(initialState.keys())
if not result['OK']:
return result
    if result['Value'] != initialState:
return S_OK(False)
gLogger.verbose("Job %s: About to execute trace. Current state %s" % (self.__jid, initialState))
data = {'att': [], 'jobp': [], 'optp': []}
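    # Cache keys are namespaced: 'att.<name>' holds a job attribute,
    # 'jobp.<name>' a job parameter and 'optp.<name>' an optimizer
    # parameter; the prefix is stripped before writing to the JobDB.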
for key in cache:
for dk in data:
if key.find("%s." % dk) == 0:
data[dk].append((key[len(dk) + 1:], cache[key]))
jobDB = JobState.__db.job
if data['att']:
attN = [t[0] for t in data['att']]
attV = [t[1] for t in data['att']]
result = self.__retryFunction(5, jobDB.setJobAttributes,
(self.__jid, attN, attV), {'update': True})
if not result['OK']:
return result
if data['jobp']:
result = self.__retryFunction(5, jobDB.setJobParameters, (self.__jid, data['jobp']))
if not result['OK']:
return result
for k, v in data['optp']:
result = self.__retryFunction(5, jobDB.setJobOptParameter, (self.__jid, k, v))
if not result['OK']:
return result
if 'inputData' in cache:
result = self.__retryFunction(5, jobDB.setInputData, (self.__jid, cache['inputData']))
if not result['OK']:
return result
logDB = JobState.__db.log
gLogger.verbose("Adding logging records for %s" % self.__jid)
for record, updateTime, source in jobLog:
gLogger.verbose("Logging records for %s: %s %s %s" % (self.__jid, record, updateTime, source))
record['date'] = updateTime
record['source'] = source
result = self.__retryFunction(5, logDB.addLoggingRecord, (self.__jid, ), record)
if not result['OK']:
return result
gLogger.info("Job %s: Ended trace execution" % self.__jid)
# We return a new initial state
return self.getAttributes(initialState.keys())
#
# Status
#
def __checkType(self, value, tList, canBeNone=False):
""" Raise TypeError if the value does not have one of the expected types
:param value: the value to test
:param tList: type or tuple of types
:param canBeNone: boolean, since there is no type for None to be used with isinstance
"""
if canBeNone:
if value is None:
return
if not isinstance(value, tList):
raise TypeError("%s has wrong type. Has to be one of %s" % (value, tList))
right_setStatus = RIGHT_GET_INFO
@RemoteMethod
def setStatus(self, majorStatus, minorStatus=None, appStatus=None, source=None, updateTime=None):
try:
self.__checkType(majorStatus, basestring)
self.__checkType(minorStatus, basestring, canBeNone=True)
self.__checkType(appStatus, basestring, canBeNone=True)
self.__checkType(source, basestring, canBeNone=True)
self.__checkType(updateTime, datetime.datetime, canBeNone=True)
except TypeError as excp:
return S_ERROR(str(excp))
result = JobState.__db.job.setJobStatus(self.__jid, majorStatus, minorStatus, appStatus)
if not result['OK']:
return result
# HACK: Cause joblogging is crappy
if not minorStatus:
minorStatus = 'idem'
if not source:
source = self.__source
return JobState.__db.log.addLoggingRecord(self.__jid, majorStatus, minorStatus, appStatus,
date=updateTime, source=source)
right_getMinorStatus = RIGHT_GET_INFO
@RemoteMethod
def setMinorStatus(self, minorStatus, source=None, updateTime=None):
try:
self.__checkType(minorStatus, basestring)
self.__checkType(source, basestring, canBeNone=True)
except TypeError as excp:
return S_ERROR(str(excp))
result = JobState.__db.job.setJobStatus(self.__jid, minor=minorStatus)
if not result['OK']:
return result
if not source:
source = self.__source
return JobState.__db.log.addLoggingRecord(self.__jid, minor=minorStatus,
date=updateTime, source=source)
@RemoteMethod
def getStatus(self):
result = JobState.__db.job.getJobAttributes(self.__jid, ['Status', 'MinorStatus'])
if not result['OK']:
return result
data = result['Value']
if data:
return S_OK((data['Status'], data['MinorStatus']))
else:
return S_ERROR('Job %d not found in the JobDB' % int(self.__jid))
right_setAppStatus = RIGHT_GET_INFO
@RemoteMethod
def setAppStatus(self, appStatus, source=None, updateTime=None):
try:
self.__checkType(appStatus, basestring)
self.__checkType(source, basestring, canBeNone=True)
except TypeError as excp:
return S_ERROR(str(excp))
result = JobState.__db.job.setJobStatus(self.__jid, application=appStatus)
if not result['OK']:
return result
if not source:
source = self.__source
return JobState.__db.log.addLoggingRecord(self.__jid, application=appStatus,
date=updateTime, source=source)
right_getAppStatus = RIGHT_GET_INFO
@RemoteMethod
def getAppStatus(self):
result = JobState.__db.job.getJobAttributes(self.__jid, ['ApplicationStatus'])
if result['OK']:
result['Value'] = result['Value']['ApplicationStatus']
return result
# Attributes
right_setAttribute = RIGHT_GET_INFO
@RemoteMethod
def setAttribute(self, name, value):
try:
self.__checkType(name, basestring)
self.__checkType(value, basestring)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.job.setJobAttribute(self.__jid, name, value)
right_setAttributes = RIGHT_GET_INFO
@RemoteMethod
def setAttributes(self, attDict):
try:
self.__checkType(attDict, dict)
except TypeError as excp:
return S_ERROR(str(excp))
keys = [key for key in attDict]
values = [attDict[key] for key in keys]
return JobState.__db.job.setJobAttributes(self.__jid, keys, values)
right_getAttribute = RIGHT_GET_INFO
@RemoteMethod
def getAttribute(self, name):
try:
self.__checkType(name, basestring)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.job.getJobAttribute(self.__jid, name)
right_getAttributes = RIGHT_GET_INFO
@RemoteMethod
def getAttributes(self, nameList=None):
try:
self.__checkType(nameList, (list, tuple), canBeNone=True)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.job.getJobAttributes(self.__jid, nameList)
# JobParameters --- REMOVED
# OptimizerParameters
right_setOptParameter = RIGHT_GET_INFO
@RemoteMethod
def setOptParameter(self, name, value):
try:
self.__checkType(name, basestring)
self.__checkType(value, basestring)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.job.setJobOptParameter(self.__jid, name, value)
right_setOptParameters = RIGHT_GET_INFO
@RemoteMethod
def setOptParameters(self, pDict):
try:
self.__checkType(pDict, dict)
except TypeError as excp:
return S_ERROR(str(excp))
for name in pDict:
result = JobState.__db.job.setJobOptParameter(self.__jid, name, pDict[name])
if not result['OK']:
return result
return S_OK()
right_removeOptParameters = RIGHT_GET_INFO
@RemoteMethod
def removeOptParameters(self, nameList):
if isinstance(nameList, basestring):
nameList = [nameList]
try:
self.__checkType(nameList, (list, tuple))
except TypeError as excp:
return S_ERROR(str(excp))
for name in nameList:
result = JobState.__db.job.removeJobOptParameter(self.__jid, name)
if not result['OK']:
return result
return S_OK()
right_getOptParameter = RIGHT_GET_INFO
@RemoteMethod
def getOptParameter(self, name):
try:
self.__checkType(name, basestring)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.job.getJobOptParameter(self.__jid, name)
right_getOptParameters = RIGHT_GET_INFO
@RemoteMethod
def getOptParameters(self, nameList=None):
try:
self.__checkType(nameList, (list, tuple), canBeNone=True)
except TypeError as excp:
return S_ERROR(str(excp))
return JobState.__db.job.getJobOptParameters(self.__jid, nameList)
# Other
@classmethod
def cleanTaskQueues(cls, source=''):
result = JobState.__db.tq.enableAllTaskQueues()
if not result['OK']:
return result
result = JobState.__db.tq.findOrphanJobs()
if not result['OK']:
return result
for jid in result['Value']:
result = JobState.__db.tq.deleteJob(jid)
if not result['OK']:
gLogger.error("Cannot delete from TQ job %s: %s" % (jid, result['Message']))
continue
result = JobState.__db.job.rescheduleJob(jid)
if not result['OK']:
gLogger.error("Cannot reschedule in JobDB job %s: %s" % (jid, result['Message']))
continue
JobState.__db.log.addLoggingRecord(jid, "Received", "", "", source="JobState")
return S_OK()
  right_rescheduleJob = RIGHT_RESCHEDULE
@RemoteMethod
def rescheduleJob(self, source=""):
result = JobState.__db.tq.deleteJob(self.__jid)
if not result['OK']:
return S_ERROR("Cannot delete from TQ job %s: %s" % (self.__jid, result['Message']))
result = JobState.__db.job.rescheduleJob(self.__jid)
if not result['OK']:
return S_ERROR("Cannot reschedule in JobDB job %s: %s" % (self.__jid, result['Message']))
JobState.__db.log.addLoggingRecord(self.__jid, "Received", "", "", source=source)
return S_OK()
right_resetJob = RIGHT_RESET
@RemoteMethod
def resetJob(self, source=""):
result = JobState.__db.job.setJobAttribute(self.__jid, "RescheduleCounter", -1)
if not result['OK']:
return S_ERROR("Cannot set the RescheduleCounter for job %s: %s" % (self.__jid, result['Message']))
result = JobState.__db.tq.deleteJob(self.__jid)
if not result['OK']:
return S_ERROR("Cannot delete from TQ job %s: %s" % (self.__jid, result['Message']))
result = JobState.__db.job.rescheduleJob(self.__jid)
if not result['OK']:
return S_ERROR("Cannot reschedule in JobDB job %s: %s" % (self.__jid, result['Message']))
JobState.__db.log.addLoggingRecord(self.__jid, "Received", "", "", source=source)
return S_OK()
right_getInputData = RIGHT_GET_INFO
@RemoteMethod
def getInputData(self):
return JobState.__db.job.getInputData(self.__jid)
  @classmethod
  def checkInputDataStructure(cls, pDict):
if not isinstance(pDict, dict):
return S_ERROR("Input data has to be a dictionary")
for lfn in pDict:
if 'Replicas' not in pDict[lfn]:
return S_ERROR("Missing replicas for lfn %s" % lfn)
replicas = pDict[lfn]['Replicas']
for seName in replicas:
        if 'SURL' not in replicas[seName] or 'Disk' not in replicas[seName]:
return S_ERROR("Missing SURL or Disk for %s:%s replica" % (seName, lfn))
return S_OK()
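  # Expected input-data shape (a sketch; the SE and LFN names are illustrative):
  #   {'/lfn/file.dat': {'Replicas': {'SOME-SE': {'SURL': 'srm://...', 'Disk': True}}}}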
right_setInputData = RIGHT_GET_INFO
@RemoteMethod
  def setInputData(self, lfnData):
result = self.checkInputDataStructure(lfnData)
if not result['OK']:
return result
return self.__db.job.setInputData(self.__jid, lfnData)
right_insertIntoTQ = RIGHT_CHANGE_STATUS
@RemoteMethod
def insertIntoTQ(self, manifest=None):
if not manifest:
result = self.getManifest()
if not result['OK']:
return result
manifest = result['Value']
reqSection = "JobRequirements"
result = manifest.getSection(reqSection)
if not result['OK']:
return S_ERROR("No %s section in the job manifest" % reqSection)
reqCfg = result['Value']
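    # e.g. (a sketch) the JobRequirements section may carry single-value
    # entries such as CPUTime = 3600 and UserPriority = 1, plus the
    # multi-value fields collected below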
jobReqDict = {}
for name in singleValueDefFields:
if name in reqCfg:
if name == 'CPUTime':
jobReqDict[name] = int(reqCfg[name])
else:
jobReqDict[name] = reqCfg[name]
for name in multiValueDefFields:
if name in reqCfg:
jobReqDict[name] = reqCfg.getOption(name, [])
jobPriority = reqCfg.getOption('UserPriority', 1)
result = self.__retryFunction(2, JobState.__db.tq.insertJob, (self.__jid, jobReqDict, jobPriority))
if not result['OK']:
errMsg = result['Message']
# Force removing the job from the TQ if it was actually inserted
result = JobState.__db.tq.deleteJob(self.__jid)
if result['OK']:
if result['Value']:
gLogger.info("Job %s removed from the TQ" % self.__jid)
return S_ERROR("Cannot insert in task queue: %s" % errMsg)
return S_OK()
|
arrabito/DIRAC
|
WorkloadManagementSystem/Client/JobState/JobState.py
|
Python
|
gpl-3.0
| 17,242
|
[
"DIRAC"
] |
357568aef153788e1b2230bf564987210b320e36a20b64164e17ba415cee9da7
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import os
from io import StringIO
import html
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger(".DisplayState")
#-------------------------------------------------------------------------
#
# GNOME python modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import GLib
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.utils.callback import Callback
from .utils import process_pending_events
from .views.navigationview import NavigationView
from gramps.gen.config import config
from gramps.gen.display.name import displayer as name_displayer
from .managedwindow import GrampsWindowManager
from gramps.gen.relationship import get_relationship_calculator
from .glade import Glade
from gramps.gen.utils.db import navigation_label
from gramps.gen.errors import HandleError
from .widgets.progressdialog import ProgressMonitor, GtkProgressDialog
from .dialog import ErrorDialog, WarningDialog
from .uimanager import ActionGroup
from ..version import VERSION_QUALIFIER, DEV_VERSION
from gramps.gen.const import VERSION
DISABLED = -1
#-------------------------------------------------------------------------
#
# History manager
#
#-------------------------------------------------------------------------
class History(Callback):
""" History manages the objects of a certain type that have been viewed,
        with the ability to go back or forward.
When accessing an object, it should be pushed on the History.
"""
__signals__ = {
'active-changed' : (str, ),
'mru-changed' : (list, )
}
def __init__(self, dbstate, nav_type):
Callback.__init__(self)
self.dbstate = dbstate
self.nav_type = nav_type
self.clear()
dbstate.connect('database-changed', self.connect_signals)
self.signal_map = {}
self.signal_map[nav_type.lower() + '-delete'] = self.handles_removed
self.signal_map[nav_type.lower() + '-rebuild'] = self.history_changed
def connect_signals(self, dbstate):
"""
Connects database signals when the database has changed.
"""
for sig in self.signal_map:
dbstate.connect(sig, self.signal_map[sig])
def clear(self):
"""
Clears the history, resetting the values back to their defaults
"""
self.history = []
self.mru = []
self.index = -1
self.lock = False
if self.dbstate.is_open() and self.nav_type == 'Person':
initial_person = self.dbstate.db.find_initial_person()
if initial_person:
self.push(initial_person.get_handle())
def push(self, handle):
"""
Pushes the handle on the history stack
"""
self.prune()
if len(self.history) == 0 or handle != self.history[-1]:
self.history.append(handle)
if handle in self.mru:
self.mru.remove(handle)
self.mru.append(handle)
self.emit('mru-changed', (self.mru, ))
self.index += 1
if self.history:
newact = self.history[self.index]
if not isinstance(newact, str):
newact = str(newact)
self.emit('active-changed', (newact,))
def forward(self, step=1):
"""
Moves forward in the history list
"""
self.index += step
handle = self.history[self.index]
if handle in self.mru:
self.mru.remove(handle)
self.mru.append(handle)
self.emit('mru-changed', (self.mru, ))
newact = self.history[self.index]
if not isinstance(newact, str):
newact = str(newact)
self.emit('active-changed', (newact,))
return newact
def back(self, step=1):
"""
Moves backward in the history list
"""
self.index -= step
try:
handle = self.history[self.index]
if handle in self.mru:
self.mru.remove(handle)
self.mru.append(handle)
self.emit('mru-changed', (self.mru, ))
newact = self.history[self.index]
if not isinstance(newact, str):
newact = str(newact)
self.emit('active-changed', (newact,))
return newact
except IndexError:
return ""
def present(self):
"""
        Return the handle that is now active in the history.
"""
try :
if self.history :
return self.history[self.index]
else:
return ""
except IndexError:
return ""
def at_end(self):
"""
returns True if we are at the end of the history list
"""
return self.index+1 == len(self.history)
def at_front(self):
"""
returns True if we are at the front of the history list
"""
return self.index <= 0
def prune(self):
"""
Truncates the history list at the current object.
"""
if not self.at_end():
self.history = self.history[0:self.index+1]
def handles_removed(self, handle_list):
"""
Called in response to an object-delete signal.
Removes a list of handles from the history.
"""
for del_id in handle_list:
history_count = self.history.count(del_id)
for dummy in range(history_count):
self.history.remove(del_id)
self.index -= 1
mhc = self.mru.count(del_id)
for dummy in range(mhc):
self.mru.remove(del_id)
if self.history:
newact = self.history[self.index]
if not isinstance(newact, str):
newact = str(newact)
self.emit('active-changed', (newact,))
self.emit('mru-changed', (self.mru, ))
def history_changed(self):
"""
Called in response to an object-rebuild signal.
Objects in the history list may have been deleted.
"""
self.clear()
self.emit('mru-changed', (self.mru, ))
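# Typical use of History (a sketch; person_handle is illustrative): a view
# pushes the handle it displays and the navigation actions step the stack:
#   history = History(dbstate, 'Person')
#   history.push(person_handle)   # emits 'active-changed' and 'mru-changed'
#   previous = history.back()     # returns the newly active handle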
#-------------------------------------------------------------------------
#
# Recent Docs Menu
#
#-------------------------------------------------------------------------
_RCT_TOP = '<placeholder id="OpenRecentMenu">'
_RCT_MENU = '''
<item>
<attribute name="action">win.%s</attribute>
<attribute name="label">%s</attribute>
</item>'''
_RCT_BTM = '\n </placeholder>\n'
_RCT_BAR_TOP = ('<object class="GtkMenu" id="OpenBtnMenu">\n'
'<property name="visible">True</property>\n'
'<property name="can_focus">False</property>')
_RCT_BAR = '''
<child>
<object class="GtkMenuItem">
<property name="action-name">win.%s</property>
<property name="label">%s</property>
<property name="use_underline">False</property>
<property name="visible">True</property>
</object>
</child>'''
_RCT_BAR_BTM = '\n</object>\n'
from gramps.gen.recentfiles import RecentFiles
class RecentDocsMenu:
def __init__(self, uistate, state, fileopen):
self.ui_xml = []
self.action_group = ActionGroup('RecentFiles')
self.active = DISABLED
self.uistate = uistate
self.uimanager = uistate.uimanager
self.fileopen = fileopen
self.state = state
def load(self, item):
filename = item.get_path()
try:
self.fileopen(filename)
except Exception as err:
ErrorDialog(_('Cannot load database'), str(err),
parent=self.uistate.window)
def build(self, update_menu=True):
gramps_rf = RecentFiles()
count = 0
if self.active != DISABLED:
self.uimanager.remove_ui(self.ui_xml)
self.uimanager.remove_action_group(self.action_group)
self.active = DISABLED
actionlist = []
menu = _RCT_TOP
bar = _RCT_BAR_TOP
rfiles = gramps_rf.gramps_recent_files
rfiles.sort(key=lambda x: x.get_time(), reverse=True)
#new_menu = Gtk.Menu()
#new_menu.set_tooltip_text(_("Connect to a recent database"))
for item in rfiles:
try:
title = html.escape(item.get_name())
filename = os.path.basename(item.get_path())
action_id = "RecentMenu%d" % count
# add the menuitem for this file
menu += _RCT_MENU % (action_id, title)
# add the action for this file
actionlist.append((action_id, make_callback(item, self.load)))
# add the toolbar menuitem
bar += _RCT_BAR % (action_id, title)
except RuntimeError:
# ignore no longer existing files
_LOG.info("Ignoring the RecentItem %s (%s)" % (title, filename))
count += 1
menu += _RCT_BTM
bar += _RCT_BAR_BTM
self.ui_xml = [menu, bar]
self.action_group.add_actions(actionlist)
self.uimanager.insert_action_group(self.action_group)
self.active = self.uimanager.add_ui_from_string(self.ui_xml)
if update_menu:
self.uimanager.update_menu()
def make_callback(val, func):
return lambda x, y: func(val)
from .logger import RotateHandler
class WarnHandler(RotateHandler):
def __init__(self, capacity, button, parent=None):
RotateHandler.__init__(self, capacity)
self.setLevel(logging.WARN)
self.button = button
button.on_clicked(self.display)
self.timer = None
self.last_line = '-1'
self.parent = parent
def emit(self, record):
if self.timer is None:
#check every 3 minutes if warn button can disappear
self.timer = GLib.timeout_add(3*60*1000, self._check_clear)
RotateHandler.emit(self, record)
self.button.show()
def _check_clear(self):
buffer = self.get_buffer()
if len(buffer) > 0:
new_last_line = self.get_buffer()[-1]
if self.last_line == new_last_line:
#buffer has not changed for 3 minutes, let's clear it:
self._clear()
return False
else:
self.last_line = new_last_line
return True
else:
return False
def _clear(self):
self.button.hide()
self.set_capacity(self._capacity)
self.last_line = '-1'
self.timer = None
def display(self, obj):
obj.hide()
self.glade = Glade(toplevel='displaystate')
top = self.glade.toplevel
msg = self.glade.get_object('msg')
buf = msg.get_buffer()
for i in self.get_formatted_log():
buf.insert_at_cursor(i + '\n')
if self.parent:
top.set_transient_for(self.parent)
top.run()
top.destroy()
class DisplayState(Callback):
__signals__ = {
'filters-changed' : (str, ),
'filter-name-changed' : (str, str, str),
'nameformat-changed' : None,
'placeformat-changed' : None,
'grampletbar-close-changed' : None,
'update-available' : (list, ),
'autobackup' : None,
'font-changed' : None,
}
#nav_type to message
NAV2MES = {
'Person': _("No active person"),
'Family': _("No active family"),
'Event': _("No active event"),
'Place': _("No active place"),
'Source': _("No active source"),
'Citation': _("No active citation"),
'Repository': _("No active repository"),
'Media': _("No active media"),
'Note': _("No active note"),
}
BUSY_CURSOR = Gdk.Cursor.new_for_display(Gdk.Display.get_default(),
Gdk.CursorType.WATCH)
def __init__(self, window, status, uimanager, viewmanager=None):
self.busy = False
self.cursor = None
self.viewmanager = viewmanager
self.uimanager = uimanager
self.progress_monitor = ProgressMonitor(GtkProgressDialog, ("", window))
self.window = window
Callback.__init__(self)
self.status = status
self.status_id = status.get_context_id('GRAMPS')
self.progress = status.get_progress_bar()
self.status_ver = status.get_version_btn()
self.history_lookup = {}
self.gwm = GrampsWindowManager(uimanager)
self.widget = None
self.disprel_old = ''
self.disprel_defpers = None
self.disprel_active = None
self.set_relationship_class()
self.export = False
self.backup_timer = None
self.symbols = config.get('utf8.in-use')
self.death_symbol = config.get('utf8.death-symbol')
formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')
warnbtn = status.get_warning_button()
self.rhandler = WarnHandler(capacity=400, button=warnbtn, parent=window)
self.rhandler.setFormatter(formatter)
self.rhandler.setLevel(logging.WARNING)
self.log = logging.getLogger()
self.log.addHandler(self.rhandler)
# This call has been moved one level up,
# but this connection is still made!
# self.dbstate.connect('database-changed', self.db_changed)
if DEV_VERSION or VERSION_QUALIFIER:
ver_btn = status.get_version_btn()
ver_btn.set_label(VERSION)
if DEV_VERSION:
msg = 'master'
else:
msg = VERSION_QUALIFIER[1:]
ver_btn.connect('clicked', self.__develop_warn, msg)
ver_btn.show()
def set_backup_timer(self):
"""
Set the backup timer.
"""
interval = config.get('database.autobackup')
if self.backup_timer is not None:
GLib.source_remove(self.backup_timer)
self.backup_timer = None
if interval == 1:
minutes = 15
elif interval == 2:
minutes = 30
elif interval == 3:
minutes = 60
elif interval == 4:
minutes = 720
elif interval == 5:
minutes = 1440
if interval > 0:
self.backup_timer = GLib.timeout_add_seconds(
minutes*60, self.__emit_autobackup)
def __emit_autobackup(self):
"""
Emit an 'autobackup' signal.
"""
self.emit('autobackup')
return True
def screen_width(self):
"""
Return the width of the current screen.
"""
return self.window.get_screen().get_width()
def screen_height(self):
"""
Return the height of the current screen.
"""
return self.window.get_screen().get_height()
def clear_history(self):
"""
Clear all history objects.
"""
for history in list(self.history_lookup.values()):
history.clear()
def get_history(self, nav_type, nav_group=0):
"""
Return the history object for the given navigation type and group.
"""
return self.history_lookup.get((nav_type, nav_group))
def register(self, dbstate, nav_type, nav_group):
"""
Create a history and navigation object for the specified
navigation type and group, if they don't exist.
"""
if (nav_type, nav_group) not in self.history_lookup:
history = History(dbstate, nav_type)
self.history_lookup[(nav_type, nav_group)] = history
def get_active(self, nav_type, nav_group=0):
"""
        Return the handle for the active object specified by the given
navigation type and group.
"""
history = self.get_history(nav_type, nav_group)
return history.present() if history else None
def set_active(self, handle, nav_type, nav_group=0):
"""
Set the active object for the specified navigation type and group to
the given handle.
"""
history = self.get_history(nav_type, nav_group)
if history:
history.push(handle)
def set_sensitive(self, state):
tbar = self.uimanager.get_widget('ToolBar')
tbar.set_sensitive(state)
self.viewmanager.hpane.set_sensitive(state)
self.uimanager.enable_all_actions(state)
def db_changed(self, db):
db.connect('long-op-start', self.progress_monitor.add_op)
self.clear_history()
def set_relationship_class(self):
"""method that rebinds the relationship to the current rel calc
Should be called after load or reload of plugins
"""
self.relationship = get_relationship_calculator(reinit=True)
def set_gendepth(self, value):
""" Set the generations we search back for showing relationships
        on the Gramps interface. Value must be an integer > 0.
        This method is used by the preference editor when the user changes
        the generations.
"""
self.relationship.set_depth(value)
def display_relationship(self, dbstate, active_handle):
""" Construct the relationship in order to show it in the statusbar
This can be a time intensive calculation, so we only want to do
it if persons are different than before.
Eg: select a person, then double click, will result in calling
three times to construct build the statusbar. We only want
to obtain relationship once!
This means the relationship part of statusbar only changes on
change of row.
"""
self.relationship.connect_db_signals(dbstate)
default_person = dbstate.db.get_default_person()
if default_person is None or active_handle is None:
return ''
if default_person.handle == self.disprel_defpers and \
active_handle == self.disprel_active :
return self.disprel_old
active = dbstate.db.get_person_from_handle(active_handle)
if active is None:
# During merger this method can be called at a time when treemodel
# and database are not in sync, resulting in active_handle != None,
# but active == None; see bug 5290 for the details.
return ''
name = self.relationship.get_one_relationship(
dbstate.db, default_person, active)
#store present call data
self.disprel_old = name
self.disprel_defpers = default_person.handle
self.disprel_active = active_handle
if name:
return name
else:
return ""
def set_export_mode(self, value):
self.set_busy_cursor(value)
if value == self.export:
return
else:
self.export = value
def get_export_mode(self):
return self.export
def set_busy_cursor(self, value):
if value == self.busy:
return
else:
self.busy = value
if self.window.get_window():
if value:
self.cursor = self.window.get_window().get_cursor()
self.window.get_window().set_cursor(self.BUSY_CURSOR)
else:
self.window.get_window().set_cursor(self.cursor)
if self.window.get_window().is_visible():
#avoid critical gdk error:
#Gdk-CRITICAL **: gdk_error_trap_pop_internal: assertion `trap != NULL' failed
#only process events if window is actually visible
process_pending_events()
def set_open_widget(self, widget):
self.widget = widget
def set_open_recent_menu(self, menu):
self.widget.set_menu(menu)
def push_message(self, dbstate, text):
self.status_text(text)
GLib.timeout_add(5000, self.modify_statusbar, dbstate)
def show_filter_results(self, dbstate, matched, total):
#nav_type = self.viewmanager.active_page.navigation_type()
#text = ((_("%(nav_type)s View") % {"nav_type": _(nav_type)}) +
text = (self.viewmanager.active_page.get_title() +
(": %d/%d" % (matched, total)))
self.status.set_filter(text)
def clear_filter_results(self):
self.status.clear_filter()
def modify_statusbar(self, dbstate, active=None):
""" Update the status bar with current object info.
Since this is called via GLib.timeout_add it can happen at any time
Gtk is idle or processing pending events. Even in the midst of a
multiple delete, before the GUI has been updated for missing objects.
So it is susceptible to HandleErrors for missing data, thus the 'try'.
"""
try:
view = self.viewmanager.active_page
if not isinstance(view, NavigationView) or dbstate is None:
return
nav_type = view.navigation_type()
active_handle = self.get_active(nav_type, view.navigation_group())
self.status.pop(self.status_id)
if active_handle and dbstate.is_open():
name, _obj = navigation_label(dbstate.db, nav_type,
active_handle)
# Append relationship to default person if enabled.
if(nav_type == 'Person' and
config.get('interface.statusbar') > 1):
if active_handle != dbstate.db.get_default_handle():
msg = self.display_relationship(dbstate, active_handle)
if msg:
name = '%s (%s)' % (name, msg.strip())
else:
name = _('No active object')
if not name:
name = self.NAV2MES[nav_type]
self.status.push(self.status_id, name)
process_pending_events()
except HandleError:
return
def pulse_progressbar(self, value, text=None):
self.progress.set_fraction(min(value/100.0, 1.0))
if text:
self.progress.set_text("%s: %d%%" % (text, value))
else:
self.progress.set_text("%d%%" % value)
process_pending_events()
def status_text(self, text):
self.status.pop(self.status_id)
self.status.push(self.status_id, text)
process_pending_events()
def reload_symbols(self):
self.symbols = config.get('utf8.in-use')
self.death_symbol = config.get('utf8.death-symbol')
def __develop_warn(self, button, warning_type):
"""
Display a development warning message to the user, with the
warning_type in it.
:param warning_type: the general name of the warning, e.g. "master"
:type warning_type: str
"""
WarningDialog(
_('Danger: This is unstable code!'),
_("This Gramps ('%s') is a development release.\n"
) % warning_type +
_("This version is not meant for normal usage. Use "
"at your own risk.\n"
"\n"
"This version may:\n"
"1) Work differently than you expect.\n"
"2) Fail to run at all.\n"
"3) Crash often.\n"
"4) Corrupt your data.\n"
"5) Save data in a format that is incompatible with the "
"official release.\n"
"\n"
"%(bold_start)sBACKUP%(bold_end)s "
"your existing databases before opening "
"them with this version, and make sure to export your "
"data to XML every now and then."
) % {'bold_start' : '<b>',
'bold_end' : '</b>'},
parent=self.window)
|
gramps-project/gramps
|
gramps/gui/displaystate.py
|
Python
|
gpl-2.0
| 25,554
|
[
"Brian"
] |
bac529da89523d59b2fea97213b98683b2fafa5299f1b86314bdc948ff96964e
|
# Copyright 2003 by Bartek Wilczynski. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Scanners for AlignACE and CompareACE output.
"""
class AlignAceScanner:
"""Scannner for AlignACE output
Methods:
feed Feed data into the scanner.
The scanner generates (and calls the consumer) the following types of events:
noevent - blank line
version - AlignACE version number
command_line - AlignACE command line string
    parameters - the beginning of the parameters
    parameter - the line containing a parameter
    sequences - the beginning of the sequences list
    sequence - line containing the name of the input sequence (and a respective number)
    motif - the beginning of the motif (contains the number)
motif_hit - one hit for a motif
motif_mask - mask of the motif (space - gap, asterisk - significant position)
    motif_score - MAP score of the motif - approximately N * log R, where R = (number of actual occurrences) / (number of occurrences expected by chance)
"""
def feed(self, handle, consumer):
"""S.feed(handle, consumer)
Feed in a AlignACE report for scanning. handle is a file-like
object that contains the AlignACE report. consumer is a Consumer
object that will receive events as the report is scanned.
"""
consumer.version(handle.readline())
consumer.command_line(handle.readline())
for line in handle:
if line.strip() == "":
consumer.noevent(line)
elif line[:4]=="Para":
consumer.parameters(line)
elif line[0]=="#":
consumer.sequence(line)
elif "=" in line:
consumer.parameter(line)
elif line[:5]=="Input":
consumer.sequences(line)
elif line[:5]=="Motif":
consumer.motif(line)
elif line[:3]=="MAP":
consumer.motif_score(line)
elif len(line.split("\t"))==4:
consumer.motif_hit(line)
elif "*" in line:
consumer.motif_mask(line)
else:
raise ValueError(line)
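# --- Illustrative sketch (not part of the original module) ---
# A minimal consumer with catch-all handlers shows how feed() drives events;
# the handler names it receives mirror the events listed in the docstring above.
class _EchoConsumer:
    """Hypothetical consumer that simply prints every event it receives."""
    def __getattr__(self, event_name):
        def handler(line):
            print("%s %s" % (event_name, line.rstrip()))
        return handler
# Usage sketch (assumes "alignace_report.txt" is an AlignACE output file):
#     with open("alignace_report.txt") as handle:
#         AlignAceScanner().feed(handle, _EchoConsumer())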
class CompareAceScanner:
"""Scannner for CompareACE output
Methods:
feed Feed data into the scanner.
The scanner generates (and calls the consumer) the following types of events:
motif_score - CompareACE score of motifs
###### TO DO #############
extend the scanner to include other, more complex outputs.
"""
def feed(self, handle, consumer):
"""S.feed(handle, consumer)
Feed in a CompareACE report for scanning. handle is a file-like
object that contains the CompareACE report. consumer is a Consumer
object that will receive events as the report is scanned.
"""
consumer.motif_score(handle.readline())
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/AlignAce/Scanner.py
|
Python
|
apache-2.0
| 3,011
|
[
"Biopython"
] |
2b9073bfa482ba44affc1ec1afb32b95802f9673f92781557a41dbf4ac3c88cc
|
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
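A minimal `[versioneer]` section (the paths and prefixes below are
placeholders; adapt them to your project) looks like this:
    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix =
    parentdir_prefix = myproject-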
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
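As an illustration (the exact values depend on your checkout), a tree that is
two commits past the "0.11" tag and has local modifications might report:
    {'version': '0.11+2.g1076c97.dirty',
     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
     'dirty': True,
     'error': None,
     'date': '2016-05-31T13:02:11-0700'}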
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
import errno
import json
import os
import re
import subprocess
import sys
try:
import configparser
except ImportError:
import ConfigParser as configparser
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py),
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg) as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
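# Illustrative note (not part of the original file): once the decorated git
# handlers further down in this file are defined, HANDLERS looks like
#     {"git": {"get_keywords": git_get_keywords,
#              "keywords": git_versions_from_keywords,
#              "pieces_from_vcs": git_pieces_from_vcs}}
# and get_versions() below selects an entry via HANDLERS.get(cfg.VCS).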
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs)
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except OSError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix),
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except OSError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents,
re.M | re.S,
)
if not mo:
mo = re.search(
r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents,
re.M | re.S,
)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set {} to '{}'".format(filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
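# Worked example (illustrative, not part of the original file): for
#     pieces = {"closest-tag": "0.11", "distance": 2,
#               "short": "1076c97", "dirty": True}
# render_pep440(pieces) yields "0.11+2.g1076c97.dirty", the same "pep440"
# style string described in the module docstring at the top of this file.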
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print(f"got version from file {versionfile_abs} {ver}")
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
},
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
},
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile,
self._versioneer_generated_versions,
)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
},
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy) as f:
old = f.read()
except OSError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in) as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except OSError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source,
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
pyoceans/python-oceans
|
versioneer.py
|
Python
|
bsd-3-clause
| 68,638
|
[
"Brian"
] |
80ab01f473859652602ca280f2d245eed1670a11260aa97efc39bd09e1d67f3e
|
import os
import sys
import time
import logging
import datetime
import numpy as np
from data import *
from time import clock
from parameters import *
from collections import defaultdict
import nest  # NEST simulator API used throughout (it may also be re-exported by the star imports above)
SAVE_PATH = "" # path to save results
spike_generators = {} # dict name_part : spikegenerator
spike_detectors = {} # dict name_part : spikedetector
multimeters = {} # dict name_part : multimeter
startsimulate = 0
endsimulate = 0
txt_result_path = "" # path for txt results
all_parts = tuple() # tuple of all parts
one_collumn = tuple() # tuple of one column's parts
MaxSynapses = 4000 # max synapses
SYNAPSES = 0 # synapse number
NEURONS = 0 # neurons number
times = [] # store time simulation
logging.basicConfig(format='%(name)s.%(levelname)s: %(message)s.', level=logging.DEBUG)
logger = logging.getLogger('function')
def getAllParts():
return all_parts
def generate_neurons(NNumber):
global NEURONS, all_parts
logger.debug("* * * Start generate neurons")
c2 = l2c2 + l3c2 + l4c2 + l5c2 + l6c2
c3 = l2c3 + l3c3 + l4c3 + l5c3 + l6c3
c4 = l2c4 + l3c4 + l4c4 + l5c4 + l6c4
c5 = l2c5 + l3c5 + l4c5 + l5c5 + l6c5
c6 = l2c6 + l3c6 + l4c6 + l5c6 + l6c6
c7 = l2c7 + l3c7 + l4c7 + l5c7 + l6c7
c8 = l2c8 + l3c8 + l4c8 + l5c8 + l6c8
c9 = l2c9 + l3c9 + l4c9 + l5c9 + l6c9
parts_no_dopa = l2 + l3 + l4 + l5 + l6 + thalamus
collumns = c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9
all_parts = tuple(sorted(parts_no_dopa))
#all_parts = tuple(sorted(parts_no_dopa + collumns))  # use this instead to include the parts of every column
NN_coef = float(NNumber) / sum(item[k_NN] for item in all_parts)
for part in all_parts:
part[k_NN] = NN_minimal if int(part[k_NN] * NN_coef) < NN_minimal else int(part[k_NN] * NN_coef)
NEURONS = sum(item[k_NN] for item in all_parts)
for part in collumns:
part[k_NN] = NN_minimal if int(part[k_NN] * NN_coef) < NN_minimal else int(part[k_NN] * NN_coef)
NEURONS = sum(item[k_NN] for item in all_parts)
logger.debug('Initialized: {0} neurons'.format(NEURONS))
# Init neuron models with our parameters
nest.SetDefaults('iaf_psc_exp', iaf_neuronparams)
nest.SetDefaults('iaf_psc_alpha', iaf_neuronparams)
# Parts without dopamine
for part in parts_no_dopa:
part[k_model] = 'iaf_psc_exp'
for part in collumns:
part[k_model] = 'iaf_psc_exp'
# Creating neurons
for part in all_parts:
part[k_IDs] = nest.Create(part[k_model], part[k_NN])
logger.debug("{0} [{1}, {2}] {3} neurons".format(part[k_name], part[k_IDs][0], part[k_IDs][-1:][0], part[k_NN]))
for part in collumns:
part[k_IDs] = nest.Create(part[k_model], part[k_NN])
logger.debug("{0} [{1}, {2}] {3} neurons".format(part[k_name], part[k_IDs][0], part[k_IDs][-1:][0], part[k_NN]))
def log_connection(pre, post, syn_type, weight):
global SYNAPSES
connections = pre[k_NN] * post[k_NN] if post[k_NN] < MaxSynapses else pre[k_NN] * MaxSynapses
SYNAPSES += connections
logger.debug("{0} -> {1} ({2}) w[{3}] // "
"{4}x{5}={6} synapses".format(pre[k_name], post[k_name], syn_type[:-8], weight, pre[k_NN],
MaxSynapses if post[k_NN] > MaxSynapses else post[k_NN], connections))
def connect(pre, post, syn_type=GABA, weight_coef=1):
# Set new weight value (weight_coef * basic weight)
nest.SetDefaults(synapses[syn_type][model], {'weight': weight_coef * synapses[syn_type][basic_weight]})
# Create dictionary of connection rules
conn_dict = {'rule': 'fixed_outdegree',
'outdegree': MaxSynapses if post[k_NN] > MaxSynapses else post[k_NN],
'multapses': True}
# Connect PRE IDs neurons with POST IDs neurons, add Connection and Synapse specification
nest.Connect(pre[k_IDs], post[k_IDs], conn_spec=conn_dict, syn_spec=synapses[syn_type][model])
# Show data of new connection
log_connection(pre, post, synapses[syn_type][model], nest.GetDefaults(synapses[syn_type][model])['weight'])
def connect_generator(part, startTime=1, stopTime=T, rate=250, coef_part=1):
name = part[k_name]
# Add to spikeGenerators dict a new generator
spike_generators[name] = nest.Create('poisson_generator', 1, {'rate' : float(rate),
'start': float(startTime),
'stop' : float(stopTime)})
# Create dictionary of connection rules
conn_dict = {'rule': 'fixed_outdegree',
'outdegree': int(part[k_NN] * coef_part)}
# Connect generator and part IDs with connection specification and synapse specification
nest.Connect(spike_generators[name], part[k_IDs], conn_spec=conn_dict, syn_spec=static_syn)
# Show data of new generator
logger.debug("Generator => {0}. Element #{1}".format(name, spike_generators[name][0]))
def connect_detector(part):
name = part[k_name]
# Init number of neurons which will be under detector watching
number = part[k_NN] if part[k_NN] < N_detect else N_detect
# Add to spikeDetectors a new detector
spike_detectors[name] = nest.Create('spike_detector', params=detector_param)
# Connect N first neurons ID of part with detector
nest.Connect(part[k_IDs][:number], spike_detectors[name])
# Show data of new detector
logger.debug("Detector => {0}. Tracing {1} neurons".format(name, number))
def f_name_gen(path, name):
"""Generate the full file name of an image."""
return "{0}{1}{2}.png".format(path, name, "+dopa" if dopamine_flag else "")
def simulate():
global startsimulate, endsimulate
begin = 0
save_path = "../results/output-{0}/".format(NEURONS)
if not os.path.exists(save_path):
os.makedirs(save_path)
nest.PrintNetwork()
logger.debug('* * * Simulating')
startsimulate = datetime.datetime.now()
for t in np.arange(0, T, dt):
print "SIMULATING [{0}, {1}]".format(t, t + dt)
nest.Simulate(dt)
end = clock()
times.append("{0:10.1f} {1:8.1f} "
"{2:10.1f} {3:4.1f} {4}\n".format(begin, end - begin, end, t, datetime.datetime.now().time()))
begin = end
print "COMPLETED {0}%\n".format(t/dt)
endsimulate = datetime.datetime.now()
logger.debug('* * * Simulation completed successfully')
def get_log(startbuild, endbuild):
logger.info("Number of neurons : {}".format(NEURONS))
logger.info("Number of synapses : {}".format(SYNAPSES))
logger.info("Building time : {}".format(endbuild - startbuild))
logger.info("Simulation time : {}".format(endsimulate - startsimulate))
logger.info("Dopamine : {}".format('YES' if dopamine_flag else 'NO'))
logger.info("Noise : {}".format('YES' if generator_flag else 'NO'))
def save(GUI):
global txt_result_path
if GUI:
import pylab as pl
import nest.raster_plot
import nest.voltage_trace
logger.debug("Saving IMAGES into {0}".format(SAVE_PATH))
N_events_gen = len(spike_generators)
for key in spike_detectors:
try:
nest.raster_plot.from_device(spike_detectors[key], hist=True)
pl.savefig(f_name_gen(SAVE_PATH, "spikes_" + key.lower()), dpi=dpi_n, format='png')
pl.close()
except Exception:
print("From {0} is NOTHING".format(key))
N_events_gen -= 1
for key in multimeters:
try:
nest.voltage_trace.from_device(multimeters[key])
pl.savefig(f_name_gen(SAVE_PATH, "volt_" + key.lower()), dpi=dpi_n, format='png')
pl.close()
except Exception:
print("From {0} is NOTHING".format(key))
print "Results {0}/{1}".format(N_events_gen, len(spike_detectors))
print "Results {0}/{1}".format(N_events_gen, len(spike_detectors))
txt_result_path = SAVE_PATH + 'txt/'
logger.debug("Saving TEXT into {0}".format(txt_result_path))
if not os.path.exists(txt_result_path):
os.mkdir(txt_result_path)
for key in spike_detectors:
save_spikes(spike_detectors[key], name=key)
#for key in multimeters:
# save_voltage(multimeters[key], name=key)
with open(txt_result_path + 'timeSimulation.txt', 'w') as f:
for item in times:
f.write(item)
def save_spikes(detec, name, hist=False):
title = "Raster plot from device '%i'" % detec[0]
ev = nest.GetStatus(detec, "events")[0]
ts = ev["times"]
gids = ev["senders"]
data = defaultdict(list)
if len(ts):
with open("{0}@spikes_{1}.txt".format(txt_result_path, name), 'w') as f:
f.write("Name: {0}, Title: {1}, Hist: {2}\n".format(name, title, "True" if hist else "False"))
for num in range(0, len(ev["times"])):
data[round(ts[num], 1)].append(gids[num])
for key in sorted(data.iterkeys()):
f.write("{0:>5} : {1:>4} : {2}\n".format(key, len(data[key]), sorted(data[key])))
else:
print "Spikes in {0} is NULL".format(name)
|
vitaliykomarov/NEUCOGAR
|
nest/visual/V4/scripts/func.py
|
Python
|
gpl-2.0
| 9,243
|
[
"NEURON"
] |
7a5a3e645aae2348c9cd3cccec88644d864a48a4462d066739ba8e5f07d2df02
|
import pytest
from biothings.web.options import Option, OptionError, ReqArgs
def test_01():
reqargs = (
ReqArgs.Path(
args=("gene", "1017"),
kwargs={
"host": "mygene.info",
"version": "v3"}),
{
"size": "10",
"dotfield": "true",
"format": "json"
}
)
opt = Option({"keyword": "doc_type", "path": 0})
assert opt.parse(reqargs) == "gene"
opt = Option({"keyword": "gene_id", "type": int, "path": 1})
assert opt.parse(reqargs) == 1017
opt = Option({"keyword": "host", "path": "host"})
assert opt.parse(reqargs) == "mygene.info"
opt = Option({"keyword": "host", "path": 100})
assert opt.parse(reqargs) is None
opt = Option({"keyword": "size", "type": int})
assert opt.parse(reqargs) == 10
opt = Option({"keyword": "dotfield", "type": bool, "default": False})
assert opt.parse(reqargs) is True
opt = Option({"keyword": "from", "type": int, "default": 0})
assert opt.parse(reqargs) == 0
opt = Option({"keyword": "userquery", "type": str})
assert opt.parse(reqargs) is None
def test_02():
reqargs = ReqArgs(
query={
"size": "10",
"format": "html"
},
json_={
"q": "cdk2",
"scopes": ["ensembl", "entrez"],
"format": "json"
}
)
opt = Option({"keyword": "size", "type": int})
assert opt.parse(reqargs) == 10
with pytest.raises(OptionError):
opt = Option({"keyword": "size", "type": int, "max": 3})
opt.parse(reqargs)
opt = Option({"keyword": "q"})
assert opt.parse(reqargs) == "cdk2"
opt = Option({"keyword": "q", "type": str})
assert opt.parse(reqargs) == "cdk2"
with pytest.raises(OptionError):
opt = Option({"keyword": "q", "type": list})
opt.parse(reqargs)
opt = Option({"keyword": "q", "type": list, "strict": False})
assert opt.parse(reqargs) == ["cdk2"]
opt = Option({"keyword": "format", "type": str})
assert opt.parse(reqargs) == "html"
opt = Option({"keyword": "out_format", "alias": "format"})
assert opt.parse(reqargs) == "html"
opt = Option({"keyword": "format", "location": ("json", "query")})
assert opt.parse(reqargs) == "json"
opt = Option({"keyword": "format", "location": "json"})
assert opt.parse(reqargs) == "json"
with pytest.raises(OptionError):
opt = Option({"keyword": "scopes", "type": str})
opt.parse(reqargs)
opt = Option({"keyword": "scopes", "type": str, "strict": False})
assert opt.parse(reqargs) == "['ensembl', 'entrez']"
opt = Option({"keyword": "scopes", "type": list})
assert opt.parse(reqargs) == ["ensembl", "entrez"]
def test_03():
reqargs = ReqArgs(form={
"ids": "cdk,cdk2",
"scopes": "symbol",
"size": 10
})
opt = Option({"keyword": "scopes", "type": list})
assert opt.parse(reqargs) == ["symbol"]
opt = Option({"keyword": "ids", "type": list})
assert opt.parse(reqargs) == ["cdk", "cdk2"]
opt = Option({"keyword": "size", "type": int})
assert opt.parse(reqargs) == 10
|
biothings/biothings.api
|
tests/web/options/test_options.py
|
Python
|
apache-2.0
| 3,211
|
[
"CDK"
] |
7f2fdf11922146d0a2e21bd52043c081fc25bf376abd524b1105ac5437b523dd
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
from argparse import ArgumentParser
from os import listdir
import numpy as np
from adderror import adderror
"""ENSAMBLE, -d directory -n number of models """
"""-k number of selected structure"""
"""-r repet of program"""
def get_argument():
parser = ArgumentParser()
parser.add_argument("-d", "--dir", dest="mydirvariable",
help="Choose dir", metavar="DIR", required=True)
parser.add_argument("-n", metavar='N', type=int,
dest="n_files",
help="Number of selected structure",
required=True)
parser.add_argument("-k", metavar='K', type=int,
dest="k_options",
help="Number of possibility structure, less then selected files",
required=True)
parser.add_argument("-r", metavar='R', type=int,
dest="repeat", help="Number of repetitions",
default=1)
parser.add_argument("--verbose", help="increase output verbosity",
action="store_true")
parser.add_argument("-result", type=float, dest="result",
help="pesimist(0) or optimist(<0) result",
default=0)
return parser.parse_args()
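# Example invocation (directory name and counts are illustrative only):
#     python testing_ensamble_krabik.py -d ./pdb_models -n 20 -k 3 -r 5 --verbose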
def find_pdb_file(mydirvariable):
pdb_files = []
files = listdir(mydirvariable)
for line in files:
line = line.rstrip()
if re.search('.pdb$', line):
pdb_files.append(line)
return pdb_files
def test_argument(n_files, k_options, list_pdb_file):
if len(list_pdb_file) < n_files:
print("Number od pdb files is ONLY", len(list_pdb_file))
sys.exit(0)
if k_options > n_files:
print("Number of selected structure is ONLY", args.n_files)
sys.exit(0)
def print_parametrs_verbose(n_files, k_options, list_pdb_file):
print('Parameters')
print('Working directory', os.getcwd())
print('Total number of pdb files is', len(list_pdb_file))
print('The number of the selected files',
n_files)
print('The number of selected options', k_options)
print('All pdb.dat files \n', list_pdb_file)
def ensamble_fit(selected_files_for_ensamble,
data_for_experiment_modified, file_and_weight):
with tempfile.TemporaryDirectory(dir='.') as tmpdirname:
print('created temporary directory', tmpdirname)
i = 1
for f in selected_files_for_ensamble:
shutil.copy(f, tmpdirname + '/' + str(i).zfill(2) + '.pdb')
i += 1
print('CHECK', len(selected_files_for_ensamble), len(data_for_experiment_modified))
print('check', tmpdirname[2:])
#name = input("Enter your name: ") # Python 3
curve_for_ensamble = make_curve_for_experiment(data_for_experiment_modified, file_and_weight)
command = '/storage/brno3-cerit/home/krab1k/saxs-ensamble-fit/core/ensamble-fit -L -p {path}{pdbdir}/ -n {n} -m {saxscurve}'.format(path = os.getcwd(), pdbdir=tmpdirname[1:], n=len(selected_files_for_ensamble), saxscurve=curve_for_ensamble)
print(command)
#subprocess.call(command, shell=True)
return ()
def work_with_result_from_ensamble():
result_q_and_value = []
with open('result', 'r') as f:
next(f)
# print (f.readline[2:4])
for line in f:
line = line.rstrip()
print(line)
value_of_chi2 = line.split(',')[3]
values_of_index_result = line.split(',')[4:]
result_q_and_value.append((value_of_chi2, values_of_index_result))
print('value chi', value_of_chi2)
print('index', values_of_index_result)
print('result_q_and_value', result_q_and_value)
return result_q_and_value
def do_result(result, select_random_files_for_experiment,
data_for_experiment, f):
result_q_and_value = work_with_result_from_ensamble()
list_of_tuples = []
if result == 0:
tolerance = 0
else:
tolerance = float(result)
if tolerance > 1:
print('Tolerance must be less than or equal to 1')
sys.exit(0)
maximum = float(max(result_q_and_value)[0])
minimum = maximum - maximum * tolerance
for i, j in result_q_and_value:
print('number of selected files', len(select_random_files_for_experiment))
print('length of j', len(j))
if float(i) >= minimum:
f.write('minimum a maximum:' + '\t' + str(minimum) + str(maximum) + '\n')
for k in range(len(j)):
if float(j[k]) != 0:
list_of_tuples.append((i, j[k],
select_random_files_for_experiment[k]))
print('result', list_of_tuples)
sum_rmsd = 0
for k in list_of_tuples:
sum_rmsd = sum_rmsd + rmsd_pymol(data_for_experiment[0],
k[2], f) * float(k[1])
f.write('sum rmsd' + '\t' + str(sum_rmsd) + '\n')
list_of_tuples = []
print('rmsd pymol', sum_rmsd)
print(tolerance)
def make_curve_for_experiment(data_for_experiment_modified, file_and_weight):
print('data', data_for_experiment_modified)
tmp = list(file_and_weight)
files = [open(file, 'r') for file in data_for_experiment_modified]
print(tmp)
print(data_for_experiment_modified)
with open('result.pdb.dat', 'w') as f:
run = True
while run:
sum_result = 0
q = 0
i = 0
for file in files:
line = file.readline()
if line == '':
run = False
break
if not line.startswith('#'):
#print(line)
tmp_list = list(filter(None,line.split(' ')))
sum_result += float(tmp_list[1])* tmp[i][0]
#print('result', tmp_list[1], tmp[i][0], sum_result)
q = float(tmp_list[0])
i += 1
if sum_result != 0:
f.write(str(q) + '\t' + str(sum_result) + '\t' + str(0) + '\n')
for file in files:
file.close()
curve_for_ensamble = adderror("exp.dat", 'result.pdb.dat')
# print(curve_for_ensamble)
return curve_for_ensamble
def rmsd_pymol(structure_1, structure_2, f):
if structure_1 == structure_2:
print('double')
rmsd = 0
else:
with open("file_for_pymol.pml", "w") as file_for_pymol:
file_for_pymol.write("""
load {s1}
load {s2}
align {s3}, {s4}
quit
""".format(s1=structure_1, s2=structure_2,
s3=os.path.splitext(structure_1)[0],
s4=os.path.splitext(structure_2)[0]))
#out_pymol = subprocess.check_output("module add pymol-1.8.2.1-gcc; pymol -c file_for_pymol.pml | grep Executive:; module rm pymol-1.8.2.1-gcc", shell=True)
out_pymol = subprocess.check_output(" pymol -c file_for_pymol.pml | grep Executive:", shell=True)
rmsd = float(out_pymol[out_pymol.index(b'=') + 1:out_pymol.index(b'(') - 1])
f.write('RMSD ' + '\t' + structure_1 + ' and ' + structure_2 + ' = ' + str(rmsd) + '\n')
print('RMSD ', structure_1, ' and ', structure_2, ' = ', rmsd)
return rmsd
def main():
args = get_argument()
os.chdir(args.mydirvariable)
global list_pdb_file
list_pdb_file = find_pdb_file(args.mydirvariable)
test_argument(args.n_files, args.k_options, list_pdb_file)
if args.verbose:
print_parametrs_verbose(args.n_files, args.k_options, list_pdb_file)
for i in range(args.repeat):
filename = 'output_' + str(i) + str('_') + str(args.n_files)
with open(filename, 'w') as f:
select_random_files_for_experiment = random.sample(list_pdb_file,
args.n_files)
print(select_random_files_for_experiment)
data_for_experiment = random.sample(select_random_files_for_experiment,
args.k_options)
f.write('N selected file' + str(select_random_files_for_experiment) + '\n')
f.write('k options' + str(data_for_experiment) + '\n')
data_for_experiment_modified = [None] * args.k_options
for j in range(args.k_options):
data_for_experiment_modified[j] = str(data_for_experiment[j]) + ".dat"
weight = np.random.dirichlet(np.ones(args.k_options), size=1)[0]
file_and_weight = zip(weight, data_for_experiment)
ensamble_fit(select_random_files_for_experiment,
data_for_experiment_modified, file_and_weight)
work_with_result_from_ensamble()
if args.k_options == 1:
do_result(args.result, select_random_files_for_experiment,
data_for_experiment, f)
else:
print('not implemented')
sys.exit(0)
if __name__ == '__main__':
main()
|
spirit01/SAXS
|
testing_ensamble_krabik.py
|
Python
|
mit
| 9,282
|
[
"PyMOL"
] |
c46ef9a07ae905c549e111f5557cc7b8b9cab9414d554baa2801668232c328e7
|
# -*- coding: utf-8 -*-
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: base_astra_recon
:platform: Unix
:synopsis: A base for all Astra toolbox reconstruction algorithms
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import logging
import astra
import numpy as np
import math
import copy
from savu.plugins.base_recon import BaseRecon
class BaseAstraRecon(BaseRecon):
"""
A Plugin to perform Astra toolbox reconstruction
:param FBP_filter: The FBP reconstruction filter type (none|ram-lak|\
shepp-logan|cosine|hamming|hann|tukey|lanczos|triangular|gaussian|\
barlett-hann|blackman|nuttall|blackman-harris|blackman-nuttall|\
flat-top|kaiser|parzen). Default: 'ram-lak'.
"""
def __init__(self, name='BaseAstraRecon'):
super(BaseAstraRecon, self).__init__(name)
self.res = False
def get_parameters(self):
"""
Get reconstruction_type and number_of_iterations parameters
"""
logging.error("get_parameters needs to be implemented")
raise NotImplementedError("get_parameters needs to be implemented")
def setup(self):
super(BaseAstraRecon, self).setup()
out_dataset = self.get_out_datasets()
# if res_norm is required then setup another output dataset
if len(out_dataset) == 2:
self.res = True
out_pData = self.get_plugin_out_datasets()
in_data = self.get_in_datasets()[0]
dim_detX = in_data.find_axis_label_dimension('y', contains=True)
shape = (in_data.get_shape()[dim_detX],
self.parameters['number_of_iterations'])
label = ['vol_y.voxel', 'iteration.number']
pattern = {'name': 'SINOGRAM', 'slice_dir': (0,), 'core_dir': (1,)}
out_dataset[1].create_dataset(axis_labels=label, shape=shape)
out_dataset[1].add_pattern(pattern['name'],
slice_dir=pattern['slice_dir'],
core_dir=pattern['core_dir'])
out_pData[1].plugin_data_setup(pattern['name'],
self.get_max_frames(), fixed=True)
def pre_process(self):
self.alg, self.iters = self.get_parameters()
if '3D' in self.alg:
self.setup_3D()
self.reconstruct = self.astra_3D_recon
else:
self.setup_2D()
self.reconstruct = self.astra_2D_recon
def setup_2D(self):
pData = self.get_plugin_in_datasets()[0]
dim_detX = pData.get_data_dimension_by_axis_label('x', contains=True)
self.sino_shape = pData.get_shape()
self.nDims = len(self.sino_shape)
self.sino_dim_detX = dim_detX if self.nDims == 2 else dim_detX-1
self.nCols = self.sino_shape[dim_detX]
self.slice_dir = pData.get_slice_dimension()
self.nSinos = self.sino_shape[self.slice_dir] if self.nDims == 3 else 1
self.slice_func = self.slice_sino(self.nDims)
l = self.sino_shape[dim_detX]
c = np.linspace(-l/2.0, l/2.0, l)
x, y = np.meshgrid(c, c)
self.mask = np.array((x**2 + y**2 < (l/2.0)**2), dtype=np.float)
self.mask_id = True if not self.parameters['sino_pad'] and 'FBP' not \
in self.alg else False
if not self.parameters['sino_pad']:
self.manual_mask = copy.copy(self.mask)
self.manual_mask[self.manual_mask == 0] = np.nan
else:
self.manual_mask = False
def slice_sino(self, nDims):
if nDims == 2:
return lambda x, sslice: np.expand_dims(
x, axis=self.slice_dir)[sslice]
else:
return lambda x, sslice: x[sslice]
def astra_2D_recon(self, sino, cors, angles, vol_shape, init):
sslice = [slice(None)]*self.nDims
recon = np.zeros(self.vol_shape)
if self.nDims == 2:
recon = np.expand_dims(recon, axis=self.slice_dir)
if self.res:
res = np.zeros((self.vol_shape[self.slice_dir], self.iters))
if self.nDims == 2:
res = np.expand_dims(recon, axis=self.slice_dir)
proj_id = False
# create volume geom
vol_geom = astra.create_vol_geom(*vol_shape[0:1])
for i in range(self.nSinos):
sslice[self.slice_dir] = i
try:
cor = cors[i]
except Exception:
cor = cors[0]
pad_sino = self.pad_sino(self.slice_func(sino, sslice), cor)
# create projection geom
proj_geom = astra.create_proj_geom(
'parallel', 1.0, pad_sino.shape[self.sino_dim_detX],
np.deg2rad(angles))
# create sinogram id
sino_id = astra.data2d.create("-sino", proj_geom, pad_sino)
# create reconstruction id
if init is not None:
rec_id = astra.data2d.create('-vol', vol_geom, init[sslice])
else:
rec_id = astra.data2d.create('-vol', vol_geom)
if self.mask_id:
self.mask_id = astra.data2d.create('-vol', vol_geom, self.mask)
# setup configuration options
cfg = self.set_config(rec_id, sino_id, proj_geom, vol_geom)
# create algorithm id
alg_id = astra.algorithm.create(cfg)
# run algorithm
if self.res:
for j in range(self.iters):
# Run a single iteration
astra.algorithm.run(alg_id, 1)
res[i, j] = astra.algorithm.get_res_norm(alg_id)
else:
astra.algorithm.run(alg_id, self.iters)
# get reconstruction matrix
if self.manual_mask is not False:
recon[sslice] = self.manual_mask*astra.data2d.get(rec_id)
else:
recon[sslice] = astra.data2d.get(rec_id)
# delete geometry
self.delete(alg_id, sino_id, rec_id, proj_id)
if self.res:
return [recon, res]
else:
return recon
def set_config(self, rec_id, sino_id, proj_geom, vol_geom):
cfg = astra.astra_dict(self.alg)
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sino_id
if 'FBP' in self.alg:
cfg['FilterType'] = self.parameters['FBP_filter']
if 'projector' in self.parameters.keys():
proj_id = astra.create_projector(
self.parameters['projector'], proj_geom, vol_geom)
cfg['ProjectorId'] = proj_id
# mask not currently working correctly for SIRT or SART algorithms
sirt_or_sart = [a for a in ['SIRT', 'SART'] if a in self.alg]
if self.mask_id and not sirt_or_sart:
cfg['option'] = {}
cfg['option']['ReconstructionMaskId'] = self.mask_id
cfg = self.set_options(cfg)
return cfg
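# For a plain 2D FBP reconstruction the dictionary assembled above is roughly
# (illustrative values; the exact base keys come from astra.astra_dict):
#     {'type': 'FBP', 'ReconstructionDataId': rec_id,
#      'ProjectionDataId': sino_id, 'FilterType': 'ram-lak'}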
def delete(self, alg_id, sino_id, rec_id, proj_id):
astra.algorithm.delete(alg_id)
if self.mask_id:
astra.data2d.delete(self.mask_id)
astra.data2d.delete(sino_id)
astra.data2d.delete(rec_id)
if proj_id:
astra.projector.delete(proj_id)
def pad_sino(self, sino, cor):
centre_pad = (0, 0) if '3D' in self.alg else \
self.array_pad(cor, sino.shape[self.sino_dim_detX])
sino_width = sino.shape[self.sino_dim_detX]
new_width = sino_width + max(centre_pad)
sino_pad = \
int(math.ceil(float(sino_width)/new_width * self.sino_pad)/2)
pad = np.array([sino_pad]*2) + centre_pad
pad_tuples = [(0, 0)]*(len(sino.shape)-1)
pad_tuples.insert(self.pad_dim, tuple(pad))
return np.pad(sino, tuple(pad_tuples), mode='edge')
def array_pad(self, ctr, nPixels):
width = nPixels - 1.0
alen = ctr
blen = width - ctr
mid = (width-1.0)/2.0
shift = round(abs(blen-alen))
p_low = 0 if (ctr > mid) else shift
p_high = shift + 0 if (ctr > mid) else 0
return np.array([int(p_low), int(p_high)])
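# Worked example for array_pad (values chosen for illustration): with
# ctr=60.0 and nPixels=100, width=99, alen=60, blen=39, mid=49 and
# shift=round(|39-60|)=21; since ctr > mid the returned padding is [0, 21].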
def get_max_frames(self):
return 16
## Add this as citation information:
## W. van Aarle, W. J. Palenstijn, J. De Beenhouwer, T. Altantzis, S. Bals, \
## K J. Batenburg, and J. Sijbers, "The ASTRA Toolbox: A platform for advanced \
## algorithm development in electron tomography", Ultramicroscopy (2015),
## http://dx.doi.org/10.1016/j.ultramic.2015.05.002
#
## Additionally, if you use parallel beam GPU code, we would appreciate it if \
## you would refer to the following paper:
##
## W. J. Palenstijn, K J. Batenburg, and J. Sijbers, "Performance improvements
## for iterative electron tomography reconstruction using graphics processing
## units (GPUs)", Journal of Structural Biology, vol. 176, issue 2, pp. 250-253,
## 2011, http://dx.doi.org/10.1016/j.jsb.2011.07.017
|
FedeMPouzols/Savu
|
savu/plugins/reconstructions/base_astra_recon.py
|
Python
|
gpl-3.0
| 9,521
|
[
"Gaussian"
] |
7a2150082729dda7b8750b1c0d9a8037f38ec05aeb7c72d37910d497feb85508
|
"""
############################################################
Pyndorama - Model
############################################################
:Author: *Carlo E. T. Oliveira*
:Contact: carlo@nce.ufrj.br
:Date: 2013/09/13
:Status: This is a "work in progress"
:Revision: 0.1.5
:Home: `Labase <http://labase.selfip.org/>`__
:Copyright: 2013, `GPL <http://is.gd/3Udt>`__.
Game model comprising of Loci and Actors
0.1.4 Add commands for game action
0.1.5 Add selectable action to holder
"""
THETHING = None
class Thing:
"""A commom element for every other element."""
ALL_THINGS = {}
INVENTORY = {}
CONTROL = {}
def __init__(self, fab=None, part=None, o_Id=None, **kwargs):
self.items = []
#print ("Thing init:", fab, part, o_Id)
self.create(fab=fab, part=part, o_Id=o_Id, **kwargs)
self.container = self.o_part = self.o_Id = self.current = None
def create(self, fab=None, part=None, o_Id=None, **kwargs):
"""Fabricate and return a given part."""
#print ("create:", fab, part, o_Id, kwargs, self)
self.o_part, kwargs['o_Id'] = self.__class__.__name__, o_Id
self.o_Id = o_Id or len(Thing.ALL_THINGS)
self.o_part = part
(fab or self).register(o_Id or 13081299999999, self)
self._add_properties(**kwargs)
self._do_create()
def activate(self, o_emp=None, o_cmd="DoAdd", o_part=None, o_Id=None, o_place=None, **kwargs):
"""Activate a given command."""
try:
kwargs['o_place'] = o_place
thing_class = Thing.CONTROL[o_cmd]
#print("activate:", o_emp, o_cmd, o_part, o_Id, o_place, kwargs)
return thing_class(o_emp, fab=self, o_part=o_part, o_Id=o_Id, **kwargs)
except Exception:
print("error activating %s id = %s, place %s" % (o_part, o_Id, o_place))
def employ(self, o_part=None, o_Id=None, **kwargs):
"""Fabricate and locate a given part."""
#print ("employ:", o_part, o_Id, kwargs)
try:
thing_class = Thing.INVENTORY[o_part]
return thing_class(fab=self, part=o_part, o_Id=o_Id, **kwargs)
except Exception:
print("error creating %s id = %s" % (o_part, o_Id))
def register(self, oid, entry):
"""Append an entry to this resgistry. """
Thing.ALL_THINGS[oid] = entry
def remove(self, item):
"""Remove this thing from this container. """
self.items.remove(item)
return self
def append(self, item):
"""Append this thing to this container. """
self.items.append(item)
return self
def deploy(self, employ=None, **kwargs):
"""Deploy this thing at a certain site. """
for item in self.items:
item.deploy(employ=employ, **kwargs)
def shape(self, o_Id, **kwargs):
"""Set member as current. """
shaped = Thing.ALL_THINGS[o_Id]
[kwargs.pop(prop) for prop in 'o_placeid o_place o_gcomp o_item o_part'.split() if prop in kwargs]
print("Set member as current. ", kwargs)
shaped._add_properties(**kwargs)
return shaped
def delete(self, o_Id, employ):
"""Set member as current. """
deleted = Thing.ALL_THINGS[o_Id]
deleted.undeploy(employ)
oid = deleted.o_placeid if hasattr(deleted, "o_placeid") else deleted.o_place.Id
Thing.ALL_THINGS[oid].remove(deleted)
del Thing.ALL_THINGS[o_Id]
return self.current
def up(self, o_Id):
"""Set member as current. """
self.current = Thing.ALL_THINGS[o_Id]
return self.current
def list(self, employ=None, kind='Locus', **kwargs):
"""List member. """
[employ(**{argument: getattr(item, argument)
for argument in dir(item) if argument[:2] in "o_ s_"})
for item in Thing.ALL_THINGS.values() if item.o_part == kind]
def visit(self, visiting):
"""Visit across the structure. """
for item in self.items:
visiting(item)
def _do_create(self):
"""Finish thing creation. """
pass
def _add_properties(self, **kwargs):
"""Finish thing creation. """
#print (kwargs)
[setattr(self, argument, value)
for argument, value in kwargs.items() if argument[:2] in "o_ s_"]
class Holder(Thing):
"""A placeholder for gui positioning scaffolding."""
def __init__(self, fab=None, part=None, o_Id=None, **kwargs):
self.items = []
if 'o_place' not in kwargs or not kwargs['o_place']:
kwargs['o_placeid'] = THETHING.current.o_Id
THETHING.current.append(self)
self.o_part, kwargs['o_Id'] = self.__class__.__name__, o_Id
(fab or self).register(o_Id, self)
self._add_properties(**kwargs)
def undeploy(self, employ=None, **kwargs):
"""Deploy this thing at a certain site. """
for item in self.items:
item.deploy(employ=employ, **kwargs)
employ(**{argument: getattr(self, argument)
for argument in dir(self) if argument[:2] in "o_ s_"})
def deploy(self, employ=None, **kwargs):
"""Deploy this thing at a certain site. """
employ(**{argument: getattr(self, argument)
for argument in dir(self) if argument[:2] in "o_ s_"})
for item in self.items:
item.deploy(employ=employ, **kwargs)
def execute(self, employ=None, **kwargs):
"""Execute a given action. """
[item.execute(employ=employ, **kwargs) for item in self.items]
class Action(Holder):
"""A placeholder describing an action to be executed by a holder."""
def __init__(self, fab=None, part=None, o_Id=None, o_placeid=None, **kwargs):
Thing.ALL_THINGS[o_placeid].append(self)
kwargs.update(o_part=self.__class__.__name__, o_Id=o_Id, o_placeid=o_placeid)
(fab or self).register(o_Id, self)
self._add_properties(**kwargs)
def deploy(self, employ=None, **kwargs):
"""Deploy this thing at a certain site. """
args = {argument: getattr(self, argument)
for argument in dir(self) if argument[:2] in "o_ s_"}
#args.update(o_Id=self.o_placeid)
#print("Action deploy", args, Thing.ALL_THINGS[self.o_placeid].items)
employ(**args)
def execute(self, employ=None, **kwargs):
"""Execute a given action. """
args = {argument: getattr(self, argument)
for argument in dir(self) if argument[:2] in "o_ s_"}
args.update(o_cmd=self.o_act, o_gcomp=self.o_acomp, o_Id=self.o_item)
#employ(**args)
THETHING.activate(employ, **args)
class Locus(Thing):
"""A place where things happen."""
def __init__(self, fab=None, part=None, o_Id=None, **kwargs):
self.items = []
#print ("Thing init:", fab, part, o_Id)
self.create(fab=fab, part=part, o_Id=o_Id, **kwargs)
def _do_create(self):
"""Finish thing creation. """
container = THETHING # Thing.ALL_THINGS.setdefault(self.o_place, THETHING)
self.container = container.append(self)
#print("""Finish thing creation. """, THETHING, self.o_place, self.container, self)
def deploy(self, employ=None, **kwargs):
"""Deploy this thing at a certain site. """
THETHING.current = self
employ(**{argument: getattr(self, argument)
for argument in dir(self) if argument[:2] in "o_ s_"})
for item in self.items:
item.deploy(employ=employ, **kwargs)
class Grid(Locus):
"""A mapped grid with sub elements."""
def __init__(self, fab=None, part=None, o_Id=None, **kwargs):
#print ("Thing init:", fab, part, o_Id)
kwargs['o_gcomp'] = 'div'
self.create(fab=fab, part=part, o_Id=o_Id, **kwargs)
self.items, kwargs['o_place'], kwargs['o_gcomp'] = [], o_Id, 'div'
grid, invent = kwargs.pop('o_grid')
args = {key[1:]: value for key, value in kwargs.items() if key[:3] in "go_ gs_ "}
objid, args['o_place'], obj = o_Id, o_Id, self # kwargs # .items()
#return
self.items = [
invent[ckey].update(args) or Thing.INVENTORY[invent[ckey]['o_part']](
o_Id=objid+str(i), **invent[ckey]) for i, ckey in enumerate(grid)]
def ___do_create(self):
"""Finish thing creation. """
self.container = Thing.ALL_THINGS.setdefault(
self.o_place, THETHING).append(self)
class Dragger(Holder):
"""A drag decorator."""
def __init__(self, fab=None, part=None, o_Id=None, **kwargs):
Holder.__init__(self, o_Id=o_Id)
dropper, dragger, self.kwargs = kwargs['o_drop'], kwargs['o_place'], kwargs
self.kwargs = {key: value for key, value in kwargs.items()
if key in 'o_drop '}
self.dropper = Thing.ALL_THINGS[dropper]
self.dragger = Thing.ALL_THINGS[dragger]
self.dropper.receive = self.receive
self.dragger.visit(self.visiting)
kwargs['action'] = self.dropper.receive
self._add_properties(**kwargs)
THETHING.append(self)
def visiting(self, visited):
visited._add_properties(**self.kwargs)
visited.enter = self.enter
def receive(self, guest_id):
return Thing.ALL_THINGS[guest_id].enter(self.dropper)
def enter(self, host):
self.container = host
return self.o_Id
class Command(Thing):
"""A commom element to any kind of action."""
SCRIPT = []
def __init__(self, employ, fab=THETHING, o_part=None, o_Id=None, **kwargs):
Command.SCRIPT.append(self)
#print ("Command init:", fab, o_part, o_Id, kwargs)
self.execute(employ, fab=fab, part=o_part, o_Id=o_Id, **kwargs)
def create(self, employ, fab=None, part=None, o_Id=None, **kwargs):
"""Fabricate and return a given part."""
pass
class DoAdd(Command):
"""Add an element to another."""
def __init__(self, employ, fab=THETHING, o_part=None, o_Id=None, **kwargs):
Command.__init__(self, employ, fab=fab, o_part=o_part, o_Id=o_Id, **kwargs)
def execute(self, employ, fab=None, part=None, o_Id=None, **kwargs):
"""Add an element and deploy a given part."""
element = fab.employ(part, o_Id, **kwargs)
#print ("DoAdd execute:", fab, part, o_Id, element, employ, kwargs)
element.deploy(employ)
class DoExecute(Command):
"""Execute the command associated with this element."""
def execute(self, employ, fab=None, part=None, o_Id=None, **kwargs):
"""Deploy the current element to the front."""
#print('DoExecute:', o_Id, employ)
Thing.ALL_THINGS[o_Id].execute(employ)
class DoUp(Command):
"""Set element as current."""
def execute(self, employ, fab=None, part=None, o_Id=None, **kwargs):
"""Deploy the current element to the front."""
#print('DoUp:', o_Id, employ)
element = fab.up(o_Id)
employ(o_Id=element.o_Id, **kwargs)
#element.deploy(employ)
class DoShape(Command):
"""Shape current element."""
def execute(self, employ, fab=None, part=None, o_Id=None, **kwargs):
"""Reshape current element."""
#kwargs.update(o_gcomp='shape')
old_gcomp = kwargs.pop('o_gcomp')
element = fab.shape(o_Id, **kwargs)
element.o_gcomp, old_gcomp = old_gcomp, element.o_gcomp
element.deploy(employ)
print('DoShape:', o_Id, employ, old_gcomp, element.o_gcomp, kwargs)
element.o_gcomp = old_gcomp
class DoDel(Command):
"""Delete current element."""
def execute(self, employ, fab=None, part=None, o_Id=None, **kwargs):
"""Delete current element."""
#print ("DoDel execute:", fab, part, o_Id, kwargs)
fab.delete(o_Id, employ)
class DoList(Command):
"""List elements from another element."""
def execute(self, employ, fab=None, part=None, o_Id=None, o_kind=None, **kwargs):
"""Ask fabric to list all."""
fab.list(employ, o_kind)
def init():
global THETHING
THETHING = Thing(o_Id='book')
Thing.INVENTORY.update(
Locus=Locus, Holder=Holder, TheThing=THETHING,
Grid=Grid, Dragger=Dragger, Action=Action)
Thing.CONTROL.update(
DoAdd=DoAdd, DoList=DoList, DoUp=DoUp, DoDel=DoDel, DoShape=DoShape,
DoExecute=DoExecute)
#print (Thing.INVENTORY, Thing.ALL_THINGS)
return THETHING
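# Minimal usage sketch (hypothetical ids and a no-op employ callback; not
# part of the original module):
#
#     import model
#     book = model.init()
#     book.activate(lambda **kw: None, o_cmd="DoAdd", o_part="Locus",
#                   o_Id="room1", o_place="book")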
|
labase/pyndorama
|
src/model.py
|
Python
|
gpl-2.0
| 12,473
|
[
"VisIt"
] |
25a3062efc41ba6fbca2db3f8bfe2ebdad9ac0e63b671ea24db5fe9889c91a97
|
"""gro.py: Used for loading Gromacs GRO files.
"""
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2015 Stanford University and the Authors
#
# Authors: Robert McGibbon, Lee-Ping Wang, Peter Eastman
# Contributors: Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and the Authors. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
##############################################################################
# Imports
##############################################################################
import os
import sys
import itertools
from re import sub, match
# import element as elem
import numpy as np
import mdtraj as md
from mdtraj.utils import in_units_of, cast_indices, ensure_type
from mdtraj.formats import pdb
from mdtraj.core import element as elem
from mdtraj.formats.registry import _FormatRegistry
##############################################################################
# Code
##############################################################################
@_FormatRegistry.register_loader('.gro')
def load_gro(filename, stride=None, atom_indices=None, frame=None):
"""Load a GROMACS GRO file.
Parameters
----------
filename : str
Path to the GRO file on disk.
stride : int, default=None
Only read every stride-th model from the file
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. These indices are zero-based.
frame : int, optional
Use this option to load only a single frame from a trajectory on disk.
If frame is None, the default, the entire trajectory will be loaded.
If supplied, ``stride`` will be ignored.
"""
from mdtraj.core.trajectory import _parse_topology, Trajectory
with GroTrajectoryFile(filename, 'r') as f:
topology = f.topology
if frame is not None:
f.seek(frame)
n_frames = 1
else:
n_frames = None
return f.read_as_traj(n_frames=n_frames, stride=stride,
atom_indices=atom_indices)
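# Example (file name is illustrative):
#     traj = load_gro('conf.gro', stride=2)
#     print(traj.n_atoms, traj.n_frames)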
@_FormatRegistry.register_fileobject('.gro')
class GroTrajectoryFile(object):
"""Interface for reading and writing to GROMACS GRO files.
Parameters
----------
filename : str
The filename to open. A path to a file on disk.
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for write.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
Attributes
----------
n_atoms : int
The number of atoms in the file
topology : md.Topology
The topology. TODO(rmcgibbo) note about chain
See Also
--------
load_gro : High-level wrapper that returns a ``md.Trajectory``
"""
distance_unit = 'nanometers'
def __init__(self, filename, mode='r', force_overwrite=True):
self._open = False
self._file = None
self._mode = mode
if mode == 'r':
self._open = True
self._frame_index = 0
self._file = open(filename, 'r')
try:
self.n_atoms, self.topology = self._read_topology()
finally:
self._file.seek(0)
elif mode == 'w':
self._open = True
if os.path.exists(filename) and not force_overwrite:
raise IOError('"%s" already exists' % filename)
self._frame_index = 0
self._file = open(filename, 'w')
else:
raise ValueError("invalid mode: %s" % mode)
def write(self, coordinates, topology, time=None, unitcell_vectors=None,
precision=3):
"""Write one or more frames of a molecular dynamics trajectory to disk
in the GROMACS GRO format.
Parameters
----------
coordinates : np.ndarray, dtype=np.float32, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of each atom, in units of nanometers.
topology : mdtraj.Topology
The Topology defining the model to write.
time : np.ndarray, dtype=float32, shape=(n_frames), optional
The simulation time corresponding to each frame, in picoseconds.
If not supplied, the numbers 0..n_frames will be written.
unitcell_vectors : np.ndarray, dtype=float32, shape=(n_frames, 3, 3), optional
The periodic box vectors of the simulation in each frame, in nanometers.
precision : int, optional
The number of decimal places to print for coordinates. Default is 3
"""
if not self._open:
raise ValueError('I/O operation on closed file')
if not self._mode == 'w':
raise ValueError('file not opened for writing')
coordinates = ensure_type(coordinates, dtype=np.float32, ndim=3, name='coordinates', can_be_none=False, warn_on_cast=False)
time = ensure_type(time, dtype=float, ndim=1, name='time', can_be_none=True, shape=(len(coordinates),), warn_on_cast=False)
unitcell_vectors = ensure_type(unitcell_vectors, dtype=float, ndim=3, name='unitcell_vectors',
can_be_none=True, shape=(len(coordinates), 3, 3), warn_on_cast=False)
for i in range(coordinates.shape[0]):
frame_time = None if time is None else time[i]
frame_box = None if unitcell_vectors is None else unitcell_vectors[i]
self._write_frame(coordinates[i], topology, frame_time, frame_box, precision)
def read_as_traj(self, n_frames=None, stride=None, atom_indices=None):
"""Read a trajectory from a gro file
Parameters
----------
n_frames : int, optional
If positive, then read only the next `n_frames` frames. Otherwise read all
of the frames in the file.
stride : np.ndarray, optional
Read only every stride-th frame.
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. This may be slightly slower than the standard read because it requires
an extra copy, but will save memory.
Returns
-------
trajectory : Trajectory
A trajectory object containing the loaded portion of the file.
"""
from mdtraj.core.trajectory import Trajectory
topology = self.topology
if atom_indices is not None:
topology = topology.subset(atom_indices)
coordinates, time, unitcell_vectors = self.read(stride=stride, atom_indices=atom_indices)
if len(coordinates) == 0:
return Trajectory(xyz=np.zeros((0, topology.n_atoms, 3)), topology=topology)
coordinates = in_units_of(coordinates, self.distance_unit, Trajectory._distance_unit, inplace=True)
unitcell_vectors = in_units_of(unitcell_vectors, self.distance_unit, Trajectory._distance_unit, inplace=True)
traj = Trajectory(xyz=coordinates, topology=topology, time=time)
traj.unitcell_vectors = unitcell_vectors
return traj
def read(self, n_frames=None, stride=None, atom_indices=None):
"""Read data from a molecular dynamics trajectory in the GROMACS GRO
format.
Parameters
----------
n_frames : int, optional
If n_frames is not None, the next n_frames of data from the file
will be read. Otherwise, all of the frames in the file will be read.
stride : int, optional
If stride is not None, read only every stride-th frame from disk.
atom_indices : np.ndarray, dtype=int, optional
The specific indices of the atoms you'd like to retrieve. If not
supplied, all of the atoms will be retrieved.
Returns
-------
coordinates : np.ndarray, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of the atoms, in units of nanometers.
time : np.ndarray, None
The time corresponding to each frame, in units of picoseconds, or
None if no time information is present in the trajectory.
unitcell_vectors : np.ndarray, shape=(n_frames, 3, 3)
The box vectors in each frame, in units of nanometers
"""
if not self._open:
raise ValueError('I/O operation on closed file')
if not self._mode == 'r':
raise ValueError('file not opened for reading')
coordinates = []
unitcell_vectors = []
time = []
contains_time = True
atom_indices = cast_indices(atom_indices)
atom_slice = slice(None) if atom_indices is None else atom_indices
if n_frames is None:
frameiter = itertools.count()
else:
frameiter = range(n_frames)
for i in frameiter:
try:
frame_xyz, frame_box, frame_time = self._read_frame()
contains_time = contains_time and (frame_time is not None)
coordinates.append(frame_xyz[atom_slice])
unitcell_vectors.append(frame_box)
time.append(frame_time)
except StopIteration:
break
coordinates, unitcell_vectors, time = map(np.array, (coordinates, unitcell_vectors, time))
if not contains_time:
time = None
else:
time = time[::stride]
return coordinates[::stride], time, unitcell_vectors[::stride]
def _read_topology(self):
if not self._open:
raise ValueError('I/O operation on closed file')
if not self._mode == 'r':
raise ValueError('file not opened for reading')
pdb.PDBTrajectoryFile._loadNameReplacementTables()
n_atoms = None
topology = md.Topology()
chain = topology.add_chain()
residue = None
atomReplacements = {}
for ln, line in enumerate(self._file):
if ln == 1:
n_atoms = int(line.strip())
elif ln > 1 and ln < n_atoms + 2:
(thisresnum, thisresname, thisatomname, thisatomnum) = \
[line[i*5:i*5+5].strip() for i in range(4)]
thisresnum, thisatomnum = map(int, (thisresnum, thisatomnum))
if residue is None or residue.resSeq != thisresnum:
if thisresname in pdb.PDBTrajectoryFile._residueNameReplacements:
thisresname = pdb.PDBTrajectoryFile._residueNameReplacements[thisresname]
residue = topology.add_residue(thisresname, chain, resSeq=thisresnum)
if thisresname in pdb.PDBTrajectoryFile._atomNameReplacements:
atomReplacements = pdb.PDBTrajectoryFile._atomNameReplacements[thisresname]
else:
atomReplacements = {}
thiselem = thisatomname
if len(thiselem) > 1:
thiselem = thiselem[0] + sub('[A-Z0-9]','',thiselem[1:])
try:
element = elem.get_by_symbol(thiselem)
except KeyError:
element = elem.virtual
if thisatomname in atomReplacements:
thisatomname = atomReplacements[thisatomname]
topology.add_atom(thisatomname, element=element, residue=residue,
serial=thisatomnum)
return n_atoms, topology
def _read_frame(self):
if not self._open:
raise ValueError('I/O operation on closed file')
if not self._mode == 'r':
raise ValueError('file not opened for reading')
atomcounter = itertools.count()
comment = None
boxvectors = None
topology = None
xyz = np.zeros((self.n_atoms, 3), dtype=np.float32)
got_line = False
firstDecimalPos = None
atomindex = -1
for ln, line in enumerate(self._file):
got_line = True
if ln == 0:
comment = line.strip()
continue
elif ln == 1:
assert self.n_atoms == int(line.strip())
continue
if firstDecimalPos is None:
try:
firstDecimalPos = line.index('.', 20)
secondDecimalPos = line.index('.', firstDecimalPos+1)
except ValueError:
firstDecimalPos = secondDecimalPos = None
crd = _parse_gro_coord(line, firstDecimalPos, secondDecimalPos)
if crd is not None and atomindex < self.n_atoms - 1:
atomindex = next(atomcounter)
xyz[atomindex, :] = (crd[0], crd[1], crd[2])
elif _is_gro_box(line) and ln == self.n_atoms + 2:
sline = line.split()
boxvectors = tuple([float(i) for i in sline])
# the gro_box line comes at the end of the record
break
else:
raise Exception("Unexpected line in .gro file: "+line)
if not got_line:
raise StopIteration()
time = None
if 't=' in comment:
# title string (free format string, optional time in ps after 't=')
time = float(comment[comment.index('t=')+2:].strip())
# box vectors (free format, space separated reals), values: v1(x) v2(y)
# v3(z) v1(y) v1(z) v2(x) v2(z) v3(x) v3(y), the last 6 values may be
# omitted (they will be set to zero).
box = [boxvectors[i] if i < len(boxvectors) else 0 for i in range(9)]
unitcell_vectors = np.array([
[box[0], box[3], box[4]],
[box[5], box[1], box[6]],
[box[7], box[8], box[2]]])
return xyz, unitcell_vectors, time
def _write_frame(self, coordinates, topology, time, box, precision):
comment = 'Generated with MDTraj'
if time is not None:
comment += ', t= %s' % time
varwidth = precision + 5
fmt = '%%5d%%-5s%%5s%%5d%%%d.%df%%%d.%df%%%d.%df' % (
varwidth, precision, varwidth, precision, varwidth, precision)
assert topology.n_atoms == coordinates.shape[0]
lines = [comment, ' %d' % topology.n_atoms]
if box is None:
box = np.zeros((3,3))
for i in range(topology.n_atoms):
atom = topology.atom(i)
residue = atom.residue
serial = atom.serial
if serial is None:
serial = atom.index
if serial >= 100000:
serial -= 100000
lines.append(fmt % (residue.resSeq, residue.name, atom.name, serial,
coordinates[i, 0], coordinates[i, 1], coordinates[i, 2]))
lines.append('%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f' % (
box[0,0], box[1,1], box[2,2],
box[0,1], box[0,2], box[1,0],
box[1,2], box[2,0], box[2,1]))
self._file.write('\n'.join(lines))
self._file.write('\n')
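# With the default precision=3 the per-atom format above expands to
# '%5d%-5s%5s%5d%8.3f%8.3f%8.3f', producing fixed-width lines such as
# (example values):
#     1WATER  OW1    1   0.126   1.624   1.679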
def seek(self, offset, whence=0):
"""Move to a new file position
Parameters
----------
offset : int
A number of frames.
whence : {0, 1, 2}
0: offset from start of file, offset should be >=0.
1: move relative to the current position, positive or negative
2: move relative to the end of file, offset should be <= 0.
Seeking beyond the end of a file is not supported
"""
raise NotImplementedError()
def tell(self):
"""Current file position
Returns
-------
offset : int
The current frame in the file.
"""
return self._frame_index
def close(self):
"Close the file"
if self._open:
self._file.close()
self._open = False
def __enter__(self):
"Support the context manager protocol"
return self
def __exit__(self, *exc_info):
"Support the context manager protocol"
self.close()
##############################################################################
# Utilities
##############################################################################
def _isint(word):
"""ONLY matches integers! If you have a decimal point? None shall pass!
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is an integer (only +/- sign followed by digits)
"""
return match(r'^[-+]?[0-9]+$', word)
def _isfloat(word):
"""Matches ANY number; it can be a decimal, scientific notation, what have you
CAUTION - this will also match an integer.
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is any number
"""
return match(r'^[-+]?[0-9]*\.?[0-9]*([eEdD][-+]?[0-9]+)?$', word)
def _parse_gro_coord(line, firstDecimal, secondDecimal):
""" Determines whether a line contains GROMACS data or not
@param[in] line The line to be tested
"""
if firstDecimal is None or secondDecimal is None:
return None
digits = secondDecimal - firstDecimal
try:
return tuple(float(line[20+i*digits:20+(i+1)*digits]) for i in range(3))
except ValueError:
return None
def _is_gro_box(line):
""" Determines whether a line contains a GROMACS box vector or not
@param[in] line The line to be tested
"""
sline = line.split()
if len(sline) == 9 and all([_isfloat(i) for i in sline]):
return 1
elif len(sline) == 3 and all([_isfloat(i) for i in sline]):
return 1
else:
return 0
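# A GRO box line is the last line of each frame and carries either 3 values
# (a rectangular box, e.g. "1.82060   1.82060   1.82060") or all 9
# components v1(x) v2(y) v3(z) v1(y) v1(z) v2(x) v2(z) v3(x) v3(y).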
|
hainm/mdtraj
|
mdtraj/formats/gro.py
|
Python
|
lgpl-2.1
| 19,920
|
[
"Gromacs",
"MDTraj",
"OpenMM"
] |
5e3120dc1ce57c731d92d1446acceaf9e6853affeb74fd52accbd43751467079
|
from ..utils import *
##
# Minions
# Goblin Auto-Barber
class GVG_023:
play = Buff(FRIENDLY_WEAPON, "GVG_023a")
# One-eyed Cheat
class GVG_025:
events = Summon(CONTROLLER, PIRATE - SELF).on(Stealth(SELF))
# Iron Sensei
class GVG_027:
events = OWN_TURN_END.on(Buff(RANDOM(FRIENDLY_MINIONS + MECH - SELF), "GVG_027e"))
# Trade Prince Gallywix
class GVG_028:
events = Play(OPPONENT, SPELL - ID("GVG_028t")).on(
Give(CONTROLLER, Copy(Play.Args.CARD)),
Give(OPPONENT, "GVG_028t")
)
class GVG_028t:
play = ManaThisTurn(CONTROLLER, 1)
##
# Spells
# Tinker's Sharpsword Oil
class GVG_022:
play = Buff(FRIENDLY_WEAPON, "GVG_022a")
combo = Buff(FRIENDLY_WEAPON, "GVG_022a"), Buff(RANDOM_FRIENDLY_CHARACTER, "GVG_022b")
# Sabotage
class GVG_047:
play = Destroy(RANDOM_ENEMY_MINION)
combo = Destroy(ENEMY_WEAPON | RANDOM_ENEMY_MINION)
##
# Weapons
# Cogmaster's Wrench
class GVG_024:
update = Find(FRIENDLY_MINIONS + MECH) & Refresh(SELF, {GameTag.ATK: +2})
|
liujimj/fireplace
|
fireplace/cards/gvg/rogue.py
|
Python
|
agpl-3.0
| 979
|
[
"TINKER"
] |
77421a182dcc360cb16961aed1c6000fd223ae9b1b53d9c3d3c9cc7746080790
|
import searchengine as se
import ast
eval_tests = [
"a and b",
"a or b",
"a and (b or c)",
]
class MyVisitor(ast.NodeVisitor):
def visit_Name(self, node):
print 'Found name "%s"' % node.id
def visit_BoolOp(self, node):
# ast.BoolOp stores its operator in .op and its operands in .values
if isinstance(node.op, (ast.And, ast.Or)):
# fields = [(a, _format(b)) for a, b in iter_fields(node)]
print 'Found BoolOp "%s"' % node
print node.values
for test in eval_tests:
node = ast.parse(test)
print ast.dump(node)
# MyVisitor().visit(node)
print '\n'
if __name__ == '__main__':
'''
2. Boolean operations. Many search engines support Boolean queries, which allow
users to construct searches like "python OR perl." An OR search can work by
doing the queries separately and combining the results, but what about "python
AND (program OR code)"? Modify the query methods to support some basic
Boolean operations.
3. Exact matches. Search engines often support "exact match" queries, where the
words in the page must match the words in the query in the same order with no
additional words in between. Create a new version of getrows that only returns
results that are exact matches. (Hint: you can use subtraction in SQL to get the
difference between the word locations.)
'''
dbname = 'searchindex.db'
if True:
crawler = se.crawler(dbname)
crawler.createindextables()
pages = [
'https://www.zhihu.com/',
'https://github.com/'
]
crawler.crawl(pages, depth=2)
crawler.calculatepagerank()
else:
searcher = se.searcher(dbname)
q = 'zhihu career'
print searcher.query(q)
|
vinjn/MLStudy
|
miner/main.py
|
Python
|
mit
| 1,770
|
[
"VisIt"
] |
38c64c8dbf1c38f55a3a1b48059c92ca8f73550205dc287faf08b24d57512e72
|
"""Structured Commons fingerprinting utilities."""
from __future__ import print_function
import base64
import codecs
import hashlib
import re
import sys
if hasattr(int, 'from_bytes'):
# python 3
long = int
def bytes_to_long(barray):
return int.from_bytes(barray, 'big')
def long_to_bytes(val):
return val.to_bytes(32, 'big')
else:
# python 2:
def bytes_to_long(barray):
v = codecs.encode(barray, 'hex_codec')
return long(v, base=16)
def long_to_bytes(val):
v = '%064x' % val
f = codecs.decode(v, 'hex_codec')
assert len(f) == 32
return bytearray(f)
def fletcher(barray):
"""Return the two Fletcher-16 sums of a byte array."""
assert isinstance(barray, bytes) or isinstance(barray, bytearray)
a = 0
b = 0
for c in barray:
a = (a + c) % 255
b = (a + b) % 255
return (a, b)
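# Hedged worked example (not part of the original module): running the two
# Fletcher-16 sums by hand over the bytes of "abcde" gives a = 240 and
# b = 200, i.e. the classic Fletcher-16 check value 0xC8F0.
#
#   fletcher(bytearray(b'abcde'))  ->  (240, 200)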
def validate_name(name):
"""Ensure a name is valid.
A valid name must be a character string, not empty and not contain
codes between 0 and 31 inclusive.
"""
# a name is a character string
assert isinstance(name, str) or isinstance(name, type(u'')), \
"name %r is not a string" % name
assert len(name) > 0 # name must not be empty
for c in name:
assert ord(c) > 31, \
"invalid character %r (code %d) found in name %r" % (c, ord(c), name)
class fingerprintable(object):
"""Base class for Python objects that can be fingerprinted."""
def visit(self, v):
"""Visitor dispatch method to be implemented by sub-classes.
This must call either:
- v.enter_file(sz), followed by
v.visit_data(b) zero or more times, followed by
v.leave_file() once; or
- v.enter_dict() once, followed by
v.visit_entry(name, t, obj) zero or more times, followed by
v.leave_dict() once.
See the help of class ``visitor`` for details.
"""
pass
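# Hedged sketch (not part of the original module): a minimal concrete
# fingerprintable wrapping an in-memory byte string, to illustrate the
# file-side visitor protocol described above. The class name is hypothetical.
class _bytes_file(fingerprintable):
    def __init__(self, data):
        self._data = bytearray(data)
    def visit(self, v):
        # announce the total size, feed the payload, then close the file
        v.enter_file(len(self._data))
        v.visit_data(self._data)
        v.leave_file()
# fingerprint(_bytes_file(b'hello')).compact() would yield an 'fp:...' string.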
class compute_visitor(object):
"""Visitor to compute fingerprints over abstract object trees.
It applies to objects that implement the ``fingerprintable`` interface.
compute_visitor :: Fingerprintable a => a -> fingerprint
"""
def __init__(self, verbose = False):
"""Instantiate a visitor.
If verbose is non-false, the visitor prints detail on the
standard output.
"""
self._v = verbose
def _finish(self):
s = self._h.digest()
if isinstance(s, str): # python 2 compat
s = bytearray(s)
self._fp = s
def fingerprint(self):
"""Return the fingerprint computed by this visitor."""
return fingerprint(self._fp)
def enter_file(self, sz):
"""Start fingerprinting an object file."""
self._sz = sz
self._cnt = 0
self._h = hashlib.sha256()
self._h.update(b's')
self._h.update(bytearray('%d' % sz, 'ascii'))
self._h.update(b'\0')
def visit_data(self, b):
"""Fingerprint some more data from a file previously entered."""
assert isinstance(b, bytearray) or isinstance(b, bytes)
self._cnt += len(b)
self._h.update(b)
def leave_file(self):
"""Finish fingerprinting an object file."""
assert self._cnt == self._sz
self._finish()
if self._v:
print("file, sz %d (%s)" % (self._sz, fingerprint(self._fp).compact()), file=sys.stderr)
def enter_dict(self):
"""Start fingerprinting an object dictionary."""
self._ents = {}
if self._v:
print("dictionary, entering:", file=sys.stderr)
def visit_entry(self, name, t, obj):
"""Fingerprint some more data from a dictionary previously entered.
Arguments:
name -- the entry name in the dictionary (string)
t -- either 't', 's' or 'l' depending on the entity
obj -- either a fingerprint or another fingerprintable object
"""
if self._v:
print("entry %r: " % name, end='', file=sys.stderr)
# name must have valid form
validate_name(name)
# names must be unique in dictionary
assert name not in self._ents, "duplicate name %r" % name
if (t == 'l') and hasattr(obj, 'binary'):
self._ents[name] = (t, obj.binary())
if self._v:
print("fingerprint (%s)" % obj.compact(), file=sys.stderr)
elif t in ['s', 't'] and isinstance(obj, fingerprintable):
fpv = compute_visitor(self._v)
obj.visit(fpv)
self._ents[name] = (t, fpv._fp)
else:
print(type(t), t, obj, type(obj), file=sys.stderr)
raise TypeError("unknown entity type in dictionary")
def leave_dict(self):
"""Finish fingerprinting an object dictionary."""
keys = sorted(self._ents.keys())
buf = bytearray()
for k in keys:
t, fp = self._ents[k]
buf += bytearray(t, 'ascii')
buf += b':'
buf += bytearray(k, 'utf-8')
buf += b'\0'
buf += fp
self._h = hashlib.sha256()
self._h.update(b't')
self._h.update(bytearray('%d' % len(buf), 'ascii'))
self._h.update(b'\0')
self._h.update(buf)
self._finish()
if self._v:
print("leaving dictionary (%s)" % fingerprint(self._fp).compact(), file = sys.stderr)
class fingerprint(object):
"""fingerprint(fingerprintable) -> compute fingerprint of object
fingerprint(str) -> parse fingerprint representation
fingerprint(fingerprint or bytearray) -> copy fingerprint
"""
def __init__(self, obj):
"""Initialize a fingerprint. See help(fingerprint) for signature."""
if hasattr(obj, 'binary'): # assume already a fingerprint
v = obj.binary()
assert len(v) == 32
self._value = v
elif isinstance(obj, int) or isinstance(obj, long):
self._value = long_to_bytes(obj)
elif isinstance(obj, str) or isinstance(obj, type(u'')):
fp, fmt, errmsg = parse(obj)
if fp is None:
raise RuntimeError(errmsg)
self._value = fp._value
elif (isinstance(obj, bytearray) or isinstance(obj, bytes)) and len(obj) == 32:
self._value = bytearray(obj)
elif isinstance(obj, fingerprintable):
v = compute_visitor()
obj.visit(v)
self._value = v._fp
else:
raise TypeError("a string, fingerprint or fingerprintable is required")
def __int__(self):
"""Return the binary representation of the fingerprint as a long integer."""
return bytes_to_long(self._value)
__long__ = __int__
def __repr__(self):
"""Pretty print a fingerprint object."""
return "<%s>" % self.compact()
    def __cmp__(self, other):
        """Compare two fingerprints (Python 2 only)."""
        return cmp(self._value, other._value)
    def __eq__(self, other):
        """Test two fingerprints for equality (used by Python 3)."""
        return self._value == other._value
def binary(self):
"""Returns the binary representation of the fingerprint as a byte array."""
return bytearray(self._value)
def carray(self):
"""Returns a C array definition equivalent to the fingerprint."""
buf = 'char fp[32] = "'
for c in self._value:
if c == ord('\\'):
buf += '\\\\'
elif c == ord('"'):
buf += '\\"'
elif c < 32 or c > 126:
buf += '\\x%02x' % c
else:
buf += chr(c)
buf += '";'
return str(buf)
def hex(self, split = None):
"""Returns the hexadecimal representaiton of the fingerprint.
The optional 'split' argument introduces hyphens for increased
readability.
"""
x = self.binary()
r = codecs.encode(x, 'hex_codec').decode('ascii')
if split is None:
split = 8
if split == 0:
split = len(r)
r = '-'.join((r[i:i+split] for i in range(0, len(r), split)))
return str(r)
def _append_fletcher(self):
x = self.binary()
a, b = fletcher(x)
x.append(a)
x.append(b)
return x
def compact(self):
"""Returns the compact representation of the fingerprint.
        The compact representation consists of the prefix 'fp:'
        followed by a Base64 encoding of the fingerprint bytes
        and a Fletcher-16 checksum.
"""
x = self._append_fletcher()
r = base64.urlsafe_b64encode(x)
r = r.decode('ascii').rstrip('=')
return str('fp:' + r)
def long(self, split = None):
"""Returns the long representation of the fingerprint.
        The long representation consists of the prefix 'fp::'
        followed by a Base32 encoding of the fingerprint bytes
        and a Fletcher-16 checksum.
The optional 'split' argument introduces hyphens for increased
readability.
"""
x = self._append_fletcher()
r = base64.b32encode(x)
r = r.decode('ascii').rstrip('=')
# insert some hyphens for clarity
if split is None:
split = 4
if split == 0:
split = len(r)
r = '-'.join((r[i:i+split] for i in range(0, len(r), split)))
return str('fp::' + r)
def empty_file_fp():
"""Return the fingerprint of the empty file."""
class E(fingerprintable):
def visit(self, fp):
fp.enter_file(0)
fp.leave_file()
return fingerprint(E())
def empty_dict_fp():
"""Return the fingerprint of the empty dictionary."""
class E(fingerprintable):
def visit(self, fp):
fp.enter_dict()
fp.leave_dict()
return fingerprint(E())
def zero_fp():
"""Return a fingerprint with all bits set to zero."""
b = b'\0'*32
if isinstance(b, str): # python 2 compat
b = bytearray(b)
return fingerprint(b)
def ones_fp():
"""Return a fingerprint with all bits set to one."""
b = b'\xff'*32
if isinstance(b, str): # python 2 compat
b = bytearray(b)
return fingerprint(b)
def _check_ck(f):
assert isinstance(f, bytearray) or isinstance(f, bytes)
fp = f[:-2]
a, b = fletcher(fp)
x, y = f[-2], f[-1]
if (a, b) != (x, y):
return (None, "invalid checksum (fp says %d %d, computed %d %d)" % (a, b, x, y))
return (fingerprint(fp), None)
def _from_compact(s):
assert isinstance(s, str) or isinstance(s, type(u''))
if not s.startswith('fp:'):
return (None, "invalid prefix (expected 'fp:', got '%s')" % s[:3])
s = s[3:]
if len(s) != 46:
return (None, "invalid length (expected 46, got %d)" % len(s))
s = bytearray(s + '==', 'ascii')
f = base64.urlsafe_b64decode(s)
if isinstance(f, str): # python 2 compat
f = bytearray(f)
return _check_ck(f)
def _from_long(s):
assert isinstance(s, str) or isinstance(s, type(u''))
s = s.upper()
if not s.startswith('FP::'):
return (None, "invalid prefix (expected 'fp::', got '%s')" % s[:4])
s = s[4:].replace('-', '')
if len(s) != 55:
return (None, "invalid length (expected 55, got %d)" % len(s))
s = bytearray(s + '=', 'ascii')
f = base64.b32decode(bytes(s))
if isinstance(f, str): # python 2 compat
f = bytearray(f)
return _check_ck(f)
def _from_hex(s):
s = s.replace('-', '')
if len(s) != 64:
return (None, "invalid length (expected 64, got %d)" % len(s))
s = bytearray(s, 'ascii')
f = codecs.decode(s, 'hex_codec')
if isinstance(f, str): # python 2 compat
f = bytearray(f)
return (fingerprint(f), None)
_rlong = re.compile(r'^[fF][pP]::[a-zA-Z2-7-]*$')
_rcompact = re.compile(r'^fp:[a-zA-Z0-9_-]*$')
_rhex = re.compile(r'^[0-9a-fA-F-]*$')
def parse(s):
"""Parse a string representation of a fingerprint.
Returns a 3-tuple containing:
- the fingerprint, or None if an error was encountered,
- the representation type that was recognized (long, compact or hex)
- an error message or None if no error was encountered.
"""
if re.match(_rlong, s) is not None:
fmt = "long"
fp, errmsg = _from_long(s)
elif re.match(_rcompact, s) is not None:
fmt = "compact"
fp, errmsg = _from_compact(s)
elif re.match(_rhex, s) is not None:
fmt = "hex"
fp, errmsg = _from_hex(s)
else:
fp = None
fmt = None
errmsg = "unknown fingerprint format"
return (fp, fmt, errmsg)
if __name__ == "__main__":
print("testing...")
l = [empty_file_fp(), empty_dict_fp(), zero_fp(), ones_fp()]
rl = [
'fp:s5pIIHf32iiVNH_eBGBMXtlXhMa7dI3w9KBrvHZ-v1NRAA',
'fp::WONE-QIDX-67NC-RFJU-P7PA-IYCM-L3MV-PBGG-XN2I-34HU-UBV3-Y5T6-X5JV-CAA',
'fp::WONEQIDX67NCRFJUP7PAIYCML3MVPBGGXN2I34HUUBV3Y5T6X5JVCAA',
'fp::woneqidx67ncrfjup7paiycml3mvpbggxn2i34huubv3y5t6x5jvcaa',
'b39a4820-77f7da28-95347fde-04604c5e-d95784c6-bb748df0-f4a06bbc-767ebf53',
'fp:FvYPWVbnhezNY5vdtqyyef0wpvj149A7SquozxdVe3jigg',
'B39A4820-77F7DA28-95347FDE-04604C5E-D95784C6-BB748DF0-F4A06BBC-767EBF53',
81236592145469940157203126607178760648047830708351681206000552870365001334611
]
for r in rl:
if isinstance(r, str):
fp, fmt, errmsg = parse(r)
assert fp is not None
l.append(fp)
fp = fingerprint(r)
l.append(fp)
for f in l:
print(f)
s = f.compact()
assert isinstance(s, str) and len(s) == 49 and s[:3].lower() == 'fp:'
s = f.hex()
assert isinstance(s, str) and len(s.replace('-','')) == 64
        s = f.long()
assert isinstance(s, str) and len(s.replace('-','')) == 59
s = f.binary()
assert isinstance(s, bytearray) and len(s) == 32
s = int(f)
assert s >= 0 and s < (2**256)
b1 = (f == f)
b2 = (f != f)
assert b1 or b2
print("ok")
|
structured-commons/tools
|
sc/fp.py
|
Python
|
unlicense
| 14,159
|
[
"VisIt"
] |
12158c3e369a0c69c5c80a97870f45b2f6a4d8de5f8d71118c82b56a9cebf017
|
import re
from math import log
from time import sleep
import subprocess
from io import StringIO
from pathlib import Path
from operator import itemgetter
from Bio import SearchIO, SeqIO
from Bio.Blast import NCBIXML, NCBIWWW
from RecBlast import print, merge_ranges
from RecBlast.WarningsExceptions import *
class Search(object):
def __init__(self, search_type):
self.search_type = search_type
def __call__(self, seq_record, species, database, database_path, local,
indent, perc_ident, verbose, database_port=None,
expect=None, megablast=True, n_threads=1, write=False, filetype=None,
**kwargs):
# query_length = len(seq_record)
if isinstance(database, Path):
return self.load(database)
elif isinstance(database, str) and database != 'stop':
return self.load(Path(database))
elif database == 'stop':
raise StopRecBlast()
elif self.search_type in ["blastn", "blastp", "blastx", "tblastx", "tblastn"]:
if verbose > 1:
print(self.search_type, 'was selected.', indent=indent)
dt = self.blast_prep(search_type=self.search_type, db_loc=database_path, database=database,
species=species, verbose=verbose, indent=indent)
return self.blast_run(seq_record=seq_record, species=species, database=dt.name, filetype=filetype,
blast_type=self.search_type, local_blast=local, expect=expect, megablast=megablast,
use_index=False, perc_ident=perc_ident, verbose=verbose, indent=indent,
n_threads=n_threads, blastdb=database_path, outtype=5, return_raw=False,
**kwargs)
elif self.search_type in ['blat', 'tblat', 'blat-transcript', 'tblat-transcript']:
if verbose > 1:
print(self.search_type, 'was selected.', indent=indent)
port = self.blat_prep(database_port=database_port, species=species, verbose=verbose, indent=indent)
return self.blat_run(seq_record=seq_record, local=local, port=port,
filetype=filetype, blat_type=self.search_type, perc_ident=perc_ident,
verbose=verbose, indent=indent, blatdb=database_path, outtype='pslx')
else:
raise SearchEngineNotImplementedError('Invalid selection for search type!')
@staticmethod
def blast_run(seq_record, species, database, blast_type, filetype="fasta",
local_blast=False, expect=0.005, megablast=True, use_index=False, perc_ident=75,
verbose=True, indent=0, n_threads=1, blastdb='/usr/db/blastdb/', outtype=5,
return_raw=False, **kwargs):
"""A wrapper function for BLAST searches.
:param seq_record: The record containing the query sequence for the search. Can be either a SeqIO.SeqRecord or
        a string with the file location.
:param str species: The species whose sequence database will be queried.
:param Union[dict, str, Path] database: The name of the database to be used in the search.
:param str blast_type: Type of BLAST search being performed
:param str filetype: Filetype of seq_record (if seq_record is a SeqRecord object, leave as default.
[default: 'fasta']
:param bool local_blast: Should the search be conducted locally or on remote servers? (BLAT searches are always
local.) [Default: False]
:param float expect: Highest expect value of BLAST results to be returned. [Default: 0.005]
:param bool megablast: Should MegaBLAST be used for nucleotide searches? [Default: True]
:param bool use_index: Should BLAST use indexes associated with the database files? [Default: False]
:param int perc_ident: Minimum percent identity required of results to be returned [Default: 75]
:param bool verbose: Verbose output? [Default: True]
:param int indent: Indent level for pretty print. [Default: 0]
:param int n_threads: Number of threads to allocate for BLAST [Default: 1]
        :param str blastdb: Path of databases for either BLAST or BLAT. [Default: '/usr/db/blastdb']
        :param int outtype: Output type. (see options for BLAST and BLAT) [Default: 5 (BLAST XML)]
:param bool return_raw: Return raw output rather than processed BioBlastRecord? [Default: False]
:param kwargs: Additional keyword arguments to pass on to BLAST/BLAT.
:return: blast_record, blast_err
"""
if isinstance(seq_record, SeqIO.SeqRecord):
pass
else:
seq_record = SeqIO.read(seq_record, filetype)
args = dict()
if verbose:
print("Now starting BLAST...", indent=indent)
if local_blast:
# build up the BLAST arguments:
args.update({'-db': str(database), '-evalue': expect,
'-outfmt': str(outtype),
'-num_threads': n_threads})
if blast_type == 'blastn':
if megablast:
args['-task'] = 'megablast'
if use_index:
args['-use_index'] = use_index
args['-perc_identity'] = perc_ident
args_expanded = list()
            for j, k in args.items():
                args_expanded.extend([j, k])
if verbose:
print('Running BLAST locally...', indent=indent)
print('Options:', indent=indent)
print(args_expanded, indent=indent + 1)
if blast_type in ["blastn", "blastp", "blastx", "tblastx", "tblastn"]:
blast_cline = [blast_type] + args_expanded
try:
blast_handle = subprocess.check_output([str(i) for i in blast_cline],
input=seq_record.format('fasta'),
universal_newlines=True, cwd=blastdb)
if isinstance(blast_handle, str):
blast_result = blast_handle
blast_err = None
else:
blast_result, blast_err = blast_handle
except subprocess.CalledProcessError:
raise
else:
raise SearchError("Invalid blast choice!")
else:
args.update(dict(program=str(blast_type), database=str(database), sequence=seq_record.format('fasta'),
entrez_query='"{}"[ORGN]'.format(species), expect=expect, perc_ident=perc_ident))
if megablast & (blast_type == 'blastn'):
args['megablast'] = 'True'
if kwargs:
args.update(**kwargs)
if verbose:
print('Submitting Remote BLAST! Options passed:', indent=indent)
for k, v in args.items():
print('{0}\t=\t{1}'.format(k, v), indent=indent + 1)
try:
blast_result = NCBIWWW.qblast(**args)
blast_err = None
except Exception as err:
print(type(err), err)
raise err
if verbose:
print('Done with Blast!', indent=indent)
if return_raw:
return blast_result, blast_err
else:
if isinstance(blast_result, StringIO):
blast_record = NCBIXML.read(blast_result)
else:
try:
with StringIO(''.join(blast_result)) as fin:
blast_record = NCBIXML.read(fin)
except Exception as err:
print('Error reading Blast Results! Aborting!', indent=indent)
print('Error details:\n', err, indent=indent)
raise err
return blast_record, blast_err
@staticmethod
def blat_run(seq_record, port, local="localhost", filetype="fasta", blat_type='blat', perc_ident=None,
verbose=True, indent=0, blatdb='/usr/db/blastdb/', outtype='pslx'):
"""A wrapper function for BLAT searches.
:param seq_record: The record containing the query sequence for the search. Can be either a SeqIO.SeqRecord or
        a string with the file location.
:param int port: Port of the gfServer to be queried
:param str local: Host address.
:param str filetype: Filetype of seq_record (if seq_record is a SeqRecord object, leave as default.
[default: 'fasta']
:param str blat_type: Type of search to conduct. Can be a BLAST type (blastn, blastp, blastx, tblastn, tblastx)
            or a BLAT type (blat, tblat). [Default: 'blat']
        :param int perc_ident: Minimum percent identity required of results to be returned [Default: None]
:param bool verbose: Verbose output? [Default: True]
:param int indent: Indent level for pretty print. [Default: 0]
        :param str blatdb: Path of databases for either BLAST or BLAT. [Default: '/usr/db/blastdb']
:param str outtype: Output type. (see options for BLAST and BLAT) [Default: pslx]
:return: blat_record, blat_err
"""
if isinstance(seq_record, SeqIO.SeqRecord):
pass
elif isinstance(seq_record, str):
seq_record = SeqIO.read(seq_record, filetype)
else:
raise TypeError('seq_record was of type {}, must be either '
'a str with filepath or a SeqRecord object!'.format(type(seq_record)))
if verbose:
print("Now starting BLAT...", indent=indent)
if verbose > 1:
print('Search Type: ', blat_type, indent=indent)
args_expanded = ['gfClient', local, str(port), '/', '/dev/stdin', '/dev/stdout']
args_expanded += ['-t=dnax', '-q=prot'] if blat_type.lower() == 'tblat' else []
        args_expanded += ['-minIdentity={}'.format(perc_ident if perc_ident else 0),
'-out={}'.format(outtype)]
try:
if verbose > 1:
print('BLAT command:', indent=indent)
print(' '.join(args_expanded), indent=indent + 1)
blat = subprocess.Popen(args_expanded, stdout=subprocess.PIPE,
universal_newlines=True, cwd=blatdb,
stdin=subprocess.PIPE, stderr=subprocess.PIPE)
blat_raw, blat_raw_err = blat.communicate(input=seq_record.format('fasta'))
if blat_raw_err:
raise SearchError(blat_raw_err)
head = subprocess.Popen(["head", "-n", "-1"], universal_newlines=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
blat_handle = head.communicate(input=blat_raw)
if verbose > 2:
print(blat_handle[0], indent=indent)
if verbose:
print('Done!', indent=indent)
if isinstance(blat_handle, str):
blat_result = blat_handle
blat_err = None
else:
blat_result, blat_err = blat_handle
except subprocess.CalledProcessError:
raise
blat_record = None
with StringIO(blat_result) as fin:
try:
if outtype == 'pslx':
blat_record = SearchIO.read(fin, format='blat-psl', pslx=True)
elif outtype == 'psl':
blat_record = SearchIO.read(fin, format='blat-psl')
elif outtype == 'blast8':
blat_record = SearchIO.read(fin, format='blast-tab')
elif outtype == 'blast9':
blat_record = SearchIO.read(fin, format='blast-tab', comments=True)
elif outtype == 'blast':
blat_record = SearchIO.read(fin, format='blast-xml')
else:
raise SearchError('Invalid out type')
except ValueError:
if verbose:
print('No Query Results were found in handle for seq_record {}!'.format(seq_record.id))
raise NoHitsError('No Query Results were found in handle for seq_record {}!'.format(seq_record.id))
except Exception as err:
print('Error reading BLAT results! Aborting!')
print('Error details:\n')
raise err
return blat_record, blat_err
@staticmethod
def blat_prep(database_port, species, verbose, indent):
if isinstance(database_port, dict):
try:
blat_port = database_port[species]
if verbose > 1:
print('Using port {0} for gfServer of species {1}.'.format(blat_port, species), indent=indent)
except KeyError:
raise SearchError('No 2bit found for species {}!'.format(species))
elif isinstance(database_port, int):
blat_port = database_port
elif isinstance(database_port, str):
try:
blat_port = int(database_port)
except ValueError:
raise SearchError('Invalid option "{}" was passed to database_port! database_port must be '
'either a dictionary of species-port pairs or an integer!'.format(database_port))
else:
raise SearchError('Invalid option of type "{}" was passed to database_port! database_port must be '
'either a dictionary of species-port pairs or an '
'integer!'.format(str(type(database_port))))
return blat_port
@staticmethod
def blast_prep(search_type, database, species, verbose, indent, db_loc):
if database == 'auto' or database == 'auto-transcript':
if verbose > 1:
print('Blast type set to auto!', indent=indent)
try:
blast_db = get_searchdb(search_type=search_type, species=species, db_loc=db_loc,
verbose=verbose, indent=indent + 1)
except Exception:
raise SearchError('No BLAST database was found for species {}!'.format(species))
elif isinstance(database, dict):
try:
blast_db = database[species]
if verbose > 1:
print('Using {} as BLAST database!'.format(blast_db), indent=indent)
except KeyError:
raise SearchError('No BLAST database was found for species {}!'.format(species))
elif isinstance(database, str) or isinstance(database, Path):
blast_db = database
else:
raise SearchError('Invalid type given for database!')
return blast_db
@staticmethod
def load(database):
try:
if database.exists() and database.is_file():
rec = None
with database.open('r') as forward_blasthits:
if database.suffix == '.psl':
rec = SearchIO.read(forward_blasthits, 'blat-psl')
elif database.suffix == '.pslx':
rec = SearchIO.read(forward_blasthits, 'blat-psl', pslx=True)
elif database.suffix == '.xml':
rec = SearchIO.read(forward_blasthits, 'blast-xml')
else:
raise SearchError('Database file "{}" could not be loaded - '
'Must be either a PSL, PSLX, or BLAST-XML file!'.format(str(database)))
else:
raise FileNotFoundError()
except FileNotFoundError:
raise SearchError('Database file "{}" was not found!'.format(str(database)))
return rec
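# Hedged usage sketch (not part of the original class); the argument values
# below are hypothetical placeholders for a forward BLAST search through the
# callable interface:
#
#   searcher = Search('blastn')
#   record, err = searcher(seq_record, species='Homo sapiens', database='auto',
#                          database_path='/usr/db/blastdb', local=True,
#                          indent=0, perc_ident=75, verbose=1)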
def id_search(id_rec, id_type='brute', verbose=2, indent=0, custom_regex=None, regex_only=False):
"""
EX:
gi =
refseq_accession = 'XP_010883249.1'
    scaffold = 'scaffold_145\t[:1033526-1034566](-)\t190'
id =
chr = 'chrX[:3047971-3259961](-)119'
seq_range =
assembly1 = 'KN678312.1 [:9787-29116](+) 478'
assembly2 = 'KN678312.1 [:9787-29116](+) 478'
symbol = 'TP53'
symbol = 'INS [:259-568](+) (161)'
strand = '+'
:param id_rec:
:param id_type:
:param custom_regex:
:param regex_only:
:param verbose:
:param indent:
:return:
"""
# Define the regex functions
    # raw strings keep the regex escapes intact (notably \b stays a word
    # boundary rather than a literal backspace character)
    p = dict(gi=re.compile(r'(\Agi[| _:]+[0-9.]+)'
                           r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             accession=re.compile(r'(\A[AXNYZ][MWRCPGTZ][| _:]+[0-9.]+|\Aref[| _:]+[0-9.]+)'
                                  r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             scaffold=re.compile(r'(\Ascaffold[| _:]+[0-9.]+)'
                                 r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             id=re.compile(r'(\Aid[| _:]*[0-9.]+)'
                           r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             chr=re.compile(r'(\Achr[| _:]*[A-Za-z0-9.]+)'
                            r'([| \t:_])??\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             assembly=re.compile(r'(\A[A-Za-z]+[0-9.]+)'
                                 r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             assembly_broad=re.compile(r'(\b[ALYB]+[0-9.]+)'
                                       r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             symbol=re.compile(r'(\A\S+)([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             seq_range=re.compile(r':?(\d+)-(\d+)'),
             strand=re.compile(r'(\([-+0N]\))'),
             score=re.compile(r'\d\d*')
             )
if custom_regex is not None:
p = {'custom': custom_regex}
id_type = 'custom'
# Begin search:
if verbose > 1:
print('ID Loaded, performing regex search for identifiers...', indent=indent)
print('ID type: ', id_type, indent=indent)
if id_type == 'brute':
for tmp_type in ['accession', 'gi', 'scaffold', 'id', 'chr', 'assembly', 'assembly_broad', 'symbol']:
if bool(p[tmp_type].findall(id_rec)):
if verbose > 1:
print('Brute Force was set, tested strings for all pre-registered IDs.', indent=indent)
print('ID was selected as type {0}!'.format(tmp_type), indent=indent + 1)
if regex_only:
return p[tmp_type]
else:
return id_search(id_rec=id_rec, id_type=tmp_type, verbose=verbose, indent=indent)
raise IDError('Couldn\'t identify the id type of line: {}!'.format(id_rec))
else:
try:
item_parts = p[id_type].findall(id_rec)[0]
if verbose > 1:
print('Successfully found {0}, compiling list!'.format(id_type), indent=indent)
print('Item:\t', '\t'.join(item_parts), indent=indent + 1)
except IndexError:
raise IDError('Could not identify patterns in {0} with id_type={1}, '
'is the id_search sequence correct?'.format(id_rec, id_type))
try:
item_parts = list(item_parts)
item_parts[0] = item_parts[0] if not isinstance(item_parts[0], str) else ''.join(item_parts[0])
if item_parts[2]:
try:
sr_tuple = p['seq_range'].findall(item_parts[2])[0]
if verbose > 1:
print('Found sequence delimiters in IDs!', indent=indent)
print(sr_tuple, indent=indent + 1)
except IndexError:
raise IDError('A positive match for a sequence range was found '
'({0}), yet no hits were identified! Confirm that '
'the regex is correct and try again!'.format(item_parts[2]))
else:
sr_tuple = (0, -1)
if item_parts[4]:
try:
strand = p['strand'].findall(item_parts[4])[0]
except IndexError:
strand = '(N)'
try:
score = p['score'].findall(item_parts[4])[0]
except IndexError:
score = 0
else:
strand = '(N)'
score = '0'
if verbose > 1:
if strand != '(N)':
print('Strand info found: {0}'.format(strand), indent=indent)
if score != '0':
print('Score info found: {0}'.format(score), indent=indent)
seq_range = (int(sr_tuple[0]), int(sr_tuple[1]), strand, int(score))
return p, item_parts[0], seq_range, id_type
except IndexError:
raise IDError('Could not identify patterns in {0} with id_type={1}, '
'is the id_search sequence correct?'.format(id_rec, id_type))
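# Hedged usage sketch (not part of the original module): brute-force
# identification of a RefSeq accession. The expected return (untested) is
# roughly (regex_dict, 'XP_010883249.1', (0, -1, '(N)', 0), 'accession').
#
#   p, item, seq_range, id_type = id_search('XP_010883249.1', verbose=0)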
def percent_identity_searchio(hit, is_protein=True):
"""Calculates percent identity based on entire hit. Adapted from UCSC BLAT FAQ and Biopython."""
size_mul = 3 if is_protein else 1
qali_size = size_mul * sum([i[-1] - i[0] for i in merge_ranges([(hsp.query_start, hsp.query_end) for hsp in hit])])
tali_size = sum([i[-1] - i[0] for i in merge_ranges([(hsp.hit_start, hsp.hit_end) for hsp in hit])])
ali_size = min(qali_size, tali_size)
if ali_size <= 0:
return 0
size_dif = qali_size - tali_size
size_dif = 0 if size_dif < 0 else size_dif
sum_match = sum([i.match_num for i in hit])
sum_rep = sum([i.match_rep_num for i in hit])
sum_mismatch = sum([i.mismatch_num for i in hit])
total = size_mul * (sum_match + sum_rep + sum_mismatch)
if total != 0:
millibad = (1000 * (sum([i.mismatch_num for i in hit]) * size_mul + sum([i.query_gap_num for i in hit]) +
round(3 * log(1 + size_dif)))) / total
else:
raise Exception('Somehow your total in the percent_identity function was 0, so you broke the script!')
perc_ident = 100 - (millibad * 0.1)
return perc_ident
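# Hedged worked example (not part of the original module): for a nucleotide
# hit (size_mul = 1) with 90 matches, 10 mismatches, no repeats, no query
# gaps and equal query/target spans (size_dif = 0):
#   total      = 1 * (90 + 0 + 10) = 100
#   millibad   = 1000 * (10 * 1 + 0 + round(3 * log(1))) / 100 = 100
#   perc_ident = 100 - 100 * 0.1 = 90.0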
def get_searchdb(search_type, species, db_loc, verbose=1, indent=0):
"""Finds and returns the appropriate search database for the given species and search type.
This function automates the process of selecting the search database needed by the selected search program,
like BLAST or BLAT, so that the user does not need to preoccupy themselves with providing said information
for a large number of species. For BLAST* that depend on protein databases (BLASTP and BLASTX), the function
searches for files matching the form 'Genus_species_protein.*' in the given directory; for BLAST* that depend
on DNA databases (BLASTN, TBLASTN, and TBLASTX), it instead looks for files 'Genus_species_genome.*'.
If '-transcript' is added to the end of any of the DNA-dependent BLAST*, then instead the function will
search for files in the style of 'Genus_species_transcript.*'. In the case of BLAT searches, the program will
similarly search for 'Genus_species*.2bit', or for 'Genus_species*transcript.2bit' if '-transcript' is added
after the search type.
In all usage cases, if the program does not find files matching the 'Genus_species' format, it will try to
find the files using a case-insensitive search using the 6-letter abbreviated form of the species name.
Usage::
>>> get_searchdb('blastp', 'Homo sapiens', '/path/to/search/files')
    /path/to/search/files/Homo_sapiens_protein.*
>>> get_searchdb('tblastn', 'Homo sapiens', '/path/to/search/files')
/path/to/search/files/HomSap_genome.*
>>> get_searchdb('blastn-transcript', 'Homo sapiens', '/path/to/search/files')
/path/to/search/files/HomSap_transcript.*
>>> get_searchdb('blat', 'Homo sapiens', '/path/to/search/files')
/path/to/search/files/HomSap.2bit
>>> get_searchdb('blat-transcript', 'Homo sapiens', '/path/to/search/files')
/path/to/search/files/HomSap_transcript.2bit
Arguments::
:param str search_type: The name of the search method (blast or blat, and sub-type: blastp, blastn, blat, tblat...)
:param str species: Name of species associated with the database. If there is a space, it will be replaced with an
underscore.
:param str db_loc: Path to folder containing collection of search databases.
:param int verbose: How verbose should the output be. Zero suppresses all output, 2 is max verbosity.
:param int indent: Indent level for printed output.
:return str: Path to the identified search database.
"""
if verbose:
print('Search DB set to auto, choosing search_db...', indent=indent)
species = species.replace(' ', '_')
if verbose > 1:
print('Search DB location set to: ', db_loc, indent=indent)
db_type_dict = {
'blastx': "protein",
'blastp': "protein",
'blastn': "genome",
'tblastn': "genome",
'tblastx': "genome",
'blastn-transcript': "transcript",
'tblastn-transcript': "transcript",
'tblastx-transcript': "transcript",
'blat': "blat",
'tblat': "blat",
'blat-transcript': 'blat-transcript',
'tblat-transcript': 'tblat-transcript'
}
try:
db_type = db_type_dict[search_type]
except KeyError:
print('Unable to determine search db type!', indent=indent)
raise SearchError('Improper search type given ({})!'.format(search_type))
if verbose > 1:
print('DB type: ', db_type, indent=indent)
db_path = Path(db_loc).absolute()
if not db_path.exists():
db_path = Path(db_loc)
if db_path.exists() and db_path.is_dir():
if db_type == 'blat':
glob_path = [i for i in
db_path.glob('{0}*.2bit'.format(species.replace(' ', '_')))] # Todo: generalize extension
elif db_type in ['blat-transcript', 'tblat-transcript']:
glob_path = [i for i in db_path.glob('{0}*transcript.2bit'.format(species.replace(' ', '_')))]
else:
glob_path = [i for i in db_path.glob('{0}_{1}*'.format(species.replace(' ', '_'), db_type))]
if not glob_path:
if verbose:
print('No DB found! Trying again with abbreviated species name', indent=indent)
species_abbv = ''.join([i[0:3] for i in species.title().split('_')])
# making it insensitive to case for Glob
species_abbv_insensitive = ''.join(['[{0}{1}]'.format(c.lower(),
c.upper()) for c in species_abbv if c.isalpha()])
if verbose:
print('Abbreviated species name: ', species_abbv, indent=indent)
print('RegEx species abbreviation: ', species_abbv_insensitive, indent=indent)
if db_type == 'blat':
glob_path = [i for i in db_path.glob('{0}*.2bit'.format(species_abbv_insensitive))]
elif db_type in ['blat-transcript', 'tblat-transcript']:
glob_path = [i for i in db_path.glob('{0}*transcript.2bit'.format(species_abbv_insensitive))]
else:
glob_path = [i for i in db_path.glob('{0}_{1}*'.format(species_abbv_insensitive, db_type))]
try:
if verbose:
print(glob_path, indent=indent)
if isinstance(glob_path, list):
search_db = sorted(glob_path, reverse=True)[0]
else:
search_db = glob_path
except IndexError:
print('WARNING: COULD NOT FIND DATABASE! ABORTING!', indent=indent)
raise DatabaseNotFoundError('', 'No databases were found!')
else:
raise DatabaseNotFoundError('DB_Path {} does not exist!'.format(str(db_path)))
if verbose:
print('{0} DB chosen: {1}'.format(search_type, str(search_db)), indent=indent)
return search_db
def blat_server(twobit, order='start', host='localhost', port=20000, type='blat', log='/dev/null', species=None,
search_db_loc='/usr/db/blat', verbose=1, indent=0, try_limit=10, **kwargs):
"""Convenience function that controls a gfServer. Still in alpha.
This function serves as a python wrapper for the Bash gfServer command. The user can either provide a .2bit file,
or else can provide a species and set 'twobit="auto"' to have the function use 'get_searchdb()' to find a .2bit file
automatically. By default, the function is set to start up a new gfServer instance, but using the 'order' parameter,
the user can execute any of the standard gfServer commands such as 'stop' and 'status'.
    To start a gfServer, the function first probes the selected port (default is 20000) to ensure it's unused; if it is
currently in use, the program then goes port-by-port in ascending order until it finds an empty port to use for the
server. Then, it simply calls the gfServer command with all the keyword arguments required, as well as with any
extra arguments provided by the user.
Usage::
>>>blat_server(twobit='hg38.2bit', port=20000, verbose=3)
gfServer start localhost 20001 -canStop -stepSize=5 hg38.2bit
# Waits 30 seconds, then starts calling 'gfServer status localhost 20001' every 30 seconds for 5 minutes
# If at any point 'gfServer status' returns something that is not an error or "Couldn't connect...", it
# returns the port where the server was opened.
20001
>>>blat_server(twobit='auto', port=20000, species='Homo sapiens', verbose=3)
# Calls get_searchdb('blat', 'Homo sapiens', db_loc=BLATDB)
# Internally, will return a .2bit file such as 'Homo_sapiens.2bit'
20001
>>>blat_server(twobit='hg38.2bit', port=20000, order='status', verbose=3)
# If the server is active:
1
>>>blat_server(twobit='hg38.2bit', port=20000, order='status', verbose=3)
# If the server either has not been started or is not yet active:
0
>>>blat_server(twobit='hg38.2bit', port=20000, order='status', verbose=3)
# If the server returns an error
Exception(...)
:param str twobit: A path to the .2bit file to be used for the server. Can also be set to 'auto'.
:param str order: A command for gfServer. Can be one of the following: start, stop, status, files, query (requires
a nucleotide sequence in fasta format), protQuery (requires a protein sequence in fasta format), transQuery
(requires a nucleotide sequence in fasta format), pcr (requires arguments fPrimer, rPrimer, maxDistance), direct
(requires probe.fa, file(s).nib), or pcrDirect (requires fPrimer, rPrimer, file(s).nib).
:param str host: Address at which to host the server.
:param int port: Port number that will be assigned to server. If in use, will test new port number in increments of
1 until a free port is found.
:param str type: Type of server to be hosted. 'blat' will start a DNA server, 'tblat' will start a DNAX server for
protein queries.
:param str log: Path and name of log file to be written.
:param str species: Species name that get_searchdb() will use to find .2bit file when twobit='auto'.
:param str search_db_loc: Path to the folder containing .2bit file.
:param int verbose: Level of verbosity of function output. 0 suppresses all output, 3 is max verbosity.
:param int indent: Indentation level of print output.
:param int try_limit: Number of tries at 30-second intervals that function should probe the gfServer before timeout.
:param kwargs: keyword arguments to be passed on to gfServer.
:return: if order='start', returns the port of the new gfServer; if order='status', returns 0 if there was no
connection, or 1 if the server is active and responding.
"""
# Regular: gfServer start localhost portX -stepSize=5 -log=untrans.log database.2bit
# Prot>DNAX: gfServer start localhost portY -trans -mask -log=trans.log database.2bit
gfserver_suppl_args = list()
if twobit == 'auto' and order != 'stop':
if verbose:
print('2bit set to auto: searching for 2bit file for species ', species, indent=indent)
twobit = get_searchdb(search_type='blat', species=species, db_loc=search_db_loc,
verbose=verbose, indent=indent + 1)
if twobit.exists() and twobit.is_file():
twobit = twobit.name
else:
raise BLATServerError('Invalid 2bit file!')
for key, item in kwargs.items():
if key == 'order':
order = item
elif key == 'host':
host = item
elif key == 'port':
port = item
else:
gfserver_suppl_args.append('-{0}={1}'.format(key, item))
if order == 'status':
gfcheck = subprocess.Popen('gfServer status {0} {1}'.format(str(host), str(port)), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True, shell=True,
executable='/bin/bash')
out, _ = gfcheck.communicate()
if "couldn't connect to localhost" in out.lower():
return 0
elif "error" in out.lower():
raise BLATServerError(out)
else:
return 1
elif order == 'stop':
subprocess.check_call('gfServer stop {0} {1}'.format(str(host), str(port)), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True, shell=True,
executable='/bin/bash')
return
else:
print(order)
# Todo: make the portsniffer its own function and make sure it works properly.
portfinder = subprocess.check_output('/home/manny/Scripts/oneshot/checkifportisopen.sh {}'.format(str(port)),
universal_newlines=True, shell=True, executable='/bin/bash')
port = portfinder.rstrip()
gfserver_cmd = ['gfServer', str(order), str(host), str(port), '-canStop']
if type == 'blat':
gfserver_cmd.append('-stepSize=5')
elif type == 'tblat':
gfserver_cmd += ['-trans', '-mask']
if gfserver_suppl_args:
gfserver_cmd += gfserver_suppl_args
gfserver_cmd_str = ' '.join(gfserver_cmd + [twobit])
if verbose > 2:
print(gfserver_cmd_str, indent=indent)
subprocess.Popen(gfserver_cmd_str, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, shell=True, executable='/bin/bash')
tries = 0
while tries <= try_limit:
sleep(30)
gfcheck = subprocess.Popen('gfServer status {0} {1}'.format(str(host), str(port)), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True, shell=True,
executable='/bin/bash')
out, _ = gfcheck.communicate()
if verbose > 2:
print(out)
if "couldn't connect to localhost" in out.lower():
tries += 1
elif "error" in out.lower():
raise BLATServerError(out)
else:
if verbose:
print(out)
return port
if tries > try_limit:
raise TimeoutError('Timed out!')
def id_ranker(record, perc_score, perc_query_span, perc_ident, expect=None,
indent=0, verbose=1, same_strand=True, return_only=None):
"""Filters results based on score, expectation value, length, percent identity, and span; returns a sorted list.
:param query_record record: Either a SearchIO.QueryResult or a Bio.Blast.Record.
:param float perc_score: Minimum percentage of top score for a hit.
:param float expect: Maximum e-value for a hit (BLAST-only).
:param float perc_query_span: Minimum percent of the longest hit by query coverage for a hit.
:param int perc_ident: Minimum percent identity of a hit.
:param int indent: Indent level for pretty print. [Default: 0]
:param int verbose: Level of verbose output? [Default: 1]
:param bool same_strand: Should the function filter hits with HSPs on different strands? [Default:True]
:param return_only: Should all or only one id be returned?
:return list: Returns a list of tuples containing the final hit data in BED6 format.
"""
id_list = []
if verbose:
print('Beginning ID_Ranker...', indent=indent)
if record.program == 'blat':
if verbose > 2:
print('Results obtained from BLAT run.', indent=indent + 1)
elif 'blast' in record.program:
if verbose > 2:
print('Results obtained from BLAST run.', indent=indent + 1)
else:
raise NotImplementedError('Sorry, your program {} is not yet '
'implemented for RecBlast!'.format(record.program))
# Create filter functions:
def hsp_minscores(hsp):
return hsp.score >= int(perc_score * top_score)
def hsp_min_query_span(hsp):
return hsp.query_span >= perc_query_span * top_length
def hsp_perc_ident(hsp):
return hsp.ident_pct >= perc_ident
def hsp_same_strand(hsp):
if same_strand:
return all([i == hsp.hit_strand_all[0] for i in hsp.hit_strand_all])
else:
return True
def hit_sort_scores(hit):
return sum([hsp.score for hsp in hit.hsps])
def hsp_sort_scores(hsp):
return hsp.score
# Get top stats:
top_score = max([max([hsp.score for hsp in hit.hsps]) for hit in record])
if verbose > 1:
print('Top score for {}:\t'.format(record.id), top_score, indent=indent)
top_length = max([max([hsp.query_span for hsp in hit]) for hit in record])
if verbose > 1:
print('Longest hit for {}:\t'.format(record.id), top_length, indent=indent)
if verbose > 2:
print("ALL HITS STATS:")
print('|\tHit Name:\t|\t# HSPs\t|\tScore:\t|\tLength:\t|\tP.Ident\t|')
print("==========================================================")
for hit in record:
name = hit.id
n_hsp = len(hit.hsps)
print('|\t{HitName}\t|\t{HSP}\t|'.format(HitName=name, HSP=n_hsp))
print("------------------------------------------------------")
for hsp in hit:
print('|\t{id}\t|\t{hf}\t|\t{score}\t|\t{length}\t|\t{ident}\t|'.format(id=hsp.hit_id,
hf=len(hsp),
score=hsp.score,
length=hsp.hit_span,
ident=hsp.ident_pct))
# Execute filters:
# query_span
if verbose > 1:
print('Number of HSPs for {}:\t'.format(record.id), sum([len(i.hsps) for i in record]), indent=indent)
print('Filtering out all HSPs shorter than {}...'.format(perc_query_span * top_length), indent=indent)
record = record.hsp_filter(hsp_min_query_span) if perc_query_span else record
if not record:
text = ('No hits in Query Results match a stretch of the query sequence longer than '
'{0}!').format((top_length * perc_query_span))
raise NoHitsError(text)
# Score
if verbose > 1:
print('Number of HSPs for {}:\t'.format(record.id), sum([len(i.hsps) for i in record]), indent=indent)
print('Filtering out all HSPs with scores less than {}...'.format(top_score * perc_score), indent=indent)
record = record.hsp_filter(hsp_minscores) if perc_score else record
if not record:
text = 'No hits in Query Results have a score above the minimum of {0}!'.format((top_score * perc_score))
raise NoHitsError(text)
if verbose > 1:
print('Number of HSPs for {}:\t'.format(record.id), sum([len(i.hsps) for i in record]), indent=indent)
print('Filtering out all HSPs with percent identity below {}...'.format(perc_ident), indent=indent)
record = record.hsp_filter(hsp_perc_ident) if perc_ident else record
if not record:
text = 'No hits in Query Results have a percent identity above {}%!'.format(round(perc_ident * 100, 2))
raise NoHitsError(text)
if verbose > 1:
print('Number of HSPs for {}:\t'.format(record.id), sum([len(i.hsps) for i in record]), indent=indent)
if same_strand:
            print('Filtering out all HSPs that have fragments on opposite strands...', indent=indent)
record = record.hsp_filter(hsp_same_strand) if same_strand else record
if not record:
text = 'No hits in Query Results are on the same strand!'
raise NoHitsError(text)
# Sorting them for good measure
if verbose > 1:
print('Sorting all hits by descending scores!', indent=indent)
record.sort(key=hit_sort_scores, reverse=True, in_place=True)
for hit in record:
hit.sort(key=hsp_sort_scores, reverse=True, in_place=True)
if verbose > 1:
print('Done!', indent=indent)
# Add items to id_list
# Big note: think in HSPs, not Hits
n = 1
for hit in record:
for hsp in hit:
# some quick strand math:
if hsp._has_hit_strand:
strands = set(hsp.hit_strand_all)
if len(strands) == 1:
strand = "+" if strands == {1} else "-"
else:
strand = "."
else:
strand = "."
if verbose > 2:
print("Adding hit {chr}:{s}-{e}({st}) to id list".format(chr=hsp.hit_id,
s=str(hsp.hit_range[0]),
e=str(hsp.hit_range[1]),
st=strand),
indent=indent)
# A little witchcraft before we do though
# turns out hsp.hit_start_all won't necessarily start with the starting point of the hit...
# That means we need to zip hit_start_all and hit_span_all, sort by the first one, then de-zip.
block_starts, block_spans = zip(*sorted(zip(hsp.hit_start_all, hsp.hit_span_all), key=itemgetter(0)))
# chr (start,end) id score strand thickStart thickEnd rgb blockcount blockspans blockstarts query_span
id_list.append([hsp.hit_id, hsp.hit_range, hsp.query_id, hsp.score, strand, hsp.hit_range[0],
hsp.hit_range[1], "255,0,0", len(hsp.hit_start_all),
",".join([str(i) for i in block_spans]),
",".join([str(i - hsp.hit_range[0]) for i in block_starts]), hsp.query_range])
if return_only and n == return_only:
print('Returning only the top {} hits, ending here!'.format(return_only),
indent=indent)
return id_list
n += 1
return id_list
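# Hedged usage sketch (not part of the original module): filtering a parsed
# SearchIO result down to BED6-style rows. The threshold values here are
# hypothetical.
#
#   hits = id_ranker(record, perc_score=0.1, perc_query_span=0.5, perc_ident=75)
#   for chrom, (start, end), query_id, score, strand, *rest in hits:
#       print(chrom, start, end, query_id, score, strand)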
|
docmanny/RecBlast
|
RecBlast/Search.py
|
Python
|
mit
| 43,643
|
[
"BLAST",
"Biopython"
] |
b7dd4c8c752259a64c2ab688c06ab3ceded41c2abb5c70871b68b233df848641
|
class Record:
def __init__(self, elem):
"""
this class simply represents a list record linked to his next/prev elems
:param elem: elem stored in the record
"""
self._elem = elem
self._next = None
self._prev = None
def getElem(self):
"""
:return elem, the elem stored in the record
"""
return self._elem
def addNext(self, elem):
"""
:return Record, set the next record
"""
self._next = elem
def getNext(self):
"""
:return Record, get the next record
"""
return self._next
def addPrev(self, elem):
"""
:return Record, set the previous record
"""
self._prev = elem
    def getPrev(self):
"""
:return Record, get the previous record
"""
return self._prev
class DoubledLinkedList:
def __init__(self):
"""
This class represents a double linked list
"""
self._count = 0
self._first = None
self._last = None
self._added_last_elems = []
def getCount(self):
"""
:return: int, elements' number
"""
return self._count
def isEmpty(self):
"""
:return: bool, true if the list is empty
"""
return self._first is None
def getFirst(self):
"""
:return: elem, the elem stored in the first record
"""
if self.isEmpty():
return None
else:
return self._first.getElem()
def getFirstRecord(self):
"""
:return: Record, the first record of the list
"""
if self.isEmpty():
return None
else:
return self._first
def getLast(self):
"""
:return: elem, the elem stored in the last record
"""
if self.isEmpty():
return None
else:
return self._last.getElem()
def getLastRecord(self):
"""
:return: Record, the last record of the list
"""
if self.isEmpty():
return None
else:
return self._last
def addAsFirst(self, elem):
"""
this function stores the elem in a new record at the top of the list
:param elem: the elem to be stored
"""
newRecord = Record(elem)
self._count += 1
if self.isEmpty():
self._first = self._last = newRecord
        else:
            if self._last is None:
                self._last = self.getFirstRecord()
            newRecord.addNext(self.getFirstRecord())
            self.getFirstRecord().addPrev(newRecord)
            self._first = newRecord
def addAsLast(self, elem):
"""
this function stores the elem in a new record at the bottom of the list
:param elem: the elem to be stored
"""
newRecord = Record(elem)
self._count += 1
if self.isEmpty():
self._first = self._last = newRecord
else:
newRecord.addPrev(self.getLastRecord())
self._last.addNext(newRecord)
self._last = newRecord
self._added_last_elems.append(elem)
def popFirst(self):
"""
this function pops the first record in the list
:return: Record, the first record of the list
"""
if self.isEmpty():
return None
else:
pop = self.getFirstRecord()
self._count -= 1
self._first = self.getFirstRecord().getNext()
if self.getFirstRecord() is None:
self._last = None
else:
self.getFirstRecord().addPrev(None)
return pop
    def popLast(self):
        """
        this function pops the last record in the list
        :return: Record, the last record of the list
        """
        if self.isEmpty():
            return None
        else:
            pop = self.getLastRecord()
            self._count -= 1
            self._last = self.getLastRecord().getPrev()
if self.getLastRecord() is None:
self._first = None
else:
self.getLastRecord().addNext(None)
return pop
def deleteRecord(self, delRecord):
"""
this function deletes the record specified relinking its prev/next records
:param delRecord: the record to be deleted
"""
if self.isEmpty():
return
elif delRecord is None:
return
        else:
            self._count -= 1
            if delRecord.getPrev() is None:
self._first = delRecord.getNext()
else:
linking = delRecord.getNext()
delRecord.getPrev().addNext(linking)
if delRecord.getNext() is None:
self._last = delRecord.getPrev()
else:
linking = delRecord.getPrev()
delRecord.getNext().addPrev(linking)
def getLastAddedList(self):
"""
this function returns a plain list of all the last added elems (history of last added elems, it is useful
for an implementation without any affection by deleting items, such as an adjacency list visit of a static
graph!)
:return: list, a list of Nodes
"""
return self._added_last_elems
def getList(self):
"""
this function returns a plain list of all the elems
:return: list, list of Nodes
"""
l = []
if self.isEmpty():
return l
else:
rec = self.getFirstRecord()
while rec is not None:
l.append(rec.getElem())
rec = rec.getNext()
return l
def printList(self):
"""
this function prints the entire list fine formatted
:return: fine formatted list of elements
"""
print "List count:", self.getCount()
rec = self.getFirstRecord()
while rec is not None:
print "--->", rec.getElem().get_node() #in this case we used .get_node() in order to use our personal
#interface to see every node element
rec = rec.getNext()
class Queue(DoubledLinkedList):
"""
this class represent a simple queue
"""
def enqueue(self, elem):
self.addAsLast(elem)
def dequeue(self):
return self.popFirst().getElem()
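# Hedged usage sketch (not part of the original module): enqueue a few items
# and drain them in FIFO order. Plain integers are used here, so printList()
# (which expects elements exposing get_node()) is deliberately avoided.
if __name__ == "__main__":
    q = Queue()
    for n in (1, 2, 3):
        q.enqueue(n)
    while not q.isEmpty():
        print q.dequeue()  # prints 1, then 2, then 3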
|
IA-MP/KnightTour
|
libs/graph/DLinkedList.py
|
Python
|
mit
| 6,415
|
[
"VisIt"
] |
965e4239162895604707a66c400b859eae2110456495129814a2c50f798bb367
|
import pysam
class AnnotateMateInformation(object):
"""Reads along a file that has a complete set of alignments and a file that should be annotated for mates of interest."""
def __init__(self, target, source, output_path=None, mate_sequence_tag='MS'):
"""
Add mate sequence into MS tag.
:param file_to_annotate: path to alignment file
:param annotate_source: path to alignment file
:param output_path: path to output alignment file
"""
self.source = source
self.target = target
self.output_path = output_path
self.mate_sequence_tag = mate_sequence_tag
self.setup()
self.reads_to_annotate = self.get_reads_to_annotate()
self.get_mates()
self.write_annotated_reads()
def setup(self):
"""Set up input and output files if these are paths."""
if isinstance(self.source, str):
self.source = pysam.AlignmentFile(self.source)
if isinstance(self.target, str):
self.target = pysam.AlignmentFile(self.target)
if isinstance(self.output_path, str):
self.writer = pysam.AlignmentFile(self.output_path, mode='wb', header=self.target.header)
def get_reads_to_annotate(self):
"""Generate a list of mates to annotate."""
reads = {}
for read in self.target:
reads["%s_%d" % (read.query_name, int(read.is_read1))] = None
if isinstance(self.target, pysam.AlignmentFile):
self.target.reset()
return reads
def get_mates(self):
"""Iterate over source file and annotate self.reads_to_annotate with the needed information."""
for read in self.source:
mate_id = "%s_%d" % (read.query_name, int(not read.is_read1))
if mate_id in self.reads_to_annotate:
self.reads_to_annotate[mate_id] = read.query_sequence
def write_annotated_reads(self):
"""Add mate sequence to read in input file and write out."""
for read in self.target:
read_id = "%s_%d" % (read.query_name, int(read.is_read1))
mate_seq = self.reads_to_annotate[read_id]
read.set_tag(self.mate_sequence_tag, mate_seq)
if self.output_path:
self.writer.write(read)
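# Hedged usage sketch (not part of the original module); the BAM file names
# below are hypothetical placeholders.
#
#   AnnotateMateInformation(target='region.bam',
#                           source='all_alignments.bam',
#                           output_path='region_with_mates.bam')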
|
bardin-lab/readtagger
|
readtagger/mateoperations.py
|
Python
|
mit
| 2,303
|
[
"pysam"
] |
ac1fcb1b6c81f6d585fe2f1b0568ebdbb46ca70dec3585ac606d69828bf1527a
|
#!/usr/bin/env python3
#
# Code related to ESET's Linux/Moose research
# For feedback or questions contact us at: github@eset.com
# https://github.com/eset/malware-research/
# Olivier Bilodeau <bilodeau@eset.com>
#
# This code is provided to the community under the two-clause BSD license as
# follows:
#
# Copyright (C) 2015 ESET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from pprint import pprint
import socket
from struct import unpack
import sys
from lib.elan2 import *
def main():
data = {}
with open(sys.argv[1], 'rb') as f:
data.update(parse_cnc1_config(f))
# fetch wordlist
data.update(parse_cnc1_cracklist(f, results=True))
for line in data['wordlist'].decode('ascii').split('\r\n'):
if line.find(" ") != -1:
u, p = line.split(' ', 1)
print("{}:{}".format(u,p))
else:
print(line)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Run with: {} <file>".format(sys.argv[0]))
exit(1)
main()
|
eset/malware-research
|
moose/parse_cnc1_wordlist.py
|
Python
|
bsd-2-clause
| 2,280
|
[
"MOOSE"
] |
bed7b00fda1a71bb3c2db7bcd88e9b9ffac6639b9a554ef979d47b90edc8f3c8
|
#!/usr/bin/env python
################################################################################
#
# castep_constrain.py
#
# Usage: castep_constrain.py some.cell [-t threshold] [-a axis], with axis one of a, b or c
#
# Generates constraints on all atoms below "threshold" (units fractional by default) along
# the specified axis (c axis is default).
#
################################################################################
#
# Copyright 2013 Kane O'Donnell
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
#
# NOTES
#
# 1.
#
################################################################################
from __future__ import division
import argparse
import os.path
import os
import sys
import esc_lib as el
el.DEBUG = 1
parser = argparse.ArgumentParser(description="Generate CASTEP constraints for a cell file.")
parser.add_argument('inputfile', help="Input .cell file.")
parser.add_argument('-t', '--threshold', help="Constrain all atoms less than this threshold (if not given, will ask).")
parser.add_argument('-a', '--axis', default="c", help="Threshold axis direction (a, b or c).")
args = parser.parse_args()
cell = el.Atoms(args.inputfile, "castep,cell")
if args.axis == "a":
axis = 0
elif args.axis == "b":
axis = 1
elif args.axis == "c":
axis = 2
else:
print "Unknown value for threshold axis - use a, b or c. Exiting..."
sys.exit(1)
nat = len(cell.positions[0])
print "Found %d atoms in total, here given in atomic coordinates:" % (nat)
print ""
for i in range(nat):
print "%s\t%.3g\t%.3g\t%.3g" % (el.elements[cell.species[0][i]], cell.positions[0][i][0], cell.positions[0][i][1], cell.positions[0][i][2])
# Ask user for threshold if not present
if args.threshold is None:
threshold = float(raw_input("Enter threshold value: "))
else:
threshold = float(args.threshold)
# Get all atom indices less than the appropriate threshold.
atoms = []
for i in range(nat):
if abs(cell.positions[0][i][axis]) < threshold:
atoms.append(i)
print ""
print "Found %d atoms to be constrained." % (len(atoms))
# Now use esc_lib's built-in constraints function!
string_constraints = cell.generateCASTEPConstraints(atoms)
# Write by appending to the original file - note this means running repeatedly on the
# same cell file will result in multiple constraints blocks.
with open(args.inputfile, 'a') as f:
f.write("\n")
f.write(string_constraints)
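# Illustrative usage (file name and threshold are hypothetical):
#   ./castep_constrain.py slab.cell -t 0.25 -a c
# appends an ionic-constraints block fixing every atom whose coordinate along
# the c axis is below 0.25. As noted above, re-running on the same .cell file
# appends a second constraints block.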
|
HSINWEI/physics
|
python/castep_constrain.py
|
Python
|
gpl-3.0
| 3,080
|
[
"CASTEP"
] |
fb092e9593935639fcffa73cf724c3b60887e212828e20bab7255d504dfad766
|
""" Here, we need some documentation...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import threading
import time
import select
import socket
from concurrent.futures import ThreadPoolExecutor
try:
import selectors
except ImportError:
import selectors2 as selectors
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.private.TransportPool import getGlobalTransportPool
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.Core.DISET.private.MessageFactory import MessageFactory, DummyMessage
class MessageBroker(object):
def __init__(self, name, transportPool=None, threadPool=None):
self.__name = name
self.__messageTransports = {}
self.__msgCounter = 0
self.__msgCounterLock = threading.Lock()
self.__responseCallbacks = {}
self.__msgInTransport = {}
self.__listenPersistConn = False
self.__useMessageObjects = True
self.__callbacksLock = threading.Condition()
self.__trInOutLock = threading.Lock()
self.__msgFactory = MessageFactory()
self.__log = gLogger.getSubLogger("MSGBRK")
if not transportPool:
transportPool = getGlobalTransportPool()
self.__trPool = transportPool
if not threadPool:
threadPool = ThreadPoolExecutor(100)
self.__threadPool = threadPool
self.__listeningForMessages = False
self.__listenThread = None
def getNumConnections(self):
return len(self.__messageTransports)
def getMsgFactory(self):
return self.__msgFactory
def useMessageObjects(self, bD):
self.__useMessageObjects = bD
# Message id generation
def __generateMsgId(self):
self.__msgCounterLock.acquire()
try:
msgId = "%s:%d" % (self.__name, self.__msgCounter)
self.__msgCounter += 1
return msgId
finally:
self.__msgCounterLock.release()
def getTransportPool(self):
return self.__trPool
# Add and remove transport to/from broker
def addTransport(self, transport, *args, **kwargs):
trid = self.__trPool.add(transport)
try:
result = self.addTransportId(trid, *args, **kwargs)
except Exception as e:
gLogger.exception("Cannot add transport id", lException=e)
result = S_ERROR("Cannot add transport id")
if not result["OK"]:
self.__trPool.remove(trid)
return result
return S_OK(trid)
def addTransportId(
self,
trid,
svcName,
receiveMessageCallback=None,
disconnectCallback=None,
idleRead=False,
listenToConnection=True,
):
self.__trInOutLock.acquire()
try:
if trid in self.__messageTransports:
return S_OK()
tr = self.__trPool.get(trid)
if not tr:
return S_ERROR("No transport with id %s registered" % trid)
self.__messageTransports[trid] = {
"transport": tr,
"svcName": svcName,
"cbReceiveMessage": receiveMessageCallback,
"cbDisconnect": disconnectCallback,
"listen": listenToConnection,
"idleRead": idleRead,
}
self.__startListeningThread()
return S_OK()
finally:
self.__trInOutLock.release()
def listenToTransport(self, trid, listen=True):
self.__trInOutLock.acquire()
try:
if trid in self.__messageTransports:
self.__messageTransports[trid]["listen"] = listen
self.__startListeningThread()
finally:
self.__trInOutLock.release()
# Listen to connections
def __startListeningThread(self):
threadDead = (
self.__listeningForMessages and self.__listenThread is not None and not self.__listenThread.is_alive()
)
if not self.__listeningForMessages or threadDead:
self.__listeningForMessages = True
self.__listenThread = threading.Thread(target=self.__listenAutoReceiveConnections)
self.__listenThread.setDaemon(True)
self.__listenThread.start()
def __listenAutoReceiveConnections(self):
while self.__listeningForMessages:
self.__trInOutLock.acquire()
try:
# TODO: A single DefaultSelector instance can probably be shared by all threads
sel = selectors.DefaultSelector()
for trid in self.__messageTransports:
mt = self.__messageTransports[trid]
if not mt["listen"]:
continue
sel.register(mt["transport"].getSocket(), selectors.EVENT_READ, trid)
if not sel.get_map():
self.__listeningForMessages = False
return
finally:
self.__trInOutLock.release()
try:
events = sel.select(timeout=1)
except (socket.error, select.error):
# TODO: When can this happen?
time.sleep(0.001)
continue
except Exception as e:
gLogger.exception("Exception while selecting persistent connections", lException=e)
continue
for key, event in events:
if event & selectors.EVENT_READ:
trid = key.data
if trid in self.__messageTransports:
result = self.__receiveMsgDataAndQueue(trid)
if not result["OK"]:
self.removeTransport(trid)
# Process received data functions
def __receiveMsgDataAndQueue(self, trid):
# Receive
result = self.__trPool.receive(
trid, blockAfterKeepAlive=False, idleReceive=self.__messageTransports[trid]["idleRead"]
)
self.__log.debug("[trid %s] Received data: %s" % (trid, str(result)))
# If error close transport and exit
if not result["OK"]:
self.__log.debug("[trid %s] ERROR RCV DATA %s" % (trid, result["Message"]))
gLogger.warn(
"Error while receiving message",
"from %s : %s" % (self.__trPool.get(trid).getFormattedCredentials(), result["Message"]),
)
return self.removeTransport(trid)
self.__threadPool.submit(self.__processIncomingData, trid, result)
return S_OK()
def __processIncomingData(self, trid, receivedResult):
# If keep alive, return OK
if "keepAlive" in receivedResult and receivedResult["keepAlive"]:
return S_OK()
# If idle read return
self.__trInOutLock.acquire()
try:
idleRead = self.__messageTransports[trid]["idleRead"]
except KeyError:
return S_ERROR("Transport %s unknown" % trid)
finally:
self.__trInOutLock.release()
if idleRead:
if receivedResult["Value"]:
gLogger.fatal("OOOops. Idle read has returned data!")
return S_OK()
if not receivedResult["Value"]:
self.__log.debug("Transport %s closed connection" % trid)
return self.removeTransport(trid)
# This is a message req/resp
msg = receivedResult["Value"]
# Valid message?
if "request" not in msg:
gLogger.warn("Received data does not seem to be a message !!!!")
return self.removeTransport(trid)
# Decide if it's a response or a request
if msg["request"]:
# If message has Id return ACK to received
if "id" in msg:
self.__sendResponse(trid, msg["id"], S_OK())
# Process msg
result = self.__processIncomingRequest(trid, msg)
else:
result = self.__processIncomingResponse(trid, msg)
# If error close the transport
if not result["OK"]:
gLogger.info("Closing transport because of error while processing message", result["Message"])
return self.removeTransport(trid)
return S_OK()
def __processIncomingRequest(self, trid, msg):
self.__trInOutLock.acquire()
try:
rcvCB = self.__messageTransports[trid]["cbReceiveMessage"]
except KeyError:
return S_ERROR("Transport %s unknown" % trid)
finally:
self.__trInOutLock.release()
if not rcvCB:
gLogger.fatal("Transport %s does not have a callback defined and a message arrived!" % trid)
return S_ERROR("No message was expected in for this transport")
# Check message has id and name
for requiredField in ["name"]:
if requiredField not in msg:
gLogger.error("Message does not have required field", requiredField)
return S_ERROR("Message does not have %s" % requiredField)
# Load message
if "attrs" in msg:
attrs = msg["attrs"]
if not isinstance(attrs, (tuple, list)):
return S_ERROR("Message args has to be a tuple or a list, not %s" % type(attrs))
else:
attrs = None
# Do we "unpack" or do we send the raw data to the callback?
if self.__useMessageObjects:
result = self.__msgFactory.createMessage(self.__messageTransports[trid]["svcName"], msg["name"], attrs)
if not result["OK"]:
return result
msgObj = result["Value"]
else:
msgObj = DummyMessage(msg)
# Is msg ok?
if not msgObj.isOK():
return S_ERROR("Messsage is invalid")
try:
# Callback it and return response
result = rcvCB(trid, msgObj)
if not isReturnStructure(result):
return S_ERROR("Request function does not return a result structure")
return result
except Exception as e:
# Whoops. Show exception and return
gLogger.exception("Exception while processing message %s" % msg["name"], lException=e)
return S_ERROR("Exception while processing message %s: %s" % (msg["name"], str(e)))
def __processIncomingResponse(self, trid, msg):
# This is a message response
for requiredField in ("id", "result"):
if requiredField not in msg:
gLogger.error("Message does not have required field", requiredField)
return S_ERROR("Message does not have %s" % requiredField)
if not isReturnStructure(msg["result"]):
return S_ERROR("Message response did not return a result structure")
return self.__notifyCallback(msg["id"], msg["result"])
# Sending functions
def __sendResponse(self, trid, msgId, msgResult):
msgResponse = {"request": False, "id": msgId, "result": msgResult}
self.__trPool.send(trid, S_OK(msgResponse))
def sendMessage(self, trid, msgObj):
if not msgObj.isOK():
return S_ERROR("Message is not ready to be sent")
result = self.__sendMessage(trid, msgObj)
if not result["OK"]:
self.removeTransport(trid)
return result
def __sendMessage(self, trid, msgObj):
if not self.__trPool.exists(trid):
return S_ERROR("Not transport with id %s defined for messaging" % trid)
msg = {"request": True, "name": msgObj.getName()}
attrs = msgObj.dumpAttrs()["Value"]
msg["attrs"] = attrs
waitForAck = msgObj.getWaitForAck()
if not waitForAck:
return self.__trPool.send(trid, S_OK(msg))
msgId = self.__generateMsgId()
msg["id"] = msgId
self.__generateMessageResponse(trid, msgId)
result = self.__trPool.send(trid, S_OK(msg))
# Lock and generate and wait
self.__callbacksLock.acquire()
try:
if not result["OK"]:
# Release lock and exit
self.__clearCallback(msgId)
return result
return self.__waitForMessageResponse(msgId)
finally:
self.__callbacksLock.release()
# Callback nightmare
# Lock needs to have been acquired before calling this function
def __generateMessageResponse(self, trid, msgId):
self.__callbacksLock.acquire()
try:
if msgId in self.__responseCallbacks:
return self.__responseCallbacks[msgId]
if trid not in self.__msgInTransport:
self.__msgInTransport[trid] = set()
self.__msgInTransport[trid].add(msgId)
self.__responseCallbacks[msgId] = {"creationTime": time.time(), "trid": trid}
return self.__responseCallbacks[msgId]
finally:
self.__callbacksLock.release()
# Lock needs to have been acquired before calling this function
def __waitForMessageResponse(self, msgId):
if msgId not in self.__responseCallbacks:
return S_ERROR("Invalid msg id")
respCallback = self.__responseCallbacks[msgId]
while "result" not in respCallback and time.time() - respCallback["creationTime"] < 30:
self.__callbacksLock.wait(30)
self.__clearCallback(msgId)
if "result" in respCallback:
return respCallback["result"]
return S_ERROR("Timeout while waiting for message ack")
def __clearCallback(self, msgId):
if msgId not in self.__responseCallbacks:
return False
trid = self.__responseCallbacks[msgId]["trid"]
self.__responseCallbacks.pop(msgId)
try:
self.__msgInTransport[trid].remove(msgId)
except KeyError:
pass
return True
# Lock needs to have been acquired before calling this function
def __setCallbackResult(self, msgId, result=False):
if msgId not in self.__responseCallbacks:
return False
self.__responseCallbacks[msgId]["result"] = result
return True
def __notifyCallback(self, msgId, msgResult):
self.__callbacksLock.acquire()
try:
if self.__setCallbackResult(msgId, msgResult):
self.__callbacksLock.notifyAll()
finally:
self.__callbacksLock.release()
return S_OK()
def removeTransport(self, trid, closeTransport=True):
# Delete from the message Transports
self.__trInOutLock.acquire()
try:
if trid not in self.__messageTransports:
return S_OK()
# Save the disconnect callback if it's there
if self.__messageTransports[trid]["cbDisconnect"]:
cbDisconnect = self.__messageTransports[trid]["cbDisconnect"]
else:
cbDisconnect = False
self.__messageTransports.pop(trid)
if closeTransport:
self.__trPool.close(trid)
finally:
self.__trInOutLock.release()
# Flush remaining messages
self.__callbacksLock.acquire()
try:
msgIds = False
if trid in self.__msgInTransport:
msgIds = set(self.__msgInTransport[trid])
self.__msgInTransport.pop(trid)
for msgId in msgIds:
self.__setCallbackResult(msgId, S_ERROR("Connection closed by peer"))
self.__callbacksLock.notifyAll()
finally:
self.__callbacksLock.release()
# Queue the disconnect CB if it's there
if cbDisconnect:
self.__threadPool.submit(cbDisconnect, trid)
return S_OK()
class MessageSender(object):
def __init__(self, serviceName, msgBroker):
self.__serviceName = serviceName
self.__msgBroker = msgBroker
def getServiceName(self):
return self.__serviceName
def sendMessage(self, trid, msgObj):
return self.__msgBroker.sendMessage(trid, msgObj)
def createMessage(self, msgName):
return self.__msgBroker.getMsgFactory().createMessage(self.__serviceName, msgName)
gMessageBroker = False
def getGlobalMessageBroker():
global gMessageBroker
if not gMessageBroker:
gMessageBroker = MessageBroker("GlobalMessageBroker", transportPool=getGlobalTransportPool())
return gMessageBroker
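# --- Illustrative sketch (not part of DIRAC): the ack-wait pattern used by
# __waitForMessageResponse/__notifyCallback above, reduced to its essentials.
# The sender registers a callback dict keyed by message id, blocks on a shared
# Condition with a timeout, and the receiver thread fills in "result" and
# notifies all waiters. Names below are made up for the demo.
if __name__ == "__main__":
    _cond = threading.Condition()
    _callbacks = {"msg-1": {"creationTime": time.time()}}

    def _responder():
        time.sleep(0.1)
        _cond.acquire()
        try:
            _callbacks["msg-1"]["result"] = "ACK"
            _cond.notifyAll()
        finally:
            _cond.release()

    threading.Thread(target=_responder).start()
    _cond.acquire()
    try:
        _entry = _callbacks["msg-1"]
        # Same loop shape as __waitForMessageResponse: re-check after wake-ups.
        while "result" not in _entry and time.time() - _entry["creationTime"] < 30:
            _cond.wait(30)
    finally:
        _cond.release()
    print(_entry.get("result", "Timeout while waiting for message ack"))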
|
ic-hep/DIRAC
|
src/DIRAC/Core/DISET/private/MessageBroker.py
|
Python
|
gpl-3.0
| 16,514
|
[
"DIRAC"
] |
3c0fa205e5288ff8abb9eb60f371b0960d6b5ccfd3fad1f0dac91db7af615547
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, The Horizomer Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from setuptools import setup, find_packages
__version__ = "0.0.1-dev"
classes = """
Development Status :: 2 - Pre-Alpha
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: Implementation :: CPython
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
"""
long_description = ("Horizomer: workflow for whole genome HGT detection.")
classifiers = [s.strip() for s in classes.split('\n') if s]
keywords = 'horizontal gene transfer, whole genome'
setup(name='horizomer',
version=__version__,
long_description=long_description,
license="BSD",
description='Horizomer',
classifiers=classifiers,
keywords=keywords,
author="Horizomer development team",
author_email="jenya.kopylov@gmail.com",
maintainer="Horizomer development team",
maintainer_email="qiyunzhu@gmail.com",
url='https://github.com/biocore/horizomer',
test_suite='nose.collector',
packages=find_packages(),
install_requires=[
'click >= 6.0',
'scikit-bio >= 0.5.1',
],
extras_require={'test': ["nose", "pep8", "flake8"],
'doc': ["Sphinx == 1.3.3"]},
)
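# Illustrative usage (not part of the original file): a development install
# pulling in the 'test' extras declared above would look like:
#   pip install -e ".[test]"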
|
qiyunzhu/horizomer
|
setup.py
|
Python
|
bsd-3-clause
| 1,948
|
[
"scikit-bio"
] |
8d1d4d9a2fd2c747cef12c6f91e7b4ad8664f10501ac0218779eccc1aabfe580
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This code is copied from https://github.com/vsvinayak/mnist-helper.
It requires SciPy to perform convolve2d.
Default parameters are modified.
"""
import numpy as np
import math
from scipy.signal import convolve2d
from deepy.utils import global_rand
def create_2d_gaussian(dim, sigma):
"""
This function creates a 2d gaussian kernel with the standard deviation
denoted by sigma
:param dim: integer denoting a side (1-d) of gaussian kernel
:param sigma: floating point indicating the standard deviation
:returns: a numpy 2d array
"""
# check if the dimension is odd
if dim % 2 == 0:
raise ValueError("Kernel dimension should be odd")
# initialize the kernel
kernel = np.zeros((dim, dim), dtype=np.float16)
# calculate the center point
center = dim/2
# calculate the variance
variance = sigma ** 2
# calculate the normalization coefficient
coeff = 1. / (2 * variance)
# create the kernel
for x in range(0, dim):
for y in range(0, dim):
x_val = abs(x - center)
y_val = abs(y - center)
numerator = x_val**2 + y_val**2
denom = 2*variance
kernel[x,y] = coeff * np.exp(-1. * numerator/denom)
return kernel/sum(sum(kernel))
def elastic_distortion(image, kernel_dim=21, sigma=6, alpha=30, negated=True):
"""
This method performs elastic transformations on an image by convolving
with a gaussian kernel.
:param image: a numpy nd array
:param kernel_dim: dimension (1-D) of the gaussian kernel
:param sigma: standard deviation of the kernel
:param alpha: a multiplicative factor for image after convolution
:param negated: a flag indicating whether the image is negated or not
:returns: a nd array transformed image
"""
# check if the image is a negated one
if not negated:
image = 255-image
# check if kernel dimension is odd
if kernel_dim % 2 == 0:
raise ValueError("Kernel dimension should be odd")
# create an empty image
result = np.zeros(image.shape)
# create random displacement fields
displacement_field_x = np.array([[global_rand.random_integers(-1, 1) for x in xrange(image.shape[0])] \
for y in xrange(image.shape[1])]) * alpha
displacement_field_y = np.array([[global_rand.random_integers(-1, 1) for x in xrange(image.shape[0])] \
for y in xrange(image.shape[1])]) * alpha
# create the gaussian kernel
kernel = create_2d_gaussian(kernel_dim, sigma)
# convolve the fields with the gaussian kernel
displacement_field_x = convolve2d(displacement_field_x, kernel)
displacement_field_y = convolve2d(displacement_field_y, kernel)
# make the distorted image by averaging each pixel value over the
# neighbouring four pixels, based on the displacement fields
for row in xrange(image.shape[1]):
for col in xrange(image.shape[0]):
low_ii = row + int(math.floor(displacement_field_x[row, col]))
high_ii = row + int(math.ceil(displacement_field_x[row, col]))
low_jj = col + int(math.floor(displacement_field_y[row, col]))
high_jj = col + int(math.ceil(displacement_field_y[row, col]))
if low_ii < 0 or low_jj < 0 or high_ii >= image.shape[1] -1 \
or high_jj >= image.shape[0] - 1:
continue
res = image[low_ii, low_jj]/4 + image[low_ii, high_jj]/4 + \
image[high_ii, low_jj]/4 + image[high_ii, high_jj]/4
result[row, col] = res
return result
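# --- Illustrative usage sketch (not part of the original module). Parameters
# mirror the defaults above; this needs deepy's global_rand and, given the
# xrange calls above, a Python 2 runtime.
if __name__ == "__main__":
    img = np.zeros((28, 28))
    img[10:18, 10:18] = 255.0  # a bright square on a dark background
    warped = elastic_distortion(img, kernel_dim=21, sigma=6, alpha=30)
    print warped.shape  # still (28, 28)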
|
ZhangAustin/deepy
|
deepy/utils/elastic_distortion.py
|
Python
|
mit
| 3,676
|
[
"Gaussian"
] |
a3b967a376e57eaec25a64c9a113bd22d4ffe699cd2567d9e1f0ef5b7cceb854
|
#
# This file is a part of the normalize python library
#
# normalize is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
#
# normalize is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
#
# You should have received a copy of the MIT license along with
# normalize. If not, refer to the upstream repository at
# http://github.com/hearsaycorp/normalize
#
from __future__ import absolute_import
import collections
import types
from normalize.coll import Collection
import normalize.exc as exc
from normalize.record import Record
from normalize.selector import FieldSelector
from normalize.selector import MultiFieldSelector
class Visitor(object):
"""The Visitor object represents a single recursive visit in progress. You
hopefully shouldn't have to sub-class this class for most use cases; just
VisitorPattern.
"""
def __init__(self, unpack_func, apply_func, collect_func, reduce_func,
apply_empty_slots=False, extraneous=False,
ignore_empty_string=False, ignore_none=True,
visit_filter=None, filter=None):
"""Create a new Visitor object. Generally called by a front-end class
method of :py:class:`VisitorPattern`
There are four positional arguments, which specify the particular
functions to be used during the visit. The important options from a
user of a visitor are the keyword arguments:
``apply_empty_slots=``\ *bool*
If set, then your ``apply`` method (or ``reverse``, etc) will
be called even if there is no corresponding value in the input.
Your method will receive the Exception as if it were the value.
``extraneous=``\ *bool*
Also call the apply method on properties marked *extraneous*.
False by default.
``ignore_empty_string=``\ *bool*
If the 'apply' function returns the empty string, treat it as
if the slot or object did not exist. ``False`` by default.
``ignore_none=``\ *bool*
If the 'apply' function returns ``None``, treat it as if the
slot or object did not exist. ``True`` by default.
``visit_filter=``\ *MultiFieldSelector*
This supplies an instance of
:py:class:`normalize.selector.MultiFieldSelector`, and
restricts the operation to the matched object fields. Can also
be specified as just ``filter=``
"""
self.unpack = unpack_func
self.apply = apply_func
self.collect = collect_func
self.reduce = reduce_func
self.apply_empty_slots = apply_empty_slots
self.extraneous = extraneous
self.ignore_empty_string = ignore_empty_string
self.ignore_none = ignore_none
if visit_filter is None:
visit_filter = filter
if isinstance(visit_filter, (MultiFieldSelector, types.NoneType)):
self.visit_filter = visit_filter
else:
self.visit_filter = MultiFieldSelector(*visit_filter)
self.seen = set() # TODO
self.cue = list()
def is_filtered(self, prop):
return (not self.extraneous and prop.extraneous) or (
self.visit_filter and not self.visit_filter[self.cue + [prop.name]]
)
@property
def field_selector(self):
return FieldSelector(self.cue)
def push(self, what):
self.cue.append(what)
def pop(self, what=None):
if what is not None:
assert(self.cue[-1] == what)
return self.cue.pop()
def copy(self):
"""Be sure to implement this method when sub-classing, otherwise you
will lose any specialization context."""
doppel = type(self)(
self.unpack, self.apply, self.collect, self.reduce,
apply_empty_slots=self.apply_empty_slots,
extraneous=self.extraneous,
ignore_empty_string=self.ignore_empty_string,
ignore_none=self.ignore_none,
visit_filter=self.visit_filter,
)
for x in self.cue:
doppel.push(x)
doppel.seen = self.seen
return doppel
class VisitorPattern(object):
"""Base Class for writing Record visitor pattern classes. These classes
are not instantiated, and consist purely of class methods.
There are three visitors supplied by default, which correspond to typical
use for IO (:py:meth:`normalize.visitor.VisitorPattern.visit` for output,
and :py:meth:`normalize.visitor.VisitorPattern.cast` for input), and for
providing a centralized type catalogue
(:py:meth:`normalize.visitor.VisitorPattern.reflect`).
============= =========== ============= ===================================
``visit`` ``cast`` ``reflect`` Description
============= =========== ============= ===================================
``unpack`` ``grok`` ``scantypes`` Defines how to get a property value
from the thing being walked, and a
generator for the collection.
``apply`` ``reverse`` ``propinfo`` Conversion for individual values
``aggregate`` ``collect`` ``itemtypes`` Combine collection results
``reduce`` ``produce`` ``typeinfo`` Combine apply results
============= =========== ============= ===================================
To customize what is emitted, sub-class ``VisitorPattern`` and override the
class methods of the conversion you are interested in. For many simple IO
use cases, you might only need to override ``apply`` and ``reverse``,
if that.
The versions for ``visit`` are documented the most thoroughly, as these are
the easiest to understand and the ones most users will be customizing. The
documentation for the other methods describes the differences between them
and their ``visit`` counterpart.
"""
Visitor = Visitor
@classmethod
def visit(cls, value, value_type=None, **kwargs):
"""A value visitor, which visits instances (typically), applies
:py:meth:`normalize.visitor.VisitorPattern.apply` to every attribute
slot, and returns the reduced result.
Like :py:func:`normalize.diff.diff`, this function accepts a series of
keyword arguments, which are passed through to
:py:class:`normalize.visitor.Visitor`.
This function also takes positional arguments:
``value=``\ *object*
The value to visit. Normally (but not always) a
:py:class:`normalize.record.Record` instance.
``value_type=``\ *RecordType*
This is the ``Record`` subclass to interpret ``value`` as. The
default is ``type(value)``. If you specify this, then the type
information on ``value`` is essentially ignored (with the
caveat mentioned below on :py:meth:`Visitor.map_prop`), and may
be a ``dict``, ``list``, etc.
``**kwargs``
Visitor options accepted by
:py:meth:`normalize.visitor.Visitor.__init__`.
"""
visitor = cls.Visitor(
cls.unpack, cls.apply, cls.aggregate, cls.reduce,
**kwargs)
if not value_type:
value_type = type(value)
if not issubclass(value_type, Record):
raise TypeError(
"Cannot visit %s instance" % value_type.__name__
)
return cls.map(visitor, value, value_type)
@classmethod
def unpack(cls, value, value_type, visitor):
"""Unpack a value during a 'visit'
args:
``value=``\ *object*
The instance being visited
``value_type=``\ *RecordType*
The expected type of the instance
``visitor=``\ *Visitor*
The context/options
returns a tuple with two items:
``get_prop=``\ *function*
This function should take a
:py:class:`normalize.property.Property` instance, and return
the slot from the value, or raise ``AttributeError`` or
``KeyError`` if the slot is empty. Returning nothing means
that the item has no properties to unpack; ie, it's an opaque
type.
``get_item=``\ *generator*
This generator should return the tuple protocol used by
:py:class:`normalize.coll.Collection`: (K, V) where K can be an
ascending integer (for sequences), V (for sets), or something
hashable like a string (for dictionaries/maps)
"""
if issubclass(value_type, Collection):
try:
generator = value.itertuples()
except AttributeError:
if isinstance(value, value_type.colltype):
generator = value_type.coll_to_tuples(value)
else:
raise exc.VisitorUnpackError(
passed=value,
colltype=value_type.colltype.__name__,
context=visitor,
)
else:
generator = None
if issubclass(value_type, Record):
def propget(prop):
return prop.__get__(value)
else:
propget = None
return propget, generator
@classmethod
def apply(cls, value, prop, visitor):
"""'apply' is a general place to put a function which is called on
every extant record slot. This is usually the most important function
to implement when sub-classing.
The default implementation passes through the slot value as-is, but
expected exceptions are converted to ``None``.
args:
``value=``\ *value*\ \|\ *AttributeError*\ \|\ *KeyError*
This is the value currently in the slot, or the Record itself
with the ``apply_records`` visitor option. *AttributeError*
will only be received if you passed ``apply_empty_slots``, and
*KeyError* will be passed if ``parent_obj`` is a ``dict`` (see
:py:meth:`Visitor.map_prop` for details about when this might
happen)
``prop=``\ *Property*\ \|\ ``None``
This is the :py:class:`normalize.Property` instance which
represents the field being traversed.
This can be ``None`` when being applied over Collection
instances, where the type of the contents is not a Record.
``visitor=``\ *Visitor*
This object can be used to inspect parameters of the current
run, such as options which control which kinds of values are
visited, which fields are being visited and where the function
is in relation to the starting point.
"""
return (
None if isinstance(value, (AttributeError, KeyError)) else
value
)
@classmethod
def aggregate(cls, mapped_coll_generator, coll_type, visitor):
"""Hook called for each normalize.coll.Collection, after mapping over
each of the items in the collection.
The default implementation calls
:py:meth:`normalize.coll.Collection.tuples_to_coll` with
``coerce=False``, which just re-assembles the collection into a native
python collection type of the same type of the input collection.
args:
``mapped_coll_generator=`` *generator func*
Generator which returns (key, value) pairs (like
:py:meth:`normalize.coll.Collection.itertuples`)
``coll_type=``\ *CollectionType*
This is the :py:class:`normalize.coll.Collection`-derived
*class* which is currently being reduced.
``visitor=``\ *Visitor*
Context/options object
"""
return coll_type.tuples_to_coll(mapped_coll_generator, coerce=False)
@classmethod
def reduce(cls, mapped_props, aggregated, value_type, visitor):
"""This reduction is called to combine the mapped slot and collection
item values into a single value for return.
The default implementation tries to behave naturally; you'll almost
always get a dict back when mapping over a record, and list or some
other collection when mapping over collections.
If the collection has additional properties which are not ignored (eg,
not extraneous, not filtered), then the result will be a dictionary
with the results of mapping the properties, and a 'values' key will be
added with the result of mapping the items in the collection.
args:
``mapped_props=``\ *generator*
Iterating over this generator will yield K, V pairs, where K is
**the Property object** and V is the mapped value.
``aggregated=``\ *object*
This contains whatever ``aggregate`` returned, normally a list.
``value_type=``\ *RecordType*
This is the type which is currently being reduced.
A :py:class:`normalize.record.Record` subclass
``visitor=``\ *Visitor*
Context/options object.
"""
reduced = None
if mapped_props:
reduced = dict((k.name, v) for k, v in mapped_props)
if issubclass(value_type, Collection) and aggregated is not None:
if all(visitor.is_filtered(prop) for prop in
value_type.properties.values()):
reduced = aggregated
else:
if reduced.get("values", False):
raise exc.VisitorTooSimple(
fs=visitor.field_selector,
value_type_name=value_type.__name__,
visitor=cls.__name__,
)
else:
reduced['values'] = aggregated
return reduced
# CAST versions
@classmethod
def cast(cls, value_type, value, visitor=None, **kwargs):
"""Cast is for visitors where you are visiting some random data
structure (perhaps returned by a previous ``VisitorPattern.visit()``
operation), and you want to convert back to the value type.
This function also takes positional arguments:
``value_type=``\ *RecordType*
The type to cast to.
``value=``\ *object*
``visitor=``\ *Visitor.Options*
Specifies the visitor options, which customizes the descent
and reduction.
"""
if visitor is None:
visitor = cls.Visitor(
cls.grok, cls.reverse, cls.collect, cls.produce,
**kwargs)
return cls.map(visitor, value, value_type)
# hooks for types which define what is considered acceptable input for
# given contexts during 'cast'
#
# note: Collection.coll_to_tuples will generally allow you to pass
# collections as a list or a dict with the *values* being the members of
# the set, so this code allows this.
grok_mapping_types = collections.Mapping
grok_coll_types = (collections.Sequence, collections.Mapping)
@classmethod
def grok(cls, value, value_type, visitor):
"""Like :py:meth:`normalize.visitor.VisitorPattern.unpack` but called
for ``cast`` operations. Expects to work with dictionaries and lists
instead of Record objects.
Reverses the transform performed in
:py:meth:`normalize.visitor.VisitorPattern.reduce` for collections with
properties.
If you pass tuples to ``isa`` of your Properties, then you might need
to override this function and throw ``TypeError`` if the passed
``value_type`` is not appropriate for ``value``.
"""
is_coll = issubclass(value_type, Collection)
is_record = issubclass(value_type, Record) and any(
not visitor.is_filtered(prop) for prop in
value_type.properties.values()
)
if is_record and not isinstance(value, cls.grok_mapping_types):
raise exc.VisitorGrokRecordError(
val=repr(value),
record_type=value_type,
record_type_name=value_type.__name__,
field_selector=visitor.field_selector,
)
values = value
if is_coll and is_record:
try:
if "values" in value:
values = value['values']
except TypeError:
pass
generator = None
if is_coll:
if not isinstance(values, cls.grok_coll_types):
raise exc.VisitorGrokCollectionError(
val=repr(values),
record_type=value_type,
record_type_name=value_type.__name__,
field_selector=visitor.field_selector,
)
generator = value_type.coll_to_tuples(values)
propget = None
if is_record:
def propget(prop):
return value[prop.name]
return propget, generator
@classmethod
def reverse(cls, value, prop, visitor):
"""Like :py:meth:`normalize.visitor.VisitorPattern.apply` but called
for ``cast`` operations. The default implementation passes through but
squashes exceptions, just like apply.
"""
return (
None if isinstance(value, (AttributeError, KeyError)) else
value
)
@classmethod
def collect(cls, mapped_coll_generator, coll_type, visitor):
"""Like :py:meth:`normalize.visitor.VisitorPattern.aggregate`, but
coerces the mapped values to the collection item type on the way
through.
"""
return coll_type.tuples_to_coll(mapped_coll_generator)
@classmethod
def produce(cls, mapped_props, aggregated, value_type, visitor):
"""Like :py:meth:`normalize.visitor.VisitorPattern.reduce`, but
constructs instances rather than returning plain dicts.
"""
kwargs = {} if not mapped_props else dict(
(k.name, v) for k, v in mapped_props
)
if issubclass(value_type, Collection):
kwargs['values'] = aggregated
return value_type(**kwargs)
# versions which walk type objects
@classmethod
def reflect(cls, X, **kwargs):
"""Reflect is for visitors where you are exposing some information
about the types reachable from a starting type to an external system.
For example, a front-end, a REST URL router and documentation
framework, an avro schema definition, etc.
X can be a type or an instance.
This API should be considered **experimental**
"""
if isinstance(X, type):
value = None
value_type = X
else:
value = X
value_type = type(X)
if not issubclass(value_type, Record):
raise TypeError("Cannot reflect on %s" % value_type.__name__)
visitor = cls.Visitor(
cls.scantypes, cls.propinfo, cls.itemtypes,
cls.typeinfo,
**kwargs)
return cls.map(visitor, value, value_type)
@classmethod
def scantypes(cls, value, value_type, visitor):
"""Like :py:meth:`normalize.visitor.VisitorPattern.unpack`, but
returns a getter which just returns the property, and a collection
getter which returns a set with a single item in it.
"""
item_type_generator = None
if issubclass(value_type, Collection):
def get_item_types():
if isinstance(value_type.itemtype, tuple):
# not actually supported by Collection yet, but whatever
for vt in value_type.itemtype:
yield (vt, vt)
else:
yield value_type.itemtype, value_type.itemtype
item_type_generator = get_item_types()
propget = None
if issubclass(value_type, Record):
def propget(prop):
return prop
return propget, item_type_generator
@classmethod
def propinfo(cls, value, prop, visitor):
"""Like :py:meth:`normalize.visitor.VisitorPattern.apply`, but takes a
property and returns a dict with some basic info. The default
implementation returns just the name of the property and the type in
here.
"""
if not prop:
return {"name": value.__name__}
rv = {"name": prop.name}
if prop.valuetype:
if isinstance(prop.valuetype, tuple):
rv['type'] = [typ.__name__ for typ in prop.valuetype]
else:
rv['type'] = prop.valuetype.__name__
return rv
@classmethod
def itemtypes(cls, mapped_types, coll_type, visitor):
"""Like :py:meth:`normalize.visitor.VisitorPattern.aggregate`, but
returns the mapped item type (or a list of them, if there are several).
This will normally only get called with a single type.
"""
rv = list(v for k, v in mapped_types)
return rv[0] if len(rv) == 1 else rv
@classmethod
def typeinfo(cls, propinfo, type_parameters, value_type, visitor):
"""Like :py:meth:`normalize.visitor.VisitorPattern.reduce`, but returns
the final dictionary to correspond to a type definition. The default
implementation returns just the type name, the list of properties, and
the item type for collections.
"""
propspec = dict((prop.name, info) for prop, info in propinfo)
ts = {'name': value_type.__name__}
if propspec:
ts['properties'] = propspec
if type_parameters:
ts['itemtype'] = type_parameters
return ts
# sentinel iteration stopper
class StopVisiting(object):
"""This sentinel value may be returned by a custom implementation of
``unpack`` (or ``grok``, or ``scantypes``) to indicate that the descent
should be stopped immediately, instead of proceeding to descend into
sub-properties. It can be passed a literal value (as its single constructor
argument) to use as the mapped value, or the class itself can be returned to
indicate no mapped value."""
return_value = None
def __init__(self, return_value):
self.return_value = return_value
# methods-in-common
@classmethod
def map(cls, visitor, value, value_type):
"""The common visitor API used by all three visitor implementations.
args:
``visitor=``\ *Visitor*
Visitor options instance: contains the callbacks to use to
implement the visiting, as well as traversal & filtering
options.
``value=``\ *Object*
Object being visited
``value_type=``\ *RecordType*
The type object controlling the visiting.
"""
unpacked = visitor.unpack(value, value_type, visitor)
if unpacked == cls.StopVisiting or isinstance(
unpacked, cls.StopVisiting
):
return unpacked.return_value
if isinstance(unpacked, tuple):
props, coll = unpacked
else:
props, coll = unpacked, None
# recurse into values for collections
if coll:
coll_map_generator = cls.map_collection(
visitor, coll, value_type,
)
mapped_coll = visitor.collect(
coll_map_generator, value_type, visitor,
)
else:
mapped_coll = None
# recurse into regular properties
mapped_props = None
if props:
mapped_props = cls.map_record(visitor, props, value_type)
elif mapped_coll is None:
return visitor.apply(value, None, visitor)
return visitor.reduce(
mapped_props, mapped_coll, value_type, visitor,
)
@classmethod
def map_record(cls, visitor, get_value, record_type):
rv = visitor.copy() # expensive?
for name, prop in record_type.properties.iteritems():
if rv.is_filtered(prop):
continue
rv.push(name)
try:
value = get_value(prop)
except AttributeError as ae:
value = ae
except KeyError as ke:
value = ke
except Exception as e:
rv.pop(name)
raise exc.VisitorPropError(
exception=e,
prop=prop,
prop_name=name,
record_type_name=record_type.__name__,
fs=rv.field_selector,
)
if visitor.apply_empty_slots or not isinstance(
value, (KeyError, AttributeError),
):
mapped = cls.map_prop(rv, value, prop)
if mapped is None and rv.ignore_none:
pass
elif mapped == "" and rv.ignore_empty_string:
pass
else:
yield prop, mapped
rv.pop(name)
@classmethod
def map_collection(cls, visitor, coll_generator, coll_type):
rv = visitor.copy()
for key, value in coll_generator:
rv.push(key)
mapped = cls.map(rv, value, coll_type.itemtype)
rv.pop(key)
if mapped is None and visitor.ignore_none:
pass
elif mapped == "" and visitor.ignore_empty_string:
pass
else:
yield key, mapped
@classmethod
def map_prop(cls, visitor, value, prop):
mapped = None
# XXX - this fallback here is type-unsafe, and exists only for
# those who don't declare their isa= for complex object types.
value_type = prop.valuetype or type(value)
if isinstance(value_type, tuple):
mapped = cls.map_type_union(
visitor, value, value_type, prop,
)
elif issubclass(value_type, Record):
mapped = cls.map(visitor, value, value_type)
else:
mapped = visitor.apply(value, prop, visitor)
return mapped
@classmethod
def map_type_union(cls, visitor, value, type_tuple, prop):
# This corner-case method applies when visiting a value and
# encountering a type union in the ``Property.valuetype`` field.
#
# this code has the same problem that record_id does; that is, it
# doesn't know which of the type union the value is.
#
# the solution this function uses is to try all of them, until one of
# them returns something logically true. Handlers (ie, unpack/grok)
# can also protest by raising TypeError, and the next one will be
# tried.
record_types = []
matching_record_types = []
for value_type in type_tuple:
if issubclass(value_type, Record):
record_types.append(value_type)
# XXX - this test here should probably be a per-visitor
# hook, as it only really applies to 'visit', not 'grok'
if isinstance(value, value_type):
matching_record_types.append(value_type)
mapped = None
if matching_record_types:
for value_type in matching_record_types:
try:
mapped = cls.map(visitor, value, value_type)
except TypeError:
pass
else:
if mapped:
break
else:
for value_type in record_types:
try:
mapped = cls.map(visitor, value, value_type)
except TypeError:
pass
else:
# this could also be the wrong thing when mapping
# over types.
if mapped:
break
if not mapped:
mapped = visitor.apply(value, prop, visitor)
return mapped
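# --- Illustrative sketch (not part of normalize): a minimal VisitorPattern
# subclass. For simple output visitors, overriding apply() alone is often
# enough, as the VisitorPattern docstring notes. Point and its properties are
# hypothetical; Record and Property are normalize's own classes.
#
#     from normalize import Record, Property
#
#     class Stringify(VisitorPattern):
#         @classmethod
#         def apply(cls, value, prop, visitor):
#             return None if value is None else str(value)
#
#     class Point(Record):
#         x = Property(isa=int)
#         y = Property(isa=int)
#
#     Stringify.visit(Point(x=1, y=2))  # -> {'x': '1', 'y': '2'}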
|
samv/normalize
|
normalize/visitor.py
|
Python
|
mit
| 28,858
|
[
"VisIt"
] |
62bf8a1c942ff67e7f33d2bede6b8083f40cf1e95c1dd213db2326d9cb7d0454
|
# encoding: utf-8
"""
Prefiltering components.
Prefilters transform user input before it is exec'd by Python. These
transforms are used to implement additional syntax such as !ls and %magic.
Authors:
* Brian Granger
* Fernando Perez
* Dan Milstein
* Ville Vainio
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from keyword import iskeyword
import re
from IPython.core.autocall import IPyAutocall
from IPython.config.configurable import Configurable
from IPython.core.inputsplitter import (
ESC_MAGIC,
ESC_QUOTE,
ESC_QUOTE2,
ESC_PAREN,
)
from IPython.core.macro import Macro
from IPython.core.splitinput import LineInfo
from IPython.utils.traitlets import (
List, Integer, Unicode, CBool, Bool, Instance, CRegExp
)
#-----------------------------------------------------------------------------
# Global utilities, errors and constants
#-----------------------------------------------------------------------------
class PrefilterError(Exception):
pass
# RegExp to identify potential function names
re_fun_name = re.compile(r'[a-zA-Z_]([a-zA-Z0-9_.]*) *$')
# RegExp to exclude strings with this start from autocalling. In
# particular, all binary operators should be excluded, so that if foo is
# callable, foo OP bar doesn't become foo(OP bar), which is invalid. The
# characters '!=()' don't need to be checked for, as the checkPythonChars
# routine explicitly does so, to catch direct calls and rebindings of
# existing names.
# Warning: the '-' HAS TO BE AT THE END of the first group, otherwise
# it affects the rest of the group in square brackets.
re_exclude_auto = re.compile(r'^[,&^\|\*/\+-]'
r'|^is |^not |^in |^and |^or ')
# try to catch also methods for stuff in lists/tuples/dicts: off
# (experimental). For this to work, the line_split regexp would need
# to be modified so it wouldn't break things at '['. That line is
# nasty enough that I shouldn't change it until I can test it _well_.
#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
# Handler Check Utilities
def is_shadowed(identifier, ip):
"""Is the given identifier defined in one of the namespaces which shadow
the alias and magic namespaces? Note that an identifier is different
than ifun, because it can not contain a '.' character."""
# This is much safer than calling ofind, which can change state
return (identifier in ip.user_ns \
or identifier in ip.user_global_ns \
or identifier in ip.ns_table['builtin']\
or iskeyword(identifier))
#-----------------------------------------------------------------------------
# Main Prefilter manager
#-----------------------------------------------------------------------------
class PrefilterManager(Configurable):
"""Main prefilter component.
The IPython prefilter is run on all user input before it is executed. The
prefilter consumes lines of input and produces transformed lines of
input.
The implementation consists of two phases:
1. Transformers
2. Checkers and handlers
Over time, we plan on deprecating the checkers and handlers and doing
everything in the transformers.
The transformers are instances of :class:`PrefilterTransformer` and have
a single method :meth:`transform` that takes a line and returns a
transformed line. The transformation can be accomplished using any
tool, but our current ones use regular expressions for speed.
After all the transformers have been run, the line is fed to the checkers,
which are instances of :class:`PrefilterChecker`. The line is passed to
the :meth:`check` method, which either returns `None` or a
:class:`PrefilterHandler` instance. If `None` is returned, the other
checkers are tried. If an :class:`PrefilterHandler` instance is returned,
the line is passed to the :meth:`handle` method of the returned
handler and no further checkers are tried.
Both transformers and checkers have a `priority` attribute, that determines
the order in which they are called. Smaller priorities are tried first.
Both transformers and checkers also have `enabled` attribute, which is
a boolean that determines if the instance is used.
Users or developers can change the priority or enabled attribute of
transformers or checkers, but they must call the :meth:`sort_checkers`
or :meth:`sort_transformers` method after changing the priority.
"""
multi_line_specials = CBool(True, config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
def __init__(self, shell=None, **kwargs):
super(PrefilterManager, self).__init__(shell=shell, **kwargs)
self.shell = shell
self.init_transformers()
self.init_handlers()
self.init_checkers()
#-------------------------------------------------------------------------
# API for managing transformers
#-------------------------------------------------------------------------
def init_transformers(self):
"""Create the default transformers."""
self._transformers = []
for transformer_cls in _default_transformers:
transformer_cls(
shell=self.shell, prefilter_manager=self, parent=self
)
def sort_transformers(self):
"""Sort the transformers by priority.
This must be called after the priority of a transformer is changed.
The :meth:`register_transformer` method calls this automatically.
"""
self._transformers.sort(key=lambda x: x.priority)
@property
def transformers(self):
"""Return a list of checkers, sorted by priority."""
return self._transformers
def register_transformer(self, transformer):
"""Register a transformer instance."""
if transformer not in self._transformers:
self._transformers.append(transformer)
self.sort_transformers()
def unregister_transformer(self, transformer):
"""Unregister a transformer instance."""
if transformer in self._transformers:
self._transformers.remove(transformer)
#-------------------------------------------------------------------------
# API for managing checkers
#-------------------------------------------------------------------------
def init_checkers(self):
"""Create the default checkers."""
self._checkers = []
for checker in _default_checkers:
checker(
shell=self.shell, prefilter_manager=self, parent=self
)
def sort_checkers(self):
"""Sort the checkers by priority.
This must be called after the priority of a checker is changed.
The :meth:`register_checker` method calls this automatically.
"""
self._checkers.sort(key=lambda x: x.priority)
@property
def checkers(self):
"""Return a list of checkers, sorted by priority."""
return self._checkers
def register_checker(self, checker):
"""Register a checker instance."""
if checker not in self._checkers:
self._checkers.append(checker)
self.sort_checkers()
def unregister_checker(self, checker):
"""Unregister a checker instance."""
if checker in self._checkers:
self._checkers.remove(checker)
#-------------------------------------------------------------------------
# API for managing handlers
#-------------------------------------------------------------------------
def init_handlers(self):
"""Create the default handlers."""
self._handlers = {}
self._esc_handlers = {}
for handler in _default_handlers:
handler(
shell=self.shell, prefilter_manager=self, parent=self
)
@property
def handlers(self):
"""Return a dict of all the handlers."""
return self._handlers
def register_handler(self, name, handler, esc_strings):
"""Register a handler instance by name with esc_strings."""
self._handlers[name] = handler
for esc_str in esc_strings:
self._esc_handlers[esc_str] = handler
def unregister_handler(self, name, handler, esc_strings):
"""Unregister a handler instance by name with esc_strings."""
try:
del self._handlers[name]
except KeyError:
pass
for esc_str in esc_strings:
h = self._esc_handlers.get(esc_str)
if h is handler:
del self._esc_handlers[esc_str]
def get_handler_by_name(self, name):
"""Get a handler by its name."""
return self._handlers.get(name)
def get_handler_by_esc(self, esc_str):
"""Get a handler by its escape string."""
return self._esc_handlers.get(esc_str)
#-------------------------------------------------------------------------
# Main prefiltering API
#-------------------------------------------------------------------------
def prefilter_line_info(self, line_info):
"""Prefilter a line that has been converted to a LineInfo object.
This implements the checker/handler part of the prefilter pipe.
"""
# print "prefilter_line_info: ", line_info
handler = self.find_handler(line_info)
return handler.handle(line_info)
def find_handler(self, line_info):
"""Find a handler for the line_info by trying checkers."""
for checker in self.checkers:
if checker.enabled:
handler = checker.check(line_info)
if handler:
return handler
return self.get_handler_by_name('normal')
def transform_line(self, line, continue_prompt):
"""Calls the enabled transformers in order of increasing priority."""
for transformer in self.transformers:
if transformer.enabled:
line = transformer.transform(line, continue_prompt)
return line
def prefilter_line(self, line, continue_prompt=False):
"""Prefilter a single input line as text.
This method prefilters a single line of text by calling the
transformers and then the checkers/handlers.
"""
# print "prefilter_line: ", line, continue_prompt
# All handlers *must* return a value, even if it's blank ('').
# save the line away in case we crash, so the post-mortem handler can
# record it
self.shell._last_input_line = line
if not line:
# Return immediately on purely empty lines, so that if the user
# previously typed some whitespace that started a continuation
# prompt, he can break out of that loop with just an empty line.
# This is how the default python prompt works.
return ''
# At this point, we invoke our transformers.
if not continue_prompt or (continue_prompt and self.multi_line_specials):
line = self.transform_line(line, continue_prompt)
# Now we compute line_info for the checkers and handlers
line_info = LineInfo(line, continue_prompt)
# the input history needs to track even empty lines
stripped = line.strip()
normal_handler = self.get_handler_by_name('normal')
if not stripped:
return normal_handler.handle(line_info)
# special handlers are only allowed for single line statements
if continue_prompt and not self.multi_line_specials:
return normal_handler.handle(line_info)
prefiltered = self.prefilter_line_info(line_info)
# print "prefiltered line: %r" % prefiltered
return prefiltered
def prefilter_lines(self, lines, continue_prompt=False):
"""Prefilter multiple input lines of text.
This is the main entry point for prefiltering multiple lines of
input. This simply calls :meth:`prefilter_line` for each line of
input.
This covers cases where there are multiple lines in the user entry,
which is the case when the user goes back to a multiline history
entry and presses enter.
"""
llines = lines.rstrip('\n').split('\n')
# We can get multiple lines in one shot, where multiline input 'blends'
# into one line, in cases like recalling from the readline history
# buffer. We need to make sure that in such cases, we correctly
# communicate downstream which line is first and which are continuation
# ones.
if len(llines) > 1:
out = '\n'.join([self.prefilter_line(line, lnum>0)
for lnum, line in enumerate(llines) ])
else:
out = self.prefilter_line(llines[0], continue_prompt)
return out
#-----------------------------------------------------------------------------
# Prefilter transformers
#-----------------------------------------------------------------------------
class PrefilterTransformer(Configurable):
"""Transform a line of user input."""
priority = Integer(100, config=True)
# Transformers don't currently use shell or prefilter_manager, but as we
# move away from checkers and handlers, they will need them.
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager')
enabled = Bool(True, config=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterTransformer, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_transformer(self)
def transform(self, line, continue_prompt):
"""Transform a line, returning the new one."""
return None
def __repr__(self):
return "<%s(priority=%r, enabled=%r)>" % (
self.__class__.__name__, self.priority, self.enabled)
#-----------------------------------------------------------------------------
# Prefilter checkers
#-----------------------------------------------------------------------------
class PrefilterChecker(Configurable):
"""Inspect an input line and return a handler for that line."""
priority = Integer(100, config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager')
enabled = Bool(True, config=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterChecker, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_checker(self)
def check(self, line_info):
"""Inspect line_info and return a handler instance or None."""
return None
def __repr__(self):
return "<%s(priority=%r, enabled=%r)>" % (
self.__class__.__name__, self.priority, self.enabled)
class EmacsChecker(PrefilterChecker):
priority = Integer(100, config=True)
enabled = Bool(False, config=True)
def check(self, line_info):
"Emacs ipython-mode tags certain input lines."
if line_info.line.endswith('# PYTHON-MODE'):
return self.prefilter_manager.get_handler_by_name('emacs')
else:
return None
class MacroChecker(PrefilterChecker):
priority = Integer(250, config=True)
def check(self, line_info):
obj = self.shell.user_ns.get(line_info.ifun)
if isinstance(obj, Macro):
return self.prefilter_manager.get_handler_by_name('macro')
else:
return None
class IPyAutocallChecker(PrefilterChecker):
priority = Integer(300, config=True)
def check(self, line_info):
"Instances of IPyAutocall in user_ns get autocalled immediately"
obj = self.shell.user_ns.get(line_info.ifun, None)
if isinstance(obj, IPyAutocall):
obj.set_ip(self.shell)
return self.prefilter_manager.get_handler_by_name('auto')
else:
return None
class AssignmentChecker(PrefilterChecker):
priority = Integer(600, config=True)
def check(self, line_info):
"""Check to see if user is assigning to a var for the first time, in
which case we want to avoid any sort of automagic / autocall games.
This allows users to assign true python variables to names that
would otherwise be treated as aliases or magics (the magic/alias
systems always take second seat to true python code). E.g. ls='hi',
or ls,that=1,2"""
if line_info.the_rest:
if line_info.the_rest[0] in '=,':
return self.prefilter_manager.get_handler_by_name('normal')
else:
return None
class AutoMagicChecker(PrefilterChecker):
priority = Integer(700, config=True)
def check(self, line_info):
"""If the ifun is magic, and automagic is on, run it. Note: normal,
non-auto magic would already have been triggered via '%' in
check_esc_chars. This just checks for automagic. Also, before
triggering the magic handler, make sure that there is nothing in the
user namespace which could shadow it."""
if not self.shell.automagic or not self.shell.find_magic(line_info.ifun):
return None
# We have a likely magic method. Make sure we should actually call it.
if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
return None
head = line_info.ifun.split('.',1)[0]
if is_shadowed(head, self.shell):
return None
return self.prefilter_manager.get_handler_by_name('magic')
class PythonOpsChecker(PrefilterChecker):
priority = Integer(900, config=True)
def check(self, line_info):
"""If the 'rest' of the line begins with a function call or pretty much
any python operator, we should simply execute the line (regardless of
whether or not there's a possible autocall expansion). This avoids
spurious (and very confusing) getattr() accesses."""
if line_info.the_rest and line_info.the_rest[0] in '!=()<>,+*/%^&|':
return self.prefilter_manager.get_handler_by_name('normal')
else:
return None
class AutocallChecker(PrefilterChecker):
priority = Integer(1000, config=True)
function_name_regexp = CRegExp(re_fun_name, config=True,
help="RegExp to identify potential function names.")
exclude_regexp = CRegExp(re_exclude_auto, config=True,
help="RegExp to exclude strings with this start from autocalling.")
def check(self, line_info):
"Check if the initial word/function is callable and autocall is on."
if not self.shell.autocall:
return None
oinfo = line_info.ofind(self.shell) # This can mutate state via getattr
if not oinfo['found']:
return None
if callable(oinfo['obj']) \
and (not self.exclude_regexp.match(line_info.the_rest)) \
and self.function_name_regexp.match(line_info.ifun):
return self.prefilter_manager.get_handler_by_name('auto')
else:
return None
#-----------------------------------------------------------------------------
# Prefilter handlers
#-----------------------------------------------------------------------------
class PrefilterHandler(Configurable):
handler_name = Unicode('normal')
esc_strings = List([])
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager')
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterHandler, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_handler(
self.handler_name,
self,
self.esc_strings
)
def handle(self, line_info):
# print "normal: ", line_info
"""Handle normal input lines. Use as a template for handlers."""
# With autoindent on, we need some way to exit the input loop, and I
# don't want to force the user to have to backspace all the way to
# clear the line. The rule will be in this case, that either two
# lines of pure whitespace in a row, or a line of pure whitespace but
# of a size different to the indent level, will exit the input loop.
line = line_info.line
continue_prompt = line_info.continue_prompt
if (continue_prompt and
self.shell.autoindent and
line.isspace() and
0 < abs(len(line) - self.shell.indent_current_nsp) <= 2):
line = ''
return line
def __str__(self):
return "<%s(name=%s)>" % (self.__class__.__name__, self.handler_name)
class MacroHandler(PrefilterHandler):
handler_name = Unicode("macro")
def handle(self, line_info):
obj = self.shell.user_ns.get(line_info.ifun)
pre_space = line_info.pre_whitespace
line_sep = "\n" + pre_space
return pre_space + line_sep.join(obj.value.splitlines())
class MagicHandler(PrefilterHandler):
handler_name = Unicode('magic')
esc_strings = List([ESC_MAGIC])
def handle(self, line_info):
"""Execute magic functions."""
ifun = line_info.ifun
the_rest = line_info.the_rest
cmd = '%sget_ipython().magic(%r)' % (line_info.pre_whitespace,
(ifun + " " + the_rest))
return cmd
class AutoHandler(PrefilterHandler):
handler_name = Unicode('auto')
esc_strings = List([ESC_PAREN, ESC_QUOTE, ESC_QUOTE2])
def handle(self, line_info):
"""Handle lines which can be auto-executed, quoting if requested."""
line = line_info.line
ifun = line_info.ifun
the_rest = line_info.the_rest
pre = line_info.pre
esc = line_info.esc
continue_prompt = line_info.continue_prompt
obj = line_info.ofind(self.shell)['obj']
#print 'pre <%s> ifun <%s> rest <%s>' % (pre,ifun,the_rest) # dbg
# This should only be active for single-line input!
if continue_prompt:
return line
force_auto = isinstance(obj, IPyAutocall)
# User objects sometimes raise exceptions on attribute access other
# than AttributeError (we've seen it in the past), so it's safest to be
# ultra-conservative here and catch all.
try:
auto_rewrite = obj.rewrite
except Exception:
auto_rewrite = True
if esc == ESC_QUOTE:
# Auto-quote splitting on whitespace
newcmd = '%s("%s")' % (ifun,'", "'.join(the_rest.split()) )
elif esc == ESC_QUOTE2:
# Auto-quote whole string
newcmd = '%s("%s")' % (ifun,the_rest)
elif esc == ESC_PAREN:
newcmd = '%s(%s)' % (ifun,",".join(the_rest.split()))
else:
# Auto-paren.
if force_auto:
# Don't rewrite if it is already a call.
do_rewrite = not the_rest.startswith('(')
else:
if not the_rest:
# We only apply it to argument-less calls if the autocall
# parameter is set to 2.
do_rewrite = (self.shell.autocall >= 2)
elif the_rest.startswith('[') and hasattr(obj, '__getitem__'):
# Don't autocall in this case: item access for an object
# which is BOTH callable and implements __getitem__.
do_rewrite = False
else:
do_rewrite = True
# Figure out the rewritten command
if do_rewrite:
if the_rest.endswith(';'):
newcmd = '%s(%s);' % (ifun.rstrip(),the_rest[:-1])
else:
newcmd = '%s(%s)' % (ifun.rstrip(), the_rest)
else:
normal_handler = self.prefilter_manager.get_handler_by_name('normal')
return normal_handler.handle(line_info)
# Display the rewritten call
if auto_rewrite:
self.shell.auto_rewrite_input(newcmd)
return newcmd
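# Illustrative sketch (not part of IPython): how the escape branches above
# rewrite a line, assuming the usual escape characters ESC_QUOTE=',',
# ESC_QUOTE2=';' and ESC_PAREN='/'. The helper name is hypothetical.
def _auto_rewrite_demo(esc, ifun='f', the_rest='a b'):
    if esc == ',':    # ESC_QUOTE: auto-quote, splitting on whitespace
        return '%s("%s")' % (ifun, '", "'.join(the_rest.split()))
    if esc == ';':    # ESC_QUOTE2: auto-quote the whole remainder
        return '%s("%s")' % (ifun, the_rest)
    if esc == '/':    # ESC_PAREN: auto-parenthesize, comma-joining words
        return '%s(%s)' % (ifun, ','.join(the_rest.split()))
    return '%s(%s)' % (ifun, the_rest)    # plain auto-paren rewrite
assert _auto_rewrite_demo(',') == 'f("a", "b")'
assert _auto_rewrite_demo(';') == 'f("a b")'
assert _auto_rewrite_demo('/') == 'f(a,b)'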
class EmacsHandler(PrefilterHandler):
handler_name = Unicode('emacs')
esc_strings = List([])
def handle(self, line_info):
"""Handle input lines marked by python-mode."""
# Currently, nothing is done. Later more functionality can be added
# here if needed.
# The input cache shouldn't be updated
return line_info.line
#-----------------------------------------------------------------------------
# Defaults
#-----------------------------------------------------------------------------
_default_transformers = [
]
_default_checkers = [
EmacsChecker,
MacroChecker,
IPyAutocallChecker,
AssignmentChecker,
AutoMagicChecker,
PythonOpsChecker,
AutocallChecker
]
_default_handlers = [
PrefilterHandler,
MacroHandler,
MagicHandler,
AutoHandler,
EmacsHandler
]
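# Sketch of the dispatch model implied by the lists above (the actual logic
# lives in PrefilterManager, earlier in this file): checkers run in
# ascending priority order, and the first enabled checker that returns a
# handler wins; otherwise the caller falls back to the 'normal' handler.
def _dispatch_sketch(checkers, line_info, fallback):
    for checker in sorted(checkers, key=lambda c: c.priority):
        if not checker.enabled:
            continue
        handler = checker.check(line_info)
        if handler is not None:
            return handler
    return fallback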
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/IPython/core/prefilter.py
|
Python
|
mit
| 25,935
|
[
"Brian"
] |
06e72889b5250970be89666691332af84f3255de62bc65640a20cc072206cc6a
|
__problem_title__ = "Jumping frog"
__problem_url___ = "https://projecteuler.net/problem=490"
__problem_description__ = "There are n stones in a pond, numbered 1 to n. Consecutive stones are " \
                          "spaced one unit apart. A frog sits on stone 1. He wishes to visit " \
                          "each stone exactly once, stopping on stone n. However, he can only " \
                          "jump from one stone to another if they are at most 3 units apart. In " \
                          "other words, from stone i, he can reach a stone j if 1 ≤ j ≤ n and j is in the " \
                          "set {i-3, i-2, i-1, i+1, i+2, i+3}. Let f(n) be the number of ways he can " \
                          "do this. For example, f(6) = 14, as shown below: 1 → 2 → 3 → 4 → 5 → 6, " \
                          "1 → 2 → 3 → 5 → 4 → 6, 1 → 2 → 4 → 3 → 5 → 6, 1 → 2 → 4 → 5 → 3 → 6, " \
                          "1 → 2 → 5 → 3 → 4 → 6, 1 → 2 → 5 → 4 → 3 → 6, 1 → 3 → 2 → 4 → 5 → 6, " \
                          "1 → 3 → 2 → 5 → 4 → 6, 1 → 3 → 4 → 2 → 5 → 6, 1 → 3 → 5 → 2 → 4 → 6, " \
                          "1 → 4 → 2 → 3 → 5 → 6, 1 → 4 → 2 → 5 → 3 → 6, 1 → 4 → 3 → 2 → 5 → 6, " \
                          "1 → 4 → 5 → 2 → 3 → 6. Other examples are f(10) = 254 and f(40) = 1439682432976. " \
                          "Let S(L) = ∑ f(n) for 1 ≤ n ≤ L. Examples: S(10) = 18230635, S(20) = " \
                          "104207881192114219, S(1 000) mod 10^9 = 225031475, S(1 000 000) mod 10^9 = " \
                          "363486179. Find S(10^14) mod 10^9."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
jrichte43/ProjectEuler
|
Problem-0490/solutions.py
|
Python
|
gpl-3.0
| 2,062
|
[
"VisIt"
] |
48dcb7ef5835710942a44bff63ecb373edb9c780fc1055247cdbadcf7aca3f5e
|
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python 3 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <ping@lfw.org>',
'Yury Selivanov <yselivanov@sprymix.com>')
import ast
import dis
import collections.abc
import enum
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import token
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication
mod_dict = globals()
for k, v in dis.COMPILER_FLAG_NAMES.items():
mod_dict["CO_" + v] = k
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
"""Return true if the object is a module.
Module objects provide these attributes:
__cached__ pathname to byte compiled file
__doc__ documentation string
__file__ filename (missing for built-in modules)"""
return isinstance(object, types.ModuleType)
def isclass(object):
"""Return true if the object is a class.
Class objects provide these attributes:
__doc__ documentation string
__module__ name of module in which this class was defined"""
return isinstance(object, type)
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
__func__ function object containing implementation of method
__self__ instance to which this method is bound"""
return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
"""Return true if the object is a method descriptor.
But not if ismethod() or isclass() or isfunction() are true.
This is new in Python 2.2, and, for example, is true of int.__add__.
An object passing this test has a __get__ attribute but not a __set__
attribute, but beyond that the set of attributes varies. __name__ is
usually sensible, and __doc__ often is.
Methods implemented via descriptors that also pass one of the other
tests return false from the ismethoddescriptor() test, simply because
the other tests promise more -- you can, e.g., count on having the
__func__ attribute (etc) when an object passes ismethod()."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__get__") and not hasattr(tp, "__set__")
def isdatadescriptor(object):
"""Return true if the object is a data descriptor.
Data descriptors have both a __get__ and a __set__ attribute. Examples are
properties (defined in Python) and getsets and members (defined in C).
Typically, data descriptors will also have __name__ and __doc__ attributes
(properties, getsets, and members have both of these attributes), but this
is not guaranteed."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__set__") and hasattr(tp, "__get__")
if hasattr(types, 'MemberDescriptorType'):
# CPython and equivalent
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.MemberDescriptorType)
else:
# Other implementations
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return False
if hasattr(types, 'GetSetDescriptorType'):
# CPython and equivalent
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.GetSetDescriptorType)
else:
# Other implementations
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return False
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
__code__ code object containing compiled function bytecode
__defaults__ tuple of any default values for arguments
__globals__ global namespace in which this function was defined
__annotations__ dict of parameter annotations
__kwdefaults__ dict of keyword only parameters with defaults"""
return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
"""Return true if the object is a user-defined generator function.
Generator function objects provide the same attributes as functions.
See help(isfunction) for attributes listing."""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & CO_GENERATOR and
not object.__code__.co_flags & CO_COROUTINE)
def iscoroutinefunction(object):
"""Return true if the object is a coroutine function.
Coroutine functions are defined with "async def" syntax,
or generators decorated with "types.coroutine".
"""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & (CO_ITERABLE_COROUTINE |
CO_COROUTINE))
def isawaitable(object):
"""Return true if the object can be used in "await" expression."""
return isinstance(object, collections.abc.Awaitable)
def isgenerator(object):
"""Return true if the object is a generator.
Generator objects provide these attributes:
__iter__ defined to support iteration over container
close raises a new GeneratorExit exception inside the
generator to terminate the iteration
gi_code code object
gi_frame frame object or possibly None once the generator has
been exhausted
gi_running set to 1 when generator is executing, 0 otherwise
next return the next item from the container
send resumes the generator and "sends" a value that becomes
the result of the current yield-expression
throw used to raise an exception inside the generator"""
return (isinstance(object, types.GeneratorType) and
not object.gi_code.co_flags & CO_COROUTINE)
def iscoroutine(object):
"""Return true if the object is a coroutine."""
return isinstance(object, collections.abc.Coroutine)
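# Usage sketch for the predicates above: a plain function is not a
# generator function, and calling a generator function yields a generator.
def _typecheck_demo():
    def plain(): pass
    def gen():
        yield 1
    assert isfunction(plain) and not isgeneratorfunction(plain)
    assert isgeneratorfunction(gen) and not isfunction(gen())
    assert isgenerator(gen())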
def istraceback(object):
"""Return true if the object is a traceback.
Traceback objects provide these attributes:
tb_frame frame object at this level
tb_lasti index of last attempted instruction in bytecode
tb_lineno current line number in Python source code
tb_next next inner traceback object (called by this level)"""
return isinstance(object, types.TracebackType)
def isframe(object):
"""Return true if the object is a frame object.
Frame objects provide these attributes:
f_back next outer frame object (this frame's caller)
f_builtins built-in namespace seen by this frame
f_code code object being executed in this frame
f_globals global namespace seen by this frame
f_lasti index of last attempted instruction in bytecode
f_lineno current line number in Python source code
f_locals local namespace seen by this frame
f_trace tracing function for this frame, or None"""
return isinstance(object, types.FrameType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables"""
return isinstance(object, types.CodeType)
def isbuiltin(object):
"""Return true if the object is a built-in function or method.
Built-in functions and methods provide these attributes:
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object))
def isabstract(object):
"""Return true if the object is an abstract base class (ABC)."""
return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
if isclass(object):
mro = (object,) + getmro(object)
else:
mro = ()
results = []
processed = set()
names = dir(object)
# add any DynamicClassAttributes to the list of names if object is a class;
# this may result in duplicate entries if, for example, a virtual
# attribute with the same name as a DynamicClassAttribute exists
try:
for base in object.__bases__:
for k, v in base.__dict__.items():
if isinstance(v, types.DynamicClassAttribute):
names.append(k)
except AttributeError:
pass
for key in names:
# First try to get the value via getattr. Some descriptors don't
# like calling their __get__ (see bug #1785), so fall back to
# looking in the __dict__.
try:
value = getattr(object, key)
# handle the duplicate key
if key in processed:
raise AttributeError
except AttributeError:
for base in mro:
if key in base.__dict__:
value = base.__dict__[key]
break
else:
# could be a (currently) missing slot member, or a buggy
# __dir__; discard and move on
continue
if not predicate or predicate(value):
results.append((key, value))
processed.add(key)
results.sort(key=lambda pair: pair[0])
return results
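# Usage sketch: restrict getmembers() with a predicate. The toy class and
# helper name are illustrative only.
def _getmembers_demo():
    class Point:
        def __init__(self, x):
            self.x = x
        def norm(self):
            return abs(self.x)
    names = [name for name, value in getmembers(Point, isfunction)]
    assert names == ['__init__', 'norm']   # sorted by name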
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method or descriptor
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained by calling getattr; if this fails, or if the
resulting object does not live anywhere in the class' mro (including
metaclasses) then the object is looked up in the defining class's
dict (found by walking the mro).
If one of the items in dir(cls) is stored in the metaclass it will now
be discovered and not have None be listed as the class in which it was
defined. Any items whose home class cannot be discovered are skipped.
"""
mro = getmro(cls)
metamro = getmro(type(cls)) # for attributes stored in the metaclass
metamro = tuple([cls for cls in metamro if cls not in (type, object)])
class_bases = (cls,) + mro
all_bases = class_bases + metamro
names = dir(cls)
# add any DynamicClassAttributes to the list of names;
# this may result in duplicate entries if, for example, a virtual
# attribute with the same name as a DynamicClassAttribute exists.
for base in mro:
for k, v in base.__dict__.items():
if isinstance(v, types.DynamicClassAttribute):
names.append(k)
result = []
processed = set()
for name in names:
# Get the object associated with the name, and where it was defined.
# Normal objects will be looked up with both getattr and directly in
# its class' dict (in case getattr fails [bug #1785], and also to look
# for a docstring).
# For DynamicClassAttributes on the second pass we only look in the
# class's dict.
#
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
homecls = None
get_obj = None
dict_obj = None
if name not in processed:
try:
if name == '__dict__':
raise Exception("__dict__ is special, don't want the proxy")
get_obj = getattr(cls, name)
except Exception:
pass
else:
homecls = getattr(get_obj, "__objclass__", homecls)
if homecls not in class_bases:
# if the resulting object does not live somewhere in the
# mro, drop it and search the mro manually
homecls = None
last_cls = None
# first look in the classes
for srch_cls in class_bases:
srch_obj = getattr(srch_cls, name, None)
if srch_obj is get_obj:
last_cls = srch_cls
# then check the metaclasses
for srch_cls in metamro:
try:
srch_obj = srch_cls.__getattr__(cls, name)
except AttributeError:
continue
if srch_obj is get_obj:
last_cls = srch_cls
if last_cls is not None:
homecls = last_cls
for base in all_bases:
if name in base.__dict__:
dict_obj = base.__dict__[name]
if homecls not in metamro:
homecls = base
break
if homecls is None:
# unable to locate the attribute anywhere, most likely due to
# buggy custom __dir__; discard and move on
continue
obj = get_obj if get_obj is not None else dict_obj
# Classify the object or its descriptor.
if isinstance(dict_obj, staticmethod):
kind = "static method"
obj = dict_obj
elif isinstance(dict_obj, classmethod):
kind = "class method"
obj = dict_obj
elif isinstance(dict_obj, property):
kind = "property"
obj = dict_obj
elif isroutine(obj):
kind = "method"
else:
kind = "data"
result.append(Attribute(name, kind, homecls, obj))
processed.add(name)
return result
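# Usage sketch: the 'kind' strings produced by classify_class_attrs().
def _classify_demo():
    class C:
        @staticmethod
        def s(): pass
        @classmethod
        def c(cls): pass
        @property
        def p(self): return 0
        def m(self): pass
        d = 1
    kinds = {a.name: a.kind for a in classify_class_attrs(C)}
    assert kinds['s'] == 'static method'
    assert kinds['c'] == 'class method'
    assert kinds['p'] == 'property'
    assert kinds['m'] == 'method'
    assert kinds['d'] == 'data'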
# ----------------------------------------------------------- class helpers
def getmro(cls):
"Return tuple of base classes (including cls) in method resolution order."
return cls.__mro__
# -------------------------------------------------------- function helpers
def unwrap(func, *, stop=None):
"""Get the object wrapped by *func*.
Follows the chain of :attr:`__wrapped__` attributes returning the last
object in the chain.
*stop* is an optional callback accepting an object in the wrapper chain
as its sole argument that allows the unwrapping to be terminated early if
the callback returns a true value. If the callback never returns a true
value, the last object in the chain is returned as usual. For example,
:func:`signature` uses this to stop unwrapping if any object in the
chain has a ``__signature__`` attribute defined.
:exc:`ValueError` is raised if a cycle is encountered.
"""
if stop is None:
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return hasattr(f, '__wrapped__') and not stop(f)
f = func # remember the original func for error reporting
memo = {id(f)} # Memoise by id to tolerate non-hashable objects
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if id_func in memo:
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func
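# Usage sketch: functools.wraps() records __wrapped__, which unwrap()
# follows back to the original function.
def _unwrap_demo():
    import functools
    def original(): return 'original'
    @functools.wraps(original)
    def wrapper(): return 'wrapped'
    assert unwrap(wrapper) is original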
# -------------------------------------------------- source code extraction
def indentsize(line):
"""Return the indent size, in spaces, at the start of a line of text."""
expline = line.expandtabs()
return len(expline) - len(expline.lstrip())
def _findclass(func):
cls = sys.modules.get(func.__module__)
if cls is None:
return None
for name in func.__qualname__.split('.')[:-1]:
cls = getattr(cls, name)
if not isclass(cls):
return None
return cls
def _finddoc(obj):
if isclass(obj):
for base in obj.__mro__:
if base is not object:
try:
doc = base.__doc__
except AttributeError:
continue
if doc is not None:
return doc
return None
if ismethod(obj):
name = obj.__func__.__name__
self = obj.__self__
if (isclass(self) and
getattr(getattr(self, name, None), '__func__') is obj.__func__):
# classmethod
cls = self
else:
cls = self.__class__
elif isfunction(obj):
name = obj.__name__
cls = _findclass(obj)
if cls is None or getattr(cls, name) is not obj:
return None
elif isbuiltin(obj):
name = obj.__name__
self = obj.__self__
if (isclass(self) and
self.__qualname__ + '.' + name == obj.__qualname__):
# classmethod
cls = self
else:
cls = self.__class__
elif ismethoddescriptor(obj) or isdatadescriptor(obj):
name = obj.__name__
cls = obj.__objclass__
if getattr(cls, name) is not obj:
return None
elif isinstance(obj, property):
func = obj.fget
name = func.__name__
cls = _findclass(func)
if cls is None or getattr(cls, name) is not obj:
return None
else:
return None
for base in cls.__mro__:
try:
doc = getattr(base, name).__doc__
except AttributeError:
continue
if doc is not None:
return doc
return None
def getdoc(object):
"""Get the documentation string for an object.
All tabs are expanded to spaces. To clean up docstrings that are
indented to line up with blocks of code, any whitespace that can be
uniformly removed from the second line onwards is removed."""
try:
doc = object.__doc__
except AttributeError:
return None
if doc is None:
try:
doc = _finddoc(object)
except (AttributeError, TypeError):
return None
if not isinstance(doc, str):
return None
return cleandoc(doc)
def cleandoc(doc):
"""Clean up indentation from docstrings.
Any whitespace that can be uniformly removed from the second line
onwards is removed."""
try:
lines = doc.expandtabs().split('\n')
except UnicodeError:
return None
else:
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxsize
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxsize:
for i in range(1, len(lines)): lines[i] = lines[i][margin:]
# Remove any trailing or leading blank lines.
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return '\n'.join(lines)
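# Usage sketch: cleandoc() strips the uniform indentation that a docstring
# picks up from the surrounding code block.
def _cleandoc_demo():
    doc = "Title line.\n    Body line one.\n    Body line two."
    assert cleandoc(doc) == "Title line.\nBody line one.\nBody line two."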
def getfile(object):
"""Work out which source or compiled file an object was defined in."""
if ismodule(object):
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in module'.format(object))
if isclass(object):
if hasattr(object, '__module__'):
object = sys.modules.get(object.__module__)
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in class'.format(object))
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
return object.co_filename
raise TypeError('{!r} is not a module, class, method, '
'function, traceback, frame, or code object'.format(object))
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
"""Get the module name, suffix, mode, and module type for a given file."""
warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning,
2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', PendingDeprecationWarning)
import imp
filename = os.path.basename(path)
suffixes = [(-len(suffix), suffix, mode, mtype)
for suffix, mode, mtype in imp.get_suffixes()]
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix, mode, mtype in suffixes:
if filename[neglen:] == suffix:
return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
"""Return the module name for a given file, or None."""
fname = os.path.basename(path)
# Check for paths that look like an actual module file
suffixes = [(-len(suffix), suffix)
for suffix in importlib.machinery.all_suffixes()]
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix in suffixes:
if fname.endswith(suffix):
return fname[:neglen]
return None
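# Usage sketch: only paths with a recognized module suffix map to a name.
def _getmodulename_demo():
    assert getmodulename('/pkg/mod.py') == 'mod'
    assert getmodulename('/pkg/notes.txt') is None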
def getsourcefile(object):
"""Return the filename that can be used to locate an object's source.
Return None if no way can be identified to get the source.
"""
filename = getfile(object)
all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
if any(filename.endswith(s) for s in all_bytecode_suffixes):
filename = (os.path.splitext(filename)[0] +
importlib.machinery.SOURCE_SUFFIXES[0])
elif any(filename.endswith(s) for s in
importlib.machinery.EXTENSION_SUFFIXES):
return None
if os.path.exists(filename):
return filename
# only return a non-existent filename if the module has a PEP 302 loader
if getattr(getmodule(object, filename), '__loader__', None) is not None:
return filename
# or it is in the linecache
if filename in linecache.cache:
return filename
def getabsfile(object, _filename=None):
"""Return an absolute path to the source or compiled file for an object.
The idea is for each object to have a unique origin, so this routine
normalizes the result as much as possible."""
if _filename is None:
_filename = getsourcefile(object) or getfile(object)
return os.path.normcase(os.path.abspath(_filename))
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
"""Return the module an object was defined in, or None if not found."""
if ismodule(object):
return object
if hasattr(object, '__module__'):
return sys.modules.get(object.__module__)
# Try the filename to modulename cache
if _filename is not None and _filename in modulesbyfile:
return sys.modules.get(modulesbyfile[_filename])
# Try the cache again with the absolute file name
try:
file = getabsfile(object, _filename)
except TypeError:
return None
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Update the filename to module name cache and check yet again
# Copy sys.modules in order to cope with changes while iterating
for modname, module in list(sys.modules.items()):
if ismodule(module) and hasattr(module, '__file__'):
f = module.__file__
if f == _filesbymodname.get(modname, None):
# Have already mapped this module, so skip it
continue
_filesbymodname[modname] = f
f = getabsfile(module)
# Always map to the name the module knows itself by
modulesbyfile[f] = modulesbyfile[
os.path.realpath(f)] = module.__name__
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Check the main module
main = sys.modules['__main__']
if not hasattr(object, '__name__'):
return None
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main
# Check builtins
builtin = sys.modules['builtins']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An OSError
is raised if the source code cannot be retrieved."""
file = getsourcefile(object)
if file:
# Invalidate cache if needed.
linecache.checkcache(file)
else:
file = getfile(object)
# Allow filenames in form of "<something>" to pass through.
# `doctest` monkeypatches `linecache` module to enable
# inspection, so let `linecache.getlines` be called.
if not (file.startswith('<') and file.endswith('>')):
raise OSError('source code not available')
module = getmodule(object, file)
if module:
lines = linecache.getlines(file, module.__dict__)
else:
lines = linecache.getlines(file)
if not lines:
raise OSError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise OSError('could not find class definition')
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise OSError('could not find function definition')
lnum = object.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]): break
lnum = lnum - 1
return lines, lnum
raise OSError('could not find code object')
def getcomments(object):
"""Get lines of comments immediately preceding an object's source code.
Returns None when source can't be found.
"""
try:
lines, lnum = findsource(object)
except (OSError, TypeError):
return None
if ismodule(object):
# Look for a comment block at the top of the file.
start = 0
if lines and lines[0][:2] == '#!': start = 1
while start < len(lines) and lines[start].strip() in ('', '#'):
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(lines[end].expandtabs())
end = end + 1
return ''.join(comments)
# Look for a preceding block of comments at the same indentation.
elif lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and lines[end].lstrip()[:1] == '#' and \
indentsize(lines[end]) == indent:
comments = [lines[end].expandtabs().lstrip()]
if end > 0:
end = end - 1
comment = lines[end].expandtabs().lstrip()
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [comment]
end = end - 1
if end < 0: break
comment = lines[end].expandtabs().lstrip()
while comments and comments[0].strip() == '#':
comments[:1] = []
while comments and comments[-1].strip() == '#':
comments[-1:] = []
return ''.join(comments)
class EndOfBlock(Exception): pass
class BlockFinder:
"""Provide a tokeneater() method to detect the end of a code block."""
def __init__(self):
self.indent = 0
self.islambda = False
self.started = False
self.passline = False
self.last = 1
def tokeneater(self, type, token, srowcol, erowcol, line):
if not self.started:
# look for the first "def", "class" or "lambda"
if token in ("def", "class", "lambda"):
if token == "lambda":
self.islambda = True
self.started = True
self.passline = True # skip to the end of the line
elif type == tokenize.NEWLINE:
self.passline = False # stop skipping when a NEWLINE is seen
self.last = srowcol[0]
if self.islambda: # lambdas always end at the first NEWLINE
raise EndOfBlock
elif self.passline:
pass
elif type == tokenize.INDENT:
self.indent = self.indent + 1
self.passline = True
elif type == tokenize.DEDENT:
self.indent = self.indent - 1
# the end of a matching indent/dedent pair ends a block
# (note that this only works for "def"/"class" blocks,
# not e.g. for "if: else:" or "try: finally:" blocks)
if self.indent <= 0:
raise EndOfBlock
elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
# any other token on the same indentation level ends the previous
# block as well, except the pseudo-tokens COMMENT and NL.
raise EndOfBlock
def getblock(lines):
"""Extract the block of code at the top of the given list of lines."""
blockfinder = BlockFinder()
try:
tokens = tokenize.generate_tokens(iter(lines).__next__)
for _token in tokens:
blockfinder.tokeneater(*_token)
except (EndOfBlock, IndentationError):
pass
return lines[:blockfinder.last]
def _line_number_helper(code_obj, lines, lnum):
"""Return a list of source lines and starting line number for a code object.
The arguments are a code object together with the lines and lnum returned by findsource.
"""
_, end_line = list(dis.findlinestarts(code_obj))[-1]
return lines[lnum:end_line], lnum + 1
def getsourcelines(object):
"""Return a list of source lines and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of the lines
corresponding to the object and the line number indicates where in the
original source file the first line of code was found. An OSError is
raised if the source code cannot be retrieved."""
object = unwrap(object)
lines, lnum = findsource(object)
if ismodule(object):
return lines, 0
elif iscode(object):
return _line_number_helper(object, lines, lnum)
elif isfunction(object):
return _line_number_helper(object.__code__, lines, lnum)
elif ismethod(object):
return _line_number_helper(object.__func__.__code__, lines, lnum)
else:
return getblock(lines[lnum:]), lnum + 1
def getsource(object):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
OSError is raised if the source code cannot be retrieved."""
lines, lnum = getsourcelines(object)
return ''.join(lines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
"""Recursive helper function for getclasstree()."""
results = []
classes.sort(key=attrgetter('__module__', '__name__'))
for c in classes:
results.append((c, c.__bases__))
if c in children:
results.append(walktree(children[c], children, c))
return results
def getclasstree(classes, unique=False):
"""Arrange the given list of classes into a hierarchy of nested lists.
Where a nested list appears, it contains classes derived from the class
whose entry immediately precedes the list. Each entry is a 2-tuple
containing a class and a tuple of its base classes. If the 'unique'
argument is true, exactly one entry appears in the returned structure
for each class in the given list. Otherwise, classes using multiple
inheritance and their descendants will appear multiple times."""
children = {}
roots = []
for c in classes:
if c.__bases__:
for parent in c.__bases__:
if not parent in children:
children[parent] = []
if c not in children[parent]:
children[parent].append(c)
if unique and parent in classes: break
elif c not in roots:
roots.append(c)
for parent in children:
if parent not in classes:
roots.append(parent)
return walktree(roots, children, None)
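# Usage sketch: a simple hierarchy nests derived classes in a list right
# after their base's entry.
def _getclasstree_demo():
    class A: pass
    class B(A): pass
    class C(A): pass
    tree = getclasstree([A, B, C])
    assert tree == [(object, ()),
                    [(A, (object,)),
                     [(B, (A,)), (C, (A,))]]]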
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where
'args' is the list of argument names. Keyword-only arguments are
appended. 'varargs' and 'varkw' are the names of the * and **
arguments or None."""
args, varargs, kwonlyargs, varkw = _getfullargs(co)
return Arguments(args + kwonlyargs, varargs, varkw)
def _getfullargs(co):
"""Get information about the arguments accepted by a code object.
Four things are returned: (args, varargs, kwonlyargs, varkw), where
'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
and 'varkw' are the names of the * and ** arguments or None."""
if not iscode(co):
raise TypeError('{!r} is not a code object'.format(co))
nargs = co.co_argcount
names = co.co_varnames
nkwargs = co.co_kwonlyargcount
args = list(names[:nargs])
kwonlyargs = list(names[nargs:nargs+nkwargs])
nargs += nkwargs
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, kwonlyargs, varkw
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, keywords, defaults).
'args' is a list of the argument names, including keyword-only argument names.
'varargs' and 'keywords' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
Use the getfullargspec() API for Python 3 code, as annotations
and keyword-only arguments are supported. getargspec() will raise ValueError
if the func has either annotations or keyword-only arguments.
"""
warnings.warn("inspect.getargspec() is deprecated, "
"use inspect.signature() instead", DeprecationWarning,
stacklevel=2)
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
getfullargspec(func)
if kwonlyargs or ann:
raise ValueError("Function has keyword-only arguments or annotations"
", use getfullargspec() API which can support them")
return ArgSpec(args, varargs, varkw, defaults)
FullArgSpec = namedtuple('FullArgSpec',
'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(func):
"""Get the names and default values of a callable object's arguments.
A tuple of seven things is returned:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
'args' is a list of the argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
'kwonlyargs' is a list of keyword-only argument names.
'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
'annotations' is a dictionary mapping argument names to annotations.
The first four items in the tuple correspond to getargspec().
This function is deprecated, use inspect.signature() instead.
"""
try:
# Re: `skip_bound_arg=False`
#
# There is a notable difference in behaviour between getfullargspec
# and Signature: the former always returns 'self' parameter for bound
# methods, whereas the Signature always shows the actual calling
# signature of the passed object.
#
# To simulate this behaviour, we "unbind" bound methods, to trick
# inspect.signature to always return their first parameter ("self",
# usually)
# Re: `follow_wrapper_chains=False`
#
# getfullargspec() historically ignored __wrapped__ attributes,
# so we ensure that remains the case in 3.3+
sig = _signature_from_callable(func,
follow_wrapper_chains=False,
skip_bound_arg=False,
sigcls=Signature)
except Exception as ex:
# Most of the time 'signature' will raise ValueError.
# But, it can also raise AttributeError, and, maybe something
# else. So to be fully backwards compatible, we catch all
# possible exceptions here, and reraise a TypeError.
raise TypeError('unsupported callable') from ex
args = []
varargs = None
varkw = None
kwonlyargs = []
defaults = ()
annotations = {}
kwdefaults = {}
if sig.return_annotation is not sig.empty:
annotations['return'] = sig.return_annotation
for param in sig.parameters.values():
kind = param.kind
name = param.name
if kind is _POSITIONAL_ONLY:
args.append(name)
elif kind is _POSITIONAL_OR_KEYWORD:
args.append(name)
if param.default is not param.empty:
defaults += (param.default,)
elif kind is _VAR_POSITIONAL:
varargs = name
elif kind is _KEYWORD_ONLY:
kwonlyargs.append(name)
if param.default is not param.empty:
kwdefaults[name] = param.default
elif kind is _VAR_KEYWORD:
varkw = name
if param.annotation is not param.empty:
annotations[name] = param.annotation
if not kwdefaults:
# compatibility with 'func.__kwdefaults__'
kwdefaults = None
if not defaults:
# compatibility with 'func.__defaults__'
defaults = None
return FullArgSpec(args, varargs, varkw, defaults,
kwonlyargs, kwdefaults, annotations)
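# Usage sketch: how the seven FullArgSpec fields line up for a signature
# that uses every parameter kind.
def _getfullargspec_demo():
    def g(a, b=1, *args, c, d=2, **kw): pass
    spec = getfullargspec(g)
    assert spec.args == ['a', 'b'] and spec.defaults == (1,)
    assert spec.varargs == 'args' and spec.varkw == 'kw'
    assert spec.kwonlyargs == ['c', 'd'] and spec.kwonlydefaults == {'d': 2}
    assert spec.annotations == {}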
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame."""
args, varargs, varkw = getargs(frame.f_code)
return ArgInfo(args, varargs, varkw, frame.f_locals)
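# Usage sketch: inspect a live frame's bound arguments via currentframe().
def _getargvalues_demo():
    def probe(a, *rest):
        return getargvalues(currentframe())
    info = probe(1, 2)
    assert info.args == ['a'] and info.varargs == 'rest'
    assert info.locals['a'] == 1 and info.locals['rest'] == (2,)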
def formatannotation(annotation, base_module=None):
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', base_module):
return annotation.__qualname__
return annotation.__module__+'.'+annotation.__qualname__
return repr(annotation)
def formatannotationrelativeto(object):
module = getattr(object, '__module__', None)
def _formatannotation(annotation):
return formatannotation(annotation, module)
return _formatannotation
def formatargspec(args, varargs=None, varkw=None, defaults=None,
kwonlyargs=(), kwonlydefaults={}, annotations={},
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
formatreturns=lambda text: ' -> ' + text,
formatannotation=formatannotation):
"""Format an argument spec from the values returned by getargspec
or getfullargspec.
The first seven arguments are (args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, annotations). The other six arguments
are the corresponding optional formatting functions that are called to
turn names and values into strings."""
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
result += ': ' + formatannotation(annotations[arg])
return result
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i, arg in enumerate(args):
spec = formatargandannotation(arg)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(formatargandannotation(varargs)))
else:
if kwonlyargs:
specs.append('*')
if kwonlyargs:
for kwonlyarg in kwonlyargs:
spec = formatargandannotation(kwonlyarg)
if kwonlydefaults and kwonlyarg in kwonlydefaults:
spec += formatvalue(kwonlydefaults[kwonlyarg])
specs.append(spec)
if varkw is not None:
specs.append(formatvarkw(formatargandannotation(varkw)))
result = '(' + ', '.join(specs) + ')'
if 'return' in annotations:
result += formatreturns(formatannotation(annotations['return']))
return result
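# Usage sketch: turning a spec back into display form.
def _formatargspec_demo():
    assert formatargspec(['a', 'b'], 'args', None, (1,)) == '(a, b=1, *args)'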
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value)):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings."""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(convert(args[i]))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + ', '.join(specs) + ')'
def _missing_arguments(f_name, argnames, pos, values):
names = [repr(name) for name in argnames if name not in values]
missing = len(names)
if missing == 1:
s = names[0]
elif missing == 2:
s = "{} and {}".format(*names)
else:
tail = ", {} and {}".format(*names[-2:])
del names[-2:]
s = ", ".join(names) + tail
raise TypeError("%s() missing %i required %s argument%s: %s" %
(f_name, missing,
"positional" if pos else "keyword-only",
"" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
atleast = len(args) - defcount
kwonly_given = len([arg for arg in kwonly if arg in values])
if varargs:
plural = atleast != 1
sig = "at least %d" % (atleast,)
elif defcount:
plural = True
sig = "from %d to %d" % (atleast, len(args))
else:
plural = len(args) != 1
sig = str(len(args))
kwonly_sig = ""
if kwonly_given:
msg = " positional argument%s (and %d keyword-only argument%s)"
kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
"s" if kwonly_given != 1 else ""))
raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
(f_name, sig, "s" if plural else "", given, kwonly_sig,
"was" if given == 1 and not kwonly_given else "were"))
def getcallargs(*func_and_positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'."""
func = func_and_positional[0]
positional = func_and_positional[1:]
spec = getfullargspec(func)
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
f_name = func.__name__
arg2value = {}
if ismethod(func) and func.__self__ is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (func.__self__,) + positional
num_pos = len(positional)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
n = min(num_pos, num_args)
for i in range(n):
arg2value[args[i]] = positional[i]
if varargs:
arg2value[varargs] = tuple(positional[n:])
possible_kwargs = set(args + kwonlyargs)
if varkw:
arg2value[varkw] = {}
for kw, value in named.items():
if kw not in possible_kwargs:
if not varkw:
raise TypeError("%s() got an unexpected keyword argument %r" %
(f_name, kw))
arg2value[varkw][kw] = value
continue
if kw in arg2value:
raise TypeError("%s() got multiple values for argument %r" %
(f_name, kw))
arg2value[kw] = value
if num_pos > num_args and not varargs:
_too_many(f_name, args, kwonlyargs, varargs, num_defaults,
num_pos, arg2value)
if num_pos < num_args:
req = args[:num_args - num_defaults]
for arg in req:
if arg not in arg2value:
_missing_arguments(f_name, req, True, arg2value)
for i, arg in enumerate(args[num_args - num_defaults:]):
if arg not in arg2value:
arg2value[arg] = defaults[i]
missing = 0
for kwarg in kwonlyargs:
if kwarg not in arg2value:
if kwonlydefaults and kwarg in kwonlydefaults:
arg2value[kwarg] = kwonlydefaults[kwarg]
else:
missing += 1
if missing:
_missing_arguments(f_name, kwonlyargs, False, arg2value)
return arg2value
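# Usage sketch: getcallargs() mirrors the binding the interpreter would do.
def _getcallargs_demo():
    def h(a, b=10, *rest): pass
    assert getcallargs(h, 1, 2, 3) == {'a': 1, 'b': 2, 'rest': (3,)}
    assert getcallargs(h, 1) == {'a': 1, 'b': 10, 'rest': ()}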
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
def getclosurevars(func):
"""
Get the mapping of free variables to their current values.
Returns a named tuple of dicts mapping the current nonlocal, global
and builtin references as seen by the body of the function. A final
set of unbound names that could not be resolved is also provided.
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError("'{!r}' is not a Python function".format(func))
code = func.__code__
# Nonlocal references are named in co_freevars and resolved
# by looking them up in __closure__ by positional index
if func.__closure__ is None:
nonlocal_vars = {}
else:
nonlocal_vars = {
var : cell.cell_contents
for var, cell in zip(code.co_freevars, func.__closure__)
}
# Global and builtin references are named in co_names and resolved
# by looking them up in __globals__ or __builtins__
global_ns = func.__globals__
builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
if ismodule(builtin_ns):
builtin_ns = builtin_ns.__dict__
global_vars = {}
builtin_vars = {}
unbound_names = set()
for name in code.co_names:
if name in ("None", "True", "False"):
# Because these used to be builtins instead of keywords, they
# may still show up as name references. We ignore them.
continue
try:
global_vars[name] = global_ns[name]
except KeyError:
try:
builtin_vars[name] = builtin_ns[name]
except KeyError:
unbound_names.add(name)
return ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
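# Usage sketch: a closure variable lands in 'nonlocals', a builtin
# reference in 'builtins'.
def _getclosurevars_demo():
    def outer():
        x = 1
        def inner():
            return x + len('y')
        return inner
    cv = getclosurevars(outer())
    assert cv.nonlocals == {'x': 1}
    assert 'len' in cv.builtins and not cv.unbound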
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
"""Get information about a frame or traceback object.
A tuple of five things is returned: the filename, the line number of
the current line, the function name, a list of lines of context from
the source code, and the index of the current line within that list.
The optional second argument specifies the number of lines of context
to return, which are centered around the current line."""
if istraceback(frame):
lineno = frame.tb_lineno
frame = frame.tb_frame
else:
lineno = frame.f_lineno
if not isframe(frame):
raise TypeError('{!r} is not a frame or traceback object'.format(frame))
filename = getsourcefile(frame) or getfile(frame)
if context > 0:
start = lineno - 1 - context//2
try:
lines, lnum = findsource(frame)
except OSError:
lines = index = None
else:
start = max(start, 1)
start = max(0, min(start, len(lines) - context))
lines = lines[start:start+context]
index = lineno - 1 - start
else:
lines = index = None
return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
"""Get the line number from a frame object, allowing for optimization."""
# FrameType.f_lineno is now a descriptor that grovels co_lnotab
return frame.f_lineno
FrameInfo = namedtuple('FrameInfo', ('frame',) + Traceback._fields)
def getouterframes(frame, context=1):
"""Get a list of records for a frame and all higher (calling) frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while frame:
frameinfo = (frame,) + getframeinfo(frame, context)
framelist.append(FrameInfo(*frameinfo))
frame = frame.f_back
return framelist
def getinnerframes(tb, context=1):
"""Get a list of records for a traceback's frame and all lower frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while tb:
frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)
framelist.append(FrameInfo(*frameinfo))
tb = tb.tb_next
return framelist
def currentframe():
"""Return the frame of the caller or None if this is not possible."""
return sys._getframe(1) if hasattr(sys, "_getframe") else None
def stack(context=1):
"""Return a list of records for the stack above the caller's frame."""
return getouterframes(sys._getframe(1), context)
def trace(context=1):
"""Return a list of records for the stack below the current exception."""
return getinnerframes(sys.exc_info()[2], context)
# ------------------------------------------------ static version of getattr
_sentinel = object()
def _static_getmro(klass):
return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
instance_dict = {}
try:
instance_dict = object.__getattribute__(obj, "__dict__")
except AttributeError:
pass
return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
for entry in _static_getmro(klass):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
return _sentinel
def _is_type(obj):
try:
_static_getmro(obj)
except TypeError:
return False
return True
def _shadowed_dict(klass):
dict_attr = type.__dict__["__dict__"]
for entry in _static_getmro(klass):
try:
class_dict = dict_attr.__get__(entry)["__dict__"]
except KeyError:
pass
else:
if not (type(class_dict) is types.GetSetDescriptorType and
class_dict.__name__ == "__dict__" and
class_dict.__objclass__ is entry):
return class_dict
return _sentinel
def getattr_static(obj, attr, default=_sentinel):
"""Retrieve attributes without triggering dynamic lookup via the
descriptor protocol, __getattr__ or __getattribute__.
Note: this function may not be able to retrieve all attributes
that getattr can fetch (like dynamically created attributes)
and may find attributes that getattr can't (like descriptors
that raise AttributeError). It can also return descriptor objects
instead of instance members in some cases. See the
documentation for details.
"""
instance_result = _sentinel
if not _is_type(obj):
klass = type(obj)
dict_attr = _shadowed_dict(klass)
if (dict_attr is _sentinel or
type(dict_attr) is types.MemberDescriptorType):
instance_result = _check_instance(obj, attr)
else:
klass = obj
klass_result = _check_class(klass, attr)
if instance_result is not _sentinel and klass_result is not _sentinel:
if (_check_class(type(klass_result), '__get__') is not _sentinel and
_check_class(type(klass_result), '__set__') is not _sentinel):
return klass_result
if instance_result is not _sentinel:
return instance_result
if klass_result is not _sentinel:
return klass_result
if obj is klass:
# for types we check the metaclass too
for entry in _static_getmro(type(klass)):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
if default is not _sentinel:
return default
raise AttributeError(attr)
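# Illustrative sketch (hypothetical demo helper, not part of the module API):
# unlike plain getattr, getattr_static returns the property descriptor itself
# instead of invoking its __get__, so no side effect is triggered.
def _demo_getattr_static():
    class Lazy:
        @property
        def value(self):
            raise RuntimeError("expensive side effect")
    obj = Lazy()
    descriptor = getattr_static(obj, 'value')  # no RuntimeError is raised
    assert isinstance(descriptor, property)
    return descriptor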
# ------------------------------------------------ generator introspection
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'
def getgeneratorstate(generator):
"""Get current state of a generator-iterator.
Possible states are:
GEN_CREATED: Waiting to start execution.
GEN_RUNNING: Currently being executed by the interpreter.
GEN_SUSPENDED: Currently suspended at a yield expression.
GEN_CLOSED: Execution has completed.
"""
if generator.gi_running:
return GEN_RUNNING
if generator.gi_frame is None:
return GEN_CLOSED
if generator.gi_frame.f_lasti == -1:
return GEN_CREATED
return GEN_SUSPENDED
def getgeneratorlocals(generator):
"""
Get the mapping of generator local variables to their current values.
    A dict is returned, mapping the local variable names to their
    currently bound values."""
if not isgenerator(generator):
raise TypeError("'{!r}' is not a Python generator".format(generator))
frame = getattr(generator, "gi_frame", None)
if frame is not None:
return generator.gi_frame.f_locals
else:
return {}
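# Illustrative sketch (hypothetical demo helper, not part of the module API)
# tying the two helpers above together: the state advances from GEN_CREATED
# to GEN_SUSPENDED to GEN_CLOSED, and the locals mapping is only non-empty
# while a live frame exists.
def _demo_generator_introspection():
    def gen():
        x = 1
        yield x
    g = gen()
    assert getgeneratorstate(g) == GEN_CREATED
    next(g)
    assert getgeneratorstate(g) == GEN_SUSPENDED
    assert getgeneratorlocals(g) == {'x': 1}
    g.close()
    assert getgeneratorstate(g) == GEN_CLOSED and getgeneratorlocals(g) == {}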
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_ClassMethodWrapper = type(int.__dict__['from_bytes'])
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
_ClassMethodWrapper,
types.BuiltinFunctionType)
def _signature_get_user_defined_method(cls, method_name):
"""Private helper. Checks if ``cls`` has an attribute
named ``method_name`` and returns it only if it is a
pure python function.
"""
try:
meth = getattr(cls, method_name)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
            # Once '__signature__' is added to C-level
            # callables, this check won't be necessary
return meth
def _signature_get_partial(wrapped_sig, partial, extra_args=()):
"""Private helper to calculate how 'wrapped_sig' signature will
look like after applying a 'functools.partial' object (or alike)
on it.
"""
old_params = wrapped_sig.parameters
new_params = OrderedDict(old_params.items())
partial_args = partial.args or ()
partial_keywords = partial.keywords or {}
if extra_args:
partial_args = extra_args + partial_args
try:
ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
except TypeError as ex:
msg = 'partial object {!r} has incorrect arguments'.format(partial)
raise ValueError(msg) from ex
transform_to_kwonly = False
for param_name, param in old_params.items():
try:
arg_value = ba.arguments[param_name]
except KeyError:
pass
else:
if param.kind is _POSITIONAL_ONLY:
# If positional-only parameter is bound by partial,
# it effectively disappears from the signature
new_params.pop(param_name)
continue
if param.kind is _POSITIONAL_OR_KEYWORD:
if param_name in partial_keywords:
# This means that this parameter, and all parameters
# after it should be keyword-only (and var-positional
# should be removed). Here's why. Consider the following
# function:
# foo(a, b, *args, c):
# pass
#
# "partial(foo, a='spam')" will have the following
# signature: "(*, a='spam', b, c)". Because attempting
# to call that partial with "(10, 20)" arguments will
# raise a TypeError, saying that "a" argument received
# multiple values.
transform_to_kwonly = True
# Set the new default value
new_params[param_name] = param.replace(default=arg_value)
else:
# was passed as a positional argument
new_params.pop(param.name)
continue
if param.kind is _KEYWORD_ONLY:
# Set the new default value
new_params[param_name] = param.replace(default=arg_value)
if transform_to_kwonly:
assert param.kind is not _POSITIONAL_ONLY
if param.kind is _POSITIONAL_OR_KEYWORD:
new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
new_params[param_name] = new_param
new_params.move_to_end(param_name)
elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
new_params.move_to_end(param_name)
elif param.kind is _VAR_POSITIONAL:
new_params.pop(param.name)
return wrapped_sig.replace(parameters=new_params.values())
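# Illustrative sketch (hypothetical demo helper, not part of the module API):
# the effect described in the comments above, observed through the public
# signature() function defined later in this module.
def _demo_partial_signature():
    import functools
    def foo(a, b, *args, c=3):
        return a, b, args, c
    # Binding 'a' by keyword removes *args and turns the remaining
    # positional-or-keyword parameters into keyword-only ones.
    sig = signature(functools.partial(foo, a=1))
    return str(sig)  # expected to render as "(*, a=1, b, c=3)"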
def _signature_bound_method(sig):
"""Private helper to transform signatures for unbound
functions to bound methods.
"""
params = tuple(sig.parameters.values())
if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
raise ValueError('invalid method signature')
kind = params[0].kind
if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
# Drop first parameter:
# '(p1, p2[, ...])' -> '(p2[, ...])'
params = params[1:]
else:
if kind is not _VAR_POSITIONAL:
# Unless we add a new parameter type we never
# get here
raise ValueError('invalid argument type')
# It's a var-positional parameter.
# Do nothing. '(*args[, ...])' -> '(*args[, ...])'
return sig.replace(parameters=params)
def _signature_is_builtin(obj):
"""Private helper to test if `obj` is a callable that might
support Argument Clinic's __text_signature__ protocol.
"""
return (isbuiltin(obj) or
ismethoddescriptor(obj) or
isinstance(obj, _NonUserDefinedCallables) or
# Can't test 'isinstance(type)' here, as it would
# also be True for regular python classes
obj in (type, object))
def _signature_is_functionlike(obj):
"""Private helper to test if `obj` is a duck type of FunctionType.
    Good examples of such objects are functions compiled with
    Cython, which have all the attributes that a pure Python function
    would have, but whose code is statically compiled.
"""
if not callable(obj) or isclass(obj):
# All function-like objects are obviously callables,
# and not classes.
return False
name = getattr(obj, '__name__', None)
code = getattr(obj, '__code__', None)
defaults = getattr(obj, '__defaults__', _void) # Important to use _void ...
kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here
annotations = getattr(obj, '__annotations__', None)
return (isinstance(code, types.CodeType) and
isinstance(name, str) and
(defaults is None or isinstance(defaults, tuple)) and
(kwdefaults is None or isinstance(kwdefaults, dict)) and
isinstance(annotations, dict))
def _signature_get_bound_param(spec):
""" Private helper to get first parameter name from a
__text_signature__ of a builtin method, which should
be in the following format: '($param1, ...)'.
Assumptions are that the first argument won't have
a default value or an annotation.
"""
assert spec.startswith('($')
pos = spec.find(',')
if pos == -1:
pos = spec.find(')')
cpos = spec.find(':')
assert cpos == -1 or cpos > pos
cpos = spec.find('=')
assert cpos == -1 or cpos > pos
return spec[2:pos]
def _signature_strip_non_python_syntax(signature):
"""
Private helper function. Takes a signature in Argument Clinic's
extended signature format.
Returns a tuple of three things:
* that signature re-rendered in standard Python syntax,
* the index of the "self" parameter (generally 0), or None if
the function does not have a "self" parameter, and
* the index of the last "positional only" parameter,
or None if the signature has no positional-only parameters.
"""
if not signature:
return signature, None, None
self_parameter = None
last_positional_only = None
lines = [l.encode('ascii') for l in signature.split('\n')]
generator = iter(lines).__next__
token_stream = tokenize.tokenize(generator)
delayed_comma = False
skip_next_comma = False
text = []
add = text.append
current_parameter = 0
OP = token.OP
ERRORTOKEN = token.ERRORTOKEN
# token stream always starts with ENCODING token, skip it
t = next(token_stream)
assert t.type == tokenize.ENCODING
for t in token_stream:
type, string = t.type, t.string
if type == OP:
if string == ',':
if skip_next_comma:
skip_next_comma = False
else:
assert not delayed_comma
delayed_comma = True
current_parameter += 1
continue
if string == '/':
assert not skip_next_comma
assert last_positional_only is None
skip_next_comma = True
last_positional_only = current_parameter - 1
continue
if (type == ERRORTOKEN) and (string == '$'):
assert self_parameter is None
self_parameter = current_parameter
continue
if delayed_comma:
delayed_comma = False
if not ((type == OP) and (string == ')')):
add(', ')
add(string)
if (string == ','):
add(' ')
clean_signature = ''.join(text)
return clean_signature, self_parameter, last_positional_only
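# Illustrative sketch (hypothetical demo helper with toy input, not part of
# the module API): an Argument Clinic text signature such as '($self, obj, /)'
# is re-rendered in plain Python syntax, with '$' marking the self parameter
# and '/' the last positional-only parameter.
def _demo_strip_non_python_syntax():
    clean, self_idx, last_pos_only = \
        _signature_strip_non_python_syntax('($self, obj, /)')
    # clean == '(self, obj)', self_idx == 0, last_pos_only == 1
    return clean, self_idx, last_pos_only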
def _signature_fromstr(cls, obj, s, skip_bound_arg=True):
"""Private helper to parse content of '__text_signature__'
and return a Signature based on it.
"""
Parameter = cls._parameter_cls
clean_signature, self_parameter, last_positional_only = \
_signature_strip_non_python_syntax(s)
program = "def foo" + clean_signature + ": pass"
try:
module = ast.parse(program)
except SyntaxError:
module = None
if not isinstance(module, ast.Module):
raise ValueError("{!r} builtin has invalid signature".format(obj))
f = module.body[0]
parameters = []
empty = Parameter.empty
invalid = object()
module = None
module_dict = {}
module_name = getattr(obj, '__module__', None)
if module_name:
module = sys.modules.get(module_name, None)
if module:
module_dict = module.__dict__
sys_module_dict = sys.modules
def parse_name(node):
assert isinstance(node, ast.arg)
        if node.annotation is not None:
raise ValueError("Annotations are not currently supported")
return node.arg
def wrap_value(s):
try:
value = eval(s, module_dict)
except NameError:
try:
value = eval(s, sys_module_dict)
except NameError:
raise RuntimeError()
if isinstance(value, str):
return ast.Str(value)
if isinstance(value, (int, float)):
return ast.Num(value)
if isinstance(value, bytes):
return ast.Bytes(value)
if value in (True, False, None):
return ast.NameConstant(value)
raise RuntimeError()
class RewriteSymbolics(ast.NodeTransformer):
def visit_Attribute(self, node):
a = []
n = node
while isinstance(n, ast.Attribute):
a.append(n.attr)
n = n.value
if not isinstance(n, ast.Name):
raise RuntimeError()
a.append(n.id)
value = ".".join(reversed(a))
return wrap_value(value)
def visit_Name(self, node):
if not isinstance(node.ctx, ast.Load):
raise ValueError()
return wrap_value(node.id)
def p(name_node, default_node, default=empty):
name = parse_name(name_node)
if name is invalid:
return None
if default_node and default_node is not _empty:
try:
default_node = RewriteSymbolics().visit(default_node)
o = ast.literal_eval(default_node)
except ValueError:
o = invalid
if o is invalid:
return None
default = o if o is not invalid else default
parameters.append(Parameter(name, kind, default=default, annotation=empty))
# non-keyword-only parameters
args = reversed(f.args.args)
defaults = reversed(f.args.defaults)
iter = itertools.zip_longest(args, defaults, fillvalue=None)
if last_positional_only is not None:
kind = Parameter.POSITIONAL_ONLY
else:
kind = Parameter.POSITIONAL_OR_KEYWORD
for i, (name, default) in enumerate(reversed(list(iter))):
p(name, default)
if i == last_positional_only:
kind = Parameter.POSITIONAL_OR_KEYWORD
# *args
if f.args.vararg:
kind = Parameter.VAR_POSITIONAL
p(f.args.vararg, empty)
# keyword-only arguments
kind = Parameter.KEYWORD_ONLY
for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults):
p(name, default)
# **kwargs
if f.args.kwarg:
kind = Parameter.VAR_KEYWORD
p(f.args.kwarg, empty)
if self_parameter is not None:
# Possibly strip the bound argument:
# - We *always* strip first bound argument if
# it is a module.
# - We don't strip first bound argument if
# skip_bound_arg is False.
assert parameters
_self = getattr(obj, '__self__', None)
self_isbound = _self is not None
self_ismodule = ismodule(_self)
if self_isbound and (self_ismodule or skip_bound_arg):
parameters.pop(0)
else:
# for builtins, self parameter is always positional-only!
p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY)
parameters[0] = p
return cls(parameters, return_annotation=cls.empty)
def _signature_from_builtin(cls, func, skip_bound_arg=True):
"""Private helper function to get signature for
builtin callables.
"""
if not _signature_is_builtin(func):
raise TypeError("{!r} is not a Python builtin "
"function".format(func))
s = getattr(func, "__text_signature__", None)
if not s:
raise ValueError("no signature found for builtin {!r}".format(func))
return _signature_fromstr(cls, func, s, skip_bound_arg)
def _signature_from_function(cls, func):
"""Private helper: constructs Signature for the given python function."""
is_duck_function = False
if not isfunction(func):
if _signature_is_functionlike(func):
is_duck_function = True
else:
# If it's not a pure Python function, and not a duck type
            # of a pure Python function:
raise TypeError('{!r} is not a Python function'.format(func))
Parameter = cls._parameter_cls
# Parameter information.
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_count])
keyword_only_count = func_code.co_kwonlyargcount
keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
annotations = func.__annotations__
defaults = func.__defaults__
kwdefaults = func.__kwdefaults__
if defaults:
pos_default_count = len(defaults)
else:
pos_default_count = 0
parameters = []
# Non-keyword-only parameters w/o defaults.
non_default_count = pos_count - pos_default_count
for name in positional[:non_default_count]:
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD))
# ... w/ defaults.
for offset, name in enumerate(positional[non_default_count:]):
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD,
default=defaults[offset]))
# *args
if func_code.co_flags & CO_VARARGS:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_POSITIONAL))
# Keyword-only parameters.
for name in keyword_only:
default = _empty
if kwdefaults is not None:
default = kwdefaults.get(name, _empty)
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_KEYWORD_ONLY,
default=default))
# **kwargs
if func_code.co_flags & CO_VARKEYWORDS:
index = pos_count + keyword_only_count
if func_code.co_flags & CO_VARARGS:
index += 1
name = arg_names[index]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_KEYWORD))
    # If 'func' is a pure Python function, don't validate the
    # parameters list (for correct order and defaults); it should be OK.
return cls(parameters,
return_annotation=annotations.get('return', _empty),
__validate_parameters__=is_duck_function)
def _signature_from_callable(obj, *,
follow_wrapper_chains=True,
skip_bound_arg=True,
sigcls):
"""Private helper function to get signature for arbitrary
callable objects.
"""
if not callable(obj):
raise TypeError('{!r} is not a callable object'.format(obj))
if isinstance(obj, types.MethodType):
# In this case we skip the first parameter of the underlying
# function (usually `self` or `cls`).
sig = _signature_from_callable(
obj.__func__,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
if skip_bound_arg:
return _signature_bound_method(sig)
else:
return sig
# Was this function wrapped by a decorator?
if follow_wrapper_chains:
obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")))
if isinstance(obj, types.MethodType):
# If the unwrapped object is a *method*, we might want to
# skip its first parameter (self).
# See test_signature_wrapped_bound_method for details.
return _signature_from_callable(
obj,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
if not isinstance(sig, Signature):
raise TypeError(
'unexpected object {!r} in __signature__ '
'attribute'.format(sig))
return sig
try:
partialmethod = obj._partialmethod
except AttributeError:
pass
else:
if isinstance(partialmethod, functools.partialmethod):
# Unbound partialmethod (see functools.partialmethod)
            # This means that we need to calculate the signature
            # as if it's a regular partial object, but taking into
            # account that the first positional argument
            # (usually `self` or `cls`) will not be passed
            # automatically (as it is for bound methods)
wrapped_sig = _signature_from_callable(
partialmethod.func,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
new_params = (first_wrapped_param,) + tuple(sig.parameters.values())
return sig.replace(parameters=new_params)
if isfunction(obj) or _signature_is_functionlike(obj):
# If it's a pure Python function, or an object that is duck type
# of a Python function (Cython functions, for instance), then:
return _signature_from_function(sigcls, obj)
if _signature_is_builtin(obj):
return _signature_from_builtin(sigcls, obj,
skip_bound_arg=skip_bound_arg)
if isinstance(obj, functools.partial):
wrapped_sig = _signature_from_callable(
obj.func,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
return _signature_get_partial(wrapped_sig, obj)
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _signature_get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = _signature_from_callable(
call,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _signature_get_user_defined_method(obj, '__new__')
if new is not None:
sig = _signature_from_callable(
new,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
else:
# Finally, we should have at least __init__ implemented
init = _signature_get_user_defined_method(obj, '__init__')
if init is not None:
sig = _signature_from_callable(
init,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
if sig is None:
# At this point we know, that `obj` is a class, with no user-
# defined '__init__', '__new__', or class-level '__call__'
for base in obj.__mro__[:-1]:
# Since '__text_signature__' is implemented as a
# descriptor that extracts text signature from the
# class docstring, if 'obj' is derived from a builtin
# class, its own '__text_signature__' may be 'None'.
# Therefore, we go through the MRO (except the last
# class in there, which is 'object') to find the first
# class with non-empty text signature.
try:
text_sig = base.__text_signature__
except AttributeError:
pass
else:
if text_sig:
# If 'obj' class has a __text_signature__ attribute:
# return a signature based on it
return _signature_fromstr(sigcls, obj, text_sig)
# No '__text_signature__' was found for the 'obj' class.
# Last option is to check if its '__init__' is
# object.__init__ or type.__init__.
if type not in obj.__mro__:
# We have a class (not metaclass), but no user-defined
# __init__ or __new__ for it
if (obj.__init__ is object.__init__ and
obj.__new__ is object.__new__):
# Return a signature of 'object' builtin.
return signature(object)
else:
raise ValueError(
'no signature found for builtin type {!r}'.format(obj))
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _signature_get_user_defined_method(type(obj), '__call__')
if call is not None:
try:
sig = _signature_from_callable(
call,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
except ValueError as ex:
msg = 'no signature found for {!r}'.format(obj)
raise ValueError(msg) from ex
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
if skip_bound_arg:
return _signature_bound_method(sig)
else:
return sig
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {!r}'.format(obj)
raise ValueError(msg)
raise ValueError('callable {!r} is not supported by signature'.format(obj))
class _void:
"""A private marker - used in Parameter & Signature."""
class _empty:
"""Marker object for Signature.empty and Parameter.empty."""
class _ParameterKind(enum.IntEnum):
POSITIONAL_ONLY = 0
POSITIONAL_OR_KEYWORD = 1
VAR_POSITIONAL = 2
KEYWORD_ONLY = 3
VAR_KEYWORD = 4
def __str__(self):
return self._name_
_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY
_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD
_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL
_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY
_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD
class Parameter:
"""Represents a parameter in a function signature.
Has the following public attributes:
* name : str
The name of the parameter as a string.
* default : object
The default value for the parameter if specified. If the
parameter has no default value, this attribute is set to
`Parameter.empty`.
* annotation
The annotation for the parameter if specified. If the
parameter has no annotation, this attribute is set to
`Parameter.empty`.
* kind : str
Describes how argument values are bound to the parameter.
Possible values: `Parameter.POSITIONAL_ONLY`,
`Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
`Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
"""
__slots__ = ('_name', '_kind', '_default', '_annotation')
POSITIONAL_ONLY = _POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = _VAR_POSITIONAL
KEYWORD_ONLY = _KEYWORD_ONLY
VAR_KEYWORD = _VAR_KEYWORD
empty = _empty
def __init__(self, name, kind, *, default=_empty, annotation=_empty):
if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
_VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
raise ValueError("invalid value for 'Parameter.kind' attribute")
self._kind = kind
if default is not _empty:
if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
msg = '{} parameters cannot have default values'.format(kind)
raise ValueError(msg)
self._default = default
self._annotation = annotation
if name is _empty:
raise ValueError('name is a required attribute for Parameter')
if not isinstance(name, str):
raise TypeError("name must be a str, not a {!r}".format(name))
if not name.isidentifier():
raise ValueError('{!r} is not a valid parameter name'.format(name))
self._name = name
def __reduce__(self):
return (type(self),
(self._name, self._kind),
{'_default': self._default,
'_annotation': self._annotation})
def __setstate__(self, state):
self._default = state['_default']
self._annotation = state['_annotation']
@property
def name(self):
return self._name
@property
def default(self):
return self._default
@property
def annotation(self):
return self._annotation
@property
def kind(self):
return self._kind
def replace(self, *, name=_void, kind=_void,
annotation=_void, default=_void):
"""Creates a customized copy of the Parameter."""
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
return type(self)(name, kind, default=default, annotation=annotation)
def __str__(self):
kind = self.kind
formatted = self._name
# Add annotation and default value
if self._annotation is not _empty:
formatted = '{}:{}'.format(formatted,
formatannotation(self._annotation))
if self._default is not _empty:
formatted = '{}={}'.format(formatted, repr(self._default))
if kind == _VAR_POSITIONAL:
formatted = '*' + formatted
elif kind == _VAR_KEYWORD:
formatted = '**' + formatted
return formatted
def __repr__(self):
return '<{} "{}">'.format(self.__class__.__name__, self)
def __hash__(self):
return hash((self.name, self.kind, self.annotation, self.default))
def __eq__(self, other):
return (self is other or
(issubclass(other.__class__, Parameter) and
self._name == other._name and
self._kind == other._kind and
self._default == other._default and
self._annotation == other._annotation))
def __ne__(self, other):
return not self.__eq__(other)
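# Illustrative sketch (hypothetical demo helper, not part of the module API):
# Parameter objects render with their annotation and default, and replace()
# produces modified copies without mutating the original.
def _demo_parameter():
    p = Parameter('x', Parameter.POSITIONAL_OR_KEYWORD,
                  default=10, annotation=int)
    assert str(p) == 'x:int=10'
    q = p.replace(kind=Parameter.KEYWORD_ONLY, default=Parameter.empty)
    assert str(q) == 'x:int' and q.kind == Parameter.KEYWORD_ONLY
    return p, q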
class BoundArguments:
"""Result of `Signature.bind` call. Holds the mapping of arguments
to the function's parameters.
Has the following public attributes:
* arguments : OrderedDict
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
* signature : Signature
The Signature object that created this instance.
    * args : tuple
        Tuple of positional argument values.
    * kwargs : dict
        Dict of keyword argument values.
"""
__slots__ = ('arguments', '_signature', '__weakref__')
def __init__(self, signature, arguments):
self.arguments = arguments
self._signature = signature
@property
def signature(self):
return self._signature
@property
def args(self):
args = []
for param_name, param in self._signature.parameters.items():
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
break
try:
arg = self.arguments[param_name]
except KeyError:
# We're done here. Other arguments
# will be mapped in 'BoundArguments.kwargs'
break
else:
if param.kind == _VAR_POSITIONAL:
# *args
args.extend(arg)
else:
# plain argument
args.append(arg)
return tuple(args)
@property
def kwargs(self):
kwargs = {}
kwargs_started = False
for param_name, param in self._signature.parameters.items():
if not kwargs_started:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
kwargs_started = True
else:
if param_name not in self.arguments:
kwargs_started = True
continue
if not kwargs_started:
continue
try:
arg = self.arguments[param_name]
except KeyError:
pass
else:
if param.kind == _VAR_KEYWORD:
# **kwargs
kwargs.update(arg)
else:
# plain keyword argument
kwargs[param_name] = arg
return kwargs
def apply_defaults(self):
"""Set default values for missing arguments.
For variable-positional arguments (*args) the default is an
empty tuple.
For variable-keyword arguments (**kwargs) the default is an
empty dict.
"""
arguments = self.arguments
if not arguments:
return
new_arguments = []
for name, param in self._signature.parameters.items():
try:
new_arguments.append((name, arguments[name]))
except KeyError:
if param.default is not _empty:
val = param.default
elif param.kind is _VAR_POSITIONAL:
val = ()
elif param.kind is _VAR_KEYWORD:
val = {}
else:
# This BoundArguments was likely produced by
# Signature.bind_partial().
continue
new_arguments.append((name, val))
self.arguments = OrderedDict(new_arguments)
def __eq__(self, other):
return (self is other or
(issubclass(other.__class__, BoundArguments) and
self.signature == other.signature and
self.arguments == other.arguments))
def __ne__(self, other):
return not self.__eq__(other)
def __setstate__(self, state):
self._signature = state['_signature']
self.arguments = state['arguments']
def __getstate__(self):
return {'_signature': self._signature, 'arguments': self.arguments}
def __repr__(self):
args = []
for arg, value in self.arguments.items():
args.append('{}={!r}'.format(arg, value))
return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args))
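# Illustrative sketch (hypothetical demo helper, not part of the module API):
# bind() records only what was passed, the args/kwargs properties split the
# mapping back, and apply_defaults() fills in the rest, including the empty
# containers for *args and **kwargs.
def _demo_bound_arguments():
    from collections import OrderedDict
    def f(a, b=2, *args, c=3, **kwargs):
        pass
    ba = signature(f).bind(1, c=4)
    assert ba.args == (1,) and ba.kwargs == {'c': 4}
    ba.apply_defaults()
    assert ba.arguments == OrderedDict(
        [('a', 1), ('b', 2), ('args', ()), ('c', 4), ('kwargs', {})])
    return ba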
class Signature:
"""A Signature object represents the overall signature of a function.
It stores a Parameter object for each parameter accepted by the
function, as well as information specific to the function itself.
A Signature object has the following public attributes and methods:
* parameters : OrderedDict
An ordered mapping of parameters' names to the corresponding
Parameter objects (keyword-only arguments are in the same order
as listed in `code.co_varnames`).
* return_annotation : object
The annotation for the return type of the function if specified.
If the function has no annotation for its return type, this
attribute is set to `Signature.empty`.
* bind(*args, **kwargs) -> BoundArguments
Creates a mapping from positional and keyword arguments to
parameters.
* bind_partial(*args, **kwargs) -> BoundArguments
Creates a partial mapping from positional and keyword arguments
to parameters (simulating 'functools.partial' behavior.)
"""
__slots__ = ('_return_annotation', '_parameters')
_parameter_cls = Parameter
_bound_arguments_cls = BoundArguments
empty = _empty
def __init__(self, parameters=None, *, return_annotation=_empty,
__validate_parameters__=True):
"""Constructs Signature from the given list of Parameter
objects and 'return_annotation'. All arguments are optional.
"""
if parameters is None:
params = OrderedDict()
else:
if __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
kind_defaults = False
for idx, param in enumerate(parameters):
kind = param.kind
name = param.name
if kind < top_kind:
msg = 'wrong parameter order: {!r} before {!r}'
msg = msg.format(top_kind, kind)
raise ValueError(msg)
elif kind > top_kind:
kind_defaults = False
top_kind = kind
if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
if param.default is _empty:
if kind_defaults:
# No default for this parameter, but the
# previous parameter of the same kind had
# a default
msg = 'non-default argument follows default ' \
'argument'
raise ValueError(msg)
else:
# There is a default for this parameter.
kind_defaults = True
if name in params:
msg = 'duplicate parameter name: {!r}'.format(name)
raise ValueError(msg)
params[name] = param
else:
params = OrderedDict(((param.name, param)
for param in parameters))
self._parameters = types.MappingProxyType(params)
self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
"""Constructs Signature for the given python function."""
warnings.warn("inspect.Signature.from_function() is deprecated, "
"use Signature.from_callable()",
DeprecationWarning, stacklevel=2)
return _signature_from_function(cls, func)
@classmethod
def from_builtin(cls, func):
"""Constructs Signature for the given builtin function."""
warnings.warn("inspect.Signature.from_builtin() is deprecated, "
"use Signature.from_callable()",
DeprecationWarning, stacklevel=2)
return _signature_from_builtin(cls, func)
@classmethod
def from_callable(cls, obj, *, follow_wrapped=True):
"""Constructs Signature for the given callable object."""
return _signature_from_callable(obj, sigcls=cls,
follow_wrapper_chains=follow_wrapped)
@property
def parameters(self):
return self._parameters
@property
def return_annotation(self):
return self._return_annotation
def replace(self, *, parameters=_void, return_annotation=_void):
"""Creates a customized copy of the Signature.
Pass 'parameters' and/or 'return_annotation' arguments
to override them in the new copy.
"""
if parameters is _void:
parameters = self.parameters.values()
if return_annotation is _void:
return_annotation = self._return_annotation
return type(self)(parameters,
return_annotation=return_annotation)
def _hash_basis(self):
params = tuple(param for param in self.parameters.values()
if param.kind != _KEYWORD_ONLY)
kwo_params = {param.name: param for param in self.parameters.values()
if param.kind == _KEYWORD_ONLY}
return params, kwo_params, self.return_annotation
def __hash__(self):
params, kwo_params, return_annotation = self._hash_basis()
kwo_params = frozenset(kwo_params.values())
return hash((params, kwo_params, return_annotation))
def __eq__(self, other):
return (self is other or
(isinstance(other, Signature) and
self._hash_basis() == other._hash_basis()))
def __ne__(self, other):
return not self.__eq__(other)
def _bind(self, args, kwargs, *, partial=False):
"""Private method. Don't use directly."""
arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
while True:
# Let's iterate through the positional arguments and corresponding
# parameters
try:
arg_val = next(arg_vals)
except StopIteration:
# No more positional arguments
try:
param = next(parameters)
except StopIteration:
# No more parameters. That's it. Just need to check that
# we have no `kwargs` after this while loop
break
else:
if param.kind == _VAR_POSITIONAL:
# That's OK, just empty *args. Let's start parsing
# kwargs
break
elif param.name in kwargs:
if param.kind == _POSITIONAL_ONLY:
msg = '{arg!r} parameter is positional only, ' \
'but was passed as a keyword'
msg = msg.format(arg=param.name)
raise TypeError(msg) from None
parameters_ex = (param,)
break
elif (param.kind == _VAR_KEYWORD or
param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, let's start parsing `kwargs`, starting
                        # with the current parameter
parameters_ex = (param,)
break
else:
# No default, not VAR_KEYWORD, not VAR_POSITIONAL,
# not in `kwargs`
if partial:
parameters_ex = (param,)
break
else:
msg = 'missing a required argument: {arg!r}'
msg = msg.format(arg=param.name)
raise TypeError(msg) from None
else:
# We have a positional argument to process
try:
param = next(parameters)
except StopIteration:
raise TypeError('too many positional arguments') from None
else:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
# Looks like we have no parameter for this positional
# argument
raise TypeError(
'too many positional arguments') from None
if param.kind == _VAR_POSITIONAL:
# We have an '*args'-like argument, let's fill it with
# all positional arguments we have left and move on to
# the next phase
values = [arg_val]
values.extend(arg_vals)
arguments[param.name] = tuple(values)
break
if param.name in kwargs:
raise TypeError(
'multiple values for argument {arg!r}'.format(
arg=param.name)) from None
arguments[param.name] = arg_val
# Now, we iterate through the remaining parameters to process
# keyword arguments
kwargs_param = None
for param in itertools.chain(parameters_ex, parameters):
if param.kind == _VAR_KEYWORD:
# Memorize that we have a '**kwargs'-like parameter
kwargs_param = param
continue
if param.kind == _VAR_POSITIONAL:
# Named arguments don't refer to '*args'-like parameters.
# We only arrive here if the positional arguments ended
# before reaching the last parameter before *args.
continue
param_name = param.name
try:
arg_val = kwargs.pop(param_name)
except KeyError:
# We have no value for this parameter. It's fine though,
# if it has a default value, or it is an '*args'-like
# parameter, left alone by the processing of positional
# arguments.
if (not partial and param.kind != _VAR_POSITIONAL and
param.default is _empty):
raise TypeError('missing a required argument: {arg!r}'. \
format(arg=param_name)) from None
else:
if param.kind == _POSITIONAL_ONLY:
# This should never happen in case of a properly built
# Signature object (but let's have this check here
# to ensure correct behaviour just in case)
raise TypeError('{arg!r} parameter is positional only, '
'but was passed as a keyword'. \
format(arg=param.name))
arguments[param_name] = arg_val
if kwargs:
if kwargs_param is not None:
# Process our '**kwargs'-like parameter
arguments[kwargs_param.name] = kwargs
else:
raise TypeError(
'got an unexpected keyword argument {arg!r}'.format(
arg=next(iter(kwargs))))
return self._bound_arguments_cls(self, arguments)
def bind(*args, **kwargs):
"""Get a BoundArguments object, that maps the passed `args`
and `kwargs` to the function's signature. Raises `TypeError`
if the passed arguments can not be bound.
"""
return args[0]._bind(args[1:], kwargs)
def bind_partial(*args, **kwargs):
"""Get a BoundArguments object, that partially maps the
passed `args` and `kwargs` to the function's signature.
Raises `TypeError` if the passed arguments can not be bound.
"""
return args[0]._bind(args[1:], kwargs, partial=True)
def __reduce__(self):
return (type(self),
(tuple(self._parameters.values()),),
{'_return_annotation': self._return_annotation})
def __setstate__(self, state):
self._return_annotation = state['_return_annotation']
def __repr__(self):
return '<{} {}>'.format(self.__class__.__name__, self)
def __str__(self):
result = []
render_pos_only_separator = False
render_kw_only_separator = True
for param in self.parameters.values():
formatted = str(param)
kind = param.kind
if kind == _POSITIONAL_ONLY:
render_pos_only_separator = True
elif render_pos_only_separator:
# It's not a positional-only parameter, and the flag
# is set to 'True' (there were pos-only params before.)
result.append('/')
render_pos_only_separator = False
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append('*')
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
if render_pos_only_separator:
# There were only positional-only parameters, hence the
# flag was not reset to 'False'
result.append('/')
rendered = '({})'.format(', '.join(result))
if self.return_annotation is not _empty:
anno = formatannotation(self.return_annotation)
rendered += ' -> {}'.format(anno)
return rendered
def signature(obj, *, follow_wrapped=True):
"""Get a signature object for the passed callable."""
return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
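# Illustrative sketch (hypothetical demo helper, not part of the module API):
# the public entry point in action, showing how keyword-only parameters and
# the return annotation are rendered.
def _demo_signature():
    def greet(name, *, punctuation='!') -> str:
        return name + punctuation
    sig = signature(greet)
    assert str(sig) == "(name, *, punctuation='!') -> str"
    assert list(sig.parameters) == ['name', 'punctuation']
    return sig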
def _main():
""" Logic for inspecting an object given at command line """
import argparse
import importlib
parser = argparse.ArgumentParser()
parser.add_argument(
'object',
help="The object to be analysed. "
"It supports the 'module:qualname' syntax")
parser.add_argument(
'-d', '--details', action='store_true',
help='Display info about the module rather than its source code')
args = parser.parse_args()
target = args.object
mod_name, has_attrs, attrs = target.partition(":")
try:
obj = module = importlib.import_module(mod_name)
except Exception as exc:
msg = "Failed to import {} ({}: {})".format(mod_name,
type(exc).__name__,
exc)
print(msg, file=sys.stderr)
exit(2)
if has_attrs:
parts = attrs.split(".")
obj = module
for part in parts:
obj = getattr(obj, part)
if module.__name__ in sys.builtin_module_names:
print("Can't get info for builtin modules.", file=sys.stderr)
exit(1)
if args.details:
print('Target: {}'.format(target))
print('Origin: {}'.format(getsourcefile(module)))
print('Cached: {}'.format(module.__cached__))
if obj is module:
print('Loader: {}'.format(repr(module.__loader__)))
if hasattr(module, '__path__'):
print('Submodule search path: {}'.format(module.__path__))
else:
try:
__, lineno = findsource(obj)
except Exception:
pass
else:
print('Line: {}'.format(lineno))
print('\n')
else:
print(getsource(obj))
if __name__ == "__main__":
_main()
|
munyirik/python
|
cpython/Lib/inspect.py
|
Python
|
bsd-3-clause
| 112,277
|
[
"VisIt"
] |
bbb960e6500b59a223af85d98fe88d8ed41af893904e0db3baa112ca6cc96b68
|
import subprocess,os,sys
sys.path.append('/home/shangzhong/Codes/Pipeline')
from Modules.f05_IDConvert import extract_from_gene2ref,prIDMap2geneIDMap
from Modules.p04_ParseBlast import blastName2ID
def rosetta_pair(group):
"""
    This function detects rosetta pairs, given a list of lines that share the same 2nd column.
    * group: a list of blast tabular results. Each item in the list is one line of blast
      tabular output; all lines should share the same reference name in the 2nd column.
"""
rosetta = []
for i in range(len(group)-1):
for j in range(i + 1,len(group)):
if (max(int(group[i][8]),int(group[i][9])) < min(int(group[j][8]),int(group[j][9]))):
ref = group[i][1]; firstName = group[i][0]; secondName = group[j][0]
firBegin=group[i][8];firEnd=group[i][9]
secBegin=group[j][8];secEnd=group[j][9]
rosetta.append([ref,firstName,secondName,firBegin,firEnd,secBegin,secEnd])
elif (max(int(group[j][8]),int(group[j][9])) < min(int(group[i][8]),int(group[i][9]))):
ref = group[i][1]; firstName = group[j][0]; secondName = group[i][0]
firBegin=group[j][8];firEnd=group[j][9]
secBegin=group[i][8];secEnd=group[i][9]
rosetta.append([ref,firstName,secondName,firBegin,firEnd,secBegin,secEnd])
else:
continue
return rosetta
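# Illustrative sketch (hypothetical demo helper with toy data): two queries
# hitting the same reference on disjoint intervals form one rosetta pair.
# Columns follow BLAST tabular output; indices 8 and 9 hold the subject
# start/end coordinates that the function compares.
def _demo_rosetta_pair():
    group = [
        ['protA', 'ref1', '95.0', '100', '0', '0', '1', '100', '1', '100', '1e-50', '200'],
        ['protB', 'ref1', '90.0', '80', '0', '0', '1', '80', '150', '229', '1e-40', '160'],
    ]
    # protA covers ref1:1-100 and protB covers ref1:150-229, so the result is
    # [['ref1', 'protA', 'protB', '1', '100', '150', '229']]
    return rosetta_pair(group)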
def rosetta_stone(blastFile,organism1,taxID1,organism2,taxID2,gene2refseq):
"""
    This function detects rosetta stone pairs in the blast result.
    * blastFile: filename of tabular blast output.
"""
# # (1) change first 2 columns into ID, extract top hit
IDblast = blastName2ID(blastFile)
# # (2) extract cho, human from gene2refseq
org1ref = extract_from_gene2ref(gene2refseq,taxID1,organism1,columnNum=[2,6,7])
org2ref = extract_from_gene2ref(gene2refseq,taxID2,organism2,columnNum=[2,6,7])
# # (3) replace protein ID with gene ID
IDmap = prIDMap2geneIDMap(IDblast,org1ref,org2ref)
# # (4) sort file by 2nd column
sortFile = IDmap[:-3] + 'sort.txt'
cmd = ('sort -k2,2 -n {input} > {output}').format(input=IDmap,output=sortFile)
subprocess.call(cmd,shell=True)
os.remove(IDblast)
# # (5) now come to the parsing rosetta stone pairs
filename = sortFile
res = open(filename,'r')
inter = 'inter.txt'
interOut = open(inter,'w')
outputFile = filename[:-16] + 'rosetta.txt'
output = open(outputFile,'w')
    # merge lines whose first 2 columns are the same.
id_pair = res.readline()[:-1].split('\t')
# start from second line
for line in res:
line = line[:-1]
item = line.split('\t')
# should merge
if id_pair[0] == item[0] and id_pair[1] == item[1]:
            # compute both endpoints from the original coordinates before
            # overwriting id_pair[8]; otherwise the max may be taken over the
            # new minimum instead of the original start value
            coords = [int(id_pair[8]), int(id_pair[9]), int(item[8]), int(item[9])]
            id_pair[8] = str(min(coords))
            id_pair[9] = str(max(coords))
else:
interOut.write('\t'.join(id_pair) + '\n')
id_pair = item
interOut.write('\t'.join(id_pair) + '\n')
interOut.close()
res.close()
# now interOut has unique first 2 columns of blast tabular results
res = open(inter,'r')
group = [res.readline()[:-1].split('\t')] # group stores lines with same reference
for line in res:
item = line[:-1].split('\t')
if item[1] == group[-1][1]:
group.append(item)
else:
# whether to do rosetta pair detection
if len(group) > 1:
rosetta = rosetta_pair(group)
# output to file
if rosetta != []:
for pair in rosetta:
output.write('\t'.join(pair) + '\n')
            # otherwise there are no rosetta pairs; start a new group
group = [item]
if len(group) > 1:
rosetta = rosetta_pair(group)
# output to file
if rosetta != []:
for pair in rosetta:
output.write('\t'.join(pair) + '\n')
output.close()
res.close()
return outputFile
if __name__ == '__main__':
    blastFile = '/data/shangzhong/CHO2Human/2wayBlastPresult/human2cho.txt'
    gene2refseq = '/data/shangzhong/CHO2Human/2wayBlastPresult/141026gene2refseq.gz'
    rosetta = rosetta_stone(blastFile,'cho',10029,'human',9606,gene2refseq)
|
shl198/Pipeline
|
CHOGeneId2HumanId/rosetta_stone.py
|
Python
|
mit
| 4,396
|
[
"BLAST"
] |
43fd8e01c698517d0fd30a2f30c1dde85b3ddc16c58b0b0dc0b67df88d7f39d8
|
import numpy as np
import pytest
from conftest import skipif
from devito import (Grid, Dimension, Function, TimeFunction, Eq, Inc, solve,
Operator, switchconfig, norm, cos)
from devito.exceptions import InvalidOperator
from devito.ir.iet import Block, FindNodes, retrieve_iteration_tree
from devito.mpi.routines import IrecvCall, IsendCall
from examples.seismic import TimeAxis, RickerSource, Receiver
class TestCodeGeneration(object):
@switchconfig(platform='nvidiaX')
def test_init_omp_env(self):
grid = Grid(shape=(3, 3, 3))
u = TimeFunction(name='u', grid=grid)
op = Operator(Eq(u.forward, u.dx+1))
assert str(op.body[0].body[0]) == ('if (deviceid != -1)\n'
'{\n omp_set_default_device(deviceid);\n}')
@pytest.mark.parallel(mode=1)
@switchconfig(platform='nvidiaX')
def test_init_omp_env_w_mpi(self):
grid = Grid(shape=(3, 3, 3))
u = TimeFunction(name='u', grid=grid)
op = Operator(Eq(u.forward, u.dx+1), opt=('advanced', {'gpu-direct': True}))
assert str(op.body[0].body[0]) ==\
('if (deviceid != -1)\n'
'{\n omp_set_default_device(deviceid);\n}\n'
'else\n'
'{\n int rank = 0;\n'
' MPI_Comm_rank(comm,&rank);\n'
' int ngpus = omp_get_num_devices();\n'
' omp_set_default_device((rank)%(ngpus));\n}')
@switchconfig(platform='nvidiaX')
def test_basic(self):
grid = Grid(shape=(3, 3, 3))
u = TimeFunction(name='u', grid=grid)
op = Operator(Eq(u.forward, u + 1), language='openmp')
trees = retrieve_iteration_tree(op)
assert len(trees) == 1
assert trees[0][1].pragmas[0].value ==\
'omp target teams distribute parallel for collapse(3)'
assert op.body[2].header[0].value ==\
('omp target enter data map(to: u[0:u_vec->size[0]]'
'[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
assert str(op.body[2].footer[0]) == ''
assert op.body[2].footer[1].contents[0].value ==\
('omp target update from(u[0:u_vec->size[0]]'
'[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
assert op.body[2].footer[1].contents[1].value ==\
('omp target exit data map(release: u[0:u_vec->size[0]]'
'[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]]) if(devicerm)')
# Currently, advanced-fsg mode == advanced mode
op1 = Operator(Eq(u.forward, u + 1), language='openmp', opt='advanced-fsg')
assert str(op) == str(op1)
@switchconfig(platform='nvidiaX')
def test_basic_customop(self):
grid = Grid(shape=(3, 3, 3))
u = TimeFunction(name='u', grid=grid)
op = Operator(Eq(u.forward, u + 1), language='openmp', opt='openmp')
trees = retrieve_iteration_tree(op)
assert len(trees) == 1
assert trees[0][1].pragmas[0].value ==\
'omp target teams distribute parallel for collapse(3)'
        # An unsupported opt pipeline for the chosen language must raise
        # InvalidOperator (here, requesting 'openacc' with language='openmp').
        with pytest.raises(InvalidOperator):
            Operator(Eq(u.forward, u + 1), language='openmp', opt='openacc')
@switchconfig(platform='nvidiaX')
def test_multiple_eqns(self):
grid = Grid(shape=(3, 3, 3))
u = TimeFunction(name='u', grid=grid)
v = TimeFunction(name='v', grid=grid)
op = Operator([Eq(u.forward, u + v + 1), Eq(v.forward, u + v + 4)])
trees = retrieve_iteration_tree(op)
assert len(trees) == 1
assert trees[0][1].pragmas[0].value ==\
'omp target teams distribute parallel for collapse(3)'
for i, f in enumerate([u, v]):
assert op.body[2].header[i].value ==\
('omp target enter data map(to: %(n)s[0:%(n)s_vec->size[0]]'
'[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]])' %
{'n': f.name})
assert op.body[2].footer[i+1].contents[0].value ==\
('omp target update from(%(n)s[0:%(n)s_vec->size[0]]'
'[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]])' %
{'n': f.name})
assert op.body[2].footer[i+1].contents[1].value ==\
('omp target exit data map(release: %(n)s[0:%(n)s_vec->size[0]]'
'[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]]) '
'if(devicerm)' % {'n': f.name})
@switchconfig(platform='nvidiaX')
def test_multiple_loops(self):
grid = Grid(shape=(3, 3, 3))
f = Function(name='f', grid=grid)
g = Function(name='g', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=2)
v = TimeFunction(name='v', grid=grid, space_order=2)
eqns = [Eq(f, g*2),
Eq(u.forward, u + v*f),
Eq(v.forward, u.forward.dx + v*f + 4)]
op = Operator(eqns, opt='noop')
trees = retrieve_iteration_tree(op)
assert len(trees) == 3
# All loop nests must have been parallelized
assert trees[0][0].pragmas[0].value ==\
'omp target teams distribute parallel for collapse(3)'
assert trees[1][1].pragmas[0].value ==\
'omp target teams distribute parallel for collapse(3)'
assert trees[2][1].pragmas[0].value ==\
'omp target teams distribute parallel for collapse(3)'
# Check `u` and `v`
for i, f in enumerate([u, v], 1):
assert op.body[2].header[i].value ==\
('omp target enter data map(to: %(n)s[0:%(n)s_vec->size[0]]'
'[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]])' %
{'n': f.name})
assert op.body[2].footer[i+1].contents[0].value ==\
('omp target update from(%(n)s[0:%(n)s_vec->size[0]]'
'[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]])' %
{'n': f.name})
assert op.body[2].footer[i+1].contents[1].value ==\
('omp target exit data map(release: %(n)s[0:%(n)s_vec->size[0]]'
'[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]]) '
'if(devicerm)' % {'n': f.name})
# Check `f`
assert op.body[2].header[0].value ==\
('omp target enter data map(to: f[0:f_vec->size[0]]'
'[0:f_vec->size[1]][0:f_vec->size[2]])')
assert op.body[2].footer[1].contents[0].value ==\
('omp target update from(f[0:f_vec->size[0]]'
'[0:f_vec->size[1]][0:f_vec->size[2]])')
assert op.body[2].footer[1].contents[1].value ==\
('omp target exit data map(release: f[0:f_vec->size[0]]'
'[0:f_vec->size[1]][0:f_vec->size[2]]) if(devicerm)')
# Check `g` -- note that unlike `f`, this one should be `delete` upon
# exit, not `from`
assert op.body[2].header[3].value ==\
('omp target enter data map(to: g[0:g_vec->size[0]]'
'[0:g_vec->size[1]][0:g_vec->size[2]])')
assert op.body[2].footer[4].value ==\
('omp target exit data map(delete: g[0:g_vec->size[0]]'
'[0:g_vec->size[1]][0:g_vec->size[2]])'
' if(devicerm && (g_vec->size[0] != 0) && (g_vec->size[1] != 0)'
' && (g_vec->size[2] != 0))')
@switchconfig(platform='nvidiaX')
def test_array_rw(self):
grid = Grid(shape=(3, 3, 3))
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=2)
eqn = Eq(u.forward, u*cos(f*2))
op = Operator(eqn)
assert len(op.body[2].header) == 7
assert str(op.body[2].header[0]) == 'float (*r0)[y_size][z_size];'
assert op.body[2].header[1].text ==\
'posix_memalign((void**)&r0, 64, sizeof(float[x_size][y_size][z_size]))'
assert op.body[2].header[2].value ==\
('omp target enter data map(alloc: r0[0:x_size][0:y_size][0:z_size])'
'')
assert len(op.body[2].footer) == 6
assert str(op.body[2].footer[0]) == ''
assert op.body[2].footer[1].value ==\
('omp target exit data map(delete: r0[0:x_size][0:y_size][0:z_size])'
' if((x_size != 0) && (y_size != 0) && (z_size != 0))')
assert op.body[2].footer[2].text == 'free(r0)'
@switchconfig(platform='nvidiaX')
def test_function_wo(self):
grid = Grid(shape=(3, 3, 3))
i = Dimension(name='i')
f = Function(name='f', shape=(1,), dimensions=(i,), grid=grid)
u = TimeFunction(name='u', grid=grid)
eqns = [Eq(u.forward, u + 1),
Eq(f[0], u[0, 0, 0, 0])]
op = Operator(eqns, opt='noop')
assert len(op.body[2].header) == 2
assert len(op.body[2].footer) == 2
assert op.body[2].header[0].value ==\
('omp target enter data map(to: u[0:u_vec->size[0]]'
'[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
assert str(op.body[2].header[1]) == ''
assert str(op.body[2].footer[0]) == ''
assert op.body[2].footer[1].contents[0].value ==\
('omp target update from(u[0:u_vec->size[0]]'
'[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
assert op.body[2].footer[1].contents[1].value ==\
('omp target exit data map(release: u[0:u_vec->size[0]]'
'[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]]) if(devicerm)')
@switchconfig(platform='nvidiaX')
def test_timeparallel_reduction(self):
grid = Grid(shape=(3, 3, 3))
i = Dimension(name='i')
f = Function(name='f', shape=(1,), dimensions=(i,), grid=grid)
u = TimeFunction(name='u', grid=grid)
op = Operator(Inc(f[0], u + 1), opt='noop')
trees = retrieve_iteration_tree(op)
assert len(trees) == 1
tree = trees[0]
assert tree.root.is_Sequential
assert all(i.is_ParallelRelaxed and not i.is_Parallel for i in tree[1:])
# The time loop is not in OpenMP canonical form, so it won't be parallelized
assert not tree.root.pragmas
assert len(tree[1].pragmas) == 1
assert tree[1].pragmas[0].value ==\
('omp target teams distribute parallel for collapse(3)'
' reduction(+:f[0])')
@pytest.mark.parallel(mode=1)
@switchconfig(platform='nvidiaX')
def test_gpu_direct(self):
grid = Grid(shape=(3, 3, 3))
u = TimeFunction(name='u', grid=grid)
op = Operator(Eq(u.forward, u.dx+1), opt=('advanced', {'gpu-direct': True}))
for f, v in op._func_table.items():
for node in FindNodes(Block).visit(v.root):
if type(node.children[0][0]) in (IrecvCall, IsendCall):
assert node.header[0].value ==\
('omp target data use_device_ptr(%s)' %
node.children[0][0].arguments[0].name)
class TestOperator(object):
@skipif('nodevice')
def test_op_apply(self):
grid = Grid(shape=(3, 3, 3))
u = TimeFunction(name='u', grid=grid, dtype=np.int32)
op = Operator(Eq(u.forward, u + 1))
# Make sure we've indeed generated OpenMP offloading code
assert 'omp target' in str(op)
time_steps = 1000
op.apply(time_M=time_steps)
assert np.all(np.array(u.data[0, :, :, :]) == time_steps)
def iso_acoustic(self, **opt_options):
shape = (101, 101)
extent = (1000, 1000)
origin = (0., 0.)
v = np.empty(shape, dtype=np.float32)
v[:, :51] = 1.5
v[:, 51:] = 2.5
grid = Grid(shape=shape, extent=extent, origin=origin)
t0 = 0.
tn = 1000.
dt = 1.6
time_range = TimeAxis(start=t0, stop=tn, step=dt)
f0 = 0.010
src = RickerSource(name='src', grid=grid, f0=f0,
npoint=1, time_range=time_range)
domain_size = np.array(extent)
src.coordinates.data[0, :] = domain_size*.5
src.coordinates.data[0, -1] = 20.
rec = Receiver(name='rec', grid=grid, npoint=101, time_range=time_range)
rec.coordinates.data[:, 0] = np.linspace(0, domain_size[0], num=101)
rec.coordinates.data[:, 1] = 20.
u = TimeFunction(name="u", grid=grid, time_order=2, space_order=2)
m = Function(name='m', grid=grid)
m.data[:] = 1./(v*v)
pde = m * u.dt2 - u.laplace
stencil = Eq(u.forward, solve(pde, u.forward))
src_term = src.inject(field=u.forward, expr=src * dt**2 / m)
rec_term = rec.interpolate(expr=u.forward)
op = Operator([stencil] + src_term + rec_term, opt=('advanced', opt_options))
# Make sure we've indeed generated OpenMP offloading code
assert 'omp target' in str(op)
op(time=time_range.num-1, dt=dt)
assert np.isclose(norm(rec), 490.55, atol=1e-2, rtol=0)
@skipif('nodevice')
def test_iso_acoustic(self):
TestOperator().iso_acoustic()
@pytest.mark.parallel(mode=[2, 4])
@skipif('nodevice')
def test_gpu_direct(self):
grid = Grid(shape=(3, 3, 3))
u = TimeFunction(name='u', grid=grid, dtype=np.int32)
op = Operator(Eq(u.forward, u + 1), opt=('advanced', {'gpu-direct': True}))
# Make sure we've indeed generated OpenMP offloading code
assert 'omp target' in str(op)
time_steps = 1000
op.apply(time_M=time_steps)
assert np.all(np.array(u.data[0, :, :, :]) == time_steps)
@pytest.mark.parallel(mode=[2, 4])
@skipif('nodevice')
def test_mpi_iso_acoustic(self):
opt_options = {'gpu-direct': True}
TestOperator().iso_acoustic(**opt_options)
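# Illustrative sketch (hypothetical helper, not part of the test suite): the
# pattern the tests above assert on is building an Operator and checking that
# OpenMP offloading pragmas appear in the generated code. Whether 'omp target'
# shows up depends on the configured platform (the tests force it via
# @switchconfig(platform='nvidiaX')).
def _offloading_smoke():
    grid = Grid(shape=(4, 4, 4))
    u = TimeFunction(name='u', grid=grid)
    op = Operator(Eq(u.forward, u + 1), language='openmp')
    # On a device platform this should contain 'omp target ...' pragmas.
    return 'omp target' in str(op)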
|
opesci/devito
|
tests/test_gpu_openmp.py
|
Python
|
mit
| 13,943
|
[
"VisIt"
] |
8b3b7798f9f71b705368f04fdfbc78d769b051580203d1761cdcb84c70e1daab
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
try:
from numpy import corrcoef
del corrcoef
except ImportError:
from Bio import MissingExternalDependencyError
raise MissingExternalDependencyError(
"Install NumPy if you want to use Bio.SubsMat.")
try:
import cPickle as pickle # Only available on Python 2
except ImportError:
import pickle
import sys
import os
from Bio import SubsMat
from Bio.SubsMat import FreqTable, MatrixInfo
f = sys.stdout
ftab_file = os.path.join('SubsMat', 'protein_count.txt')
with open(ftab_file) as handle:
ftab_prot = FreqTable.read_count(handle)
ctab_file = os.path.join('SubsMat', 'protein_freq.txt')
with open(ctab_file) as handle:
ctab_prot = FreqTable.read_freq(handle)
f.write("Check differences between derived and true frequencies for each\n")
f.write("letter. Differences should be very small\n")
for i in ftab_prot.alphabet.letters:
f.write("%s %f\n" % (i, abs(ftab_prot[i] - ctab_prot[i])))
pickle_file = os.path.join('SubsMat', 'acc_rep_mat.pik')
# Don't want to use text mode on Python 3, so open the pickle in binary mode.
with open(pickle_file, 'rb') as handle:
acc_rep_mat = pickle.load(handle)
acc_rep_mat = SubsMat.AcceptedReplacementsMatrix(acc_rep_mat)
obs_freq_mat = SubsMat._build_obs_freq_mat(acc_rep_mat)
ftab_prot2 = SubsMat._exp_freq_table_from_obs_freq(obs_freq_mat)
obs_freq_mat.print_mat(f=f, format=" %4.3f")
f.write("Diff between supplied and matrix-derived frequencies, should be small\n")
for i in sorted(ftab_prot):
f.write("%s %.2f\n" % (i, abs(ftab_prot[i] - ftab_prot2[i])))
s = 0.
f.write("Calculating sum of letters for an observed frequency matrix\n")
counts = obs_freq_mat.sum()
for key in sorted(counts):
f.write("%s\t%.2f\n" % (key, counts[key]))
s += counts[key]
f.write("Total sum %.2f should be 1.0\n" % (s))
lo_mat_prot = \
SubsMat.make_log_odds_matrix(acc_rep_mat=acc_rep_mat, round_digit=1) # ,ftab_prot
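# A log-odds entry is, conceptually, s(a, b) = log(q_ab / (p_a * p_b)),
# scaled and rounded, where q_ab is the observed substitution frequency
# and p_a, p_b are the background letter frequencies (a sketch of the
# idea, not the exact internals of make_log_odds_matrix).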
f.write("\nLog odds matrix\n")
f.write("\nLog odds half matrix\n")
# Was %.1f. Let us see if this is OK
lo_mat_prot.print_mat(f=f, format=" %d", alphabet='AVILMCFWYHSTNQKRDEGP')
f.write("\nLog odds full matrix\n")
# Was %.1f. Let us see if this is OK
lo_mat_prot.print_full_mat(f=f, format=" %d", alphabet='AVILMCFWYHSTNQKRDEGP')
f.write("\nTesting MatrixInfo\n")
for i in MatrixInfo.available_matrices:
mat = SubsMat.SeqMat(getattr(MatrixInfo, i))
f.write("\n%s\n------------\n" % i)
mat.print_mat(f=f)
f.write("\nTesting Entropy\n")
relative_entropy = lo_mat_prot.calculate_relative_entropy(obs_freq_mat)
f.write("relative entropy %.3f\n" % relative_entropy)
# Will uncomment the following once Bio.Tools.Statistics is in place
f.write("\nmatrix correlations\n")
blosum90 = SubsMat.SeqMat(MatrixInfo.blosum90)
blosum30 = SubsMat.SeqMat(MatrixInfo.blosum30)
try:
import numpy
f.write("BLOSUM30 & BLOSUM90 %.2f\n" % SubsMat.two_mat_correlation(blosum30, blosum90))
f.write("BLOSUM90 & BLOSUM30 %.2f\n" % SubsMat.two_mat_correlation(blosum90, blosum30))
except ImportError:
    # Need numpy for two_mat_correlation, but rather than splitting this
    # test into two and having one raise MissingExternalDependencyError, cheat:
f.write("BLOSUM30 & BLOSUM90 0.88\n")
f.write("BLOSUM90 & BLOSUM30 0.88\n")
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_SubsMat.py
|
Python
|
gpl-2.0
| 3,379
|
[
"Biopython"
] |
4503b8ccd76c0ef4abdb9e87b9fa8b6db0de8c32bde50ab92b9c5830e8557030
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the PyNCulture project, which aims at providing tools to
# easily generate complex neuronal cultures.
# Copyright (C) 2017 SENeC Initiative
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Backup Shape implementation using scipy.
"""
import weakref
from copy import deepcopy
import numpy as np
from numpy.random import uniform
import scipy.spatial as sptl
from .tools import _backup_contains
from .geom_utils import conversion_magnitude
try:
from .units import _unit_support
except ImportError:
_unit_support = False
class _Path:
'''
Backup class to mock a path as in shapely
'''
def __init__(self, parent):
self._parent = weakref.proxy(parent) if parent is not None else None
@property
def xy(self):
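        # Mimic shapely's ring coordinates: close the ring by repeating
        # the first point, then return the transposed (x, y) arrays.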
shape = self._parent._points.shape
coords = np.zeros((shape[0] + 1, 2))
coords[:shape[0]] = self._parent._points
coords[-1] = self._parent._points[0]
return coords.T
@property
def coords(self):
return self._parent._points
class BackupShape:
'''
Class containing the shape of the area where neurons will be distributed to
form a network.
    .. warning ::
        With this backup shape, only a rectangle, a disk, or an ellipse
        can be created.
Attributes
----------
area : double
Area of the shape in mm^2.
centroid : tuple of doubles
Position of the center of mass of the current shape.
'''
@classmethod
def rectangle(cls, height, width, centroid=(0.,0.), unit='um',
parent=None):
'''
Generate a rectangle of given height, width and center of mass.
Parameters
----------
height : float
Height of the rectangle.
width : float
Width of the rectangle.
centroid : tuple of floats, optional (default: (0., 0.))
Position of the rectangle's center of mass.
unit : string (default: 'um')
Unit in the metric system among 'um' (:math:`\mu m`), 'mm', 'cm',
'dm', 'm'.
When working with `pint`, length provided in another unit are
automatically converted.
parent : :class:`nngt.Graph` or subclass
The graph which is associated to this Shape.
Returns
-------
shape : :class:`Shape`
Rectangle shape.
'''
shape = cls(unit=unit, parent=parent)
if _unit_support:
from .units import Q_
if isinstance(width, Q_):
width = width.m_as(unit)
if isinstance(height, Q_):
height = height.m_as(unit)
if isinstance(centroid, Q_):
centroid = centroid.m_as(unit)
elif isinstance(centroid[0], Q_):
centroid = (centroid[0].m_as(unit), centroid[1].m_as(unit))
half_w = 0.5 * width
half_h = 0.5 * height
centroid = np.array(centroid)
points = [centroid + [half_w, half_h],
centroid + [half_w, -half_h],
centroid - [half_w, half_h],
centroid - [half_w, -half_h]]
shape._convex_hull = sptl.Delaunay(points)
shape._com = centroid
shape._area = height * width
shape._bounds = (points[2][0], points[2][1],
points[0][0], points[0][1])
shape._points = np.array(points)
shape._length = 2*width + 2*height
shape._geom_type = "Rectangle"
return shape
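    # A minimal usage sketch (illustrative values):
    #     rect = BackupShape.rectangle(200., 400., centroid=(50., 50.))
    #     rect.area       # -> 80000.0 (height * width)
    #     rect.geom_type  # -> "Rectangle"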
@classmethod
def disk(cls, radius, centroid=(0.,0.), unit='um', parent=None,
interpolate=50):
'''
Generate a disk of given radius and center (`centroid`).
Parameters
----------
        radius : float
            Radius of the disk.
        centroid : tuple of floats, optional (default: (0., 0.))
            Position of the disk's center of mass.
unit : string (default: 'um')
Unit in the metric system among 'um' (:math:`\mu m`), 'mm', 'cm',
'dm', 'm'.
parent : :class:`nngt.Graph` or subclass
The graph which is associated to this Shape.
interpolate : int, optional (default: 50)
Number of points that should be used to interpolate the circle's
exterior.
Returns
-------
shape : :class:`Shape`
            Disk shape.
'''
shape = cls(unit=unit, parent=parent)
if _unit_support:
from .units import Q_
if isinstance(radius, Q_):
radius = radius.m_as(unit)
if isinstance(centroid, Q_):
centroid = centroid.m_as(unit)
elif isinstance(centroid[0], Q_):
centroid = (centroid[0].m_as(unit), centroid[1].m_as(unit))
centroid = np.array(centroid)
# generate the points
points = [(centroid[0] + radius*np.cos(theta),
centroid[1] + radius*np.sin(theta))
for theta in np.linspace(0, 2*np.pi, interpolate)]
shape._points = np.array(points)
shape._convex_hull = sptl.Delaunay(points)
shape._com = centroid
shape._area = np.pi * np.square(radius)
shape._bounds = (centroid[0] - radius, centroid[1] - radius,
centroid[0] + radius, centroid[1] + radius)
shape._length = 2 * np.pi * radius
shape._geom_type = "Disk"
shape.radius = radius
return shape
@classmethod
def ellipse(cls, radii, centroid=(0.,0.), unit='um', parent=None,
interpolate=50):
'''
        Generate an ellipse of given radii and center (`centroid`).
Parameters
----------
radii : tuple of floats
Couple (rx, ry) containing the radii of the two axes in `unit`
centroid : tuple of floats, optional (default: (0., 0.))
            Position of the ellipse's center of mass in `unit`
unit : string (default: 'um')
Unit in the metric system among 'um' (:math:`\mu m`), 'mm', 'cm',
'dm', 'm'
parent : :class:`nngt.Graph` or subclass, optional (default: None)
The parent container.
interpolate : int, optional (default: 50)
Number of points that should be used to interpolate the ellipse's
exterior
Returns
-------
shape : :class:`Shape`
            Ellipse shape.
'''
ellipse = cls(unit=unit, parent=parent)
if _unit_support:
from .units import Q_
if isinstance(radii, Q_):
radii = radii.m_as(unit)
elif isinstance(radii[0], Q_):
radii = (radii[0].m_as(unit), radii[1].m_as(unit))
if isinstance(centroid, Q_):
centroid = centroid.m_as(unit)
elif isinstance(centroid[0], Q_):
centroid = (centroid[0].m_as(unit), centroid[1].m_as(unit))
centroid = np.array(centroid)
rx, ry = radii
points = [(centroid[0] + rx*np.cos(theta),
centroid[1] + ry*np.sin(theta))
for theta in np.linspace(0, 2*np.pi, interpolate)]
ellipse._points = np.array(points)
ellipse._convex_hull = sptl.Delaunay(points)
ellipse._com = centroid
ellipse._area = np.pi * rx * ry
ellipse._bounds = (centroid[0] - rx, centroid[1] - ry,
centroid[0] + rx, centroid[1] + ry)
# do not implement _length
ellipse._geom_type = "Ellipse"
ellipse.radii = radii
return ellipse
def __init__(self, unit='um', parent=None):
self._parent = weakref.proxy(parent) if parent is not None else None
self.exterior = _Path(self)
self.interiors = []
self._unit = unit
self._return_quantity = False
self._points = None
self._bounds = None
self._area = None
self._com = None
self._convex_hull = None
def __eq__(self, other):
if isinstance(other, self.__class__):
b_interior = np.all(np.isclose(self.interiors, other.interiors))
b_points = np.all(np.isclose(self._points, other._points))
            return bool(b_points and b_interior)
return False
def copy(self):
'''
Create a copy of the current Shape.
'''
copy = BackupShape(unit=self._unit)
# copy properties
copy.interiors = deepcopy(self.interiors)
copy._points = deepcopy(self._points)
copy._bounds = deepcopy(self._bounds)
copy._area = self._area
copy._com = deepcopy(self._com)
copy._convex_hull = deepcopy(self._convex_hull)
# set mock exterior
copy.exterior = _Path(copy)
return copy
@property
def area(self):
''' Area of the shape. '''
return self._area
@property
def areas(self):
raise NotImplementedError("Backup Shape class has no Areas; use the "
"shapely implementation to get more "
"advanced functionalities.")
@property
def bounds(self):
''' Containing box of the shape '''
return self._bounds
@property
def centroid(self):
''' Centroid of the shape. '''
return self._com
@property
def parent(self):
''' Return the parent of the :class:`Shape`. '''
return self._parent
@property
def unit(self):
'''
Return the unit for the :class:`Shape` coordinates.
'''
return self._unit
@property
def coords(self):
return self._convex_hull.points
@property
def geom_type(self):
return self._geom_type
@property
def return_quantity(self):
'''
Whether `seed_neurons` returns positions with units by default.
.. versionadded:: 0.5
'''
return self._return_quantity
def set_parent(self, parent):
self._parent = weakref.proxy(parent) if parent is not None else None
def set_return_units(self, b):
'''
Set the default behavior for positions returned by `seed_neurons`.
If `True`, then the positions returned are quantities with units (from
the `pint` library), otherwise they are simply numpy arrays.
.. versionadded:: 0.5
Note
----
`set_return_units(True)` requires `pint` to be installed on the system,
otherwise an error will be raised.
'''
if b and not _unit_support:
raise RuntimeError("Cannot set 'return_quantity' to True as "
"`pint` is not installed.")
self._return_quantity = b
def add_subshape(self, subshape, position, unit='um'):
'''
Add a :class:`Shape` to the current one.
Parameters
----------
subshape: :class:`Shape`
Subshape to add.
position: tuple of doubles
Position of the subshape's center of gravity in space.
unit: string (default 'um')
Unit in the metric system among 'um', 'mm', 'cm', 'dm', 'm'
'''
raise NotImplementedError("Not available with backup shape.")
def seed_neurons(self, neurons=None, xmin=None, xmax=None, ymin=None,
                     ymax=None, unit=None, return_quantity=None):
'''
Return the positions of the neurons inside the
:class:`Shape`.
Parameters
----------
neurons : int, optional (default: None)
Number of neurons to seed. This argument is considered only if the
:class:`Shape` has no `parent`, otherwise, a position is generated
for each neuron in `parent`.
xmin : double, optional (default: lowest abscissa of the Shape)
Limit the area where neurons will be seeded to the region on the
right of `xmin`.
xmax : double, optional (default: highest abscissa of the Shape)
Limit the area where neurons will be seeded to the region on the
left of `xmax`.
ymin : double, optional (default: lowest ordinate of the Shape)
Limit the area where neurons will be seeded to the region on the
upper side of `ymin`.
ymax : double, optional (default: highest ordinate of the Shape)
Limit the area where neurons will be seeded to the region on the
lower side of `ymax`.
unit : string (default: None)
Unit in which the positions of the neurons will be returned, among
'um', 'mm', 'cm', 'dm', 'm'.
        return_quantity : bool, optional (default: None)
            Whether the positions should be returned as ``pint.Quantity``
            objects (requires Pint). If None, the shape's default (set
            via `set_return_units`) is used.
Returns
-------
positions : array of double with shape (N, 2) or `pint.Quantity` if
`return_quantity` is `True`.
'''
if self._parent is not None:
neurons = self._parent.node_nb()
positions = np.zeros((neurons, 2))
return_quantity = (self._return_quantity
if return_quantity is None else return_quantity)
if return_quantity:
unit = self._unit if unit is None else unit
if not _unit_support:
raise RuntimeError("`return_quantity` requested but Pint is "
"not available. Please install it first.")
if _unit_support:
from .units import Q_
if isinstance(xmin, Q_):
xmin = xmin.m_as(unit)
if isinstance(xmax, Q_):
xmax = xmax.m_as(unit)
if isinstance(ymin, Q_):
ymin = ymin.m_as(unit)
if isinstance(ymax, Q_):
ymax = ymax.m_as(unit)
# set min/max
if xmin is None:
xmin = -np.inf
if ymin is None:
ymin = -np.inf
if xmax is None:
xmax = np.inf
if ymax is None:
ymax = np.inf
min_x, min_y, max_x, max_y = self.bounds
min_x = max(xmin, min_x)
min_y = max(ymin, min_y)
max_x = min(xmax, max_x)
max_y = min(ymax, max_y)
# test case
if self._geom_type == "Rectangle":
xx = uniform(min_x, max_x, size=neurons)
yy = uniform(min_y, max_y, size=neurons)
positions = np.vstack((xx, yy)).T
elif (self._geom_type == "Disk"
and (xmin, ymin, xmax, ymax) == self.bounds):
theta = uniform(0, 2*np.pi, size=neurons)
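            # Drawing r = R*sqrt(U) with U uniform on [0, 1) makes the
            # points uniform in area (the radius of a uniform point in a
            # disk has CDF (r/R)**2); the 0.99 keeps them strictly inside.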
r = self.radius*np.sqrt(uniform(0, 0.99, size=neurons))
positions = np.vstack(
(r*np.cos(theta) + self.centroid[0],
r*np.sin(theta) + self.centroid[1])).T
elif self._geom_type == "Disk":
num_valid = 0
# take some precaution to stay inside the shape
r2 = 0.99*np.square(self.radius)
while num_valid < neurons:
xx = uniform(min_x, max_x, size=neurons-num_valid)
yy = uniform(min_y, max_y, size=neurons-num_valid)
rr2 = np.square(xx-self.centroid[0]) + \
np.square(yy-self.centroid[1])
idx_valid = rr2 <= r2
new_valid = np.sum(idx_valid)
positions[num_valid:num_valid+new_valid, 0] = xx[idx_valid]
positions[num_valid:num_valid+new_valid, 1] = yy[idx_valid]
num_valid += new_valid
elif self._geom_type == "Ellipse":
# take some precaution to stay inside the shape
rx, ry = self.radii[0], self.radii[1]
a = np.maximum(rx, ry)
b = np.minimum(rx, ry)
c = np.sqrt(a*a - b*b)
e = c / a
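            # The ellipse boundary at angle theta from the major axis lies
            # at distance b / sqrt(1 - e**2 * cos(theta)**2) from the
            # center, equivalently a*b / hypot(a*sin(theta), b*cos(theta)).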
num_valid = 0
while num_valid < neurons:
xx = uniform(min_x, max_x, size=neurons-num_valid)
yy = uniform(min_y, max_y, size=neurons-num_valid)
thetas = np.arctan2(yy-self.centroid[1], xx-self.centroid[0])
dist_centroid = np.sqrt(np.square(xx-self.centroid[0]) + \
np.square(yy-self.centroid[1]))
                # take some precaution to stay inside the shape; use the
                # angular term matching the major axis (cos if the x-axis
                # is major, sin otherwise)
                trig = np.cos(thetas) if rx >= ry else np.sin(thetas)
                dist_max = np.sqrt(
                    0.99*b*b / (1 - e*e*np.square(trig)))
idx_valid = dist_centroid <= dist_max
new_valid = np.sum(idx_valid)
positions[num_valid:num_valid+new_valid, 0] = xx[idx_valid]
positions[num_valid:num_valid+new_valid, 1] = yy[idx_valid]
num_valid += new_valid
else:
raise RuntimeError(
"Unsupported type: '{}'.".format(self._geom_type))
if unit is not None and unit != self._unit:
positions *= conversion_magnitude(unit, self._unit)
if _unit_support and return_quantity:
from .units import Q_
positions *= Q_(unit)
return positions
def add_hole(self, *args, **kwargs):
raise NotImplementedError("Not available with backup shape.")
def random_obstacles(self, *args, **kwargs):
raise NotImplementedError("Not available with backup shape.")
def contains_neurons(self, positions):
'''
Check whether the neurons are contained in the shape.
.. versionadded:: 0.4
Parameters
----------
positions : point or 2D-array of shape (N, 2)
Returns
-------
contained : bool or 1D boolean array of length N
True if the neuron is contained, False otherwise.
'''
if np.shape(positions) == (len(positions), 2):
return _backup_contains(positions[:, 0], positions[:, 1], self)
else:
return _backup_contains(positions[0], positions[1], self)
|
SENeC-Initiative/PyNCulture
|
backup_shape.py
|
Python
|
gpl-3.0
| 18,852
|
[
"NEURON"
] |
bb7608cfd492c56c6b3f4f4681ba4b06b39e94480788d42f2e781a7bbbca4632
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module contains some utils for the main script of the chemenv package.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen import MPRester
from pymatgen.io.cif import CifParser
try:
import vtk
from pymatgen.vis.structure_vtk import StructureVis
no_vis = False
except ImportError:
StructureVis = None
no_vis = True
try:
input = raw_input
except NameError:
pass
from pymatgen.core.sites import PeriodicSite
import re
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import UNCLEAR_ENVIRONMENT_SYMBOL
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.utils.chemenv_errors import NeighborsNotComputedChemenvError
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import rotateCoords
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimplestChemenvStrategy
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimpleAbundanceChemenvStrategy
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import TargettedPenaltiedAbundanceChemenvStrategy
from pymatgen.core.structure import Molecule
from collections import OrderedDict
import numpy as np
strategies_class_lookup = OrderedDict()
strategies_class_lookup['SimplestChemenvStrategy'] = SimplestChemenvStrategy
strategies_class_lookup['SimpleAbundanceChemenvStrategy'] = SimpleAbundanceChemenvStrategy
strategies_class_lookup['TargettedPenaltiedAbundanceChemenvStrategy'] = TargettedPenaltiedAbundanceChemenvStrategy
def draw_cg(vis, site, neighbors, cg=None, perm=None, perfect2local_map=None,
show_perfect=False, csm_info=None, symmetry_measure_type='csm_wcs_ctwcc', perfect_radius=0.1,
show_distorted=True):
if show_perfect:
if csm_info is None:
raise ValueError('Not possible to show perfect environment without csm_info')
csm_suffix = symmetry_measure_type[4:]
perf_radius = (perfect_radius - 0.2) / 0.002
if perm is not None and perfect2local_map is not None:
raise ValueError('Only "perm" or "perfect2local_map" should be provided in draw_cg, not both')
if show_distorted:
vis.add_bonds(neighbors, site)
for n in neighbors:
vis.add_site(n)
if len(neighbors) < 3:
if show_distorted:
vis.add_bonds(neighbors, site, color=[0.0, 1.0, 0.0], opacity=0.4, radius=0.175)
if show_perfect:
if len(neighbors) == 2:
perfect_geometry = AbstractGeometry.from_cg(cg)
trans = csm_info['other_symmetry_measures']['translation_vector_{}'.format(csm_suffix)]
rot = csm_info['other_symmetry_measures']['rotation_matrix_{}'.format(csm_suffix)]
scale = csm_info['other_symmetry_measures']['scaling_factor_{}'.format(csm_suffix)]
points = perfect_geometry.points_wcs_ctwcc()
rotated_points = rotateCoords(points, rot)
points = [scale * pp + trans for pp in rotated_points]
if 'wcs' in csm_suffix:
ef_points = points[1:]
else:
ef_points = points
edges = cg.edges(ef_points, input='coords')
vis.add_edges(edges, color=[1.0, 0.0, 0.0])
for point in points:
vis.add_partial_sphere(coords=point, radius=perf_radius, color=[0.0, 0.0, 0.0],
start=0, end=360, opacity=1)
else:
if show_distorted:
if perm is not None:
faces = cg.faces(neighbors, permutation=perm)
edges = cg.edges(neighbors, permutation=perm)
elif perfect2local_map is not None:
faces = cg.faces(neighbors, perfect2local_map=perfect2local_map)
edges = cg.edges(neighbors, perfect2local_map=perfect2local_map)
else:
faces = cg.faces(neighbors)
edges = cg.edges(neighbors)
symbol = list(site.species_and_occu.keys())[0].symbol
mycolor = [float(i) / 255 for i in vis.el_color_mapping[symbol]]
vis.add_faces(faces, mycolor, opacity=0.4)
vis.add_edges(edges)
if show_perfect:
perfect_geometry = AbstractGeometry.from_cg(cg)
trans = csm_info['other_symmetry_measures']['translation_vector_{}'.format(csm_suffix)]
rot = csm_info['other_symmetry_measures']['rotation_matrix_{}'.format(csm_suffix)]
scale = csm_info['other_symmetry_measures']['scaling_factor_{}'.format(csm_suffix)]
points = perfect_geometry.points_wcs_ctwcc()
rotated_points = rotateCoords(points, rot)
points = [scale*pp + trans for pp in rotated_points]
if 'wcs' in csm_suffix:
ef_points = points[1:]
else:
ef_points = points
edges = cg.edges(ef_points, input='coords')
vis.add_edges(edges, color=[1.0, 0.0, 0.0])
for point in points:
vis.add_partial_sphere(coords=point, radius=perf_radius, color=[0.0, 0.0, 0.0],
start=0, end=360, opacity=1)
# Visualizing a coordination geometry
def visualize(cg, zoom=None, vis=None, myfactor=1.0, view_index=True):
if vis is None:
vis = StructureVis(show_polyhedron=False, show_unit_cell=False)
myspecies = ["O"] * (cg.coordination_number+1)
myspecies[0] = "Cu"
coords = [np.zeros(3, np.float) + cg.central_site]
for pp in cg.points:
coords.append(np.array(pp) + cg.central_site)
coords = [cc * myfactor for cc in coords]
structure = Molecule(species=myspecies, coords=coords)
vis.set_structure(structure=structure, reset_camera=True)
# neighbors_list = coords[1:]
draw_cg(vis, site=structure[0], neighbors=structure[1:], cg=cg)
if view_index:
for ineighbor, neighbor in enumerate(structure[1:]):
vis.add_text(neighbor.coords, '{}'.format(ineighbor), color=(0, 0, 0))
if zoom is not None:
vis.zoom(zoom)
return vis
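# A minimal usage sketch (requires VTK; 'O:6' is the ChemEnv mp_symbol
# for the octahedron):
#     allcg = AllCoordinationGeometries()
#     cg = allcg.get_geometry_from_mp_symbol('O:6')
#     visualize(cg, zoom=1.0).show()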
def welcome(chemenv_config):
print('Chemical Environment package (ChemEnv)')
print(chemenv_config.package_options_description())
def thankyou():
print('Thank you for using the ChemEnv package')
def compute_environments(chemenv_configuration):
string_sources = {'cif': {'string': 'a Cif file', 'regexp': '.*\.cif$'},
'mp': {'string': 'the Materials Project database', 'regexp': 'mp-[0-9]+$'}}
questions = {'c': 'cif'}
if chemenv_configuration.has_materials_project_access:
questions['m'] = 'mp'
lgf = LocalGeometryFinder()
lgf.setup_parameters()
allcg = AllCoordinationGeometries()
strategy_class = strategies_class_lookup[chemenv_configuration.package_options['default_strategy']['strategy']]
#TODO: Add the possibility to change the parameters and save them in the chemenv_configuration
default_strategy = strategy_class()
default_strategy.setup_options(chemenv_configuration.package_options['default_strategy']['strategy_options'])
max_dist_factor = chemenv_configuration.package_options['default_max_distance_factor']
firsttime = True
while True:
if len(questions) > 1:
found = False
print('Enter the source from which the structure is coming or <q> to quit :')
for key_character, qq in questions.items():
print(' - <{}> for a structure from {}'.format(key_character, string_sources[qq]['string']))
test = input(' ... ')
if test == 'q':
break
if test not in list(questions.keys()):
for key_character, qq in questions.items():
if re.match(string_sources[qq]['regexp'], str(test)) is not None:
found = True
source_type = qq
if not found:
print('Wrong key, try again ...')
continue
else:
source_type = questions[test]
else:
found = False
source_type = list(questions.values())[0]
if found and len(questions) > 1:
input_source = test
if source_type == 'cif':
if not found:
input_source = input('Enter path to cif file : ')
cp = CifParser(input_source)
structure = cp.get_structures()[0]
elif source_type == 'mp':
if not found:
input_source = input('Enter materials project id (e.g. "mp-1902") : ')
a = MPRester(chemenv_configuration.materials_project_api_key)
structure = a.get_structure_by_material_id(input_source)
lgf.setup_structure(structure)
print('Computing environments for {} ... '.format(structure.composition.reduced_formula))
se = lgf.compute_structure_environments(maximum_distance_factor=max_dist_factor)
print('Computing environments finished')
while True:
test = input('See list of environments determined for each (unequivalent) site ? '
'("y" or "n", "d" with details, "g" to see the grid) : ')
strategy = default_strategy
if test in ['y', 'd', 'g']:
strategy.set_structure_environments(se)
for eqslist in se.equivalent_sites:
site = eqslist[0]
isite = se.structure.index(site)
try:
if strategy.uniquely_determines_coordination_environments:
ces = strategy.get_site_coordination_environments(site)
else:
ces = strategy.get_site_coordination_environments_fractions(site)
except NeighborsNotComputedChemenvError:
continue
if ces is None:
continue
if len(ces) == 0:
continue
comp = site.species_and_occu
#ce = strategy.get_site_coordination_environment(site)
if strategy.uniquely_determines_coordination_environments:
ce = ces[0]
if ce is None:
continue
thecg = allcg.get_geometry_from_mp_symbol(ce[0])
mystring = 'Environment for site #{} {} ({}) : {} ({})\n'.format(str(isite),
comp.get_reduced_formula_and_factor()[0],
str(comp),
thecg.name,
ce[0])
else:
mystring = 'Environments for site #{} {} ({}) : \n'.format(str(isite),
comp.get_reduced_formula_and_factor()[0],
str(comp))
for ce in ces:
cg = allcg.get_geometry_from_mp_symbol(ce[0])
csm = ce[1]['other_symmetry_measures']['csm_wcs_ctwcc']
                            mystring += ' - {} ({}): {:.2f} % (csm : {:.2f})\n'.format(cg.name, cg.mp_symbol,
100.0*ce[2],
csm)
if test in ['d', 'g'] and strategy.uniquely_determines_coordination_environments:
if thecg.mp_symbol != UNCLEAR_ENVIRONMENT_SYMBOL:
mystring += ' <Continuous symmetry measures> '
mingeoms = se.ce_list[isite][thecg.coordination_number][0].minimum_geometries()
for mingeom in mingeoms:
csm = mingeom[1]['other_symmetry_measures']['csm_wcs_ctwcc']
mystring += '{} : {:.2f} '.format(mingeom[0], csm)
print(mystring)
if test == 'g':
test = input('Enter index of site(s) for which you want to see the grid of parameters : ')
indices = list(map(int, test.split()))
print(indices)
for isite in indices:
se.plot_environments(isite, additional_condition=se.AC.ONLY_ACB)
if no_vis:
test = input('Go to next structure ? ("y" to do so)')
if test == 'y':
break
continue
test = input('View structure with environments ? ("y" for the unit cell or "m" for a supercell or "n") : ')
if test in ['y', 'm']:
if test == 'm':
mydeltas = []
test = input('Enter multiplicity (e.g. 3 2 2) : ')
nns = test.split()
for i0 in range(int(nns[0])):
for i1 in range(int(nns[1])):
for i2 in range(int(nns[2])):
mydeltas.append(np.array([1.0*i0, 1.0*i1, 1.0*i2], np.float))
else:
mydeltas = [np.zeros(3, np.float)]
if firsttime:
vis = StructureVis(show_polyhedron=False, show_unit_cell=True)
vis.show_help = False
firsttime = False
vis.set_structure(se.structure)
strategy.set_structure_environments(se)
for isite, site in enumerate(se.structure):
try:
ces = strategy.get_site_coordination_environments(site)
except NeighborsNotComputedChemenvError:
continue
if len(ces) == 0:
continue
ce = strategy.get_site_coordination_environment(site)
if ce is not None and ce[0] != UNCLEAR_ENVIRONMENT_SYMBOL:
for mydelta in mydeltas:
psite = PeriodicSite(site._species, site._fcoords + mydelta, site._lattice,
properties=site._properties)
vis.add_site(psite)
neighbors = strategy.get_site_neighbors(psite)
draw_cg(vis, psite, neighbors, cg=lgf.allcg.get_geometry_from_mp_symbol(ce[0]),
perm=ce[1]['permutation'])
vis.show()
test = input('Go to next structure ? ("y" to do so) : ')
if test == 'y':
break
print('')
|
tallakahath/pymatgen
|
pymatgen/analysis/chemenv/utils/scripts_utils.py
|
Python
|
mit
| 15,858
|
[
"VTK",
"pymatgen"
] |
42a51541dd4b7a6865ef69ad483b3dfe3af77fc5bf22fcca58d86f1fea610ef5
|
# Version: 0.16+dev
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.3, 3.4, 3.5, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
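A minimal `[versioneer]` section might look like this (paths are
illustrative):
    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix =
    parentdir_prefix = myproject-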
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
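For example, the keyword expansion relies on a `.gitattributes` entry,
added for you by `versioneer install`, of the form:
    SRC/_version.py export-subst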
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
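For illustration, in a checkout two commits past a "0.11" tag with local
modifications, the returned dictionary might look like:
    {'version': '0.11+2.g1076c97.dirty',
     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
     'dirty': True,
     'error': None}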
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
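In `setup.py` itself, the usual pattern is (a sketch; see INSTALL.md for
the full instructions):
    import versioneer
    setup(version=versioneer.get_version(),
          cmdclass=versioneer.get_cmdclass())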
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
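For a single hypothetical checkout (two commits past the "0.11" tag, with
uncommitted changes), the styles would render roughly as follows:
    pep440:            0.11+2.g1076c97.dirty
    pep440-post:       0.11.post2.dev0+g1076c97
    git-describe:      0.11-2-g1076c97-dirty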
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This section
details the most significant ones. More can be found on the GitHub
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
  provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.16+dev (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
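# For example, pieces {'closest-tag': '0.11', 'distance': 2,
# 'short': '1076c97', 'dirty': True} render as '0.11+2.g1076c97.dirty'.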
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-time keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
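# Illustrative note (added for clarity; not part of upstream Versioneer): with
# parentdir_prefix = "myproject-", a source tree unpacked into a directory
# named "myproject-1.2.3" (or whose parent or grandparent is named that way)
# yields {"version": "1.2.3", "full-revisionid": None, "dirty": False,
# "error": None}. Otherwise NotThisMethod is raised and get_versions() falls
# back to the next strategy.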
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.16+dev) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version"}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
kdmurray91/radhax
|
radsim/versioneer.py
|
Python
|
gpl-3.0
| 64,445
|
[
"Brian"
] |
a5321d1811c2bc3008e3ac838f4f57acbdaeab87332b99275425eb5be59feae3
|
#!/usr/bin/env python
from __future__ import print_function, division
import os
import scipy as sp
import scipy.linalg as LA
import scipy.spatial.distance as sp_dist
from ase import units
from sklearn.kernel_ridge import KernelRidge
# import pdb
from matplotlib import pyplot as plt
import PES_plotting as pl
from time import time as get_time
SAMPLE_WEIGHT = 1
def round_vector(vec, precision = 0.05):
"""
vec: array_like, type real
precision: real, > 0
"""
return ((vec + 0.5 * precision) / precision).astype('int') * precision
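# Illustrative example (comment added for clarity): round_vector snaps each
# component to the nearest multiple of `precision`, e.g.
#   round_vector(sp.array([0.07, 0.12]), precision=0.05) -> approx. [0.05, 0.10]
# Note that astype('int') truncates toward zero, so the formula is exact
# nearest-multiple rounding only for non-negative components.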
def update_model(colvars, mlmodel, grid_spacing, temperature, do_update=False):
# get actual forces and potential energy of configuration
### ML IS HERE ###
if not (mlmodel is None and not do_update):
# Accumulate the new observation in the dataset
coarse_colvars = round_vector(colvars, precision=grid_spacing)
distance_from_data = sp_dist.cdist(
sp.atleast_2d(coarse_colvars), mlmodel.X_fit_).ravel()
# check if configuration has already occurred
if distance_from_data.min() == 0.0:
index = list(distance_from_data).index(0.0)
mlmodel.y[index] = - units.kB * temperature * sp.log(sp.exp(-mlmodel.y[index] / (units.kB * temperature)) + SAMPLE_WEIGHT)
else:
mlmodel.accumulate_data(coarse_colvars, - units.kB * temperature * sp.log(SAMPLE_WEIGHT))
cv = coarse_colvars.ravel()
xx = sp.linspace(cv[0] - 2*grid_spacing, cv[0] + 2*grid_spacing, 5)
yy = sp.linspace(cv[1] - 2*grid_spacing, cv[1] + 2*grid_spacing, 5)
XX, YY = sp.meshgrid(xx, yy)
near_bins = sp.vstack((XX.ravel(), YY.ravel())).T
distance_from_data = sp_dist.cdist(sp.atleast_2d(near_bins), mlmodel.X_fit_)
for distance, near_bin in zip(distance_from_data, near_bins):
if distance.min() > 0.:
mlmodel.accumulate_data(near_bin, 0.)
if do_update:
# update ML potential with all the data contained in it.
mlmodel.update_fit()
return
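# Reading of update_model above (comment added for clarity): mlmodel.y holds a
# free-energy-like estimate -kB*T*ln(N) per coarse bin, where N is the
# accumulated visit weight. The first visit stores
# -kB*T*ln(SAMPLE_WEIGHT) = 0 for SAMPLE_WEIGHT = 1; each revisit updates it to
# -kB*T*ln(exp(-y/(kB*T)) + SAMPLE_WEIGHT), i.e. -kB*T*ln(2) after two visits,
# -kB*T*ln(3) after three, and so on. Empty bins within two grid spacings of a
# visited bin are seeded with 0 so the kernel ridge fit has support there.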
def main():
T = 300.0 # Simulation temperature
dt = 1 * units.fs # MD timestep
nsteps = 1000000 # MD number of steps
lengthscale = 0.5 # KRR Gaussian width.
gamma = 1 / (2 * lengthscale**2)
grid_spacing = 0.1
mlmodel = KernelRidge(kernel='rbf',
gamma=gamma, gammaL = gamma/4, gammaU=2*gamma,
alpha=1.0e-2, variable_noise=False, max_lhood=False)
anglerange = sp.arange(0, 2*sp.pi + grid_spacing, grid_spacing)
X_grid = sp.array([[sp.array([x,y]) for x in anglerange]
for y in anglerange]).reshape((len(anglerange)**2, 2))
# Bootstrap from initial database? uncomment
data_MD = sp.loadtxt('phi_psi_pot_md300.csv')
colvars = data_MD[0,:2]
PotEng = data_MD[0,2]
KinEng = data_MD[0,3]
# Prepare diagnostic visual effects.
plt.close('all')
plt.ion()
fig, ax = plt.subplots(1, 2, figsize=(24, 13))
# Zero-timestep evaluation and data files setup.
print("START")
mlmodel.accumulate_data(round_vector(data_MD[0,:2], precision=grid_spacing), 0.)
print('Step %d | Energy per atom: Epot = %.3e eV Ekin = %.3e eV (T = %3.0f K) Etot = %.7e eV' % (
0, PotEng/22, KinEng/22, KinEng / (22 * 1.5 * units.kB), PotEng + KinEng))
# MD Loop
for istep, line in enumerate(data_MD[:nsteps]):
colvars = line[:2]
PotEng = line[2]
KinEng = line[3]
# Flush Cholesky decomposition of K
if istep % 1000 == 0:
mlmodel.Cho_L = None
mlmodel.max_lhood = False
print("Dihedral angles | phi = %.3f, psi = %.3f " % (colvars[0], colvars[1]))
do_update = (istep % 1000 == 59)
t = get_time()
update_model(colvars, mlmodel, grid_spacing, T, do_update=do_update)
if do_update and mlmodel.max_lhood:
mlmodel.max_lhood = False
print("TIMER 002 | %.3f" % (get_time() - t))
print('Step %d | Energy per atom: Epot = %.3e eV Ekin = %.3e eV (T = %3.0f K) Etot = %.7e eV' % (
istep, PotEng/22, KinEng/22, KinEng / (22 * 1.5 * units.kB), PotEng + KinEng))
if istep % 1000 == 59:
t = get_time()
if 'datasetplot' not in locals():
datasetplot = pl.Plot_datapts(ax[0], mlmodel)
else:
datasetplot.update()
if hasattr(mlmodel, 'dual_coef_'):
if 'my2dplot' not in locals():
my2dplot = pl.Plot_energy_n_point(ax[1], mlmodel, colvars.ravel())
else:
my2dplot.update_prediction()
my2dplot.update_current_point(colvars.ravel())
print("TIMER 003 | %.03f" % (get_time() - t))
t = get_time()
fig.canvas.draw()
print("TIMER 004 | %.03f" % (get_time() - t))
return mlmodel
if __name__ == '__main__':
ret = main()
|
marcocaccin/LearningMetaDynamics
|
MD_unconstrained/mock_sampling.py
|
Python
|
gpl-2.0
| 5,083
|
[
"ASE",
"Gaussian"
] |
437f86d839e793175183892c4a1ecdc142a6cad558bbb3f589b1adc648aa570a
|
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
import sys
sys.path.append("../")
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import matplotlib.cm as cm
import os
###############################################################################################################
# TO LOAD
###############################################################################################################
store = pd.HDFStore("../../figures/figures_articles_v2/figure6/determinant_corr.h5", 'r')
det_all = store['det_all']
shufflings = store['shufflings']
shuffl_shank = store['shuffling_shank']
store.close()
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# WHICH NEURONS
space = pd.read_hdf("../../figures/figures_articles_v2/figure1/space.hdf5")
burst = pd.HDFStore("/mnt/DataGuillaume/MergedData/BURSTINESS.h5")['w']
burst = burst.loc[space.index]
hd_index = space.index.values[space['hd'] == 1]
# neurontoplot = [np.intersect1d(hd_index, space.index.values[space['cluster'] == 1])[0],
# burst.loc[space.index.values[space['cluster'] == 0]].sort_values('sws').index[3],
# burst.sort_values('sws').index.values[-20]]
firing_rate = pd.read_hdf("/mnt/DataGuillaume/MergedData/FIRING_RATE_ALL.h5")
fr_index = firing_rate.index.values[((firing_rate >= 1.0).sum(1) == 3).values]
# SWR MODULATION
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
swr = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (5,)).transpose())
swr = swr.loc[-500:500]
# AUTOCORR FAST
store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_ALL.h5")
autocorr_wak = store_autocorr['wake'].loc[0.5:]
autocorr_rem = store_autocorr['rem'].loc[0.5:]
autocorr_sws = store_autocorr['sws'].loc[0.5:]
autocorr_wak = autocorr_wak.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_rem = autocorr_rem.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_sws = autocorr_sws.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_wak = autocorr_wak[2:20]
autocorr_rem = autocorr_rem[2:20]
autocorr_sws = autocorr_sws[2:20]
neurons = np.intersect1d(swr.dropna(1).columns.values, autocorr_sws.dropna(1).columns.values)
neurons = np.intersect1d(neurons, fr_index)
X = np.copy(swr[neurons].values.T)
Y = np.copy(np.vstack((autocorr_wak[neurons].values,autocorr_rem[neurons].values, autocorr_sws[neurons].values))).T
Y = Y - Y.mean(1)[:,np.newaxis]
Y = Y / Y.std(1)[:,np.newaxis]
pca_swr = PCA(n_components=10).fit(X)
pca_aut = PCA(n_components=10).fit(Y)
pc_swr = pca_swr.transform(X)
pc_aut = pca_aut.transform(Y)
All = np.hstack((pc_swr, pc_aut))
corr = np.corrcoef(All.T)
#shuffle
Xs = np.copy(X)
Ys = np.copy(Y)
np.random.shuffle(Xs)
np.random.shuffle(Ys)
pc_swr_sh = PCA(n_components=10).fit_transform(Xs)
pc_aut_sh = PCA(n_components=10).fit_transform(Ys)
Alls = np.hstack((pc_swr_sh, pc_aut_sh))
corrsh = np.corrcoef(Alls.T)
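# Note (added for clarity; not in the original analysis): the rho^2 values
# annotated on the correlation matrices below are 1 - det of the joint 20 x 20
# correlation matrix of the stacked components (10 SWR PCs + 10 autocorrelogram
# PCs). A minimal sketch of that quantity, assuming the same column stacking as
# `All` and `Alls`:
def total_correlation(components):
    # components: observations x features; near 0 when the features are
    # uncorrelated, approaching 1 as the correlation matrix becomes singular.
    return 1.0 - np.linalg.det(np.corrcoef(components.T))
# total_correlation(All) reproduces the value annotated beneath the left
# matrix below; total_correlation(Alls) gives the shuffled control.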
###############################################################################################################
# PLOT
###############################################################################################################
def figsize(scale):
fig_width_pt = 483.69687 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt*inches_per_pt*scale # width in inches
fig_height = fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
return fig_size
def simpleaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
def noaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xticks([])
ax.set_yticks([])
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
# mpl.use("pdf")
pdf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
# "text.usetex": True, # use LaTeX to write all text
# "font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 16, # LaTeX default is 10pt font.
"font.size": 16,
"legend.fontsize": 16, # Make the legend/label fonts a little smaller
"xtick.labelsize": 16,
"ytick.labelsize": 16,
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
],
"lines.markeredgewidth" : 0.2,
"axes.linewidth" : 2,
"ytick.major.size" : 3,
"xtick.major.size" : 3
}
mpl.rcParams.update(pdf_with_latex)
import matplotlib.gridspec as gridspec
from matplotlib.pyplot import *
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.cm as cmx
import matplotlib.colors as colors
# colors = ['#444b6e', '#708b75', '#9ab87a']
fig = figure(figsize = figsize(2.0))
gs = gridspec.GridSpec(2,3, wspace = 0.3, hspace = 0.4, width_ratios = [1,0.8,1], height_ratios = [1,0.9])
#########################################################################
# A. Examples shank
#########################################################################
# see main_search_examples_fig3.py
# neurons_to_plot = ['Mouse17-130207_39', 'Mouse17-130207_43', 'Mouse17-130207_37']
neurons_to_plot = ['Mouse17-130207_42', 'Mouse17-130207_37']
neuron_seed = 'Mouse17-130207_43'
titles = ['Wake', 'REM', 'NREM']
# colors = ['#384d48', '#7a9b76', '#6e7271']
# cNorm = colors.Normalize(vmin=0, vmax = 1)
# scalarMap = cmx.ScalarMappable(norm=cNorm, cmap = viridis)
# color1 = scalarMap.to_rgba(1)
# color2 = 'crimson'
# color1 = 'steelblue'
# color3 = 'darkgrey'
# color1 = '#003049'
# color2 = '#d62828'
# color3 = '#fcbf49'
# color1 = 'blue'
# color2 = 'darkgrey'
# color3 = 'red'
cmap = get_cmap('tab10')
color1 = cmap(0)
color2 = cmap(1)
color3 = cmap(2)
colors = [color1, color3]
color_ex = [color1, color2, color3]
# axG = subplot(gs[2,:])
axA = subplot(gs[0,:])
noaxis(axA)
gsA = gridspec.GridSpecFromSubplotSpec(1,3,subplot_spec=gs[0,:],width_ratios=[0.6,0.6,0.6], hspace = 0.2, wspace = 0.2)#, height_ratios = [1,1,0.2,1])
new_path = data_directory+neuron_seed.split('-')[0]+'/'+neuron_seed.split("_")[0]
meanWaveF = scipy.io.loadmat(new_path+'/Analysis/SpikeWaveF.mat')['meanWaveF'][0]
lw = 3
# WAWEFORMS
gswave = gridspec.GridSpecFromSubplotSpec(1,3,subplot_spec = gsA[0,1])#, wspace = 0.3, hspace = 0.6)
axmiddle = subplot(gswave[:,1])
noaxis(gca())
for c in range(8):
plot(meanWaveF[int(neuron_seed.split('_')[1])][c]+c*200, color = color2, linewidth = lw)
title("Mean waveforms (a.u.)", fontsize = 16)
idx = [0,2]
for i, n in enumerate(neurons_to_plot):
axchan = subplot(gswave[:,idx[i]])
noaxis(axchan)
for c in range(8):
plot(meanWaveF[int(n.split('_')[1])][c]+c*200, color = colors[i], linewidth = lw)
# # ylabel("Channels")
# if i == 0:
# gca().text(-0.4, 1.06, "b", transform = gca().transAxes, fontsize = 16, fontweight='bold')
cax = inset_axes(axmiddle, "100%", "5%",
bbox_to_anchor=(-1.2, -0.1, 3.3, 1),
bbox_transform=axmiddle.transAxes,
loc = 'lower left')
noaxis(cax)
plot([0,1],[0,0], color = 'black' ,linewidth = 1.0)
plot([0,0],[0,0.01], color = 'black' ,linewidth = 1.0)
plot([1,1],[0,0.01], color = 'black' ,linewidth = 1.0)
plot([0.5,0.5],[0,0.01],color='black' ,linewidth = 1.0)
xlabel("Shank 3")
idx = [0,2]
for i, n in enumerate(neurons_to_plot):
gsneur = gridspec.GridSpecFromSubplotSpec(2,3,subplot_spec = gsA[0,idx[i]], wspace = 0.6, hspace = 0.6)
pairs = [neuron_seed, n]
# CORRELATION AUTO
for j, ep in enumerate(['wake', 'rem', 'sws']):
subplot(gsneur[0,j])
simpleaxis(gca())
title(titles[j], fontsize = 16)
tmp = store_autocorr[ep][pairs]
tmp.loc[0] = 0.0
tmp1 = tmp.loc[:0].rolling(window=20,win_type='gaussian',center=True,min_periods=1).mean(std=3.0)
tmp2 = tmp.loc[0:].rolling(window=20,win_type='gaussian',center=True,min_periods=1).mean(std=3.0)
tmp = pd.concat([tmp1.loc[:-0.5],tmp2])
tmp.loc[0] = 0.0
plot(tmp.loc[-50:50,neuron_seed], color = color2, linewidth = lw)
plot(tmp.loc[-50:50,n], color = colors[i], linewidth = lw)
if j == 1:
xlabel("Time (ms)")
# if i == 0 and j == 0:
# gca().text(-0.9, 1.15, "a", transform = gca().transAxes, fontsize = 16, fontweight='bold')
# if i == 1 and j == 0:
# gca().text(-0.5, 1.15, "c", transform = gca().transAxes, fontsize = 16, fontweight='bold')
# CORRELATION SWR
subplot(gsneur[1,:])
simpleaxis(gca())
plot(swr[neuron_seed], color = color2, linewidth = lw)
plot(swr[n], color =colors[i], linewidth = lw)
xlabel("Time from SWRs (ms)")#, labelpad = -0.1)
if i == 0:
ylabel("Modulation")
########################################################################
# B. PCA
########################################################################
neurontoplot = [neuron_seed]+neurons_to_plot
gsB = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gs[1,0])#, width_ratios = [0.05, 0.95])#, hspace = 0.1, wspace = 0.5)#, height_ratios = [1,1,0.2,1])
# EXEMPLE PCA SWR
subplot(gsB[0,:])
simpleaxis(gca())
gca().spines['bottom'].set_visible(False)
gca().set_xticks([])
axhline(0, linewidth = 1.5, color = 'black')
for i, n in enumerate(neurontoplot):
idx = np.where(n == neurons)[0][0]
scatter(np.arange(pc_aut.shape[1])+i*0.2, pc_aut[idx], 2, color = color_ex[i])
for j in np.arange(pc_swr.shape[1]):
plot([j+i*0.2, j+i*0.2], [0, pc_aut[idx][j]], linewidth = 3, color = color_ex[i])
yticks([-4,0])
ylabel("PCA weights")
gca().yaxis.set_label_coords(-0.15,0.1)
# title("PCA")
# gca().text(-0.2, 1.15, "d", transform = gca().transAxes, fontsize = 16, fontweight='bold')
gca().text(0.15, 0.95, "Autocorr.", transform = gca().transAxes, fontsize = 16)
# EXEMPLE PCA AUTOCORR
gsAA = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gsB[1,:], height_ratios = [0.4,1], hspace = 0.1)#, hspace = 0.1, height_ratios = [1,0.4])
ax1 = subplot(gsAA[0,:])
ax2 = subplot(gsAA[1,:], sharex = ax1)
simpleaxis(ax1)
simpleaxis(ax2)
ax1.spines['bottom'].set_visible(False)
# ax2.spines['bottom'].set_visible(False)
ax1.set_xticks([])
ax1.xaxis.set_tick_params(size=0)
ax2.set_xticks(np.arange(10))
ax2.set_xticklabels(np.arange(10)+1)
ax2.axhline(0, linewidth = 1.5, color = 'black')
for i, n in enumerate(neurontoplot):
idx = np.where(n == neurons)[0][0]
ax1.scatter(np.arange(pc_swr.shape[1])+i*0.2, pc_swr[idx], 2, color = color_ex[i])
ax2.scatter(np.arange(pc_swr.shape[1])+i*0.2, pc_swr[idx], 2, color = color_ex[i])
for j in np.arange(pc_aut.shape[1]):
ax1.plot([j+i*0.2, j+i*0.2],[0, pc_swr[idx][j]], linewidth = 3, color = color_ex[i])
ax2.plot([j+i*0.2, j+i*0.2],[0, pc_swr[idx][j]], linewidth = 3, color = color_ex[i])
ax2.set_xlabel("Components")
idx = [np.where(n == neurons)[0][0] for n in neurontoplot]
ax2.set_ylim(pc_swr[idx,0].min()-1, pc_swr[idx,1:].max()+0.6)
ax1.set_ylim(pc_swr[idx,0].max()-1, pc_swr[idx,0].max()+0.6)
ax1.set_yticks([13])
d = .005 # how big to make the diagonal lines in axes coordinates
kwargs = dict(transform=ax1.transAxes, color='k', clip_on=False, linewidth = 1)
ax1.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax1.text(0.2, 1.15, "SWR", transform = ax1.transAxes, fontsize = 16)
# title("PCA")
###########################################################################
# E MATRIX CORRELATION
###########################################################################
gsC = gridspec.GridSpecFromSubplotSpec(1,2,subplot_spec=gs[1,1])#, hspace = 0.1, wspace = 0.5)#, height_ratios = [1,1,0.2,1])
subplot(gsC[0,0])
noaxis(gca())
vmin = np.minimum(corr[0:10,10:].min(), corrsh[0:10,10:].min())
vmax = np.maximum(corr[0:10,10:].max(), corrsh[0:10,10:].max())
imshow(corr[0:10,10:], vmin = vmin, vmax = vmax)
ylabel("SWR")
xlabel("Autocorr.")
gca().text(0.25, 1.3, "Cell-by-cell correlation", transform = gca().transAxes, fontsize = 16)
# gca().text(-0.35, 1.62, "e", transform = gca().transAxes, fontsize = 9, fontweight='bold')
gca().text(0.02, -0.5, r"$\rho^{2} = $"+str(np.round(1-np.linalg.det(corr),2)), transform = gca().transAxes, fontsize = 16)
# MATRIX SHUFFLED
subplot(gsC[0,1])
noaxis(gca())
imshow(corrsh[0:10,10:], vmin = vmin, vmax = vmax)
title("Shuffle", fontsize = 16, pad = 0.6)
# ylabel("SWR")
# xlabel("Autocorr.")
gca().text(0.02, -0.5, r"$\rho^{2} = $"+str(np.round(1-np.linalg.det(corrsh),2)), transform = gca().transAxes, fontsize = 16)
#########################################################################
# F. SHUFFLING + CORR
#########################################################################
subplot(gs[1,2])
simpleaxis(gca())
axvline(1-det_all['all'], color = 'red')
hist(1-shufflings['all'], 100, color = 'black', weights = np.ones(len(shufflings['all']))/len(shufflings['all']), label = 'All', histtype='stepfilled')
hist(1-shuffl_shank, 100, color = 'grey', alpha = 0.7, weights = np.ones(len(shuffl_shank))/len(shuffl_shank), label = 'Nearby', histtype='stepfilled')
xlabel(r"Total correlation $\rho^{2}$")
ylabel("Probability (%)")
yticks([0,0.02,0.04], ['0','2','4'])
# gca().text(-0.3, 1.08, "f", transform = gca().transAxes, fontsize = 16, fontweight='bold')
gca().text(1-det_all['all']-0.05, gca().get_ylim()[1], "p<0.001",fontsize = 16, ha = 'center', color = 'red')
legend(edgecolor = None, facecolor = None, frameon = False, loc = 'lower left', bbox_to_anchor = (0.35, 0.6))
subplots_adjust(top = 0.95, bottom = 0.1, right = 0.99, left = 0.06)
savefig(r"../../../Dropbox (Peyrache Lab)/Talks/fig_talk_13_2.png", dpi = 300, facecolor = 'white')
|
gviejo/ThalamusPhysio
|
python/figure_talk/main_talk_7_corr2.py
|
Python
|
gpl-3.0
| 14,903
|
[
"Gaussian"
] |
714241afbc44b94cfb2c79c79f176a622188a7a08bd9ab3ac5b018defd50f85f
|
"""
A set of classes for specifying input parameters for the Ember solver.
Create a :class:`.Config` object to be passed to :func:`~ember.utils.run` or
:func:`~ember.utils.multirun`.
Sane defaults are given for most input parameters. Create a customized
configuration by passing :class:`.Options` objects to the constructor for
:class:`.Config`::
conf = Config(
Paths(outputDir="somewhere"),
InitialCondition(equivalenceRatio=0.9))
"""
from __future__ import print_function
import numbers
import os
import sys
import cantera
import numpy as np
from . import utils
import copy
import time
import shutil
from . import _ember
from . import output
if sys.version_info.major == 3:
_stringTypes = (str,)
else:
_stringTypes = (str, unicode)
class Option(object):
"""
Instances of this class are used as class members of descendants of class
:class:`Options` to represent a single configurable value. When a
user-specified value for an option is specified (as a keyword argument to
the constructor of a class derived from :class:`Options`), that value is
stored in this object and validated to make sure it satisfies any
applicable constraints.
:param default:
The default value for this option
:param choices:
A sequence of valid values for this option, e.g. ``['Mix', 'Multi']``.
The *default* choice is automatically included in *choices*.
:param min:
The minimum valid value for this option
:param max:
The maximum valid value for this option
:param nullable:
Set to *True* if *None* is a valid value for this option, regardless
of any other restrictions. Automatically set to *True* if *default* is
*None*.
:param label:
A human readable label to be used in the GUI in place of the attribute
name to which this Option is assigned.
:param level:
A number from 0-3 indicating the obscurity level of this option. Used
to selectively hide advanced options in the GUI. 0 is always shown.
3 is never shown.
:param filter:
A function that takes a :class:`Config` object as an argument and
returns *True* if this option should be enabled
Requiring values of a particular type is done by using one of the derived
classes: :class:`StringOption`, :class:`BoolOption`, :class:`IntegerOption`,
:class:`FloatOption`.
"""
counter = [0] # used to preserve options in the order they are defined
def __init__(self, default, choices=None, min=None, max=None,
nullable=False, label=None, level=0, filter=None):
self.value = default
self.default = default
if choices:
self.choices = set(choices)
self.choices.add(default)
else:
self.choices = None
self.min = min
self.max = max
self.isSet = False
self.nullable = nullable or self.default is None
self.label = label
self.level = level
self.filter = filter
self.sortValue = self.counter[0]
self.counter[0] += 1
def validate(self):
if self.choices and self.value not in self.choices:
return '%r not in %r' % (self.value, list(self.choices))
if self.min is not None and self.value < self.min:
return ('Value (%s) must be greater than or equal to %s' %
(self.value, self.min))
if self.max is not None and self.value > self.max:
return ('Value (%s) must be less than or equal to %s' %
(self.value, self.max))
def __repr__(self):
return repr(self.value)
def __nonzero__(self):
return bool(self.value) # python2
def __bool__(self):
return bool(self.value) # python3
def __eq__(self, other):
try:
return self.value == other.value
except AttributeError:
return self.value == other
def shouldBeEnabled(self, conf):
if self.filter:
return bool(self.filter(conf))
else:
return True
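# Illustrative behaviour (comment added for clarity; not part of Ember): with
# opt = Option(0.1, min=0.0, max=1.0), setting opt.value = 2.0 makes
# opt.validate() return 'Value (2.0) must be less than or equal to 1.0',
# whereas any value inside [0.0, 1.0] makes validate() return None, which the
# Options constructor below treats as "no error".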
class StringOption(Option):
""" An option whose value must be a string. """
def validate(self):
if not (isinstance(self.value, _stringTypes) or
(self.value is None and self.nullable)):
return 'Value must be a string. Got %r' % self.value
return Option.validate(self)
class BoolOption(Option):
""" An option whose value must be a boolean value. """
def validate(self):
if not (self.value in (True, False, 0, 1) or
(self.value is None and self.nullable)):
return 'Value must be a boolean. Got %r' % self.value
return Option.validate(self)
class IntegerOption(Option):
""" An option whose value must be a integer. """
def validate(self):
if not (isinstance(self.value, numbers.Integral) or
(self.value is None and self.nullable)):
return 'Value must be an integer. Got %r' % self.value
return Option.validate(self)
class FloatOption(Option):
""" An option whose value must be a floating point number. """
def validate(self):
if not (isinstance(self.value, numbers.Number) or
(self.value is None and self.nullable)):
return 'Value must be a number. Got %r' % self.value
return Option.validate(self)
class Options(object):
""" Base class for elements of :class:`.Config` """
def __init__(self, **kwargs):
# Copy the defaults from the class's dictionary
for name,value in self.__class__.__dict__.items():
if isinstance(value, Option):
setattr(self, name, copy.deepcopy(value))
# Apply the options specified in kwargs
for key,value in kwargs.items():
if hasattr(self, key):
opt = getattr(self, key)
if isinstance(opt, Option):
opt.value = value
opt.isSet = True
message = opt.validate()
if message:
raise ValueError('\nInvalid option specified for %s.%s:\n%s' %
(self.__class__.__name__, key, message))
else:
setattr(self, key, value)
else:
raise KeyError('Unrecognized configuration option: %s' % key)
def _stringify(self, indent=0):
ans = []
spaces = None
for attr in dir(self):
if attr.startswith('_') or attr == 'isSet':
continue
value = getattr(self, attr)
if isinstance(value, Option):
if type(value.value) == type(value.default) and value.value == value.default:
continue
else:
value = value.value
if not isinstance(value, numbers.Number):
value = repr(value)
if not spaces:
header = ' '*indent + self.__class__.__name__ + '('
spaces = ' '*len(header)
else:
header = spaces
ans.append('%s%s=%s,' % (header, attr, value))
if ans:
ans[-1] = ans[-1][:-1] + ')'
else:
ans = ''
return ans
def __iter__(self):
allOpts = [item for item in self.__dict__.items()
if isinstance(item[1], Option)]
allOpts.sort(key=lambda item: item[1].sortValue)
return allOpts.__iter__()
def isSet(self, option):
""" Returns True if the named option has a user-specified value """
try:
return getattr(self.original, option).isSet
except AttributeError:
return getattr(self, option).isSet
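# Illustrative sketch (hypothetical class, not part of Ember): options are
# declared as class attributes and keyword arguments are validated on
# construction, e.g.
#
#     class _DemoOptions(Options):
#         nThreads = IntegerOption(1, min=1)
#
#     d = _DemoOptions(nThreads=4)  # accepted; d.isSet('nThreads') is True
#     _DemoOptions(nThreads=0)      # raises ValueError (below the minimum of 1)
#     _DemoOptions(threads=2)       # raises KeyError (unrecognized option name)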
# frequently-used filter predicates
def _isPremixed(conf):
return conf.initialCondition.flameType == 'premixed'
def _isDiffusion(conf):
return conf.initialCondition.flameType == 'diffusion'
def _isSymmetric(conf):
return conf.general.flameGeometry == 'cylindrical' or conf.general.twinFlame
def _usingCvode(conf):
return conf.general.chemistryIntegrator == 'cvode'
def _usingQss(conf):
return conf.general.chemistryIntegrator == 'qss'
# Dynamically decide on the default file extension: Use HDF5 if h5py is
# available, otherwise default to npz
try:
import h5py
dynamicDefaultFileExtension = StringOption('h5', ('npz',))
except ImportError:
dynamicDefaultFileExtension = StringOption('npz', ('h5',))
class Paths(Options):
""" Directories for input and output files """
#: Relative path to the directory where output files (outNNNNNN.h5,
#: profNNNNNN.h5) will be stored. Automatically created if it doesn't
#: already exist.
outputDir = StringOption("run/test1", label='Output Directory')
#: File to use for log messages. If *None*, write output to stdout
logFile = StringOption(None, label='Log file', level=2)
class General(Options):
""" High-level configuration options """
#: True if the temperature and mass fractions on the burned gas
#: side of the flame should be held constant. Applicable only to
#: premixed flames. The default is *True* for twin or curved flames with
#: burned gas at x=0, and *False* otherwise.
fixedBurnedVal = BoolOption(None, level=1, filter=_isPremixed)
#: True if the position of the leftmost grid point should be held
#: constant. Should usually be True for Twin and Curved flame
#: configurations.
fixedLeftLocation = BoolOption(False, level=1, filter=_isSymmetric)
#: Geometry specification for the flame. Options are: 'planar', 'cylindrical',
#: and 'disc'.
flameGeometry = StringOption("planar", ("cylindrical","disc"), level=1)
#: True if solving a planar flame that is symmetric about the x = 0 plane.
twinFlame = BoolOption(False,
filter=lambda conf: conf.general.flameGeometry != 'cylindrical')
#: Input file (HDF5 format) containing the interpolation data needed for
#: the quasi2d mode. Contains:
#:
#: - vector *r* (length *N*)
#: - vector *z* (length *M*)
#: - Temperature array *T* (size *N* x *M*)
#: - radial velocity array *vr* (size *N* x *M*)
#: - axial velocity array *vz* (size *N* x *M*)
interpFile = StringOption(None, level=2,
filter=lambda conf: conf.initialCondition.flameType == 'quasi2d')
#: *True* if the unburned fuel/air mixture should be used as the
#: left boundary condition. Applicable only to premixed flames.
unburnedLeft = BoolOption(True, level=1, filter=_isPremixed)
#: *True* if the fuel mixture should be used as the left boundary
#: condition. Applicable only to diffusion flames.
fuelLeft = BoolOption(True, level=1, filter=_isDiffusion)
#: Method for setting the boundary condition for the continuity equation
#: Valid options are: ``fixedLeft``, ``fixedRight``, ``fixedQdot``,
#: ``fixedTemperature``, and ``stagnationPoint``. The ``fixedTemperature``
#: condition holds constant the location where the midpoint temperature is
#: reached, while the other options fix the value of V at the specified
#: point.
continuityBC = StringOption("fixedLeft",
("fixedRight", "fixedQdot", "fixedTemperature",
"stagnationPoint"),
level=2)
#: Integrator to use for the chemical source terms. Choices are
#: ``qss`` (explicit, quasi-steady state) and ``cvode`` (implicit,
#: variable-order BDF).
chemistryIntegrator = StringOption("qss", ("cvode",), level=1)
#: Method to use for splitting the convection / diffusion / reaction
#: terms. Options are ``strang`` and ``balanced``.
splittingMethod = StringOption("balanced", ("strang",), level=2)
#: Number of integration failures to tolerate in the chemistry
#: integrator before aborting.
errorStopCount = IntegerOption(100, level=2)
#: Number of threads to use for evaluating terms in parallel
nThreads = IntegerOption(1, min=1, level=1,
label='Number of Processors')
#: Order of Chebyshev polynomial to use in approximating input
#: functions over a single global time step.
chebyshevOrder = IntegerOption(5, min=2, level=3)
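# Illustrative sketch (not part of the original module): configuration
# sections such as General are instantiated with keyword overrides and passed
# to Config(...), defined below. The option names are real; the values chosen
# here are hypothetical.
def _example_general_section():
    """Return a General section for a twin premixed flame (illustration only)."""
    return General(twinFlame=True, chemistryIntegrator='cvode', nThreads=4)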
class Chemistry(Options):
""" Settings pertaining to the Cantera mechanism file """
#: Path to the Cantera mechanism file in XML format
mechanismFile = StringOption("ucsd-methane.xml")
#: ID of the phase to use in the mechanism file.
#: Found on a line that looks like::
#:
#: <phase dim="3" id="gas">
#:
#: in the mechanism file. This is always "gas" for mechanisms converted
#: using ck2cti and cti2ctml. This option only needs to be specified if
#: the desired phase is not the first phase defined in the input file.
phaseID = StringOption("", level=1)
#: Transport model to use. Valid options are ``Mix``, ``Multi``, ``UnityLewis``, and ``Approx``
transportModel = StringOption("Approx", ("Mix", "Multi", "UnityLewis"), level=1)
#: Kinetics model to use. Valid options are ``standard`` and ``interp``.
kineticsModel = StringOption("interp", ("standard",), level=2)
#: Mole fraction threshold for including species with ``transportModel = "Approx"``
threshold = FloatOption(1e-5, level=2, label='Approx. transport threshold',
filter=lambda conf: conf.chemistry.transportModel == 'Approx')
    #: Set a scalar multiplier for the reaction rate term as a function of time
rateMultiplierFunction = Option(None, level=3)
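# Minimal sketch of a rate-multiplier callable for Chemistry.rateMultiplierFunction
# above: any function of time returning a scalar works. This hypothetical
# example ramps all reaction rates up over the first millisecond.
def _example_rate_multiplier(t):
    """Hypothetical multiplier for Chemistry.rateMultiplierFunction."""
    return min(1.0, t / 1e-3)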
class Grid(Options):
""" Parameters controlling the adaptive grid """
#: Maximum relative scalar variation of each state vector
#: component between consecutive grid points. For high accuracy,
#: ``vtol = 0.08``; For minimal accuracy, ``vtol = 0.20``.
vtol = FloatOption(0.12)
#: Maximum relative variation of the gradient of each state vector
#: component between consecutive grid points. For high accuracy,
#: ``dvtol = 0.12``; For minimal accuracy, ``dvtol = 0.4``.
dvtol = FloatOption(0.2)
#: Relative tolerance (compared to vtol and dvtol) for grid point removal.
rmTol = FloatOption(0.6, level=1)
#: Parameter to limit numerical diffusion in regions with high
#: convective velocities.
dampConst = FloatOption(7, level=2)
#: Minimum grid spacing [m]
gridMin = FloatOption(5e-7, level=1)
#: Maximum grid spacing [m]
gridMax = FloatOption(2e-4, level=1)
#: Maximum ratio of the distances between adjacent pairs of grid
#: points.
#:
#: .. math:: \frac{1}{\tt uniformityTol} < \frac{x_{j+1}-x_j}{x_j-x_{j-1}}
#: < {\tt uniformityTol}
uniformityTol = FloatOption(2.5, level=2)
    #: State vector components smaller than this value are not considered
    #: when deciding whether to add or remove a grid point.
absvtol = FloatOption(1e-8, level=2)
#: Tolerance for each state vector component for extending the
#: domain to satisfy zero-gradient conditions at the left and
#: right boundaries.
boundaryTol = FloatOption(5e-5, level=2)
#: Tolerance for removing points at the boundary. Must be smaller
#: than boundaryTol.
boundaryTolRm = FloatOption(1e-5, level=2)
#: For unstrained flames, number of flame thicknesses (based on
#: reaction zone width) downstream of the flame to keep the right
#: edge of the domain.
unstrainedDownstreamWidth = FloatOption(5, level=2)
#: Number of points to add when extending a boundary to satisfy
#: boundaryTol.
addPointCount = IntegerOption(3, min=0, level=2)
#: For curved or twin flames, the minimum position of the first
#: grid point past x = 0.
centerGridMin = FloatOption(1e-4, min=0, level=2, filter=_isSymmetric)
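# Illustrative sketch based on the accuracy guidance in the docstrings above:
# a high-accuracy grid uses the tighter vtol/dvtol values. The gridMin value
# here is a hypothetical example.
def _example_high_accuracy_grid():
    """Return a Grid section tuned for high accuracy (illustration only)."""
    return Grid(vtol=0.08, dvtol=0.12, gridMin=2e-7)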
class InitialCondition(Options):
"""
Settings controlling the initial condition for the integrator. If
no restartFile is specified, an initial profile is created based
on the specified fuel and oxidizer compositions.
If an input file is specified, then setting fuel and oxidizer
compositions will cause new values to be used only at the
boundaries of the domain.
The grid parameters are only used if no restart file is specified.
"""
#: Read initial profiles from the specified file, or if 'None',
#: create a new initial profile.
restartFile = StringOption(None, level=1)
#: "premixed", "diffusion", or "quasi2d"
flameType = StringOption('premixed', ('diffusion', 'quasi2d'))
#: Temperature of the unburned fuel/air mixture for premixed flames [K].
Tu = FloatOption(300, label='Reactant Temperature', filter=_isPremixed)
#: Temperature of the fuel mixture for diffusion flames [K].
Tfuel = FloatOption(300, label='Fuel Temperature', filter=_isDiffusion)
#: Temperature of the oxidizer mixture for diffusion flames [K].
Toxidizer = FloatOption(300, label='Oxidizer Temperature', filter=_isDiffusion)
#: Molar composition of the fuel mixture.
fuel = Option("CH4:1.0", label='Molar Fuel Composition')
#: Molar composition of the oxidizer mixture.
oxidizer = Option("N2:3.76, O2:1.0", label='Molar Oxidizer Composition')
#: Equivalence ratio of the fuel/air mixture for premixed flames.
equivalenceRatio = FloatOption(0.75, min=0, label='Equivalence Ratio',
filter=_isPremixed)
#: Molar composition of the fuel + oxidizer mixture. Specify as an alternative
#: to providing fuel and oxidizer compositions and equivalence ratio.
reactants = Option(None, label='Molar Reactant Composition')
#: Molar composition of the flow opposite the premixed reactant
#: stream, if different from the equilibrium composition
counterflow = StringOption(None, label='Composition of counterflow',
filter=_isPremixed, level=1)
#: Temperature of the flow opposite the premixed reactant stream, if
#: different from the equilibrium temperature
    Tcounterflow = FloatOption(None, label='Temperature of counterflow',
filter=_isPremixed, level=1)
#: Adjust the composition of the counterflow stream so that the components
#: are at equilibrium. This option specifies the property pair to hold
#: constant during equilibration, or *False* to skip equilibration. The
#: boundary condition is not consistent if this mixture has reactions that
#: are proceeding at finite rates. For diffusion flames, this option is applied
#: to the state of the oxidizer stream.
equilibrateCounterflow = Option('TP', ('HP','UV','SV','TV','SP',False),
label='Equilibrate specified counterflow mixture',
level=1)
#: Thermodynamic pressure [Pa]
pressure = FloatOption(101325, min=0)
#: Number of points in the initial uniform grid.
nPoints = IntegerOption(100, level=1)
#: Position of the leftmost point of the initial grid.
xLeft = FloatOption(-0.002)
#: Position of the rightmost point of the initial grid.
xRight = FloatOption(0.002)
#: The width of the central plateau in the initial profile [m]. For premixed
    #: flames, this mixture is composed of equilibrium products. For diffusion
    #: flames, it is a stoichiometric fuel/air mixture brought to equilibrium
    #: at constant enthalpy and pressure.
centerWidth = FloatOption(0.001, level=1)
#: The width of the slope away from the central plateau in the
#: initial profile [m]. Recommended value for premixed flames:
#: 5e-4. Recommended value for diffusion flames: 1e-3.
slopeWidth = FloatOption(0.0005, level=1)
#: Number of times to run the generated profile through a low pass
#: filter before starting the simulation.
smoothCount = IntegerOption(4, level=2)
#: True if initial profiles for x,T,U,V and Y are given
haveProfiles = BoolOption(False, level=3)
#: Initial grid used if ``haveProfiles`` is set to ``True``
x = Option(None, level=3)
#: Initial temperature profile used if ``haveProfiles`` is set to ``True``
T = Option(None, level=3)
#: Initial tangential velocity gradient profile used if ``haveProfiles`` is
#: set to ``True``
U = Option(None, level=3)
#: Initial mass fraction profiles used if ``haveProfiles`` is set to
#: ``True``
Y = Option(None, level=3)
#: Initial mass flux profile used if ``haveProfiles`` is set to ``True``
V = Option(None, level=3)
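# Illustrative sketch: a lean premixed methane/air initial condition built
# from the options above; all values are hypothetical examples.
def _example_premixed_initial_condition():
    """Return an InitialCondition section for a lean premixed flame."""
    return InitialCondition(flameType='premixed', equivalenceRatio=0.7,
                            Tu=300.0, pressure=101325)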
class WallFlux(Options):
#: Reference temperature for the wall heat flux
Tinf = FloatOption(300, level=2)
#: Conductance of the wall [W/m^2-K]
Kwall = FloatOption(100, level=2)
class Ignition(Options):
"""
Parameters for an artificial heat release rate function which can
be used to simulate ignition. The heat release rate is a step
function in time with a Gaussian spatial distribution.
"""
#: Beginning of the external heat release rate pulse [s].
tStart = FloatOption(0, level=2)
#: Duration of the external heat release rate pulse [s].
duration = FloatOption(1e-3, level=2)
#: Integral amplitude of the pulse [W/m^2].
energy = FloatOption(0, level=2)
#: Location of the center of the pulse [m].
center = FloatOption(0, level=2)
#: Characteristic width (standard deviation) of the pulse [m].
stddev = FloatOption(1e-4, level=2)
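# Sketch of the heat release rate implied by the Ignition options above: a
# step in time with a Gaussian spatial profile. The normalization used here,
# chosen so the space-time integral equals `energy`, is an assumption for
# illustration, not a statement of the solver's exact implementation.
def _example_ignition_source(x, t, energy, tStart, duration, center, stddev):
    """Evaluate a hypothetical ignition source qdot(x, t) in W/m^3."""
    if not (tStart <= t < tStart + duration):
        return np.zeros_like(x)
    gauss = (np.exp(-(x - center) ** 2 / (2 * stddev ** 2))
             / (stddev * np.sqrt(2 * np.pi)))
    return energy / duration * gauss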
class ExternalHeatFlux(Options):
"""
User-specified function describing heat loss to the environment, e.g.
through radiation.
"""
#: Heat loss function, which must be of the form `qdot = f(x, t, U, T, Y)`
#: where `qdot` is the heat loss rate in W/m^3, `x` is the local position,
    #: `t` is the time, `U` is the tangential velocity gradient, `T` is the
#: temperature and `Y` is the array of species mass fractions.
heatLoss = Option(None, level=3)
#: Update the value of this function based on the current state vector of
#: the source term integrator, rather than only at the start of each split
#: timestep. Enabling this option incurs a significant performance penalty,
#: and should only be done if the heat flux function is too stiff to be
#: integrated otherwise.
alwaysUpdate = BoolOption(False, level=3)
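# Minimal sketch of a heat-loss callable matching the documented signature
# qdot = f(x, t, U, T, Y): optically thin radiation to surroundings at Tinf.
# The Planck-mean absorption coefficient kappa and the Tinf default used here
# are hypothetical.
def _example_radiative_heat_loss(x, t, U, T, Y, kappa=1.0, Tinf=300.0):
    """Hypothetical radiative loss [W/m^3] for ExternalHeatFlux.heatLoss."""
    sigma_SB = 5.670374419e-8  # Stefan-Boltzmann constant [W/m^2-K^4]
    return -4.0 * sigma_SB * kappa * (T ** 4 - Tinf ** 4)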
class StrainParameters(Options):
"""
Parameters defining the strain rate as a function of time.
The strain rate changes linearly from *initial* to *final* over a
period of *dt* seconds, starting at *tStart*.
"""
initial = FloatOption(400) #: Initial strain rate [1/s]
    final = FloatOption(400) #: Final strain rate [1/s]
tStart = FloatOption(0.000) #: time at which strain rate starts to change [s]
dt = FloatOption(0.002) #: time period over which strain rate changes [s]
#: A list of strain rates to use for a series of sequential
#: integration periods (see :func:`~ember.utils.multirun`), with steady-state
#: profiles generated for each strain rate before proceeding to the next.
#: A typical list of strain rates to use::
#:
#: rates = [9216, 7680, 6144, 4608, 3840, 3072, 2304, 1920, 1536,
#: 1152, 960, 768, 576, 480, 384, 288, 240, 192, 144, 120,
#: 96, 72, 60, 48, 36, 30, 24, 18, 15, 12]
rates = Option(None, level=3)
    #: Specify the strain rate as a function of time, using any callable
    #: Python object.
function = Option(None, level=2)
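# Sketch of the strain-rate schedule described in the class docstring above:
# linear in time from `initial` to `final` over `dt` seconds starting at
# `tStart`, constant outside that window. Purely illustrative.
def _example_strain_rate(t, initial=400.0, final=400.0, tStart=0.0, dt=0.002):
    """Evaluate the piecewise-linear strain rate a(t) [1/s]."""
    frac = min(max((t - tStart) / dt, 0.0), 1.0)
    return initial + (final - initial) * frac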
class Extinction(Options):
""" Settings pertaining to running a flame extinction simulation """
#: Choice of increasing strain rate by a multiplicative factor or step size
method = StringOption("step", ("step", "factor"), level=1)
    #: Starting value and lower limit of the strain rate increment used by
    #: the 'step' method.
    initialStep = FloatOption(25.0)
    minStep = FloatOption(0.5)
    #: Starting value and lower limit of the strain rate multiplier used by
    #: the 'factor' method.
    initialFactor = FloatOption(1.05)
    minFactor = FloatOption(1.0001)
    #: Factor by which the strain rate step size or increase factor is reduced
    #: after each non-burning solution.
reductionFactor = FloatOption(0.6)
    #: Maximum profile temperature below which the simulation is considered
    #: non-burning and is ended immediately.
cutoffTemp = FloatOption(1000.0)
#: Starting strain rate to be used when progressing to extinction
initialStrainRate = FloatOption(300.0)
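# Simplified sketch of the strain-rate progression that ConcreteConfig.runESR()
# (defined below) performs using the Extinction options above: 'step' adds an
# increment and 'factor' multiplies; after a non-burning solution the search
# backs off and the increment or factor shrinks by reductionFactor.
def _example_next_strain_rate(a, step, method='step', burning=True,
                              reductionFactor=0.6):
    """Return (next strain rate, next step or factor) for the ESR search."""
    if burning:
        return (a + step, step) if method == 'step' else (a * step, step)
    # Non-burning: revert toward the last burning value, then shrink the step.
    if method == 'step':
        return a - step, step * reductionFactor
    return a / step, 1.0 + (step - 1.0) * reductionFactor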
class PositionControl(Options):
"""
Parameters defining the position of the flame as a function of
time (for twin / curved flames).
These parameters are used to adjust the mass flux at r = 0 to move
the flame toward the desired location. The flame moves from
*xInitial* to *xFinal* over *dt* seconds, starting at *tStart*.
The feedback controller which determines the mass flux uses the
distance between the current flame location and the desired flame
location with the gains specified by *proportionalGain* and
*integralGain*.
"""
xInitial = FloatOption(0.0025, filter=_isSymmetric) #:
xFinal = FloatOption(0.0025, filter=_isSymmetric) #:
dt = FloatOption(0.01, filter=_isSymmetric) #:
tStart = FloatOption(0, filter=_isSymmetric) #:
proportionalGain = FloatOption(10, filter=_isSymmetric) #:
integralGain = FloatOption(800, filter=_isSymmetric) #:
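# Sketch of the proportional-integral feedback described above: the error
# between the current and desired flame positions drives the mass-flux
# adjustment with gains proportionalGain and integralGain. This illustrates
# the control law only, not the solver's exact discretization.
def _example_position_controller(xFlame, xTarget, errorIntegral, dt,
                                 proportionalGain=10.0, integralGain=800.0):
    """Return (control signal, updated error integral)."""
    error = xFlame - xTarget
    errorIntegral += error * dt
    return proportionalGain * error + integralGain * errorIntegral, errorIntegral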
class Times(Options):
"""
    Parameters controlling integrator timesteps and the frequency of
    output profiles.
"""
#: Integrator start time.
tStart = FloatOption(0, level=1)
#: Timestep used for operator splitting.
globalTimestep = FloatOption(2e-5, level=1)
    #: Control for the timestep used by the diffusion integrator. The actual
    #: timestep will be this multiplier times the stability limit of an
    #: explicit integrator.
diffusionTimestepMultiplier = FloatOption(10, level=2)
#: Maximum amount of time before regridding / adaptation.
regridTimeInterval = FloatOption(100, level=2)
#: Maximum number of timesteps before regridding / adaptation.
regridStepInterval = IntegerOption(20, level=2)
#: Maximum number of steps between storing integral flame properties.
outputStepInterval = IntegerOption(1, level=2)
#: Maximum time between storing integral flame properties.
outputTimeInterval = FloatOption(1e-5, level=2)
#: Maximum number of timesteps before writing flame profiles.
profileStepInterval = IntegerOption(1000, level=2)
#: Maximum time between writing flame profiles.
profileTimeInterval = FloatOption(1e-3, level=1)
#: Number of timesteps between writing profNow.h5
currentStateStepInterval = IntegerOption(20, level=2)
#: Number of timesteps between checks of the steady-state
#: termination conditions.
terminateStepInterval = IntegerOption(10, level=2)
class CvodeTolerances(Options):
""" Tolerances for the CVODE chemistry integrator """
#: Relative tolerance for each state variable
relativeTolerance = FloatOption(1e-6, level=2, filter=_usingCvode)
#: Absolute tolerance for U velocity
momentumAbsTol = FloatOption(1e-7, level=2, filter=_usingCvode)
#: Absolute tolerance for T
energyAbsTol = FloatOption(1e-8, level=2, filter=_usingCvode)
#: Absolute tolerance for species mass fractions.
speciesAbsTol = FloatOption(1e-13, level=2, filter=_usingCvode)
#: Minimum internal timestep
minimumTimestep = FloatOption(1e-18, level=2, filter=_usingCvode)
class QssTolerances(Options):
""" Tolerances for the QSS chemistry integrator """
#: Accuracy parameter for determining the next timestep.
epsmin = FloatOption(2e-2, level=2, filter=_usingQss)
#: Accuracy parameter for repeating timesteps
epsmax = FloatOption(1e1, level=2, filter=_usingQss)
#: Minimum internal timestep
dtmin = FloatOption(1e-16, level=2, filter=_usingQss)
#: Maximum internal timestep
dtmax = FloatOption(1e-6, level=2, filter=_usingQss)
#: Number of corrector iterations per timestep.
iterationCount = IntegerOption(1, level=2, filter=_usingQss)
#: Absolute threshold for including each component in the accuracy
#: tests.
abstol = FloatOption(1e-11, level=2, filter=_usingQss)
#: Lower limit on the value of each state vector component.
minval = FloatOption(1e-60, level=2, filter=_usingQss)
#: Enable convergence-based stability check on timestep. Not
#: enabled unless *iterationCount* >= 3.
stabilityCheck = BoolOption(False, level=2, filter=_usingQss)
class Debug(Options):
""" Control of verbose debugging output """
#: Addition / removal of internal grid points.
adaptation = BoolOption(False, level=2)
#: Addition / removal of boundary grid points.
regridding = BoolOption(True, level=2)
#: Print current time after each global timestep.
timesteps = BoolOption(True, level=2)
#: Enable extensive timestep debugging output. Automatically
#: enabled for debug builds.
veryVerbose = BoolOption(False, level=2)
#: Print information about the flame radius feedback controller.
flameRadiusControl = BoolOption(False, level=2)
#: Grid point to print debugging information about at *sourceTime*.
sourcePoint = IntegerOption(-1, level=3)
#: Time at which to print extensive debugging information about
#: the source term at j = *sourcePoint*, then terminate.
sourceTime = FloatOption(0.0, level=3)
#: Time at which to start saving intermediate integrator profiles when
#: OutputFiles.debugIntegratorStages is True
startTime = FloatOption(0.0, level=3)
#: Time at which to stop saving intermediate integrator profiles when
#: OutputFiles.debugIntegratorStages is True
stopTime = FloatOption(100.0, level=3)
class OutputFiles(Options):
""" Control the contents of the periodic output files """
#: File extension of the output files. 'h5' for HDF5 files, which require
#: the 'h5py' Python module and can be read by other programs, or 'npz' for
#: a compressed NumPy data structure.
fileExtension = dynamicDefaultFileExtension
#: Include the heat release rate as a function of space
heatReleaseRate = BoolOption(True, level=2)
#: Include the reaction / diffusion / convection contributions to
#: the net time derivative of each state variable
timeDerivatives = BoolOption(True, level=2)
#: Include variables such as transport properties and grid
#: parameters that can be recomputed from the state variables.
extraVariables = BoolOption(False, level=2)
#: Include other miscellaneous variables
auxiliaryVariables = BoolOption(False, level=2)
#: Used to generate a continuous sequence of output files after
#: restarting the code.
firstFileNumber = IntegerOption(0, level=2)
#: Generate ``profNNNNNN.h5`` files
saveProfiles = BoolOption(True, level=1)
#: Write profiles after each stage of the split integrator.
debugIntegratorStages = BoolOption(False, level=2)
#: Class used to write periodic output files (e.g. profNNNNNN.h5)
stateWriter = Option(output.StateWriter, level=2)
#: Class used to write time-series files (e.g. out.h5)
timeSeriesWriter = Option(output.TimeSeriesWriter, level=2)
class TerminationCondition(Options):
r"""
Integrate until either *tEnd* is reached or a criterion based on
*measurement* is satisfied. The *measurement*-based check is not enabled
until *tMin* is reached.
- If `measurement == None`, integration will proceed to *tEnd*.
    - If `measurement == 'Q'`, integration will terminate when the heat
release rate reaches a steady-state value to within *tolerance* (RMS)
over a time period of *steadyPeriod*, or the mean heat release rate over
*steadyPeriod* is less than *abstol*.
- If `measurement == 'dTdt'`, integration will terminate when
`||1/T * dT/dt|| / sqrt(nPoints)` is less than *dTdtTol*.
"""
tEnd = FloatOption(0.8) #:
measurement = Option("Q", (None,'dTdt')) #:
tolerance = FloatOption(1e-4, level=2) #:
abstol = FloatOption(0.5, min=0, level=2) #:
steadyPeriod = FloatOption(0.002, min=0, level=1) #:
tMin = FloatOption(0.0, level=1) #:
dTdtTol = FloatOption(10.0) #:
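# Sketch of the 'dTdt' steady-state test quoted in the docstring above,
# ||1/T * dT/dt|| / sqrt(nPoints) < dTdtTol, using the 2-norm (RMS) form.
def _example_dTdt_converged(T, dTdt, dTdtTol=10.0):
    """Return True if the normalized temperature derivative is below dTdtTol."""
    return np.linalg.norm(dTdt / T) / np.sqrt(len(T)) < dTdtTol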
class Config(object):
"""
An object consisting of a set of Options objects which define a
complete set of configuration options needed to run the flame
solver.
"""
def __init__(self, *args):
opts = {}
for arg in args:
if not isinstance(arg, Options):
raise TypeError('%r is not an instance of class Options' % arg)
name = arg.__class__.__name__
if name in opts:
raise ValueError('Multiple instances of class %r encountered' % name)
opts[name] = arg
get = lambda cls: opts.get(cls.__name__) or cls()
self.paths = get(Paths)
self.general = get(General)
self.chemistry = get(Chemistry)
self.grid = get(Grid)
self.initialCondition = get(InitialCondition)
self.wallFlux = opts.get('WallFlux')
self.ignition = get(Ignition)
self.externalHeatFlux = get(ExternalHeatFlux)
self.strainParameters = get(StrainParameters)
self.positionControl = opts.get('PositionControl')
self.times = get(Times)
self.cvodeTolerances = get(CvodeTolerances)
self.qssTolerances = get(QssTolerances)
self.debug = get(Debug)
self.outputFiles = get(OutputFiles)
self.terminationCondition = get(TerminationCondition)
self.extinction = get(Extinction)
def evaluate(self):
return ConcreteConfig(self)
def __iter__(self):
for item in self.__dict__.values():
if isinstance(item, Options):
yield item
def stringify(self):
ans = []
for item in self:
text = '\n'.join(item._stringify(4))
if text:
ans.append(text)
return 'conf = Config(\n' + ',\n'.join(ans) + ')\n'
def validate(self):
error = False
        cylindricalFlame = self.general.flameGeometry == 'cylindrical'
        discFlame = self.general.flameGeometry == 'disc'
# Position control can only be used with "twin" or "curved" flames
if (self.positionControl is not None and
not self.general.twinFlame and
not cylindricalFlame):
error = True
print("Error: PositionControl can only be used when either 'twinFlame'\n"
" or 'cylindricalFlame' is set to True.")
# twinFlame and cylindricalFlame are mutually exclusive:
if cylindricalFlame and self.general.twinFlame:
error = True
print("Error: 'twinFlame' and 'cylindricalFlame' are mutually exclusive.")
# discFlame and cylindricalFlame are mutually exclusive:
if cylindricalFlame and discFlame:
error = True
print("Error: 'discFlame' and 'cylindricalFlame' are mutually exclusive.")
# the "fuelLeft" option only makes sense for diffusion flames
if (self.initialCondition.flameType == 'premixed' and
self.general.fuelLeft.isSet):
error = True
print("Error: 'general.fuelLeft' should not be specified for premixed flames.")
# the "unburnedLeft" option only makes sense for premixed flames
if (self.initialCondition.flameType == 'diffusion' and
self.general.unburnedLeft.isSet):
error = True
print("Error: 'general.unburnedLeft' should not be specified for diffusion flames.")
# the "fixedTemperature" boundary condition currently only works with
# balanced splitting
if (self.general.splittingMethod == 'strang' and
self.general.continuityBC == 'fixedTemperature'):
error = True
print("Error: 'fixedTemperature' continuity boundary condition is"
" only compatible with 'balanced' splitting.")
# Make sure that the mechanism file actually works and contains the
# specified fuel and oxidizer species
gas = cantera.Solution(self.chemistry.mechanismFile.value,
self.chemistry.phaseID.value,
transport_model=None)
if self.initialCondition.reactants.value is not None:
gas.X = self.initialCondition.reactants.value
        else:
            # Assign each composition in turn to verify that both parse
            # correctly and contain only species from the mechanism.
            gas.X = self.initialCondition.fuel.value
            gas.X = self.initialCondition.oxidizer.value
# Make sure that the mechanism file has sane rate coefficients
if self.initialCondition.flameType == 'premixed':
Tcheck = self.initialCondition.Tu.value
else:
Tcheck = min(self.initialCondition.Tfuel.value,
self.initialCondition.Toxidizer.value)
error |= self.checkRateConstants(gas, Tcheck)
# Make sure the restart file is in the correct place (if specified)
if self.initialCondition.restartFile:
restart = self.initialCondition.restartFile.value
if not os.path.exists(restart):
error = True
print("Error: Couldn't find restart file %r.\n" % restart)
if error:
print('Validation failed.')
print('To force simulation attempt: Config.run("force")')
else:
print('Validation completed successfully.')
return not error
def checkRateConstants(self, gas, T):
"""
A function for finding reactions with suspiciously high
rate constants at low temperatures.
"""
gas.TPY = T, 101325, np.ones(gas.n_species)
Rf = gas.forward_rate_constants
Rr = gas.reverse_rate_constants
error = False
for i in range(len(Rf)):
if Rf[i] > 1e30:
error = True
print('WARNING: Excessively high forward rate constant'
' for reaction %i at T = %6.2f K' % (i+1,T))
print(' Reaction equation: %s' % gas.reaction_equation(i))
print(' Forward rate constant: %e' % Rf[i])
if Rr[i] > 1e30:
error = True
print('WARNING: Excessively high reverse rate constant'
' for reaction %i at T = %6.2f K' % (i+1,T))
print(' Reaction equation: %s' % gas.reaction_equation(i))
print(' Reverse rate constant: %e' % Rr[i])
return error
def run(self, command=None):
"""
Run the simulation using the parameters set in this Config.
        If a list of strain rates is provided via the field
:attr:`strainParameters.rates`, a sequence of flame simulations at the
given strain rates will be run. Otherwise, a single simulation will be
run.
If the script which calls this function is passed the argument
*validate*, then the configuration will be checked for errors and the
script will exit without running the simulation.
"""
if len(sys.argv) > 1 and sys.argv[1].lower() == 'validate':
# Validate the configuration and exit
self.validate()
return
if command is None:
self.validate()
elif command == 'validate':
self.validate()
return
elif command != 'force':
            print('Pass "force" to skip validation and attempt the simulation anyway.')
print('Exiting...')
return
concrete = self.evaluate()
if self.strainParameters.rates:
return concrete.multirun()
else:
return concrete.run()
def runESR(self, command=None):
"""
Run an extinction strain rate simulation using the parameters set in
this Config.
The strain rate parameter will be increased until a steady burning flame
can no longer be achieved.
If the script which calls this function is passed the argument
*validate*, then the configuration will be checked for errors and the
script will exit without running the simulation.
"""
if len(sys.argv) > 1 and sys.argv[1].lower() == 'validate':
# Validate the configuration and exit
self.validate()
return
concrete = self.evaluate()
if command is None:
self.validate()
elif command == 'validate':
self.validate()
return
elif command != 'force':
            print('Pass "force" to skip validation and attempt the simulation anyway.')
print('Exiting...')
return
return concrete.runESR()
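# Illustrative end-to-end sketch of how Config is used from an input script;
# the paths and values here are hypothetical.
def _example_run_script():
    """Build a Config from a few overridden sections and run it."""
    conf = Config(Paths(outputDir='run/demo'),
                  InitialCondition(equivalenceRatio=0.8),
                  StrainParameters(initial=200, final=200))
    return conf.run()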
class ConcreteConfig(_ember.ConfigOptions):
"""
Same structure as class Config, but all the Option objects are replaced
with their actual values, and these values are propagated to an underlying
C++ object as necessary.
"""
def __init__(self, config):
super(ConcreteConfig, self).__init__()
self.original = config
for name, opts in config.__dict__.items():
if isinstance(opts, Options):
group = opts.__class__()
group.original = opts
setattr(self, name, group)
for name in dir(opts):
opt = getattr(opts, name)
if isinstance(opt, Option):
setattr(group, name, opt.value)
elif opts is None:
setattr(self, name, None)
self.gas = cantera.Solution(self.chemistry.mechanismFile,
self.chemistry.phaseID,
transport_model=None)
if self.general.fixedBurnedVal is None:
if ((self.general.twinFlame or self.general.flameGeometry == 'cylindrical')
and not self.general.unburnedLeft):
self.general.fixedBurnedVal = False
else:
self.general.fixedBurnedVal = True
if self.initialCondition.flameType == 'quasi2d':
self.setupQuasi2d()
elif self.initialCondition.restartFile:
self.readInitialCondition(self.initialCondition.restartFile)
elif not self.initialCondition.haveProfiles:
self.generateInitialCondition()
self.apply_options()
def readInitialCondition(self, restartFile):
"""
Read the initial profiles for temperature, species mass fractions, and
velocity from the specified input file.
"""
IC = self.initialCondition
data = utils.HDFStruct(restartFile)
IC.x = data.x
IC.Y = data.Y
IC.T = data.T
IC.U = data.U
IC.V = data.V
IC.haveProfiles = True
if any(map(IC.isSet, ('fuel', 'oxidizer', 'Tfuel', 'Toxidizer', 'reactants',
'equivalenceRatio', 'Tcounterflow', 'counterflow'))):
self.setBoundaryValues(IC.T, IC.Y, IC.V)
def setBoundaryValues(self, T, Y, V=None):
IC = self.initialCondition
jm = (IC.nPoints-1) // 2
gas = self.gas
if IC.flameType == 'premixed':
# Reactants
if IC.reactants:
gas.X = IC.reactants
else:
gas.set_equivalence_ratio(IC.equivalenceRatio, IC.fuel, IC.oxidizer)
gas.TP = IC.Tu, IC.pressure
rhou = gas.density
Yu = gas.Y
# Products
gas.equilibrate('HP')
if IC.Tcounterflow is None:
Tb = gas.T
else:
Tb = IC.Tcounterflow
if IC.counterflow is None:
Yb = gas.Y
else:
gas.TPX = Tb, IC.pressure, IC.counterflow
Yb = gas.Y
if IC.equilibrateCounterflow:
gas.TPY = Tb, IC.pressure, Yb
gas.equilibrate(IC.equilibrateCounterflow)
Yb = gas.Y
Tb = gas.T
if self.general.unburnedLeft:
T[0] = IC.Tu
Y[:,0] = Yu
if V is None or V[-1] < 0:
T[-1] = Tb
Y[:,-1] = Yb
else:
if V is None or V[0] > 0:
T[0] = Tb
Y[:,0] = Yb
T[-1] = IC.Tu
Y[:,-1] = Yu
elif IC.flameType == 'diffusion':
# Fuel
gas.TPX = IC.Tfuel, IC.pressure, IC.fuel
Yfuel = gas.Y
# Oxidizer
gas.TPX = IC.Toxidizer, IC.pressure, IC.oxidizer
if IC.equilibrateCounterflow:
gas.equilibrate(IC.equilibrateCounterflow)
rhou = gas.density # use oxidizer value for diffusion flame
Yoxidizer = gas.Y
Toxidizer = gas.T
if self.general.fuelLeft:
T[0] = IC.Tfuel
Y[:,0] = Yfuel
T[-1] = Toxidizer
Y[:,-1] = Yoxidizer
else:
T[0] = Toxidizer
Y[:,0] = Yoxidizer
T[-1] = IC.Tfuel
Y[:,-1] = Yfuel
return rhou
def generateInitialCondition(self):
"""
Generate initial profiles for temperature, species mass fractions, and
velocity using the specified fuel and oxidizer compositions and flame
configuration parameters.
"""
IC = self.initialCondition
beta = (2.0 if self.general.flameGeometry=='disc' else 1.0)
N = IC.nPoints
gas = self.gas
xLeft = (0.0 if self.general.twinFlame or self.general.flameGeometry == 'cylindrical'
else IC.xLeft)
x = np.linspace(xLeft, IC.xRight, N)
T = np.zeros(N)
Y = np.zeros((self.gas.n_species, N))
V = np.zeros(N)
jm = (IC.nPoints-1) // 2
# make sure the initial profile fits comfortably in the domain
scale = 0.8 * (x[-1] - x[0]) / (IC.centerWidth + 2 * IC.slopeWidth)
if scale < 1.0:
IC.slopeWidth *= scale
IC.centerWidth *= scale
# Determine the grid indices defining each profile segment
dx = x[1]-x[0]
centerPointCount = int(0.5 + 0.5 * IC.centerWidth / dx)
slopePointCount = int(0.5 + IC.slopeWidth / dx)
jl2 = jm - centerPointCount
jl1 = jl2 - slopePointCount
jr1 = jm + centerPointCount
jr2 = jr1 + slopePointCount
rhou = self.setBoundaryValues(T, Y)
if IC.flameType == 'premixed':
gas.set_equivalence_ratio(IC.equivalenceRatio, IC.fuel, IC.oxidizer)
gas.TP = IC.Tu, IC.pressure
gas.equilibrate('HP')
T[jm] = gas.T
Y[:,jm] = gas.Y
elif IC.flameType == 'diffusion':
# Assume stoichiometric mixture at the center
IC.equivalenceRatio = 1.0
gas.set_equivalence_ratio(1.0, IC.fuel, IC.oxidizer)
gas.TP = 0.5*(IC.Tfuel+IC.Toxidizer), IC.pressure
gas.equilibrate('HP')
T[jm] = gas.T
Y[:,jm] = gas.Y
newaxis = np.newaxis
Y[:,1:jl1] = Y[:,0,newaxis]
T[1:jl1] = T[0]
ramp = np.linspace(0, 1, jl2-jl1)
Y[:,jl1:jl2] = Y[:,0,newaxis] + (Y[:,jm]-Y[:,0])[:,newaxis]*ramp
T[jl1:jl2] = T[0] + (T[jm]-T[0]) * ramp
Y[:,jl2:jr1] = Y[:,jm,newaxis]
T[jl2:jr1] = T[jm]
ramp = np.linspace(0, 1, jr2-jr1)
Y[:,jr1:jr2] = Y[:,jm,newaxis] + (Y[:,-1]-Y[:,jm])[:,newaxis]*ramp
T[jr1:jr2] = T[jm] + (T[-1]-T[jm]) * ramp
Y[:,jr2:] = Y[:,-1,newaxis]
T[jr2:] = T[-1]
YT = Y.T
for _ in range(IC.smoothCount):
utils.smooth(YT)
utils.smooth(T)
Y = YT.T
rho = np.zeros(N)
U = np.zeros(N)
if self.strainParameters.function:
a0 = self.strainParameters.function(self.times.tStart)
else:
a0 = self.strainParameters.initial
for j in range(N):
gas.TPY = T[j], IC.pressure, Y[:,j]
rho[j] = gas.density
U[j] = a0 / beta * np.sqrt(rhou/rho[j])
for _ in range(2):
utils.smooth(U)
if self.general.twinFlame or self.general.flameGeometry == 'cylindrical':
# Stagnation point at x = 0
V[0] = 0
for j in range(1, N):
# derived from finite difference of continuity equation
V[j] = V[j-1] - beta * rho[j]*U[j]*(x[j] - x[j-1])
elif IC.flameType == 'diffusion':
jz = N // 4 # place stagnation point on the fuel side (flame on oxidizer side)
V[jz] = 0
for j in range(jz+1, N):
V[j] = V[j-1] - beta * rho[j]*U[j]*(x[j] - x[j-1])
for j in range(jz-1, -1, -1):
V[j] = V[j+1] + beta * rho[j]*U[j]*(x[j+1] - x[j])
else: # Single Premixed jet opposing inert or hot products
jz = 3 * N // 4 # place stagnation point on the products/inert side
V[jz] = 0
for j in range(jz+1, N):
V[j] = V[j-1] - beta * rho[j]*U[j]*(x[j] - x[j-1])
for j in range(jz-1, -1, -1):
V[j] = V[j+1] + beta * rho[j]*U[j]*(x[j+1] - x[j])
IC.x = x
IC.Y = Y
IC.T = T
IC.U = U
IC.V = V
IC.haveProfiles = True
def setupQuasi2d(self):
IC = self.initialCondition
data = utils.HDFStruct(self.general.interpFile)
IC.x = data.r
IC.Y = data.Y0
IC.T = data.T[0]
IC.U = data.U
IC.V = data.vz[0]
IC.haveProfiles = True
IC.interpData = data
def run(self):
"""
Run a single flame simulation using the parameters set in this Config.
"""
confString = self.original.stringify()
if not os.path.isdir(self.paths.outputDir):
os.makedirs(self.paths.outputDir, 0o0755)
confOutPath = os.path.join(self.paths.outputDir, 'config')
if (os.path.exists(confOutPath)):
os.unlink(confOutPath)
confOut = open(confOutPath, 'w')
confOut.write(confString)
solver = _ember.FlameSolver(self)
solver.initialize()
done = 0
while not done:
done = solver.step()
solver.finalize()
return solver
def runESR(self):
"""
Run a sequence of flame simulations at increasing strain rates until
a steady burning solution is no longer possible with an increase
in strain rate. The parameters governing this progression are defined
under the configuration field :attr:`Extinction`.
"""
if os.path.exists(self.paths.outputDir):
print('Output directory already exists')
print('Exiting...')
return
        # Establish the directory structure for saving the output files from
        # the sequence of flame simulations at increasing strain rates
outDirTop = self.paths.outputDir
        logPath = os.path.join(self.paths.outputDir, 'logFiles')
        currentRunPath = os.path.join(self.paths.outputDir, 'runFiles')
        ssProfilePath = os.path.join(self.paths.outputDir, 'ssFiles')
restartPath = None
self.paths.outputDir = currentRunPath
if not os.path.exists(self.paths.outputDir):
os.makedirs(outDirTop, 0o0755)
os.makedirs(logPath, 0o0755)
os.makedirs(currentRunPath, 0o0755)
os.makedirs(ssProfilePath, 0o0755)
fileExt = self.outputFiles.fileExtension
if self.paths.logFile:
_logFile = open(self.paths.logFile, 'w')
def log(message):
_logFile.write(message)
_logFile.write('\n')
_logFile.flush()
else:
def log(message):
print(message)
strainRateValues = []
maxTemps = []
strainRate = self.extinction.initialStrainRate
# There are two methods that can be used to progress and converge to the
# extinction strain rate. The first is by an initially constant step
# size in strain rate that will later be reduced when converging to the
# extinction point. The second is to increase by a factor of the current
# strain rate which will also be reduced when converging to the
# extinction point.
if self.extinction.method == 'step':
stepSize = self.extinction.initialStep
else:
incFactor = self.extinction.initialFactor
complete = False
hasExtinguished = False
# The extinction simulation is considered converged once the step size
# or increase factor has been reduced below the minimum cutoff value
# that is specified by the user in the configuration script.
while not complete:
# Because a continuation approach is used, the flame solution at the
# previous strain rate is used as an initial guess for the flame at
# the new strain rate, and 'restartPath' references the location of
# the most recently converged strained flame at the highest strain
# rate so far.
if restartPath is not None:
if self.extinction.method == 'step':
strainRate += stepSize
else:
strainRate *= incFactor
log('\nBeginning run at strain rate: %g s^-1' % strainRate)
self.strainParameters.initial = strainRate
self.strainParameters.final = strainRate
self.paths.logFile = os.path.join(logPath, 'log_sr{:08.2f}.txt'.format(strainRate))
self.apply_options()
solver = _ember.FlameSolver(self)
t1 = time.time()
solver.initialize()
done = False
extinguished = False
while not done:
done = solver.step()
# In order to speed up ESR calculations, the user specifies in
# the input a 'cutoffTemp'. If the simulation maximum
# temperature falls below this temperature, the simulation
# stops, and it is assumed that the simulation would simply
# continue in time until converging to a non-burning opposed jet
# solution which can be computationally intensive, especially
# for kinetic models with large numbers of species.
if max(solver.T) < self.extinction.cutoffTemp:
done = True
extinguished = True
solver.finalize()
t2 = time.time()
log('Run finished; Integration took %.1f seconds.' % (t2-t1))
            # When using the 'dTdt' convergence criterion for strained flames
            # near the extinction strain rate, the simulation can converge
            # prematurely under some conditions. This yields a maximum
            # temperature that is incorrectly greater than the previous max T
            # at a lower strain rate. When this issue is observed, the
            # following code block tightens the convergence criteria
            # accordingly to avoid this non-physical result.
            if hasExtinguished:
if max(solver.T) >= maxTemps[-1]:
if self.terminationCondition.measurement == 'dTdt':
print('Switching convergence method to Q')
self.terminationCondition.measurement = 'Q'
done = False
while not done:
done = solver.step()
if max(solver.T) < self.extinction.cutoffTemp:
done = True
extinguished = True
solver.finalize()
t2 = time.time()
if max(solver.T) >= maxTemps[-1]:
print('Tightening tolerance')
self.terminationCondition.tolerance /= 2.0
done = False
while not done:
done = solver.step()
if max(solver.T) < self.extinction.cutoffTemp:
done = True
extinguished = True
solver.finalize()
t2 = time.time()
if max(solver.T) >= maxTemps[-1]:
print('Refining tolerance failed')
                                print('Marking as extinguished...')
extinguished = True
# When a steady, burning solution is found, the solution is saved
# and the starting point for subsequent simulations is updated to
# this newly converged strain rate.
if extinguished is False:
log('Found burning solution with Tmax = %.1f' % max(solver.T))
restartFile = 'prof_sr{:08.2f}.{}'.format(strainRate, fileExt)
restartPath = os.path.join(ssProfilePath, restartFile)
solver.writeStateFile(os.path.splitext(restartFile)[0])
os.rename(os.path.join(self.paths.outputDir, restartFile), restartPath)
strainRateValues.append(strainRate)
maxTemps.append(max(solver.T))
with open(os.path.join(outDirTop, 'conf_extinction.txt'), 'w') as confOut:
confOut.write(self.original.stringify())
else:
log('Found non-burning solution')
if restartPath is not None:
hasExtinguished = True
if self.extinction.method == 'step':
strainRate -= stepSize
stepSize *= self.extinction.reductionFactor
if stepSize < self.extinction.minStep:
complete = True
else:
strainRate /= incFactor
incFactor = 1.0 + (incFactor-1.0) * self.extinction.reductionFactor
if incFactor < self.extinction.minFactor:
complete = True
# To reduce the number of files produced during an extinction
# simulation, the intermediate time point solutions are deleted
# after completing a run to the steady state solution.
if not complete:
shutil.rmtree(currentRunPath)
os.mkdir(currentRunPath, 0o0755)
            # It is possible for the user to specify an initial starting
            # strain rate that is already beyond the extinction strain rate
            # for the specified unburned gas. If a steady, burning flame is
            # not achieved at the initial strain rate, the initial strain
            # rate is reduced by a factor of 2 and convergence is tried again.
if restartPath is None:
strainRate /= 2.0
if strainRate < 10.0:
complete = True
print('Failed to find burning starting point')
else:
self.readInitialCondition(restartPath)
        # A summary of the progression to extinction is saved. This is often
        # useful for qualitatively evaluating whether the solution has
        # converged to the extinction point.
        with open(os.path.join(outDirTop, 'extProfile.csv'), 'w') as extProgData:
            extProgData.write('Strain Rate [1/s],Max. Temp. [K]\n')
            for a, Tmax in zip(strainRateValues, maxTemps):
                extProgData.write('{0:0.5f},{1:0.5f}\n'.format(a, Tmax))
return _ember.FlameSolver(self)
def multirun(self):
"""
Run a sequence of flame simulations at different strain rates using
the parameters set in this Config object. The list of strain rates is
defined by the configuration field :attr:`strainParameters.rates`.
"""
confString = self.original.stringify()
strainRates = self.strainParameters.rates
if not strainRates:
print('No strain rate list specified')
return
self.strainParameters.rates = None
if self.paths.logFile:
_logFile = open(self.paths.logFile, 'w')
def log(message):
_logFile.write(message)
_logFile.write('\n')
_logFile.flush()
else:
def log(message):
print(message)
if not os.path.exists(self.paths.outputDir):
os.makedirs(self.paths.outputDir, 0o0755)
self.strainParameters.initial = strainRates[0]
aSave = []
Q = []
Sc = []
xFlame = []
fileExt = self.outputFiles.fileExtension
for a in strainRates:
aSave.append(a)
restartFile = 'prof_eps{:04d}.{}'.format(a, fileExt)
historyFile = 'out_eps{:04d}.{}'.format(a, fileExt)
configFile = 'conf_eps{:04d}.{}'.format(a, fileExt)
restartPath = os.path.join(self.paths.outputDir, restartFile)
historyPath = os.path.join(self.paths.outputDir, historyFile)
configPath = os.path.join(self.paths.outputDir, configFile)
if os.path.exists(restartPath) and os.path.exists(historyPath):
# If the output files already exist, we simply retrieve the
# integral flame properties from the existing profiles and
# advance to the next strain rate.
log('Skipping run at strain rate a = %g'
' because the output file "%s" already exists.' % (a, restartFile))
# Compute integral properties using points from the last half
# of the termination-check period
data = utils.HDFStruct(historyPath)
mask = data.t > data.t[-1] - 0.5*self.terminationCondition.steadyPeriod
if not any(mask):
log('Warning: old data file did not contain data'
' spanning the requested period.')
mask = data.t > 0.5*data.t[-1]
Q.append(np.mean(data.Q[mask]))
Sc.append(np.mean(data.Sc[mask]))
xFlame.append(np.mean(data.xFlame[mask]))
del data
else:
# Data is not already present, so run the flame solver for this strain rate
log('Beginning run at strain rate a = %g s^-1' % a)
confOut = open(configPath, 'w')
confOut.write(confString)
self.strainParameters.initial = a
self.strainParameters.final = a
self.paths.logFile = os.path.join(self.paths.outputDir, 'log-eps%04i.txt' % a)
log("Writing output file for run to '%s'" % self.paths.logFile)
self.apply_options()
solver = _ember.FlameSolver(self)
t1 = time.time()
solver.initialize()
done = 0
while not done:
done = solver.step()
solver.finalize()
t2 = time.time()
log('Completed run at strain rate a = %g s^-1' % a)
log('Integration took %.1f seconds.' % (t2-t1))
solver.writeStateFile(os.path.splitext(restartFile)[0])
solver.writeTimeseriesFile(os.path.splitext(historyFile)[0])
tRun = np.array(solver.timeseriesWriter.t)
QRun = np.array(solver.timeseriesWriter.Q)
ScRun = np.array(solver.timeseriesWriter.Sc)
xFlameRun = np.array(solver.timeseriesWriter.xFlame)
# Compute integral properties using points from the last half
# of the termination-check period
mask = tRun > tRun[-1] - 0.5*self.terminationCondition.steadyPeriod
Q.append(np.mean(QRun[mask]))
Sc.append(np.mean(ScRun[mask]))
xFlame.append(np.mean(xFlameRun[mask]))
self.readInitialCondition(restartPath)
# Sort by strain rate:
aSave, Q, Sc, xFlame = map(list, zip(*sorted(zip(aSave, Q, Sc, xFlame))))
integralFile = os.path.join(self.paths.outputDir,
"integral.{}".format(self.outputFiles.fileExtension))
if os.path.exists(integralFile):
os.unlink(integralFile)
with output.OutputFile(integralFile) as data:
data['a'] = aSave
data['Q'] = Q
data['Sc'] = Sc
data['xFlame'] = xFlame
return _ember.FlameSolver(self)
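# Sketch of reading back the integral properties written by multirun() above,
# using the same utils.HDFStruct reader employed elsewhere in this module.
# Assumes an HDF5 ('h5') output file.
def _example_load_integral(outputDir, fileExt='h5'):
    """Return (a, Q, Sc, xFlame) arrays from an integral output file."""
    data = utils.HDFStruct(os.path.join(outputDir, 'integral.' + fileExt))
    return data.a, data.Q, data.Sc, data.xFlame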
|
speth/ember
|
python/ember/input.py
|
Python
|
mit
| 65,505
|
[
"Gaussian"
] |
59d30b7212cc8d8d21056362fbbb8e79fd1bd1306294805ab4ad8251d759fc14
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Autoregressive model."""
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.sts.internal import util as sts_util
from tensorflow_probability.python.sts.structural_time_series import Parameter
from tensorflow_probability.python.sts.structural_time_series import StructuralTimeSeries
class AutoregressiveStateSpaceModel(tfd.LinearGaussianStateSpaceModel):
"""State space model for an autoregressive process.
A state space model (SSM) posits a set of latent (unobserved) variables that
evolve over time with dynamics specified by a probabilistic transition model
`p(z[t+1] | z[t])`. At each timestep, we observe a value sampled from an
observation model conditioned on the current state, `p(x[t] | z[t])`. The
special case where both the transition and observation models are Gaussians
with mean specified as a linear function of the inputs, is known as a linear
Gaussian state space model and supports tractable exact probabilistic
calculations; see `tfp.distributions.LinearGaussianStateSpaceModel` for
details.
In an autoregressive process, the expected level at each timestep is a linear
function of previous levels, with added Gaussian noise:
```python
level[t+1] = (sum(coefficients * levels[t:t-order:-1]) +
Normal(0., level_scale))
```
The process is characterized by a vector `coefficients` whose size determines
the order of the process (how many previous values it looks at), and by
`level_scale`, the standard deviation of the noise added at each step.
This is formulated as a state space model by letting the latent state encode
the most recent values; see 'Mathematical Details' below.
The parameters `level_scale` and `observation_noise_scale` are each (a batch
of) scalars, and `coefficients` is a (batch) vector of size `[order]`. The
batch shape of this `Distribution` is the broadcast batch
shape of these parameters and of the `initial_state_prior`.
#### Mathematical Details
The autoregressive model implements a
`tfp.distributions.LinearGaussianStateSpaceModel` with `latent_size = order`
and `observation_size = 1`. The latent state vector encodes the recent history
of the process, with the current value in the topmost dimension. At each
timestep, the transition sums the previous values to produce the new expected
value, shifts all other values down by a dimension, and adds noise to the
current value. This is formally encoded by the transition model:
```
  transition_matrix = [ coefs[0], coefs[1], ..., coefs[order-1]
                        1.,       0 ,       ..., 0.
                        0.,       1.,       ..., 0.
                        ...
                        0.,       0.,       ...,  1.,  0. ]
transition_noise ~ N(loc=0., scale=diag([level_scale, 0., 0., ..., 0.]))
```
The observation model simply extracts the current (topmost) value, and
optionally adds independent noise at each step:
```
observation_matrix = [[1., 0., ..., 0.]]
observation_noise ~ N(loc=0, scale=observation_noise_scale)
```
Models with `observation_noise_scale = 0.` are AR processes in the formal
sense. Setting `observation_noise_scale` to a nonzero value corresponds to a
latent AR process observed under an iid noise model.
#### Examples
A simple model definition:
```python
ar_model = AutoregressiveStateSpaceModel(
num_timesteps=50,
coefficients=[0.8, -0.1],
level_scale=0.5,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[1., 1.]))
y = ar_model.sample() # y has shape [50, 1]
lp = ar_model.log_prob(y) # log_prob is scalar
```
Passing additional parameter dimensions constructs a batch of models. The
overall batch shape is the broadcast batch shape of the parameters:
```python
ar_model = AutoregressiveStateSpaceModel(
num_timesteps=50,
coefficients=[0.8, -0.1],
level_scale=tf.ones([10]),
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.ones([10, 10, 2])))
y = ar_model.sample(5) # y has shape [5, 10, 10, 50, 1]
lp = ar_model.log_prob(y) # has shape [5, 10, 10]
```
"""
def __init__(self,
num_timesteps,
coefficients,
level_scale,
initial_state_prior,
observation_noise_scale=0.,
name=None,
**linear_gaussian_ssm_kwargs):
"""Build a state space model implementing an autoregressive process.
Args:
num_timesteps: Scalar `int` `Tensor` number of timesteps to model
with this distribution.
coefficients: `float` `Tensor` of shape `concat(batch_shape, [order])`
defining the autoregressive coefficients. The coefficients are defined
backwards in time: `coefficients[0] * level[t] + coefficients[1] *
level[t-1] + ... + coefficients[order-1] * level[t-order+1]`.
level_scale: Scalar (any additional dimensions are treated as batch
dimensions) `float` `Tensor` indicating the standard deviation of the
transition noise at each step.
initial_state_prior: instance of `tfd.MultivariateNormal`
representing the prior distribution on latent states. Must have
event shape `[order]`.
observation_noise_scale: Scalar (any additional dimensions are
treated as batch dimensions) `float` `Tensor` indicating the standard
deviation of the observation noise.
Default value: 0.
name: Python `str` name prefixed to ops created by this class.
Default value: "AutoregressiveStateSpaceModel".
      **linear_gaussian_ssm_kwargs: Optional additional keyword arguments to
        pass to the base `tfd.LinearGaussianStateSpaceModel` constructor.
"""
parameters = dict(locals())
parameters.update(linear_gaussian_ssm_kwargs)
del parameters['linear_gaussian_ssm_kwargs']
with tf.name_scope(name or 'AutoregressiveStateSpaceModel') as name:
# The initial state prior determines the dtype of sampled values.
# Other model parameters must have the same dtype.
dtype = initial_state_prior.dtype
coefficients = tf.convert_to_tensor(
value=coefficients, name='coefficients', dtype=dtype)
level_scale = tf.convert_to_tensor(
value=level_scale, name='level_scale', dtype=dtype)
observation_noise_scale = tf.convert_to_tensor(
value=observation_noise_scale,
name='observation_noise_scale', dtype=dtype)
order = tf.compat.dimension_value(coefficients.shape[-1])
if order is None:
raise ValueError('Autoregressive coefficients must have static shape.')
self._order = order
self._coefficients = coefficients
self._level_scale = level_scale
super(AutoregressiveStateSpaceModel, self).__init__(
num_timesteps=num_timesteps,
transition_matrix=make_ar_transition_matrix(coefficients),
transition_noise=tfd.MultivariateNormalDiag(
scale_diag=tf.stack([level_scale] +
[tf.zeros_like(level_scale)] * (
self.order - 1), axis=-1)),
observation_matrix=tf.concat([tf.ones([1, 1], dtype=dtype),
tf.zeros([1, self.order - 1],
dtype=dtype)],
axis=-1),
observation_noise=tfd.MultivariateNormalDiag(
scale_diag=observation_noise_scale[..., tf.newaxis]),
initial_state_prior=initial_state_prior,
name=name,
**linear_gaussian_ssm_kwargs)
self._parameters = parameters
@property
def order(self):
return self._order
@property
def coefficients(self):
return self._coefficients
@property
def level_scale(self):
return self._level_scale
def make_ar_transition_matrix(coefficients):
"""Build transition matrix for an autoregressive StateSpaceModel.
When applied to a vector of previous values, this matrix computes
the expected new value (summing the previous states according to the
autoregressive coefficients) in the top dimension of the state space,
and moves all previous values down by one dimension, 'forgetting' the
final (least recent) value. That is, it looks like this:
```
  ar_matrix = [ coefs[0], coefs[1], ..., coefs[order-1]
                1.,       0 ,       ..., 0.
                0.,       1.,       ..., 0.
                ...
                0.,       0.,       ...,  1.,  0. ]
```
Args:
coefficients: float `Tensor` of shape `concat([batch_shape, [order]])`.
Returns:
ar_matrix: float `Tensor` with shape `concat([batch_shape,
[order, order]])`.
"""
top_row = tf.expand_dims(coefficients, -2)
coef_shape = dist_util.prefer_static_shape(coefficients)
batch_shape, order = coef_shape[:-1], coef_shape[-1]
remaining_rows = tf.concat([
tf.eye(order - 1, dtype=coefficients.dtype, batch_shape=batch_shape),
tf.zeros(ps.concat([batch_shape, (order - 1, 1)], axis=0),
dtype=coefficients.dtype)
], axis=-1)
ar_matrix = tf.concat([top_row, remaining_rows], axis=-2)
return ar_matrix
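# Quick usage sketch (not part of the library): for coefficients [0.9, -0.2],
# the resulting AR(2) transition matrix is [[0.9, -0.2], [1., 0.]].
def _example_ar_transition_matrix():
  """Build and return a small AR(2) transition matrix (illustration only)."""
  return make_ar_transition_matrix(tf.constant([0.9, -0.2]))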
class Autoregressive(StructuralTimeSeries):
"""Formal representation of an autoregressive model.
An autoregressive (AR) model posits a latent `level` whose value at each step
is a noisy linear combination of previous steps:
```python
level[t+1] = (sum(coefficients * levels[t:t-order:-1]) +
Normal(0., level_scale))
```
The latent state is `levels[t:t-order:-1]`. We observe a noisy realization of
the current level: `f[t] = level[t] + Normal(0., observation_noise_scale)` at
each timestep.
If `coefficients=[1.]`, the AR process is a simple random walk, equivalent to
a `LocalLevel` model. However, a random walk's variance increases with time,
while many AR processes (in particular, any first-order process with
`abs(coefficient) < 1`) are *stationary*, i.e., they maintain a constant
variance over time. This makes AR processes useful models of uncertainty.
See the [Wikipedia article](
https://en.wikipedia.org/wiki/Autoregressive_model#Definition) for details on
stationarity and other mathematical properties of autoregressive processes.
"""
def __init__(self,
order,
coefficients_prior=None,
level_scale_prior=None,
initial_state_prior=None,
coefficient_constraining_bijector=None,
observed_time_series=None,
name=None):
"""Specify an autoregressive model.
Args:
order: scalar Python positive `int` specifying the number of past
timesteps to regress on.
coefficients_prior: optional `tfd.Distribution` instance specifying a
prior on the `coefficients` parameter. If `None`, a default standard
normal (`tfd.MultivariateNormalDiag(scale_diag=tf.ones([order]))`) prior
is used.
Default value: `None`.
level_scale_prior: optional `tfd.Distribution` instance specifying a prior
on the `level_scale` parameter. If `None`, a heuristic default prior is
constructed based on the provided `observed_time_series`.
Default value: `None`.
initial_state_prior: optional `tfd.Distribution` instance specifying a
prior on the initial state, corresponding to the values of the process
at a set of size `order` of imagined timesteps before the initial step.
If `None`, a heuristic default prior is constructed based on the
provided `observed_time_series`.
Default value: `None`.
coefficient_constraining_bijector: optional `tfb.Bijector` instance
representing a constraining mapping for the autoregressive coefficients.
For example, `tfb.Tanh()` constrains the coefficients to lie in
`(-1, 1)`, while `tfb.Softplus()` constrains them to be positive, and
`tfb.Identity()` implies no constraint. If `None`, the default behavior
constrains the coefficients to lie in `(-1, 1)` using a `Tanh` bijector.
Default value: `None`.
observed_time_series: optional `float` `Tensor` of shape
`batch_shape + [T, 1]` (omitting the trailing unit dimension is also
supported when `T > 1`), specifying an observed time series. Any `NaN`s
        are interpreted as missing observations; missingness may also be
explicitly specified by passing a `tfp.sts.MaskedTimeSeries` instance.
Any priors not explicitly set will be given default values according to
the scale of the observed time series (or batch of time series).
Default value: `None`.
name: the name of this model component.
Default value: 'Autoregressive'.
"""
init_parameters = dict(locals())
with tf.name_scope(name or 'Autoregressive') as name:
masked_time_series = None
if observed_time_series is not None:
masked_time_series = (
sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series))
dtype = dtype_util.common_dtype(
[(masked_time_series.time_series
if masked_time_series is not None else None),
coefficients_prior,
level_scale_prior,
initial_state_prior], dtype_hint=tf.float32)
if observed_time_series is not None:
_, observed_stddev, observed_initial = sts_util.empirical_statistics(
masked_time_series)
else:
observed_stddev, observed_initial = (
tf.convert_to_tensor(value=1., dtype=dtype),
tf.convert_to_tensor(value=0., dtype=dtype))
batch_ones = tf.ones(ps.concat([
ps.shape(observed_initial), # Batch shape
[order]], axis=0), dtype=dtype)
# Heuristic default priors. Overriding these may dramatically
# change inference performance and results.
if coefficients_prior is None:
coefficients_prior = tfd.MultivariateNormalDiag(
scale_diag=batch_ones)
if level_scale_prior is None:
level_scale_prior = tfd.LogNormal(
loc=tf.math.log(0.05 * observed_stddev), scale=3.)
if (coefficients_prior.event_shape.is_fully_defined() and
order != coefficients_prior.event_shape[0]):
raise ValueError("Prior dimension {} doesn't match order {}.".format(
coefficients_prior.event_shape[0], order))
if initial_state_prior is None:
initial_state_prior = tfd.MultivariateNormalDiag(
loc=observed_initial[..., tf.newaxis] * batch_ones,
scale_diag=(tf.abs(observed_initial) +
observed_stddev)[..., tf.newaxis] * batch_ones)
self._order = order
self._coefficients_prior = coefficients_prior
self._level_scale_prior = level_scale_prior
self._initial_state_prior = initial_state_prior
if coefficient_constraining_bijector is None:
coefficient_constraining_bijector = tfb.Tanh()
super(Autoregressive, self).__init__(
parameters=[
Parameter('coefficients',
coefficients_prior,
coefficient_constraining_bijector),
Parameter('level_scale', level_scale_prior,
tfb.Chain([tfb.Scale(scale=observed_stddev),
tfb.Softplus(low=dtype_util.eps(dtype))]))
],
latent_size=order,
init_parameters=init_parameters,
name=name)
@property
def initial_state_prior(self):
return self._initial_state_prior
def _make_state_space_model(self,
num_timesteps,
param_map=None,
initial_state_prior=None,
**linear_gaussian_ssm_kwargs):
if initial_state_prior is None:
initial_state_prior = self.initial_state_prior
if param_map:
linear_gaussian_ssm_kwargs.update(param_map)
return AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
initial_state_prior=initial_state_prior,
name=self.name,
**linear_gaussian_ssm_kwargs)
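# --- Illustrative usage sketch (not part of the original module) ---
# Assuming `observed` is a float Tensor (or array) of observed values, an
# AR(1) component and a one-component sum model could be built roughly as:
#
#   import tensorflow_probability as tfp
#   ar = tfp.sts.Autoregressive(order=1, observed_time_series=observed)
#   model = tfp.sts.Sum([ar], observed_time_series=observed)
#
# The component's priors (coefficients, level_scale) are then exposed via
# `ar.parameters`, with defaults derived from `observed` as described above.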
|
tensorflow/probability
|
tensorflow_probability/python/sts/components/autoregressive.py
|
Python
|
apache-2.0
| 17,359
|
[
"Gaussian"
] |
b24a09ceba82f0bb95d9ce16b41ddf543e1af80034f605dff9a9f05b9e09d2a4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup, Extension
from pybind11.setup_helpers import Pybind11Extension, build_ext
# Publish the library to PyPI.
if "publish" in sys.argv[-1]:
os.system("python setup.py sdist upload")
sys.exit()
# Default compile arguments.
ext = Pybind11Extension(
"celerite.solver",
sources=["celerite/solver.cpp"],
language="c++",
include_dirs=["cpp/include", "cpp/lib/eigen"],
)
setup(
name="celerite",
use_scm_version={
"write_to": os.path.join("celerite/celerite_version.py"),
"write_to_template": '__version__ = "{version}"\n',
},
author="Daniel Foreman-Mackey",
author_email="foreman.mackey@gmail.com",
url="https://github.com/dfm/celerite",
license="MIT",
packages=["celerite"],
install_requires=["numpy"],
extras_require={
"test": [
"autograd",
"coverage[toml]",
"pytest",
"pytest-cov",
]
},
ext_modules=[ext],
description="Scalable 1D Gaussian Processes",
long_description=open("README.rst").read(),
package_data={"": ["README.rst", "LICENSE", "CITATION"]},
include_package_data=True,
python_requires=">=3.6",
cmdclass=dict(build_ext=build_ext),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
zip_safe=True,
)
|
dfm/celerite
|
setup.py
|
Python
|
mit
| 1,642
|
[
"Gaussian"
] |
02d1813d6b5726705ea3737b464f5da9c3f63218c30700a834df3b20480cb61d
|
#!/usr/bin/env python
"""
Provides the PeakFitter class for fitting Gaussian peaks in 1D spectra.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.font_manager import FontProperties
from lmfit import models
import pandas as pd
import time
from collections import OrderedDict
__author__ = "Joseph Curtis"
__copyright__ = "None"
__credits__ = ["Mark Bandstra", "Ren Cooper", "Brian Plimley"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Joseph Curtis"
__email__ = "joseph.c.curtis@gmail.com"
__status__ = "Production"
FWHM_SIG_RATIO = 2.35482
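# For a Gaussian, FWHM = 2 * sqrt(2 * ln(2)) * sigma ~= 2.35482 * sigma; this
# constant converts between FWHM and sigma.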
class PeakFitter(object):
def __init__(self, x, y, x_units, y_units, y_sig=None, verbosity=0, **kwargs):
"""
args (converted to numpy arrays of floats):
x = bincen (bincenters)
x_units = `Channels` or `Energy (keV)`
y = spec (best practice is counts/keV/s but then err could be
incorrectly calculated if not provided)
y_units = `Counts` or `Counts / second` or 'Counts / keV' or 'Counts / second / keV'
y_sig = spec uncertainty
kwargs:
roi
det_type
x_edges
x_widths
"""
self.verbosity = verbosity
assert len(x) == len(y)
self.set_x(x, x_units, **kwargs)
self.reset_y_min_max()
self.set_y(y, y_units, y_sig)
self.set_detector(**kwargs)
self.fit_peak(**kwargs)
@classmethod
def from_spectra(cls, x, y, x_units, y_units, y_sig=None, **kwargs):
obj = cls(x, y, x_units=x_units, y_units=y_units, y_sig=y_sig,
**kwargs)
return obj
@classmethod
def from_spectra_series(cls, spec, spec_sig, x_units, y_units, **kwargs):
obj = cls(spec.index, spec.values, x_units=x_units, y_units=y_units,
y_sig=spec_sig.values, **kwargs)
return obj
def set_x(self, x, x_units, x_edges=None, x_widths=None, **kwargs):
# Resetting globals
self.x_edges = None
self.x_widths = None
# Setting x
self.x = np.array(x, dtype=float)
# Units!
valid_x_units = ['Channels', 'Energy (keV)']
assert x_units in valid_x_units, 'x_units: {} not in valid set: {}'.format(
x_units, valid_x_units)
self.x_units = x_units
# Sanity checks!
if x_edges is not None:
assert isinstance(x_edges, np.ndarray), \
'x_edges is a {}'.format(type(x_edges))
assert len(x_edges) == len(self.x) + 1
x_edges = x_edges.astype(float)
if x_widths is not None:
assert isinstance(x_widths, np.ndarray), \
'x_widths is a {}'.format(type(x_widths))
assert len(x_widths) == len(self.x)
x_widths = x_widths.astype(float)
# If only x given we must assume the bins are of equal spacing for
# later normalization
if (x_edges is None) and (x_widths is None):
# All bin widths except the last
binw = np.diff(self.x)
# Check they are all the same
assert np.all(np.abs(np.diff(binw)) < 1e-6), \
'Non-uniform bins and no x_edges or x_widths provided'
# Make bin width array based on first bin
self.x_widths = np.ones_like(self.x, dtype=float) * binw[0]
# Make those edges
self.x_edges = np.concatenate([
self.x - self.x_widths / 2.,
self.x[-1:] + self.x_widths[-1] / 2.])
# If only x_edges given use it to set the x_widths
elif x_widths is None:
self.x_edges = x_edges
self.x_widths = np.diff(self.x_edges)
# If only x_widths given use it to set the x_edges
elif x_edges is None:
self.x_widths = x_widths
self.x_edges = np.concatenate([
self.x - self.x_widths / 2.,
self.x[-1:] + self.x_widths[-1] / 2.])
# If both are given...
else:
# More sanity checks!
assert np.all(np.abs(self.x - (x_edges[1:] - x_widths / 2.)) < 1e-6), \
'Right edges minus half width is not equal to given center'
assert np.all(np.abs(self.x - (x_edges[:-1] + x_widths / 2.)) < 1e-6), \
'Left edges plus half width is not equal to given center'
self.x_widths = x_widths
self.x_edges = x_edges
def set_y(self, y, y_units, y_sig):
"""
Set spec y and uncertainty. If no unc given try to calculate unc.
"""
self.y = np.array(y, dtype=float)
# Units!
valid_y_units = ['Counts', 'Counts / second', 'Counts / keV', 'Counts / second / keV']
assert y_units in valid_y_units, 'y_units: {} not in valid set: {}'.format(
y_units, valid_y_units)
self.y_units = y_units
if y_sig is None:
if self.y_units == 'Counts':
y_sig = np.sqrt(self.y)
else:
raise TypeError('y_units is not `Counts` so I cannot calculate uncertainty from y')
assert len(y_sig) == len(self.y)
self.y_sig = np.array(y_sig, dtype=float)
def get_x(self):
return self.x
def get_y(self):
return self.y
def get_y_sig(self):
return self.y_sig
def set_roi(self, roi):
self.x_min = roi[0]
self.x_max = roi[1]
self.roi_slc = (self.x >= self.x_min) & (self.x < self.x_max)
def get_x_roi(self):
return self.x[self.roi_slc]
def get_y_roi(self):
y = self.y[self.roi_slc]
self.track_y_min_max(y)
return y
def get_y_sig_roi(self):
return self.y_sig[self.roi_slc]
def get_x_widths_roi(self):
return self.x_widths[self.roi_slc]
def set_detector(self, det_type='NaI', **kwargs):
self.det_type = det_type
def guess_fwhm(self, xpeak):
"""
Approximate FWHM guess for the initial Gaussian width (could be improved)
"""
if self.det_type == 'NaI':
fwhm_guess = 0.08 * xpeak
elif self.det_type == 'HPGe':
fwhm_guess = 0.0025 * xpeak
else:
raise NotImplementedError('Detector type ({}) not recognized'.format(self.det_type))
return fwhm_guess
def guess_sigma(self, xpeak):
return self.guess_fwhm(xpeak) / FWHM_SIG_RATIO
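# Illustrative numbers (worked example, not from the original code): with the
# NaI heuristic a 662 keV peak gives fwhm_guess = 0.08 * 662 ~= 53 keV, i.e.
# sigma_guess ~= 53 / 2.35482 ~= 22.5 keV.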
def fit_peak(self, roi=None, xpeak=None, sigma_guess=None,
model_name='gauss-erf-const', **kwargs):
"""
Main routine
"""
# Exit if no roi
if roi is None:
self.fit = None
self.model_name = None
else:
self.model_name = model_name
# Start timer
tic = time.time()
# ---------
# Setup ROI
# ---------
self.set_roi(roi)
x = self.get_x_roi()
y = self.get_y_roi()
y_sig = self.get_y_sig_roi()
x_widths = self.get_x_widths_roi()
# ---------------------------
# Guesses based on input data
# ---------------------------
# Set peak center to center of ROI if not given
if xpeak is None:
xpeak = (x[0] + x[-1]) / 2.
# Guess sigma if not provided
if sigma_guess is None:
fwhm_guess = self.guess_fwhm(xpeak)
sigma_guess = fwhm_guess / FWHM_SIG_RATIO
# Heights at the sides of the ROI
left_shelf_height = y[0]
right_shelf_height = y[-1]
# Line guess
lin_slope = (y[-1] - y[0]) / (x[-1] - x[0])
lin_intercept = y[0] - lin_slope * x[0]
# Two peaks guess (33 and 66 percent through ROI)
xpeak0 = x[0] + (x[-1] - x[0]) * 0.33
xpeak1 = x[0] + (x[-1] - x[0]) * 0.66
# Index at the ROI center
ix_half = int(round(float(len(x)) / 2.))
# -------------------
# Setup fitting model
# -------------------
if model_name == 'gauss-erf-const':
# Models
erf_mod = models.StepModel(form='erf', prefix='erf_')
gauss_mod = models.GaussianModel(prefix='gauss_')
bk_mod = models.ConstantModel(prefix='bk_')
# Initialize parameters
pars = erf_mod.make_params()
pars.update(gauss_mod.make_params())
pars.update(bk_mod.make_params())
# Erfc (sigma and center are locked to gauss below)
pars['erf_amplitude'].set(right_shelf_height - left_shelf_height, max=0.)
# Gauss
pars['gauss_center'].set(xpeak) # , min=xpeak - 2 * fwhm_guess, max=xpeak + 2 * fwhm_guess)
pars['gauss_sigma'].set(sigma_guess)
pars['gauss_amplitude'].set(np.sum(y * x_widths), min=0)
# Background
pars['bk_c'].set(left_shelf_height, min=0.)
# Same center and sigma
pars.add('erf_center', expr='gauss_center')
pars.add('erf_sigma', expr='gauss_sigma * {}'.format(FWHM_SIG_RATIO))
self.model = gauss_mod + erf_mod + bk_mod
elif model_name == 'double-gauss-line':
# Models
lin_mod = models.LinearModel(prefix='lin_')
g0_mod = models.GaussianModel(prefix='gauss0_')
g1_mod = models.GaussianModel(prefix='gauss1_')
# Initialize parameters
pars = lin_mod.make_params()
pars.update(g0_mod.make_params())
pars.update(g1_mod.make_params())
# Line (background)
pars['lin_slope'].set(lin_slope, max=0.)
pars['lin_intercept'].set(lin_intercept)
# Gauss 0 (left)
pars['gauss0_center'].set(xpeak0) # , min=xpeak - 2 * fwhm_guess, max=xpeak + 2 * fwhm_guess)
pars['gauss0_sigma'].set(sigma_guess)
pars['gauss0_amplitude'].set(np.sum(y[:ix_half] * x_widths[:ix_half]), min=0)
# Gauss 1 (right)
pars['gauss1_center'].set(xpeak1) # , min=xpeak - 2 * fwhm_guess, max=xpeak + 2 * fwhm_guess)
pars['gauss1_sigma'].set(sigma_guess)
pars['gauss1_amplitude'].set(np.sum(y[ix_half:] * x_widths[ix_half:]), min=0)
self.model = lin_mod + g0_mod + g1_mod
else:
raise NotImplementedError('Model ({}) not recognized'.format(model_name))
# -----------
# Perform fit
# -----------
try:
self.fit = self.model.fit(y, pars, x=x, weights=1. / y_sig)
except Exception:
print("[ERROR] Couldn't fit peak")
self.fit = None
if self.verbosity > 0:
print('Fit time: {:.3f} seconds'.format(time.time() - tic))
def get_x_from_fit(self):
independent_var = self.fit.model.independent_vars[0]
return self.fit.userkws[independent_var]
def get_y_from_fit(self):
return self.fit.data
def get_y_sig_from_fit(self):
return 1. / self.fit.weights
def eval_init(self, x):
"""
Evaluate the initial fit curve at `x`
"""
return self.model.eval(x=x, **self.fit.init_values)
def eval(self, x):
"""
Evaluate the best fit curve at `x`
"""
return self.model.eval(x=x, **self.fit.best_values)
def default_plot(self):
if self.fit is not None:
self.fit.plot()
def reset_y_min_max(self):
self.y_min = 0.
self.y_max = 0.
def track_y_min_max(self, y):
"""Always maintain y range +/- 5% of plotted ymin/ymax"""
new_y_min = y.min() - abs(y.min() * 0.2)
new_y_max = y.max() + abs(y.max() * 0.2)
if new_y_min < self.y_min:
self.y_min = new_y_min
if new_y_max > self.y_max:
self.y_max = new_y_max
def custom_plot(self, title=None, savefname=None, **kwargs):
self.reset_y_min_max()
# Prepare plots
gs = GridSpec(2, 2, height_ratios=(4, 1))
gs.update(left=0.05, right=0.99, wspace=0.03, top=0.94, bottom=0.06,
hspace=0.06)
fig = plt.figure(figsize=(18, 9))
fit_ax = fig.add_subplot(gs[0, 0])
res_ax = fig.add_subplot(gs[1, 0], sharex=fit_ax)
txt_ax = fig.add_subplot(gs[:, 1])
# Set fig title
if title is not None:
fig.suptitle(str(title), fontweight='bold', fontsize=24)
# ---------------------------------------
# Fit plot (keep track of min/max in roi)
# ---------------------------------------
# Smooth roi x values
x_plot = np.linspace(self.x_min, self.x_max, 1000)
# All data (not only roi)
fit_ax.errorbar(self.get_x(), self.get_y(), yerr=self.get_y_sig(),
c='k', fmt='o', markersize=5, label='data')
# Init fit
y = self.eval_init(x_plot)
self.track_y_min_max(y)
fit_ax.plot(x_plot, y, 'k--', label='init')
# Best fit
y = self.eval(x_plot)
self.track_y_min_max(y)
fit_ax.plot(x_plot, y, color='#e31a1c', label='best fit')
# Components (currently will work for <= 3 components)
colors = ['#1f78b4', '#33a02c', '#6a3d9a']
for i, m in enumerate(self.fit.model.components):
y = m.eval(x=x_plot, **self.fit.best_values)
self.track_y_min_max(y)
fit_ax.plot(x_plot, y, label=m.prefix, color=colors[i])
# Plot Peak center and FWHM
peak_centers = self.get_peak_center()
peak_fwhm = self.get_peak_fwhm_absolute()
for param, series in peak_centers.iterrows():
fit_ax.axvline(series['value'], color='#ff7f00',
label=param + '_center')
fit_ax.axvspan(series['value'] - peak_fwhm.loc[param, 'value'] / 2.,
series['value'] + peak_fwhm.loc[param, 'value'] / 2.,
color='#ff7f00', alpha=0.2, label=param + '_fwhm')
# Misc
fit_ax.legend(loc='upper right')
fit_ax.set_ylabel(self.y_units)
# Set viewing window to only include the roi (not entire spectrum)
fit_ax.set_xlim([self.x_min, self.x_max])
fit_ax.set_ylim([self.y_min, self.y_max])
# ---------
# Residuals
# ---------
res_ax.errorbar(self.get_x_from_fit(),
self.eval(self.get_x_from_fit()) - self.get_y_from_fit(),
yerr=self.get_y_sig_from_fit(), fmt='o', color='k',
markersize=5, label='residuals')
res_ax.set_ylabel('Residuals')
res_ax.set_xlabel(self.x_units)
# -------------------
# Fit report (txt_ax)
# -------------------
txt_ax.get_xaxis().set_visible(False)
txt_ax.get_yaxis().set_visible(False)
best_fit_values = ''
op = self.fit.params
for p in self.fit.params:
best_fit_values += '{:15} {: .6e} +/- {:.5e} ({:6.1%})\n'.format(
p, op[p].value, op[p].stderr, abs(op[p].stderr / op[p].value))
best_fit_values += '{:15} {: .6e}\n'.format('Chi Squared:', self.fit.chisqr)
best_fit_values += '{:15} {: .6e}'.format('Reduced Chi Sq:', self.fit.redchi)
props = dict(boxstyle='round', facecolor='white', edgecolor='black', alpha=1)
props = dict(facecolor='white', edgecolor='none', alpha=0)
fp = FontProperties(family='monospace', size=8)
# Remove first 2 lines of fit report (long model description)
s = '\n'.join(self.fit.fit_report().split('\n')[2:])
# Add some more details
s += '\n'
peak_panel = self.get_peak_info_panel()
for model_name in peak_panel.items:
s += model_name + '\n'
for param_name in peak_panel.major_axis:
v = peak_panel.loc[model_name, param_name, 'value']
e = peak_panel.loc[model_name, param_name, 'stderr']
s += ' {:24}: {: .6e} +/- {:.5e} ({:6.1%})\n'.format(
param_name, v, e, e / v)
# Add to empty axis
txt_ax.text(x=0.01, y=0.99, s=s, fontproperties=fp,
ha='left', va='top', transform=txt_ax.transAxes,
bbox=props)
if savefname is not None:
fig.savefig(savefname)
plt.close(fig)
def get_param_value(self, param):
return self.fit.params[param].value
def get_param_sig(self, param):
return self.fit.params[param].stderr
def get_param_names_by_model_type(self, model_type, param):
keys = []
for k in self.fit.best_values.keys():
if k.startswith(model_type) and k.endswith(param):
keys.append(k)
assert len(keys) > 0, 'No {} keys for {}: \n{}'.format(
param, model_type, self.fit.best_values)
return keys
def get_params_by_model_type(self, model_type, param):
param_values = OrderedDict([
('model', []),
('value', []),
('stderr', [])])
for p in self.get_param_names_by_model_type(model_type, param):
param_values['model'].append(p.split('_')[0])
param_values['value'].append(self.get_param_value(p))
param_values['stderr'].append(self.get_param_sig(p))
param_values = pd.DataFrame(param_values)
param_values = param_values.set_index('model')
return param_values
def get_peak_size(self):
return self.get_params_by_model_type('gauss', 'amplitude')
def get_peak_center(self):
return self.get_params_by_model_type('gauss', 'center')
def get_peak_fwhm_absolute(self):
return self.get_params_by_model_type('gauss', 'sigma') * FWHM_SIG_RATIO
def get_peak_fwhm_relative(self):
f = self.get_peak_fwhm_absolute()
c = self.get_peak_center()
v = f['value'] / c['value']
dv = v * np.sqrt(
(f['stderr'] / f['value']) ** 2 + (c['stderr'] / c['value']) ** 2)
out = v.to_frame(name='value')
out['stderr'] = dv
return out
def get_peak_size_units(self):
if self.y_units == 'Counts / second / keV':
return 'cps'
elif self.y_units == 'Counts / keV':
return 'Counts'
else:
return 'unscaled'
def get_peak_info_panel(self):
pn = pd.Panel(OrderedDict([
('Peak Size ({})'.format(self.get_peak_size_units()), self.get_peak_size()),
('Peak Center ({})'.format(self.x_units), self.get_peak_center()),
('FWHM ({})'.format(self.x_units), self.get_peak_fwhm_absolute()),
('FWHM (ratio)', self.get_peak_fwhm_relative()),
]))
pn = pn.swapaxes('items', 'major')
return pn
def get_peak_characteristics(self):
pc = self.get_peak_center()
pfa = self.get_peak_fwhm_absolute()
pfr = self.get_peak_fwhm_relative()
ps = self.get_peak_size()
data = OrderedDict()
for peak_name in pc.index:
data[peak_name] = OrderedDict([
('Peak_Center_{}'.format(self.x_units), pc.loc[peak_name, 'value']),
('Peak_Center_{}_err'.format(self.x_units), pc.loc[peak_name, 'stderr']),
('Peak_FWHM_{}'.format(self.x_units), pfa.loc[peak_name, 'value']),
('Peak_FWHM_{}_err'.format(self.x_units), pfa.loc[peak_name, 'stderr']),
('Peak_FWHM_Ratio', pfr.loc[peak_name, 'value']),
('Peak_FWHM_Ratio_err', pfr.loc[peak_name, 'stderr']),
('Peak_Size_{}'.format(self.y_units), ps.loc[peak_name, 'value']),
('Peak_Size_{}_err'.format(self.y_units), ps.loc[peak_name, 'stderr']),
('Reduced_Chi_Squared', self.get_reduced_chi_squared()),
])
return pd.DataFrame(data)
def get_chi_squared(self):
return self.fit.chisqr
def get_reduced_chi_squared(self):
return self.fit.redchi
if __name__ == "__main__":
# Load sample spectra and energies
df = pd.read_csv('fitting_test_spectra.csv', index_col=0)
for c in ['source']:
for kwargs in [{'roi': [550, 800], 'xpeak': 662, 'title': 'Cs137'},
{'roi': [1350, 1550], 'xpeak': 1460, 'title': 'K40'},
{'roi': [2450, 2850], 'xpeak': 2614, 'title': 'Tl208'}]:
print(c)
spec = df[c]
spec_sig = df[c + '_sig']
pf = PeakFitter.from_spectra_series(
spec, spec_sig, x_units='Energy (keV)',
y_units='Counts / second / keV', **kwargs)
print('Peak Characteristics:')
print(pf.get_peak_characteristics())
pf.custom_plot(**kwargs)
plt.show()
|
bearing/radwatch-analysis
|
peak_fitting.py
|
Python
|
mit
| 21,235
|
[
"Brian",
"Gaussian"
] |
a52027d8313db5f8e355e603e48e201bb9dec287dfd921927d9e7849c687edfe
|
import os,glob,h5py,astropy,numpy,scipy
from astropy.time import Time
from datetime import datetime,timedelta
from glue.segments import segment,segmentlist
from gwpy.segments import DataQualityDict,DataQualityFlag
from gwpy.timeseries import TimeSeries,TimeSeriesList
from pycbc import types
def impulse_data(epoch=1153742417.0,sample_rate=512,psd_segment_length=60):
"""
Create fake time series data initialised to zeros (no signal and no noise).
Parameters
----------
sample_rate : int
Sampling rate of fake data
psd_segment_length : int
Length of each segment in seconds
"""
ts_data = numpy.zeros(sample_rate * psd_segment_length)
ts_data = types.TimeSeries(ts_data, delta_t=1.0/sample_rate, epoch=epoch)
return ts_data
def fake_data(epoch=1153742417.0,sample_rate=512,psd_segment_length=60,nsegs=16):
"""
Create fake time series data. The flux data is generated using a
random Gaussian distribution.
Parameters
----------
sample_rate : int
Sampling rate of fake data
psd_segment_length : int
Length of each segment in seconds
nsegs : int
Number of segments present in time series
"""
ts_data = numpy.random.normal(0,1,sample_rate*psd_segment_length*nsegs)
ts_data = types.TimeSeries(ts_data,delta_t=1.0/sample_rate,epoch=epoch)
return ts_data
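# Illustrative usage (not part of the original module): 16 one-minute segments
# of white noise sampled at 512 Hz, i.e. 512 * 60 * 16 = 491520 samples.
#   ts = fake_data(sample_rate=512, psd_segment_length=60, nsegs=16)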
def get_data(station,starttime,endtime,rep='/GNOMEDrive/gnome/serverdata/',resample=None):
"""
Glob all files within the user-defined period and extract data.
Parameters
----------
station : str
Name of the station to be analysed
starttime : str
Date and time of the first required magnetic field data
endtime : str
Date and time of the last required magnetic field data
Return
------
ts_data, ts_list, activity : TimeSeries, TimeSeriesList, DataQualityDict
Time series data for the selected time period, list of time series for
each segment, and data-quality segment information for the station
"""
# Define data attribute to be extracted from HDF5 files
setname = "MagneticFields"
dstr = ['%Y','%m','%d','%H','%M','%S','%f']
dsplit = '-'.join(dstr[:starttime.count('-')+1])
start = datetime.strptime(starttime,dsplit)
dsplit = '-'.join(dstr[:endtime.count('-')+1])
end = datetime.strptime(endtime,dsplit)
dataset = []
for date in numpy.arange(start,end,timedelta(minutes=1)):
date = date.astype(datetime)
path1 = rep+station+'/'+date.strftime("%Y/%m/%d/")
path2 = station+'_'+date.strftime("%Y%m%d_%H*.hdf5")
fullpath = os.path.join(path1,path2)
dataset += glob.glob(fullpath)
if len(dataset)==0:
print "ERROR: No data files were found..."
quit()
file_order,data_order = {},{}
for fname in dataset:
hfile = h5py.File(fname, "r")
# Extract all atributes from the data
attrs = hfile[setname].attrs
# Define each attribute
dstr, t0, t1 = attrs["Date"], attrs["t0"], attrs["t1"]
# Construct GPS starting time from data
start_utc = construct_utc_from_metadata(dstr, t0)
# Construct GPS ending time from data
end_utc = construct_utc_from_metadata(dstr, t1)
# Represent the range of times in the semi-open interval
segfile = segment(start_utc,end_utc)
file_order[segfile] = fname
data_order[segfile] = hfile
# Create list of time series from every segment
ts_list = TimeSeriesList()
for seg in sorted(file_order):
hfile = h5py.File(file_order[seg], "r")
dset = hfile[setname]
sample_rate = dset.attrs["SamplingRate(Hz)"]
gps_epoch = construct_utc_from_metadata(dset.attrs["Date"], dset.attrs["t0"])
data = hfile[setname][:]
ts_data = TimeSeries(data, sample_rate=sample_rate, epoch=gps_epoch)
ts_list.append(ts_data)
hfile.close()
# Generate an ASCII representation of the GPS timestamped segments of time covered by the input data
seglist = segmentlist(data_order.keys())
# Sort the segment list
seglist.sort()
# Initialise dictionary for segment information
activity = DataQualityDict()
# Save time span for each segment in ASCII file
with open("segments.txt", "w") as fout:
for seg in seglist:
print >>fout, "%10.9f %10.9f" % seg
# FIXME: Active should be masked from the sanity channel
activity[station] = DataQualityFlag(station,active=seglist.coalesce(),known=seglist.coalesce())
# Generate an ASCII representation of the GPS timestamped segments of time covered by the input data
seglist = segmentlist(data_order.keys())
# Sort the segment list
seglist.sort()
# Retrieve channel data for all the segments
full_data = numpy.hstack([data_order[seg][setname][:] for seg in seglist])
new_sample_rate = float(sample_rate) if resample==None else float(resample)
new_data_length = len(full_data)*new_sample_rate/float(sample_rate)
full_data = scipy.signal.resample(full_data,int(new_data_length))
# Models a time series consisting of uniformly sampled scalar values
ts_data = types.TimeSeries(full_data,delta_t=1./new_sample_rate,epoch=seglist[0][0])
for v in data_order.values():
v.close()
return ts_data,ts_list,activity
def time_convert(starttime,endtime):
dstr = ['%Y','%m','%d','%H','%M','%S','%f']
dsplit = '-'.join(dstr[:starttime.count('-')+1])
start = datetime.strptime(starttime,dsplit)
starttime = construct_utc_from_metadata(start.strftime("%Y/%m/%d"),
start.strftime("%H:%M:%S.%f"))
dsplit = '-'.join(dstr[:endtime.count('-')+1])
end = datetime.strptime(endtime,dsplit)
endtime = construct_utc_from_metadata(end.strftime("%Y/%m/%d"),
end.strftime("%H:%M:%S.%f"))
return starttime,endtime
def construct_utc_from_metadata(datestr, t0str):
"""
.. _construct_utc_from_metadata:
Constructing UTC timestamp from metadata
Parameters
----------
datestr : str
Date of the extracted data
t0str : str
GPS time
"""
instr = "%d-%d-%02dT" % tuple(map(int, datestr.split('/')))
instr += t0str
t = astropy.time.Time(instr, format='isot', scale='utc')
return t.gps
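# Illustrative usage (not part of the original module): convert the HDF5
# metadata strings into a GPS timestamp (a float number of GPS seconds).
#   gps = construct_utc_from_metadata('2016/11/24', '10:00:00.000')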
|
GNOME-physics/gdas
|
gdas/retrieve.py
|
Python
|
mit
| 6,442
|
[
"Gaussian"
] |
4e69804bb3bd8736d136839f2e4df8549eaeb7e84dad7da5688fe267034f01d3
|
# interpol2d.py ---
#
# Filename: interpol2d.py
# Description:
# Author:
# Maintainer:
# Created: Thu Jun 28 15:19:46 2012 (+0530)
# Version:
# Last-Updated: Thu Jun 28 17:11:42 2012 (+0530)
# By: subha
# Update #: 49
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import numpy as np
import sys
sys.path.append('../../python')
import moose
def interpolation_demo():
interpol = moose.Interpol2D('/interpol2D')
interpol.xmin = 0.0
interpol.xmax = 1.0
interpol.ymin = 0.0
interpol.ymax = 1.0
# Make a 50 element array with entries at equal distance from
# [0, 1] and reshape it into a 10x5 matrix and assign to table.
matrix = np.linspace(0, 1.0, 50).reshape(10, 5)
print 'Setting table to'
print matrix
interpol.tableVector2D = matrix
# Interpolate at a point inside the table's (x, y) domain.
pos = (0.8, 0.3)
print 'Interpolated value at', pos
print interpol.z[pos[0], pos[1]]
print 'Point going out of bound on both x and y', interpol.z[1.1, 1.1]
print 'Point going out of bound on y only', interpol.z[0.5, 1.1]
if __name__ == '__main__':
interpolation_demo()
#
# interpol2d.py ends here
|
dilawar/moose-full
|
moose-examples/snippets/interpol2d.py
|
Python
|
gpl-2.0
| 1,991
|
[
"MOOSE"
] |
b1cf09e985a4d0a0a39d129fe35eb7ea8b9536a996c6f864fcd7ab78dd5e5348
|
import scipy as sp
import pdb
import scipy.linalg as la
import fastlmm.util.util as utilx
import sys
def genphen(y_G0,G1,covDat,options,nInd,K1=None,fracCausal=None,randseed=None):
'''
Generate synthetic phenotype with a LMM and linear kernels, using SNPs in G1 for signal,
SNPs in G0 for background, and one of two link functions.
If genlink=='linear', uses linear LMM. If genlink='logistic', then thresholds to get binary.
fracCausal is the fraction of SNPs that are causal (rounding up) when G1 is provided
Only one of G1 and K1 can be not None (G1 is good for low rank, K1 for full rank)
Returns:
y (binary, or real-valued, as dictated by genlink)
If y is binary, casefrac are 1s, and the rest 0s (default casefrac=0.5)
Notes: uses sp.random.X so that the seed that was set can be used
'''
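# A sketch of the generative model implemented below (before the optional
# logistic thresholding):
#   y = y_G0 + sqrt(varG) * G1 * w + X_cov * beta + eps_t + eps
# with w ~ N(0, I), beta ~ N(0, varCov * I), eps ~ N(0, varE * I) and eps_t
# heavy-tailed Student-t noise with df = varETd, scaled by sqrt(varET).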
sp.random.seed(int(randseed % sys.maxint))
if options.has_key("numBackSnps") and options["numBackSnps"]>0:
raise Exception("I accidentally deleted this move from FastLMmSet to here, see code for FastLmmSet.py from 11/24/2013")
## generate from the causal (not background) SNPs---------------
assert not (G1 is not None and K1 is not None), "need to provide only either G1 or K1"
fracCausal=options['fracCausal']
if G1 is not None and options["varG"]>0:
if fracCausal>1.0 or fracCausal<0.01: raise Exception("fracCausal should be between 0.01 and 1")
nSnp=G1.shape[1]
if fracCausal !=1.0:
nSnpNew=sp.ceil(fracCausal*nSnp)
permutationIndex = utilx.generatePermutation(sp.arange(0,nSnp),randseed)[0:nSnpNew]
G1new=G1[:,permutationIndex]
else:
nSnpNew=nSnp
G1new=G1
elif K1 is not None:
assert(fracCausal==1.0 or fracCausal is None)
pass
else:
assert options['varG']==0, "varG is not zero, but neither G1 nor K1 were provided"
stdG=sp.sqrt(options['varG'])
if stdG>0:
if G1 is not None:
y_G1=stdG*G1new.dot(sp.random.randn(nSnpNew,1)) #good for low rank
else:
K1chol = la.cholesky(K1)
y_G1=stdG*K1chol.dot(sp.random.randn(nInd,1)) #good for full rank
else:
y_G1=0.0
##----------------------------------------------------------------
if covDat is not None:
nCov=covDat.shape[1]
covWeights=sp.random.randn(nCov, 1)*sp.sqrt(options['varCov'])
y_beta=covDat.dot(covWeights)
else:
y_beta=0.0
y_noise_t=0
#heavy-tailed noise
if options['varET']>0:
y_noise_t=sp.random.standard_t(df=options['varETd'],size=(nInd,1))*sp.sqrt(options['varET'])
else:
y_noise_t=0
#gaussian noise
y_noise=sp.random.randn(nInd,1)*sp.sqrt(options['varE'])
y=y_noise + y_noise_t + y_G0 + y_beta + y_G1
y=y[:,0]#y.flatten()
if options['link']=='linear':
return y
elif options['link']=='logistic':
if options['casefrac'] is None: options['casefrac']=0.5
ysort=sp.sort(y,axis=None)
thresh=ysort[sp.floor(nInd*options['casefrac'])]
ybin=sp.array(y>thresh,dtype="float")
return ybin
else:
raise Exception("Invald link function for data generation")
|
zhonghualiu/FaST-LMM
|
fastlmm/util/genphen.py
|
Python
|
apache-2.0
| 3,540
|
[
"Gaussian"
] |
d4220f703e486f2da7abad681869d90684aa5d6a743bfbb466b444a586844af2
|
"""Collection of DIRAC useful file related modules.
.. warning::
By default on Error they return None.
"""
__RCSID__ = "$Id$"
import os
import hashlib
import random
import glob
import sys
import re
import errno
# Translation table of a given unit to Bytes
# I know, it should be kB...
SIZE_UNIT_CONVERSION = {
'B': 1,
'KB': 1024,
'MB': 1024 ** 2,
'GB': 1024 ** 3,
'TB': 1024 ** 4,
'PB': 1024 ** 5}
def mkDir(path):
""" Emulate 'mkdir -p path' (if path exists already, don't raise an exception)
"""
try:
if os.path.isdir(path):
return
os.makedirs(path)
except OSError as osError:
if osError.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def mkLink(src, dst):
""" Protected creation of simbolic link
"""
try:
os.symlink(src, dst)
except OSError as osError:
if osError.errno == errno.EEXIST and os.path.islink(dst) and os.path.realpath(dst) == src:
pass
else:
raise
def makeGuid(fileName=None):
"""Utility to create GUID. If a filename is provided the
GUID will correspond to its content's hexadecimal md5 checksum.
Otherwise a random seed is used to create the GUID.
The format is capitalized 8-4-4-4-12.
.. warning::
Could return None in case of OSError or IOError.
:param string fileName: name of file
"""
myMd5 = hashlib.md5()
if fileName:
try:
with open(fileName, 'r') as fd:
data = fd.read(10 * 1024 * 1024)
myMd5.update(data)
except BaseException:
return None
else:
myMd5.update(str(random.getrandbits(128)))
md5HexString = myMd5.hexdigest().upper()
return generateGuid(md5HexString, "MD5")
def generateGuid(checksum, checksumtype):
""" Generate a GUID based on the file checksum
"""
if checksum:
if checksumtype == "MD5":
checksumString = checksum
elif checksumtype == "Adler32":
checksumString = str(checksum).zfill(32)
else:
checksumString = ''
if checksumString:
guid = "%s-%s-%s-%s-%s" % (checksumString[0:8],
checksumString[8:12],
checksumString[12:16],
checksumString[16:20],
checksumString[20:32])
guid = guid.upper()
return guid
# Failed to use the check sum, generate a new guid
myMd5 = hashlib.md5()
myMd5.update(str(random.getrandbits(128)))
md5HexString = myMd5.hexdigest()
guid = "%s-%s-%s-%s-%s" % (md5HexString[0:8],
md5HexString[8:12],
md5HexString[12:16],
md5HexString[16:20],
md5HexString[20:32])
guid = guid.upper()
return guid
def checkGuid(guid):
"""Checks whether a supplied GUID is of the correct format.
The guid is a string of 36 characters [0-9A-F] long split into 5 parts of length 8-4-4-4-12.
.. warning::
As we are using GUID produced by various services and some of them could not follow
convention, this function is passing by a guid which can be made of lower case chars or even just
have 5 parts of proper length with whatever chars.
:param string guid: string to be checked
:return: True (False) if supplied string is (not) a valid GUID.
"""
reGUID = re.compile("^[0-9A-F]{8}(-[0-9A-F]{4}){3}-[0-9A-F]{12}$")
if reGUID.match(guid.upper()):
return True
else:
guid = [len(x) for x in guid.split("-")]
if (guid == [8, 4, 4, 4, 12]):
return True
return False
def getSize(fileName):
"""Get size of a file.
:param string fileName: name of file to be checked
The os module claims only OSError can be thrown,
but just for curiosity it's catching all possible exceptions.
.. warning::
On any exception it returns -1.
"""
try:
return os.stat(fileName)[6]
except OSError:
return -1
def getGlobbedTotalSize(files):
"""Get total size of a list of files or a single file.
Globs the parameter to allow regular expressions.
:params list files: list or tuple of strings of files
"""
totalSize = 0
if isinstance(files, (list, tuple)):
for entry in files:
size = getGlobbedTotalSize(entry)
if size == -1:
size = 0
totalSize += size
else:
for path in glob.glob(files):
if os.path.isdir(path) and not os.path.islink(path):
for content in os.listdir(path):
totalSize += getGlobbedTotalSize(os.path.join(path, content))
if os.path.isfile(path):
size = getSize(path)
if size == -1:
size = 0
totalSize += size
return totalSize
def getGlobbedFiles(files):
"""Get list of files or a single file.
Globs the parameter to allow regular expressions.
:params list files: list or tuple of strings of files
"""
globbedFiles = []
if isinstance(files, (list, tuple)):
for entry in files:
globbedFiles += getGlobbedFiles(entry)
else:
for path in glob.glob(files):
if os.path.isdir(path) and not os.path.islink(path):
for content in os.listdir(path):
globbedFiles += getGlobbedFiles(os.path.join(path, content))
if os.path.isfile(path):
globbedFiles.append(path)
return globbedFiles
def getCommonPath(files):
"""Get the common path for all files in the file list.
:param files: list of strings with paths
:type files: python:list
"""
def properSplit(dirPath):
"""Splitting of path to drive and path parts for non-Unix file systems.
:param string dirPath: path
"""
nDrive, nPath = os.path.splitdrive(dirPath)
return [nDrive] + [d for d in nPath.split(os.sep) if d.strip()]
if not files:
return ""
commonPath = properSplit(files[0])
for fileName in files:
if os.path.isdir(fileName):
dirPath = fileName
else:
dirPath = os.path.dirname(fileName)
nPath = properSplit(dirPath)
tPath = []
for i in range(min(len(commonPath), len(nPath))):
if commonPath[i] != nPath[i]:
break
tPath.append(commonPath[i])
if not tPath:
return ""
commonPath = tPath
return tPath[0] + os.sep + os.path.join(*tPath[1:])
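# Illustrative example (assumption, Unix-style paths): the deepest common
# directory is returned, e.g.
#   getCommonPath(['/a/b/c/file1', '/a/b/d/file2']) == '/a/b'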
def getMD5ForFiles(fileList):
"""Calculate md5 for the content of all the files.
:param fileList: list of paths
:type fileList: python:list
"""
fileList.sort()
hashMD5 = hashlib.md5()
for filePath in fileList:
if os.path.isdir(filePath):
continue
with open(filePath, "rb") as fd:
buf = fd.read(4096)
while buf:
hashMD5.update(buf)
buf = fd.read(4096)
return hashMD5.hexdigest()
def convertSizeUnits(size, srcUnit, dstUnit):
""" Converts a number from a given source unit to a destination unit.
Example:
In [1]: convertSizeUnits(1024, 'B', 'kB')
Out[1]: 1
In [2]: convertSizeUnits(1024, 'MB', 'kB')
Out[2]: 1048576
:param size: number to convert
:param srcUnit: unit of the number. Any of ( 'B', 'kB', 'MB', 'GB', 'TB', 'PB')
:param dstUnit: unit expected for the return. Any of ( 'B', 'kB', 'MB', 'GB', 'TB', 'PB')
:returns: the size number converted in the dstUnit. In case of problem -sys.maxint is returned (negative)
"""
srcUnit = srcUnit.upper()
dstUnit = dstUnit.upper()
try:
convertedValue = float(size) * SIZE_UNIT_CONVERSION[srcUnit] / SIZE_UNIT_CONVERSION[dstUnit]
return convertedValue
# TypeError, ValueError: size is not a number
# KeyError: srcUnit or dstUnit are not in the conversion list
except (TypeError, ValueError, KeyError):
return -sys.maxsize
if __name__ == "__main__":
for p in sys.argv[1:]:
print "%s : %s bytes" % (p, getGlobbedTotalSize(p))
|
andresailer/DIRAC
|
Core/Utilities/File.py
|
Python
|
gpl-3.0
| 7,879
|
[
"DIRAC"
] |
08ee42ed53ed3195641027cee4bb9fa6777527eaa124b0a746c30e90fec154d0
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Distribution configuration for neurom
"""
# pylint: disable=R0801
from setuptools import setup
from setuptools import find_packages
setup(
description='NeuroM: a light-weight neuron morphology analysis package',
author='Blue Brain Project, EPFL',
url='https://github.com/BlueBrain/NeuroM',
install_requires=[
'click>=7.0',
'h5py>=3.1.0',
'matplotlib>=3.2.1',
'numpy>=1.8.0',
'pandas>=1.0.5',
'pyyaml>=3.10',
'scipy>=1.2.0',
'tqdm>=4.8.4',
],
packages=find_packages(),
license='BSD',
scripts=['apps/raw_data_check',
'apps/morph_check',
'apps/morph_stats',
],
entry_points={
'console_scripts': ['neurom=neurom.apps.cli:cli']
},
name='neurom',
extras_require={
'plotly': ['plotly>=3.6.0'],
},
include_package_data=True,
python_requires='>=3.5',
classifiers=[
'Development Status :: 6 - Mature',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
use_scm_version={"local_scheme": "no-local-version"},
setup_requires=['setuptools_scm'],
)
|
wizmer/NeuroM
|
setup.py
|
Python
|
bsd-3-clause
| 3,142
|
[
"NEURON"
] |
5a620c3691b18209d026b85d7336e401dce6f3eb952e26062d2225c40b1c0888
|
"""This tests the linked_list module."""
import pytest
INSTANTIATION_TABLE = [
([3, 7, 5, 6, 4], 4),
((7, 6, 5, 4), 4),
([4], 4)
]
POP_TABLE = [
([7, 9], 9),
([33, 0, 10], 10),
((3, 2, 1, 34), 34),
]
SIZE_TABLE = [
([795]),
([2, 33]),
([3, 4, 5]),
((5, 3, 2, 23))
]
SEARCH_TABLE = [
(3, [7, 3, 2], 3),
(7, (7, 5, 5), 7),
(3, {2, 1, 3}, 3),
(8, [8, 8, 8], 8),
]
REMOVE_TABLE = [
(3, [7, 3, 2], 7),
(7, (8, 7, 5), 8),
(5, {8, 7, 5}, 7),
]
REMOVE_TABLE_2 = [
(3, [7, 3, 2], 7),
(7, (8, 7, 5), 8),
(5, {8, 7, 5}, 8),
]
REMOVE_TABLE_3 = [
(2, [7, 3, 2], 3),
(5, (8, 7, 5), 7),
]
def test_linkedlist():
"""Test instantiation LinkedList class."""
from linked_list import LinkedList
list_name = LinkedList()
assert list_name.size_of_list == 0
def test_non_iterable_raises_error():
"""Passing a non interable into new LinkedList raises TypeError."""
from linked_list import LinkedList
with pytest.raises(TypeError):
list_name = LinkedList(4)
@pytest.mark.parametrize("val, result", INSTANTIATION_TABLE)
def test_head_is_last_value_in_table(val, result):
"""Test push method to add value to front of linked list."""
from linked_list import LinkedList
list_name = LinkedList(val)
assert list_name.head.val == result
@pytest.mark.parametrize("val, result", POP_TABLE)
def test_pop(val, result):
"""Test pop method to remove node off the front of a linked list."""
from linked_list import LinkedList
list_name = LinkedList(val)
assert list_name.pop() == result
@pytest.mark.parametrize("val", SIZE_TABLE)
def test_size(val):
"""Test size method in LinkedList class."""
from linked_list import LinkedList
list_name = LinkedList(val)
assert list_name.size() == len(val)
@pytest.mark.parametrize('val_to_search_for, val1, result', SEARCH_TABLE)
def test_search(val_to_search_for, val1, result):
"""Test search method to find value."""
from linked_list import LinkedList
search_list = LinkedList(val1)
assert search_list.search(val_to_search_for).val == result
def test_search_returns_none_when_arg_not_found():
"""Make sure search returns a NameError when search term not found."""
from linked_list import LinkedList
search_list = LinkedList([8, 7, 6])
assert search_list.search(9) is None
@pytest.mark.parametrize("val_to_delete, val1, result", REMOVE_TABLE)
def test_remove_decreases_size(val_to_delete, val1, result):
"""Test remove method in LinkedList class."""
from linked_list import LinkedList
list_name = LinkedList(val1)
list_name.remove(list_name.search(val_to_delete))
assert list_name.size() == len(val1) - 1
@pytest.mark.parametrize("val_to_delete, val1, result", REMOVE_TABLE_2)
def test_remove_nodes_point_in_right_direction(val_to_delete, val1, result):
"""Test remove method in LinkedList class."""
from linked_list import LinkedList
list_name = LinkedList(val1)
list_name.remove(list_name.search(val_to_delete))
assert list_name.head.next.val == result
def test_remove_last():
"""Test for removing the last value."""
from linked_list import LinkedList
list_name = LinkedList([5, 6, 7])
list_name.remove(list_name.search(5))
assert list_name.size() == 2
@pytest.mark.parametrize("val_to_delete, val1, result", REMOVE_TABLE_3)
def test_remove_head_makes_new_head(val_to_delete, val1, result):
"""Test remove method in LinkedList class."""
from linked_list import LinkedList
list_name = LinkedList(val1)
list_name.remove(list_name.search(val_to_delete))
assert list_name.head.val == result
def test_remove_head():
"""Test for removing the last value."""
from linked_list import LinkedList
list_name = LinkedList([5, 6, 7])
list_name.remove(list_name.search(7))
assert list_name.size() == 2
def test_remove_not_in_list_raises_error():
"""Test that removing something not in list raises a ValueError."""
from linked_list import LinkedList
list_name = LinkedList([5, 6, 7])
with pytest.raises(ValueError):
list_name.remove(list_name.search(9))
def test_display():
"""Test for displaying as a tuple."""
from linked_list import LinkedList
list_name = LinkedList([5, 6, '*adf'])
assert list_name.display() == '(*adf, 6, 5)'
def test_pop_empty_list_raise_error():
"""Popping an empty list should raise an error."""
from linked_list import LinkedList
list_name = LinkedList()
with pytest.raises(IndexError):
list_name.pop()
|
Copenbacon/data-structures
|
src/test_linked_list.py
|
Python
|
mit
| 4,621
|
[
"ADF"
] |
d7f1ca43e0b4f696a68db6e5bbcacaefa25881274b5cfd2d11bd48a5f650dd4f
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
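# Illustrative example (not part of the original script):
#   partition(lambda n: n % 2 == 0, [1, 2, 3, 4]) == ([2, 4], [1, 3])
# i.e. elements satisfying the predicate come first, then the rest.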
class grafeasCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'batch_create_notes': ('parent', 'notes', ),
'batch_create_occurrences': ('parent', 'occurrences', ),
'create_note': ('parent', 'note_id', 'note', ),
'create_occurrence': ('parent', 'occurrence', ),
'delete_note': ('name', ),
'delete_occurrence': ('name', ),
'get_note': ('name', ),
'get_occurrence': ('name', ),
'get_occurrence_note': ('name', ),
'list_note_occurrences': ('name', 'filter', 'page_size', 'page_token', ),
'list_notes': ('parent', 'filter', 'page_size', 'page_token', ),
'list_occurrences': ('parent', 'filter', 'page_size', 'page_token', ),
'update_note': ('name', 'note', 'update_mask', ),
'update_occurrence': ('name', 'occurrence', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=grafeasCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the grafeas client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
|
googleapis/python-grafeas
|
scripts/fixup_grafeas_v1_keywords.py
|
Python
|
apache-2.0
| 6,683
|
[
"VisIt"
] |
bac23ad20a766f67c320f8f78d4ed016d591ce50205d917a199f409fc2dfeb14
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
from unittest import TestCase, main
from skbio import DistanceMatrix, TreeNode, nj
from skbio.tree._nj import (
_compute_q, _compute_collapsed_dm, _lowest_index,
_pair_members_to_new_node)
class NjTests(TestCase):
def setUp(self):
data1 = [[0, 5, 9, 9, 8],
[5, 0, 10, 10, 9],
[9, 10, 0, 8, 7],
[9, 10, 8, 0, 3],
[8, 9, 7, 3, 0]]
ids1 = list('abcde')
self.dm1 = DistanceMatrix(data1, ids1)
# this newick string was confirmed against http://www.trex.uqam.ca/
# which generated the following (isomorphic) newick string:
# (d:2.0000,e:1.0000,(c:4.0000,(a:2.0000,b:3.0000):3.0000):2.0000);
self.expected1_str = ("(d:2.000000, (c:4.000000, (b:3.000000,"
" a:2.000000):3.000000):2.000000, e:1.000000);")
self.expected1_TreeNode = TreeNode.read(
io.StringIO(self.expected1_str))
# this example was pulled from the Phylip manual
# http://evolution.genetics.washington.edu/phylip/doc/neighbor.html
data2 = [[0.0000, 1.6866, 1.7198, 1.6606, 1.5243, 1.6043, 1.5905],
[1.6866, 0.0000, 1.5232, 1.4841, 1.4465, 1.4389, 1.4629],
[1.7198, 1.5232, 0.0000, 0.7115, 0.5958, 0.6179, 0.5583],
[1.6606, 1.4841, 0.7115, 0.0000, 0.4631, 0.5061, 0.4710],
[1.5243, 1.4465, 0.5958, 0.4631, 0.0000, 0.3484, 0.3083],
[1.6043, 1.4389, 0.6179, 0.5061, 0.3484, 0.0000, 0.2692],
[1.5905, 1.4629, 0.5583, 0.4710, 0.3083, 0.2692, 0.0000]]
ids2 = ["Bovine", "Mouse", "Gibbon", "Orang", "Gorilla", "Chimp",
"Human"]
self.dm2 = DistanceMatrix(data2, ids2)
self.expected2_str = ("(Mouse:0.76891, (Gibbon:0.35793, (Orang:0.28469"
", (Gorilla:0.15393, (Chimp:0.15167, Human:0.117"
"53):0.03982):0.02696):0.04648):0.42027, Bovine:"
"0.91769);")
self.expected2_TreeNode = TreeNode.read(
io.StringIO(self.expected2_str))
data3 = [[0, 5, 4, 7, 6, 8],
[5, 0, 7, 10, 9, 11],
[4, 7, 0, 7, 6, 8],
[7, 10, 7, 0, 5, 8],
[6, 9, 6, 5, 0, 8],
[8, 11, 8, 8, 8, 0]]
ids3 = map(str, range(6))
self.dm3 = DistanceMatrix(data3, ids3)
self.expected3_str = ("((((0:1.000000,1:4.000000):1.000000,2:2.000000"
"):1.250000,5:4.750000):0.750000,3:2.750000,4:2."
"250000);")
self.expected3_TreeNode = TreeNode.read(
io.StringIO(self.expected3_str))
# this dm can yield negative branch lengths
data4 = [[0, 5, 9, 9, 800],
[5, 0, 10, 10, 9],
[9, 10, 0, 8, 7],
[9, 10, 8, 0, 3],
[800, 9, 7, 3, 0]]
ids4 = list('abcde')
self.dm4 = DistanceMatrix(data4, ids4)
def test_nj_dm1(self):
self.assertEqual(nj(self.dm1, result_constructor=str),
self.expected1_str)
# what is the correct way to compare TreeNode objects for equality?
actual_TreeNode = nj(self.dm1)
self.assertEqual(actual_TreeNode.compare_tip_distances(
self.expected1_TreeNode), 0.0)
def test_nj_dm2(self):
actual_TreeNode = nj(self.dm2)
self.assertAlmostEqual(actual_TreeNode.compare_tip_distances(
self.expected2_TreeNode), 0.0)
def test_nj_dm3(self):
actual_TreeNode = nj(self.dm3)
self.assertAlmostEqual(actual_TreeNode.compare_tip_distances(
self.expected3_TreeNode), 0.0)
def test_nj_zero_branch_length(self):
# no nodes have negative branch length when we disallow negative
# branch length. self is excluded as branch length is None
tree = nj(self.dm4)
for n in tree.postorder(include_self=False):
self.assertTrue(n.length >= 0)
# only tips associated with the large distance in the input
# have positive branch lengths when we allow negative branch
# length
tree = nj(self.dm4, False)
self.assertTrue(tree.find('a').length > 0)
self.assertTrue(tree.find('b').length < 0)
self.assertTrue(tree.find('c').length < 0)
self.assertTrue(tree.find('d').length < 0)
self.assertTrue(tree.find('e').length > 0)
def test_nj_trivial(self):
data = [[0, 3, 2],
[3, 0, 3],
[2, 3, 0]]
dm = DistanceMatrix(data, list('abc'))
expected_str = "(b:2.000000, a:1.000000, c:1.000000);"
self.assertEqual(nj(dm, result_constructor=str), expected_str)
def test_nj_error(self):
data = [[0, 3],
[3, 0]]
dm = DistanceMatrix(data, list('ab'))
self.assertRaises(ValueError, nj, dm)
def test_compute_q(self):
expected_data = [[0, -50, -38, -34, -34],
[-50, 0, -38, -34, -34],
[-38, -38, 0, -40, -40],
[-34, -34, -40, 0, -48],
[-34, -34, -40, -48, 0]]
expected_ids = list('abcde')
expected = DistanceMatrix(expected_data, expected_ids)
self.assertEqual(_compute_q(self.dm1), expected)
data = [[0, 3, 2],
[3, 0, 3],
[2, 3, 0]]
dm = DistanceMatrix(data, list('abc'))
# computed this manually
expected_data = [[0, -8, -8],
[-8, 0, -8],
[-8, -8, 0]]
expected = DistanceMatrix(expected_data, list('abc'))
self.assertEqual(_compute_q(dm), expected)
def test_compute_collapsed_dm(self):
expected_data = [[0, 7, 7, 6],
[7, 0, 8, 7],
[7, 8, 0, 3],
[6, 7, 3, 0]]
expected_ids = ['x', 'c', 'd', 'e']
expected1 = DistanceMatrix(expected_data, expected_ids)
self.assertEqual(_compute_collapsed_dm(self.dm1, 'a', 'b', True, 'x'),
expected1)
# computed manually
expected_data = [[0, 4, 3],
[4, 0, 3],
[3, 3, 0]]
expected_ids = ['yy', 'd', 'e']
expected2 = DistanceMatrix(expected_data, expected_ids)
self.assertEqual(
_compute_collapsed_dm(expected1, 'x', 'c', True, 'yy'), expected2)
def test_lowest_index(self):
self.assertEqual(_lowest_index(self.dm1), (4, 3))
self.assertEqual(_lowest_index(_compute_q(self.dm1)), (1, 0))
def test_pair_members_to_new_node(self):
self.assertEqual(_pair_members_to_new_node(self.dm1, 'a', 'b', True),
(2, 3))
self.assertEqual(_pair_members_to_new_node(self.dm1, 'a', 'c', True),
(4, 5))
self.assertEqual(_pair_members_to_new_node(self.dm1, 'd', 'e', True),
(2, 1))
def test_pair_members_to_new_node_zero_branch_length(self):
# the values in this example don't really make sense
# (I'm not sure how you end up with these distances between
# three sequences), but that doesn't really matter for the sake
# of this test
data = [[0, 4, 2],
[4, 0, 38],
[2, 38, 0]]
ids = ['a', 'b', 'c']
dm = DistanceMatrix(data, ids)
self.assertEqual(_pair_members_to_new_node(dm, 'a', 'b', True), (0, 4))
# this makes it clear why negative branch lengths don't make sense...
self.assertEqual(
_pair_members_to_new_node(dm, 'a', 'b', False), (-16, 20))
if __name__ == "__main__":
main()
|
gregcaporaso/scikit-bio
|
skbio/tree/tests/test_nj.py
|
Python
|
bsd-3-clause
| 8,287
|
[
"scikit-bio"
] |
292ee3d4b89bf55cbe96769fa3cc80478a88da1dcbbea3790893360fd72717ad
|
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
import os
import os.path
import sys
import yaml
from collections import defaultdict
from distutils.version import LooseVersion
from jinja2 import Environment
import ansible.constants as C
import ansible.utils
import ansible.galaxy
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
from ansible.playbook.role.requirement import RoleRequirement
class GalaxyCLI(CLI):
VALID_ACTIONS = ("init", "info", "install", "list", "remove", "search")
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args, display=None):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args, display)
def parse(self):
''' create an options parser for bin/ansible-galaxy '''
self.parser = CLI.base_parser(
usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# options specific to actions
if self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.add_option('-p', '--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option(
'--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file',
help='A file containing a list of roles to be imported')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
elif self.action == "search":
self.parser.add_option('--platforms', dest='platforms',
help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='tags',
help='list of galaxy tags to filter by')
self.parser.set_usage("usage: %prog search [<search_term>] [--galaxy-tags <galaxy_tag1,galaxy_tag2>] [--platforms platform]")
# options that apply to more than one action
if self.action != "init":
self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. '
'The default is the roles_path configured in your '
'ansible.cfg file (/etc/ansible/roles if not configured)')
if self.action in ("info","init","install","search"):
self.parser.add_option('-s', '--server', dest='api_server', default="https://galaxy.ansible.com",
help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True,
help='Ignore SSL certificate validation errors.')
if self.action in ("init","install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role')
# get options, args and galaxy object
self.options, self.args = self.parser.parse_args()
self.display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options, self.display)
return True
def run(self):
super(GalaxyCLI, self).run()
# if not offline, connect to the galaxy API
if self.action in ("info","install", "search") or (self.action == 'init' and not self.options.offline):
api_server = self.options.api_server
self.api = GalaxyAPI(self.galaxy, api_server)
if not self.api:
raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server)
self.execute()
def get_opt(self, k, defval=""):
"""
Returns an option from an Optparse values instance.
"""
try:
data = getattr(self.options, k)
except AttributeError:
return defval
if k == "roles_path":
if os.pathsep in data:
data = data.split(os.pathsep)[0]
return data
def exit_without_ignore(self, rc=1):
"""
Aborts processing (by raising AnsibleError) unless the
option --ignore-errors was specified
"""
if not self.get_opt("ignore_errors", False):
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def parse_requirements_files(self, role):
if 'role' in role:
# Old style: {role: "galaxy.role,version,name", other_vars: "here" }
role_info = RoleRequirement.role_spec_parse(role['role'])
if isinstance(role_info, dict):
# Warning: Slight change in behaviour here. name may be being
# overloaded. Previously, name was only a parameter to the role.
# Now it is both a parameter to the role and the name that
# ansible-galaxy will install under on the local system.
if 'name' in role and 'name' in role_info:
del role_info['name']
role.update(role_info)
else:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
(scm, src) = role["src"].split('+')
role["scm"] = scm
role["src"] = src
if 'name' not in role:
role["name"] = GalaxyRole.url_to_spec(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
return role
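# Illustrative examples (not from the original source) of the two
# accepted shapes:
#   old style: {'role': 'user.role,1.0.0', 'some_var': 'x'}
#   new style: {'src': 'git+https://github.com/user/repo.git',
#               'version': 'master', 'name': 'repo'}
# Both are normalized toward a dict carrying 'src', 'scm', 'name' and
# 'version' keys before being passed to GalaxyRole(**role).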
def _display_role_info(self, role_info):
text = "\nRole: %s \n" % role_info['name']
text += "\tdescription: %s \n" % role_info['description']
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text += "\t%s: \n" % (k)
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text += "\t\t%s: %s\n" % (key, role_info[k][key])
else:
text += "\t%s: %s\n" % (k, role_info[k])
return text
############################
# execute actions
############################
def execute_init(self):
"""
Executes the init action, which creates the skeleton framework
of a role that complies with the galaxy metadata format.
"""
init_path = self.get_opt('init_path', './')
force = self.get_opt('force', False)
offline = self.get_opt('offline', False)
role_name = self.args.pop(0).strip()
if role_name == "":
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists." % role_path + \
"you can use --force to re-initialize this directory,\n" + \
"however it will reset any main.yml files that may have\n" + \
"been modified there already.")
# create the default README.md
if not os.path.exists(role_path):
os.makedirs(role_path)
readme_path = os.path.join(role_path, "README.md")
f = open(readme_path, "wb")
f.write(self.galaxy.default_readme)
f.close()
for dir in GalaxyRole.ROLE_DIRS:
dir_path = os.path.join(init_path, role_name, dir)
main_yml_path = os.path.join(dir_path, 'main.yml')
# create the directory if it doesn't exist already
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# now create the main.yml file for that directory
if dir == "meta":
# create a skeleton meta/main.yml with a valid galaxy_info
# datastructure in place, plus with all of the available
# platforms included (but commented out), the galaxy_tags
# list, and the dependencies section
platforms = []
if not offline and self.api:
platforms = self.api.get_list("platforms") or []
# group the list of platforms from the api based
# on their names, with the release field being
# appended to a list of versions
platform_groups = defaultdict(list)
for platform in platforms:
platform_groups[platform['name']].append(platform['release'])
platform_groups[platform['name']].sort()
inject = dict(
author = 'your name',
company = 'your company (optional)',
license = 'license (GPLv2, CC-BY, etc)',
issue_tracker_url = 'http://example.com/issue/tracker',
min_ansible_version = '1.2',
platforms = platform_groups,
)
rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject)
f = open(main_yml_path, 'w')
f.write(rendered_meta)
f.close()
elif dir not in ('files','templates'):
# just write a (mostly) empty YAML file for main.yml
f = open(main_yml_path, 'w')
f.write('---\n# %s file for %s\n' % (dir,role_name))
f.close()
self.display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
Executes the info action. This action prints out detailed
information about an installed role as well as info available
from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.get_opt("roles_path")
data = ''
for role in self.args:
role_info = {}
gr = GalaxyRole(self.galaxy, role)
#self.galaxy.add_role(gr)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if self.api:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
__, __, role_spec = req.parse({'role': role})
if role_spec:
role_info.update(role_spec)
data += self._display_role_info(role_info)
if not data:
data += "\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
Executes the installation action. The args list contains the
roles to be installed, unless -r was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github),
or it can be a local .tar.gz file.
"""
role_file = self.get_opt("role_file", None)
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file
# or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
elif len(self.args) == 1 and role_file is not None:
# using a role file is mutually exclusive with specifying
# the role name on the command line
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
no_deps = self.get_opt("no_deps", False)
force = self.get_opt('force', False)
roles_path = self.get_opt("roles_path")
roles_done = []
roles_left = []
if role_file:
self.display.debug('Getting roles from %s' % role_file)
try:
self.display.debug('Processing role file: %s' % role_file)
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
rolesparsed = map(self.parse_requirements_files, yaml.safe_load(f))
except Exception as e:
raise AnsibleError("%s does not seem like a valid yaml file: %s" % (role_file, str(e)))
roles_left = [GalaxyRole(self.galaxy, **r) for r in rolesparsed]
else:
# roles listed in a file, one per line
self.display.deprecated("Non yaml files for role requirements")
for rname in f.readlines():
if rname.startswith("#") or rname.strip() == '':
continue
roles_left.append(GalaxyRole(self.galaxy, rname.strip()))
f.close()
except (IOError,OSError) as e:
raise AnsibleError("Unable to read requirements file (%s): %s" % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out and grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
roles_left.append(GalaxyRole(self.galaxy, rname.strip()))
while len(roles_left) > 0:
# query the galaxy API for the role data
role_data = None
role = roles_left.pop(0)
role_path = role.path
if role.install_info is not None and not force:
self.display.display('- %s is already installed, skipping.' % role.name)
continue
if role_path:
self.options.roles_path = role_path
else:
self.options.roles_path = roles_path
self.display.debug('Installing role %s from %s' % (role.name, self.options.roles_path))
tmp_file = None
installed = False
if role.src and os.path.isfile(role.src):
# installing a local tar.gz
tmp_file = role.src
else:
if role.scm:
# create tar file from scm url
tmp_file = GalaxyRole.scm_archive_role(role.scm, role.src, role.version, role.name)
if role.src:
if '://' not in role.src:
role_data = self.api.lookup_role_by_name(role.src)
if not role_data:
self.display.warning("- sorry, %s was not found on %s." % (role.src, self.options.api_server))
self.exit_without_ignore()
continue
role_versions = self.api.fetch_role_related('versions', role_data['id'])
if not role.version:
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
loose_versions.sort()
role.version = str(loose_versions[-1])
else:
role.version = 'master'
elif role.version != 'master':
if role_versions and role.version not in [a.get('name', None) for a in role_versions]:
self.display.warning('role is %s' % role)
self.display.warning("- the specified version (%s) was not found in the list of available versions (%s)." % (role.version, role_versions))
self.exit_without_ignore()
continue
# download the role. if --no-deps was specified, we stop here,
# otherwise we recursively grab roles and all of their deps.
tmp_file = role.fetch(role_data)
if tmp_file:
installed = role.install(tmp_file)
# we're done with the temp file, clean it up
if tmp_file != role.src:
os.unlink(tmp_file)
# install dependencies, if we want them
if not no_deps and installed:
role_dependencies = role.metadata.get('dependencies', [])
for dep in role_dependencies:
self.display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
__, dep_name, __ = dep_req.parse(dep)
dep_role = GalaxyRole(self.galaxy, name=dep_name)
if dep_role.install_info is None or force:
if dep_role not in roles_left:
self.display.display('- adding dependency: %s' % dep_name)
roles_left.append(GalaxyRole(self.galaxy, name=dep_name))
else:
self.display.display('- dependency %s already pending installation.' % dep_name)
else:
self.display.display('- dependency %s is already installed, skipping.' % dep_name)
if not tmp_file or not installed:
self.display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
Executes the remove action. The args list contains the list
of roles to be removed. This list can contain more than one role.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
self.display.display('- successfully removed %s' % role_name)
else:
self.display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
return 0
def execute_list(self):
"""
Executes the list action. The args list can contain zero
or one role. If one is specified, only that role will be
shown, otherwise all roles in the specified directory will
be shown.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
if len(self.args) == 1:
# show only the requested role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
self.display.display("- %s, %s" % (name, version))
else:
self.display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.get_opt('roles_path')
roles_path = os.path.expanduser(roles_path)
if not os.path.exists(roles_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path)
elif not os.path.isdir(roles_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path)
path_files = os.listdir(roles_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
self.display.display("- %s, %s" % (path_file, version))
return 0
def execute_search(self):
search = None
if len(self.args) > 1:
raise AnsibleOptionsError("At most a single search term is allowed.")
elif len(self.args) == 1:
search = self.args.pop()
response = self.api.search_roles(search, self.options.platforms, self.options.tags)
if 'count' in response:
self.galaxy.display.display("Found %d roles matching your search:\n" % response['count'])
data = ''
if 'results' in response:
for role in response['results']:
data += self._display_role_info(role)
self.pager(data)
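# A minimal usage sketch (not part of the original file): the
# ansible-galaxy entry point would drive this class roughly as follows;
# the argument vector shown here is hypothetical.
#
#   cli = GalaxyCLI(['ansible-galaxy', 'install', 'username.rolename'])
#   cli.parse()
#   cli.run()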
|
garyjyao1/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 24,555
|
[
"Galaxy"
] |
43fc83cd592f9dcdd067486a91326fdc47e691a4c4c08f7729ae48b6805c9bf6
|
import ast
import token
import tokenize
from os.path import islink
from StringIO import StringIO
from dxr.build import unignored
from dxr.filters import FILE, LINE
from dxr.indexers import (Extent, FileToIndex as FileToIndexBase,
iterable_per_line, Position, split_into_lines,
TreeToIndex as TreeToIndexBase,
QUALIFIED_FILE_NEEDLE, QUALIFIED_LINE_NEEDLE,
with_start_and_end)
from dxr.lines import Ref
from dxr.plugins.python.analysis import TreeAnalysis
from dxr.plugins.python.menus import ClassRef
from dxr.plugins.python.utils import (ClassFunctionVisitorMixin,
convert_node_to_name, local_name,
path_to_module)
mappings = {
FILE: {
'properties': {
'py_module': QUALIFIED_FILE_NEEDLE,
},
},
LINE: {
'properties': {
'py_type': QUALIFIED_LINE_NEEDLE,
'py_function': QUALIFIED_LINE_NEEDLE,
'py_derived': QUALIFIED_LINE_NEEDLE,
'py_bases': QUALIFIED_LINE_NEEDLE,
'py_callers': QUALIFIED_LINE_NEEDLE,
'py_called_by': QUALIFIED_LINE_NEEDLE,
'py_overrides': QUALIFIED_LINE_NEEDLE,
'py_overridden': QUALIFIED_LINE_NEEDLE,
},
},
}
class _FileToIgnore(object):
"""A file that we don't want to bother indexing, usually due to
syntax errors.
"""
def is_interesting(self):
return False
FILE_TO_IGNORE = _FileToIgnore()
class TreeToIndex(TreeToIndexBase):
@property
def unignored_files(self):
return unignored(self.tree.source_folder, self.tree.ignore_paths,
self.tree.ignore_filenames)
def post_build(self):
paths = ((path, self.tree.source_encoding)
for path in self.unignored_files if is_interesting(path))
self.tree_analysis = TreeAnalysis(
python_path=self.plugin_config.python_path,
source_folder=self.tree.source_folder,
paths=paths)
def file_to_index(self, path, contents):
if path in self.tree_analysis.ignore_paths:
return FILE_TO_IGNORE
else:
return FileToIndex(path, contents, self.plugin_name, self.tree,
tree_analysis=self.tree_analysis)
class IndexingNodeVisitor(ast.NodeVisitor, ClassFunctionVisitorMixin):
"""Node visitor that walks through the nodes in an abstract syntax
tree and finds interesting things to index.
"""
def __init__(self, file_to_index, tree_analysis):
super(IndexingNodeVisitor, self).__init__()
self.file_to_index = file_to_index
self.tree_analysis = tree_analysis
self.function_call_stack = []  # Stack of lists of (name, start, end) call tuples.
self.needles = []
self.refs = []
def visit_FunctionDef(self, node):
# Index the function itself for the function: filter.
start, end = self.file_to_index.get_node_start_end(node)
self.yield_needle('py_function', node.name, start, end)
# Index function calls within this function for the callers: and
# called-by filters.
self.function_call_stack.append([])
super(IndexingNodeVisitor, self).visit_FunctionDef(node)
call_needles = self.function_call_stack.pop()
for name, call_start, call_end in call_needles:
self.yield_needle('py_callers', name, start, end)
self.yield_needle('py_called_by', node.name, call_start, call_end)
def visit_Call(self, node):
# Save this call if we're currently tracking function calls.
if self.function_call_stack:
call_needles = self.function_call_stack[-1]
name = convert_node_to_name(node.func)
if name:
start, end = self.file_to_index.get_node_start_end(node)
call_needles.append((name, start, end))
self.generic_visit(node)
def visit_ClassDef(self, node):
# Index the class itself for the type: filter.
start, end = self.file_to_index.get_node_start_end(node)
self.yield_needle('py_type', node.name, start, end)
# Index the class hierarchy for classes for the derived: and
# bases: filters.
class_name = self.get_class_name(node)
bases = self.tree_analysis.get_base_classes(class_name)
for qualname in bases:
self.yield_needle(needle_type='py_derived',
name=local_name(qualname), qualname=qualname,
start=start, end=end)
derived_classes = self.tree_analysis.get_derived_classes(class_name)
for qualname in derived_classes:
self.yield_needle(needle_type='py_bases',
name=local_name(qualname), qualname=qualname,
start=start, end=end)
# Show a menu when hovering over this class.
self.yield_ref(start, end,
ClassRef(self.file_to_index.tree, class_name))
super(IndexingNodeVisitor, self).visit_ClassDef(node)
def visit_ClassFunction(self, class_node, function_node):
class_name = self.get_class_name(class_node)
function_qualname = class_name + '.' + function_node.name
start, end = self.file_to_index.get_node_start_end(function_node)
# Index this function as being overridden by other functions for
# the overridden: filter.
for qualname in self.tree_analysis.overridden_functions[function_qualname]:
name = qualname.rsplit('.')[-1]
self.yield_needle(needle_type='py_overridden',
name=name, qualname=qualname,
start=start, end=end)
# Index this function as overriding other functions for the
# overrides: filter.
for qualname in self.tree_analysis.overriding_functions[function_qualname]:
name = qualname.rsplit('.')[-1]
self.yield_needle(needle_type='py_overrides',
name=name, qualname=qualname,
start=start, end=end)
def get_class_name(self, class_node):
return self.file_to_index.abs_module_name + '.' + class_node.name
def yield_needle(self, *args, **kwargs):
needle = line_needle(*args, **kwargs)
self.needles.append(needle)
def yield_ref(self, start, end, ref):
self.refs.append((
self.file_to_index.char_offset(*start),
self.file_to_index.char_offset(*end),
ref,
))
class FileToIndex(FileToIndexBase):
def __init__(self, path, contents, plugin_name, tree, tree_analysis):
"""
:arg tree_analysis: TreeAnalysis object with the results
from the post-build analysis.
"""
super(FileToIndex, self).__init__(path, contents, plugin_name, tree)
self.tree_analysis = tree_analysis
self.abs_module_name = path_to_module(tree_analysis.python_path, self.path)
self._visitor = None
def is_interesting(self):
return is_interesting(self.path)
@property
def visitor(self):
"""Return IndexingNodeVisitor for this file, lazily creating and
running it if it doesn't exist yet.
"""
if not self._visitor:
self.node_start_table = self.analyze_tokens()
self._visitor = IndexingNodeVisitor(self, self.tree_analysis)
syntax_tree = ast.parse(self.contents)
self._visitor.visit(syntax_tree)
return self._visitor
def needles(self):
# Index module name. For practical purposes, this includes
# __init__.py files for packages even though that's not
# _technically_ a module.
yield file_needle('py_module',
name=local_name(self.abs_module_name),
qualname=self.abs_module_name)
def needles_by_line(self):
return iterable_per_line(
with_start_and_end(
split_into_lines(
self.visitor.needles
)
)
)
def refs(self):
return self.visitor.refs
def analyze_tokens(self):
"""Split the file into tokens and analyze them for data needed
for indexing.
"""
# AST nodes for classes and functions point to the position of
# their 'def' and 'class' tokens. To get the position of their
# names, we look for 'def' and 'class' tokens and store the
# position of the token immediately following them.
node_start_table = {}
previous_start = None
token_gen = tokenize.generate_tokens(StringIO(self.contents).readline)
for tok_type, tok_name, start, end, _ in token_gen:
if tok_type != token.NAME:
continue
if tok_name in ('def', 'class'):
previous_start = start
elif previous_start is not None:
node_start_table[previous_start] = start
previous_start = None
return node_start_table
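# For example (illustrative): given the source "def foo():\n    pass\n",
# tokenize yields NAME tokens 'def' at (1, 0) and 'foo' at (1, 4), so
# analyze_tokens returns {(1, 0): (1, 4)}: the position the AST reports
# for the node maps to the position of the function's name.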
def get_node_start_end(self, node):
"""Return start and end positions within the file for the given
AST Node.
"""
start = node.lineno, node.col_offset
if start in self.node_start_table:
start = self.node_start_table[start]
end = None
if isinstance(node, (ast.ClassDef, ast.FunctionDef)):
end = start[0], start[1] + len(node.name)
elif isinstance(node, ast.Call):
name = convert_node_to_name(node.func)
if name:
end = start[0], start[1] + len(name)
return start, end
def file_needle(needle_type, name, qualname=None):
data = {'name': name}
if qualname:
data['qualname'] = qualname
return needle_type, data
def line_needle(needle_type, name, start, end, qualname=None):
data = {
'name': name,
'start': start[1],
'end': end[1]
}
if qualname:
data['qualname'] = qualname
return (
needle_type,
data,
Extent(Position(row=start[0],
col=start[1]),
Position(row=end[0],
col=end[1]))
)
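# For example (illustrative), line_needle('py_function', 'foo', (3, 4), (3, 7))
# returns:
#   ('py_function',
#    {'name': 'foo', 'start': 4, 'end': 7},
#    Extent(Position(row=3, col=4), Position(row=3, col=7)))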
def is_interesting(path):
"""Determine if the file at the given path is interesting enough to
analyze.
"""
return path.endswith('.py') and not islink(path)
|
gartung/dxr
|
dxr/plugins/python/indexers.py
|
Python
|
mit
| 10,644
|
[
"VisIt"
] |
e618005f94d92464dccf214f91a0541346e53c84b13c692bf129c8bace5df24b
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MaskedAutoregressiveFlow bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import template as template_ops
from tensorflow.python.ops import variable_scope as variable_scope_lib
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"MaskedAutoregressiveFlow",
"masked_autoregressive_default_template",
"masked_dense",
]
class MaskedAutoregressiveFlow(bijector.Bijector):
"""Affine MaskedAutoregressiveFlow bijector for vector-valued events.
The affine autoregressive flow [(Papamakarios et al., 2016)][3] provides a
relatively simple framework for user-specified (deep) architectures to learn
a distribution over vector-valued events. Regarding terminology,
"Autoregressive models decompose the joint density as a product of
conditionals, and model each conditional in turn. Normalizing flows
transform a base density (e.g. a standard Gaussian) into the target density
by an invertible transformation with tractable Jacobian."
[(Papamakarios et al., 2016)][3]
In other words, the "autoregressive property" is equivalent to the
decomposition, `p(x) = prod{ p(x[i] | x[0:i]) : i=0, ..., d }`. The provided
`shift_and_log_scale_fn`, `masked_autoregressive_default_template`, achieves
this property by zeroing out weights in its `masked_dense` layers.
In the `tf.distributions` framework, a "normalizing flow" is implemented as a
`tf.contrib.distributions.bijectors.Bijector`. The `forward` "autoregression"
is implemented using a `tf.while_loop` and a deep neural network (DNN) with
masked weights such that the autoregressive property is automatically met in
the `inverse`.
A `TransformedDistribution` using `MaskedAutoregressiveFlow(...)` uses the
(expensive) forward-mode calculation to draw samples and the (cheap)
reverse-mode calculation to compute log-probabilities. Conversely, a
`TransformedDistribution` using `Invert(MaskedAutoregressiveFlow(...))` uses
the (expensive) forward-mode calculation to compute log-probabilities and the
(cheap) reverse-mode calculation to compute samples. See "Example Use"
[below] for more details.
Given a `shift_and_log_scale_fn`, the forward and inverse transformations are
(a sequence of) affine transformations. A "valid" `shift_and_log_scale_fn`
must compute each `shift` (aka `loc` or "mu" in [Germain et al. (2015)][1])
and `log(scale)` (aka "alpha" in [Germain et al. (2015)][1]) such that each
are broadcastable with the arguments to `forward` and `inverse`, i.e., such
that the calculations in `forward`, `inverse` [below] are possible.
For convenience, `masked_autoregressive_default_template` is offered as a
possible `shift_and_log_scale_fn` function. It implements the MADE
architecture [(Germain et al., 2015)][1]. MADE is a feed-forward network that
computes a `shift` and `log(scale)` using `masked_dense` layers in a deep
neural network. Weights are masked to ensure the autoregressive property. It
is possible that this architecture is suboptimal for your task. To build
alternative networks, either change the arguments to
`masked_autoregressive_default_template`, use the `masked_dense` function to
roll-out your own, or use some other architecture, e.g., using `tf.layers`.
Warning: no attempt is made to validate that the `shift_and_log_scale_fn`
enforces the "autoregressive property".
Assuming `shift_and_log_scale_fn` has valid shape and autoregressive
semantics, the forward transformation is
```python
def forward(x):
y = zeros_like(x)
event_size = x.shape[-1]
for _ in range(event_size):
shift, log_scale = shift_and_log_scale_fn(y)
y = x * math_ops.exp(log_scale) + shift
return y
```
and the inverse transformation is
```python
def inverse(y):
shift, log_scale = shift_and_log_scale_fn(y)
return (y - shift) / math_ops.exp(log_scale)
```
Notice that the `inverse` does not need a for-loop. This is because in the
forward pass each calculation of `shift` and `log_scale` is based on the `y`
calculated so far (not `x`). In the `inverse`, the `y` is fully known, thus is
equivalent to the scaling used in `forward` after `event_size` passes, i.e.,
the "last" `y` used to compute `shift`, `log_scale`. (Roughly speaking, this
also proves the transform is bijective.)
#### Examples
```python
tfd = tf.contrib.distributions
tfb = tfd.bijectors
dims = 5
# A common choice for a normalizing flow is to use a Gaussian for the base
# distribution. (However, any continuous distribution would work.) E.g.,
maf = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=0., scale=1.),
bijector=tfb.MaskedAutoregressiveFlow(
shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
hidden_layers=[512, 512])),
event_shape=[dims])
x = maf.sample() # Expensive; uses `tf.while_loop`, no Bijector caching.
maf.log_prob(x) # Almost free; uses Bijector caching.
maf.log_prob(0.) # Cheap; no `tf.while_loop` despite no Bijector caching.
# [Papamakarios et al. (2016)][3] also describe an Inverse Autoregressive
# Flow [(Kingma et al., 2016)][2]:
iaf = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=0., scale=1.),
bijector=tfb.Invert(tfb.MaskedAutoregressiveFlow(
shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
hidden_layers=[512, 512]))),
event_shape=[dims])
x = iaf.sample() # Cheap; no `tf.while_loop` despite no Bijector caching.
iaf.log_prob(x) # Almost free; uses Bijector caching.
iaf.log_prob(0.) # Expensive; uses `tf.while_loop`, no Bijector caching.
# In many (if not most) cases the default `shift_and_log_scale_fn` will be a
# poor choice. Here's an example of using a "shift only" version and with a
# different number/depth of hidden layers.
shift_only = True
maf_no_scale_hidden2 = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=0., scale=1.),
bijector=tfb.MaskedAutoregressiveFlow(
tfb.masked_autoregressive_default_template(
hidden_layers=[32],
shift_only=shift_only),
is_constant_jacobian=shift_only),
event_shape=[dims])
```
#### References
[1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
Masked Autoencoder for Distribution Estimation. In _International
Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
[2]: Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya
Sutskever, and Max Welling. Improving Variational Inference with Inverse
Autoregressive Flow. In _Neural Information Processing Systems_, 2016.
https://arxiv.org/abs/1606.04934
[3]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
shift_and_log_scale_fn,
is_constant_jacobian=False,
validate_args=False,
unroll_loop=False,
name=None):
"""Creates the MaskedAutoregressiveFlow bijector.
Args:
shift_and_log_scale_fn: Python `callable` which computes `shift` and
`log_scale` from both the forward domain (`x`) and the inverse domain
(`y`). Calculation must respect the "autoregressive property" (see class
docstring). Suggested default
`masked_autoregressive_default_template(hidden_layers=...)`.
Typically the function contains `tf.Variables` and is wrapped using
`tf.make_template`. Returning `None` for either (both) `shift`,
`log_scale` is equivalent to (but more efficient than) returning zero.
is_constant_jacobian: Python `bool`. Default: `False`. When `True` the
implementation assumes `log_scale` does not depend on the forward domain
(`x`) or inverse domain (`y`) values. (No validation is made;
`is_constant_jacobian=False` is always safe but possibly computationally
inefficient.)
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
unroll_loop: Python `bool` indicating whether the `tf.while_loop` in
`_forward` should be replaced with a static for loop. Requires that
the final dimension of `x` be known at graph construction time. Defaults
to `False`.
name: Python `str`, name given to ops managed by this object.
"""
name = name or "masked_autoregressive_flow"
self._shift_and_log_scale_fn = shift_and_log_scale_fn
self._unroll_loop = unroll_loop
super(MaskedAutoregressiveFlow, self).__init__(
forward_min_event_ndims=1,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
def _forward(self, x):
if self._unroll_loop:
event_size = x.shape.with_rank_at_least(1)[-1].value
if event_size is None:
raise ValueError(
"The final dimension of `x` must be known at graph construction "
"time if `unroll_loop=True`. `x.shape: %r`" % x.shape)
y = array_ops.zeros_like(x, name="y0")
for _ in range(event_size):
shift, log_scale = self._shift_and_log_scale_fn(y)
# next_y = scale * x + shift
next_y = x
if log_scale is not None:
next_y *= math_ops.exp(log_scale)
if shift is not None:
next_y += shift
y = next_y
return y
event_size = array_ops.shape(x)[-1]
# If the event size is available at graph construction time, we can inform
# the graph compiler of the maximum number of steps. If not,
# static_event_size will be None, and the maximum_iterations argument will
# have no effect.
static_event_size = x.shape.with_rank_at_least(1)[-1].value
y0 = array_ops.zeros_like(x, name="y0")
# call the template once to ensure creation
_ = self._shift_and_log_scale_fn(y0)
def _loop_body(index, y0):
"""While-loop body for autoregression calculation."""
# Set caching device to avoid re-getting the tf.Variable for every while
# loop iteration.
with variable_scope_lib.variable_scope(
variable_scope_lib.get_variable_scope()) as vs:
if vs.caching_device is None:
vs.set_caching_device(lambda op: op.device)
shift, log_scale = self._shift_and_log_scale_fn(y0)
y = x
if log_scale is not None:
y *= math_ops.exp(log_scale)
if shift is not None:
y += shift
return index + 1, y
_, y = control_flow_ops.while_loop(
cond=lambda index, _: index < event_size,
body=_loop_body,
loop_vars=(0, y0),
maximum_iterations=static_event_size)
return y
def _inverse(self, y):
shift, log_scale = self._shift_and_log_scale_fn(y)
x = y
if shift is not None:
x -= shift
if log_scale is not None:
x *= math_ops.exp(-log_scale)
return x
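# The log-det-Jacobian below follows from the inverse above: by the
# autoregressive property, shift and log_scale for coordinate i depend
# only on y[..., :i], so the Jacobian dx/dy is triangular with diagonal
# entries exp(-log_scale), giving
#   log |det dx/dy| = reduce_sum(-log_scale, axis=-1).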
def _inverse_log_det_jacobian(self, y):
_, log_scale = self._shift_and_log_scale_fn(y)
if log_scale is None:
return constant_op.constant(0., dtype=y.dtype, name="ildj")
return -math_ops.reduce_sum(log_scale, axis=-1)
MASK_INCLUSIVE = "inclusive"
MASK_EXCLUSIVE = "exclusive"
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _gen_slices(num_blocks, n_in, n_out, mask_type=MASK_EXCLUSIVE):
"""Generate the slices for building an autoregressive mask."""
# TODO(b/67594795): Better support of dynamic shape.
slices = []
col = 0
d_in = n_in // num_blocks
d_out = n_out // num_blocks
row = d_out if mask_type == MASK_EXCLUSIVE else 0
for _ in range(num_blocks):
row_slice = slice(row, None)
col_slice = slice(col, col + d_in)
slices.append([row_slice, col_slice])
col += d_in
row += d_out
return slices
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _gen_mask(num_blocks,
n_in,
n_out,
mask_type=MASK_EXCLUSIVE,
dtype=dtypes.float32):
"""Generate the mask for building an autoregressive dense layer."""
# TODO(b/67594795): Better support of dynamic shape.
mask = np.zeros([n_out, n_in], dtype=dtype.as_numpy_dtype())
slices = _gen_slices(num_blocks, n_in, n_out, mask_type=mask_type)
for [row_slice, col_slice] in slices:
mask[row_slice, col_slice] = 1
return mask
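# A small example worked by hand from the code above:
# _gen_mask(num_blocks=2, n_in=2, n_out=4, mask_type=MASK_EXCLUSIVE) has
# d_in = 1, d_out = 2 and yields
#   [[0., 0.],
#    [0., 0.],
#    [1., 0.],
#    [1., 0.]]
# while MASK_INCLUSIVE additionally keeps the "diagonal" blocks:
#   [[1., 0.],
#    [1., 0.],
#    [1., 1.],
#    [1., 1.]]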
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def masked_dense(inputs,
units,
num_blocks=None,
exclusive=False,
kernel_initializer=None,
reuse=None,
name=None,
*args,
**kwargs):
"""A autoregressively masked dense layer. Analogous to `tf.layers.dense`.
See [Germain et al. (2015)][1] for detailed explanation.
Arguments:
inputs: Tensor input.
units: Python `int` scalar representing the dimensionality of the output
space.
num_blocks: Python `int` scalar representing the number of blocks for the
MADE masks.
exclusive: Python `bool` scalar representing whether to zero the diagonal of
the mask, used for the first layer of a MADE.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the
`tf.glorot_random_initializer`.
reuse: Python `bool` scalar representing whether to reuse the weights of a
previous layer by the same name.
name: Python `str` used to describe ops managed by this function.
*args: `tf.layers.dense` arguments.
**kwargs: `tf.layers.dense` keyword arguments.
Returns:
Output tensor.
Raises:
NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
graph execution.
#### References
[1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
Masked Autoencoder for Distribution Estimation. In _International
Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
"""
# TODO(b/67594795): Better support of dynamic shape.
input_depth = inputs.shape.with_rank_at_least(1)[-1].value
if input_depth is None:
raise NotImplementedError(
"Rightmost dimension must be known prior to graph execution.")
mask = _gen_mask(num_blocks, input_depth, units,
MASK_EXCLUSIVE if exclusive else MASK_INCLUSIVE).T
if kernel_initializer is None:
kernel_initializer = init_ops.glorot_normal_initializer()
def masked_initializer(shape, dtype=None, partition_info=None):
return mask * kernel_initializer(shape, dtype, partition_info)
with ops.name_scope(name, "masked_dense", [inputs, units, num_blocks]):
layer = layers.Dense(
units,
kernel_initializer=masked_initializer,
kernel_constraint=lambda x: mask * x,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse,
*args,
**kwargs)
return layer.apply(inputs)
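# A minimal usage sketch (assumptions: TF1 graph mode, an input tensor `x`
# with a statically known last dimension of 5, as in the class docstring):
#
#   h = masked_dense(inputs=x, units=64, num_blocks=5, exclusive=True,
#                    activation=nn_ops.relu)
#   params = masked_dense(inputs=h, units=2 * 5, num_blocks=5,
#                         activation=None)
#
# This mirrors how masked_autoregressive_default_template stacks layers:
# the first layer is exclusive (zeroed diagonal), the last is linear with
# (1 if shift_only else 2) * input_depth units.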
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def masked_autoregressive_default_template(
hidden_layers,
shift_only=False,
activation=nn_ops.relu,
log_scale_min_clip=-5.,
log_scale_max_clip=3.,
log_scale_clip_gradient=False,
name=None,
*args,
**kwargs):
"""Build the Masked Autoregressive Density Estimator (Germain et al., 2015).
This will be wrapped in a make_template to ensure the variables are only
created once. It takes the input and returns the `loc` ("mu" in [Germain et
al. (2015)][1]) and `log_scale` ("alpha" in [Germain et al. (2015)][1]) from
the MADE network.
Warning: This function uses `masked_dense` to create randomly initialized
`tf.Variables`. It is presumed that these will be fit, just as you would any
other neural architecture which uses `tf.layers.dense`.
#### About Hidden Layers
Each element of `hidden_layers` should be greater than the `input_depth`
(i.e., `input_depth = tf.shape(input)[-1]` where `input` is the input to the
neural network). This is necessary to ensure the autoregressivity property.
#### About Clipping
This function also optionally clips the `log_scale` (but possibly not its
gradient). This is useful because if `log_scale` is too small/large it might
underflow/overflow making it impossible for the `MaskedAutoregressiveFlow`
bijector to implement a bijection. Additionally, the `log_scale_clip_gradient`
`bool` indicates whether the gradient should also be clipped. The default does
not clip the gradient; this is useful because it still provides gradient
information (for fitting) yet solves the numerical stability problem. I.e.,
`log_scale_clip_gradient = False` means
`grad[exp(clip(x))] = grad[x] exp(clip(x))` rather than the usual
`grad[clip(x)] exp(clip(x))`.
Args:
hidden_layers: Python `list`-like of non-negative integer scalars
indicating the number of units in each hidden layer. Default: `[512, 512]`.
shift_only: Python `bool` indicating if only the `shift` term shall be
computed. Default: `False`.
activation: Activation function (callable). Explicitly setting to `None`
implies a linear activation.
log_scale_min_clip: `float`-like scalar `Tensor`, or a `Tensor` with the
same shape as `log_scale`. The minimum value to clip by. Default: -5.
log_scale_max_clip: `float`-like scalar `Tensor`, or a `Tensor` with the
same shape as `log_scale`. The maximum value to clip by. Default: 3.
log_scale_clip_gradient: Python `bool` indicating that the gradient of
`tf.clip_by_value` should be preserved. Default: `False`.
name: A name for ops managed by this function. Default:
"masked_autoregressive_default_template".
*args: `tf.layers.dense` arguments.
**kwargs: `tf.layers.dense` keyword arguments.
Returns:
shift: `Float`-like `Tensor` of shift terms (the "mu" in
[Germain et al. (2015)][1]).
log_scale: `Float`-like `Tensor` of log(scale) terms (the "alpha" in
[Germain et al. (2015)][1]).
Raises:
NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
graph execution.
#### References
[1]: Mathieu Germain, Karol Gregor, Iain Murray, and Hugo Larochelle. MADE:
Masked Autoencoder for Distribution Estimation. In _International
Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
"""
name = name or "masked_autoregressive_default_template"
with ops.name_scope(name, values=[log_scale_min_clip, log_scale_max_clip]):
def _fn(x):
"""MADE parameterized via `masked_autoregressive_default_template`."""
# TODO(b/67594795): Better support of dynamic shape.
input_depth = x.shape.with_rank_at_least(1)[-1].value
if input_depth is None:
raise NotImplementedError(
"Rightmost dimension must be known prior to graph execution.")
input_shape = (np.int32(x.shape.as_list()) if x.shape.is_fully_defined()
else array_ops.shape(x))
for i, units in enumerate(hidden_layers):
x = masked_dense(
inputs=x,
units=units,
num_blocks=input_depth,
exclusive=True if i == 0 else False,
activation=activation,
*args,
**kwargs)
x = masked_dense(
inputs=x,
units=(1 if shift_only else 2) * input_depth,
num_blocks=input_depth,
activation=None,
*args,
**kwargs)
if shift_only:
x = array_ops.reshape(x, shape=input_shape)
return x, None
x = array_ops.reshape(
x, shape=array_ops.concat([input_shape, [2]], axis=0))
shift, log_scale = array_ops.unstack(x, num=2, axis=-1)
which_clip = (math_ops.clip_by_value if log_scale_clip_gradient
else _clip_by_value_preserve_grad)
log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip)
return shift, log_scale
return template_ops.make_template(name, _fn)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _clip_by_value_preserve_grad(x, clip_value_min, clip_value_max, name=None):
"""Clips input while leaving gradient unaltered."""
with ops.name_scope(name, "clip_by_value_preserve_grad",
[x, clip_value_min, clip_value_max]):
clip_x = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
return x + array_ops.stop_gradient(clip_x - x)
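# This is the standard "straight-through" trick: the returned tensor
# evaluates to clip_by_value(x, ...) in the forward pass, since
# x + (clip_x - x) == clip_x, but the correction term is wrapped in
# stop_gradient, so the gradient is that of x alone and the clipping is
# invisible to backprop.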
|
AnishShah/tensorflow
|
tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
|
Python
|
apache-2.0
| 23,611
|
[
"Gaussian"
] |
083502f36ca5e694193105a39d194cdc9883f8f36e2208c29e1d2bc4821c793f
|
# -*- coding: utf-8 -*-
import sys
import time
import httplib2
AUTO_RECONNECT_TIMES = 5
crawl_tips_json = {}
SERVER = 'http://api.cn.faceplusplus.com/'
category_Arts_Entertainment = ['Aquarium', 'Arcade', 'Art Gallery', 'Bowling Alley', 'Casino', 'Circus', 'Comedy Club',
'Concert Hall', 'Country Dance Club', 'Disc Golf', 'General Entertainment',
'Go Kart Track', 'Historic Site', 'Laser Tag', 'Mini Golf', 'Movie Theater',
'Indie Movie Theater', 'Multiplex', 'Museum', 'Art Museum', 'Erotic Museum',
'History Museum', 'Planetarium', 'Science Museum', 'Music Venue', 'Jazz Club',
'Piano Bar', 'Rock Club', 'Performing Arts Venue', 'Dance Studio', 'Indie Theater',
'Opera House', 'Theater', 'Pool Hall', 'Public Art', 'Outdoor Sculpture', 'Street Art',
'Racetrack', 'Roller Rink', 'Salsa Club', 'Stadium', 'Baseball Stadium',
'Basketball Stadium', 'Cricket Ground', 'Football Stadium', 'Hockey Arena',
'Soccer Stadium', 'Tennis Stadium', 'Track Stadium', 'Threet Art', 'Theme Park',
'Theme Park Ride / Attraction', 'Water Park', 'Zoo']
category_College_University = ['College Academic Building', 'College Arts Building', 'College Communications Building',
'College Engineering Building', 'College History Building', 'College Math Building',
'College Science Building', 'College Technology Building',
'College Administrative Building', 'College Auditorium', 'College Bookstore',
'College Cafeteria', 'College Classroom', 'College Gym', 'College Lab',
'College Library', 'College Quad', 'College Rec Center', 'College Residence Hall',
'College Stadium', 'College Baseball Diamond', 'College Basketball Court',
'College Cricket Pitch', 'College Football Field', 'College Hockey Rink',
'College Soccer Field', 'College Tennis Court', 'College Track', 'College Theater',
'Community College', 'Fraternity House', 'General College & University', 'Law School',
'Medical School', 'Sorority House', 'Student Center', 'Trade School', 'University']
category_Event = ['Conference', 'Convention', 'Festival', 'Music Festival', 'Other Event', 'Parade', 'Stoop Sale',
'Street Fair']
male_tipping_duration = []
female_tipping_duration = []
all_tip_timestamp = {}
category_Food = ['Afghan Restaurant', 'African Restaurant', 'Ethiopian Restaurant', 'American Restaurant',
'New American Restaurant', 'Arepa Restaurant', 'Argentinian Restaurant', 'Asian Restaurant',
'Dim Sum Restaurant', 'Donburi Restaurant', 'Japanese Curry Restaurant', 'Kaiseki Restaurant',
'Kushikatsu Restaurant', 'Monjayaki Restaurant', 'Nabe Restaurant', 'Okonomiyaki Restaurant',
'Ramen Restaurant', 'Shabu-Shabu Restaurant', 'Soba Restaurant', 'Sukiyaki Restaurant',
'Takoyaki Place', 'Tempura Restaurant', 'Tonkatsu Restaurant', 'Udon Restaurant', 'Unagi Restaurant',
'Wagashi Place', 'Yakitori Restaurant', 'Yoshoku Restaurant', 'Korean Restaurant',
'Malaysian Restaurant', 'Mongolian Restaurant', 'Noodle House', 'Thai Restaurant',
'Tibetan Restaurant', 'Vietnamese Restaurant', 'Australian Restaurant', 'Austrian Restaurant',
'BBQ Joint', 'Bagel Shop', 'Bakery', 'Belarusian Restaurant', 'Belgian Restaurant', 'Bistro',
'Brazilian Restaurant', 'Acai House', 'Baiano Restaurant', 'Central Brazilian Restaurant',
'Churrascaria', 'Empada House', 'Goiano Restaurant', 'Mineiro Restaurant',
'Northeastern Brazilian Restaurant', 'Northern Brazilian Restaurant', 'Pastelaria',
'Southeastern Brazilian Restaurant', 'Southern Brazilian Restaurant', 'Tapiocaria', 'Breakfast Spot',
'Bubble Tea Shop', 'Buffet', 'Burger Joint', 'Burrito Place', 'Cafeteria', u'Café',
'Cajun / Creole Restaurant', 'Cambodian Restaurant', 'Caribbean Restaurant', 'Caucasian Restaurant',
'Chinese Restaurant', 'Anhui Restaurant', 'Beijing Restaurant', 'Cantonese Restaurant',
'Chinese Aristocrat Restaurant', 'Chinese Breakfast Place', 'Dongbei Restaurant', 'Fujian Restaurant',
'Guizhou Restaurant', 'Hainan Restaurant', 'Hakka Restaurant', 'Henan Restaurant',
'Hong Kong Restaurant', 'Huaiyang Restaurant', 'Hubei Restaurant', 'Hunan Restaurant',
'Imperial Restaurant', 'Jiangsu Restaurant', 'Jiangxi Restaurant', 'Macanese Restaurant',
'Manchu Restaurant', 'Peking Duck Restaurant', 'Shaanxi Restaurant', 'Shandong Restaurant',
'Shanghai Restaurant', 'Shanxi Restaurant', 'Szechuan Restaurant', 'Taiwanese Restaurant',
'Tianjin Restaurant', 'Xinjiang Restaurant', 'Yunnan Restaurant', 'Zhejiang Restaurant', 'Coffee Shop',
'Comfort Food Restaurant', 'Creperie', 'Cuban Restaurant', 'Cupcake Shop', 'Czech Restaurant',
'Deli / Bodega', 'Dessert Shop', 'Dim Sum Restaurant', 'Diner', 'Distillery', 'Donut Shop',
'Dumpling Restaurant', 'Eastern European Restaurant', 'English Restaurant', 'Ethiopian Restaurant',
'Falafel Restaurant', 'Fast Food Restaurant', 'Filipino Restaurant', 'Fish & Chips Shop',
'Fondue Restaurant', 'Food Truck', 'French Restaurant', 'Fried Chicken Joint', 'Gastropub',
'German Restaurant', 'Gluten-free Restaurant', 'Greek Restaurant', 'Bougatsa Shop',
'Cretan Restaurant', 'Fish Taverna', 'Grilled Meat Restaurant', 'Kafenio', 'Magirio',
'Meze Restaurant', 'Modern Greek Restaurant', 'Ouzeri', 'Patsa Restaurant', 'Taverna',
'Tsipouro Restaurant', 'Halal Restaurant', 'Hawaiian Restaurant', 'Himalayan Restaurant',
'Hot Dog Joint', 'Hotpot Restaurant', 'Hungarian Restaurant', 'Ice Cream Shop', 'Indian Restaurant',
'Indonesian Restaurant', 'Acehnese Restaurant', 'Balinese Restaurant', 'Betawinese Restaurant',
'Javanese Restaurant', 'Manadonese Restaurant', 'Meatball Place', 'Padangnese Restaurant',
'Sundanese Restaurant', 'Irish Pub', 'Italian Restaurant', 'Japanese Restaurant', 'Jewish Restaurant',
'Juice Bar', 'Korean Restaurant', 'Kosher Restaurant', 'Latin American Restaurant',
'Empanada Restaurant', 'Mac & Cheese Joint', 'Malaysian Restaurant', 'Mediterranean Restaurant',
'Mexican Restaurant']
category_Food.extend(['Middle Eastern Restaurant', 'Modern European Restaurant', 'Molecular Gastronomy Restaurant',
'Mongolian Restaurant', 'Moroccan Restaurant', 'New American Restaurant', 'Pakistani Restaurant',
'Persian Restaurant', 'Peruvian Restaurant', 'Pie Shop', 'Pizza Place', 'Polish Restaurant',
'Portuguese Restaurant', 'Ramen / Noodle House', 'Restaurant', 'Romanian Restaurant',
'Russian Restaurant', 'Blini House', 'Pelmeni House', 'Salad Place', 'Sandwich Place',
'Scandinavian Restaurant', 'Seafood Restaurant', 'Snack Place', 'Soup Place',
'South American Restaurant', 'Southern / Soul Food Restaurant', 'Souvlaki Shop',
'Spanish Restaurant', 'Paella Restaurant', 'Steakhouse', 'Sushi Restaurant', 'Swiss Restaurant',
'Taco Place', 'Tapas Restaurant', 'Tatar Restaurant', 'Tea Room', 'Thai Restaurant',
'Tibetan Restaurant', 'Turkish Restaurant', 'Borek Place', 'Cigkofte Place', 'Doner Restaurant',
'Gozleme Place', 'Home Cooking Restaurant', 'Kebab Restaurant', 'Kofte Place',
u'Kokoreç Restaurant', 'Manti Place', 'Meyhane', 'Pide Place', 'Ukrainian Restaurant',
'Varenyky restaurant', 'West-Ukrainian Restaurant', 'Vegetarian / Vegan Restaurant',
'Vietnamese Restaurant', 'Winery', 'Wings Joint', 'Frozen Yogurt', 'Friterie',
'Andhra Restaurant', 'Awadhi Restaurant', 'Bengali Restaurant', 'Chaat Place',
'Chettinad Restaurant', 'Dhaba', 'Dosa Place', 'Goan Restaurant', 'Gujarati Restaurant',
'Indian Chinese Restaurant', 'Indian Sweet Shop', 'Irani Cafe', 'Jain Restaurant',
'Karnataka Restaurant', 'Kerala Restaurant', 'Maharashtrian Restaurant', 'Mughlai Restaurant',
'Multicuisine Indian Restaurant', 'North Indian Restaurant', 'Northeast Indian Restaurant',
'Parsi Restaurant', 'Punjabi Restaurant', 'Rajasthani Restaurant', 'South Indian Restaurant',
'Udupi Restaurant', 'Indonesian Meatball Place', 'Abruzzo', 'Turkish Home Cooking Restaurant',
'Sri Lankan Restaurant', 'Veneto Restaurant', 'Umbrian Restaurant', 'Tuscan Restaurant',
'Trentino Restaurant', 'Trattoria/Osteria', 'South Tyrolean Restaurant', 'Sicilian Restaurant',
'Sardinian Restaurant', 'Roman Restaurant', 'Romagna Restaurant', 'Rifugio di Montagna',
'Puglia Restaurant', 'Piedmontese Restaurant', 'Piadineria', 'Molise Restaurant',
'Marche Restaurant', 'Malga', 'Lombard Restaurant', 'Ligurian Restaurant', 'Friuli Restaurant',
'Emilia Restaurant', 'Campanian Restaurant', 'Calabria Restaurant', 'Basilicata Restaurant',
'Aosta Restaurant', 'Agriturismo', 'Abruzzo Restaurant'])
category_Nightlife_Spot = ['Bar', 'Beach Bar', 'Beer Garden', 'Brewery', 'Champagne Bar', 'Cocktail Bar', 'Dive Bar',
'Gay Bar', 'Hookah Bar', 'Hotel Bar', 'Karaoke Bar', 'Lounge', 'Night Market', 'Nightclub',
'Other Nightlife', 'Pub', 'Sake Bar', 'Speakeasy', 'Sports Bar', 'Strip Club', 'Whisky Bar',
'Wine Bar', 'Speakeasy']
category_Outdoors_Recreation = ['Athletics & Sports', 'Badminton Court', 'Baseball Field', 'Basketball Court',
'Bowling Green', 'Golf Course', 'Hockey Field', 'Paintball Field', 'Rugby Pitch',
'Skate Park', 'Skating Rink', 'Soccer Field', 'Sports Club', 'Squash Court',
'Tennis Court', 'Volleyball Court', 'Bath House', 'Bathing Area', 'Beach',
'Nudist Beach', 'Surf Spot', 'Botanical Garden', 'Bridge', 'Campground', 'Castle',
'Cemetery', 'Dive Spot', 'Dog Run', 'Farm', 'Field', 'Fishing Spot', 'Forest', 'Garden',
'Gun Range', 'Harbor / Marina', 'Hot Spring', 'Island', 'Lake', 'Lighthouse',
'Mountain', 'National Park', 'Nature Preserve', 'Other Great Outdoors', 'Palace',
'Park', 'Pedestrian Plaza', 'Playground', 'Plaza', 'Pool', 'Rafting',
'Recreation Center', 'River', 'Rock Climbing Spot', 'Scenic Lookout',
'Sculpture Garden', 'Ski Area', 'Apres Ski Bar', 'Ski Chairlift', 'Ski Chalet',
'Ski Lodge', 'Ski Trail', 'Stables', 'States & Municipalities', 'City', 'County',
'Country', 'Neighborhood', 'State', 'Town', 'Village', 'Summer Camp', 'Trail', 'Tree',
'Vineyard', 'Volcano', 'Well']
category_Professional_Other_Places = ['Animal Shelter', 'Auditorium', 'Building', 'Club House', 'Community Center',
'Convention Center', 'Meeting Room', 'Cultural Center', 'Distribution Center',
'Event Space', 'Factory', 'Fair', 'Funeral Home', 'Government Building',
'Capitol Building', 'City Hall', 'Courthouse', 'Embassy / Consulate',
'Fire Station', 'Monument / Landmark', 'Police Station', 'Town Hall', 'Library',
'Medical Center', 'Acupuncturist', 'Alternative Healer', 'Chiropractor',
"Dentist's Office", "Doctor's Office", 'Emergency Room', 'Eye Doctor',
'Hospital', 'Laboratory', 'Mental Health Office', 'Veterinarian', 'Military Base',
'Non-Profit', 'Office', 'Advertising Agency', 'Campaign Office',
'Conference Room', 'Coworking Space', 'Tech Startup', 'Parking', 'Post Office',
'Prison', 'Radio Station', 'Recruiting Agency', 'School', 'Circus School',
'Driving School', 'Elementary School', 'Flight School', 'High School',
'Language School', 'Middle School', 'Music School', 'Nursery School', 'Preschool',
'Private School', 'Religious School', 'Swim School', 'Social Club',
'Spiritual Center', 'Buddhist Temple', 'Church', 'Hindu Temple', 'Monastery',
'Mosque', 'Prayer Room', 'Shrine', 'Synagogue', 'Temple', 'TV Station',
'Voting Booth', 'Warehouse']
category_Residence = ['Assisted Living', 'Home (private)', 'Housing Development',
'Residential Building (Apartment / Condo)', 'Trailer Park']
category_Shop_Service = ['Construction & Landscape', 'Event Service', 'ATM', 'Adult Boutique', 'Antique Shop',
'Arts & Crafts Store', 'Astrologer', 'Auto Garage', 'Automotive Shop', 'Baby Store', 'Bank',
'Betting Shop', 'Big Box Store', 'Bike Shop', 'Board Shop', 'Bookstore', 'Bridal Shop',
'Camera Store', 'Candy Store', 'Car Dealership', 'Car Wash', 'Carpet Store',
'Check Cashing Service', 'Chocolate Shop', 'Christmas Market', 'Clothing Store',
'Accessories Store', 'Boutique', 'Kids Store', 'Lingerie Store', "Men's Store",
'Shoe Store', "Women's Store", 'Comic Shop', 'Convenience Store', 'Cosmetics Shop',
'Costume Shop', 'Credit Union', 'Daycare', 'Department Store', 'Design Studio',
'Discount Store', 'Dive Shop', 'Drugstore / Pharmacy', 'Dry Cleaner', 'EV Charging Station',
'Electronics Store', 'Fabric Shop', 'Financial or Legal Service', 'Fireworks Store',
'Fishing Store', 'Flea Market', 'Flower Shop', 'Food & Drink Shop', 'Beer Store', 'Butcher',
'Cheese Shop', 'Farmers Market', 'Fish Market', 'Food Court', 'Gourmet Shop', 'Grocery Store',
'Health Food Store', 'Liquor Store', 'Organic Grocery', 'Street Food Gathering', 'Supermarket',
'Wine Shop', 'Frame Store', 'Fruit & Vegetable Store', 'Furniture / Home Store', 'Gaming Cafe',
'Garden Center', 'Gas Station / Garage', 'Gift Shop', 'Gun Shop', 'Gym / Fitness Center',
'Boxing Gym', 'Climbing Gym', 'Cycle Studio', 'Gym Pool', 'Gymnastics Gym', 'Gym',
'Martial Arts Dojo', 'Track', 'Yoga Studio', 'Hardware Store', 'Herbs & Spices Store',
'Hobby Shop', 'Hunting Supply', 'IT Services', 'Internet Cafe', 'Jewelry Store',
'Knitting Store', 'Laundromat', 'Laundry Service', 'Lawyer', 'Leather Goods Store',
'Locksmith', 'Lottery Retailer', 'Luggage Store', 'Mall', 'Marijuana Dispensary', 'Market',
'Massage Studio', 'Mattress Store', 'Miscellaneous Shop', 'Mobile Phone Shop',
'Motorcycle Shop', 'Music Store', 'Nail Salon', 'Newsstand', 'Optical Shop',
'Other Repair Shop', 'Outdoor Supply Store', 'Outlet Store', 'Paper / Office Supplies Store',
'Pawn Shop', 'Perfume Shop', 'Pet Service', 'Pet Store', 'Photography Lab', 'Piercing Parlor',
'Pop-Up Shop', 'Print Shop', 'Real Estate Office', 'Record Shop', 'Recording Studio',
'Recycling Facility', 'Salon / Barbershop', 'Shipping Store', 'Shoe Repair', 'Smoke Shop',
'Smoothie Shop', 'Souvenir Shop', 'Spa', 'Sporting Goods Shop', 'Stationery Store',
'Storage Facility', 'Tailor Shop', 'Tanning Salon', 'Tattoo Parlor', 'Thrift / Vintage Store',
'Toy / Game Store', 'Travel Agency', 'Used Bookstore', 'Video Game Store', 'Video Store',
'Warehouse Store', 'Watch Repair Shop']
category_Travel_Transport = ['Cruise', 'Metro Station', 'Transportation Service', 'Airport', 'Airport Food Court',
'Airport Gate', 'Airport Lounge', 'Airport Terminal', 'Airport Tram', 'Plane',
'Bike Rental / Bike Share', 'Boat or Ferry', 'Border Crossing', 'Bus Station', 'Bus Line',
'Bus Stop', 'Cable Car', 'General Travel', 'Hotel', 'Bed & Breakfast', 'Boarding House',
'Hostel', 'Hotel Pool', 'Motel', 'Resort', 'Roof Deck', 'Intersection', 'Light Rail',
'Moving Target', 'Pier', 'RV Park', 'Rental Car Location', 'Rest Area', 'Road', 'Street',
'Subway', 'Taxi Stand', 'Taxi', 'Toll Booth', 'Toll Plaza', 'Tourist Information Center',
'Train Station', 'Platform', 'Train', 'Tram', 'Travel Lounge', 'Tunnel']
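# Hedged sketch, not in the original crawler: one plausible way the lists
# above are consumed -- collapsing a raw Foursquare category name into its
# top-level group. The group labels and the helper below are illustrative
# assumptions; further groups (e.g. Arts & Entertainment) may be defined
# earlier in this file.
_CATEGORY_GROUPS = {
    'Food': category_Food,
    'Nightlife Spot': category_Nightlife_Spot,
    'Outdoors & Recreation': category_Outdoors_Recreation,
    'Professional & Other Places': category_Professional_Other_Places,
    'Residence': category_Residence,
    'Shop & Service': category_Shop_Service,
    'Travel & Transport': category_Travel_Transport,
}

def top_level_category(name):
    """Return the top-level group for a raw category name, or None."""
    for group, members in _CATEGORY_GROUPS.items():
        if name in members:
            return group
    return None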
# note: SSL certificate validation is disabled for this shared HTTP client
h = httplib2.Http(disable_ssl_certificate_validation=True)

def get_raw_info(url):
    """GET url and return the raw response body.

    Returns -1 on a non-200 HTTP status, or -2 once AUTO_RECONNECT_TIMES
    consecutive connection errors have occurred.
    """
    success = 0
    retry = 0
    content = -1
    while success == 0:
        try:
            resp, content = h.request(url, "GET")
            success = 1
            if resp['status'] != '200':
                return -1
        except Exception:
            # transient network failure: back off and retry
            time.sleep(3)
            retry += 1
            if retry == AUTO_RECONNECT_TIMES:
                return -2
    return content
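# Hedged usage sketch (illustrative, not from the original file): the URL
# and the error handling below are assumptions about how get_raw_info() is
# driven by the crawler's calling code.
#
#   body = get_raw_info('https://api.foursquare.com/v2/venues/search?...')
#   if body == -1:
#       pass  # non-200 HTTP status
#   elif body == -2:
#       pass  # gave up after AUTO_RECONNECT_TIMES connection errors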
|
chenyang03/Foursquare_Crawler
|
foursq_utils.py
|
Python
|
mit
| 18,877
|
[
"CASINO"
] |
403d880426f16c6afb0a06cb67c5880b1db034e69778e9f8e414ae22b8eb9083
|
"""
Windowed operators that can be expressed as convolutions
"""
import numpy as np
from .window import OpWindow
class OpFilter(OpWindow):
"""
base class for filter operations
subclasses provide the get_filter() method
"""
    @classmethod
    def apply_window_function(cls, input_array, window_size, output_array):
        # 'valid' convolution: output has len(input_array) - window_size + 1
        # entries, one per fully covered window
        filter_ = cls.get_filter(window_size)
        output_array[:] = np.convolve(input_array, filter_, mode='valid')
@classmethod
def get_filter(cls, window_size):
"""
return a filter of shape (window_size,)
"""
raise NotImplementedError()
class OpMean(OpFilter):
"""
mean value over window size
m_k = \\frac{1}{w}\\sum_{i=1}^w x_{k-i+1}
"""
@classmethod
def get_filter(cls, window):
return np.ones((window,), dtype=np.float32)/window
class OpLinearWeightedMean(OpFilter):
"""
    linearly weighted mean over window size (newest sample weighted most)
    m_k = \\frac{2}{w(w+1)}\\sum_{i=1}^w (w-i+1)\\,x_{k-i+1}
"""
@classmethod
def get_filter(cls, window):
filter_ = np.arange(window, dtype=np.float32)
filter_ = window - filter_
filter_ *= 2.0/(window*(window+1))
return filter_
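# Illustration (not in the original module): for window = 4 the kernel is
# [4, 3, 2, 1] * 2/(4*5) = [0.4, 0.3, 0.2, 0.1]. np.convolve flips the
# kernel, so the newest sample in each window receives the largest weight.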
class OpExponentialFilter(OpFilter):
"""
exponential filter
parameter \\lambda is chosen such that 99% of weight is inside filter
"""
@classmethod
def get_filter(cls, window):
# F(x) = 1 - exp(-lambda*x) >= .99
# lambda >= -log(0.01)/x
lambda_ = -np.log(0.01)/window
filter_ = np.arange(window, dtype=np.float32)
filter_ = lambda_ * np.exp(-lambda_ * filter_)
filter_ /= filter_.sum()
return filter_
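# Sanity check (illustrative): with lambda = -ln(0.01)/w, the continuous
# tail mass beyond the window is exp(-lambda*w) = 0.01, i.e. ~99% of the
# exponential weight lies inside the window before normalization.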
class OpGaussianSmoothing(OpFilter):
"""
gaussian smoothing with a radius of (window - 1) / 2
"""
@classmethod
def get_filter(cls, window):
assert window % 2 == 1,\
"window size for gaussian kernel must be odd"
        radius = (window - 1) / 2.0
        sigma = radius / 3  # window edge sits at 3*sigma (~99.7% of mass)
radius_range = np.linspace(-radius, radius, window)
filter_ = np.exp(-radius_range**2/(2*sigma**2))
filter_ /= filter_.sum()
return filter_
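# Hedged usage sketch, not part of the original module: OpWindow (imported
# from .window) is not reproduced here, so this demo exercises only the
# pure-NumPy classmethods defined above.
if __name__ == "__main__":
    x = np.arange(10, dtype=np.float32)
    window = 5
    # 'valid' convolution shortens the signal to len(x) - window + 1
    out = np.empty(x.size - window + 1, dtype=np.float32)
    OpMean.apply_window_function(x, window, out)
    print(out)                                      # [2. 3. 4. 5. 6. 7.]
    print(OpLinearWeightedMean.get_filter(4))       # ~[0.4 0.3 0.2 0.1]
    print(OpExponentialFilter.get_filter(5).sum())  # 1.0 (normalized)
    print(OpGaussianSmoothing.get_filter(5))        # symmetric, sums to 1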
|
burgerdev/hostload
|
tsdl/features/filter.py
|
Python
|
mit
| 2,189
|
[
"Gaussian"
] |
a2fe30722c8b1f9b71ba5c5f6919807e6553404b9e0f38220e4dec634ce7669e
|