text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
""" Given the FTPdetectinfo file (assuming FF files are available) and the stddev of Gaussian PSF of the image,
correct the magnitudes and levels in the file for saturation. """
from __future__ import print_function, division, absolute_import
import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
from RMS.Formats.FFfile import validFFName
from RMS.Formats.FFfile import read as readFF
from RMS.Formats.FTPdetectinfo import readFTPdetectinfo, writeFTPdetectinfo
from RMS.Routines.Image import thickLine, loadFlat, applyFlat
from Utils.SaturationSimulation import findUnsaturatedMagnitude
if __name__ == "__main__":

    import argparse

    ### COMMAND LINE ARGUMENTS

    # Init the command line arguments parser
    arg_parser = argparse.ArgumentParser(description="Correct the magnitudes in the FTPdetectinfo file for saturation.")

    arg_parser.add_argument('ftpdetectinfo_path', nargs=1, metavar='DIR_PATH', type=str, \
        help='Path to the FTPdetectinfo file.')

    arg_parser.add_argument('psf_sigma', nargs=1, metavar='PSF_SIGMA', type=float, \
        help='Standard deviation of the Gaussian PSF in pixels.')

    arg_parser.add_argument('-s', '--satlvl', metavar='SATURATION_LEVEL', type=int, \
        help="Saturation level. 255 by default.", default=255)

    arg_parser.add_argument('-f', '--flat', metavar='FLAT', type=str, \
        help="Path to the flat frame.")

    # Parse the command line arguments
    cml_args = arg_parser.parse_args()

    #########################

    # Read command line arguments
    ftpdetectinfo_path = cml_args.ftpdetectinfo_path[0]
    dir_path, ftpdetectinfo_name = os.path.split(ftpdetectinfo_path)
    gauss_sigma = cml_args.psf_sigma[0]
    saturation_lvl = cml_args.satlvl

    # Load meteor data from FTPdetectinfo
    cam_code, fps, meteor_list = readFTPdetectinfo(dir_path, ftpdetectinfo_name, ret_input_format=True)

    # Load the flat, if given
    flat = None
    if cml_args.flat:
        flat = loadFlat(*os.path.split(cml_args.flat))

    corrected_meteor_list = []

    # Go through every meteor entry in the FTPdetectinfo file
    for entry in meteor_list:

        ftp_ff_name, meteor_No, rho, phi, meteor_meas = entry

        # BUGFIX: initialize before the FF search, so that a missing FF file does
        # not cause a NameError below; the uncorrected measurements are kept then.
        corrected_meteor_meas = []

        # Find the matching FF file in the directory
        for ff_name in sorted(os.listdir(dir_path)):

            # Reject all non-FF files
            if not validFFName(ff_name):
                continue

            # Reject all FF files which do not match the name in the FTPdetectinfo
            if ff_name != ftp_ff_name:
                continue

            print('Correcting for saturation:', ff_name)

            # Load the FF file
            ff = readFF(dir_path, ff_name)

            # Apply the flat to avepixel, if a flat was given
            if flat is not None:
                avepixel = applyFlat(ff.avepixel, flat)
            else:
                avepixel = ff.avepixel

            # Compute the angular velocity (px/frame) from the first and last centroids
            first_centroid = meteor_meas[0]
            last_centroid = meteor_meas[-1]
            frame1, x1, y1 = first_centroid[:3]
            frame2, x2, y2 = last_centroid[:3]

            px_fm = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)/float(frame2 - frame1)

            print('Ang vel:', px_fm*fps, 'px/s')

            print('Frame, App mag, Corr mag, Background')

            # Go though all meteor centroids
            for line in meteor_meas:

                frame_n, x, y, ra, dec, azim, elev, inten, mag = line

                # Compute the photometric offset
                photom_offset = mag + 2.5*np.log10(inten)

                ### Compute the background intensity value behind the meteor ###

                # Get the mask for the background as a 3 sigma streak around the meteor, but using avepixel.
                # BUGFIX: np.bool was removed in NumPy 1.24, use the builtin bool instead.
                mask = thickLine(avepixel.shape[0], avepixel.shape[1], x, y, px_fm, phi - 90, \
                    3*gauss_sigma).astype(bool)

                img = np.ma.masked_array(avepixel, ~mask)
                bg_val = np.ma.median(img)

                ### ###

                # Find the unsaturated magnitude
                unsaturated_mag = findUnsaturatedMagnitude(mag, photom_offset, bg_val, fps, px_fm*fps,
                    gauss_sigma, saturation_point=saturation_lvl)

                print("{:5.1f}, {:7.2f}, {:8.2f}, {:10.1f}".format(frame_n, mag, unsaturated_mag, bg_val))

                # Compute the intensity from the unsaturated magnitude.
                # BUGFIX: use unsaturated_mag, not the apparent mag — using mag just
                # recomputes the original (saturated) intensity.
                unsaturated_inten = round(10**((photom_offset - unsaturated_mag)/2.5), 0)

                corrected_meteor_meas.append([frame_n, x, y, ra, dec, azim, elev, unsaturated_inten,
                    unsaturated_mag])

        # If no matching FF file was found, keep the original measurements
        if not corrected_meteor_meas:
            corrected_meteor_meas = meteor_meas

        corrected_meteor_list.append([ftp_ff_name, meteor_No, rho, phi, corrected_meteor_meas])

    # Calibration string to be written to the FTPdetectinfo file
    calib_str = "RMS - Saturation corrected on {:s} UTC".format(str(datetime.datetime.utcnow()))

    # Name of the corrected FTPdetectinfo file.
    # BUGFIX: str.strip('.txt') strips any of the characters '.', 't', 'x' from both
    # ends (e.g. 'test.txt' -> 'es'); remove the extension explicitly instead.
    if ftpdetectinfo_name.endswith('.txt'):
        base_name = ftpdetectinfo_name[:-len('.txt')]
    else:
        base_name = ftpdetectinfo_name

    corrected_ftpdetectinfo_name = base_name + '_saturation_corrected.txt'

    print('Saving to:', os.path.join(dir_path, corrected_ftpdetectinfo_name))

    # Write a corrected FTPdetectinfo file
    writeFTPdetectinfo(corrected_meteor_list, dir_path, corrected_ftpdetectinfo_name, dir_path, cam_code, \
        fps, calibration=calib_str, celestial_coords_given=True)
| apevec/RMS | Utils/SaturationCorrection.py | Python | gpl-3.0 | 5,479 | [
"Gaussian"
] | 7ca594de670b909f01749a375525277578e17f2cb7844f74d0994d29c35423a0 |
"""
Class for outlier detection.
This class provides a framework for outlier detection. It consists in
several methods that can be added to a covariance estimator in order to
assess the outlying-ness of the observations of a data set.
Such a "outlier detector" object is proposed constructed from a robust
covariance estimator (the Minimum Covariance Determinant).
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
import scipy as sp
from . import MinCovDet
from ..utils.validation import check_is_fitted, check_array
from ..metrics import accuracy_score
class EllipticEnvelope(MinCovDet):
    """An object for detecting outliers in a Gaussian distributed dataset.

    Read more in the :ref:`User Guide <outlier_detection>`.

    Parameters
    ----------
    store_precision : boolean, optional (default=True)
        Specify if the estimated precision is stored.

    assume_centered : boolean, optional (default=False)
        If True, the support of robust location and covariance estimates
        is computed, and a covariance estimate is recomputed from it,
        without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.

    support_fraction : float in (0., 1.), optional (default=None)
        The proportion of points to be included in the support of the raw
        MCD estimate. If None, the minimum value of support_fraction will
        be used within the algorithm: `[n_sample + n_features + 1] / 2`.

    contamination : float in (0., 0.5), optional (default=0.1)
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set.

    Attributes
    ----------
    location_ : array-like, shape (n_features,)
        Estimated robust location

    covariance_ : array-like, shape (n_features, n_features)
        Estimated robust covariance matrix

    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute the
        robust estimates of location and shape.

    See Also
    --------
    EmpiricalCovariance, MinCovDet

    Notes
    -----
    Outlier detection from covariance estimation may break or not
    perform well in high-dimensional settings. In particular, one will
    always take care to work with ``n_samples > n_features ** 2``.

    References
    ----------
    .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the minimum
        covariance determinant estimator" Technometrics 41(3), 212 (1999)
    """
    def __init__(self, store_precision=True, assume_centered=False,
                 support_fraction=None, contamination=0.1,
                 random_state=None):
        # Robust location/covariance estimation is delegated to MinCovDet;
        # `contamination` only affects the decision threshold set in fit().
        super(EllipticEnvelope, self).__init__(
            store_precision=store_precision,
            assume_centered=assume_centered,
            support_fraction=support_fraction,
            random_state=random_state)
        self.contamination = contamination

    def fit(self, X, y=None):
        """Fit the EllipticEnvelope model to X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : (ignored)
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            The fitted estimator.
        """
        super(EllipticEnvelope, self).fit(X)
        # threshold_ is the Mahalanobis distance (from self.dist_, computed by
        # MinCovDet.fit) below which a fraction (1 - contamination) of the
        # training observations fall.
        self.threshold_ = sp.stats.scoreatpercentile(
            self.dist_, 100. * (1. - self.contamination))
        return self

    def decision_function(self, X, raw_values=False):
        """Compute the decision function of the given observations.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        raw_values : bool
            Whether or not to consider raw Mahalanobis distances as the
            decision function. Must be False (default) for compatibility
            with the others outlier detection tools.

        Returns
        -------
        decision : array-like, shape (n_samples, )
            Decision function of the samples.
            It is equal to the Mahalanobis distances if `raw_values`
            is True. By default (``raw_values=False``), it is equal
            to the cubic root of the shifted Mahalanobis distances.
            In that case, the threshold for being an outlier is 0, which
            ensures a compatibility with other outlier detection tools
            such as the One-Class SVM.
        """
        check_is_fitted(self, 'threshold_')
        X = check_array(X)
        mahal_dist = self.mahalanobis(X)
        if raw_values:
            decision = mahal_dist
        else:
            # NOTE(review): 0.33 is used here as an approximation of the cube
            # root (exponent 1/3) described in the docstring.
            transformed_mahal_dist = mahal_dist ** 0.33
            decision = self.threshold_ ** 0.33 - transformed_mahal_dist

        return decision

    def predict(self, X):
        """Outlyingness of observations in X according to the fitted model.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)

        Returns
        -------
        is_inlier : array, shape = (n_samples, ), dtype = int
            +1 for each observation considered an inlier according to the
            fitted model, -1 for outliers.
        """
        check_is_fitted(self, 'threshold_')
        X = check_array(X)
        # Start with every observation marked as an outlier (-1), then flip
        # those within the fitted threshold to inliers (+1).
        is_inlier = -np.ones(X.shape[0], dtype=int)
        if self.contamination is not None:
            # Raw Mahalanobis distances are compared against the (raw) threshold_.
            values = self.decision_function(X, raw_values=True)
            is_inlier[values <= self.threshold_] = 1
        else:
            raise NotImplementedError("You must provide a contamination rate.")

        return is_inlier

    def score(self, X, y, sample_weight=None):
        """Returns the mean accuracy on the given test data and labels.

        In multi-label classification, this is the subset accuracy
        which is a harsh metric since you require for each sample that
        each label set be correctly predicted.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.

        y : array-like, shape = (n_samples,) or (n_samples, n_outputs)
            True labels for X.

        sample_weight : array-like, shape = (n_samples,), optional
            Sample weights.

        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) wrt. y.
        """
        return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
| ldirer/scikit-learn | sklearn/covariance/outlier_detection.py | Python | bsd-3-clause | 6,604 | [
"Gaussian"
] | fc8ec396cc4a1148aac228a34b21ef387b8be823f63a09932bf91dec8c5b7b35 |
"""Use Hydra to detect structural variation using discordant read pairs.
Hydra: http://code.google.com/p/hydra-sv/
Pipeline: http://code.google.com/p/hydra-sv/wiki/TypicalWorkflow
"""
import os
import copy
import collections
import subprocess
import pysam
from bcbio import utils, broad
from bcbio.pipeline.alignment import align_to_sort_bam
from bcbio.pipeline import lane
from bcbio.distributed.transaction import file_transaction
from bcbio.structural import shared
## Prepare alignments to identify discordant pair mappings
def select_unaligned_read_pairs(in_bam, extra, out_dir, config):
"""Retrieve unaligned read pairs from input alignment BAM, as two fastq files.
"""
runner = broad.runner_from_config(config)
base, ext = os.path.splitext(os.path.basename(in_bam))
nomap_bam = os.path.join(out_dir, "{}-{}{}".format(base, extra, ext))
if not utils.file_exists(nomap_bam):
with file_transaction(nomap_bam) as tx_out:
runner.run("FilterSamReads", [("INPUT", in_bam),
("OUTPUT", tx_out),
("EXCLUDE_ALIGNED", "true"),
("WRITE_READS_FILES", "false"),
("SORT_ORDER", "queryname")])
has_reads = False
with pysam.Samfile(nomap_bam, "rb") as in_pysam:
for read in in_pysam:
if read.is_paired:
has_reads = True
break
if has_reads:
out_fq1, out_fq2 = ["{}-{}.fq".format(os.path.splitext(nomap_bam)[0], i) for i in [1, 2]]
runner.run_fn("picard_bam_to_fastq", nomap_bam, out_fq1, out_fq2)
return out_fq1, out_fq2
else:
return None, None
def remove_nopairs(in_bam, out_dir, config):
"""Remove any reads without both pairs present in the file.
"""
runner = broad.runner_from_config(config)
out_bam = os.path.join(out_dir, "{}-safepair{}".format(*os.path.splitext(os.path.basename(in_bam))))
if not utils.file_exists(out_bam):
read_counts = collections.defaultdict(int)
with pysam.Samfile(in_bam, "rb") as in_pysam:
for read in in_pysam:
if read.is_paired:
read_counts[read.qname] += 1
with pysam.Samfile(in_bam, "rb") as in_pysam:
with file_transaction(out_bam) as tx_out_bam:
with pysam.Samfile(tx_out_bam, "wb", template=in_pysam) as out_pysam:
for read in in_pysam:
if read_counts[read.qname] == 2:
out_pysam.write(read)
return runner.run_fn("picard_sort", out_bam, "queryname")
def tiered_alignment(in_bam, tier_num, multi_mappers, extra_args,
genome_build, pair_stats,
work_dir, dirs, config):
"""Perform the alignment of non-mapped reads from previous tier.
"""
nomap_fq1, nomap_fq2 = select_unaligned_read_pairs(in_bam, "tier{}".format(tier_num),
work_dir, config)
if nomap_fq1 is not None:
base_name = "{}-tier{}out".format(os.path.splitext(os.path.basename(in_bam))[0],
tier_num)
config = copy.deepcopy(config)
dirs = copy.deepcopy(dirs)
config["algorithm"]["bam_sort"] = "queryname"
config["algorithm"]["multiple_mappers"] = multi_mappers
config["algorithm"]["extra_align_args"] = ["-i", int(pair_stats["mean"]),
int(pair_stats["std"])] + extra_args
out_bam, ref_file = align_to_sort_bam(nomap_fq1, nomap_fq2,
lane.rg_names(base_name, base_name, config),
genome_build, "novoalign",
dirs, config,
dir_ext=os.path.join("hydra", os.path.split(nomap_fq1)[0]))
return out_bam
else:
return None
## Run hydra to identify structural variation breakpoints
@utils.memoize_outfile(ext=".bed")
def convert_bam_to_bed(in_bam, out_file):
"""Convert BAM to bed file using BEDTools.
"""
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
subprocess.check_call(["bamToBed", "-i", in_bam, "-tag", "NM"],
stdout=out_handle)
return out_file
@utils.memoize_outfile(ext="-pair.bed")
def pair_discordants(in_bed, pair_stats, out_file):
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
subprocess.check_call(["pairDiscordants.py", "-i", in_bed,
"-m", "hydra",
"-z", str(int(pair_stats["median"]) +
10 * int(pair_stats["mad"]))],
stdout=out_handle)
return out_file
@utils.memoize_outfile(ext="-dedup.bed")
def dedup_discordants(in_bed, out_file):
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
subprocess.check_call(["dedupDiscordants.py", "-i", in_bed, "-s", "3"],
stdout=out_handle)
return out_file
def run_hydra(in_bed, pair_stats):
base_out = "{}-hydra.breaks".format(os.path.splitext(in_bed)[0])
final_file = "{}.final".format(base_out)
if not utils.file_exists(final_file):
subprocess.check_call(["hydra", "-in", in_bed, "-out", base_out,
"-ms", "1", "-li",
"-mld", str(int(pair_stats["mad"]) * 10),
"-mno", str(int(pair_stats["median"]) +
20 * int(pair_stats["mad"]))])
return final_file
def hydra_breakpoints(in_bam, pair_stats):
"""Detect structural variation breakpoints with hydra.
"""
in_bed = convert_bam_to_bed(in_bam)
if os.path.getsize(in_bed) > 0:
pair_bed = pair_discordants(in_bed, pair_stats)
dedup_bed = dedup_discordants(pair_bed)
return run_hydra(dedup_bed, pair_stats)
else:
return None
## Top level organizational code
def detect_sv(align_bam, genome_build, dirs, config):
"""Detect structural variation from discordant aligned pairs.
"""
work_dir = utils.safe_makedir(os.path.join(dirs["work"], "structural"))
pair_stats = shared.calc_paired_insert_stats(align_bam)
fix_bam = remove_nopairs(align_bam, work_dir, config)
tier2_align = tiered_alignment(fix_bam, "2", True, [],
genome_build, pair_stats,
work_dir, dirs, config)
if tier2_align:
tier3_align = tiered_alignment(tier2_align, "3", "Ex 1100", ["-t", "300"],
genome_build, pair_stats,
work_dir, dirs, config)
if tier3_align:
hydra_bps = hydra_breakpoints(tier3_align, pair_stats)
| a113n/bcbio-nextgen | bcbio/structural/hydra.py | Python | mit | 7,180 | [
"pysam"
] | c4f7195800aca57c26b127485e0ad9cc121005f72daa770e078c2d117f945b84 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# smc.py: Sequential Monte Carlo module
##
# © 2012 Chris Ferrie (csferrie@gmail.com) and
# Christopher E. Granade (cgranade@gmail.com)
#
# This file is a part of the Qinfer project.
# Licensed under the AGPL version 3.
##
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## FEATURES ###################################################################
from __future__ import absolute_import
from __future__ import division, unicode_literals
## ALL ########################################################################
# We use __all__ to restrict what globals are visible to external modules.
__all__ = [
'SMCUpdater',
'SMCUpdaterBCRB',
'MixedApproximateSMCUpdater'
]
## IMPORTS ####################################################################
from builtins import map, zip
import warnings
import numpy as np
# from itertools import zip
from scipy.spatial import ConvexHull, Delaunay
import scipy.linalg as la
import scipy.stats
import scipy.interpolate
from scipy.ndimage.filters import gaussian_filter1d
from qinfer.abstract_model import DifferentiableModel
from qinfer.metrics import rescaled_distance_mtx
from qinfer.distributions import ParticleDistribution
import qinfer.resamplers
import qinfer.clustering
import qinfer.metrics
from qinfer.utils import outer_product, mvee, uniquify, format_uncertainty, \
in_ellipsoid
from qinfer._exceptions import ApproximationWarning, ResamplerWarning
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn("Could not import pyplot. Plotting methods will not work.")
plt = None
try:
import mpltools.special as mpls
except:
# Don't even warn in this case.
mpls = None
## LOGGING ####################################################################
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
## CLASSES #####################################################################
class SMCUpdater(ParticleDistribution):
r"""
Creates a new Sequential Monte carlo updater, using the algorithm of
[GFWC12]_.
:param Model model: Model whose parameters are to be inferred.
:param int n_particles: The number of particles to be used in the particle approximation.
:param Distribution prior: A representation of the prior distribution.
:param callable resampler: Specifies the resampling algorithm to be used. See :ref:`resamplers`
for more details.
:param float resample_thresh: Specifies the threshold for :math:`N_{\text{ess}}` to decide when to resample.
:param bool debug_resampling: If `True`, debug information will be
generated on resampling performance, and will be written to the
standard Python logger.
:param bool track_resampling_divergence: If true, then the divergences
between the pre- and post-resampling distributions are tracked and
recorded in the ``resampling_divergences`` attribute.
:param str zero_weight_policy: Specifies the action to be taken when the
particle weights would all be set to zero by an update.
One of ``["ignore", "skip", "warn", "error", "reset"]``.
:param float zero_weight_thresh: Value to be used when testing for the
zero-weight condition.
:param bool canonicalize: If `True`, particle locations will be updated
to canonical locations as described by the model class after each
prior sampling and resampling.
"""
    def __init__(self,
            model, n_particles, prior,
            resample_a=None, resampler=None, resample_thresh=0.5,
            debug_resampling=False,
            track_resampling_divergence=False,
            zero_weight_policy='error', zero_weight_thresh=None,
            canonicalize=True
            ):
        """
        Initializes the updater and draws the initial particle cloud from the
        prior. See the class docstring for parameter descriptions.
        """
        # Start with an empty particle cloud; reset() at the end of this
        # constructor draws the actual particles from the prior.
        super(SMCUpdater, self).__init__(
            particle_locations=np.zeros((0, model.n_modelparams)),
            particle_weights=np.zeros((0,))
        )
        # Initialize metadata on resampling performance.
        self._resample_count = 0
        # The minimum observed ESS starts at n_particles (its maximum possible value).
        self._min_n_ess = n_particles

        self.model = model
        self.prior = prior

        # Record whether we are to canonicalize or not.
        self._canonicalize = bool(canonicalize)

        ## RESAMPLER CONFIGURATION ##
        # Backward compatibility with the old resample_a keyword argument,
        # which assumed that the Liu and West resampler was being used.
        self._debug_resampling = debug_resampling
        if resample_a is not None:
            warnings.warn("The 'resample_a' keyword argument is deprecated; use 'resampler=LiuWestResampler(a)' instead.", DeprecationWarning)
            if resampler is not None:
                raise ValueError("Both a resample_a and an explicit resampler were provided; please provide only one.")
            self.resampler = qinfer.resamplers.LiuWestResampler(a=resample_a)
        else:
            # Default to the Liu-West resampler when none is given.
            if resampler is None:
                self.resampler = qinfer.resamplers.LiuWestResampler(default_n_particles=n_particles)
            else:
                self.resampler = resampler

        self.resample_thresh = resample_thresh

        # Initialize properties to hold information about the history.
        self._just_resampled = False
        self._data_record = []
        self._normalization_record = []
        self._resampling_divergences = [] if track_resampling_divergence else None

        self._zero_weight_policy = zero_weight_policy
        # Default zero-weight threshold: 10 machine epsilons.
        self._zero_weight_thresh = (
            zero_weight_thresh
            if zero_weight_thresh is not None else
            10 * np.spacing(1)
        )

        ## PARTICLE INITIALIZATION ##
        self.reset(n_particles)
    ## PROPERTIES #############################################################

    @property
    def resample_count(self):
        """
        Returns the number of times that the updater has resampled the particle
        approximation.

        :type: `int`
        """
        # We wrap this in a property to prevent external resetting and to enable
        # a docstring.
        return self._resample_count

    @property
    def just_resampled(self):
        """
        `True` if and only if there has been no data added since the last
        resampling, or if there has not yet been a resampling step.

        :type: `bool`
        """
        return self._just_resampled

    @property
    def normalization_record(self):
        """
        Returns the normalization record: the normalization constant of each
        Bayes update performed so far.

        :type: `float`
        """
        # We wrap this in a property to prevent external resetting and to enable
        # a docstring.
        return self._normalization_record

    @property
    def log_total_likelihood(self):
        """
        Returns the log-likelihood of all the data collected so far.

        Equivalent to::

            np.sum(np.log(updater.normalization_record))

        :type: `float`
        """
        return np.sum(np.log(self.normalization_record))

    @property
    def min_n_ess(self):
        """
        Returns the smallest effective sample size (ESS) observed in the
        history of this updater.

        :type: `float`
        :return: The minimum of observed effective sample sizes as
            reported by :attr:`~qinfer.SMCUpdater.n_ess`.
        """
        return self._min_n_ess

    @property
    def data_record(self):
        """
        List of outcomes given to :meth:`~SMCUpdater.update`.

        :type: `list` of `int`
        """
        # We use [:] to force a new list to be made, decoupling
        # this property from the caller.
        return self._data_record[:]

    @property
    def resampling_divergences(self):
        """
        List of KL divergences between the pre- and post-resampling
        distributions, if that is being tracked. Otherwise, `None`.

        :type: `list` of `float` or `None`
        """
        return self._resampling_divergences
## PRIVATE METHODS ########################################################
def _maybe_resample(self):
"""
Checks the resample threshold and conditionally resamples.
"""
ess = self.n_ess
if ess <= 10:
warnings.warn(
"Extremely small n_ess encountered ({}). "
"Resampling is likely to fail. Consider adding particles, or "
"resampling more often.".format(ess),
ApproximationWarning
)
if ess < self.n_particles * self.resample_thresh:
self.resample()
pass
    ## INITIALIZATION METHODS #################################################

    def reset(self, n_particles=None, only_params=None, reset_weights=True):
        """
        Causes all particle locations and weights to be drawn fresh from the
        initial prior.

        :param int n_particles: Forces the size of the new particle set. If
            `None`, the size of the particle set is not changed.
        :param slice only_params: Resets only some of the parameters. Cannot
            be set if ``n_particles`` is also given.
        :param bool reset_weights: Resets the weights as well as the particles.
        """
        # Particles are stored using two arrays, particle_locations and
        # particle_weights, such that:
        #
        # particle_locations[idx_particle, idx_modelparam] is the idx_modelparam
        #     parameter of the particle idx_particle.
        # particle_weights[idx_particle] is the weight of the particle
        #     idx_particle.

        # Resizing the cloud while resetting only a parameter subset is
        # contradictory, so forbid it.
        if n_particles is not None and only_params is not None:
            raise ValueError("Cannot set both n_particles and only_params.")

        if n_particles is None:
            n_particles = self.n_particles

        if reset_weights:
            # Uniform weights over the (possibly resized) particle set.
            self.particle_weights = np.ones((n_particles,)) / n_particles

        if only_params is None:
            sl = np.s_[:, :]
            # Might as well make a new array if we're resetting everything.
            self.particle_locations = np.zeros((n_particles, self.model.n_modelparams))
        else:
            # Only the selected model-parameter columns are overwritten below.
            sl = np.s_[:, only_params]

        self.particle_locations[sl] = self.prior.sample(n=n_particles)[sl]

        # Since this changes particle positions, we must recanonicalize.
        if self._canonicalize:
            self.particle_locations[sl] = self.model.canonicalize(self.particle_locations[sl])
    ## UPDATE METHODS #########################################################

    def hypothetical_update(self, outcomes, expparams, return_likelihood=False, return_normalization=False):
        """
        Produces the particle weights for the posterior of a hypothetical
        experiment, without modifying the updater's state.

        :param outcomes: Integer index of the outcome of the hypothetical
            experiment.
        :type outcomes: int or an ndarray of dtype int.
        :param numpy.ndarray expparams: Experiments to be used for the hypothetical
            updates.
        :param bool return_likelihood: If `True`, the likelihood array is also
            returned.
        :param bool return_normalization: If `True`, the normalization scale is
            also returned.

        :return: The normalized posterior weights, of shape
            ``(n_outcomes, n_expparams, n_particles)``, optionally followed by
            the likelihood array and/or the normalization scale, depending on
            the two flags above.
        """
        # It's "hypothetical", don't want to overwrite old weights yet!
        weights = self.particle_weights
        locs = self.particle_locations

        # Check if we have a single outcome or an array. If we only have one
        # outcome, wrap it in a one-index array.
        if not isinstance(outcomes, np.ndarray):
            outcomes = np.array([outcomes])

        # update the weights sans normalization
        # Rearrange so that likelihoods have shape (outcomes, experiments, models).
        # This makes the multiplication with weights (shape (models,)) make sense,
        # since NumPy broadcasting rules align on the right-most index.
        L = self.model.likelihood(outcomes, locs, expparams).transpose([0, 2, 1])
        hyp_weights = weights * L

        # Sum up the weights to find the renormalization scale.
        norm_scale = np.sum(hyp_weights, axis=2)[..., np.newaxis]

        # As a special case, check whether any entries of the norm_scale
        # are zero. If this happens, that implies that all of the weights are
        # zero--- that is, that the hypothicized outcome was impossible.
        # Conditioned on an impossible outcome, all of the weights should be
        # zero. To allow this to happen without causing a NaN to propagate,
        # we forcibly set the norm_scale to 1, so that the weights will
        # all remain zero.
        #
        # We don't actually want to propagate this out to the caller, however,
        # and so we save the "fixed" norm_scale to a new array.
        fixed_norm_scale = norm_scale.copy()
        fixed_norm_scale[np.abs(norm_scale) < np.spacing(1)] = 1

        # normalize
        norm_weights = hyp_weights / fixed_norm_scale
        # Note that newaxis is needed to align the two matrices.
        # This introduces a length-1 axis for the particle number,
        # so that the normalization is broadcast over all particles.

        # Return the requested combination of weights, likelihoods and
        # (unfixed) normalization scales.
        if not return_likelihood:
            if not return_normalization:
                return norm_weights
            else:
                return norm_weights, norm_scale
        else:
            if not return_normalization:
                return norm_weights, L
            else:
                return norm_weights, L, norm_scale
def update(self, outcome, expparams, check_for_resample=True):
"""
Given an experiment and an outcome of that experiment, updates the
posterior distribution to reflect knowledge of that experiment.
After updating, resamples the posterior distribution if necessary.
:param int outcome: Label for the outcome that was observed, as defined
by the :class:`~qinfer.abstract_model.Model` instance under study.
:param expparams: Parameters describing the experiment that was
performed.
:type expparams: :class:`~numpy.ndarray` of dtype given by the
:attr:`~qinfer.abstract_model.Model.expparams_dtype` property
of the underlying model
:param bool check_for_resample: If :obj:`True`, after performing the
update, the effective sample size condition will be checked and
a resampling step may be performed.
"""
# First, record the outcome.
# TODO: record the experiment as well.
self._data_record.append(outcome)
self._just_resampled = False
# Perform the update.
weights, norm = self.hypothetical_update(outcome, expparams, return_normalization=True)
# Check for negative weights before applying the update.
if not np.all(weights >= 0):
warnings.warn("Negative weights occured in particle approximation. Smallest weight observed == {}. Clipping weights.".format(np.min(weights)), ApproximationWarning)
np.clip(weights, 0, 1, out=weights)
# Next, check if we have caused the weights to go to zero, as can
# happen if the likelihood is identically zero for all particles,
# or if the previous clip step choked on a NaN.
if np.sum(weights) <= self._zero_weight_thresh:
if self._zero_weight_policy == 'ignore':
pass
elif self._zero_weight_policy == 'skip':
return
elif self._zero_weight_policy == 'warn':
warnings.warn("All particle weights are zero. This will very likely fail quite badly.", ApproximationWarning)
elif self._zero_weight_policy == 'error':
raise RuntimeError("All particle weights are zero.")
elif self._zero_weight_policy == 'reset':
warnings.warn("All particle weights are zero. Resetting from initial prior.", ApproximationWarning)
self.reset()
else:
raise ValueError("Invalid zero-weight policy {} encountered.".format(self._zero_weight_policy))
# Since hypothetical_update returns an array indexed by
# [outcome, experiment, particle], we need to strip off those two
# indices first.
self.particle_weights[:] = weights[0,0,:]
# Record the normalization
self._normalization_record.append(norm[0][0])
# Update the particle locations according to the model's timestep.
self.particle_locations = self.model.update_timestep(
self.particle_locations, expparams
)[:, :, 0]
# Check if we need to update our min_n_ess attribute.
if self.n_ess <= self._min_n_ess:
self._min_n_ess = self.n_ess
# Resample if needed.
if check_for_resample:
self._maybe_resample()
    def batch_update(self, outcomes, expparams, resample_interval=5):
        r"""
        Updates based on a batch of outcomes and experiments, rather than just
        one.

        :param numpy.ndarray outcomes: An array of outcomes of the experiments that
            were performed.
        :param numpy.ndarray expparams: Either a scalar or record single-index
            array of experiments that were performed.
        :param int resample_interval: Controls how often to check whether
            :math:`N_{\text{ess}}` falls below the resample threshold.
        """

        # TODO: write a faster implementation here using vectorized calls to
        # likelihood.

        # Check that the number of outcomes and experiments is the same.
        n_exps = outcomes.shape[0]
        if expparams.shape[0] != n_exps:
            raise ValueError("The number of outcomes and experiments must match.")

        # Promote a 1D expparams array to a column so each row is one experiment.
        if len(expparams.shape) == 1:
            expparams = expparams[:, None]

        # Loop over experiments and update one at a time, deferring the
        # resample check so it only happens every resample_interval updates.
        for idx_exp, (outcome, experiment) in enumerate(zip(iter(outcomes), iter(expparams))):
            self.update(outcome, experiment, check_for_resample=False)
            if (idx_exp + 1) % resample_interval == 0:
                self._maybe_resample()
## RESAMPLING METHODS #####################################################
def resample(self):
    """
    Forces the updater to perform a resampling step immediately.
    """
    # Resampling twice with no new data in between is usually a sign of
    # misuse, so warn about it.
    if self.just_resampled:
        warnings.warn(
            "Resampling without additional data; this may not perform as "
            "desired.",
            ResamplerWarning
        )

    # Bookkeeping: record that a resampling step has been performed.
    self._just_resampled = True
    self._resample_count += 1

    # Snapshot the pre-resampling state where diagnostics require it.
    if self._resampling_divergences is not None:
        prior_locs = self.particle_locations.copy()
        prior_weights = self.particle_weights.copy()
    if self._debug_resampling:
        mean_before = self.est_mean()
        cov_before = self.est_covariance_mtx()

    # Delegate to the configured resampling algorithm; the model is handed
    # over so the resampler can check validity of newly placed particles.
    # FIXME This feels fishy. If we update particles elsewwhere
    refreshed = self.resampler(self.model, self)
    self.particle_weights = refreshed.particle_weights
    self.particle_locations = refreshed.particle_locations

    # Canonicalize the fresh locations if that behavior was requested.
    if self._canonicalize:
        self.particle_locations[:, :] = self.model.canonicalize(self.particle_locations)

    # The model's cache is now stale; clear it, demoting any failure to a
    # warning.
    try:
        self.model.clear_cache()
    except Exception as e:
        warnings.warn("Exception raised when clearing model cache: {}. Ignoring.".format(e))

    # Record the divergence between the old and new distributions.
    if self._resampling_divergences is not None:
        self._resampling_divergences.append(
            self._kl_divergence(prior_locs, prior_weights)
        )

    # Emit a debug report comparing pre- and post-resampling moments.
    if self._debug_resampling:
        mean_after = self.est_mean()
        cov_after = self.est_covariance_mtx()
        logger.debug("Resampling changed mean by {}. Norm change in cov: {}.".format(
            mean_before - mean_after,
            np.linalg.norm(cov_after - cov_before)
        ))
def bayes_risk(self, expparams):
    r"""
    Calculates the Bayes risk for a hypothetical experiment, assuming the
    quadratic loss function defined by the current model's scale matrix
    (see :attr:`qinfer.abstract_model.Simulatable.Q`).

    :param expparams: The experiment at which to compute the Bayes risk.
    :type expparams: :class:`~numpy.ndarray` of dtype given by the current
        model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype` property,
        and of shape ``(1,)``

    :return float: The Bayes risk for the current posterior distribution
        of the hypothetical experiment ``expparams``.
    """
    # This subroutine computes the bayes risk for a hypothetical experiment
    # defined by expparams.

    # Assume expparams is a single experiment.

    # Q = np array(Nmodelparams), which contains the diagonal part of the
    # rescaling matrix. Non-diagonal could also be considered, but
    # for the moment this is not implemented.
    nout = self.model.n_outcomes(expparams) # This is a vector so this won't work
    # NOTE(review): if n_outcomes returns an array (as the comment above
    # suggests), np.arange(nout) below will fail; confirm that the models
    # used with this method return a scalar outcome count.
    w, N = self.hypothetical_update(np.arange(nout), expparams, return_normalization=True)
    w = w[:, 0, :] # Fix w.shape == (n_outcomes, n_particles).
    N = N[:, :, 0] # Fix L.shape == (n_outcomes, n_particles).

    xs = self.particle_locations.transpose([1, 0]) # shape (n_mp, n_particles).

    # In the following, we will use the subscript convention that
    # "o" refers to an outcome, "p" to a particle, and
    # "i" to a model parameter.
    # Thus, mu[o,i] is the sum over all particles of w[o,p] * x[i,p].
    mu = np.transpose(np.tensordot(w,xs,axes=(1,1)))
    var = (
        # This sum is a reduction over the particle index and thus
        # represents an expectation value over the diagonal of the
        # outer product $x . x^T$.
        np.transpose(np.tensordot(w,xs**2,axes=(1,1)))
        # We finish by subracting from the above expectation value
        # the diagonal of the outer product $mu . mu^T$.
        - mu**2).T

    # Rescale the per-outcome variances by the model's quadratic loss
    # weights Q, reducing over model parameters.
    rescale_var = np.sum(self.model.Q * var, axis=1)
    # Q has shape (n_mp,), therefore rescale_var has shape (n_outcomes,).
    # Weight each outcome's risk contribution by its total probability.
    tot_norm = np.sum(N, axis=1)
    return np.dot(tot_norm.T, rescale_var)
def expected_information_gain(self, expparams):
    r"""
    Calculates the expected information gain for a hypothetical experiment.

    :param expparams: The experiment at which to compute expected
        information gain.
    :type expparams: :class:`~numpy.ndarray` of dtype given by the current
        model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype` property,
        and of shape ``(1,)``

    :return float: The expected information gain for the current posterior
        distribution of the hypothetical experiment ``expparams``.
    """
    # NOTE(review): n_outcomes may return an array for some models (see the
    # analogous comment in bayes_risk); confirm it is scalar here.
    nout = self.model.n_outcomes(expparams)
    w, N = self.hypothetical_update(np.arange(nout), expparams, return_normalization=True)
    w = w[:, 0, :] # Fix w.shape == (n_outcomes, n_particles).
    N = N[:, :, 0] # Fix N.shape == (n_outcomes, n_particles).

    # This is a special case of the KL divergence estimator (see below),
    # in which the other distribution is guaranteed to share support.
    #
    # KLD[idx_outcome] = Sum over particles(self * log(self / other[idx_outcome])
    # Est. KLD = E[KLD[idx_outcome] | outcomes].
    KLD = np.sum(
        w * np.log(w / self.particle_weights ),
        axis=1 # Sum over particles.
    )

    # Weight each outcome's divergence by its total probability.
    tot_norm = np.sum(N, axis=1)
    return np.dot(tot_norm, KLD)
## MISC METHODS ###########################################################
def risk(self, x0):
    """
    Convenience wrapper for :meth:`bayes_risk` that accepts a bare scalar
    experiment parameter ``x0`` instead of a record array.
    """
    expparams = np.array([(x0,)], dtype=self.model.expparams_dtype)
    return self.bayes_risk(expparams)
## PLOTTING METHODS #######################################################
def posterior_marginal(self, idx_param=0, res=100, smoothing=0, range_min=None, range_max=None):
    """
    Returns an estimate of the marginal distribution of a given model
    parameter, based on taking the derivative of the interpolated cdf.

    :param int idx_param: Index of parameter to be marginalized.
    :param int res: Resolution of the returned axis.
    :param float smoothing: Standard deviation of the Gaussian kernel
        used to smooth; same units as parameter.
    :param float range_min: Minimum range of the output axis.
    :param float range_max: Maximum range of the output axis.

    .. seealso::

        :meth:`SMCUpdater.plot_posterior_marginal`
    """
    # Sort the particles along the requested parameter so that the running
    # sum of weights below forms a proper cdf. (interp1d would sort anyway,
    # so this costs nothing extra.)
    order = np.argsort(self.particle_locations[:, idx_param])
    sorted_locs = self.particle_locations[order, idx_param]

    # Axis over which the marginal is reported; defaults to the particle
    # extent when no explicit range is supplied.
    if range_min is None:
        range_min = np.min(sorted_locs)
    if range_max is None:
        range_max = np.max(sorted_locs)
    axis = np.linspace(range_min, range_max, res)

    # Interpolate the empirical cdf, pinning it to 1 well past the right
    # edge and to 0 outside the sampled region.
    cdf = scipy.interpolate.interp1d(
        np.append(sorted_locs, range_max + np.abs(range_max - range_min)),
        np.append(np.cumsum(self.particle_weights[order]), 1),
        bounds_error=False,
        fill_value=0,
        assume_sorted=True
    )

    # The pdf is the derivative of the cdf; optionally smooth it in place.
    density = np.gradient(cdf(axis), axis[1] - axis[0])
    if smoothing > 0:
        gaussian_filter1d(density, res * smoothing / np.abs(range_max - range_min), output=density)

    return axis, density
def plot_posterior_marginal(self, idx_param=0, res=100, smoothing=0,
        range_min=None, range_max=None, label_xaxis=True,
        other_plot_args=None, true_model=None
    ):
    """
    Plots a marginal of the requested parameter.

    :param int idx_param: Index of parameter to be marginalized.
    :param int res: Resolution of the horizontal axis.
    :param float smoothing: Standard deviation of the Gaussian kernel
        used to smooth; same units as parameter.
    :param float range_min: Minimum range of the output axis.
    :param float range_max: Maximum range of the output axis.
    :param bool label_xaxis: Labels the :math:`x`-axis with the model parameter name
        given by this updater's model.
    :param dict other_plot_args: Keyword arguments to be passed to
        matplotlib's ``plot`` function. ``None`` is equivalent to an
        empty dictionary.
    :param np.ndarray true_model: Plots a given model parameter vector
        as the "true" model for comparison.

    .. seealso::

        :meth:`SMCUpdater.posterior_marginal`
    """
    # Using a mutable default ({}) would share one dict across every call
    # to this method; normalize a None sentinel to a fresh dict instead.
    if other_plot_args is None:
        other_plot_args = {}

    res = plt.plot(*self.posterior_marginal(
        idx_param, res, smoothing,
        range_min, range_max
    ), **other_plot_args)
    if label_xaxis:
        plt.xlabel('${}$'.format(self.model.modelparam_names[idx_param]))
    if true_model is not None:
        # Accept either a (1, n_mp) or an (n_mp,) vector for the true model.
        true_model = true_model[0, idx_param] if true_model.ndim == 2 else true_model[idx_param]
        old_ylim = plt.ylim()
        # Draw the marker slightly beyond the current limits, then restore
        # them so the view is unchanged.
        plt.vlines(true_model, old_ylim[0] - 0.1, old_ylim[1] + 0.1, color='k', linestyles='--')
        plt.ylim(old_ylim)
    return res
def plot_covariance(self, corr=False, param_slice=None, tick_labels=None, tick_params=None):
    """
    Plots the covariance matrix of the posterior as a Hinton diagram.

    .. note::

        This function requires that mpltools is installed.

    :param bool corr: If `True`, plot the correlation matrix (the
        covariance normalized by the outer product of the square roots of
        its diagonal) rather than the raw covariance.
    :param slice param_slice: Slice of the modelparameters to be plotted.
    :param list tick_labels: List of tick labels for each component; by
        default, these are drawn from the model itself.
    :param dict tick_params: Keyword arguments forwarded to matplotlib's
        ``xticks`` and ``yticks``.
    """
    # The Hinton-diagram renderer comes from mpltools; fail fast if it
    # could not be imported at module load time.
    if mpls is None:
        raise ImportError("Hinton diagrams require mpltools.")

    if param_slice is None:
        param_slice = np.s_[:]

    # Tick positions are 0..k-1; labels come from the caller or, failing
    # that, from the model's parameter names rendered as math text.
    names = self.model.modelparam_names[param_slice]
    if tick_labels is None:
        tick_labels = list(map(u"${}$".format, names))
    ticks = (list(range(len(names))), tick_labels)

    cov = self.est_covariance_mtx(corr=corr)[param_slice, param_slice]

    retval = mpls.hinton(cov)
    extra_tick_args = tick_params if tick_params is not None else {}
    plt.xticks(*ticks, **extra_tick_args)
    plt.yticks(*ticks, **extra_tick_args)
    plt.gca().xaxis.tick_top()

    return retval
def posterior_mesh(self, idx_param1=0, idx_param2=1, res1=100, res2=100, smoothing=0.01):
    """
    Returns a mesh, useful for plotting, of kernel density estimation
    of a 2D projection of the current posterior distribution.

    :param int idx_param1: Parameter to be treated as :math:`x` when
        plotting.
    :param int idx_param2: Parameter to be treated as :math:`y` when
        plotting.
    :param int res1: Resolution along the :math:`x` direction.
    :param int res2: Resolution along the :math:`y` direction.
    :param float smoothing: Standard deviation of the Gaussian kernel
        used to smooth the particle approximation to the current posterior.

    .. seealso::

        :meth:`SMCUpdater.plot_posterior_contour`
    """
    # NB: fancy indexing returns a copy, not a view.
    projected = self.particle_locations[:, [idx_param1, idx_param2]]

    # Rectangular grid spanning the extent of the projected particles.
    p1s, p2s = np.meshgrid(
        np.linspace(np.min(projected[:, 0]), np.max(projected[:, 0]), res1),
        np.linspace(np.min(projected[:, 1]), np.max(projected[:, 1]), res2)
    )
    eval_points = np.array([p1s, p2s]).T.reshape((np.prod(p1s.shape), 2))

    # Gaussian KDE: evaluate an isotropic normal centered on each particle
    # at every grid point...
    kernel_vals = scipy.stats.norm.pdf(
        eval_points[:, np.newaxis, :],
        scale=smoothing,
        loc=projected
    )
    # ...multiply across the two parameters to get a multinormal, apply the
    # particle weights, and reduce over the particle index.
    density = np.sum(
        np.prod(kernel_vals, axis=-1) * self.particle_weights,
        axis=1
    ).reshape(p1s.shape) # Finally, reshape back into the mesh's shape.

    return p1s, p2s, density
def plot_posterior_contour(self, idx_param1=0, idx_param2=1, res1=100, res2=100, smoothing=0.01):
    """
    Plots a contour of the kernel density estimation
    of a 2D projection of the current posterior distribution.

    :param int idx_param1: Parameter to be treated as :math:`x` when
        plotting.
    :param int idx_param2: Parameter to be treated as :math:`y` when
        plotting.
    :param int res1: Resolution along the :math:`x` direction.
    :param int res2: Resolution along the :math:`y` direction.
    :param float smoothing: Standard deviation of the Gaussian kernel
        used to smooth the particle approximation to the current posterior.

    .. seealso::

        :meth:`SMCUpdater.posterior_mesh`
    """
    # Build the KDE mesh and hand it straight to matplotlib.
    mesh = self.posterior_mesh(idx_param1, idx_param2, res1, res2, smoothing)
    return plt.contour(*mesh)
## IPYTHON SUPPORT METHODS ################################################
def _repr_html_(self):
    # Rich HTML representation rendered by IPython/Jupyter: a table of the
    # current parameter estimates (mean ± std via format_uncertainty),
    # preceded by the model description and followed by the resample count.
    return r"""
    <strong>{cls_name}</strong> for model of type <strong>{model}</strong>:
    <table>
        <caption>Current estimated parameters</caption>
        <thead>
            <tr>
                {parameter_names}
            </tr>
        </thead>
        <tbody>
            <tr>
                {parameter_values}
            </tr>
        </tbody>
    </table>
    <em>Resample count:</em> {resample_count}
    """.format(
        cls_name=type(self).__name__, # Useful for subclassing.
        model=(
            type(self.model).__name__
            if not self.model.model_chain else
            # If the model wraps a chain of underlying models, list them
            # all in the header.
            # FIXME: the <strong> here is ugly as sin.
            "{}</strong> (based on <strong>{}</strong>)<strong>".format(
                type(self.model).__name__,
                "</strong>, <strong>".join(type(model).__name__ for model in self.model.model_chain)
            )
        ),
        # One header cell per model parameter, rendered as math text.
        parameter_names="\n".join(
            map("<td>${}$</td>".format, self.model.modelparam_names)
        ),
        # TODO: change format string based on number of digits of precision
        # admitted by the variance.
        parameter_values="\n".join(
            "<td>${}$</td>".format(
                format_uncertainty(mu, std)
            )
            for mu, std in
            zip(self.est_mean(), np.sqrt(np.diag(self.est_covariance_mtx())))
        ),
        resample_count=self.resample_count
    )
class MixedApproximateSMCUpdater(SMCUpdater):
    """
    Subclass of :class:`SMCUpdater` that uses a mixture of two models, one
    of which is assumed to be expensive to compute, while the other is assumed
    to be cheaper. This allows for approximate computation to be used on the
    lower-weight particles.

    :param ~qinfer.abstract_model.Model good_model: The more expensive, but
        complete model.
    :param ~qinfer.abstract_model.Model approximate_model: The less expensive,
        but approximate model.
    :param float mixture_ratio: The ratio of the posterior weight that will
        be delegated to the good model in each update step.
    :param float mixture_thresh: Any particles of weight at least equal to this
        threshold will be delegated to the complete model, irrespective
        of the value of ``mixture_ratio``.
    :param int min_good: Minimum number of "good" particles to assign at each
        step.

    All other parameters are as described in the documentation of
    :class:`SMCUpdater`.
    """

    def __init__(self,
            good_model, approximate_model,
            n_particles, prior,
            resample_a=None, resampler=None, resample_thresh=0.5,
            mixture_ratio=0.5, mixture_thresh=1.0, min_good=0
        ):
        # Keep both models; the base class is initialized with the complete
        # model, so properties such as expparams_dtype come from it.
        self._good_model = good_model
        self._apx_model = approximate_model

        super(MixedApproximateSMCUpdater, self).__init__(
            good_model, n_particles, prior,
            resample_a, resampler, resample_thresh
        )

        # Parameters controlling how particles are split between models.
        self._mixture_ratio = mixture_ratio
        self._mixture_thresh = mixture_thresh
        self._min_good = min_good

    def hypothetical_update(self, outcomes, expparams, return_likelihood=False, return_normalization=False):
        # Splits the particle cloud by weight: high-weight particles are
        # evaluated with the complete (expensive) model and the remainder
        # with the approximate one; otherwise this mirrors
        # SMCUpdater.hypothetical_update.
        # TODO: consolidate code with SMCUpdater by breaking update logic
        # into private method.

        # It's "hypothetical", don't want to overwrite old weights yet!
        weights = self.particle_weights
        locs = self.particle_locations

        # Check if we have a single outcome or an array. If we only have one
        # outcome, wrap it in a one-index array.
        if not isinstance(outcomes, np.ndarray):
            outcomes = np.array([outcomes])

        # Make an empty array for likelihoods. We'll fill it in in two steps,
        # the good step and the approximate step.
        L = np.zeros((outcomes.shape[0], locs.shape[0], expparams.shape[0]))

        # Which indices go to good_model?

        # Start by getting a permutation that sorts the weights.
        # Since sorting as implemented by NumPy is stable, we want to break
        # that stability to avoid introducing patterns, and so we first
        # randomly shuffle the identity permutation.
        idxs_random = np.arange(weights.shape[0])
        np.random.shuffle(idxs_random)
        idxs_sorted = np.argsort(weights[idxs_random])

        # Find the inverse permutation to be that which returns
        # the composed permutation sort º shuffle to the identity.
        inv_idxs_sort = np.argsort(idxs_random[idxs_sorted])

        # Now strip off a set of particles producing the desired total weight
        # or that have weights above a given threshold.
        sorted_weights = weights[idxs_random[idxs_sorted]]
        cum_weights = np.cumsum(sorted_weights)
        good_mask = (np.logical_or(
            cum_weights >= 1 - self._mixture_ratio,
            sorted_weights >= self._mixture_thresh
        ))[inv_idxs_sort]
        if np.sum(good_mask) < self._min_good:
            # Just take the last _min_good instead of something sophisticated.
            good_mask = np.zeros_like(good_mask)
            good_mask[idxs_random[idxs_sorted][-self._min_good:]] = True

        bad_mask = np.logical_not(good_mask)

        # Finally, separate out the locations that go to each of the good and
        # bad models.
        locs_good = locs[good_mask, :]
        locs_bad = locs[bad_mask, :]

        # Sanity check: the "good" particles should not be lighter on average
        # than the "bad" ones (up to numerical tolerance).
        assert_thresh=1e-6
        assert np.mean(weights[good_mask]) - np.mean(weights[bad_mask]) >= -assert_thresh

        # Now find L for each of the good and bad models.
        L[:, good_mask, :] = self._good_model.likelihood(outcomes, locs_good, expparams)
        L[:, bad_mask, :] = self._apx_model.likelihood(outcomes, locs_bad, expparams)
        L = L.transpose([0, 2, 1])

        # update the weights sans normalization
        # Rearrange so that likelihoods have shape (outcomes, experiments, models).
        # This makes the multiplication with weights (shape (models,)) make sense,
        # since NumPy broadcasting rules align on the right-most index.
        hyp_weights = weights * L

        # Sum up the weights to find the renormalization scale.
        norm_scale = np.sum(hyp_weights, axis=2)[..., np.newaxis]

        # As a special case, check whether any entries of the norm_scale
        # are zero. If this happens, that implies that all of the weights are
        # zero--- that is, that the hypothicized outcome was impossible.
        # Conditioned on an impossible outcome, all of the weights should be
        # zero. To allow this to happen without causing a NaN to propagate,
        # we forcibly set the norm_scale to 1, so that the weights will
        # all remain zero.
        #
        # We don't actually want to propagate this out to the caller, however,
        # and so we save the "fixed" norm_scale to a new array.
        fixed_norm_scale = norm_scale.copy()
        fixed_norm_scale[np.abs(norm_scale) < np.spacing(1)] = 1

        # normalize
        norm_weights = hyp_weights / fixed_norm_scale
        # Note that newaxis is needed to align the two matrices.
        # This introduces a length-1 axis for the particle number,
        # so that the normalization is broadcast over all particles.

        # Return the requested combination of weights, likelihoods and
        # normalization records.
        if not return_likelihood:
            if not return_normalization:
                return norm_weights
            else:
                return norm_weights, norm_scale
        else:
            if not return_normalization:
                return norm_weights, L
            else:
                return norm_weights, L, norm_scale
class SMCUpdaterBCRB(SMCUpdater):
    """
    Subclass of :class:`SMCUpdater`, adding Bayesian Cramer-Rao bound
    functionality.

    Models considered by this class must be differentiable.

    In addition to the arguments taken by :class:`SMCUpdater`, this class
    takes the following keyword-only arguments:

    :param bool adaptive: If `True`, the updater will track both the
        non-adaptive and adaptive Bayes Information matrices.
    :param initial_bim: If the regularity conditions are not met, then taking
        the outer products of gradients over the prior will not give the correct
        initial BIM. In such cases, ``initial_bim`` can be set to the correct
        BIM corresponding to having done no experiments.
    """

    def __init__(self, *args, **kwargs):
        # Forward to SMCUpdater only the keyword arguments it understands.
        # NOTE(review): 'resampler_a' looks like a typo for 'resample_a'
        # (the spelling used elsewhere in this module); confirm against the
        # SMCUpdater.__init__ signature.
        SMCUpdater.__init__(self, *args, **{
            key: kwargs[key] for key in kwargs
            if key in [
                'resampler_a', 'resampler', 'resample_thresh', 'model',
                'prior', 'n_particles'
            ]
        })
        # The BCRB machinery needs gradients of the log-likelihood.
        if not isinstance(self.model, DifferentiableModel):
            raise ValueError("Model must be differentiable.")

        # TODO: fix distributions to make grad_log_pdf return the right
        #       shape, such that the indices are
        #       [idx_model, idx_param] → [idx_model, idx_param],
        #       so that prior.grad_log_pdf(modelparams[i, j])[i, k]
        #       returns the partial derivative with respect to the kth
        #       parameter evaluated at the model parameter vector
        #       modelparams[i, :].
        if 'initial_bim' not in kwargs or kwargs['initial_bim'] is None:
            # Seed the BIM with the average outer product of the prior's
            # score vectors over the current particle locations.
            gradients = self.prior.grad_log_pdf(self.particle_locations)
            self._current_bim = np.sum(
                gradients[:, :, np.newaxis] * gradients[:, np.newaxis, :],
                axis=0
            ) / self.n_particles
        else:
            self._current_bim = kwargs['initial_bim']

        # Also track the adaptive BIM, if we've been asked to.
        if "adaptive" in kwargs and kwargs["adaptive"]:
            self._track_adaptive = True
            # Both the prior- and posterior-averaged BIMs start
            # from the prior.
            self._adaptive_bim = self.current_bim
        else:
            self._track_adaptive = False

    # TODO: since we are guaranteed differentiability, and since SMCUpdater is
    #       now a Distribution subclass representing posterior sampling, write
    #       a grad_log_pdf for the posterior distribution, perhaps?

    def _bim(self, modelparams, expparams, modelweights=None):
        # Averages the model's Fisher information over a set of model
        # parameter vectors, optionally weighted.
        # TODO: document

        # rough idea of this function is to take expectations of an
        # FI over some distribution, here represented by modelparams.

        # NOTE: The signature of this function is a bit odd, but it allows
        #       us to represent in one function both prior samples of uniform
        #       weight and weighted samples from a posterior.
        #       Because it's a bit odd, we make it a private method and expose
        #       functionality via the prior_bayes_information and
        #       posterior_bayes_information methods.

        # About shapes: the FI we will be averaging over has four indices:
        # FI[i, j, m, e], i and j being matrix indices, m being a model index
        # and e being a model index.
        # We will thus want to return an array of shape BI[i, j, e], reducing
        # over the model index.
        fi = self.model.fisher_information(modelparams, expparams)

        # We now either reweight and sum, or sum and divide, based on whether we
        # have model weights to consider or not.
        if modelweights is None:
            # Assume uniform weights.
            bim = np.sum(fi, axis=2) / modelparams.shape[0]
        else:
            bim = np.einsum("m,ijme->ije", modelweights, fi)

        return bim

    @property
    def current_bim(self):
        """
        Returns a copy of the current Bayesian Information Matrix (BIM)
        of the :class:`SMCUpdaterBCRB`.

        :returns: `np.array` of shape [idx_modelparams,idx_modelparams]
        """
        return np.copy(self._current_bim)

    @property
    def adaptive_bim(self):
        """
        Returns a copy of the adaptive Bayesian Information Matrix (BIM)
        of the :class:`SMCUpdaterBCRB`. Will raise an error if
        :attr:`SMCUpdaterBCRB.track_adaptive` is `False`.

        :returns: `np.array` of shape [idx_modelparams,idx_modelparams]
        """
        if not self.track_adaptive:
            # NOTE(review): the implicitly concatenated message below is
            # missing a space between "argument" and "must".
            raise ValueError('To track the adaptive_bim, the adaptive keyword argument'
                'must be set True when initializing class.')
        return np.copy(self._adaptive_bim)

    @property
    def track_adaptive(self):
        """
        If `True` the :class:`SMCUpdaterBCRB` will track the adaptive BIM. Set by
        keyword argument `adaptive` to :meth:`SMCUpdaterBCRB.__init__`.

        :returns: `bool`
        """
        return self._track_adaptive

    def prior_bayes_information(self, expparams, n_samples=None):
        """
        Evaluates the local Bayesian Information Matrix (BIM) for a set of
        samples from the SMC particle set, with uniform weights.

        :param expparams: Parameters describing the experiment that was
            performed.
        :type expparams: :class:`~numpy.ndarray` of dtype given by the
            :attr:`~qinfer.abstract_model.Model.expparams_dtype` property
            of the underlying model
        :param int n_samples: Number of samples to draw from particle distribution,
            to evaluate BIM over. Defaults to the number of particles.
        """
        if n_samples is None:
            n_samples = self.particle_locations.shape[0]
        return self._bim(self.prior.sample(n_samples), expparams)

    def posterior_bayes_information(self, expparams):
        """
        Evaluates the local Bayesian Information Matrix (BIM) over all particles
        of the current posterior distribution with corresponding weights.

        :param expparams: Parameters describing the experiment that was
            performed.
        :type expparams: :class:`~numpy.ndarray` of dtype given by the
            :attr:`~qinfer.abstract_model.Model.expparams_dtype` property
            of the underlying model
        """
        return self._bim(
            self.particle_locations, expparams,
            modelweights=self.particle_weights
        )

    def update(self, outcome, expparams, check_for_resample=True):
        """
        Given an experiment and an outcome of that experiment, updates the
        posterior distribution to reflect knowledge of that experiment.

        After updating, resamples the posterior distribution if necessary.

        :param int outcome: Label for the outcome that was observed, as defined
            by the :class:`~qinfer.abstract_model.Model` instance under study.
        :param expparams: Parameters describing the experiment that was
            performed.
        :type expparams: :class:`~numpy.ndarray` of dtype given by the
            :attr:`~qinfer.abstract_model.Model.expparams_dtype` property
            of the underlying model
        :param bool check_for_resample: If :obj:`True`, after performing the
            update, the effective sample size condition will be checked and
            a resampling step may be performed.
        """
        # Before we update, we need to commit the new Bayesian information
        # matrix corresponding to the measurement we just made.
        self._current_bim += self.prior_bayes_information(expparams)[:, :, 0]

        # If we're tracking the information content accessible to adaptive
        # algorithms, then we must use the current posterior as the prior
        # for the next step, then add that accordingly.
        if self._track_adaptive:
            self._adaptive_bim += self.posterior_bayes_information(expparams)[:, :, 0]

        # We now can update as normal.
        SMCUpdater.update(self, outcome, expparams, check_for_resample=check_for_resample)
| MichalKononenko/python-qinfer | src/qinfer/smc.py | Python | agpl-3.0 | 49,222 | [
"Gaussian"
] | 1149d3e8288a529e50553c1daacc70867b9c1d1e198709ebae55c184c51c8682 |
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
from __future__ import division, print_function, absolute_import
# Minimization routines
# Names re-exported as the public API of this module.
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
           'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
           'line_search', 'check_grad', 'OptimizeResult', 'show_options',
           'OptimizeWarning']

# Docstrings in this module are written in reStructuredText.
__docformat__ = "restructuredtext en"
import warnings
import sys
import numpy
from scipy._lib.six import callable
from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,
vectorize, asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from .linesearch import (line_search_wolfe1, line_search_wolfe2,
line_search_wolfe2 as line_search,
LineSearchWarning)
from inspect import getargspec
# standard status messages of optimizers
# Keys are the short codes the individual solvers use when building an
# OptimizeResult; values are the human-readable termination messages.
_status_message = {'success': 'Optimization terminated successfully.',
                   'maxfev': 'Maximum number of function evaluations has '
                             'been exceeded.',
                   'maxiter': 'Maximum number of iterations has been '
                              'exceeded.',
                   'pr_loss': 'Desired error not necessarily achieved due '
                              'to precision loss.'}
class MemoizeJac(object):
    """ Decorator that caches the value gradient of function each time it
    is called.

    Wraps a callable ``fun(x, *args)`` returning a ``(value, gradient)``
    pair. Calling the instance returns only the value, and
    :meth:`derivative` returns only the gradient; when the gradient is
    requested at the same point ``x`` as the most recent call, the cached
    result is reused instead of re-evaluating ``fun``.
    """

    def __init__(self, fun):
        # fun: the wrapped callable returning (value, jacobian).
        self.fun = fun
        # Cached jacobian and the point at which it was computed.
        self.jac = None
        self.x = None

    def __call__(self, x, *args):
        """Evaluate the wrapped function at ``x``, caching its gradient."""
        # Copy x so later in-place mutation by the caller cannot silently
        # satisfy the cache comparison in derivative().
        self.x = numpy.asarray(x).copy()
        fg = self.fun(x, *args)
        self.jac = fg[1]
        return fg[0]

    def derivative(self, x, *args):
        """Return the gradient at ``x``, reusing the cache when possible."""
        # numpy.alltrue was removed in NumPy 2.0; numpy.all is the
        # supported equivalent with identical semantics here.
        if self.jac is not None and numpy.all(x == self.x):
            return self.jac
        else:
            # Cache miss: evaluating at x also refreshes self.jac.
            self(x, *args)
            return self.jac
class OptimizeResult(dict):
    """ Represents the optimization result.

    Attributes
    ----------
    x : ndarray
        The solution of the optimization.
    success : bool
        Whether or not the optimizer exited successfully.
    status : int
        Termination status of the optimizer. Its value depends on the
        underlying solver. Refer to `message` for details.
    message : str
        Description of the cause of the termination.
    fun, jac, hess: ndarray
        Values of objective function, its Jacobian and its Hessian (if
        available). The Hessians may be approximations, see the documentation
        of the function in question.
    hess_inv : object
        Inverse of the objective function's Hessian; may be an approximation.
        Not available for all solvers. The type of this attribute may be
        either np.ndarray or scipy.sparse.linalg.LinearOperator.
    nfev, njev, nhev : int
        Number of evaluations of the objective functions and of its
        Jacobian and Hessian.
    nit : int
        Number of iterations performed by the optimizer.
    maxcv : float
        The maximum constraint violation.

    Notes
    -----
    Additional attributes may be present depending on the specific solver.
    Since this class is essentially a dict with attribute accessors, the
    `keys()` method shows which attributes are available.
    """

    def __getattr__(self, name):
        # Attribute reads fall back to dictionary lookup.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    # Attribute writes and deletes map straight onto the dict.
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __repr__(self):
        # Render entries as right-aligned "key: value" lines; an empty
        # result prints as "ClassName()".
        if not self.keys():
            return self.__class__.__name__ + "()"
        width = max(map(len, list(self.keys()))) + 1
        return '\n'.join(
            k.rjust(width) + ': ' + repr(v) for k, v in self.items()
        )
class OptimizeWarning(UserWarning):
    """Warning emitted by the optimizers, e.g. for unknown solver options."""
def _check_unknown_options(unknown_options):
    """Emit an OptimizeWarning naming any solver options nobody consumed."""
    if not unknown_options:
        return
    msg = ", ".join(map(str, unknown_options.keys()))
    # Stack level 4: this is called from _minimize_*, which is
    # called from another function in Scipy. Level 4 is the first
    # level in user code.
    warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def is_array_scalar(x):
    """Test whether `x` is either a scalar or an array scalar.

    A value counts as an array scalar when it contains exactly one element,
    e.g. ``3``, ``np.float64(3)``, or ``np.array(3.0)``.
    """
    n_elements = np.size(x)
    return n_elements == 1
# Square root of machine epsilon for float64: the default step size used by
# the finite-difference gradient approximations in this module.
_epsilon = sqrt(numpy.finfo(float).eps)
def vecnorm(x, ord=2):
    """Return the `ord`-norm of the vector `x`.

    Parameters
    ----------
    x : array_like
        Input vector.
    ord : {int, float, inf, -inf}, optional
        Order of the norm. ``inf`` gives the maximum absolute value,
        ``-inf`` the minimum absolute value, and any other value the usual
        p-norm ``sum(abs(x)**ord)**(1/ord)``. Default is 2 (Euclidean).
    """
    # Compare against numpy.inf rather than the bare `Inf` alias:
    # numpy.Inf was removed in NumPy 2.0, while numpy.inf is the supported
    # spelling and compares equal to float('inf'), so behavior is unchanged.
    if ord == numpy.inf:
        return numpy.amax(numpy.abs(x))
    elif ord == -numpy.inf:
        return numpy.amin(numpy.abs(x))
    else:
        return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord)
def rosen(x):
    """
    The Rosenbrock function.

    The function computed is::

        sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Rosenbrock function is to be computed.

    Returns
    -------
    f : float
        The value of the Rosenbrock function.

    See Also
    --------
    rosen_der, rosen_hess, rosen_hess_prod
    """
    x = asarray(x)
    head, tail = x[:-1], x[1:]
    # Sum of banana-valley terms; reducing over axis 0 keeps any trailing
    # axes intact for broadcast evaluation.
    return numpy.sum(100.0 * (tail - head**2.0)**2.0 + (1 - head)**2.0,
                     axis=0)
def rosen_der(x):
    """
    The derivative (i.e. gradient) of the Rosenbrock function.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the derivative is to be computed.

    Returns
    -------
    rosen_der : (N,) ndarray
        The gradient of the Rosenbrock function at `x`.

    See Also
    --------
    rosen, rosen_hess, rosen_hess_prod
    """
    x = asarray(x)
    grad = numpy.zeros_like(x)
    mid, left, right = x[1:-1], x[:-2], x[2:]
    # Interior components couple each coordinate to both of its neighbours.
    grad[1:-1] = (200 * (mid - left**2)
                  - 400 * (right - mid**2) * mid
                  - 2 * (1 - mid))
    # The endpoints each have only one neighbour.
    grad[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
    grad[-1] = 200 * (x[-1] - x[-2]**2)
    return grad
def rosen_hess(x):
    """
    The Hessian matrix of the Rosenbrock function.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.

    Returns
    -------
    rosen_hess : ndarray
        The Hessian matrix of the Rosenbrock function at `x`.

    See Also
    --------
    rosen, rosen_der, rosen_hess_prod
    """
    x = atleast_1d(x)
    # Diagonal entries: the endpoints are special-cased, interior entries
    # pick up contributions from both neighbouring terms.
    diagonal = numpy.zeros(len(x), dtype=x.dtype)
    diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
    diagonal[-1] = 200
    diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
    # Super- and sub-diagonal coupling terms, then the diagonal on top.
    off_diag = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1)
    return off_diag + numpy.diag(diagonal)
def rosen_hess_prod(x, p):
    """
    Product of the Hessian matrix of the Rosenbrock function with a vector.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.
    p : array_like
        1-D array, the vector to be multiplied by the Hessian matrix.

    Returns
    -------
    rosen_hess_prod : ndarray
        The Hessian matrix of the Rosenbrock function at `x` multiplied
        by the vector `p`.

    See Also
    --------
    rosen, rosen_der, rosen_hess
    """
    x = atleast_1d(x)
    Hp = numpy.zeros(len(x), dtype=x.dtype)
    # The first and last rows of the Hessian each have a single
    # off-diagonal entry.
    Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
    Hp[-1] = -400 * x[-2] * p[-2] + 200 * p[-1]
    # Interior rows are tridiagonal: sub-diagonal, diagonal, super-diagonal.
    Hp[1:-1] = (-400 * x[:-2] * p[:-2]
                + (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1]
                - 400 * x[1:-1] * p[2:])
    return Hp
def wrap_function(function, args):
    """Wrap `function` so that `args` are appended and calls are counted.

    Returns a pair ``(ncalls, wrapper)``: ``ncalls`` is a one-element list
    holding the call count (a list so the closure can mutate it), and
    ``wrapper`` forwards its positional arguments followed by ``args``.
    If `function` is None, the wrapper is None as well.
    """
    counter = [0]
    if function is None:
        return counter, None

    def function_wrapper(*call_args):
        counter[0] += 1
        return function(*(call_args + args))

    return counter, function_wrapper
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
         full_output=0, disp=1, retall=0, callback=None):
    """
    Minimize a function using the downhill simplex algorithm.

    This algorithm only uses function values, not derivatives or second
    derivatives.

    Parameters
    ----------
    func : callable func(x,*args)
        The objective function to be minimized.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to func, i.e. ``f(x,*args)``.
    xtol : float, optional
        Relative error in xopt acceptable for convergence.
    ftol : number, optional
        Relative error in func(xopt) acceptable for convergence.
    maxiter : int, optional
        Maximum number of iterations to perform.
    maxfun : number, optional
        Maximum number of function evaluations to make.
    full_output : bool, optional
        Set to True if fopt and warnflag outputs are desired.
    disp : bool, optional
        Set to True to print convergence messages.
    retall : bool, optional
        Set to True to return list of solutions at each iteration.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    xopt : ndarray
        Parameter that minimizes function.
    fopt : float
        Value of function at minimum: ``fopt = func(xopt)``.
        Only returned if `full_output` is True.
    iter : int
        Number of iterations performed.  Only with `full_output`.
    funcalls : int
        Number of function calls made.  Only with `full_output`.
    warnflag : int
        1 : Maximum number of function evaluations made.
        2 : Maximum number of iterations reached.
        Only with `full_output`.
    allvecs : list
        Solution at each iteration.  Only returned if `retall` is True.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'Nelder-Mead' `method` in particular.

    Notes
    -----
    Uses a Nelder-Mead simplex algorithm to find the minimum of function of
    one or more variables.  This method usually needs more function
    evaluations than derivative-based methods, but it is robust in the
    sense that it requires only function values.

    References
    ----------
    .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
           minimization", The Computer Journal, 7, pp. 308-313
    .. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
           Respectable", in Numerical Analysis 1995, Proceedings of the
           1995 Dundee Biennial Conference in Numerical Analysis, D.F.
           Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
           Harlow, UK, pp. 191-208.
    """
    # Delegate to the solver and unpack the OptimizeResult into the
    # legacy tuple-based return convention.
    res = _minimize_neldermead(func, x0, args, callback=callback,
                               xtol=xtol, ftol=ftol, maxiter=maxiter,
                               maxfev=maxfun, disp=disp, return_all=retall)
    if full_output:
        out = (res['x'], res['fun'], res['nit'], res['nfev'], res['status'])
        if retall:
            out = out + (res['allvecs'],)
        return out
    if retall:
        return res['x'], res['allvecs']
    return res['x']
def _minimize_neldermead(func, x0, args=(), callback=None,
                         xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
                         disp=False, return_all=False,
                         **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Nelder-Mead algorithm.
    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    ftol : float
        Relative error in ``fun(xopt)`` acceptable for convergence.
    maxiter : int
        Maximum number of iterations to perform.
    maxfev : int
        Maximum number of function evaluations to make.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxfev
    retall = return_all
    # Wrap the objective so the number of evaluations is counted in fcalls.
    fcalls, func = wrap_function(func, args)
    x0 = asfarray(x0).flatten()
    N = len(x0)
    # Default iteration/evaluation budgets scale with dimensionality.
    if maxiter is None:
        maxiter = N * 200
    if maxfun is None:
        maxfun = N * 200
    # Standard Nelder-Mead coefficients: reflection (rho), expansion (chi),
    # contraction (psi) and shrink (sigma).
    rho = 1
    chi = 2
    psi = 0.5
    sigma = 0.5
    one2np1 = list(range(1, N + 1))
    # Initial simplex: x0 plus N vertices, each obtained by perturbing a
    # single coordinate of x0.
    sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
    fsim = numpy.zeros((N + 1,), float)
    sim[0] = x0
    if retall:
        allvecs = [sim[0]]
    fsim[0] = func(x0)
    # 5% relative perturbation for nonzero coordinates; a small absolute
    # step for coordinates that are exactly zero.
    nonzdelt = 0.05
    zdelt = 0.00025
    for k in range(0, N):
        y = numpy.array(x0, copy=True)
        if y[k] != 0:
            y[k] = (1 + nonzdelt)*y[k]
        else:
            y[k] = zdelt
        sim[k + 1] = y
        f = func(y)
        fsim[k + 1] = f
    ind = numpy.argsort(fsim)
    fsim = numpy.take(fsim, ind, 0)
    # sort so sim[0,:] has the lowest function value
    sim = numpy.take(sim, ind, 0)
    iterations = 1
    while (fcalls[0] < maxfun and iterations < maxiter):
        # Converged when the simplex is small (all vertices within xtol of
        # the best) and the function values are flat (within ftol).
        if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xtol and
                numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= ftol):
            break
        # Centroid of all vertices except the worst one.
        xbar = numpy.add.reduce(sim[:-1], 0) / N
        # Reflect the worst vertex through the centroid.
        xr = (1 + rho) * xbar - rho * sim[-1]
        fxr = func(xr)
        doshrink = 0
        if fxr < fsim[0]:
            # Reflection beat the current best: try expanding further.
            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
            fxe = func(xe)
            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else:  # fsim[0] <= fxr
            if fxr < fsim[-2]:
                # Reflection is better than the second worst: accept it.
                sim[-1] = xr
                fsim[-1] = fxr
            else:  # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    # Outside contraction (between centroid and reflection).
                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                    fxc = func(xc)
                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink = 1
                else:
                    # Perform an inside contraction
                    xcc = (1 - psi) * xbar + psi * sim[-1]
                    fxcc = func(xcc)
                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1
                if doshrink:
                    # Both contractions failed: shrink every vertex toward
                    # the best one.
                    for j in one2np1:
                        sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                        fsim[j] = func(sim[j])
        # Re-sort so sim[0] is again the best vertex.
        ind = numpy.argsort(fsim)
        sim = numpy.take(sim, ind, 0)
        fsim = numpy.take(fsim, ind, 0)
        if callback is not None:
            callback(sim[0])
        iterations += 1
        if retall:
            allvecs.append(sim[0])
    x = sim[0]
    fval = numpy.min(fsim)
    warnflag = 0
    # Report why the loop ended: evaluation budget, iteration budget,
    # or successful convergence.
    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
        if disp:
            print('Warning: ' + msg)
    elif iterations >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
        if disp:
            print('Warning: ' + msg)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % iterations)
            print(" Function evaluations: %d" % fcalls[0])
    result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x)
    if retall:
        result['allvecs'] = allvecs
    return result
def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None):
"""
See ``approx_fprime``. An optional initial function value arg is added.
"""
if f0 is None:
f0 = f(*((xk,) + args))
grad = numpy.zeros((len(xk),), float)
ei = numpy.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
def approx_fprime(xk, f, epsilon, *args):
    """Finite-difference approximation of the gradient of a scalar function.

    Parameters
    ----------
    xk : array_like
        The coordinate vector at which to determine the gradient of `f`.
    f : callable
        The function of which to determine the gradient (partial derivatives).
        Should take `xk` as first argument, other arguments to `f` can be
        supplied in ``*args``.  Should return a scalar, the value of the
        function at `xk`.
    epsilon : array_like
        Increment to `xk` to use for determining the function gradient.
        If a scalar, uses the same finite difference delta for all partial
        derivatives.  If an array, should contain one value per element of
        `xk`.
    \\*args : args, optional
        Any other arguments that are to be passed to `f`.

    Returns
    -------
    grad : ndarray
        The partial derivatives of `f` to `xk`.

    See Also
    --------
    check_grad : Check correctness of gradient function against approx_fprime.

    Notes
    -----
    The gradient is approximated one coordinate at a time with the forward
    difference ``(f(xk[i] + epsilon[i]) - f(xk[i])) / epsilon[i]``.  The
    main use of `approx_fprime` is in scalar function optimizers like
    `fmin_bfgs`, to determine numerically the Jacobian of a function.

    Examples
    --------
    >>> from scipy import optimize
    >>> def func(x, c0, c1):
    ...     "Coordinate vector `x` should be an array of size two."
    ...     return c0 * x[0]**2 + c1*x[1]**2
    >>> x = np.ones(2)
    >>> c0, c1 = (1, 200)
    >>> eps = np.sqrt(np.finfo(float).eps)
    >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
    array([   2.        ,  400.00004198])
    """
    # All the work happens in the helper, which additionally accepts a
    # precomputed f(xk); here the extra positional args are repacked.
    return _approx_fprime_helper(xk, f, epsilon, args=args)
def check_grad(func, grad, x0, *args, **kwargs):
    """Check the correctness of a gradient function by comparing it against a
    (forward) finite-difference approximation of the gradient.

    Parameters
    ----------
    func : callable ``func(x0, *args)``
        Function whose derivative is to be checked.
    grad : callable ``grad(x0, *args)``
        Gradient of `func`.
    x0 : ndarray
        Points to check `grad` against forward difference approximation of
        grad using `func`.
    args : \\*args, optional
        Extra arguments passed to `func` and `grad`.
    epsilon : float, optional
        Step size used for the finite difference approximation. It defaults
        to ``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08.

    Returns
    -------
    err : float
        The square root of the sum of squares (i.e. the 2-norm) of the
        difference between ``grad(x0, *args)`` and the finite difference
        approximation of `grad` using func at the points `x0`.

    See Also
    --------
    approx_fprime

    Examples
    --------
    >>> def func(x):
    ...     return x[0]**2 - 0.5 * x[1]**3
    >>> def grad(x):
    ...     return [2 * x[0], -1.5 * x[1]**2]
    >>> from scipy.optimize import check_grad
    >>> check_grad(func, grad, [1.5, -1.5])
    2.9802322387695312e-08
    """
    # 'epsilon' is the only keyword argument accepted; anything else is
    # an error (it would otherwise be silently ignored).
    step = kwargs.pop('epsilon', _epsilon)
    if kwargs:
        raise ValueError("Unknown keyword arguments: %r" %
                         (list(kwargs.keys()),))
    difference = grad(x0, *args) - approx_fprime(x0, func, step, *args)
    return sqrt(sum(difference**2))
def approx_fhess_p(x0, p, fprime, epsilon, *args):
    """Approximate the Hessian-vector product ``H(x0) @ p`` by forward
    differencing the gradient function *fprime* along direction *p*."""
    grad_shifted = fprime(*((x0 + epsilon * p,) + args))
    grad_base = fprime(*((x0,) + args))
    return (grad_shifted - grad_base) / epsilon
class _LineSearchError(RuntimeError):
    """Internal exception raised by ``_line_search_wolfe12`` when neither
    Wolfe line search can find a suitable step length."""
    pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
                         **kwargs):
    """
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
    suitable step length is not found, and raise an exception if a
    suitable step length is not found.

    Raises
    ------
    _LineSearchError
        If no suitable step size is found.
    """
    result = line_search_wolfe1(f, fprime, xk, pk, gfk,
                                old_fval, old_old_fval,
                                **kwargs)
    # result[0] is the step length; a successful search returns non-None.
    if result[0] is not None:
        return result
    # Wolfe1 failed: retry with the wolfe2 search, silencing its
    # convergence warnings since failure is handled here explicitly.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', LineSearchWarning)
        result = line_search_wolfe2(f, fprime, xk, pk, gfk,
                                    old_fval, old_old_fval)
    if result[0] is None:
        raise _LineSearchError()
    return result
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
              epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
              retall=0, callback=None):
    """
    Minimize a function using the BFGS algorithm.

    Parameters
    ----------
    f : callable f(x,*args)
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    fprime : callable f'(x,*args), optional
        Gradient of f.  If not given, the gradient is approximated
        numerically using `epsilon`.
    args : tuple, optional
        Extra arguments passed to f and fprime.
    gtol : float, optional
        Gradient norm must be less than gtol before successful termination.
    norm : float, optional
        Order of norm (Inf is max, -Inf is min).
    epsilon : int or ndarray, optional
        If fprime is approximated, use this value for the step size.
    maxiter : int, optional
        Maximum number of iterations to perform.
    full_output : bool, optional
        If True, return fopt, func_calls, grad_calls, and warnflag
        in addition to xopt.
    disp : bool, optional
        Print convergence message if True.
    retall : bool, optional
        Return a list of results at each iteration if True.
    callback : callable, optional
        An optional user-supplied function to call after each
        iteration.  Called as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. f(xopt) == fopt.
    fopt : float
        Minimum value.
    gopt : ndarray
        Value of gradient at minimum, f'(xopt), which should be near 0.
    Bopt : ndarray
        Value of 1/f''(xopt), i.e. the inverse hessian matrix.
    func_calls : int
        Number of function_calls made.
    grad_calls : int
        Number of gradient calls made.
    warnflag : integer
        1 : Maximum number of iterations exceeded.
        2 : Gradient and/or function calls not changing.
    allvecs : list
        `OptimizeResult` at each iteration.  Only returned if retall is True.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'BFGS' `method` in particular.

    Notes
    -----
    Optimize the function, f, whose gradient is given by fprime
    using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
    and Shanno (BFGS).

    References
    ----------
    Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.
    """
    # Delegate to the solver and unpack the OptimizeResult into the
    # legacy tuple-based return convention.
    res = _minimize_bfgs(f, x0, args, fprime, callback=callback,
                         gtol=gtol, norm=norm, eps=epsilon, disp=disp,
                         maxiter=maxiter, return_all=retall)
    if full_output:
        out = (res['x'], res['fun'], res['jac'], res['hess_inv'],
               res['nfev'], res['njev'], res['status'])
        if retall:
            out = out + (res['allvecs'],)
        return out
    if retall:
        return res['x'], res['allvecs']
    return res['x']
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
                   gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
                   disp=False, return_all=False,
                   **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    BFGS algorithm.
    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter : int
        Maximum number of iterations to perform.
    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.
    norm : float
        Order of norm (Inf is max, -Inf is min).
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.
    """
    _check_unknown_options(unknown_options)
    f = fun
    fprime = jac
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    if x0.ndim == 0:
        x0.shape = (1,)
    if maxiter is None:
        maxiter = len(x0) * 200
    # Wrap objective and gradient so evaluations are counted; fall back to
    # a finite-difference gradient when none is supplied.
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    N = len(x0)
    I = numpy.eye(N, dtype=int)
    # Hk approximates the inverse Hessian; start from the identity.
    Hk = I
    old_fval = f(x0)
    old_old_fval = None
    xk = x0
    if retall:
        allvecs = [x0]
    sk = [2 * gtol]
    warnflag = 0
    gnorm = vecnorm(gfk, ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        # Quasi-Newton search direction.
        pk = -numpy.dot(Hk, gfk)
        try:
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                _line_search_wolfe12(f, myfprime, xk, pk, gfk,
                                     old_fval, old_old_fval)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        xkp1 = xk + alpha_k * pk
        if retall:
            allvecs.append(xkp1)
        # sk and yk are the step and gradient-change used in the BFGS update.
        sk = xkp1 - xk
        xk = xkp1
        if gfkp1 is None:
            gfkp1 = myfprime(xkp1)
        yk = gfkp1 - gfk
        gfk = gfkp1
        if callback is not None:
            callback(xk)
        k += 1
        gnorm = vecnorm(gfk, ord=norm)
        if (gnorm <= gtol):
            break
        if not numpy.isfinite(old_fval):
            # We correctly found +-Inf as optimal value, or something went
            # wrong.
            warnflag = 2
            break
        # Curvature scalar of the BFGS update; guard against a vanishing
        # denominator (kept from the old Numeric code for extra safety).
        try:
            rhok = 1.0 / (numpy.dot(yk, sk))
        except ZeroDivisionError:
            rhok = 1000.0
            if disp:
                print("Divide-by-zero encountered: rhok assumed large")
        if isinf(rhok):  # numpy returns inf rather than raising here
            rhok = 1000.0
            if disp:
                print("Divide-by-zero encountered: rhok assumed large")
        # Standard BFGS inverse-Hessian update:
        # Hk+1 = (I - rho s y^T) Hk (I - rho y s^T) + rho s s^T.
        A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
        A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
        Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] *
                                                 sk[numpy.newaxis, :])
    fval = old_fval
    if np.isnan(fval):
        # This can happen if the first call to f returned NaN;
        # the loop is then never entered.
        warnflag = 2
    # Report why the loop ended: precision loss, iteration budget, or
    # successful convergence.
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0],
                            njev=grad_calls[0], status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk,
                            nit=k)
    if retall:
        result['allvecs'] = allvecs
    return result
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
            maxiter=None, full_output=0, disp=1, retall=0, callback=None):
    """
    Minimize a function using a nonlinear conjugate gradient algorithm.

    Parameters
    ----------
    f : callable, ``f(x, *args)``
        Objective function to be minimized.  Here `x` must be a 1-D array of
        the variables that are to be changed in the search for a minimum, and
        `args` are the other (fixed) parameters of `f`.
    x0 : ndarray
        A user-supplied initial estimate of `xopt`, the optimal value of `x`.
        It must be a 1-D array of values.
    fprime : callable, ``fprime(x, *args)``, optional
        A function that returns the gradient of `f` at `x`.  The returned
        value must be a 1-D array.  Defaults to None, in which case the
        gradient is approximated numerically (see `epsilon`, below).
    args : tuple, optional
        Parameter values passed to `f` and `fprime`.
    gtol : float, optional
        Stop when the norm of the gradient is less than `gtol`.
    norm : float, optional
        Order to use for the norm of the gradient
        (``-np.Inf`` is min, ``np.Inf`` is max).
    epsilon : float or ndarray, optional
        Step size(s) to use when `fprime` is approximated numerically.
        Defaults to ``sqrt(eps)``, about 1.5e-8.
    maxiter : int, optional
        Maximum number of iterations to perform.  Default ``200 * len(x0)``.
    full_output : bool, optional
        If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in
        addition to `xopt`.
    disp : bool, optional
        If True, return a convergence message, followed by `xopt`.
    retall : bool, optional
        If True, add to the returned values the results of each iteration.
    callback : callable, optional
        An optional user-supplied function, called after each iteration
        as ``callback(xk)`` with the current value of `x0`.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
    fopt : float, optional
        Minimum value found, f(xopt).  Only returned if `full_output` is True.
    func_calls : int, optional
        The number of function_calls made.  Only with `full_output`.
    grad_calls : int, optional
        The number of gradient calls made.  Only with `full_output`.
    warnflag : int, optional
        Integer warning status, only returned if `full_output` is True.
        0 : Success.
        1 : The maximum number of iterations was exceeded.
        2 : Gradient and/or function calls were not changing.  May indicate
            that precision was lost, i.e., the routine did not converge.
    allvecs : list of ndarray, optional
        List of arrays, containing the results at each iteration.
        Only returned if `retall` is True.

    See Also
    --------
    minimize : common interface to all `scipy.optimize` algorithms;
        ``method='CG'`` is the equivalent of this function.

    Notes
    -----
    This conjugate gradient algorithm is based on that of Polak and Ribiere
    [1]_.  Conjugate gradient methods tend to work better when `f` has a
    unique global minimum, is locally well approximated by a quadratic, has
    a continuous gradient of moderate size, and `x0` is reasonably close to
    the minimizer.

    References
    ----------
    .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.
    """
    # Delegate to the solver and unpack the OptimizeResult into the
    # legacy tuple-based return convention.
    res = _minimize_cg(f, x0, args, fprime, callback=callback,
                       gtol=gtol, norm=norm, eps=epsilon, disp=disp,
                       maxiter=maxiter, return_all=retall)
    if full_output:
        out = (res['x'], res['fun'], res['nfev'], res['njev'], res['status'])
        if retall:
            out = out + (res['allvecs'],)
        return out
    if retall:
        return res['x'], res['allvecs']
    return res['x']
def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
                 gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
                 disp=False, return_all=False,
                 **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    conjugate gradient algorithm.
    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter : int
        Maximum number of iterations to perform.
    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.
    norm : float
        Order of norm (Inf is max, -Inf is min).
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.
    """
    _check_unknown_options(unknown_options)
    f = fun
    fprime = jac
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    if maxiter is None:
        maxiter = len(x0) * 200
    # Wrap objective and gradient so evaluations are counted; fall back to
    # a finite-difference gradient when none is supplied.
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    xk = x0
    old_fval = f(xk)
    old_old_fval = None
    if retall:
        allvecs = [xk]
    warnflag = 0
    # First search direction is steepest descent.
    pk = -gfk
    gnorm = vecnorm(gfk, ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        # deltak = ||g_k||^2, the denominator of the beta update below.
        deltak = numpy.dot(gfk, gfk)
        try:
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
                                     old_old_fval, c2=0.4)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        xk = xk + alpha_k * pk
        if retall:
            allvecs.append(xk)
        if gfkp1 is None:
            gfkp1 = myfprime(xk)
        yk = gfkp1 - gfk
        # beta = max(0, y_k . g_{k+1} / ||g_k||^2); clipping at zero
        # restarts with steepest descent when the formula goes negative.
        beta_k = max(0, numpy.dot(yk, gfkp1) / deltak)
        pk = -gfkp1 + beta_k * pk
        gfk = gfkp1
        gnorm = vecnorm(gfk, ord=norm)
        if callback is not None:
            callback(xk)
        k += 1
    fval = old_fval
    # Report why the loop ended: precision loss, iteration budget, or
    # successful convergence.
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % func_calls[0])
            print(" Gradient evaluations: %d" % grad_calls[0])
    result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0],
                            njev=grad_calls[0], status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk,
                            nit=k)
    if retall:
        result['allvecs'] = allvecs
    return result
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
             epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
             callback=None):
    """
    Unconstrained minimization of a function using the Newton-CG method.

    Parameters
    ----------
    f : callable ``f(x, *args)``
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    fprime : callable ``f'(x, *args)``
        Gradient of f.
    fhess_p : callable ``fhess_p(x, p, *args)``, optional
        Function which computes the Hessian of f times an
        arbitrary vector, p.
    fhess : callable ``fhess(x, *args)``, optional
        Function to compute the Hessian matrix of f.
    args : tuple, optional
        Extra arguments passed to f, fprime, fhess_p, and fhess
        (the same set of extra arguments is supplied to all of
        these functions).
    avextol : float, optional
        Convergence is assumed when the average relative error in
        the minimizer falls below this amount.
    epsilon : float or ndarray, optional
        If fhess is approximated, use this value for the step size.
    maxiter : int, optional
        Maximum number of iterations to perform.
    full_output : bool, optional
        If True, return the optional outputs.
    disp : bool, optional
        If True, print convergence message.
    retall : bool, optional
        If True, return a list of results at each iteration.
    callback : callable, optional
        An optional user-supplied function which is called after
        each iteration.  Called as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
    fopt : float
        Value of the function at xopt, i.e. ``fopt = f(xopt)``.
    fcalls : int
        Number of function calls made.
    gcalls : int
        Number of gradient calls made.
    hcalls : int
        Number of hessian calls made.
    warnflag : int
        Warnings generated by the algorithm.
        1 : Maximum number of iterations exceeded.
    allvecs : list
        The result at each iteration, if retall is True (see below).

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'Newton-CG' `method` in particular.

    Notes
    -----
    Only one of `fhess_p` or `fhess` need to be given.  If `fhess`
    is provided, then `fhess_p` will be ignored.  If neither `fhess`
    nor `fhess_p` is provided, then the hessian product will be
    approximated using finite differences on `fprime`.  `fhess_p`
    must compute the hessian times an arbitrary vector.

    Newton-CG methods are also called truncated Newton methods.  This
    function differs from scipy.optimize.fmin_tnc in that it is written
    purely in python using numpy and scipy, and handles only
    unconstrained minimization (fmin_tnc wraps a C implementation and
    also supports box constraints).

    References
    ----------
    Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.
    """
    # Delegate to the solver and unpack the OptimizeResult into the
    # legacy tuple-based return convention.
    res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
                             callback=callback, xtol=avextol, eps=epsilon,
                             maxiter=maxiter, disp=disp, return_all=retall)
    if full_output:
        out = (res['x'], res['fun'], res['nfev'], res['njev'],
               res['nhev'], res['status'])
        if retall:
            out = out + (res['allvecs'],)
        return out
    if retall:
        return res['x'], res['allvecs']
    return res['x']
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
                       callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
                       disp=False, return_all=False,
                       **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Newton-CG algorithm.
    Note that the `jac` parameter (Jacobian) is required.
    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    xtol : float
        Average relative error in solution `xopt` acceptable for
        convergence.
    maxiter : int
        Maximum number of iterations to perform.
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.
    """
    _check_unknown_options(unknown_options)
    if jac is None:
        raise ValueError('Jacobian is required for Newton-CG method')
    f = fun
    fprime = jac
    fhess_p = hessp
    fhess = hess
    avextol = xtol
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    # Wrap objective and gradient so evaluations are counted; Hessian
    # evaluations are counted manually in hcalls.
    fcalls, f = wrap_function(f, args)
    gcalls, fprime = wrap_function(fprime, args)
    hcalls = 0
    if maxiter is None:
        maxiter = len(x0)*200
    # The termination test below sums |update| over all coordinates, so
    # scale the tolerance by the dimensionality.
    xtol = len(x0) * avextol
    update = [2 * xtol]
    xk = x0
    if retall:
        allvecs = [xk]
    k = 0
    old_fval = f(x0)
    old_old_fval = None
    float64eps = numpy.finfo(numpy.float64).eps
    warnflag = 0
    # Outer Newton loop: stop when the last step was small or the
    # iteration budget is exhausted.
    while (numpy.add.reduce(numpy.abs(update)) > xtol) and (k < maxiter):
        # Compute a search direction pk by applying the CG method to
        # del2 f(xk) p = - grad f(xk) starting from 0.
        b = -fprime(xk)
        maggrad = numpy.add.reduce(numpy.abs(b))
        # Inner-CG accuracy tightens as the gradient shrinks
        # (truncated-Newton forcing term).
        eta = numpy.min([0.5, numpy.sqrt(maggrad)])
        termcond = eta * maggrad
        xsupi = zeros(len(x0), dtype=x0.dtype)
        ri = -b
        psupi = -ri
        i = 0
        dri0 = numpy.dot(ri, ri)
        if fhess is not None:  # you want to compute hessian once.
            A = fhess(*(xk,) + args)
            hcalls = hcalls + 1
        # Inner CG loop: solve the Newton system approximately.
        while numpy.add.reduce(numpy.abs(ri)) > termcond:
            # Hessian-vector product: explicit Hessian, user-supplied
            # product function, or finite differences on the gradient.
            if fhess is None:
                if fhess_p is None:
                    Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
                else:
                    Ap = fhess_p(xk, psupi, *args)
                    hcalls = hcalls + 1
            else:
                Ap = numpy.dot(A, psupi)
            # check curvature
            Ap = asarray(Ap).squeeze()  # get rid of matrices...
            curv = numpy.dot(psupi, Ap)
            if 0 <= curv <= 3 * float64eps:
                # Numerically zero curvature: stop with current iterate.
                break
            elif curv < 0:
                # Negative curvature detected.
                if (i > 0):
                    break
                else:
                    # fall back to steepest descent direction
                    xsupi = dri0 / (-curv) * b
                    break
            # Standard CG updates for iterate, residual and direction.
            alphai = dri0 / curv
            xsupi = xsupi + alphai * psupi
            ri = ri + alphai * Ap
            dri1 = numpy.dot(ri, ri)
            betai = dri1 / dri0
            psupi = -ri + betai * psupi
            i = i + 1
            dri0 = dri1  # update numpy.dot(ri,ri) for next time.
        pk = xsupi  # search direction is solution to system.
        gfk = -b  # gradient at xk
        try:
            alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
                _line_search_wolfe12(f, fprime, xk, pk, gfk,
                                     old_fval, old_old_fval)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        update = alphak * pk
        xk = xk + update  # upcast if necessary
        if callback is not None:
            callback(xk)
        if retall:
            allvecs.append(xk)
        k += 1
    fval = old_fval
    # Report why the loop ended: precision loss, iteration budget, or
    # successful convergence.
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % fcalls[0])
            print(" Gradient evaluations: %d" % gcalls[0])
            print(" Hessian evaluations: %d" % hcalls)
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % fcalls[0])
            print(" Gradient evaluations: %d" % gcalls[0])
            print(" Hessian evaluations: %d" % hcalls)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % k)
            print(" Function evaluations: %d" % fcalls[0])
            print(" Gradient evaluations: %d" % gcalls[0])
            print(" Hessian evaluations: %d" % hcalls)
    result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0], njev=gcalls[0],
                            nhev=hcalls, status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk,
                            nit=k)
    if retall:
        result['allvecs'] = allvecs
    return result
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
              full_output=0, disp=1):
    """Bounded minimization for scalar functions.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to be minimized (must accept and return scalars).
    x1, x2 : float or array scalar
        The optimization bounds.
    args : tuple, optional
        Extra arguments passed to function.
    xtol : float, optional
        The convergence tolerance.
    maxfun : int, optional
        Maximum number of function evaluations allowed.
    full_output : bool, optional
        If True, return optional outputs.
    disp : int, optional
        If non-zero, print messages.
        0 : no message printing.
        1 : non-convergence notification messages only.
        2 : print a message on convergence too.
        3 : print iteration results.

    Returns
    -------
    xopt : ndarray
        Parameters (over given interval) which minimize the
        objective function.
    fval : number
        The function value at the minimum point.  Only with `full_output`.
    ierr : int
        An error flag (0 if converged, 1 if maximum number of
        function calls reached).  Only with `full_output`.
    numfunc : int
        The number of function calls made.  Only with `full_output`.

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions. See the 'Bounded' `method` in particular.

    Notes
    -----
    Finds a local minimizer of the scalar function `func` in the
    interval x1 < xopt < x2 using Brent's method.  (See `brent`
    for auto-bracketing).
    """
    # Delegate to the solver and unpack the OptimizeResult into the
    # legacy tuple-based return convention.
    res = _minimize_scalar_bounded(func, (x1, x2), args,
                                   xatol=xtol, maxiter=maxfun, disp=disp)
    if full_output:
        return res['x'], res['fun'], res['status'], res['nfev']
    return res['x']
def _minimize_scalar_bounded(func, bounds, args=(),
                             xatol=1e-5, maxiter=500, disp=0,
                             **unknown_options):
    """
    Minimize a scalar function over the closed interval given by ``bounds``,
    combining golden-section search with parabolic interpolation (a
    bounded variant of Brent's method).

    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    disp : bool
        Set to True to print convergence messages.
    xatol : float
        Absolute error in solution `xopt` acceptable for convergence.
    """
    _check_unknown_options(unknown_options)
    # NOTE: the iteration budget is used as a function-evaluation budget.
    maxfun = maxiter
    # Test bounds are of correct form
    if len(bounds) != 2:
        raise ValueError('bounds must have two elements.')
    x1, x2 = bounds
    if not (is_array_scalar(x1) and is_array_scalar(x2)):
        raise ValueError("Optimisation bounds must be scalars"
                         " or array scalars.")
    if x1 > x2:
        raise ValueError("The lower bound exceeds the upper bound.")
    # flag: 0 -> converged, 1 -> hit the function-evaluation budget.
    flag = 0
    header = ' Func-count x f(x) Procedure'
    step = ' initial'
    sqrt_eps = sqrt(2.2e-16)
    # golden_mean ~ 0.382, the golden-section fraction.
    golden_mean = 0.5 * (3.0 - sqrt(5.0))
    a, b = x1, x2
    # fulc/nfc hold the two previously-best points; xf is the current best.
    fulc = a + golden_mean * (b - a)
    nfc, xf = fulc, fulc
    rat = e = 0.0
    x = xf
    fx = func(x, *args)
    num = 1
    fmin_data = (1, xf, fx)
    ffulc = fnfc = fx
    xm = 0.5 * (a + b)
    # tol1/tol2: absolute step tolerances scaled to the current iterate.
    tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
    tol2 = 2.0 * tol1
    if disp > 2:
        print(" ")
        print(header)
        print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
    while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
        golden = 1
        # Check for parabolic fit
        if numpy.abs(e) > tol1:
            golden = 0
            # Fit a parabola through (fulc, nfc, xf); p/q is its vertex step.
            r = (xf - nfc) * (fx - ffulc)
            q = (xf - fulc) * (fx - fnfc)
            p = (xf - fulc) * q - (xf - nfc) * r
            q = 2.0 * (q - r)
            if q > 0.0:
                p = -p
            q = numpy.abs(q)
            r = e
            e = rat
            # Check for acceptability of parabola
            if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and
                    (p < q * (b - xf))):
                rat = (p + 0.0) / q
                x = xf + rat
                step = ' parabolic'
                # Trial too close to an endpoint: step by tol1 toward center.
                if ((x - a) < tol2) or ((b - x) < tol2):
                    si = numpy.sign(xm - xf) + ((xm - xf) == 0)
                    rat = tol1 * si
            else:      # do a golden section step
                golden = 1
        if golden:  # Do a golden-section step
            # Step into the larger of the two sub-intervals.
            if xf >= xm:
                e = a - xf
            else:
                e = b - xf
            rat = golden_mean*e
            step = ' golden'
        # Never evaluate closer than tol1 to the current best point.
        si = numpy.sign(rat) + (rat == 0)
        x = xf + si * numpy.max([numpy.abs(rat), tol1])
        fu = func(x, *args)
        num += 1
        fmin_data = (num, x, fu)
        if disp > 2:
            print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
        if fu <= fx:
            # New best point: shrink the bracket and rotate the history.
            if x >= xf:
                a = xf
            else:
                b = xf
            fulc, ffulc = nfc, fnfc
            nfc, fnfc = xf, fx
            xf, fx = x, fu
        else:
            # Worse point: it still tightens the bracket on one side.
            if x < xf:
                a = x
            else:
                b = x
            if (fu <= fnfc) or (nfc == xf):
                fulc, ffulc = nfc, fnfc
                nfc, fnfc = x, fu
            elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
                fulc, ffulc = x, fu
        xm = 0.5 * (a + b)
        tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
        tol2 = 2.0 * tol1
        if num >= maxfun:
            flag = 1
            break
    fval = fx
    if disp > 0:
        _endprint(x, flag, fval, maxfun, xatol, disp)
    result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
                            message={0: 'Solution found.',
                                     1: 'Maximum number of function calls '
                                        'reached.'}.get(flag, ''),
                            x=xf, nfev=num)
    return result
class Brent:
    """Stateful implementation of Brent's scalar minimization.

    Combines golden-section search with inverse parabolic
    interpolation. Intended usage: construct, call `set_bracket`,
    then `optimize`, then `get_result`.
    """
    #need to rethink design of __init__
    def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
                 full_output=0):
        # Objective f(x, *args) and its extra positional arguments.
        self.func = func
        self.args = args
        # Relative x-tolerance for convergence.
        self.tol = tol
        self.maxiter = maxiter
        # Absolute tolerance floor, so tol1 never vanishes near x == 0.
        self._mintol = 1.0e-11
        # Golden-section fraction (2 - golden ratio, ~0.381966).
        self._cg = 0.3819660
        # Results; populated by optimize().
        self.xmin = None
        self.fval = None
        self.iter = 0
        self.funcalls = 0
    # need to rethink design of set_bracket (new options, etc)
    def set_bracket(self, brack=None):
        # brack may be None (auto-bracket), a (xa, xb) pair for a downhill
        # search, or a full (xa, xb, xc) bracketing triple.
        self.brack = brack
    def get_bracket_info(self):
        """Return a validated bracketing triple and objective values.

        Returns ``(xa, xb, xc, fa, fb, fc, funcalls)`` where
        ``f(xa) > f(xb) < f(xc)``; raises ValueError if the supplied
        bracket does not satisfy that condition.
        """
        #set up
        func = self.func
        args = self.args
        brack = self.brack
        ### BEGIN core bracket_info code ###
        ### carefully DOCUMENT any CHANGES in core ##
        if brack is None:
            xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
        elif len(brack) == 2:
            xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
                                                       xb=brack[1], args=args)
        elif len(brack) == 3:
            xa, xb, xc = brack
            if (xa > xc):  # swap so xa < xc can be assumed
                xc, xa = xa, xc
            if not ((xa < xb) and (xb < xc)):
                raise ValueError("Not a bracketing interval.")
            fa = func(*((xa,) + args))
            fb = func(*((xb,) + args))
            fc = func(*((xc,) + args))
            if not ((fb < fa) and (fb < fc)):
                raise ValueError("Not a bracketing interval.")
            funcalls = 3
        else:
            raise ValueError("Bracketing interval must be "
                             "length 2 or 3 sequence.")
        ### END core bracket_info code ###
        return xa, xb, xc, fa, fb, fc, funcalls
    def optimize(self):
        """Run Brent's iteration; results are stored on the instance."""
        # set up for optimization
        func = self.func
        xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
        _mintol = self._mintol
        _cg = self._cg
        #################################
        #BEGIN CORE ALGORITHM
        #################################
        # x: current best; w: second best; v: previous value of w.
        x = w = v = xb
        fw = fv = fx = func(*((x,) + self.args))
        if (xa < xc):
            a = xa
            b = xc
        else:
            a = xc
            b = xa
        deltax = 0.0
        funcalls = 1
        iter = 0
        while (iter < self.maxiter):
            tol1 = self.tol * numpy.abs(x) + _mintol
            tol2 = 2.0 * tol1
            xmid = 0.5 * (a + b)
            # check for convergence
            if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)):
                break
            # XXX In the first iteration, rat is only bound in the true case
            # of this conditional. This used to cause an UnboundLocalError
            # (gh-4140). It should be set before the if (but to what?).
            if (numpy.abs(deltax) <= tol1):
                if (x >= xmid):
                    deltax = a - x  # do a golden section step
                else:
                    deltax = b - x
                rat = _cg * deltax
            else:  # do a parabolic step
                # Fit a parabola through (v, w, x); p/tmp2 is the vertex step.
                tmp1 = (x - w) * (fx - fv)
                tmp2 = (x - v) * (fx - fw)
                p = (x - v) * tmp2 - (x - w) * tmp1
                tmp2 = 2.0 * (tmp2 - tmp1)
                if (tmp2 > 0.0):
                    p = -p
                tmp2 = numpy.abs(tmp2)
                dx_temp = deltax
                deltax = rat
                # check parabolic fit
                if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and
                        (numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))):
                    rat = p * 1.0 / tmp2  # if parabolic step is useful.
                    u = x + rat
                    # Keep the trial point at least tol2 from the endpoints.
                    if ((u - a) < tol2 or (b - u) < tol2):
                        if xmid - x >= 0:
                            rat = tol1
                        else:
                            rat = -tol1
                else:
                    if (x >= xmid):
                        deltax = a - x  # if it's not do a golden section step
                    else:
                        deltax = b - x
                    rat = _cg * deltax
            if (numpy.abs(rat) < tol1):  # update by at least tol1
                if rat >= 0:
                    u = x + tol1
                else:
                    u = x - tol1
            else:
                u = x + rat
            fu = func(*((u,) + self.args))  # calculate new output value
            funcalls += 1
            if (fu > fx):  # if it's bigger than current
                # u is worse: shrink the bracket on u's side, update history.
                if (u < x):
                    a = u
                else:
                    b = u
                if (fu <= fw) or (w == x):
                    v = w
                    w = u
                    fv = fw
                    fw = fu
                elif (fu <= fv) or (v == x) or (v == w):
                    v = u
                    fv = fu
            else:
                # u is the new best point: shrink the other side and rotate.
                if (u >= x):
                    a = x
                else:
                    b = x
                v = w
                w = x
                x = u
                fv = fw
                fw = fx
                fx = fu
            iter += 1
        #################################
        #END CORE ALGORITHM
        #################################
        self.xmin = x
        self.fval = fx
        self.iter = iter
        self.funcalls = funcalls
    def get_result(self, full_output=False):
        # Return (xmin, fval, iter, funcalls) when full_output, else xmin.
        if full_output:
            return self.xmin, self.fval, self.iter, self.funcalls
        else:
            return self.xmin
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
    """
    Minimize a function of one variable using Brent's method.

    Given a scalar objective and (optionally) a bracketing interval,
    isolate a minimum of the function to a fractional precision of
    about `tol`.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function.
    args : tuple, optional
        Additional arguments (if present).
    brack : tuple, optional
        Either a triple ``(a, b, c)`` with ``a < b < c`` and
        ``func(b) < func(a), func(c)``, or a pair ``(a, c)`` used as a
        starting interval for a downhill bracket search (see
        `bracket`). A two-point bracket does not guarantee that the
        obtained solution satisfies ``a <= x <= c``.
    tol : float, optional
        Stop if the between-iteration change is less than `tol`.
    full_output : bool, optional
        If True, return all output args (xmin, fval, iter, funcalls).
    maxiter : int, optional
        Maximum number of iterations in solution.

    Returns
    -------
    xmin : ndarray
        Optimum point.
    fval : float
        Optimum value.
    iter : int
        Number of iterations.
    funcalls : int
        Number of objective function evaluations made.

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions. See the 'Brent' `method` in particular.

    Notes
    -----
    Uses inverse parabolic interpolation when possible to speed up
    convergence of the golden section method.
    """
    # Delegate to the OptimizeResult-based implementation and unpack.
    res = _minimize_scalar_brent(func, brack, args,
                                 xtol=tol, maxiter=maxiter)
    if full_output:
        return res['x'], res['fun'], res['nit'], res['nfev']
    return res['x']
def _minimize_scalar_brent(func, brack=None, args=(),
                           xtol=1.48e-8, maxiter=500,
                           **unknown_options):
    """
    Minimize a scalar function with Brent's algorithm.

    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.

    Notes
    -----
    Uses inverse parabolic interpolation when possible to speed up
    convergence of golden section method.
    """
    _check_unknown_options(unknown_options)
    if xtol < 0:
        raise ValueError('tolerance should be >= 0, got %r' % xtol)
    # Drive the stateful Brent helper and repackage its result.
    minimizer = Brent(func=func, args=args, tol=xtol,
                      full_output=True, maxiter=maxiter)
    minimizer.set_bracket(brack)
    minimizer.optimize()
    xmin, fval, nit, nfev = minimizer.get_result(full_output=True)
    # `success` only reflects whether the iteration budget was exhausted.
    return OptimizeResult(fun=fval, x=xmin, nit=nit, nfev=nfev,
                          success=nit < maxiter)
def golden(func, args=(), brack=None, tol=_epsilon, full_output=0):
    """
    Return the minimum of a function of one variable.

    Uses golden-section search (an analog of the bisection method) to
    shrink a bracketing interval around the minimum to a fractional
    precision of `tol`.

    Parameters
    ----------
    func : callable func(x,*args)
        Objective function to minimize.
    args : tuple, optional
        Additional arguments (if present), passed to func.
    brack : tuple, optional
        Either a triple ``(a, b, c)`` with ``a < b < c`` and
        ``func(b) < func(a), func(c)``, or a pair ``(a, c)`` used as a
        starting interval for a downhill bracket search (see
        `bracket`). A two-point bracket does not guarantee that the
        obtained solution satisfies ``a <= x <= c``.
    tol : float, optional
        x tolerance stop criterion.
    full_output : bool, optional
        If True, also return the function value and evaluation count.

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions. See the 'Golden' `method` in particular.
    """
    # Delegate to the OptimizeResult-based implementation and unpack.
    res = _minimize_scalar_golden(func, brack, args, xtol=tol)
    if full_output:
        return res['x'], res['fun'], res['nfev']
    return res['x']
def _minimize_scalar_golden(func, brack=None, args=(),
                            xtol=_epsilon, **unknown_options):
    """
    Minimize a scalar function by golden-section search.

    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    """
    _check_unknown_options(unknown_options)
    tol = xtol
    # Obtain or validate a bracketing triple (xa, xb, xc).
    if brack is None:
        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
    elif len(brack) == 2:
        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
                                                   xb=brack[1], args=args)
    elif len(brack) == 3:
        xa, xb, xc = brack
        if (xa > xc):  # swap so xa < xc can be assumed
            xc, xa = xa, xc
        if not ((xa < xb) and (xb < xc)):
            raise ValueError("Not a bracketing interval.")
        fa = func(*((xa,) + args))
        fb = func(*((xb,) + args))
        fc = func(*((xc,) + args))
        if not ((fb < fa) and (fb < fc)):
            raise ValueError("Not a bracketing interval.")
        funcalls = 3
    else:
        raise ValueError("Bracketing interval must be length 2 or 3 sequence.")
    # Golden ratio conjugate and its complement.
    _gR = 0.61803399
    _gC = 1.0 - _gR
    x3 = xc
    x0 = xa
    # Place the two interior probe points inside the larger segment.
    if (numpy.abs(xc - xb) > numpy.abs(xb - xa)):
        x1 = xb
        x2 = xb + _gC * (xc - xb)
    else:
        x2 = xb
        x1 = xb - _gC * (xb - xa)
    f1 = func(*((x1,) + args))
    f2 = func(*((x2,) + args))
    funcalls += 2
    # Each iteration discards the sub-interval that cannot contain the
    # minimum, keeping one interior probe and adding one new evaluation.
    while (numpy.abs(x3 - x0) > tol * (numpy.abs(x1) + numpy.abs(x2))):
        if (f2 < f1):
            x0 = x1
            x1 = x2
            x2 = _gR * x1 + _gC * x3
            f1 = f2
            f2 = func(*((x2,) + args))
        else:
            x3 = x2
            x2 = x1
            x1 = _gR * x2 + _gC * x0
            f2 = f1
            f1 = func(*((x1,) + args))
        funcalls += 1
    # Report the better of the two surviving interior points.
    if (f1 < f2):
        xmin = x1
        fval = f1
    else:
        xmin = x2
        fval = f2
    return OptimizeResult(fun=fval, nfev=funcalls, x=xmin)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
    """
    Bracket the minimum of the function.
    Given a function and distinct initial points, search in the
    downhill direction (as defined by the initial points) and return
    new points xa, xb, xc that bracket the minimum of the function
    f(xa) > f(xb) < f(xc). It doesn't always mean that obtained
    solution will satisfy xa<=x<=xb
    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to minimize.
    xa, xb : float, optional
        Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
    args : tuple, optional
        Additional arguments (if present), passed to `func`.
    grow_limit : float, optional
        Maximum grow limit. Defaults to 110.0
    maxiter : int, optional
        Maximum number of iterations to perform. Defaults to 1000.
    Returns
    -------
    xa, xb, xc : float
        Bracket.
    fa, fb, fc : float
        Objective function values in bracket.
    funcalls : int
        Number of function evaluations made.
    """
    # Golden ratio, used to grow the search interval each step.
    _gold = 1.618034
    # Guard against division by (near) zero in the parabolic fit.
    _verysmall_num = 1e-21
    fa = func(*(xa,) + args)
    fb = func(*(xb,) + args)
    if (fa < fb):                      # Switch so fa > fb
        xa, xb = xb, xa
        fa, fb = fb, fa
    # First probe beyond xb, a golden-ratio step downhill.
    xc = xb + _gold * (xb - xa)
    fc = func(*((xc,) + args))
    funcalls = 3
    iter = 0
    while (fc < fb):
        # Parabolic extrapolation through (xa, xb, xc); w is the trial point.
        tmp1 = (xb - xa) * (fb - fc)
        tmp2 = (xb - xc) * (fb - fa)
        val = tmp2 - tmp1
        if numpy.abs(val) < _verysmall_num:
            denom = 2.0 * _verysmall_num
        else:
            denom = 2.0 * val
        w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom
        # Farthest point we are willing to extrapolate to this step.
        wlim = xb + grow_limit * (xc - xb)
        if iter > maxiter:
            raise RuntimeError("Too many iterations.")
        iter += 1
        if (w - xc) * (xb - w) > 0.0:
            # Trial point lies between xb and xc.
            fw = func(*((w,) + args))
            funcalls += 1
            if (fw < fc):
                # Minimum bracketed between xb and xc.
                xa = xb
                xb = w
                fa = fb
                fb = fw
                return xa, xb, xc, fa, fb, fc, funcalls
            elif (fw > fb):
                # Minimum bracketed between xa and w.
                xc = w
                fc = fw
                return xa, xb, xc, fa, fb, fc, funcalls
            # Parabolic step was not useful; take a default golden step.
            w = xc + _gold * (xc - xb)
            fw = func(*((w,) + args))
            funcalls += 1
        elif (w - wlim)*(wlim - xc) >= 0.0:
            # Trial point is beyond the growth limit; clamp to wlim.
            w = wlim
            fw = func(*((w,) + args))
            funcalls += 1
        elif (w - wlim)*(xc - w) > 0.0:
            # Trial point is between xc and the growth limit.
            fw = func(*((w,) + args))
            funcalls += 1
            if (fw < fc):
                # Still descending; shift the triple along and probe again.
                xb = xc
                xc = w
                w = xc + _gold * (xc - xb)
                fb = fc
                fc = fw
                fw = func(*((w,) + args))
                funcalls += 1
        else:
            # Reject the parabolic trial; plain golden-ratio step instead.
            w = xc + _gold * (xc - xb)
            fw = func(*((w,) + args))
            funcalls += 1
        # Slide the bracket downhill: (xa, xb, xc) <- (xb, xc, w).
        xa = xb
        xb = xc
        xc = w
        fa = fb
        fb = fc
        fc = fw
    return xa, xb, xc, fa, fb, fc, funcalls
def _linesearch_powell(func, p, xi, tol=1e-3):
    """Line-search helper used by Powell's method.

    Find the minimum of ``func(p + alpha*xi)`` over the scalar
    ``alpha``. Note: despite the original comment mentioning
    `fminbound`, this delegates to `brent` (unbounded Brent search).

    Returns
    -------
    fret : scalar
        Function value at the line minimum.
    p + xi : ndarray
        The new point after the step.
    xi : ndarray
        The actual displacement taken, ``alpha_min * xi``.
    """
    def myfunc(alpha):
        # 1-D slice of the objective along direction xi through p.
        return func(p + alpha*xi)
    alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)
    xi = alpha_min*xi
    return squeeze(fret), p + xi, xi
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
                maxfun=None, full_output=0, disp=1, retall=0, callback=None,
                direc=None):
    """
    Minimize a function using modified Powell's method. This method
    only uses function values, not derivatives.

    Powell's method is a conjugate-direction scheme: the inner loop
    line-minimizes over each direction in the current direction set,
    and the direction that gave the largest decrease may then be
    replaced by the overall displacement of the inner loop, provided
    no further gain can be made along it and it accounted for a
    sufficiently large fraction of the decrease.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to func.
    xtol : float, optional
        Line-search error tolerance.
    ftol : float, optional
        Relative error in ``func(xopt)`` acceptable for convergence.
    maxiter : int, optional
        Maximum number of iterations to perform.
    maxfun : int, optional
        Maximum number of function evaluations to make.
    full_output : bool, optional
        If True, fopt, xi, direc, iter, funcalls, and warnflag are
        returned.
    disp : bool, optional
        If True, print convergence messages.
    retall : bool, optional
        If True, return a list of the solution at each iteration.
    callback : callable, optional
        Called after each iteration as ``callback(xk)``, where ``xk``
        is the current parameter vector.
    direc : ndarray, optional
        Initial direction set.

    Returns
    -------
    xopt : ndarray
        Parameter which minimizes `func`.
    fopt : number
        Value of function at minimum: ``fopt = func(xopt)``.
    direc : ndarray
        Current direction set.
    iter : int
        Number of iterations.
    funcalls : int
        Number of function calls made.
    warnflag : int
        1 if the maximum number of function evaluations was reached,
        2 if the maximum number of iterations was reached.
    allvecs : list
        List of solutions at each iteration (only when `retall`).

    See also
    --------
    minimize: Interface to unconstrained minimization algorithms for
        multivariate functions. See the 'Powell' `method` in particular.

    References
    ----------
    Powell M.J.D. (1964) An efficient method for finding the minimum of a
    function of several variables without calculating derivatives,
    Computer Journal, 7 (2):155-162.
    Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
    Numerical Recipes (any edition), Cambridge University Press
    """
    # Delegate to the OptimizeResult-based implementation.
    res = _minimize_powell(func, x0, args, callback=callback,
                           xtol=xtol, ftol=ftol, maxiter=maxiter,
                           maxfev=maxfun, disp=disp, direc=direc,
                           return_all=retall)
    if full_output:
        out = [res['x'], res['fun'], res['direc'], res['nit'],
               res['nfev'], res['status']]
        if retall:
            out.append(res['allvecs'])
        return tuple(out)
    if retall:
        return res['x'], res['allvecs']
    return res['x']
def _minimize_powell(func, x0, args=(), callback=None,
                     xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
                     disp=False, direc=None, return_all=False,
                     **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    modified Powell algorithm.
    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    ftol : float
        Relative error in ``fun(xopt)`` acceptable for convergence.
    maxiter : int
        Maximum number of iterations to perform.
    maxfev : int
        Maximum number of function evaluations to make.
    direc : ndarray
        Initial set of direction vectors for the Powell method.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxfev
    retall = return_all
    # we need to use a mutable object here that we can update in the
    # wrapper function
    fcalls, func = wrap_function(func, args)
    x = asarray(x0).flatten()
    if retall:
        allvecs = [x]
    N = len(x)
    # Default budgets scale with the problem dimensionality.
    if maxiter is None:
        maxiter = N * 1000
    if maxfun is None:
        maxfun = N * 1000
    # Default direction set: the coordinate axes.
    if direc is None:
        direc = eye(N, dtype=float)
    else:
        direc = asarray(direc, dtype=float)
    fval = squeeze(func(x))
    x1 = x.copy()
    iter = 0
    ilist = list(range(N))
    while True:
        fx = fval
        bigind = 0
        delta = 0.0
        # Inner loop: line-minimize along every direction in the set,
        # remembering which direction produced the largest decrease.
        for i in ilist:
            direc1 = direc[i]
            fx2 = fval
            fval, x, direc1 = _linesearch_powell(func, x, direc1,
                                                 tol=xtol * 100)
            if (fx2 - fval) > delta:
                delta = fx2 - fval
                bigind = i
        iter += 1
        if callback is not None:
            callback(x)
        if retall:
            allvecs.append(x)
        # Relative decrease convergence test (1e-20 avoids zero bound).
        bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20
        if 2.0 * (fx - fval) <= bnd:
            break
        if fcalls[0] >= maxfun:
            break
        if iter >= maxiter:
            break
        # Construct the extrapolated point
        direc1 = x - x1
        x2 = 2*x - x1
        x1 = x.copy()
        fx2 = squeeze(func(x2))
        if (fx > fx2):
            # Powell's criterion for replacing the direction of largest
            # decrease with the overall displacement direction.
            t = 2.0*(fx + fx2 - 2.0*fval)
            temp = (fx - fval - delta)
            t *= temp*temp
            temp = fx - fx2
            t -= delta*temp*temp
            if t < 0.0:
                fval, x, direc1 = _linesearch_powell(func, x, direc1,
                                                     tol=xtol*100)
                direc[bigind] = direc[-1]
                direc[-1] = direc1
    # warnflag: 0 converged, 1 hit maxfun, 2 hit maxiter.
    warnflag = 0
    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
        if disp:
            print("Warning: " + msg)
    elif iter >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print(" Current function value: %f" % fval)
            print(" Iterations: %d" % iter)
            print(" Function evaluations: %d" % fcalls[0])
    x = squeeze(x)
    result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x)
    if retall:
        result['allvecs'] = allvecs
    return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
"(using xtol = ", xtol, ")")
if flag == 1:
if disp:
print("\nMaximum number of function evaluations exceeded --- "
"increase maxfun argument.\n")
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
          disp=False):
    """Minimize a function over a given range by brute force.
    Uses the "brute force" method, i.e. computes the function's value
    at each point of a multidimensional grid of points, to find the global
    minimum of the function.
    The function is evaluated everywhere in the range with the datatype of the
    first call to the function, as enforced by the ``vectorize`` NumPy
    function. The value and type of the function evaluation returned when
    ``full_output=True`` are affected in addition by the ``finish`` argument
    (see Notes).
    Parameters
    ----------
    func : callable
        The objective function to be minimized. Must be in the
        form ``f(x, *args)``, where ``x`` is the argument in
        the form of a 1-D array and ``args`` is a tuple of any
        additional fixed parameters needed to completely specify
        the function.
    ranges : tuple
        Each component of the `ranges` tuple must be either a
        "slice object" or a range tuple of the form ``(low, high)``.
        The program uses these to create the grid of points on which
        the objective function will be computed. See `Note 2` for
        more detail.
    args : tuple, optional
        Any additional fixed parameters needed to completely specify
        the function.
    Ns : int, optional
        Number of grid points along the axes, if not otherwise
        specified. See `Note2`.
    full_output : bool, optional
        If True, return the evaluation grid and the objective function's
        values on it.
    finish : callable, optional
        An optimization function that is called with the result of brute force
        minimization as initial guess. `finish` should take `func` and
        the initial guess as positional arguments, and take `args` as
        keyword arguments. It may additionally take `full_output`
        and/or `disp` as keyword arguments. Use None if no "polishing"
        function is to be used. See Notes for more details.
    disp : bool, optional
        Set to True to print convergence messages.
    Returns
    -------
    x0 : ndarray
        A 1-D array containing the coordinates of a point at which the
        objective function had its minimum value. (See `Note 1` for
        which point is returned.)
    fval : float
        Function value at the point `x0`. (Returned when `full_output` is
        True.)
    grid : tuple
        Representation of the evaluation grid. It has the same
        length as `x0`. (Returned when `full_output` is True.)
    Jout : ndarray
        Function values at each point of the evaluation
        grid, `i.e.`, ``Jout = func(*grid)``. (Returned
        when `full_output` is True.)
    See Also
    --------
    basinhopping, differential_evolution
    Notes
    -----
    *Note 1*: The program finds the gridpoint at which the lowest value
    of the objective function occurs. If `finish` is None, that is the
    point returned. When the global minimum occurs within (or not very far
    outside) the grid's boundaries, and the grid is fine enough, that
    point will be in the neighborhood of the global minimum.
    However, users often employ some other optimization program to
    "polish" the gridpoint values, `i.e.`, to seek a more precise
    (local) minimum near `brute's` best gridpoint.
    The `brute` function's `finish` option provides a convenient way to do
    that. Any polishing program used must take `brute's` output as its
    initial guess as a positional argument, and take `brute's` input values
    for `args` as keyword arguments, otherwise an error will be raised.
    It may additionally take `full_output` and/or `disp` as keyword arguments.
    `brute` assumes that the `finish` function returns either an
    `OptimizeResult` object or a tuple in the form:
    ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing
    value of the argument, ``Jmin`` is the minimum value of the objective
    function, "..." may be some other returned values (which are not used
    by `brute`), and ``statuscode`` is the status code of the `finish` program.
    Note that when `finish` is not None, the values returned are those
    of the `finish` program, *not* the gridpoint ones. Consequently,
    while `brute` confines its search to the input grid points,
    the `finish` program's results usually will not coincide with any
    gridpoint, and may fall outside the grid's boundary.
    *Note 2*: The grid of points is a `numpy.mgrid` object.
    For `brute` the `ranges` and `Ns` inputs have the following effect.
    Each component of the `ranges` tuple can be either a slice object or a
    two-tuple giving a range of values, such as (0, 5). If the component is a
    slice object, `brute` uses it directly. If the component is a two-tuple
    range, `brute` internally converts it to a slice object that interpolates
    `Ns` points from its low-value to its high-value, inclusive.
    Examples
    --------
    We illustrate the use of `brute` to seek the global minimum of a function
    of two variables that is given as the sum of a positive-definite
    quadratic and two deep "Gaussian-shaped" craters. Specifically, define
    the objective function `f` as the sum of three other functions,
    ``f = f1 + f2 + f3``. We suppose each of these has a signature
    ``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions
    are as defined below.
    >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
    >>> def f1(z, *params):
    ... x, y = z
    ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
    >>> def f2(z, *params):
    ... x, y = z
    ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
    >>> def f3(z, *params):
    ... x, y = z
    ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
    >>> def f(z, *params):
    ... return f1(z, *params) + f2(z, *params) + f3(z, *params)
    Thus, the objective function may have local minima near the minimum
    of each of the three functions of which it is composed. To
    use `fmin` to polish its gridpoint result, we may then continue as
    follows:
    >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
    >>> from scipy import optimize
    >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
    ... finish=optimize.fmin)
    >>> resbrute[0] # global minimum
    array([-1.05665192, 1.80834843])
    >>> resbrute[1] # function value at global minimum
    -3.4085818767
    Note that if `finish` had been set to None, we would have gotten the
    gridpoint [-1.0 1.75] where the rounded function value is -2.892.
    """
    N = len(ranges)
    if N > 40:
        raise ValueError("Brute Force not possible with more "
                         "than 40 variables.")
    lrange = list(ranges)
    for k in range(N):
        if type(lrange[k]) is not type(slice(None)):
            # Convert a (low, high) pair into an Ns-point slice; a complex
            # step tells numpy.mgrid "number of points", not step size.
            if len(lrange[k]) < 3:
                lrange[k] = tuple(lrange[k]) + (complex(Ns),)
            lrange[k] = slice(*lrange[k])
    if (N == 1):
        lrange = lrange[0]
    def _scalarfunc(*params):
        # Adapter: mgrid supplies one scalar per dimension, while `func`
        # expects a single 1-D array argument.
        params = squeeze(asarray(params))
        return func(params, *args)
    vecfunc = vectorize(_scalarfunc)
    grid = mgrid[lrange]
    if (N == 1):
        grid = (grid,)
    # Evaluate the objective on the whole grid.
    Jout = vecfunc(*grid)
    Nshape = shape(Jout)
    # Unravel the flat argmin index into per-axis indices (last axis first).
    indx = argmin(Jout.ravel(), axis=-1)
    Nindx = zeros(N, int)
    xmin = zeros(N, float)
    for k in range(N - 1, -1, -1):
        thisN = Nshape[k]
        Nindx[k] = indx % Nshape[k]
        indx = indx // thisN
    for k in range(N):
        xmin[k] = grid[k][tuple(Nindx)]
    Jmin = Jout[tuple(Nindx)]
    if (N == 1):
        grid = grid[0]
        xmin = xmin[0]
    if callable(finish):
        # set up kwargs for `finish` function
        # NOTE(review): `getargspec` is deprecated in modern Python in
        # favor of `inspect.signature` — confirm against the file's import.
        finish_args = getargspec(finish).args
        finish_kwargs = dict()
        if 'full_output' in finish_args:
            finish_kwargs['full_output'] = 1
        if 'disp' in finish_args:
            finish_kwargs['disp'] = disp
        elif 'options' in finish_args:
            # pass 'disp' as `options`
            # (e.g. if `finish` is `minimize`)
            finish_kwargs['options'] = {'disp': disp}
        # run minimizer
        res = finish(func, xmin, args=args, **finish_kwargs)
        if isinstance(res, OptimizeResult):
            xmin = res.x
            Jmin = res.fun
            success = res.success
        else:
            # Legacy tuple convention: (xmin, Jmin, ..., statuscode).
            xmin = res[0]
            Jmin = res[1]
            success = res[-1] == 0
        if not success:
            if disp:
                print("Warning: Either final optimization did not succeed "
                      "or `finish` does not return `statuscode` as its last "
                      "argument.")
    if full_output:
        return xmin, Jmin, grid, Jout
    else:
        return xmin
def show_options(solver=None, method=None, disp=True):
    """
    Show documentation for additional options of optimization solvers.
    These are method-specific options that can be supplied through the
    ``options`` dict.
    Parameters
    ----------
    solver : str
        Type of optimization solver. One of 'minimize', 'minimize_scalar',
        'root', or 'linprog'.
    method : str, optional
        If not given, shows all methods of the specified solver. Otherwise,
        show only the options for the specified method. Valid values
        corresponds to methods' names of respective solver (e.g. 'BFGS' for
        'minimize').
    disp : bool, optional
        Whether to print the result rather than returning it.
    Returns
    -------
    text
        Either None (for disp=False) or the text string (disp=True)
    Notes
    -----
    The solver-specific methods are:
    `scipy.optimize.minimize`
    - :ref:`Nelder-Mead <optimize.minimize-neldermead>`
    - :ref:`Powell <optimize.minimize-powell>`
    - :ref:`CG <optimize.minimize-cg>`
    - :ref:`BFGS <optimize.minimize-bfgs>`
    - :ref:`Newton-CG <optimize.minimize-newtoncg>`
    - :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`
    - :ref:`TNC <optimize.minimize-tnc>`
    - :ref:`COBYLA <optimize.minimize-cobyla>`
    - :ref:`SLSQP <optimize.minimize-slsqp>`
    - :ref:`dogleg <optimize.minimize-dogleg>`
    - :ref:`trust-ncg <optimize.minimize-trustncg>`
    `scipy.optimize.root`
    - :ref:`hybr <optimize.root-hybr>`
    - :ref:`lm <optimize.root-lm>`
    - :ref:`broyden1 <optimize.root-broyden1>`
    - :ref:`broyden2 <optimize.root-broyden2>`
    - :ref:`anderson <optimize.root-anderson>`
    - :ref:`linearmixing <optimize.root-linearmixing>`
    - :ref:`diagbroyden <optimize.root-diagbroyden>`
    - :ref:`excitingmixing <optimize.root-excitingmixing>`
    - :ref:`krylov <optimize.root-krylov>`
    - :ref:`df-sane <optimize.root-dfsane>`
    `scipy.optimize.minimize_scalar`
    - :ref:`brent <optimize.minimize_scalar-brent>`
    - :ref:`golden <optimize.minimize_scalar-golden>`
    - :ref:`bounded <optimize.minimize_scalar-bounded>`
    `scipy.optimize.linprog`
    - :ref:`simplex <optimize.linprog-simplex>`
    """
    import textwrap
    # Mapping: solver name -> tuple of (method name, dotted path to the
    # object whose docstring documents that method's options). The target
    # is either the implementing ``_minimize_*``/``_root_*`` function or a
    # dedicated ``*_doc`` stub.
    doc_routines = {
        'minimize': (
            ('bfgs', 'scipy.optimize.optimize._minimize_bfgs'),
            ('cg', 'scipy.optimize.optimize._minimize_cg'),
            ('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'),
            ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
            ('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'),
            ('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'),
            ('newtoncg', 'scipy.optimize.optimize._minimize_newtoncg'),
            ('powell', 'scipy.optimize.optimize._minimize_powell'),
            ('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'),
            ('tnc', 'scipy.optimize.tnc._minimize_tnc'),
            ('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
        ),
        'root': (
            ('hybr', 'scipy.optimize.minpack._root_hybr'),
            ('lm', 'scipy.optimize._root._root_leastsq'),
            ('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
            ('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
            ('anderson', 'scipy.optimize._root._root_anderson_doc'),
            ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
            ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),
            ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
            ('krylov', 'scipy.optimize._root._root_krylov_doc'),
            ('df-sane', 'scipy.optimize._spectral._root_df_sane'),
        ),
        'linprog': (
            ('simplex', 'scipy.optimize._linprog._linprog_simplex'),
        ),
        'minimize_scalar': (
            ('brent', 'scipy.optimize.optimize._minimize_scalar_brent'),
            ('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'),
            ('golden', 'scipy.optimize.optimize._minimize_scalar_golden'),
        ),
    }
    if solver is None:
        # No solver given: recursively collect every solver's text,
        # separated by reST-style section headers.
        text = ["\n\n\n========\n", "minimize\n", "========\n"]
        text.append(show_options('minimize', disp=False))
        text.extend(["\n\n===============\n", "minimize_scalar\n",
                     "===============\n"])
        text.append(show_options('minimize_scalar', disp=False))
        text.extend(["\n\n\n====\n", "root\n",
                     "====\n"])
        text.append(show_options('root', disp=False))
        text.extend(['\n\n\n=======\n', 'linprog\n',
                     '=======\n'])
        text.append(show_options('linprog', disp=False))
        text = "".join(text)
    else:
        # Solver/method names are matched case-insensitively.
        solver = solver.lower()
        if solver not in doc_routines:
            raise ValueError('Unknown solver %r' % (solver,))
        if method is None:
            # Concatenate the options text of every method of this solver.
            text = []
            for name, _ in doc_routines[solver]:
                text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"])
                text.append(show_options(solver, name, disp=False))
            text = "".join(text)
        else:
            methods = dict(doc_routines[solver])
            if method not in methods:
                raise ValueError("Unknown method %r" % (method,))
            name = methods[method]
            # Import function object
            parts = name.split('.')
            mod_name = ".".join(parts[:-1])
            __import__(mod_name)
            # NOTE(review): relies on a module-level ``import sys`` that is
            # not visible in this chunk -- confirm it exists at file top.
            obj = getattr(sys.modules[mod_name], parts[-1])
            # Get doc
            doc = obj.__doc__
            if doc is not None:
                text = textwrap.dedent(doc).strip()
            else:
                text = ""
    if disp:
        print(text)
        return
    else:
        return text
def main():
    """Benchmark several optimizers on the order-3 Rosenbrock function.

    Each solver is started from the same initial guess; the solution it
    finds is printed, followed by a summary table of wall-clock timings.
    """
    import time
    times = []
    algor = []
    x0 = [0.8, 1.2, 0.7]

    def _bench(title, label, func, *args, **kwargs):
        # Print a header, run one solver from x0, and record its timing.
        # The "=" underline matches len(title), reproducing the original
        # per-solver output exactly.
        print(title)
        print("=" * len(title))
        start = time.time()
        x = func(*args, **kwargs)
        print(x)
        times.append(time.time() - start)
        algor.append(label)
        print()

    _bench("Nelder-Mead Simplex", 'Nelder-Mead Simplex\t',
           fmin, rosen, x0)
    _bench("Powell Direction Set Method", 'Powell Direction Set Method.',
           fmin_powell, rosen, x0)
    _bench("Nonlinear CG", 'Nonlinear CG \t',
           fmin_cg, rosen, x0, fprime=rosen_der, maxiter=200)
    _bench("BFGS Quasi-Newton", 'BFGS Quasi-Newton\t',
           fmin_bfgs, rosen, x0, fprime=rosen_der, maxiter=80)
    _bench("BFGS approximate gradient", 'BFGS without gradient\t',
           fmin_bfgs, rosen, x0, gtol=1e-4, maxiter=100)
    _bench("Newton-CG with Hessian product", 'Newton-CG with hessian product',
           fmin_ncg, rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80)
    _bench("Newton-CG with full Hessian", 'Newton-CG with full hessian',
           fmin_ncg, rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)

    print("\nMinimizing the Rosenbrock function of order 3\n")
    print(" Algorithm \t\t\t Seconds")
    print("===========\t\t\t =========")
    for k in range(len(algor)):
        print(algor[k], "\t -- ", times[k])
# Run the optimizer benchmark suite when executed as a script.
if __name__ == "__main__":
    main()
| felipebetancur/scipy | scipy/optimize/optimize.py | Python | bsd-3-clause | 96,319 | [
"Gaussian"
] | ec5e474c913e368d0e15ecea771915e31e4c5b92563fa2b189bc2508352cedfd |
# -*- coding: utf-8 -*-
"""Filters to supplement :mod:`pybel.struct.filters`."""
from .edge_filters import * # noqa: F401,F403
from .node_deletion import * # noqa: F401,F403
from .node_filters import * # noqa: F401,F403
| pybel/pybel-tools | src/pybel_tools/filters/__init__.py | Python | mit | 225 | [
"Pybel"
] | 21fab9091e235e532e676df1ba767b2298eb4cade2d2a5e2de5f6e71f9cae183 |
#!/usr/bin/env python
'''
Developer script to convert yaml periodic table to json format.
Created on Nov 15, 2011
'''
from __future__ import division
import json
from itertools import product
import ruamel.yaml as yaml
import re
from monty.serialization import loadfn
from pymatgen import Element
from pymatgen.core.periodic_table import get_el_sp
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 15, 2011"
def test_yaml():
    """Smoke test: load the YAML periodic table and dump it to stdout."""
    with open('periodic_table.yaml', 'r') as stream:
        table = yaml.load(stream)
    print(table)
def test_json():
    """Smoke test: load the JSON periodic table and dump it to stdout."""
    with open('periodic_table.json', 'r') as stream:
        table = json.load(stream)
    print(table)
def parse_oxi_state():
    """Scrape oxidation states from oxidation_states.txt (an HTML table dump)
    and merge them into the YAML periodic table, writing periodic_table2.yaml.

    Bolded entries in the HTML are treated as *common* oxidation states.
    """
    with open('periodic_table.yaml', 'r') as f:
        data = yaml.load(f)
    f = open('oxidation_states.txt', 'r')
    oxidata = f.read()
    f.close()
    # Flatten the HTML to a single line so each <tr>...</tr> row can be
    # matched in one pass.
    oxidata = re.sub('[\n\r]', '', oxidata)
    patt = re.compile('<tr>(.*?)</tr>', re.MULTILINE)
    for m in patt.finditer(oxidata):
        line = m.group(1)
        # Strip closing cells, collapse repeated <td> openers, and drop
        # anchor tags so only cell text remains.
        line = re.sub('</td>', '', line)
        line = re.sub('(<td>)+', '<td>', line)
        line = re.sub('</*a[^>]*>', '', line)
        el = None
        oxistates = []
        common_oxi = []
        for tok in re.split('<td>', line.strip()):
            # A bolded bare symbol marks the element for this row.
            m2 = re.match("<b>([A-Z][a-z]*)</b>", tok)
            if m2:
                el = m2.group(1)
            else:
                # Signed integers are oxidation states; bolded ones are
                # additionally recorded as common oxidation states.
                m3 = re.match("(<b>)*([\+\-]\d)(</b>)*", tok)
                if m3:
                    oxistates.append(int(m3.group(2)))
                    if m3.group(1):
                        common_oxi.append(int(m3.group(2)))
        if el in data:
            # Replace the legacy keys with the new space-separated ones.
            del data[el]['Max oxidation state']
            del data[el]['Min oxidation state']
            del data[el]['Oxidation_states']
            del data[el]['Common_oxidation_states']
            data[el]['Oxidation states'] = oxistates
            data[el]['Common oxidation states'] = common_oxi
        else:
            # Element symbol not found in the table -- report and skip.
            print(el)
    with open('periodic_table2.yaml', 'w') as f:
        yaml.dump(data, f)
def parse_ionic_radii():
    """Parse ionic_radii.csv and merge per-oxidation-state ionic radii into
    the YAML periodic table, writing periodic_table2.yaml.

    The CSV header row holds oxidation states; each data row holds one
    element, optionally with a spin-state suffix (e.g. "Fe hs").
    """
    with open('periodic_table.yaml', 'r') as f:
        data = yaml.load(f)
    f = open('ionic_radii.csv', 'r')
    radiidata = f.read()
    f.close()
    # The file uses old Mac-style carriage-return line endings.
    radiidata = radiidata.split("\r")
    header = radiidata[0].split(",")
    for i in range(1, len(radiidata)):
        line = radiidata[i]
        toks = line.strip().split(",")
        suffix = ""
        name = toks[1]
        # "Fe hs" / "Fe ls" style names become key suffixes "_hs" / "_ls".
        if len(name.split(" ")) > 1:
            suffix = "_" + name.split(" ")[1]
        el = toks[2]
        ionic_radii = {}
        for j in range(3, len(toks)):
            # Keep only numeric cells; the column header gives the charge.
            m = re.match("^\s*([0-9\.]+)", toks[j])
            if m:
                ionic_radii[int(header[j])] = float(m.group(1))
        if el in data:
            data[el]['Ionic_radii' + suffix] = ionic_radii
            # High-spin values double as the spin-unspecified default.
            if suffix == '_hs':
                data[el]['Ionic_radii'] = ionic_radii
        else:
            print(el)
    with open('periodic_table2.yaml', 'w') as f:
        yaml.dump(data, f)
def parse_radii():
    """Parse radii.csv and merge atomic, calculated-atomic and van der Waals
    radii into the periodic table data.

    Writes both periodic_table2.yaml and periodic_table.json. Radii are
    given in pm in the CSV and stored in Angstrom; non-numeric cells
    (e.g. "no data") are stored verbatim, as before.
    """
    with open('periodic_table.yaml', 'r') as f:
        data = yaml.load(f)
    with open('radii.csv', 'r') as f:
        radiidata = f.read()
    # The file uses old Mac-style carriage-return line endings.
    radiidata = radiidata.split("\r")
    header = radiidata[0].split(",")

    def _to_angstrom(tok):
        # pm -> Angstrom; keep the raw token when it is not numeric.
        # (Was a bare ``except:`` -- narrowed to ValueError so real errors
        # such as KeyboardInterrupt are no longer swallowed.)
        try:
            return float(tok) / 100
        except ValueError:
            return tok

    for i in range(1, len(radiidata)):
        line = radiidata[i]
        toks = line.strip().split(",")
        el = toks[1]
        atomic_radii = _to_angstrom(toks[3])
        atomic_radii_calc = _to_angstrom(toks[4])
        vdw_radii = _to_angstrom(toks[5])
        if el in data:
            data[el]['Atomic radius'] = atomic_radii
            data[el]['Atomic radius calculated'] = atomic_radii_calc
            data[el]['Van der waals radius'] = vdw_radii
        else:
            # Element not present in the table -- report and skip.
            print(el)
    with open('periodic_table2.yaml', 'w') as f:
        yaml.dump(data, f)
    with open('periodic_table.json', 'w') as f:
        json.dump(data, f)
def update_ionic_radii():
    """Rescale all Ionic_radii* entries from pm to Angstrom and rename the
    keys to their space-separated form, rewriting both output files."""
    with open('periodic_table.yaml', 'r') as f:
        data = yaml.load(f)
    # (old key, new key) pairs: default, high-spin, low-spin.
    renames = (("Ionic_radii", "Ionic radii"),
               ("Ionic_radii_hs", "Ionic radii hs"),
               ("Ionic_radii_ls", "Ionic radii ls"))
    for el, d in data.items():
        for old_key, new_key in renames:
            if old_key in d:
                d[new_key] = {k: v / 100 for k, v in d[old_key].items()}
                del d[old_key]
    with open('periodic_table2.yaml', 'w') as f:
        yaml.dump(data, f)
    with open('periodic_table.json', 'w') as f:
        json.dump(data, f)
def parse_shannon_radii():
    """Read "Shannon Radii.xlsx" and attach a nested "Shannon radii" mapping
    (charge -> coordination number -> spin -> radii) to each element.

    Rewrites periodic_table.yaml in place and regenerates
    periodic_table.json.
    """
    with open('periodic_table.yaml', 'r') as f:
        data = yaml.load(f)
    from openpyxl import load_workbook
    import collections
    wb = load_workbook('Shannon Radii.xlsx')
    # ``get_sheet_names()`` is deprecated in openpyxl; ``sheetnames`` is the
    # supported property.
    print(wb.sheetnames)
    sheet = wb["Sheet1"]
    i = 2
    radii = collections.defaultdict(dict)
    # Element (A), charge (B) and CN (C) cells are merged in the sheet, so a
    # blank cell means "same as the previous row": carry the last seen value
    # forward. Column E (crystal radius) is used as the end-of-data marker.
    while sheet["E%d" % i].value:
        if sheet["A%d" % i].value:
            el = sheet["A%d" % i].value
        if sheet["B%d" % i].value:
            charge = int(sheet["B%d" % i].value)
            radii[el][charge] = dict()
        if sheet["C%d" % i].value:
            cn = sheet["C%d" % i].value
            if cn not in radii[el][charge]:
                radii[el][charge][cn] = dict()
        if sheet["D%d" % i].value is not None:
            spin = sheet["D%d" % i].value
        else:
            spin = ""
        radii[el][charge][cn][spin] = {
            "crystal_radius": float(sheet["E%d" % i].value),
            "ionic_radius": float(sheet["F%d" % i].value),
        }
        i += 1
    for el in radii.keys():
        if el in data:
            data[el]["Shannon radii"] = dict(radii[el])
    with open('periodic_table.yaml', 'w') as f:
        yaml.safe_dump(data, f)
    with open('periodic_table.json', 'w') as f:
        json.dump(data, f)
def gen_periodic_table():
    """Regenerate periodic_table.json from the YAML source of truth."""
    with open('periodic_table.yaml', 'r') as src:
        table = yaml.load(src)
    with open('periodic_table.json', 'w') as dest:
        json.dump(table, dest)
def gen_iupac_ordering():
    """Assign an 'IUPAC ordering' index to every element in
    periodic_table.json, following IUPAC's electronegativity-based sequence
    (noble gases first, halogens last).

    NOTE(review): the result is only mutated in memory -- unlike the other
    generators, nothing is written back to disk here. Confirm whether a
    dump step is missing or callers serialize ``periodic_table`` themselves.
    """
    periodic_table = loadfn("periodic_table.json")
    # Each entry is (groups, rows); expanding their cartesian product in
    # order yields the IUPAC element sequence.
    order = [([18], range(6, 0, -1)),  # noble gasses
             ([1], range(7, 1, -1)),  # alkali metals
             ([2], range(7, 1, -1)),  # alkali earth metals
             (range(17, 2, -1), [9]),  # actinides
             (range(17, 2, -1), [8]),  # lanthanides
             ([3], (5, 4)),  # Y, Sc
             ([4], (6, 5, 4)),  # Hf -> Ti
             ([5], (6, 5, 4)),  # Ta -> V
             ([6], (6, 5, 4)),  # W -> Cr
             ([7], (6, 5, 4)),  # Re -> Mn
             ([8], (6, 5, 4)),  # Os -> Fe
             ([9], (6, 5, 4)),  # Ir -> Co
             ([10], (6, 5, 4)),  # Pt -> Ni
             ([11], (6, 5, 4)),  # Au -> Cu
             ([12], (6, 5, 4)),  # Hg -> Zn
             ([13], range(6, 1, -1)),  # Tl -> B
             ([14], range(6, 1, -1)),  # Pb -> C
             ([15], range(6, 1, -1)),  # Bi -> N
             ([1], [1]),  # Hydrogen
             ([16], range(6, 1, -1)),  # Po -> O
             ([17], range(6, 1, -1))]  # At -> F
    order = sum([list(product(x, y)) for x, y in order], [])
    # Element -> position-in-sequence mapping.
    iupac_ordering_dict = dict(zip(
        [Element.from_row_and_group(row, group) for group, row in order],
        range(len(order))))
    # first clean periodic table of any IUPAC ordering
    for el in periodic_table:
        periodic_table[el].pop('IUPAC ordering', None)
    # now add iupac ordering
    for el in periodic_table:
        if 'IUPAC ordering' in periodic_table[el]:
            # sanity check that we don't cover the same element twice
            # NOTE(review): this check can never fire -- the loop above just
            # removed the key from every element. Likely vestigial.
            raise KeyError("IUPAC ordering already exists for {}".format(el))
        periodic_table[el]['IUPAC ordering'] = iupac_ordering_dict[get_el_sp(el)]
# Entry point: only the Shannon-radii update runs by default; the other
# generators are enabled by editing this block.
if __name__ == "__main__":
    parse_shannon_radii()
    #gen_periodic_table()
| dongsenfo/pymatgen | dev_scripts/update_pt_data.py | Python | mit | 8,456 | [
"pymatgen"
] | 3f5ec1366eca567975455246ea652391eb9f3e59aaab9393235771d33dcb579d |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import numpy as np
import unittest
import os
from pymatgen.analysis.structure_analyzer import VoronoiCoordFinder, \
solid_angle, contains_peroxide, RelaxationAnalyzer, VoronoiConnectivity, \
oxide_type, sulfide_type, OrderParameters, average_coordination_number, \
VoronoiAnalyzer, JMolCoordFinder, get_dimensionality
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import Xdatcar
from pymatgen import Element, Structure, Lattice
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class VoronoiCoordFinderTest(PymatgenTest):
    """Checks VoronoiCoordFinder against a LiFePO4 reference structure,
    considering only O as coordinating species."""

    def setUp(self):
        structure = self.get_structure('LiFePO4')
        self.finder = VoronoiCoordFinder(structure, [Element("O")])

    def test_get_voronoi_polyhedra(self):
        polyhedra = self.finder.get_voronoi_polyhedra(0)
        self.assertEqual(len(polyhedra.items()), 8)

    def test_get_coordination_number(self):
        coordination = self.finder.get_coordination_number(0)
        self.assertAlmostEqual(coordination, 5.809265748999465, 7)

    def test_get_coordinated_sites(self):
        coordinated = self.finder.get_coordinated_sites(0)
        self.assertEqual(len(coordinated), 8)
class VoronoiAnalyzerTest(PymatgenTest):
    """Tests VoronoiAnalyzer on an MD trajectory (XDATCAR.MD)."""

    def setUp(self):
        # Use frame 1 of the trajectory for the single-structure test.
        self.ss = Xdatcar(os.path.join(test_dir, 'XDATCAR.MD')).structures
        self.s = self.ss[1]
        self.va = VoronoiAnalyzer(cutoff=4.0)

    def test_analyze(self):
        # Check for the Voronoi index of site i in Structure
        single_structure = self.va.analyze(self.s, n=5)
        self.assertIn(single_structure.view(),
                      np.array([4, 3, 3, 4, 2, 2, 1, 0]).view(),
                      "Cannot find the right polyhedron.")
        # Check for the presence of a Voronoi index and its frequency in
        # a ensemble (list) of Structures
        ensemble = self.va.analyze_structures(self.ss, step_freq=2,
                                              most_frequent_polyhedra=10)
        self.assertIn(('[1 3 4 7 1 0 0 0]', 3),
                      ensemble, "Cannot find the right polyhedron in ensemble.")
class JMolCoordFinderTest(PymatgenTest):
    """Exercises JMolCoordFinder on LiFePO4, including bond-order overrides."""

    def test_get_coordination_number(self):
        s = self.get_structure('LiFePO4')

        # Expected coordination numbers per species with default parameters.
        expected_cns = {Element("Li"): 0, Element("Fe"): 6, Element("P"): 4}
        finder = JMolCoordFinder()
        nsites_checked = 0
        for site_idx, site in enumerate(s):
            if site.specie in expected_cns:
                self.assertEqual(finder.get_coordination_number(s, site_idx),
                                 expected_cns[site.specie])
                nsites_checked += 1
        self.assertEqual(nsites_checked, 12)

        # A user override of Li's bond order makes Li show up 6-coordinated.
        finder = JMolCoordFinder({"Li": 1})
        self.assertEqual(finder.get_coordination_number(s, 0), 6)

        # get_coordinated_sites must agree with the coordination number.
        self.assertEqual(len(finder.get_coordinated_sites(s, 0)), 6)
class GetDimensionalityTest(PymatgenTest):
    """Tests get_dimensionality on known 3D, 2D and bond-dependent cases."""

    def test_get_dimensionality(self):
        # LiFePO4 is a fully connected 3D framework.
        lfp = self.get_structure('LiFePO4')
        self.assertEqual(get_dimensionality(lfp), 3)
        # Graphite is a layered (2D) material.
        graphite = self.get_structure('Graphite')
        self.assertEqual(get_dimensionality(graphite), 2)

    def test_get_dimensionality_with_bonds(self):
        cscl = self.get_structure('CsCl')
        self.assertEqual(get_dimensionality(cscl), 1)
        # A longer Cs-Cl bond cutoff connects the full 3D network.
        self.assertEqual(get_dimensionality(cscl, bonds={("Cs", "Cl"): 3.7}), 3)
class RelaxationAnalyzerTest(unittest.TestCase):
    """Tests RelaxationAnalyzer on a Li2O POSCAR/CONTCAR relaxation pair."""

    def setUp(self):
        p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.Li2O'),
                             check_for_POTCAR=False)
        s1 = p.structure
        p = Poscar.from_file(os.path.join(test_dir, 'CONTCAR.Li2O'),
                             check_for_POTCAR=False)
        s2 = p.structure
        self.analyzer = RelaxationAnalyzer(s1, s2)

    def test_vol_and_para_changes(self):
        # All three lattice parameters shrink by the same fraction (cubic cell).
        for k, v in self.analyzer.get_percentage_lattice_parameter_changes().items():
            self.assertAlmostEqual(-0.0092040921155279731, v)
            latt_change = v
        vol_change = self.analyzer.get_percentage_volume_change()
        self.assertAlmostEqual(-0.0273589101391,
                               vol_change)
        # This is a simple cubic cell, so the latt and vol change are simply
        # Related. So let's test that.
        self.assertAlmostEqual((1 + latt_change) ** 3 - 1, vol_change)

    def test_get_percentage_bond_dist_changes(self):
        # Every bond shrinks uniformly with the isotropic lattice change.
        for k, v in self.analyzer.get_percentage_bond_dist_changes().items():
            for k2, v2 in v.items():
                self.assertAlmostEqual(-0.009204092115527862, v2)
class VoronoiConnectivityTest(PymatgenTest):
    """Tests VoronoiConnectivity's connectivity array and image-site lookup."""

    def test_connectivity_array(self):
        vc = VoronoiConnectivity(self.get_structure("LiFePO4"))
        ca = vc.connectivity_array
        # NOTE(review): passing np.NAN as ``threshold``/``suppress`` is
        # dubious (suppress is documented as a bool) -- presumably intended
        # to disable truncation; confirm against the numpy version in use.
        np.set_printoptions(threshold=np.NAN, linewidth=np.NAN, suppress=np.NAN)
        # Spot-check connectivity weights for site 15 at the central image.
        expected = np.array([0, 1.96338392, 0, 0.04594495])
        self.assertTrue(np.allclose(ca[15, :4, ca.shape[2] // 2], expected))
        expected = np.array([0, 0, 0])
        self.assertTrue(np.allclose(ca[1, -3:, 51], expected))
        # Image site 51 of site 27 should be an O at the expected frac coords.
        site = vc.get_sitej(27, 51)
        self.assertEqual(site.specie, Element('O'))
        expected = np.array([-0.29158, 0.74889, 0.95684])
        self.assertTrue(np.allclose(site.frac_coords, expected))
class MiscFunctionTest(PymatgenTest):
    """Tests the free functions in structure_analyzer: coordination-number
    averaging, solid angles, and oxide/sulfide classification."""

    def test_average_coordination_number(self):
        xdatcar = Xdatcar(os.path.join(test_dir, 'XDATCAR.MD'))
        coordination_numbers = average_coordination_number(xdatcar.structures,
                                                           freq=1)
        self.assertAlmostEqual(coordination_numbers['Fe'], 4.771903318390836, 5,
                               "Coordination number not calculated properly.")

    def test_solid_angle(self):
        center = [2.294508207929496, 4.4078057081404, 2.299997773791287]
        coords = [[1.627286218099362, 3.081185538926995, 3.278749383217061],
                  [1.776793751092763, 2.93741167455471, 3.058701096568852],
                  [3.318412187495734, 2.997331084033472, 2.022167590167672],
                  [3.874524708023352, 4.425301459451914, 2.771990305592935],
                  [2.055778446743566, 4.437449313863041, 4.061046832034642]]
        self.assertAlmostEqual(solid_angle(center, coords), 1.83570965938, 7,
                               "Wrong result returned by solid_angle")

    def test_contains_peroxide(self):
        # Ordinary oxides / phosphates must not be flagged as peroxides.
        for f in ['LiFePO4', 'NaFePO4', 'Li3V2(PO4)3', 'Li2O']:
            self.assertFalse(contains_peroxide(self.get_structure(f)))
        for f in ['Li2O2', "K2O2"]:
            self.assertTrue(contains_peroxide(self.get_structure(f)))

    def test_oxide_type(self):
        # LiO2-like structure: O-O distance characteristic of a superoxide.
        el_li = Element("Li")
        el_o = Element("O")
        latt = Lattice([[3.985034, 0.0, 0.0],
                        [0.0, 4.881506, 0.0],
                        [0.0, 0.0, 2.959824]])
        elts = [el_li, el_li, el_o, el_o, el_o, el_o]
        coords = list()
        coords.append([0.500000, 0.500000, 0.500000])
        coords.append([0.0, 0.0, 0.0])
        coords.append([0.632568, 0.085090, 0.500000])
        coords.append([0.367432, 0.914910, 0.500000])
        coords.append([0.132568, 0.414910, 0.000000])
        coords.append([0.867432, 0.585090, 0.000000])
        struct = Structure(latt, elts, coords)
        self.assertEqual(oxide_type(struct, 1.1), "superoxide")
        # LiO3-like structure: ozonide.
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_o, el_o, el_o]
        latt = Lattice.from_parameters(3.999911, 3.999911, 3.999911, 133.847504,
                                       102.228244, 95.477342)
        coords = [[0.513004, 0.513004, 1.000000],
                  [0.017616, 0.017616, 0.000000],
                  [0.649993, 0.874790, 0.775203],
                  [0.099587, 0.874790, 0.224797]]
        struct = Structure(latt, elts, coords)
        self.assertEqual(oxide_type(struct, 1.1), "ozonide")
        # Li2O2-like structure: peroxide.
        latt = Lattice.from_parameters(3.159597, 3.159572, 7.685205, 89.999884,
                                       89.999674, 60.000510)
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_li, el_li, el_li, el_o, el_o, el_o, el_o]
        coords = [[0.666656, 0.666705, 0.750001],
                  [0.333342, 0.333378, 0.250001],
                  [0.000001, 0.000041, 0.500001],
                  [0.000001, 0.000021, 0.000001],
                  [0.333347, 0.333332, 0.649191],
                  [0.333322, 0.333353, 0.850803],
                  [0.666666, 0.666686, 0.350813],
                  [0.666665, 0.666684, 0.149189]]
        struct = Structure(latt, elts, coords)
        self.assertEqual(oxide_type(struct, 1.1), "peroxide")
        # LiOH-like structure: hydroxide.
        el_li = Element("Li")
        el_o = Element("O")
        el_h = Element("H")
        latt = Lattice.from_parameters(3.565276, 3.565276, 4.384277, 90.000000,
                                       90.000000, 90.000000)
        elts = [el_h, el_h, el_li, el_li, el_o, el_o]
        coords = [[0.000000, 0.500000, 0.413969],
                  [0.500000, 0.000000, 0.586031],
                  [0.000000, 0.000000, 0.000000],
                  [0.500000, 0.500000, 0.000000],
                  [0.000000, 0.500000, 0.192672],
                  [0.500000, 0.000000, 0.807328]]
        struct = Structure(latt, elts, coords)
        self.assertEqual(oxide_type(struct, 1.1), "hydroxide")
        # Same geometry but with N instead of O: not an oxide at all.
        el_li = Element("Li")
        el_n = Element("N")
        el_h = Element("H")
        latt = Lattice.from_parameters(3.565276, 3.565276, 4.384277, 90.000000,
                                       90.000000, 90.000000)
        elts = [el_h, el_h, el_li, el_li, el_n, el_n]
        coords = [[0.000000, 0.500000, 0.413969],
                  [0.500000, 0.000000, 0.586031],
                  [0.000000, 0.000000, 0.000000],
                  [0.500000, 0.500000, 0.000000],
                  [0.000000, 0.500000, 0.192672],
                  [0.500000, 0.000000, 0.807328]]
        struct = Structure(latt, elts, coords)
        self.assertEqual(oxide_type(struct, 1.1), "None")
        # Elemental oxygen: also classified as "None".
        el_o = Element("O")
        latt = Lattice.from_parameters(4.389828, 5.369789, 5.369789, 70.786622,
                                       69.244828, 69.244828)
        elts = [el_o, el_o, el_o, el_o, el_o, el_o, el_o, el_o]
        coords = [[0.844609, 0.273459, 0.786089],
                  [0.155391, 0.213911, 0.726541],
                  [0.155391, 0.726541, 0.213911],
                  [0.844609, 0.786089, 0.273459],
                  [0.821680, 0.207748, 0.207748],
                  [0.178320, 0.792252, 0.792252],
                  [0.132641, 0.148222, 0.148222],
                  [0.867359, 0.851778, 0.851778]]
        struct = Structure(latt, elts, coords)
        self.assertEqual(oxide_type(struct, 1.1), "None")

    def test_sulfide_type(self):
        # NaS2 -> polysulfide
        latt = Lattice.tetragonal(9.59650, 11.78850)
        species = ["Na"] * 2 + ["S"] * 2
        coords = [[0.00000, 0.00000, 0.17000],
                  [0.27600, 0.25000, 0.12500],
                  [0.03400, 0.25000, 0.29600],
                  [0.14700, 0.11600, 0.40000]]
        struct = Structure.from_spacegroup(122, latt, species, coords)
        self.assertEqual(sulfide_type(struct), "polysulfide")
        # NaCl type NaS -> sulfide
        latt = Lattice.cubic(5.75)
        species = ["Na", "S"]
        coords = [[0.00000, 0.00000, 0.00000],
                  [0.50000, 0.50000, 0.50000]]
        struct = Structure.from_spacegroup(225, latt, species, coords)
        self.assertEqual(sulfide_type(struct), "sulfide")
        # Na2S2O3 -> None (sulfate)
        latt = Lattice.monoclinic(6.40100, 8.10000, 8.47400, 96.8800)
        species = ["Na"] * 2 + ["S"] * 2 + ["O"] * 3
        coords = [[0.29706, 0.62396, 0.08575],
                  [0.37673, 0.30411, 0.45416],
                  [0.52324, 0.10651, 0.21126],
                  [0.29660, -0.04671, 0.26607],
                  [0.17577, 0.03720, 0.38049],
                  [0.38604, -0.20144, 0.33624],
                  [0.16248, -0.08546, 0.11608]]
        struct = Structure.from_spacegroup(14, latt, species, coords)
        self.assertEqual(sulfide_type(struct), None)
        # Na3PS3O -> sulfide
        latt = Lattice.orthorhombic(9.51050, 11.54630, 5.93230)
        species = ["Na"] * 2 + ["S"] * 2 + ["P", "O"]
        coords = [[0.19920, 0.11580, 0.24950],
                  [0.00000, 0.36840, 0.29380],
                  [0.32210, 0.36730, 0.22530],
                  [0.50000, 0.11910, 0.27210],
                  [0.50000, 0.29400, 0.35500],
                  [0.50000, 0.30300, 0.61140]]
        struct = Structure.from_spacegroup(36, latt, species, coords)
        self.assertEqual(sulfide_type(struct), "sulfide")
class OrderParametersTest(PymatgenTest):
    """Tests OrderParameters against idealized coordination motifs (linear,
    bent, cubic, bcc, fcc, hcp, diamond, planar and pyramidal clusters).

    Order-parameter values are compared via int(op * 1000) to tolerate tiny
    numerical noise.
    """

    def setUp(self):
        # Each fixture is a hand-built Structure realizing one ideal motif.
        self.linear = Structure(
            Lattice.from_lengths_and_angles(
                [10, 10, 10], [90, 90, 90]),
            ["H", "H", "H"], [[1, 0, 0], [0, 0, 0], [2, 0, 0]],
            validate_proximity=False,
            to_unit_cell=False, coords_are_cartesian=True,
            site_properties=None)
        self.bent45 = Structure(
            Lattice.from_lengths_and_angles(
                [10, 10, 10], [90, 90, 90]), ["H", "H", "H"],
            [[0, 0, 0], [0.707, 0.707, 0], [0.707, 0, 0]],
            validate_proximity=False,
            to_unit_cell=False, coords_are_cartesian=True,
            site_properties=None)
        self.cubic = Structure(
            Lattice.from_lengths_and_angles(
                [1, 1, 1], [90, 90, 90]),
            ["H"], [[0, 0, 0]], validate_proximity=False,
            to_unit_cell=False, coords_are_cartesian=False,
            site_properties=None)
        self.bcc = Structure(
            Lattice.from_lengths_and_angles(
                [1, 1, 1], [90, 90, 90]),
            ["H", "H"], [[0, 0, 0], [0.5, 0.5, 0.5]],
            validate_proximity=False, to_unit_cell=False,
            coords_are_cartesian=False, site_properties=None)
        self.fcc = Structure(
            Lattice.from_lengths_and_angles(
                [1, 1, 1], [90, 90, 90]), ["H", "H", "H", "H"],
            [[0, 0, 0], [0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]],
            validate_proximity=False, to_unit_cell=False,
            coords_are_cartesian=False, site_properties=None)
        self.hcp = Structure(
            Lattice.from_lengths_and_angles(
                [1, 1, 1.633], [90, 90, 120]), ["H", "H"],
            [[0.3333, 0.6667, 0.25], [0.6667, 0.3333, 0.75]],
            validate_proximity=False, to_unit_cell=False,
            coords_are_cartesian=False, site_properties=None)
        self.diamond = Structure(
            Lattice.from_lengths_and_angles(
                [1, 1, 1], [90, 90, 90]), ["H", "H", "H", "H", "H", "H", "H", "H"],
            [[0, 0, 0.5], [0.75, 0.75, 0.75], [0, 0.5, 0], [0.75, 0.25, 0.25],
             [0.5, 0, 0], [0.25, 0.75, 0.25], [0.5, 0.5, 0.5],
             [0.25, 0.25, 0.75]], validate_proximity=False, to_unit_cell=False,
            coords_are_cartesian=False, site_properties=None)
        self.regular_triangle = Structure(
            Lattice.from_lengths_and_angles(
                [30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H"],
            [[15, 15.28867, 15.65], [14.5, 15, 15], [15.5, 15, 15], \
             [15, 15.866, 15]], validate_proximity=False, to_unit_cell=False,
            coords_are_cartesian=True, site_properties=None)
        self.square = Structure(
            Lattice.from_lengths_and_angles(
                [30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H", "H"],
            [[15, 15, 15.707], [14.75, 14.75, 15], [14.75, 15.25, 15], \
             [15.25, 14.75, 15], [15.25, 15.25, 15]],
            validate_proximity=False, to_unit_cell=False,
            coords_are_cartesian=True, site_properties=None)
        self.square_pyramid = Structure(
            Lattice.from_lengths_and_angles(
                [30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H", "H", "H"],
            [[15, 15, 15], [15, 15, 15.3535], [14.75, 14.75, 15],
             [14.75, 15.25, 15], [15.25, 14.75, 15], [15.25, 15.25, 15]],
            validate_proximity=False, to_unit_cell=False,
            coords_are_cartesian=True, site_properties=None)
        self.pentagonal_planar = Structure(
            Lattice.from_lengths_and_angles(
                [30, 30, 30], [90, 90, 90]), ["Xe", "F", "F", "F", "F", "F"],
            [[0, -1.6237, 0], [1.17969, 0, 0], [-1.17969, 0, 0], \
             [1.90877, -2.24389, 0], [-1.90877, -2.24389, 0], [0, -3.6307, 0]],
            validate_proximity=False, to_unit_cell=False,
            coords_are_cartesian=True, site_properties=None)
        self.trigonal_bipyramidal = Structure(
            Lattice.from_lengths_and_angles(
                [30, 30, 30], [90, 90, 90]), ["P", "Cl", "Cl", "Cl", "Cl", "Cl"],
            [[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0],
             [1.74937, -1.01, 0], [-1.74937, -1.01, 0], [0, 0, -2.14]],
            validate_proximity=False, to_unit_cell=False,
            coords_are_cartesian=True, site_properties=None)

    def test_init(self):
        self.assertIsNotNone(OrderParameters(["cn"], [[]], 0.99))

    def test_get_order_parameters(self):
        # Set up everything.
        op_types = ["cn", "lin", "bent", "tet", "oct", "bcc", "q2", "q4", \
            "q6", "reg_tri", "sq", "sq_pyr", "tri_bipyr"]
        op_paras = [[], [], [], [], [], [], [], [], [], [], [], [], []]
        # NOTE(review): the assignment above is immediately shadowed by the
        # one below (which sets the "bent" parameters) -- dead code.
        op_paras = [[], [], [45.0, 0.0667], [], [], [], [], [], [], [], [], [], []]
        # Instances differ only in the neighbor-finding cutoff parameter;
        # ops_voro uses the default (Voronoi-based) neighbor detection.
        ops_044 = OrderParameters(op_types, op_paras, 0.44)
        ops_071 = OrderParameters(op_types, op_paras, 0.71)
        ops_087 = OrderParameters(op_types, op_paras, 0.87)
        ops_099 = OrderParameters(op_types, op_paras, 0.99)
        ops_101 = OrderParameters(op_types, op_paras, 1.01)
        ops_voro = OrderParameters(op_types, op_paras)
        # Linear motif.
        op_vals = ops_101.get_order_parameters(self.linear, 0)
        self.assertAlmostEqual(int(op_vals[1] * 1000), 1000)
        # 45 degrees-bent motif.
        op_vals = ops_101.get_order_parameters(self.bent45, 0)
        self.assertAlmostEqual(int(op_vals[2] * 1000), 1000)
        # Cubic structure.
        op_vals = ops_099.get_order_parameters(self.cubic, 0)
        self.assertAlmostEqual(op_vals[0], 0.0)
        self.assertIsNone(op_vals[3])
        self.assertIsNone(op_vals[4])
        self.assertIsNone(op_vals[5])
        self.assertIsNone(op_vals[6])
        self.assertIsNone(op_vals[7])
        self.assertIsNone(op_vals[8])
        op_vals = ops_101.get_order_parameters(self.cubic, 0)
        self.assertAlmostEqual(op_vals[0], 6.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 14)
        self.assertAlmostEqual(int(op_vals[4] * 1000), 1000)
        self.assertAlmostEqual(int(op_vals[5] * 1000), 333)
        self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[7] * 1000), 763)
        self.assertAlmostEqual(int(op_vals[8] * 1000), 353)
        # Bcc structure.
        op_vals = ops_087.get_order_parameters(self.bcc, 0)
        self.assertAlmostEqual(op_vals[0], 8.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 142)
        self.assertAlmostEqual(int(op_vals[4] * 1000), 145)
        self.assertAlmostEqual(int(op_vals[5] * 1000), 975)
        self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[7] * 1000), 509)
        self.assertAlmostEqual(int(op_vals[8] * 1000), 628)
        # Fcc structure.
        op_vals = ops_071.get_order_parameters(self.fcc, 0)
        self.assertAlmostEqual(op_vals[0], 12.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 30)
        self.assertAlmostEqual(int(op_vals[4] * 1000), 78)
        self.assertAlmostEqual(int(op_vals[5] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[7] * 1000), 190)
        self.assertAlmostEqual(int(op_vals[8] * 1000), 574)
        # Hcp structure.
        op_vals = ops_101.get_order_parameters(self.hcp, 0)
        self.assertAlmostEqual(op_vals[0], 12.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 30)
        self.assertAlmostEqual(int(op_vals[4] * 1000), 89)
        self.assertAlmostEqual(int(op_vals[5] * 1000), -38)
        self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[7] * 1000), 97)
        self.assertAlmostEqual(int(op_vals[8] * 1000), 484)
        # Diamond structure.
        op_vals = ops_044.get_order_parameters(self.diamond, 0)
        self.assertAlmostEqual(op_vals[0], 4.0)
        self.assertAlmostEqual(int(op_vals[3] * 1000), 1000)
        self.assertAlmostEqual(int(op_vals[4] * 1000), 45)
        self.assertAlmostEqual(int(op_vals[5] * 1000), 727)
        self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
        self.assertAlmostEqual(int(op_vals[7] * 1000), 509)
        self.assertAlmostEqual(int(op_vals[8] * 1000), 628)
        # Regular triangle motif.
        op_vals = ops_101.get_order_parameters(self.regular_triangle, 0)
        self.assertAlmostEqual(int(op_vals[9] * 1000), 999)
        # Square motif.
        op_vals = ops_101.get_order_parameters(self.square, 0)
        self.assertAlmostEqual(int(op_vals[10] * 1000), 1000)
        # Pentagonal planar.
        op_vals = ops_101.get_order_parameters(
            self.pentagonal_planar.sites, 0, indeces_neighs=[1,2,3,4,5])
        self.assertAlmostEqual(int(op_vals[12] * 1000), 100)
        # Square pyramid motif.
        op_vals = ops_101.get_order_parameters(self.square_pyramid, 0)
        self.assertAlmostEqual(int(op_vals[11] * 1000 + 0.5), 1000)
        self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 500)
        # Trigonal bipyramidal.
        op_vals = ops_101.get_order_parameters(
            self.trigonal_bipyramidal.sites, 0, indeces_neighs=[1,2,3,4,5])
        self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 1000)
        # Test providing explicit neighbor lists.
        op_vals = ops_101.get_order_parameters(self.bcc, 0, indeces_neighs=[1])
        self.assertIsNotNone(op_vals[0])
        self.assertIsNone(op_vals[3])
        with self.assertRaises(ValueError):
            ops_101.get_order_parameters(self.bcc, 0, indeces_neighs=[2])

    def tearDown(self):
        # NOTE(review): pentagonal_planar and trigonal_bipyramidal are not
        # deleted here, unlike the other setUp fixtures -- likely an
        # oversight, though harmless for unittest lifecycles.
        del self.linear
        del self.bent45
        del self.cubic
        del self.fcc
        del self.bcc
        del self.hcp
        del self.diamond
        del self.regular_triangle
        del self.square
        del self.square_pyramid
# Standard unittest entry point.
if __name__ == '__main__':
    unittest.main()
| matk86/pymatgen | pymatgen/analysis/tests/test_structure_analyzer.py | Python | mit | 23,270 | [
"VASP",
"pymatgen"
] | 06bf0e37522b7a2f6fb55d88e9e832ab306910189a266753e102ae6541e4e5e2 |
# coding=utf-8
# Copyright 2022 The Balloon Learning Environment Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An exploratory agent that selects actions based on a random walk.
Note that this class assumes the features passed in correspond to the
Perciatelli features (see balloon_learning_environment.env.features).
"""
import datetime as dt
import time
from typing import Optional, Sequence
from balloon_learning_environment.agents import agent
from balloon_learning_environment.env import features
from balloon_learning_environment.env.balloon import control
from balloon_learning_environment.utils import constants
from balloon_learning_environment.utils import sampling
import gin
import jax
import numpy as np
_PERCIATELLI_FEATURES_SHAPE = (1099,)  # Expected shape of Perciatelli feature vectors.
_HYSTERESIS = 100  # Dead band around the target pressure, in Pascals.
_STDDEV = 0.1666  # Random-walk noise scale; ~ 10 [Pa/min].
# Although this class does not have any gin-configurable parameters, it is
# decorated as gin-configurable so it can be injected into other classes
# (e.g. MarcoPoloExploration).
@gin.configurable
class RandomWalkAgent(agent.Agent):
  """An exploratory agent that selects actions based on a random walk."""

  def __init__(self, num_actions: int, observation_shape: Sequence[int],
               seed: Optional[int] = None):
    # The action count and observation shape are unused by this agent.
    del num_actions
    del observation_shape
    if seed is None:
      seed = int(time.time() * 1e6)
    self._rng = jax.random.PRNGKey(seed)
    self._time_elapsed = dt.timedelta()
    self._sample_new_target_pressure()

  def _sample_new_target_pressure(self):
    """Draws a fresh target pressure from the sampling distribution."""
    self._rng, sample_rng = jax.random.split(self._rng)
    self._target_pressure = sampling.sample_pressure(sample_rng)

  def _select_action(self, features_as_vector: np.ndarray) -> int:
    """Steers the balloon toward the current target pressure."""
    assert features_as_vector.shape == _PERCIATELLI_FEATURES_SHAPE
    named_features = features.NamedPerciatelliFeatures(features_as_vector)
    balloon_pressure = named_features.balloon_pressure

    # Note: higher pressures means lower altitude. Only act when the
    # pressure is outside the hysteresis band around the target.
    if balloon_pressure - _HYSTERESIS > self._target_pressure:
      command = control.AltitudeControlCommand.UP
    elif balloon_pressure + _HYSTERESIS < self._target_pressure:
      command = control.AltitudeControlCommand.DOWN
    else:
      command = control.AltitudeControlCommand.STAY
    return command

  def begin_episode(self, observation: np.ndarray) -> int:
    self._time_elapsed = dt.timedelta()
    self._sample_new_target_pressure()
    return self._select_action(observation)

  def step(self, reward: float, observation: np.ndarray) -> int:
    del reward  # The reward does not influence the random walk.
    self._time_elapsed += constants.AGENT_TIME_STEP

    # Random-walk the target pressure with zero-mean Gaussian noise whose
    # scale grows with the time (in seconds) elapsed in the episode.
    self._rng, noise_rng = jax.random.split(self._rng)
    self._target_pressure += (
        self._time_elapsed.total_seconds() * _STDDEV * jax.random.normal(noise_rng))
    return self._select_action(observation)

  def end_episode(self, reward: float, terminal: bool = True) -> None:
    pass
| google/balloon-learning-environment | balloon_learning_environment/agents/random_walk_agent.py | Python | apache-2.0 | 3,650 | [
"Gaussian"
] | 92f0a6278b414587045ab5a5f6eb9da5a3e427f9a9071943b0bbc1786f6ad681 |
import os
import warnings
import numpy as np
from netCDF4 import Dataset
from cfmeta import Cmip5File
from pyclimate.nchelpers import nc_copy_atts, nc_copy_var
class DerivableBase(object):
    """Represents a group of base variables.

    Grouped base variables are used to initiate calculating derived variables.

    Attributes:
        model (str): Model name. Typically the model_id attribute from the source NetCDF
        experiment (str): Experiment name. Typically the 'experiment' attribute from a source NetCDF
            eg: 'historical', 'rcp26'
        ensemble_member (str): Ensemble member id.
            eg: 'r1i1p1', 'r2i1p1'
        temporal_subset (str): Temporal range of data contained in the base data. Format YYYYMMDD-YYYYMMDD
            eg: '20060101-21991231'
    """
    required_atts = ['model', 'experiment', 'ensemble_member', 'temporal_subset']
    def __init__(self, **kwargs):
        """Initializes a `DerivableBase`

        Dynamically stores whatever args are supplied by a user.

        Args:
            **kwargs: Attributes defining `DerivableBase`

        Raises:
            KeyError: If any attribute in ``required_atts`` is missing.

        Note:
            While not strictly enforced, `model`, `experiment`, `ensemble_member`, `temporal_subset` /should/
            be sufficient to uniquely group a set of variables. If required, a user can
            supply additional attributes to further define a grouping.
        """
        for att in self.required_atts:
            try:
                setattr(self, att, kwargs.pop(att))
            except KeyError:
                raise KeyError('Required attribute {} not provided'.format(att))
        # Any remaining keyword arguments further qualify the grouping.
        for k, v in kwargs.items():
            setattr(self, k, v)
        # Maps base variable name -> file path (see add_base_variable).
        self.variables = {}
    def add_base_variable(self, variable, dataset_fp):
        """Adds base variable to the DerivableBase instance.

        Args:
            variable (str): The CMIP5 variable name being added.
            dataset_fp (str): The location of the file.

        Returns:
            None
        """
        self.variables[variable] = dataset_fp
    def derive_variable(self, variable, outdir):
        """Entry point to calculate derived variables from a ``DerivableBase`` class.

        Args:
            variable (str): Short name of the variable to generate.
            outdir (str): Root directory to place output file.

        Returns:
            A variable specific subclass of DerivedVariable, or None for an
            unrecognized variable name.
        """
        # Dispatch via an if/elif chain so that unknown variable names never
        # evaluate the (module-level) derived-variable classes.
        if variable == 'tas':
            v = tas(self.variables, outdir)
        elif variable == 'gdd':
            v = gdd(self.variables, outdir)
        elif variable == 'hdd':
            v = hdd(self.variables, outdir)
        elif variable == 'ffd':
            v = ffd(self.variables, outdir)
        elif variable == 'pas':
            v = pas(self.variables, outdir)
        else:
            return None
        return v
def get_output_file_path_from_base(base_fp, new_varname, outdir=None):
    """Generates a new file path from an existing template using a different variable

    Args:
        base_fp (str): base filename to use as template
        new_varname (str): new variable name
        outdir (str): root directory to prepend to the generated path.
            Defaults to the current directory.

    Returns:
        str: the new filename
    """
    cf = Cmip5File(datanode_fp = base_fp)
    cf.update(variable_name = new_varname)
    # A None outdir previously made os.path.join raise TypeError; treat it as
    # "relative to the current directory" instead.
    return os.path.join(outdir or '', cf.datanode_fp)
def get_output_netcdf_from_base(base_nc, base_varname, new_varname, new_atts, outfp):
    """Prepares a blank NetCDF file for a new variable

    Copies structure and attributes of an existing NetCDF into a new NetCDF
    altering variable specific metadata

    Args:
        base_nc (netCDF4.Dataset): Source netCDF file as returned by netCDF4.Dataset.
        base_varname (str): Source variable to copy structure from.
        new_varname (str): New variable name.
        new_atts (dict): Attributes to assign to the new variable.
        outfp (str): Location to create the new netCDF4.Dataset

    Returns:
        netCDF4.Dataset: The new netCDF4.Dataset
    """
    # Ensure the target directory exists before creating the dataset. Guard
    # against an empty dirname (outfp in the current directory).
    out_dirname = os.path.dirname(outfp)
    if out_dirname and not os.path.exists(out_dirname):
        os.makedirs(out_dirname)

    new_nc = Dataset(outfp, 'w')
    ncvar = nc_copy_var(base_nc, new_nc, base_varname, new_varname)
    nc_copy_atts(base_nc, new_nc) #copy global atts
    for k, v in new_atts.items():
        setattr(ncvar, k, v)
    return new_nc
class DerivedVariable(object):
    """Used as a parent for all derived variables.

    Stores variable specific information and provides common methods.

    Attributes:
        base_variables (dict): Dictionary mapping base variable name to file location.
            eg: {'pr': 'path/to/pr/variable.nc',
                 'tasmax': 'path/to/tasmax/variable.nc'}
        outdir (str): Location to put the generated NetCDF.
        variable_name (str): Derived variable name.
        required_vars (list): List of variables required by the specific derived variable
        variable_atts (dict): Attributes to set on the derived variable
    """
    def __init__(self, base_variables, outdir, variable_name, required_vars, variable_atts):
        """Initializes a ``DerivedVariable`` class

        Args:
            Same as ``Attributes``
        """
        self.base_variables = base_variables
        self.outdir = outdir
        self.variable_name = variable_name
        self.required_vars = required_vars
        self.variable_atts = variable_atts
    def __call__(self):
        """Subclasses must override __call__ to generate their variable.

        Raises:
            NotImplementedError: Always; this base class is abstract.
        """
        # Bug fix: ``raise NotImplemented`` raised a TypeError because
        # NotImplemented is not an exception; NotImplementedError is correct.
        raise NotImplementedError
    def __str__(self):
        return 'Generating {} with base variables {}'.format(type(self).__name__, self.base_variables.keys())
    @property
    def base_varname(self):
        """Base variable used as the structural template for output files.
        """
        return self.required_vars[0]
    @property
    def outfp(self):
        """Full output file path for the derived variable.
        """
        return get_output_file_path_from_base(self.base_variables[self.base_varname], self.variable_name, self.outdir)
    def has_required_vars(self, required_vars):
        """Return True when every variable in ``required_vars`` is available.

        Emits a warning and returns False otherwise.
        """
        if not all([x in self.base_variables.keys() for x in required_vars]):
            warnings.warn('Insufficient base variables to calculate {}'.format(self.variable_name))
            return False
        return True
class tas(DerivedVariable):
    """Daily mean near-surface air temperature, derived from tasmax/tasmin."""
    variable_name = 'tas'
    required_vars = ['tasmax', 'tasmin']
    variable_atts = {
        'long_name': 'Near-Surface Air Temperature',
        'standard_name': 'air_temperature',
        'units': 'K',
        'cell_methods': 'time: mean',
        'cell_measures': 'area: areacella'
    }
    def __init__(self, base_variables, outdir):
        super(tas, self).__init__(base_variables, outdir, self.variable_name,
                                  self.required_vars, self.variable_atts)
    def __call__(self):
        """Write the per-timestep average of tasmax and tasmin."""
        if not self.has_required_vars(self.required_vars):
            return 1
        nc_max = Dataset(self.base_variables['tasmax'])
        nc_min = Dataset(self.base_variables['tasmin'])
        var_max = nc_max.variables['tasmax']
        var_min = nc_min.variables['tasmin']
        nc_out = get_output_netcdf_from_base(nc_max, self.base_varname,
                                             self.variable_name,
                                             self.variable_atts, self.outfp)
        out_var = nc_out.variables[self.variable_name]
        for step in range(var_max.shape[0]):
            out_var[step,:,:] = (var_max[step,:,:] + var_min[step,:,:]) / 2
        for handle in (nc_out, nc_max, nc_min):
            handle.close()
        return self.outfp
class gdd(DerivedVariable):
    """Growing degree days: daily mean temperature excess above 278.15 K."""
    variable_name = 'gdd'
    required_vars = ['tasmax', 'tasmin']
    variable_atts = {
        'units': 'degree days',
        'long_name': 'Growing Degree Days'
    }
    def __init__(self, base_variables, outdir):
        super(gdd, self).__init__(base_variables, outdir, self.variable_name,
                                  self.required_vars, self.variable_atts)
    def __call__(self):
        """Accumulate degree days where the daily mean exceeds 278.15 K."""
        if not self.has_required_vars(self.required_vars):
            return 1
        nc_max = Dataset(self.base_variables['tasmax'])
        nc_min = Dataset(self.base_variables['tasmin'])
        var_max = nc_max.variables['tasmax']
        var_min = nc_min.variables['tasmin']
        nc_out = get_output_netcdf_from_base(nc_max, self.base_varname,
                                             self.variable_name,
                                             self.variable_atts, self.outfp)
        out_var = nc_out.variables[self.variable_name]
        for step in range(var_max.shape[0]):
            mean_temp = (var_max[step,:,:] + var_min[step,:,:]) / 2
            out_var[step,:,:] = np.where(mean_temp > 278.15,
                                         (mean_temp - 278.15), 0)
        for handle in (nc_out, nc_max, nc_min):
            handle.close()
        return self.outfp
class hdd(DerivedVariable):
    """Heating degree days: daily mean temperature deficit below 291.15 K."""
    variable_name = 'hdd'
    required_vars = ['tasmax', 'tasmin']
    variable_atts = {
        'units': 'degree days',
        'long_name': 'Heating Degree Days'
    }
    def __init__(self, base_variables, outdir):
        super(hdd, self).__init__(base_variables, outdir, self.variable_name,
                                  self.required_vars, self.variable_atts)
    def __call__(self):
        """Accumulate degree days where the daily mean falls below 291.15 K."""
        if not self.has_required_vars(self.required_vars):
            return 1
        nc_max = Dataset(self.base_variables['tasmax'])
        nc_min = Dataset(self.base_variables['tasmin'])
        var_max = nc_max.variables['tasmax']
        var_min = nc_min.variables['tasmin']
        nc_out = get_output_netcdf_from_base(nc_max, self.base_varname,
                                             self.variable_name,
                                             self.variable_atts, self.outfp)
        out_var = nc_out.variables[self.variable_name]
        for step in range(var_max.shape[0]):
            mean_temp = (var_max[step,:,:] + var_min[step,:,:]) / 2
            out_var[step,:,:] = np.where(mean_temp < 291.15,
                                         np.absolute(mean_temp - 291.15), 0)
        for handle in (nc_out, nc_max, nc_min):
            handle.close()
        return self.outfp
class ffd(DerivedVariable):
    """Frost free days: 1 for each timestep where tasmin stays above freezing."""
    variable_name = 'ffd'
    required_vars = ['tasmin']
    variable_atts = {
        'units': 'days',
        'long_name': 'Frost Free Days'
    }
    def __init__(self, base_variables, outdir):
        super(ffd, self).__init__(base_variables, outdir, self.variable_name,
                                  self.required_vars, self.variable_atts)
    def __call__(self):
        """Mark cells 1 when tasmin > 273.15 K, else 0."""
        if not self.has_required_vars(self.required_vars):
            return 1
        nc_min = Dataset(self.base_variables['tasmin'])
        var_min = nc_min.variables['tasmin']
        nc_out = get_output_netcdf_from_base(nc_min, self.base_varname,
                                             self.variable_name,
                                             self.variable_atts, self.outfp)
        out_var = nc_out.variables[self.variable_name]
        for step in range(var_min.shape[0]):
            out_var[step,:,:] = np.where(var_min[step,:,:] > 273.15, 1, 0)
        for handle in (nc_out, nc_min):
            handle.close()
        return self.outfp
class pas(DerivedVariable):
    """Precipitation as snow: pr on timesteps where tasmax is below freezing."""
    variable_name = 'pas'
    required_vars = ['tasmax', 'pr']
    variable_atts = {
        'units': 'mm',
        'long_name': 'Precip as snow'
    }
    def __init__(self, base_variables, outdir):
        super(pas, self).__init__(base_variables, outdir, self.variable_name, self.required_vars, self.variable_atts)
    def __call__(self):
        """Write pr where tasmax < 273.15 K (0 degC), else 0."""
        if not self.has_required_vars(self.required_vars):
            return 1
        nc_tasmax = Dataset(self.base_variables['tasmax'])
        var_tasmax = nc_tasmax.variables['tasmax']
        nc_pr = Dataset(self.base_variables['pr'])
        var_pr = nc_pr.variables['pr']
        nc_out = get_output_netcdf_from_base(nc_tasmax, self.base_varname, self.variable_name, self.variable_atts, self.outfp)
        ncvar_pas = nc_out.variables[self.variable_name]
        for i in range(var_tasmax.shape[0]):
            ncvar_pas[i,:,:] = np.where(var_tasmax[i,:,:] < 273.15, var_pr[i,:,:] , 0)
        # Bug fix: nc_pr was previously left open, leaking a file handle.
        for nc in [nc_out, nc_tasmax, nc_pr]:
            nc.close()
        return self.outfp
| pacificclimate/pyclimate | pyclimate/variables.py | Python | gpl-3.0 | 11,961 | [
"NetCDF"
] | 7da6ad1b3eec30e21aec7b428049947b8cf3d5f5c0444d7fb3be805870f147e2 |
# Generated by Django 2.2.4 on 2019-09-10 11:45
from django.db import migrations
# Person IDs whose pages attract vandalism; the data migration below locks
# their edits behind moderation review ("NEEDS_REVIEW").
PEOPLE_LIABLE_TO_VANDALISM = {
    2811, # Theresa May
    1120, # Jeremy Corbyn
    4546, # Boris Johnson
    6035, # Paul Nuttall
    8372, # Nicola Sturgeon
    737, # Ruth Davidson
    34605, # Matt Furey-King (due to a vandalism incident)
    31705, # Lance Charles Quantrill (due to a vandalism incident)
    1528, # Janus Polenceus
    25402, # Giles Game
    4230, # Craig Mackinlay
    # Below we include the person ID of anyone who is currently a minister.
    # This list was generated by running moderation_queue_generate_ministers_liable_to_vandalism:
    1018, # Anne Milton (Minister of State (Education))
    1104, # Edward Timpson (Minister of State (Department for Education))
    1303, # Karen Bradley (The Secretary of State for Northern Ireland)
    1326, # Priti Patel (The Secretary of State for International Development)
    1476, # Harriett Baldwin (Minister of State (Foreign and Commonwealth Office) (Joint with the Department for International Development))
    155, # John Glen (Minister of State (Treasury) (City))
    1557, # George Hollingbery (Minister of State (International Trade))
    1573, # Claire Perry (Minister of State (Business, Energy and Industrial Strategy) (Energy and Clean Growth))
    1592, # David Mundell (The Secretary of State for Scotland)
    1604, # Jeremy Wright (The Secretary of State for Digital, Culture, Media and Sport)
    1692, # Penny Mordaunt (The Secretary of State for International Development)
    1918, # Greg Clark (The Secretary of State for Business, Energy and Industrial Strategy )
    1923, # Alun Cairns (The Secretary of State for Wales)
    212, # Alan Duncan (Minister of State)
    2204, # Jo Johnson (Minister of State (Department for Education) (Universities and Science) (Joint with the Department for Business, Energy and Industrial Strategy))
    2253, # Stephen Hammond (Minister of State (Department of Health and Social Care))
    239, # Stephen Barclay (The Secretary of State for Exiting the European Union)
    2534, # James Brokenshire (The Secretary of State for Housing, Communities and Local Government)
    2811, # Theresa May (The Prime Minister)
    2832, # Michael Fallon (The Secretary of State for Defence)
    2875, # Andrea Leadsom (The Secretary of State for Environment, Food and Rural Affairs)
    2885, # David Davis (The Secretary of State for Exiting the European Union)
    2937, # Caroline Dinenage (Minister of State (Department of Health and Social Care))
    3151, # David Jones (Minister of State (Department for Exiting the European Union))
    3155, # Jeremy Hunt (The Secretary of State for Health)
    3238, # Ben Wallace (Minister of State (Home Office) (Security))
    3284, # Chris Grayling (The Secretary of State for Transport)
    3417, # Jesse Norman (Minister of State (Department for Transport))
    3445, # John Hayes (Minister of State (Department for Transport))
    3449, # Damian Hinds (The Secretary of State for Education)
    3486, # Damian Green (The Secretary of State for Work and Pensions)
    349, # Sajid Javid (The Secretary of State for the Home Department)
    3533, # Brandon Lewis (Minister without Portfolio )
    3737, # Matthew Hancock (Minister of State (Department for Culture, Media and Sport) (Digital Policy))
    3741, # Robert Halfon (Minister of State (Department of Education) (Apprenticeships and Skills))
    3745, # Chris Skidmore (Minister of State (Department for Business, Energy and Industrial Strategy) (Universities and Science) (Joint with the Department for Education))
    4014, # Michael Gove (The Secretary of State for Environment, Food and Rural Affairs)
    4021, # Justine Greening (The Secretary of State for Education)
    4099, # David Lidington (Minister of State (Cabinet Office))
    451, # Liam Fox (The Secretary of State for International Trade and President of the Board of Trade)
    4881, # Gavin Barwell (Minister of State (Department for Communities and Local Government) (Housing, Planning and London))
    4893, # Victoria Atkins (Minister for Women)
    519, # Amber Rudd (The Secretary of State for Work and Pensions)
    5272, # Kit Malthouse (Minister of State (Housing, Communities and Local Government))
    600, # Mark Field (Minister of State)
    769, # Gavin Williamson (The Secretary of State for Defence)
    918, # Nick Gibb (Minister of State (Education))
    # europarl.2019-05-023 MEP candidates in list position 1
    11857, # Colum Eastwood
    12218, # Magid Magid
    12326, # Shaffaq Mohammed
    1516, # Donald Mackay
    16, # Stephen Dorrell
    183, # Gerard Batten
    19920, # Jackie Jones
    21119, # Barbara Gibson
    2126, # Stuart Agnew
    2351, # Mike Hookem
    2454, # Naomi Long
    26664, # Caroline Voaden
    26849, # Sam Bennett
    31, # Danny Kennedy
    34111, # Molly Scott Cato
    34506, # Bill Newton Dunn
    34830, # Fiona Hall
    3526, # Lawrence Webb
    3594, # Richard Elvin
    36075, # Irina von Wiese
    37323, # Scott Ainslie
    5247, # Piers Wauchope
    5824, # Clare Bailey
    5828, # Ernest John Valentine
    5998, # Catherine Rowett
    6550, # Robert Hill
    67695, # Alexandra Phillips
    6951, # Phil Bennion
    69533, # Jill Evans
    69537, # Catherine Bearder
    69696, # Emma McClarkin
    69708, # Syed Salah Kamall
    69720, # Sajjad Karim
    69730, # Nosheena Mobarik
    69734, # Daniel Hannan
    69744, # Ashley Fox
    69943, # Rory Palmer
    69948, # Alex Mayer
    69954, # Claude Moraes
    69962, # Judith Kirton-Darling
    69965, # Theresa Griffin
    69973, # David Martin
    69980, # John Howarth
    69989, # Clare Moody
    69999, # Neena Gill
    70007, # Richard Corbett
    70035, # Chris Davies
    7011, # Stephen Morris
    70176, # Alyn Smith
    70323, # Alan Graves Snr
    70325, # Adam Richardson
    70336, # Kris Hicks
    70343, # Sheila Ritchie
    70354, # Rachel Johnson
    70360, # Catherine Mayer
    70367, # Neville Seed
    70368, # Mothiur Rahman
    70369, # Larch Ian Albert Frank Maxey
    70370, # Gavin Esler
    70375, # Ann Widdecombe
    70381, # Andrea Cooper
    70399, # Claire Regina Fox
    70407, # Sophie Catherine Larroque
    70408, # Neil Patrick McCann
    70413, # Kate Godfrey
    70430, # Pierre Edmond Kirk
    70447, # Benyamin Naeem Habib
    7065, # Amandeep Singh Bhogal
    7233, # Jenny Knight
    7400, # Vanessa Helen Hudson
    986, # Gina Dowding
    70456, # Tommy Robinson
    70330, # Mark Meechan (alias CountDankula)
    70334, # Carl Benjamin (alias Sargon of Akkad)
    70307, # Andrew Adonis (alias Lord Adonis)
    # September 2019 cabinet
    918, # Nick Gibb (Minister of State (Education))
    4546, # Boris Johnson (The Prime Minister)
    1022, # Andrew Murrison (Minister of State (Foreign and Commonwealth Office) (Joint with the Department for International Development))
    3369, # Theresa Villiers (The Secretary of State for Environment, Food and Rural Affairs)
    3238, # Ben Wallace (The Secretary of State for Defence)
    2595, # Grant Shapps (The Secretary of State for Transport)
    2253, # Stephen Hammond (Minister of State (Department of Health and Social Care))
    3745, # Chris Skidmore (Minister of State (Department of Health and Social Care))
    769, # Gavin Williamson (The Secretary of State for Education)
    2783, # Nicky Morgan (The Secretary of State for Digital, Culture, Media and Sport)
    1923, # Alun Cairns (The Secretary of State for Wales)
    3706, # Christopher Pincher (Minister of State)
    2871, # Andrew Stephenson (Minister of State (Foreign and Commonwealth Office) (Joint with the Department for International Development))
    1326, # Priti Patel (The Secretary of State for the Home Department)
    519, # Amber Rudd (The Secretary of State for Work and Pensions)
    3451, # Dominic Raab (The Secretary of State for Foreign and Commonwealth Affairs)
    2827, # George Freeman (Minister of State (Department for Transport))
    2875, # Andrea Leadsom (The Secretary of State for Business, Energy and Industrial Strategy )
    155, # John Glen (Minister of State (Treasury) (City))
    1729, # Chris Heaton-Harris (Minister of State (Department for Transport))
    4079, # Jake Berry (Minister of State (Cabinet Office) (jointly with the Ministry of Housing, Communities and Local Government))
    2937, # Caroline Dinenage (Minister of State (Department of Health and Social Care))
    1959, # Nigel Adams (Minister of State)
    3524, # Esther Louise McVey (Minister of State (Housing, Communities and Local Government))
    2204, # Jo Johnson (Minister of State (Department for Business, Energy and Industrial Strategy) (Jointly with the Department for Education))
    3636, # Alok Sharma (The Secretary of State for International Development)
    239, # Stephen Barclay (The Secretary of State for Exiting the European Union)
    188, # Elizabeth Truss (The Secretary of State for International Trade and President of the Board of Trade)
    2798, # Julian Smith (The Secretary of State for Northern Ireland)
    5031, # Robert Jenrick (The Secretary of State for Housing, Communities and Local Government)
    4796, # Oliver Dowden (Minister of State (Cabinet Office))
    5464, # James Cleverly (Minister without Portfolio )
    4893, # Victoria Atkins (Minister for Women)
}
def get_by_id_with_redirects(person_id, Person, PersonRedirect):
    """Fetch a Person, following redirects; return None when unresolvable."""
    try:
        return Person.objects.get(id=person_id)
    except Person.DoesNotExist:
        pass
    # The person may have been merged: look for a redirect and retry with
    # the target ID.
    try:
        redirect = PersonRedirect.objects.get(old_person_id=person_id)
    except PersonRedirect.DoesNotExist:
        return None
    return get_by_id_with_redirects(redirect.new_person_id, Person, PersonRedirect)
def set_liable_to_vandalism(apps, schema_editor):
    """Flag every person on the vandalism watch list as needing edit review."""
    Person = apps.get_model("people", "Person")
    PersonRedirect = apps.get_model("candidates", "PersonRedirect")
    for watched_id in PEOPLE_LIABLE_TO_VANDALISM:
        person = get_by_id_with_redirects(watched_id, Person, PersonRedirect)
        if not person:
            continue
        # We can't use the Enum here, because the `Person` object isn't the
        # actual class in the code. The Enum values might change over time,
        # so it's best to hard code this.
        person.edit_limitations = "NEEDS_REVIEW"
        person.save()
class Migration(migrations.Migration):
    """Data migration: mark vandalism-prone people as needing edit review."""
    dependencies = [("people", "0016_add_edit_limitations")]
    operations = [
        # Reverse migration is a no-op: edit limitations are left in place.
        migrations.RunPython(set_liable_to_vandalism, migrations.RunPython.noop)
    ]
| DemocracyClub/yournextrepresentative | ynr/apps/people/migrations/0017_set_vandalism_list.py | Python | agpl-3.0 | 10,823 | [
"Amber"
] | dbf7d06394e20cca6a3a53d2e8fdbe4d2592e15f362df0f7a3af7f871558663c |
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from monty.json import MSONable
import numpy as np
import warnings
from pymatgen.core.spectrum import Spectrum
from copy import deepcopy
from veidt.elsie.preprocessing import Preprocessing
from veidt.elsie import similarity_measures
from scipy.interpolate import interp1d
from scipy import signal
class SpectraSimilarity(MSONable):
    """Aligns two spectra in energy and quantifies their similarity."""

    def __init__(self, sp1, sp2, interp_points=200):
        """
        Initialize a SpectraSimilarity object to determine the similarity
        between two spectra. Both spectrum objects should follow pymatgen's
        Spectrum interface.

        Args:
            sp1: Spectrum object 1. Given spectrum to match, usually
                collected from exp.
            sp2: Spectrum object 2. Candidate spectrum, usually
                computational reference spectrum.
            interp_points: Number of points used for spectrum interpolation
                throughout comparison
        """
        self.sp1 = sp1
        self.sp2 = sp2
        self.shifted_sp1 = None
        self.shifted_sp2 = None
        self.interp_points = interp_points
        self._energy_validation()

    def _energy_validation(self):
        """
        Validate the overlap absorption range of both spectra. Warning will
        raise in the following two cases:

        1. If the overlap energy range is less than 30 meV,
        2. If there is no overlap energy, self.valid_comparison set to false
        """
        min_energy_1, max_energy_1 = np.min(self.sp1.x), \
                                     np.max(self.sp1.x)
        min_energy_2, max_energy_2 = np.min(self.sp2.x), \
                                     np.max(self.sp2.x)
        max_min_energy = max(min_energy_1, min_energy_2)
        min_max_energy = min(max_energy_1, max_energy_2)

        if (min_energy_2 > max_energy_1) or (min_energy_1 > max_energy_2):
            warning_msg = "Candidate spectrum has no overlap with given " \
                          "spectrum to match"
            warnings.warn(warning_msg)
            self.valid_comparison = False
        elif (min_max_energy - max_min_energy) < 30:
            warning_msg = "Candidate and given spectra's overlap absorption " \
                          "energy is less than 30 meV"
            warnings.warn(warning_msg)
            self.valid_comparison = True
        else:
            self.valid_comparison = True

    def _spectrum_shift(self, algo='threshold_shift', intensity_threshold=0.06, preset_shift=None):
        """
        Shift self.sp2 with respect to self.spec1. Self.spec1 will be
        untouched.

        Args:
            algo: Algorithm used to determine the energy shift between two
                spectra. Currently available types are:
                "threshold_shift": Use the onset of absorption. Onset energy
                                   are determined by the intensity_threshold.
                "cross_correlate": Use the cross correlation function between
                                   two spectra to determine the shift energy.
                "user_specify": User specify the shift energy between the two
                                spectra. The shift energy value should be set
                                through the preset_shift.
            intensity_threshold: The absorption peak intensity threshold used
                to determine the absorption onset, default set to 0.06
            preset_shift: The energy shift value between the two spectra.
                preset_shift > 0 means sp2 needs to shift left w.r.t sp1

        Note:
            self.abs_onset is only set by the "threshold_shift" algorithm; the
            energy_variation path of get_shifted_similarity relies on it.
        """
        if algo == 'user_specify':
            if preset_shift is None:
                raise ValueError('The energy shift value has not been set')
            self.shifted_sp1, self.shifted_sp2, self.shifted_energy = \
                preset_value_shift(self.sp1, self.sp2, preset_shift)

        if algo == 'threshold_shift':
            self.sp1, self.sp2 = spectra_lower_extend(self.sp1, self.sp2)
            self.shifted_sp1, self.shifted_sp2, self.shifted_energy, \
            self.abs_onset = absorption_onset_shift(
                self.sp1, self.sp2, intensity_threshold)

        elif algo == 'cross_correlate':
            self.sp1, self.sp2 = spectra_lower_extend(self.sp1, self.sp2)
            self.shifted_sp1, self.shifted_sp2, self.shifted_energy = \
                signal_corre_shift(self.sp1, self.sp2)

    def get_shifted_similarity(self, similarity_metric, energy_variation=None,
                               spect_preprocess=None, **kwargs):
        """
        Calculate the similarity between two shifted spectra

        Args:
            similarity_metric (string): The similarity metric used for comparison.
            energy_variation (list): Energy variation value used to squeeze or broaden the candidate
                spectrum (sp2) beyonds spectrum shift onset point. E.g., [-2, 2, 0.1]
                specifies sp2's spectrum energy (Es) beyonds onset point will scale from Es - 2 to Es + 2
                at 0.1 interval. Maximum similarity and its' corresponding scale energy will be returned.
            spect_preprocess (list/tuple): Preprocessing steps need to taken for each spectrum

        Returns:
            The (maximum) similarity value, or 0 when no valid comparison is
            possible.
        """
        if not self.valid_comparison:
            return 0

        if (self.shifted_sp1 is None) and (self.shifted_sp2 is None):
            self._spectrum_shift(**kwargs)

        simi_class = getattr(similarity_measures, similarity_metric)

        if energy_variation is not None:
            # Squeeze/broaden sp2 beyond the absorption onset and keep the
            # scale that maximizes the similarity.
            sp2_energy_scale_onset = self.shifted_sp2.x[np.argmax(
                self.shifted_sp2.x > self.abs_onset)]
            sp2_energy_scale_end = max(self.shifted_sp2.x)
            sp2_scale_energy_den = (self.shifted_sp2.x > self.abs_onset).sum()
            max_simi = float("-inf")

            for scale_energy in np.arange(
                    energy_variation[0], energy_variation[1], energy_variation[2]):
                sp2_scaled_energy = np.linspace(
                    sp2_energy_scale_onset,
                    sp2_energy_scale_end + scale_energy,
                    sp2_scale_energy_den)
                shifted_sp2_scaled_energy = np.hstack(
                    (self.shifted_sp2.x[:np.argmax(
                        self.shifted_sp2.x > self.abs_onset)],
                     sp2_scaled_energy))

                if shifted_sp2_scaled_energy.shape != self.shifted_sp2.x.shape:
                    raise ValueError('The scaled energy grid density is '
                                     'different from pre-scaled')

                scaled_shifted_sp2 = Spectrum(shifted_sp2_scaled_energy,
                                              self.shifted_sp2.y)

                # Interpolate and calculate the similarity between
                # scaled_shifted_sp2 and shifted_sp1
                overlap_energy = energy_overlap(self.shifted_sp1,
                                                scaled_shifted_sp2)
                overlap_energy_grid = np.linspace(
                    overlap_energy[0], overlap_energy[1], self.interp_points)
                shifted_sp1_interp = spectra_energy_interpolate(
                    self.shifted_sp1, overlap_energy_grid)
                scaled_shifted_sp2_interp = spectra_energy_interpolate(
                    scaled_shifted_sp2, overlap_energy_grid)

                pre_shifted_sp1_interp = Preprocessing(shifted_sp1_interp)
                pre_shifted_sp1_interp.spectrum_process(['intnorm'])
                pre_scaled_shifted_sp2_interp = Preprocessing(scaled_shifted_sp2_interp)
                pre_scaled_shifted_sp2_interp.spectrum_process(['intnorm'])
                shifted_sp1_interp = pre_shifted_sp1_interp.spectrum
                scaled_shifted_sp2_interp = pre_scaled_shifted_sp2_interp.spectrum

                similarity_obj = simi_class(shifted_sp1_interp.y,
                                            scaled_shifted_sp2_interp.y)
                try:
                    similarity_value = similarity_obj.similarity_measure()
                except Exception:
                    # Narrowed from a bare ``except`` so KeyboardInterrupt and
                    # SystemExit still propagate. np.nan never beats max_simi.
                    warnings.warn("Cannot generate valid similarity value for the two spectra")
                    similarity_value = np.nan

                if similarity_value > max_simi:
                    max_simi = similarity_value
                    self.interp_shifted_sp1 = shifted_sp1_interp
                    self.interp_scaled_shift_sp2 = scaled_shifted_sp2_interp
                    # max_scale_energy<0 means the sp2 should be squeeze for
                    # maximum matching
                    self.max_scale_energy = scale_energy

            if spect_preprocess is not None:
                pre_shifted_sp1_interp = Preprocessing(
                    self.interp_shifted_sp1)
                pre_scaled_shifted_sp2_interp = Preprocessing(
                    self.interp_scaled_shift_sp2)
                pre_shifted_sp1_interp.spectrum_process(spect_preprocess)
                pre_scaled_shifted_sp2_interp.spectrum_process(
                    spect_preprocess)
                shifted_sp1_interp = pre_shifted_sp1_interp.spectrum
                scaled_shifted_sp2_interp = pre_scaled_shifted_sp2_interp.spectrum
                similarity_obj = simi_class(shifted_sp1_interp.y,
                                            scaled_shifted_sp2_interp.y)
                max_simi = similarity_obj.similarity_measure()

            return max_simi

        elif energy_variation is None:
            overlap_energy = energy_overlap(self.shifted_sp1,
                                            self.shifted_sp2)
            overlap_energy_grid = np.linspace(
                overlap_energy[0], overlap_energy[1], self.interp_points)
            shifted_sp1_interp = spectra_energy_interpolate(
                self.shifted_sp1, overlap_energy_grid)
            shifted_sp2_interp = spectra_energy_interpolate(
                self.shifted_sp2, overlap_energy_grid)

            if spect_preprocess is not None:
                pre_shifted_sp1_interp = Preprocessing(shifted_sp1_interp)
                pre_shifted_sp2_interp = Preprocessing(shifted_sp2_interp)
                pre_shifted_sp1_interp.spectrum_process(spect_preprocess)
                pre_shifted_sp2_interp.spectrum_process(spect_preprocess)
                shifted_sp1_interp = pre_shifted_sp1_interp.spectrum
                shifted_sp2_interp = pre_shifted_sp2_interp.spectrum

            similarity_obj = simi_class(shifted_sp1_interp.y,
                                        shifted_sp2_interp.y)
            try:
                similarity_value = similarity_obj.similarity_measure()
            except Exception:
                # Narrowed from a bare ``except``; see note above.
                warnings.warn("Cannot generate valid similarity value "
                              "for the two spectra")
                similarity_value = 0
            return similarity_value
def energy_overlap(sp1, sp2):
    """
    Calculate the overlapping energy range of two spectra.

    The lower bound is the larger of the two minimum energies; the upper
    bound is the smaller of the two maximum energies.

    Args:
        sp1: Spectrum object 1
        sp2: Spectrum object 2

    Returns:
        list: ``[lower, upper]`` overlap energy range
    """
    lower_bound = max(sp1.x.min(), sp2.x.min())
    upper_bound = min(sp1.x.max(), sp2.x.max())
    return [lower_bound, upper_bound]
def spectra_energy_interpolate(sp1, energy_range):
    """
    Re-grid a spectrum onto ``energy_range`` in place and return it.

    Uses scipy's interp1d to evaluate the absorption on the new grid.

    Args:
        sp1: Spectrum object to re-grid (mutated in place).
        energy_range: new energy range used in interpolation

    Returns:
        The same Spectrum object with ``x`` set to ``energy_range`` and ``y``
        set to the interpolated absorption values.
    """
    interpolator = interp1d(sp1.x, sp1.y)
    new_energy = np.array(energy_range)
    sp1.y = interpolator(new_energy)
    sp1.x = new_energy
    return sp1
def _pad_spectrum_low_end(sp, min_energy):
    """Return (x, y) arrays for *sp* padded down to *min_energy*.

    The padding grid uses the spectrum's own point density (energy span
    divided by the number of points), and the first absorption value is
    repeated for every padded point.
    """
    # Energy step implied by the spectrum's point density.
    step = np.ptp(sp.x) / sp.ydim[0]
    # Number of grid points needed to cover [min_energy, sp.x.min()] at
    # that density (at least 2 so linspace is well defined).
    num = max(int(np.ceil((sp.x.min() - min_energy) / step)) + 1, 2)
    # BUG FIX: the previous implementation passed the density as
    # ``retstep=...`` to np.linspace; retstep is a boolean flag, so the
    # density was silently ignored and 50 samples were always generated.
    # The last sample is dropped to avoid duplicating sp.x.min().
    pad_energy = np.linspace(min_energy, sp.x.min(), num)[:-1]
    ext_x = np.hstack((pad_energy, sp.x))
    # Repeat the first absorption value over the padded points
    # (np.pad replaces the legacy np.lib.pad alias).
    ext_y = np.pad(sp.y, (len(pad_energy), 0), 'constant',
                   constant_values=(sp.y[0], 0))
    return ext_x, ext_y
def spectra_lower_extend(sp1, sp2):
    """
    Extend the energy range of spectra and ensure both spectra have the same
    lower bound in energy. The spectrum with the higher low-energy bound is
    extended; its first absorption value is used for the absorption
    extension.
    Args:
        sp1: Spectrum object 1
        sp2: Spectrum object 2
    Returns:
        Two Spectrum objects with the same lower energy bound
    """
    min_energy = min(sp1.x.min(), sp2.x.min())
    if sp1.x.min() > min_energy:
        sp1.x, sp1.y = _pad_spectrum_low_end(sp1, min_energy)
    elif sp2.x.min() > min_energy:
        sp2.x, sp2.y = _pad_spectrum_low_end(sp2, min_energy)
    return sp1, sp2
def absorption_onset_shift(sp1, sp2, intensity_threshold):
    """
    Shift spectrum 2 with respect to spectrum 1 using the difference between
    the two spectra's onsets of absorption.
    The onset of each spectrum is the lowest incident energy at which its
    absorption intensity exceeds ``intensity_threshold`` times its peak
    intensity.
    Args:
        sp1: Spectrum object 1
        sp2: Spectrum object 2
        intensity_threshold: The absorption peak intensity threshold used to
            determine the absorption onset. Must be a float between 0 and 1.
    Returns:
        shifted_sp1: Copy of Spectrum object 1
        shifted_sp2: Spectrum object with absorption same as sp2 and
            shifted energy range
        energy_diff: Energy difference between sp1 and sp2;
            energy_diff > 0 means sp2 needed to shift left
        onset_1: Onset energy of sp1
    """
    threshold = float(intensity_threshold)
    if not 0 <= threshold <= 1:
        raise ValueError("The intensity threshold must be between 0 and 1")
    # Onset = first energy whose intensity exceeds the scaled peak height.
    onset_1 = sp1.x[np.argmax(sp1.y > max(sp1.y) * threshold)]
    onset_2 = sp2.x[np.argmax(sp2.y > max(sp2.y) * threshold)]
    energy_diff = onset_2 - onset_1
    if energy_diff >= 0:
        # sp2's onset lies above sp1's: shift sp2 left.
        sp2_new_energy = sp2.x - energy_diff
        sp2_new_mu = sp2.y
    elif energy_diff < 0:
        # sp2's onset lies below sp1's: shift sp2 right.
        sp2_new_energy = sp2.x - energy_diff
        sp2_new_mu = sp2.y
    shifted_sp1 = Spectrum(sp1.x, sp1.y)
    shifted_sp2 = Spectrum(sp2_new_energy, sp2_new_mu)
    return shifted_sp1, shifted_sp2, energy_diff, onset_1
def signal_corre_shift(sp1, sp2):
    """
    Use the cross-correlation function between two spectra to determine the
    energy shift of sp2 relative to sp1.
    Both spectra are first interpolated onto a common 0.01 eV grid covering
    their overlapping energy range; the lag maximizing the cross-correlation
    of the interpolated signals is converted back into an energy difference.
    Args:
        sp1: Spectrum object 1
        sp2: Spectrum object 2
    Returns:
        shifted_sp1: Copy of Spectrum object 1
        shifted_sp2: Spectrum object with absorption same as sp2 and
            energy axis shifted by -energy_diff
        energy_diff: Energy difference between sp1 and sp2,
            energy_diff > 0 means sp2 needs to shift left
    """
    overlap_energy = energy_overlap(sp1, sp2)
    # Energy grid interpolate point density: 0.01 eV
    overlap_energy_grid = np.linspace(overlap_energy[0], overlap_energy[1],
                                      int(float(overlap_energy[1] - overlap_energy[0]) / 0.01))
    # Work on copies so the callers' spectra are not mutated by the
    # in-place interpolation.
    interp_sp1 = spectra_energy_interpolate(Spectrum(sp1.x, sp1.y), overlap_energy_grid)
    interp_sp2 = spectra_energy_interpolate(Spectrum(sp2.x, sp2.y), overlap_energy_grid)
    if not np.allclose(interp_sp1.x, interp_sp2.x, 1e-5):
        raise ValueError("Two scaled spectra's energy grid densities are different")
    # Full cross-correlation of two length-N signals has length 2N - 1; the
    # position of its maximum encodes the lag between the two signals.
    sp2_shift_index = np.argmax(signal.correlate(interp_sp2.y, interp_sp1.y))
    # NOTE(review): the zero-lag index of a full correlation is N - 1, not
    # N, so comparing against shape[0] below looks off by one — confirm the
    # intended convention before relying on single-grid-point accuracy.
    # sp2 need to shift left
    if sp2_shift_index > interp_sp2.x.shape[0]:
        left_shift_index = sp2_shift_index - interp_sp2.x.shape[0]
        energy_diff = interp_sp2.x[left_shift_index] - interp_sp2.x.min()
    # sp2 need to shift right
    elif sp2_shift_index < interp_sp2.x.shape[0]:
        right_shift_index = interp_sp2.x.shape[0] - sp2_shift_index
        energy_diff = -(interp_sp2.x[right_shift_index] - interp_sp2.x.min())
    else:
        energy_diff = 0
    shifted_sp1 = Spectrum(sp1.x, sp1.y)
    shifted_sp2 = Spectrum(sp2.x - energy_diff, sp2.y)
    return shifted_sp1, shifted_sp2, energy_diff
def preset_value_shift(sp1, sp2, preset_shift):
    """
    Shift spectrum 2 relative to spectrum 1 by a user-supplied energy value.
    Args:
        sp1: Spectrum object 1
        sp2: Spectrum object 2
        preset_shift: Preset energy shift value between the two spectra;
            a positive value means sp2 needs to shift left
    Returns:
        Copies of the two spectra (sp2 with its energy axis shifted by
        -preset_shift) and the preset shift value itself
    """
    unshifted = Spectrum(sp1.x, sp1.y)
    shifted = Spectrum(sp2.x - preset_shift, sp2.y)
    return unshifted, shifted, preset_shift
| czhengsci/veidt | veidt/elsie/spectra_similarity.py | Python | bsd-3-clause | 17,366 | [
"pymatgen"
] | e58af651ce9dd88aa0e7ee494c1da831f1df38508594554392edf6dfcdbf1bc8 |
from PyQt4.QtGui import QGroupBox, QSizePolicy, QRadioButton, QHBoxLayout
class BasisTypeWidget(QGroupBox):
    """Group box that lets the user pick the orbital basis type
    (Gaussian or Slater) via two radio buttons."""

    def __init__(self, parent=None):
        # BUG FIX: the original called super(QGroupBox, self), which starts
        # the MRO *after* QGroupBox and therefore skips QGroupBox.__init__;
        # super() must start from this class.
        super(BasisTypeWidget, self).__init__(parent)
        self.initUI()
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)

    def initUI(self):
        """Build the two radio buttons and lay them out horizontally."""
        self.setTitle("Orbital type")
        radio1 = QRadioButton("&Gaussian")
        radio1.setObjectName("gaussian")
        radio2 = QRadioButton("&Slater")
        radio2.setObjectName("slater")
        vbox = QHBoxLayout()
        vbox.addWidget(radio1)
        vbox.addWidget(radio2)
        vbox.addStretch(1)
        self.setLayout(vbox)

    def setGausian(self):
        """Check the Gaussian radio button.

        Kept under its historical (misspelled) name for backward
        compatibility; prefer the ``setGaussian`` alias below.
        """
        self.findChild(QRadioButton, "gaussian").setChecked(True)

    # Correctly-spelled alias so new callers need not repeat the typo.
    setGaussian = setGausian

    def setSlater(self):
        """Check the Slater radio button."""
        self.findChild(QRadioButton, "slater").setChecked(True)
| beangoben/toulouse_secretgui | cipsi/BasisTypeWidget.py | Python | gpl-3.0 | 844 | [
"Gaussian"
] | a447db40469128fd89118f2b51dce1d2124cfc0c3afc3aa714b9bac190e73a08 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 22 17:21:56 2018
.. codeauthor:: Jonas Svenstrup Hansen <jonas.svenstrup@gmail.com>
"""
import os
import numpy as np
import random
from astropy.io import fits
from astropy.table import Table, Column
from astropy.wcs import WCS
# Import stuff from the photometry directory:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__))))
from photometry.psf import PSF
from photometry.utilities import mag2flux
#from photometry.plots import plot_image
class simulateFITS(object):
    def __init__(self, Nstars = 5, Ntimes = 5,
            save_images=True, overwrite_images=True):
        """
        Simulate FITS images with stars, background and noise.
        The purpose of this code is not to replace SPyFFI, but to supplement it
        in making simulated images simpler and more customizable. The aim is to
        supply simulated images that can illustrate the performance of various
        photometry methods in the photometry pipeline.
        Parameters:
            Nstars (int): Number of stars in image. Default is 5.
            Ntimes (int): Number of time steps in timeseries. Default is 5.
            save_images (boolean): True if images and catalog should be saved.
                Default is True.
            overwrite_images (boolean): True if image and catalog files should
                be overwritten. Default is True
        Output:
            The output FITS images are saved to a subdirectory images in the
            parent directory specified by the environment variable
            TESSPHOT_INPUT. An ASCII file named catalog.txt.gz with the
            simulated catalog, prepared in the format read by
            `prepare_photometry`, is written to this parent directory.
        Example:
            Default use. Write 5 FITS images of shape 200x200px with 5 stars in
            them to 5 separate files in a subdirectory called images in the
            directory specified by the TESSPHOT_INPUT environment variable:
            >>> sim = simulateFITS()
            Print catalog. This call does not save images or a catalog file,
            but will just print the catalog.
            >>> sim = simulateFITS(save_images=False)
                 ra            decl      prop_mot_ra prop_mot_dec      row           col         tmag
            -------------- --------------- ----------- ------------ ------------- ------------- -------
            0.029851440263   0.68646339125         0.0          0.0     117.6794385 5.11738975937 12.5627
             0.42553055972   1.00578707012         0.0          0.0 172.420640592  72.948095952 14.4416
             1.32855128151  0.643677266712         0.0          0.0 110.344674294 227.751648259 11.5697
             1.23209768011 0.0831070155292         0.0          0.0 14.2469169479 211.216745161 13.4081
            0.451164111667  0.512332559648         0.0          0.0 87.8284387967  77.342419143 7.69913
        .. codeauthor:: Jonas Svenstrup Hansen <jonas.svenstrup@gmail.com>
        """
        # BUG FIX: use the builtin int(); the np.int alias was deprecated and
        # removed in NumPy >= 1.24.
        self.Nstars = int(Nstars) # Number of stars in image
        self.Ntimes = int(Ntimes) # Number of images in time series
        self.save_images = save_images # True if images+catalog should be saved
        self.overwrite_images = overwrite_images # True if overwrite in saving
        # Get output directory from enviroment variable:
        self.output_folder = os.environ.get('TESSPHOT_INPUT',
                os.path.abspath('.'))
        # Set image parameters:
        self.pixel_scale = 21.0 # Size of single pixel in arcsecs
        self.Nrows = 256
        self.Ncols = 256
        self.stamp = (0,self.Nrows,0,self.Ncols)
        self.coord_zero_point = [0.,0.] # Zero point
        # TODO: move part of __init__ to a file run_simulateFITS in parent dir
        # Define time stamps:
        self.times = self.make_times()
        # Set random number generator seed:
        random.seed(0)
        np.random.seed(0)
        # Make catalog:
        self.catalog = self.make_catalog()
        # Adjust catalog for debugging purposes:
#        self.catalog['row'][4] = 13
#        self.catalog['col'][4] = 13
        # Save catalog to file:
        self.make_catalog_file(self.catalog)
        # Apply time-independent changes to catalog:
#        self.catalog = self.apply_inaccurate_catalog(self.catalog)
        # Loop through the time stamps:
        for i, timestamp in enumerate(self.times):
            print("Making timestamp: "+str(timestamp))
            # Apply time-dependent changes to catalog:
#            self.catalog = self.apply_variable_magnitudes(self.catalog,
#                    timestamp)
            # Make stars from catalog:
            stars = self.make_stars()
            # Make uniform background:
            bkg = self.make_background()
            # Make Gaussian noise:
            noise = self.make_noise()
            # Sum image from its parts:
            img = stars + bkg + noise
            if self.save_images:
                # Write img to FITS file:
                # TODO: Add possibility to write to custom directory
                self.make_fits(img, timestamp, i)

    def make_times(self, cadence = 1800.0):
        """
        Make the time stamps.
        Parameters:
            cadence (float): Time difference between frames. Default is 1800
            seconds.
        Returns:
            times (numpy array): Timestamps of all images to be made.
        """
        # Define time stamps:
        times = np.arange(0, cadence*self.Ntimes, cadence)
        # Force correct number of time steps:
        # (this is necessary because cadence is not an int)
        if len(times) > self.Ntimes:
            # BUG FIX: trim to the requested number of steps instead of a
            # hard-coded 10.
            times = times[0:self.Ntimes]
        return times

    def make_catalog(self):
        """
        Make catalog of stars in the current image.
        The table contains the following columns:
         * starid: Identifier. Starts at 0.
         * row:    Pixel row in image.
         * col:    Pixel column in image.
         * tmag:   TESS magnitude.
        Returns:
            catalog (`astropy.table.Table`): Table with stars in the current
            image.
        """
        # Set star identification:
        starids = np.arange(self.Nstars, dtype=int)
        # Set buffer pixel size around edge where not to put stars:
        # TODO: Add possibility of stars on the edges
        bufferpx = 3
        # Draw uniform row positions:
        starrows = np.random.uniform(bufferpx, self.Nrows-bufferpx,
                self.Nstars)
        # Draw uniform column positions:
        starcols = np.random.uniform(bufferpx, self.Ncols-bufferpx,
                self.Nstars)
        # Draw stellar magnitudes:
        starmag = np.random.uniform(5, 15, self.Nstars)
        # Collect star parameters in list for catalog:
        cat = [starids, starrows, starcols, starmag]
        # Make astropy table with catalog:
        return Table(
            cat,
            names=('starid', 'row', 'col', 'tmag'),
            dtype=('int64', 'float64', 'float64', 'float32')
        )

    def make_catalog_file(self, catalog, fname='catalog', compress=True):
        """
        Write simulated catalog to an ASCII file in the format used by
        `prepare_photometry`.
        The name of each column in the catalog is written as a header in the
        first line of the catalog file. The following columns will be written:
         * ra:            Right ascension coordinate.
         * decl:          Declination coordinate.
         * prop_mot_ra:   Proper motion in right ascension. Is set to 0.
         * prop_mot_decl: Proper motion in declination. Is set to 0.
         * row:           Pixel row in 200x200px full frame image.
         * col:           Pixel column in 200x200px full frame image.
         * tmag:          TESS magnitude.
        Parameters:
            catalog (`astropy.table.Table`): Table with stars in the current
            image. Columns must be starid, row, col, tmag.
            fname (string): Filename of catalog. Default is catalog.
            compress (boolean): True if catalog txt file is to be compressed.
            Default is True.
        """
        # Remove starid in input catalog:
        catalog.remove_column('starid')
        # Set arbitrary ra and dec from pixel coordinates:
        # (neglect spacial transformation to spherical coordinates)
        ra = catalog['col'] * self.pixel_scale/3600 + self.coord_zero_point[0]
        decl = catalog['row'] * self.pixel_scale/3600 + self.coord_zero_point[1]
        # Set proper motion:
        prop_mot_ra = np.zeros_like(catalog['tmag'])
        prop_mot_dec = np.zeros_like(catalog['tmag'])
        # Define extra columns:
        Col_ra = Column(data=ra, name='ra', dtype=np.float64)
        Col_decl = Column(data=decl, name='decl', dtype=np.float64)
        Col_prop_mot_ra = Column(data=prop_mot_ra, name='prop_mot_ra',
                dtype=np.float64)
        Col_prop_mot_decl = Column(data=prop_mot_dec, name='prop_mot_dec',
                dtype=np.float64)
        # Add extra columns to catalog:
        catalog.add_columns([Col_ra, Col_decl,
                Col_prop_mot_ra, Col_prop_mot_decl],
                indexes=[0,0,0,0])
        if self.save_images:
            # Convert catalog to numpy array:
            catalog_out = np.asarray(catalog)
            if self.overwrite_images:
                # Directory with filename of catalog output file:
                if compress:
                    fextension = '.txt.gz'
                else:
                    fextension = '.txt'
                txtfiledir = os.path.join(self.output_folder, fname+fextension)
                # Write catalog to txt file:
                np.savetxt(txtfiledir, catalog_out,
                        delimiter='\t',
                        header=' '.join(catalog.colnames))
                # BUG FIX: report the output path only when a file was
                # actually written; previously txtfiledir was printed
                # unconditionally and raised a NameError whenever
                # save_images or overwrite_images was False.
                print("Writing catalog to file: "+txtfiledir)
            else:
                # TODO: add check and error if file exists
                pass
        # Print the catalog:
        print(catalog)

    def apply_inaccurate_catalog(self, catalog):
        """
        Modify the input catalog to simulate inaccurate catalog information
        independent of time.
        It is assumed that the right ascension and declination uncertainties
        apply directly to pixel row and column positions. Thus, the spacial
        transformation from spherical coordinates is neglected.
        Parameters:
            catalog (`astropy.table.Table`): Table with stars in the current
            image. Columns must be starid, row, col, tmag.
        Returns:
            catalog (`astropy.table.Table`): Table formatted like the catalog
            parameter, but with changes to its entries.
        """
        # Scatter of Gaia band to TESS band calibration (Stassun, 28 Jun 2017):
        sigma_tmag = 0.015 # (magnitudes)
        # Median RA std. in Gaia DR1 (Lindegren, 29 June 2016, Table 1):
        sigma_RA = 0.254 # (milliarcsec)
        sigma_col = self.pixel_scale * sigma_RA / 1e3
        # Median DEC std. in Gaia DR1 (Lindegren, 29 June 2016, Table 1):
        sigma_DEC = 0.233 # (milliarcsec)
        sigma_row = self.pixel_scale * sigma_DEC / 1e3
        # Loop through each star in the catalog:
        for star in range(len(catalog['tmag'])):
            # Modify TESS magnitude:
            catalog['tmag'][star] += random.gauss(0, sigma_tmag)
            # Modify column pixel positions:
            catalog['col'][star] += random.gauss(0, sigma_col)
            # Modify row pixel positions:
            catalog['row'][star] += random.gauss(0, sigma_row)
        return catalog

    def apply_variable_magnitudes(self, catalog, timestamp):
        """
        Modify the input catalog to simulate variable stars.
        Parameters:
            catalog (`astropy.table.Table`): Table with stars in the current
            image. Columns must be starid, row, col, tmag.
        Returns:
            catalog (`astropy.table.Table`): Table formatted like the catalog
            parameter, but with changes to its entries.
        """
        # TODO: Introduce some variation in the TESS magnitude here
        return catalog

    def make_stars(self, camera=1, ccd=1):
        """
        Make stars for the image and append catalog with flux column.
        Parameters:
            camera (int): Kepler camera. Used to get PSF. Default is 1.
            ccd (int): Kepler CCD. Used to get PSF. Default is 1.
        Returns:
            stars (numpy array): Summed PRFs of stars in the image of the same
            shape as image.
        """
        # Create PSF class instance:
        KPSF = PSF(camera=camera, ccd=ccd, stamp=self.stamp)
        # Make list with parameter numpy arrays for the pixel integrater:
        params = [
            np.array(
                [self.catalog['row'][i],
                self.catalog['col'][i],
                mag2flux(self.catalog['tmag'][i])]
            )
            for i in range(self.Nstars)
        ]
        # Integrate stars to image:
        return KPSF.integrate_to_image(params, cutoff_radius=20)

    def make_background(self, bkg_level=1e3):
        """
        Make a background for the image.
        Parameters:
            bkg_level (float): Background level of uniform background. Default
            is 1000.
        Returns:
            bkg (numpy array): Background array of the same shape as image.
        """
        # Apply background level by multiplying:
        return bkg_level * np.ones([self.Nrows, self.Ncols])

    def make_noise(self, sigma=500.0):
        """
        Make Gaussian noise uniformily across the image.
        Parameters:
            sigma (float): Sigma parameter of Gaussian distribution for noise.
            Default is 500.0.
        Returns:
            noise (numpy array): Noise array of the same shape as image.
        """
        # Preallocate noise array:
        noise = np.zeros([self.Nrows, self.Ncols])
        # Loop over each pixel:
        for row in range(self.Nrows):
            for col in range(self.Ncols):
                # Draw a random value from a Gaussian (normal) distribution:
                noise[row,col] = random.gauss(mu=0, sigma=sigma)
        return noise

    def make_fits(self, img, timestamp, i, outdir=None):
        """
        Write image to FITS file.
        Parameters:
            img (numpy array): Image to write to file.
            timestamp (float): Timestamp in seconds of image.
            i (int): Timestamp index that is used in filename.
        """
        # Make WCS solution parameters:
        w = WCS(naxis=2)
        w.wcs.crpix = [0,0]
        w.wcs.cdelt = [self.pixel_scale/3600, self.pixel_scale/3600]
        w.wcs.crval = self.coord_zero_point # [0.,0.]
        w.wcs.ctype = ["RA---AIR", "DEC--AIR"]
        header = w.to_header()
        # Instantiate primary header data unit:
        hdu = fits.PrimaryHDU(data=img, header=header)
        # Add timestamp to header with a unit of days:
        hdu.header['BJD'] = (timestamp/3600/24,
                'time in days (arb. starting point)')
        hdu.header['NAXIS'] = (2, 'Number of data dimension')
        hdu.header['NAXIS1'] = (self.Ncols, 'Number of pixel columns')
        hdu.header['NAXIS2'] = (self.Nrows, 'Number of pixel rows')
        # TODO: write more info to header
        # Specify output directory:
        if outdir is None:
            outdir = os.path.join(self.output_folder, 'images')
        # Remove any previous hdf5 file made by prepare_photometry:
        try:
            hdf5filename = 'camera1_ccd1.hdf5'
            os.remove(os.path.join(self.output_folder,hdf5filename))
        except OSError:
            # The hdf5 file may simply not exist; that is fine.
            # (Narrowed from a bare except, which also hid typos/KeyboardInterrupt.)
            pass
        # Write FITS file to output directory:
        hdu.writeto(os.path.join(outdir, 'tess{time:011d}-{camera:d}-{ccd:d}-0000-s_ffic.fits'.format(time=i, camera=1, ccd=1)),
                overwrite=self.overwrite_images)
# Demo entry point: build the simulated catalog and print it without
# writing any FITS images or catalog files to disk.
if __name__ == '__main__':
    sim = simulateFITS(save_images=False)
| tasoc/photometry | simulation/simulateFITS.py | Python | gpl-3.0 | 13,921 | [
"Gaussian"
] | e364f3ba75bc0c6d1a236bb0780c9eb42862bfe58d696b74e957d29622383a4f |
#!/usr/bin/env python
import argparse
import random
words = ["a",
"a&p",
"a's",
"aa",
"aaa",
"aaaa",
"aaron",
"ab",
"aba",
"ababa",
"aback",
"abase",
"abash",
"abate",
"abbas",
"abbe",
"abbey",
"abbot",
"abbott",
"abc",
"abe",
"abed",
"abel",
"abet",
"abide",
"abject",
"ablaze",
"able",
"abner",
"abo",
"abode",
"abort",
"about",
"above",
"abrade",
"abram",
"absorb",
"abuse",
"abut",
"abyss",
"ac",
"acadia",
"accra",
"accrue",
"ace",
"acetic",
"ache",
"acid",
"acidic",
"acm",
"acme",
"acorn",
"acre",
"acrid",
"act",
"acton",
"actor",
"acts",
"acuity",
"acute",
"ad",
"ada",
"adage",
"adagio",
"adair",
"adam",
"adams",
"adapt",
"add",
"added",
"addict",
"addis",
"addle",
"adele",
"aden",
"adept",
"adieu",
"adjust",
"adler",
"admit",
"admix",
"ado",
"adobe",
"adonis",
"adopt",
"adore",
"adorn",
"adult",
"advent",
"advert",
"advise",
"ae",
"aegis",
"aeneid",
"af",
"afar",
"affair",
"affine",
"affix",
"afire",
"afoot",
"afraid",
"africa",
"afro",
"aft",
"ag",
"again",
"agate",
"agave",
"age",
"agee",
"agenda",
"agent",
"agile",
"aging",
"agnes",
"agnew",
"ago",
"agone",
"agony",
"agree",
"ague",
"agway",
"ah",
"ahead",
"ahem",
"ahoy",
"ai",
"aid",
"aida",
"aide",
"aides",
"aiken",
"ail",
"aile",
"aim",
"ain't",
"ainu",
"air",
"aires",
"airman",
"airway",
"airy",
"aisle",
"aj",
"ajar",
"ajax",
"ak",
"akers",
"akin",
"akron",
"al",
"ala",
"alai",
"alamo",
"alan",
"alarm",
"alaska",
"alb",
"alba",
"album",
"alcoa",
"alden",
"alder",
"ale",
"alec",
"aleck",
"aleph",
"alert",
"alex",
"alexei",
"alga",
"algae",
"algal",
"alger",
"algol",
"ali",
"alia",
"alias",
"alibi",
"alice",
"alien",
"alight",
"align",
"alike",
"alive",
"all",
"allah",
"allan",
"allay",
"allen",
"alley",
"allied",
"allis",
"allot",
"allow",
"alloy",
"allure",
"ally",
"allyl",
"allyn",
"alma",
"almost",
"aloe",
"aloft",
"aloha",
"alone",
"along",
"aloof",
"aloud",
"alp",
"alpha",
"alps",
"also",
"alsop",
"altair",
"altar",
"alter",
"alto",
"alton",
"alum",
"alumni",
"alva",
"alvin",
"alway",
"am",
"ama",
"amass",
"amaze",
"amber",
"amble",
"ambush",
"amen",
"amend",
"ames",
"ami",
"amid",
"amide",
"amigo",
"amino",
"amiss",
"amity",
"amman",
"ammo",
"amoco",
"amok",
"among",
"amort",
"amos",
"amp",
"ampere",
"ampex",
"ample",
"amply",
"amra",
"amulet",
"amuse",
"amy",
"an",
"ana",
"and",
"andes",
"andre",
"andrew",
"andy",
"anent",
"anew",
"angel",
"angelo",
"anger",
"angie",
"angle",
"anglo",
"angola",
"angry",
"angst",
"angus",
"ani",
"anion",
"anise",
"anita",
"ankle",
"ann",
"anna",
"annal",
"anne",
"annex",
"annie",
"annoy",
"annul",
"annuli",
"annum",
"anode",
"ansi",
"answer",
"ant",
"ante",
"anti",
"antic",
"anton",
"anus",
"anvil",
"any",
"anyhow",
"anyway",
"ao",
"aok",
"aorta",
"ap",
"apart",
"apathy",
"ape",
"apex",
"aphid",
"aplomb",
"appeal",
"append",
"apple",
"apply",
"april",
"apron",
"apse",
"apt",
"aq",
"aqua",
"ar",
"arab",
"araby",
"arc",
"arcana",
"arch",
"archer",
"arden",
"ardent",
"are",
"area",
"arena",
"ares",
"argive",
"argo",
"argon",
"argot",
"argue",
"argus",
"arhat",
"arid",
"aries",
"arise",
"ark",
"arlen",
"arlene",
"arm",
"armco",
"army",
"arnold",
"aroma",
"arose",
"arpa",
"array",
"arrear",
"arrow",
"arson",
"art",
"artery",
"arthur",
"artie",
"arty",
"aruba",
"arum",
"aryl",
"as",
"ascend",
"ash",
"ashen",
"asher",
"ashley",
"ashy",
"asia",
"aside",
"ask",
"askew",
"asleep",
"aspen",
"aspire",
"ass",
"assai",
"assam",
"assay",
"asset",
"assort",
"assure",
"aster",
"astm",
"astor",
"astral",
"at",
"at&t",
"ate",
"athens",
"atlas",
"atom",
"atomic",
"atone",
"atop",
"attic",
"attire",
"au",
"aubrey",
"audio",
"audit",
"aug",
"auger",
"augur",
"august",
"auk",
"aunt",
"aura",
"aural",
"auric",
"austin",
"auto",
"autumn",
"av",
"avail",
"ave",
"aver",
"avert",
"avery",
"aviate",
"avid",
"avis",
"aviv",
"avoid",
"avon",
"avow",
"aw",
"await",
"awake",
"award",
"aware",
"awash",
"away",
"awe",
"awful",
"awl",
"awn",
"awoke",
"awry",
"ax",
"axe",
"axes",
"axial",
"axiom",
"axis",
"axle",
"axon",
"ay",
"aye",
"ayers",
"az",
"aztec",
"azure",
"b",
"b's",
"ba",
"babe",
"babel",
"baby",
"bach",
"back",
"backup",
"bacon",
"bad",
"bade",
"baden",
"badge",
"baffle",
"bag",
"baggy",
"bah",
"bahama",
"bail",
"baird",
"bait",
"bake",
"baku",
"bald",
"baldy",
"bale",
"bali",
"balk",
"balkan",
"balky",
"ball",
"balled",
"ballot",
"balm",
"balmy",
"balsa",
"bam",
"bambi",
"ban",
"banal",
"band",
"bandit",
"bandy",
"bane",
"bang",
"banish",
"banjo",
"bank",
"banks",
"bantu",
"bar",
"barb",
"bard",
"bare",
"barfly",
"barge",
"bark",
"barley",
"barn",
"barnes",
"baron",
"barony",
"barr",
"barre",
"barry",
"barter",
"barth",
"barton",
"basal",
"base",
"basel",
"bash",
"basic",
"basil",
"basin",
"basis",
"bask",
"bass",
"bassi",
"basso",
"baste",
"bat",
"batch",
"bate",
"bater",
"bates",
"bath",
"bathe",
"batik",
"baton",
"bator",
"batt",
"bauble",
"baud",
"bauer",
"bawd",
"bawdy",
"bawl",
"baxter",
"bay",
"bayda",
"bayed",
"bayou",
"bazaar",
"bb",
"bbb",
"bbbb",
"bc",
"bcd",
"bd",
"be",
"beach",
"bead",
"beady",
"beak",
"beam",
"bean",
"bear",
"beard",
"beast",
"beat",
"beau",
"beauty",
"beaux",
"bebop",
"becalm",
"beck",
"becker",
"becky",
"bed",
"bedim",
"bee",
"beebe",
"beech",
"beef",
"beefy",
"been",
"beep",
"beer",
"beet",
"befall",
"befit",
"befog",
"beg",
"began",
"beget",
"beggar",
"begin",
"begun",
"behind",
"beige",
"being",
"beirut",
"bel",
"bela",
"belch",
"belfry",
"belie",
"bell",
"bella",
"belle",
"belly",
"below",
"belt",
"bema",
"beman",
"bemoan",
"ben",
"bench",
"bend",
"bender",
"benny",
"bent",
"benz",
"berea",
"bereft",
"beret",
"berg",
"berlin",
"bern",
"berne",
"bernet",
"berra",
"berry",
"bert",
"berth",
"beryl",
"beset",
"bess",
"bessel",
"best",
"bestir",
"bet",
"beta",
"betel",
"beth",
"bethel",
"betsy",
"bette",
"betty",
"bevel",
"bevy",
"beware",
"bey",
"bezel",
"bf",
"bg",
"bh",
"bhoy",
"bi",
"bias",
"bib",
"bibb",
"bible",
"bicep",
"biceps",
"bid",
"biddy",
"bide",
"bien",
"big",
"biggs",
"bigot",
"bile",
"bilge",
"bilk",
"bill",
"billow",
"billy",
"bin",
"binary",
"bind",
"bing",
"binge",
"bingle",
"bini",
"biota",
"birch",
"bird",
"birdie",
"birth",
"bison",
"bisque",
"bit",
"bitch",
"bite",
"bitt",
"bitten",
"biz",
"bizet",
"bj",
"bk",
"bl",
"blab",
"black",
"blade",
"blair",
"blake",
"blame",
"blanc",
"bland",
"blank",
"blare",
"blast",
"blat",
"blatz",
"blaze",
"bleak",
"bleat",
"bled",
"bleed",
"blend",
"bless",
"blest",
"blew",
"blimp",
"blind",
"blink",
"blinn",
"blip",
"bliss",
"blithe",
"blitz",
"bloat",
"blob",
"bloc",
"bloch",
"block",
"bloke",
"blond",
"blonde",
"blood",
"bloom",
"bloop",
"blot",
"blotch",
"blow",
"blown",
"blue",
"bluet",
"bluff",
"blum",
"blunt",
"blur",
"blurt",
"blush",
"blvd",
"blythe",
"bm",
"bmw",
"bn",
"bo",
"boa",
"boar",
"board",
"boast",
"boat",
"bob",
"bobbin",
"bobby",
"bobcat",
"boca",
"bock",
"bode",
"body",
"bog",
"bogey",
"boggy",
"bogus",
"bogy",
"bohr",
"boil",
"bois",
"boise",
"bold",
"bole",
"bolo",
"bolt",
"bomb",
"bombay",
"bon",
"bona",
"bond",
"bone",
"bong",
"bongo",
"bonn",
"bonus",
"bony",
"bonze",
"boo",
"booby",
"boogie",
"book",
"booky",
"boom",
"boon",
"boone",
"boor",
"boost",
"boot",
"booth",
"booty",
"booze",
"bop",
"borax",
"border",
"bore",
"borg",
"boric",
"boris",
"born",
"borne",
"borneo",
"boron",
"bosch",
"bose",
"bosom",
"boson",
"boss",
"boston",
"botch",
"both",
"bottle",
"bough",
"bouncy",
"bound",
"bourn",
"bout",
"bovine",
"bow",
"bowel",
"bowen",
"bowie",
"bowl",
"box",
"boxy",
"boy",
"boyar",
"boyce",
"boyd",
"boyle",
"bp",
"bq",
"br",
"brace",
"bract",
"brad",
"brady",
"brae",
"brag",
"bragg",
"braid",
"brain",
"brainy",
"brake",
"bran",
"brand",
"brandt",
"brant",
"brash",
"brass",
"brassy",
"braun",
"brave",
"bravo",
"brawl",
"bray",
"bread",
"break",
"bream",
"breath",
"bred",
"breed",
"breeze",
"bremen",
"brent",
"brest",
"brett",
"breve",
"brew",
"brian",
"briar",
"bribe",
"brice",
"brick",
"bride",
"brief",
"brig",
"briggs",
"brim",
"brine",
"bring",
"brink",
"briny",
"brisk",
"broad",
"brock",
"broil",
"broke",
"broken",
"bronx",
"brood",
"brook",
"brooke",
"broom",
"broth",
"brow",
"brown",
"browse",
"bruce",
"bruit",
"brunch",
"bruno",
"brunt",
"brush",
"brute",
"bryan",
"bryant",
"bryce",
"bryn",
"bs",
"bstj",
"bt",
"btl",
"bu",
"bub",
"buck",
"bud",
"budd",
"buddy",
"budge",
"buena",
"buenos",
"buff",
"bug",
"buggy",
"bugle",
"buick",
"build",
"built",
"bulb",
"bulge",
"bulk",
"bulky",
"bull",
"bully",
"bum",
"bump",
"bun",
"bunch",
"bundy",
"bunk",
"bunny",
"bunt",
"bunyan",
"buoy",
"burch",
"bureau",
"buret",
"burg",
"buried",
"burke",
"burl",
"burly",
"burma",
"burn",
"burnt",
"burp",
"burr",
"burro",
"burst",
"burt",
"burton",
"burtt",
"bury",
"bus",
"busch",
"bush",
"bushel",
"bushy",
"buss",
"bust",
"busy",
"but",
"butane",
"butch",
"buteo",
"butt",
"butte",
"butyl",
"buxom",
"buy",
"buyer",
"buzz",
"buzzy",
"bv",
"bw",
"bx",
"by",
"bye",
"byers",
"bylaw",
"byline",
"byrd",
"byrne",
"byron",
"byte",
"byway",
"byword",
"bz",
"c",
"c's",
"ca",
"cab",
"cabal",
"cabin",
"cable",
"cabot",
"cacao",
"cache",
"cacm",
"cacti",
"caddy",
"cadent",
"cadet",
"cadre",
"cady",
"cafe",
"cage",
"cagey",
"cahill",
"caiman",
"cain",
"caine",
"cairn",
"cairo",
"cake",
"cal",
"calder",
"caleb",
"calf",
"call",
"calla",
"callus",
"calm",
"calve",
"cam",
"camber",
"came",
"camel",
"cameo",
"camp",
"can",
"can't",
"canal",
"canary",
"cancer",
"candle",
"candy",
"cane",
"canis",
"canna",
"cannot",
"canny",
"canoe",
"canon",
"canopy",
"cant",
"canto",
"canton",
"cap",
"cape",
"caper",
"capo",
"car",
"carbon",
"card",
"care",
"caress",
"caret",
"carey",
"cargo",
"carib",
"carl",
"carla",
"carlo",
"carne",
"carob",
"carol",
"carp",
"carpet",
"carr",
"carrie",
"carry",
"carson",
"cart",
"carte",
"caruso",
"carve",
"case",
"casey",
"cash",
"cashew",
"cask",
"casket",
"cast",
"caste",
"cat",
"catch",
"cater",
"cathy",
"catkin",
"catsup",
"cauchy",
"caulk",
"cause",
"cave",
"cavern",
"cavil",
"cavort",
"caw",
"cayuga",
"cb",
"cbs",
"cc",
"ccc",
"cccc",
"cd",
"cdc",
"ce",
"cease",
"cecil",
"cedar",
"cede",
"ceil",
"celia",
"cell",
"census",
"cent",
"ceres",
"cern",
"cetera",
"cetus",
"cf",
"cg",
"ch",
"chad",
"chafe",
"chaff",
"chai",
"chain",
"chair",
"chalk",
"champ",
"chance",
"chang",
"chant",
"chao",
"chaos",
"chap",
"chapel",
"char",
"chard",
"charm",
"chart",
"chase",
"chasm",
"chaste",
"chat",
"chaw",
"cheap",
"cheat",
"check",
"cheek",
"cheeky",
"cheer",
"chef",
"chen",
"chert",
"cherub",
"chess",
"chest",
"chevy",
"chew",
"chi",
"chic",
"chick",
"chide",
"chief",
"child",
"chile",
"chili",
"chill",
"chilly",
"chime",
"chin",
"china",
"chine",
"chink",
"chip",
"chirp",
"chisel",
"chit",
"chive",
"chock",
"choir",
"choke",
"chomp",
"chop",
"chopin",
"choral",
"chord",
"chore",
"chose",
"chosen",
"chou",
"chow",
"chris",
"chub",
"chuck",
"chuff",
"chug",
"chum",
"chump",
"chunk",
"churn",
"chute",
"ci",
"cia",
"cicada",
"cider",
"cigar",
"cilia",
"cinch",
"cindy",
"cipher",
"circa",
"circe",
"cite",
"citrus",
"city",
"civet",
"civic",
"civil",
"cj",
"ck",
"cl",
"clad",
"claim",
"clam",
"clammy",
"clamp",
"clan",
"clang",
"clank",
"clap",
"clara",
"clare",
"clark",
"clarke",
"clash",
"clasp",
"class",
"claus",
"clause",
"claw",
"clay",
"clean",
"clear",
"cleat",
"cleft",
"clerk",
"cliche",
"click",
"cliff",
"climb",
"clime",
"cling",
"clink",
"clint",
"clio",
"clip",
"clive",
"cloak",
"clock",
"clod",
"clog",
"clomp",
"clone",
"close",
"closet",
"clot",
"cloth",
"cloud",
"clout",
"clove",
"clown",
"cloy",
"club",
"cluck",
"clue",
"cluj",
"clump",
"clumsy",
"clung",
"clyde",
"cm",
"cn",
"co",
"coach",
"coal",
"coast",
"coat",
"coax",
"cobb",
"cobble",
"cobol",
"cobra",
"coca",
"cock",
"cockle",
"cocky",
"coco",
"cocoa",
"cod",
"coda",
"coddle",
"code",
"codon",
"cody",
"coed",
"cog",
"cogent",
"cohen",
"cohn",
"coil",
"coin",
"coke",
"col",
"cola",
"colby",
"cold",
"cole",
"colon",
"colony",
"colt",
"colza",
"coma",
"comb",
"combat",
"come",
"comet",
"cometh",
"comic",
"comma",
"con",
"conch",
"cone",
"coney",
"congo",
"conic",
"conn",
"conner",
"conway",
"cony",
"coo",
"cook",
"cooke",
"cooky",
"cool",
"cooley",
"coon",
"coop",
"coors",
"coot",
"cop",
"cope",
"copra",
"copy",
"coral",
"corbel",
"cord",
"core",
"corey",
"cork",
"corn",
"corny",
"corp",
"corps",
"corvus",
"cos",
"cosec",
"coset",
"cosh",
"cost",
"costa",
"cosy",
"cot",
"cotta",
"cotty",
"couch",
"cough",
"could",
"count",
"coup",
"coupe",
"court",
"cousin",
"cove",
"coven",
"cover",
"covet",
"cow",
"cowan",
"cowl",
"cowman",
"cowry",
"cox",
"coy",
"coyote",
"coypu",
"cozen",
"cozy",
"cp",
"cpa",
"cq",
"cr",
"crab",
"crack",
"craft",
"crag",
"craig",
"cram",
"cramp",
"crane",
"crank",
"crap",
"crash",
"crass",
"crate",
"crater",
"crave",
"craw",
"crawl",
"craze",
"crazy",
"creak",
"cream",
"credit",
"credo",
"creed",
"creek",
"creep",
"creole",
"creon",
"crepe",
"crept",
"cress",
"crest",
"crete",
"crew",
"crib",
"cried",
"crime",
"crimp",
"crisp",
"criss",
"croak",
"crock",
"crocus",
"croft",
"croix",
"crone",
"crony",
"crook",
"croon",
"crop",
"cross",
"crow",
"crowd",
"crown",
"crt",
"crud",
"crude",
"cruel",
"crumb",
"crump",
"crush",
"crust",
"crux",
"cruz",
"cry",
"crypt",
"cs",
"ct",
"cu",
"cub",
"cuba",
"cube",
"cubic",
"cud",
"cuddle",
"cue",
"cuff",
"cull",
"culpa",
"cult",
"cumin",
"cuny",
"cup",
"cupful",
"cupid",
"cur",
"curb",
"curd",
"cure",
"curfew",
"curia",
"curie",
"curio",
"curl",
"curry",
"curse",
"curt",
"curve",
"cusp",
"cut",
"cute",
"cutlet",
"cv",
"cw",
"cx",
"cy",
"cycad",
"cycle",
"cynic",
"cyril",
"cyrus",
"cyst",
"cz",
"czar",
"czech",
"d",
"d'art",
"d's",
"da",
"dab",
"dacca",
"dactyl",
"dad",
"dada",
"daddy",
"dade",
"daffy",
"dahl",
"dahlia",
"dairy",
"dais",
"daisy",
"dakar",
"dale",
"daley",
"dally",
"daly",
"dam",
"dame",
"damn",
"damon",
"damp",
"damsel",
"dan",
"dana",
"dance",
"dandy",
"dane",
"dang",
"dank",
"danny",
"dante",
"dar",
"dare",
"dark",
"darken",
"darn",
"darry",
"dart",
"dash",
"data",
"date",
"dater",
"datum",
"daub",
"daunt",
"dave",
"david",
"davis",
"davit",
"davy",
"dawn",
"dawson",
"day",
"daze",
"db",
"dc",
"dd",
"ddd",
"dddd",
"de",
"deacon",
"dead",
"deaf",
"deal",
"dealt",
"dean",
"deane",
"dear",
"death",
"debar",
"debby",
"debit",
"debra",
"debris",
"debt",
"debug",
"debut",
"dec",
"decal",
"decay",
"decca",
"deck",
"decker",
"decor",
"decree",
"decry",
"dee",
"deed",
"deem",
"deep",
"deer",
"deere",
"def",
"defer",
"deform",
"deft",
"defy",
"degas",
"degum",
"deify",
"deign",
"deity",
"deja",
"del",
"delay",
"delft",
"delhi",
"delia",
"dell",
"della",
"delta",
"delve",
"demark",
"demit",
"demon",
"demur",
"den",
"deneb",
"denial",
"denny",
"dense",
"dent",
"denton",
"deny",
"depot",
"depth",
"depute",
"derby",
"derek",
"des",
"desist",
"desk",
"detach",
"deter",
"deuce",
"deus",
"devil",
"devoid",
"devon",
"dew",
"dewar",
"dewey",
"dewy",
"dey",
"df",
"dg",
"dh",
"dhabi",
"di",
"dial",
"diana",
"diane",
"diary",
"dibble",
"dice",
"dick",
"dicta",
"did",
"dido",
"die",
"died",
"diego",
"diem",
"diesel",
"diet",
"diety",
"dietz",
"dig",
"digit",
"dilate",
"dill",
"dim",
"dime",
"din",
"dinah",
"dine",
"ding",
"dingo",
"dingy",
"dint",
"diode",
"dip",
"dirac",
"dire",
"dirge",
"dirt",
"dirty",
"dis",
"disc",
"dish",
"disk",
"disney",
"ditch",
"ditto",
"ditty",
"diva",
"divan",
"dive",
"dixie",
"dixon",
"dizzy",
"dj",
"dk",
"dl",
"dm",
"dn",
"dna",
"do",
"dobbs",
"dobson",
"dock",
"docket",
"dod",
"dodd",
"dodge",
"dodo",
"doe",
"doff",
"dog",
"doge",
"dogma",
"dolan",
"dolce",
"dole",
"doll",
"dolly",
"dolt",
"dome",
"don",
"don't",
"done",
"doneck",
"donna",
"donor",
"doom",
"door",
"dope",
"dora",
"doria",
"doric",
"doris",
"dose",
"dot",
"dote",
"double",
"doubt",
"douce",
"doug",
"dough",
"dour",
"douse",
"dove",
"dow",
"dowel",
"down",
"downs",
"dowry",
"doyle",
"doze",
"dozen",
"dp",
"dq",
"dr",
"drab",
"draco",
"draft",
"drag",
"drain",
"drake",
"dram",
"drama",
"drank",
"drape",
"draw",
"drawl",
"drawn",
"dread",
"dream",
"dreamy",
"dreg",
"dress",
"dressy",
"drew",
"drib",
"dried",
"drier",
"drift",
"drill",
"drink",
"drip",
"drive",
"droll",
"drone",
"drool",
"droop",
"drop",
"dross",
"drove",
"drown",
"drub",
"drug",
"druid",
"drum",
"drunk",
"drury",
"dry",
"dryad",
"ds",
"dt",
"du",
"dual",
"duane",
"dub",
"dubhe",
"dublin",
"ducat",
"duck",
"duct",
"dud",
"due",
"duel",
"duet",
"duff",
"duffy",
"dug",
"dugan",
"duke",
"dull",
"dully",
"dulse",
"duly",
"duma",
"dumb",
"dummy",
"dump",
"dumpy",
"dun",
"dunce",
"dune",
"dung",
"dunham",
"dunk",
"dunlop",
"dunn",
"dupe",
"durer",
"dusk",
"dusky",
"dust",
"dusty",
"dutch",
"duty",
"dv",
"dw",
"dwarf",
"dwell",
"dwelt",
"dwight",
"dwyer",
"dx",
"dy",
"dyad",
"dye",
"dyer",
"dying",
"dyke",
"dylan",
"dyne",
"dz",
"e",
"e'er",
"e's",
"ea",
"each",
"eagan",
"eager",
"eagle",
"ear",
"earl",
"earn",
"earth",
"ease",
"easel",
"east",
"easy",
"eat",
"eaten",
"eater",
"eaton",
"eave",
"eb",
"ebb",
"eben",
"ebony",
"ec",
"echo",
"eclat",
"ecole",
"ed",
"eddie",
"eddy",
"eden",
"edgar",
"edge",
"edgy",
"edict",
"edify",
"edit",
"edith",
"editor",
"edna",
"edt",
"edwin",
"ee",
"eee",
"eeee",
"eel",
"eeoc",
"eerie",
"ef",
"efface",
"effie",
"efg",
"eft",
"eg",
"egan",
"egg",
"ego",
"egress",
"egret",
"egypt",
"eh",
"ei",
"eider",
"eight",
"eire",
"ej",
"eject",
"ek",
"eke",
"el",
"elan",
"elate",
"elba",
"elbow",
"elder",
"eldon",
"elect",
"elegy",
"elena",
"eleven",
"elfin",
"elgin",
"eli",
"elide",
"eliot",
"elite",
"elk",
"ell",
"ella",
"ellen",
"ellis",
"elm",
"elmer",
"elope",
"else",
"elsie",
"elton",
"elude",
"elute",
"elves",
"ely",
"em",
"embalm",
"embark",
"embed",
"ember",
"emcee",
"emery",
"emil",
"emile",
"emily",
"emit",
"emma",
"emory",
"empty",
"en",
"enact",
"enamel",
"end",
"endow",
"enemy",
"eng",
"engel",
"engle",
"engulf",
"enid",
"enjoy",
"enmity",
"enoch",
"enol",
"enos",
"enrico",
"ensue",
"enter",
"entrap",
"entry",
"envoy",
"envy",
"eo",
"ep",
"epa",
"epic",
"epoch",
"epoxy",
"epsom",
"eq",
"equal",
"equip",
"er",
"era",
"erase",
"erato",
"erda",
"ere",
"erect",
"erg",
"eric",
"erich",
"erie",
"erik",
"ernest",
"ernie",
"ernst",
"erode",
"eros",
"err",
"errand",
"errol",
"error",
"erupt",
"ervin",
"erwin",
"es",
"essay",
"essen",
"essex",
"est",
"ester",
"estes",
"estop",
"et",
"eta",
"etc",
"etch",
"ethan",
"ethel",
"ether",
"ethic",
"ethos",
"ethyl",
"etude",
"eu",
"eucre",
"euler",
"eureka",
"ev",
"eva",
"evade",
"evans",
"eve",
"even",
"event",
"every",
"evict",
"evil",
"evoke",
"evolve",
"ew",
"ewe",
"ewing",
"ex",
"exact",
"exalt",
"exam",
"excel",
"excess",
"exert",
"exile",
"exist",
"exit",
"exodus",
"expel",
"extant",
"extent",
"extol",
"extra",
"exude",
"exult",
"exxon",
"ey",
"eye",
"eyed",
"ez",
"ezra",
"f",
"f's",
"fa",
"faa",
"faber",
"fable",
"face",
"facet",
"facile",
"fact",
"facto",
"fad",
"fade",
"faery",
"fag",
"fahey",
"fail",
"fain",
"faint",
"fair",
"fairy",
"faith",
"fake",
"fall",
"false",
"fame",
"fan",
"fancy",
"fang",
"fanny",
"fanout",
"far",
"farad",
"farce",
"fare",
"fargo",
"farley",
"farm",
"faro",
"fast",
"fat",
"fatal",
"fate",
"fatty",
"fault",
"faun",
"fauna",
"faust",
"fawn",
"fay",
"faze",
"fb",
"fbi",
"fc",
"fcc",
"fd",
"fda",
"fe",
"fear",
"feast",
"feat",
"feb",
"fed",
"fee",
"feed",
"feel",
"feet",
"feign",
"feint",
"felice",
"felix",
"fell",
"felon",
"felt",
"femur",
"fence",
"fend",
"fermi",
"fern",
"ferric",
"ferry",
"fest",
"fetal",
"fetch",
"fete",
"fetid",
"fetus",
"feud",
"fever",
"few",
"ff",
"fff",
"ffff",
"fg",
"fgh",
"fh",
"fi",
"fiat",
"fib",
"fibrin",
"fiche",
"fide",
"fief",
"field",
"fiend",
"fiery",
"fife",
"fifo",
"fifth",
"fifty",
"fig",
"fight",
"filch",
"file",
"filet",
"fill",
"filler",
"filly",
"film",
"filmy",
"filth",
"fin",
"final",
"finale",
"finch",
"find",
"fine",
"finite",
"fink",
"finn",
"finny",
"fir",
"fire",
"firm",
"first",
"fish",
"fishy",
"fisk",
"fiske",
"fist",
"fit",
"fitch",
"five",
"fix",
"fj",
"fjord",
"fk",
"fl",
"flack",
"flag",
"flail",
"flair",
"flak",
"flake",
"flaky",
"flam",
"flame",
"flank",
"flap",
"flare",
"flash",
"flask",
"flat",
"flatus",
"flaw",
"flax",
"flea",
"fleck",
"fled",
"flee",
"fleet",
"flesh",
"flew",
"flex",
"flick",
"flier",
"flinch",
"fling",
"flint",
"flip",
"flirt",
"flit",
"flo",
"float",
"floc",
"flock",
"floe",
"flog",
"flood",
"floor",
"flop",
"floppy",
"flora",
"flour",
"flout",
"flow",
"flown",
"floyd",
"flu",
"flub",
"flue",
"fluff",
"fluid",
"fluke",
"flung",
"flush",
"flute",
"flux",
"fly",
"flyer",
"flynn",
"fm",
"fmc",
"fn",
"fo",
"foal",
"foam",
"foamy",
"fob",
"focal",
"foci",
"focus",
"fodder",
"foe",
"fog",
"foggy",
"fogy",
"foil",
"foist",
"fold",
"foley",
"folio",
"folk",
"folly",
"fond",
"font",
"food",
"fool",
"foot",
"foote",
"fop",
"for",
"foray",
"force",
"ford",
"fore",
"forge",
"forgot",
"fork",
"form",
"fort",
"forte",
"forth",
"forty",
"forum",
"foss",
"fossil",
"foul",
"found",
"fount",
"four",
"fovea",
"fowl",
"fox",
"foxy",
"foyer",
"fp",
"fpc",
"fq",
"fr",
"frail",
"frame",
"fran",
"franc",
"franca",
"frank",
"franz",
"frau",
"fraud",
"fray",
"freak",
"fred",
"free",
"freed",
"freer",
"frenzy",
"freon",
"fresh",
"fret",
"freud",
"frey",
"freya",
"friar",
"frick",
"fried",
"frill",
"frilly",
"frisky",
"fritz",
"fro",
"frock",
"frog",
"from",
"front",
"frost",
"froth",
"frown",
"froze",
"fruit",
"fry",
"frye",
"fs",
"ft",
"ftc",
"fu",
"fuchs",
"fudge",
"fuel",
"fugal",
"fugue",
"fuji",
"full",
"fully",
"fum",
"fume",
"fun",
"fund",
"fungal",
"fungi",
"funk",
"funny",
"fur",
"furl",
"furry",
"fury",
"furze",
"fuse",
"fuss",
"fussy",
"fusty",
"fuzz",
"fuzzy",
"fv",
"fw",
"fx",
"fy",
"fz",
"g",
"g's",
"ga",
"gab",
"gable",
"gabon",
"gad",
"gadget",
"gaff",
"gaffe",
"gag",
"gage",
"gail",
"gain",
"gait",
"gal",
"gala",
"galaxy",
"gale",
"galen",
"gall",
"gallop",
"galt",
"gam",
"game",
"gamin",
"gamma",
"gamut",
"gander",
"gang",
"gao",
"gap",
"gape",
"gar",
"garb",
"garish",
"garner",
"garry",
"garth",
"gary",
"gas",
"gash",
"gasp",
"gassy",
"gate",
"gates",
"gator",
"gauche",
"gaudy",
"gauge",
"gaul",
"gaunt",
"gaur",
"gauss",
"gauze",
"gave",
"gavel",
"gavin",
"gawk",
"gawky",
"gay",
"gaze",
"gb",
"gc",
"gd",
"ge",
"gear",
"gecko",
"gee",
"geese",
"geigy",
"gel",
"geld",
"gem",
"gemma",
"gene",
"genie",
"genii",
"genoa",
"genre",
"gent",
"gentry",
"genus",
"gerbil",
"germ",
"gerry",
"get",
"getty",
"gf",
"gg",
"ggg",
"gggg",
"gh",
"ghana",
"ghent",
"ghetto",
"ghi",
"ghost",
"ghoul",
"gi",
"giant",
"gibbs",
"gibby",
"gibe",
"giddy",
"gift",
"gig",
"gil",
"gila",
"gild",
"giles",
"gill",
"gilt",
"gimbal",
"gimpy",
"gin",
"gina",
"ginn",
"gino",
"gird",
"girl",
"girth",
"gist",
"give",
"given",
"gj",
"gk",
"gl",
"glad",
"gladdy",
"glade",
"glamor",
"gland",
"glans",
"glare",
"glass",
"glaze",
"gleam",
"glean",
"glee",
"glen",
"glenn",
"glib",
"glide",
"glint",
"gloat",
"glob",
"globe",
"glom",
"gloom",
"glory",
"gloss",
"glove",
"glow",
"glue",
"glued",
"gluey",
"gluing",
"glum",
"glut",
"glyph",
"gm",
"gmt",
"gn",
"gnarl",
"gnash",
"gnat",
"gnaw",
"gnome",
"gnp",
"gnu",
"go",
"goa",
"goad",
"goal",
"goat",
"gob",
"goer",
"goes",
"goff",
"gog",
"goggle",
"gogh",
"gogo",
"gold",
"golf",
"golly",
"gone",
"gong",
"goo",
"good",
"goode",
"goody",
"goof",
"goofy",
"goose",
"gop",
"gordon",
"gore",
"goren",
"gorge",
"gorky",
"gorse",
"gory",
"gosh",
"gospel",
"got",
"gouda",
"gouge",
"gould",
"gourd",
"gout",
"gown",
"gp",
"gpo",
"gq",
"gr",
"grab",
"grace",
"grad",
"grade",
"grady",
"graff",
"graft",
"grail",
"grain",
"grand",
"grant",
"grape",
"graph",
"grasp",
"grass",
"grata",
"grate",
"grater",
"grave",
"gravy",
"gray",
"graze",
"great",
"grebe",
"greed",
"greedy",
"greek",
"green",
"greer",
"greet",
"greg",
"gregg",
"greta",
"grew",
"grey",
"grid",
"grief",
"grieve",
"grill",
"grim",
"grime",
"grimm",
"grin",
"grind",
"grip",
"gripe",
"grist",
"grit",
"groan",
"groat",
"groin",
"groom",
"grope",
"gross",
"groton",
"group",
"grout",
"grove",
"grow",
"growl",
"grown",
"grub",
"gruff",
"grunt",
"gs",
"gsa",
"gt",
"gu",
"guam",
"guano",
"guard",
"guess",
"guest",
"guide",
"guild",
"guile",
"guilt",
"guise",
"guitar",
"gules",
"gulf",
"gull",
"gully",
"gulp",
"gum",
"gumbo",
"gummy",
"gun",
"gunk",
"gunky",
"gunny",
"gurgle",
"guru",
"gus",
"gush",
"gust",
"gusto",
"gusty",
"gut",
"gutsy",
"guy",
"guyana",
"gv",
"gw",
"gwen",
"gwyn",
"gx",
"gy",
"gym",
"gyp",
"gypsy",
"gyro",
"gz",
"h",
"h's",
"ha",
"haag",
"haas",
"habib",
"habit",
"hack",
"had",
"hades",
"hadron",
"hagen",
"hager",
"hague",
"hahn",
"haifa",
"haiku",
"hail",
"hair",
"hairy",
"haiti",
"hal",
"hale",
"haley",
"half",
"hall",
"halma",
"halo",
"halt",
"halvah",
"halve",
"ham",
"hamal",
"hamlin",
"han",
"hand",
"handy",
"haney",
"hang",
"hank",
"hanna",
"hanoi",
"hans",
"hansel",
"hap",
"happy",
"hard",
"hardy",
"hare",
"harem",
"hark",
"harley",
"harm",
"harp",
"harpy",
"harry",
"harsh",
"hart",
"harvey",
"hash",
"hasp",
"hast",
"haste",
"hasty",
"hat",
"hatch",
"hate",
"hater",
"hath",
"hatred",
"haul",
"haunt",
"have",
"haven",
"havoc",
"haw",
"hawk",
"hay",
"haydn",
"hayes",
"hays",
"hazard",
"haze",
"hazel",
"hazy",
"hb",
"hc",
"hd",
"he",
"he'd",
"he'll",
"head",
"heady",
"heal",
"healy",
"heap",
"hear",
"heard",
"heart",
"heat",
"heath",
"heave",
"heavy",
"hebe",
"hebrew",
"heck",
"heckle",
"hedge",
"heed",
"heel",
"heft",
"hefty",
"heigh",
"heine",
"heinz",
"heir",
"held",
"helen",
"helga",
"helix",
"hell",
"hello",
"helm",
"helmut",
"help",
"hem",
"hemp",
"hen",
"hence",
"henri",
"henry",
"her",
"hera",
"herb",
"herd",
"here",
"hero",
"heroic",
"heron",
"herr",
"hertz",
"hess",
"hesse",
"hettie",
"hetty",
"hew",
"hewitt",
"hewn",
"hex",
"hey",
"hf",
"hg",
"hh",
"hhh",
"hhhh",
"hi",
"hiatt",
"hick",
"hicks",
"hid",
"hide",
"high",
"hij",
"hike",
"hill",
"hilly",
"hilt",
"hilum",
"him",
"hind",
"hindu",
"hines",
"hinge",
"hint",
"hip",
"hippo",
"hippy",
"hiram",
"hire",
"hirsch",
"his",
"hiss",
"hit",
"hitch",
"hive",
"hj",
"hk",
"hl",
"hm",
"hn",
"ho",
"hoagy",
"hoar",
"hoard",
"hob",
"hobbs",
"hobby",
"hobo",
"hoc",
"hock",
"hodge",
"hodges",
"hoe",
"hoff",
"hog",
"hogan",
"hoi",
"hokan",
"hold",
"holdup",
"hole",
"holly",
"holm",
"holst",
"holt",
"home",
"homo",
"honda",
"hondo",
"hone",
"honey",
"hong",
"honk",
"hooch",
"hood",
"hoof",
"hook",
"hookup",
"hoop",
"hoot",
"hop",
"hope",
"horde",
"horn",
"horny",
"horse",
"horus",
"hose",
"host",
"hot",
"hotbox",
"hotel",
"hough",
"hound",
"hour",
"house",
"hove",
"hovel",
"hover",
"how",
"howdy",
"howe",
"howl",
"hoy",
"hoyt",
"hp",
"hq",
"hr",
"hs",
"ht",
"hu",
"hub",
"hubbub",
"hubby",
"huber",
"huck",
"hue",
"hued",
"huff",
"hug",
"huge",
"hugh",
"hughes",
"hugo",
"huh",
"hulk",
"hull",
"hum",
"human",
"humid",
"hump",
"humus",
"hun",
"hunch",
"hung",
"hunk",
"hunt",
"hurd",
"hurl",
"huron",
"hurrah",
"hurry",
"hurst",
"hurt",
"hurty",
"hush",
"husky",
"hut",
"hutch",
"hv",
"hw",
"hx",
"hy",
"hyde",
"hydra",
"hydro",
"hyena",
"hying",
"hyman",
"hymen",
"hymn",
"hymnal",
"hz",
"i",
"i'd",
"i'll",
"i'm",
"i's",
"i've",
"ia",
"iambic",
"ian",
"ib",
"ibex",
"ibid",
"ibis",
"ibm",
"ibn",
"ic",
"icc",
"ice",
"icing",
"icky",
"icon",
"icy",
"id",
"ida",
"idaho",
"idea",
"ideal",
"idiom",
"idiot",
"idle",
"idol",
"idyll",
"ie",
"ieee",
"if",
"iffy",
"ifni",
"ig",
"igloo",
"igor",
"ih",
"ii",
"iii",
"iiii",
"ij",
"ijk",
"ik",
"ike",
"il",
"ileum",
"iliac",
"iliad",
"ill",
"illume",
"ilona",
"im",
"image",
"imbue",
"imp",
"impel",
"import",
"impute",
"in",
"inane",
"inapt",
"inc",
"inca",
"incest",
"inch",
"incur",
"index",
"india",
"indies",
"indy",
"inept",
"inert",
"infect",
"infer",
"infima",
"infix",
"infra",
"ingot",
"inhere",
"injun",
"ink",
"inlay",
"inlet",
"inman",
"inn",
"inner",
"input",
"insect",
"inset",
"insult",
"intend",
"inter",
"into",
"inure",
"invoke",
"io",
"ion",
"ionic",
"iota",
"iowa",
"ip",
"ipso",
"iq",
"ir",
"ira",
"iran",
"iraq",
"irate",
"ire",
"irene",
"iris",
"irish",
"irk",
"irma",
"iron",
"irony",
"irs",
"irvin",
"irwin",
"is",
"isaac",
"isabel",
"ising",
"isis",
"islam",
"island",
"isle",
"isn't",
"israel",
"issue",
"it",
"it&t",
"it'd",
"it'll",
"italy",
"itch",
"item",
"ito",
"itt",
"iu",
"iv",
"ivan",
"ive",
"ivory",
"ivy",
"iw",
"ix",
"iy",
"iz",
"j",
"j's",
"ja",
"jab",
"jack",
"jacky",
"jacm",
"jacob",
"jacobi",
"jade",
"jag",
"jail",
"jaime",
"jake",
"jam",
"james",
"jan",
"jane",
"janet",
"janos",
"janus",
"japan",
"jar",
"jason",
"java",
"jaw",
"jay",
"jazz",
"jazzy",
"jb",
"jc",
"jd",
"je",
"jean",
"jed",
"jeep",
"jeff",
"jejune",
"jelly",
"jenny",
"jeres",
"jerk",
"jerky",
"jerry",
"jersey",
"jess",
"jesse",
"jest",
"jesus",
"jet",
"jew",
"jewel",
"jewett",
"jewish",
"jf",
"jg",
"jh",
"ji",
"jibe",
"jiffy",
"jig",
"jill",
"jilt",
"jim",
"jimmy",
"jinx",
"jive",
"jj",
"jjj",
"jjjj",
"jk",
"jkl",
"jl",
"jm",
"jn",
"jo",
"joan",
"job",
"jock",
"jockey",
"joe",
"joel",
"joey",
"jog",
"john",
"johns",
"join",
"joint",
"joke",
"jolla",
"jolly",
"jolt",
"jon",
"jonas",
"jones",
"jorge",
"jose",
"josef",
"joshua",
"joss",
"jostle",
"jot",
"joule",
"joust",
"jove",
"jowl",
"jowly",
"joy",
"joyce",
"jp",
"jq",
"jr",
"js",
"jt",
"ju",
"juan",
"judas",
"judd",
"jude",
"judge",
"judo",
"judy",
"jug",
"juggle",
"juice",
"juicy",
"juju",
"juke",
"jukes",
"julep",
"jules",
"julia",
"julie",
"julio",
"july",
"jumbo",
"jump",
"jumpy",
"junco",
"june",
"junk",
"junky",
"juno",
"junta",
"jura",
"jure",
"juror",
"jury",
"just",
"jut",
"jute",
"jv",
"jw",
"jx",
"jy",
"jz",
"k",
"k's",
"ka",
"kabul",
"kafka",
"kahn",
"kajar",
"kale",
"kalmia",
"kane",
"kant",
"kapok",
"kappa",
"karate",
"karen",
"karl",
"karma",
"karol",
"karp",
"kate",
"kathy",
"katie",
"katz",
"kava",
"kay",
"kayo",
"kazoo",
"kb",
"kc",
"kd",
"ke",
"keats",
"keel",
"keen",
"keep",
"keg",
"keith",
"keller",
"kelly",
"kelp",
"kemp",
"ken",
"keno",
"kent",
"kenya",
"kepler",
"kept",
"kern",
"kerr",
"kerry",
"ketch",
"kevin",
"key",
"keyed",
"keyes",
"keys",
"kf",
"kg",
"kh",
"khaki",
"khan",
"khmer",
"ki",
"kick",
"kid",
"kidde",
"kidney",
"kiev",
"kigali",
"kill",
"kim",
"kin",
"kind",
"king",
"kink",
"kinky",
"kiosk",
"kiowa",
"kirby",
"kirk",
"kirov",
"kiss",
"kit",
"kite",
"kitty",
"kiva",
"kivu",
"kiwi",
"kj",
"kk",
"kkk",
"kkkk",
"kl",
"klan",
"klaus",
"klein",
"kline",
"klm",
"klux",
"km",
"kn",
"knack",
"knapp",
"knauer",
"knead",
"knee",
"kneel",
"knelt",
"knew",
"knick",
"knife",
"knit",
"knob",
"knock",
"knoll",
"knot",
"knott",
"know",
"known",
"knox",
"knurl",
"ko",
"koala",
"koch",
"kodak",
"kola",
"kombu",
"kong",
"koran",
"korea",
"kp",
"kq",
"kr",
"kraft",
"krause",
"kraut",
"krebs",
"kruse",
"ks",
"kt",
"ku",
"kudo",
"kudzu",
"kuhn",
"kulak",
"kurd",
"kurt",
"kv",
"kw",
"kx",
"ky",
"kyle",
"kyoto",
"kz",
"l",
"l's",
"la",
"lab",
"laban",
"label",
"labia",
"labile",
"lac",
"lace",
"lack",
"lacy",
"lad",
"laden",
"ladle",
"lady",
"lag",
"lager",
"lagoon",
"lagos",
"laid",
"lain",
"lair",
"laity",
"lake",
"lam",
"lamar",
"lamb",
"lame",
"lamp",
"lana",
"lance",
"land",
"lane",
"lang",
"lange",
"lanka",
"lanky",
"lao",
"laos",
"lap",
"lapel",
"lapse",
"larch",
"lard",
"lares",
"large",
"lark",
"larkin",
"larry",
"lars",
"larva",
"lase",
"lash",
"lass",
"lasso",
"last",
"latch",
"late",
"later",
"latest",
"latex",
"lath",
"lathe",
"latin",
"latus",
"laud",
"laue",
"laugh",
"launch",
"laura",
"lava",
"law",
"lawn",
"lawson",
"lax",
"lay",
"layup",
"laze",
"lazy",
"lb",
"lc",
"ld",
"le",
"lea",
"leach",
"lead",
"leaf",
"leafy",
"leak",
"leaky",
"lean",
"leap",
"leapt",
"lear",
"learn",
"lease",
"leash",
"least",
"leave",
"led",
"ledge",
"lee",
"leech",
"leeds",
"leek",
"leer",
"leery",
"leeway",
"left",
"lefty",
"leg",
"legal",
"leggy",
"legion",
"leigh",
"leila",
"leland",
"lemma",
"lemon",
"len",
"lena",
"lend",
"lenin",
"lenny",
"lens",
"lent",
"leo",
"leon",
"leona",
"leone",
"leper",
"leroy",
"less",
"lessee",
"lest",
"let",
"lethe",
"lev",
"levee",
"level",
"lever",
"levi",
"levin",
"levis",
"levy",
"lew",
"lewd",
"lewis",
"leyden",
"lf",
"lg",
"lh",
"li",
"liar",
"libel",
"libido",
"libya",
"lice",
"lick",
"lid",
"lie",
"lied",
"lien",
"lieu",
"life",
"lifo",
"lift",
"light",
"like",
"liken",
"lila",
"lilac",
"lilly",
"lilt",
"lily",
"lima",
"limb",
"limbo",
"lime",
"limit",
"limp",
"lin",
"lind",
"linda",
"linden",
"line",
"linen",
"lingo",
"link",
"lint",
"linus",
"lion",
"lip",
"lipid",
"lisa",
"lise",
"lisle",
"lisp",
"list",
"listen",
"lit",
"lithe",
"litton",
"live",
"liven",
"livid",
"livre",
"liz",
"lizzie",
"lj",
"lk",
"ll",
"lll",
"llll",
"lloyd",
"lm",
"lmn",
"ln",
"lo",
"load",
"loaf",
"loam",
"loamy",
"loan",
"loath",
"lob",
"lobar",
"lobby",
"lobe",
"lobo",
"local",
"loci",
"lock",
"locke",
"locus",
"lodge",
"loeb",
"loess",
"loft",
"lofty",
"log",
"logan",
"loge",
"logic",
"loin",
"loire",
"lois",
"loiter",
"loki",
"lola",
"loll",
"lolly",
"lomb",
"lome",
"lone",
"long",
"look",
"loom",
"loon",
"loop",
"loose",
"loot",
"lop",
"lope",
"lopez",
"lord",
"lore",
"loren",
"los",
"lose",
"loss",
"lossy",
"lost",
"lot",
"lotte",
"lotus",
"lou",
"loud",
"louis",
"louise",
"louse",
"lousy",
"louver",
"love",
"low",
"lowe",
"lower",
"lowry",
"loy",
"loyal",
"lp",
"lq",
"lr",
"ls",
"lsi",
"lt",
"ltv",
"lu",
"lucas",
"lucia",
"lucid",
"luck",
"lucky",
"lucre",
"lucy",
"lug",
"luge",
"luger",
"luis",
"luke",
"lull",
"lulu",
"lumbar",
"lumen",
"lump",
"lumpy",
"lunar",
"lunch",
"lund",
"lung",
"lunge",
"lura",
"lurch",
"lure",
"lurid",
"lurk",
"lush",
"lust",
"lusty",
"lute",
"lutz",
"lux",
"luxe",
"luzon",
"lv",
"lw",
"lx",
"ly",
"lydia",
"lye",
"lying",
"lykes",
"lyle",
"lyman",
"lymph",
"lynch",
"lynn",
"lynx",
"lyon",
"lyons",
"lyra",
"lyric",
"lz",
"m",
"m&m",
"m's",
"ma",
"mabel",
"mac",
"mace",
"mach",
"macho",
"mack",
"mackey",
"macon",
"macro",
"mad",
"madam",
"made",
"madman",
"madsen",
"mae",
"magi",
"magic",
"magma",
"magna",
"magog",
"maid",
"maier",
"mail",
"maim",
"main",
"maine",
"major",
"make",
"malady",
"malay",
"male",
"mali",
"mall",
"malt",
"malta",
"mambo",
"mamma",
"mammal",
"man",
"mana",
"manama",
"mane",
"mange",
"mania",
"manic",
"mann",
"manna",
"manor",
"mans",
"manse",
"mantle",
"many",
"mao",
"maori",
"map",
"maple",
"mar",
"marc",
"march",
"marco",
"marcy",
"mardi",
"mare",
"margo",
"maria",
"marie",
"marin",
"marine",
"mario",
"mark",
"marks",
"marlin",
"marrow",
"marry",
"mars",
"marsh",
"mart",
"marty",
"marx",
"mary",
"maser",
"mash",
"mask",
"mason",
"masque",
"mass",
"mast",
"mat",
"match",
"mate",
"mateo",
"mater",
"math",
"matte",
"maul",
"mauve",
"mavis",
"maw",
"mawr",
"max",
"maxim",
"maxima",
"may",
"maya",
"maybe",
"mayer",
"mayhem",
"mayo",
"mayor",
"mayst",
"mazda",
"maze",
"mb",
"mba",
"mc",
"mccoy",
"mcgee",
"mckay",
"mckee",
"mcleod",
"md",
"me",
"mead",
"meal",
"mealy",
"mean",
"meant",
"meat",
"meaty",
"mecca",
"mecum",
"medal",
"medea",
"media",
"medic",
"medley",
"meek",
"meet",
"meg",
"mega",
"meier",
"meir",
"mel",
"meld",
"melee",
"mellow",
"melon",
"melt",
"memo",
"memoir",
"men",
"mend",
"menlo",
"menu",
"merck",
"mercy",
"mere",
"merge",
"merit",
"merle",
"merry",
"mesa",
"mescal",
"mesh",
"meson",
"mess",
"messy",
"met",
"metal",
"mete",
"meter",
"metro",
"mew",
"meyer",
"meyers",
"mezzo",
"mf",
"mg",
"mh",
"mi",
"miami",
"mica",
"mice",
"mickey",
"micky",
"micro",
"mid",
"midas",
"midge",
"midst",
"mien",
"miff",
"mig",
"might",
"mike",
"mila",
"milan",
"milch",
"mild",
"mildew",
"mile",
"miles",
"milk",
"milky",
"mill",
"mills",
"milt",
"mimi",
"mimic",
"mince",
"mind",
"mine",
"mini",
"minim",
"mink",
"minnow",
"minor",
"minos",
"minot",
"minsk",
"mint",
"minus",
"mira",
"mirage",
"mire",
"mirth",
"miser",
"misery",
"miss",
"missy",
"mist",
"misty",
"mit",
"mite",
"mitre",
"mitt",
"mix",
"mixup",
"mizar",
"mj",
"mk",
"ml",
"mm",
"mmm",
"mmmm",
"mn",
"mno",
"mo",
"moan",
"moat",
"mob",
"mobil",
"mock",
"modal",
"mode",
"model",
"modem",
"modish",
"moe",
"moen",
"mohr",
"moire",
"moist",
"molal",
"molar",
"mold",
"mole",
"moll",
"mollie",
"molly",
"molt",
"molten",
"mommy",
"mona",
"monad",
"mondo",
"monel",
"money",
"monic",
"monk",
"mont",
"monte",
"month",
"monty",
"moo",
"mood",
"moody",
"moon",
"moor",
"moore",
"moose",
"moot",
"mop",
"moral",
"morale",
"moran",
"more",
"morel",
"morn",
"moron",
"morse",
"morsel",
"mort",
"mosaic",
"moser",
"moses",
"moss",
"mossy",
"most",
"mot",
"motel",
"motet",
"moth",
"mother",
"motif",
"motor",
"motto",
"mould",
"mound",
"mount",
"mourn",
"mouse",
"mousy",
"mouth",
"move",
"movie",
"mow",
"moyer",
"mp",
"mph",
"mq",
"mr",
"mrs",
"ms",
"mt",
"mu",
"much",
"muck",
"mucus",
"mud",
"mudd",
"muddy",
"muff",
"muffin",
"mug",
"muggy",
"mugho",
"muir",
"mulch",
"mulct",
"mule",
"mull",
"multi",
"mum",
"mummy",
"munch",
"mung",
"munson",
"muon",
"muong",
"mural",
"muriel",
"murk",
"murky",
"murre",
"muse",
"mush",
"mushy",
"music",
"musk",
"muslim",
"must",
"musty",
"mute",
"mutt",
"muzak",
"muzo",
"mv",
"mw",
"mx",
"my",
"myel",
"myers",
"mylar",
"mynah",
"myopia",
"myra",
"myron",
"myrrh",
"myself",
"myth",
"mz",
"n",
"n's",
"na",
"naacp",
"nab",
"nadir",
"nag",
"nagoya",
"nagy",
"naiad",
"nail",
"nair",
"naive",
"naked",
"name",
"nan",
"nancy",
"naomi",
"nap",
"nary",
"nasa",
"nasal",
"nash",
"nasty",
"nat",
"natal",
"nate",
"nato",
"natty",
"nature",
"naval",
"nave",
"navel",
"navy",
"nay",
"nazi",
"nb",
"nbc",
"nbs",
"nc",
"ncaa",
"ncr",
"nd",
"ne",
"neal",
"near",
"neat",
"neath",
"neck",
"ned",
"nee",
"need",
"needy",
"neff",
"negate",
"negro",
"nehru",
"neil",
"nell",
"nelsen",
"neon",
"nepal",
"nero",
"nerve",
"ness",
"nest",
"net",
"neuron",
"neva",
"neve",
"new",
"newel",
"newt",
"next",
"nf",
"ng",
"nh",
"ni",
"nib",
"nibs",
"nice",
"nicety",
"niche",
"nick",
"niece",
"niger",
"nigh",
"night",
"nih",
"nikko",
"nil",
"nile",
"nimbus",
"nimh",
"nina",
"nine",
"ninth",
"niobe",
"nip",
"nit",
"nitric",
"nitty",
"nixon",
"nj",
"nk",
"nl",
"nm",
"nn",
"nnn",
"nnnn",
"no",
"noaa",
"noah",
"nob",
"nobel",
"noble",
"nod",
"nodal",
"node",
"noel",
"noise",
"noisy",
"nolan",
"noll",
"nolo",
"nomad",
"non",
"nonce",
"none",
"nook",
"noon",
"noose",
"nop",
"nor",
"nora",
"norm",
"norma",
"north",
"norway",
"nose",
"not",
"notch",
"note",
"notre",
"noun",
"nov",
"nova",
"novak",
"novel",
"novo",
"now",
"np",
"nq",
"nr",
"nrc",
"ns",
"nsf",
"nt",
"ntis",
"nu",
"nuance",
"nubia",
"nuclei",
"nude",
"nudge",
"null",
"numb",
"nun",
"nurse",
"nut",
"nv",
"nw",
"nx",
"ny",
"nyc",
"nylon",
"nymph",
"nyu",
"nz",
"o",
"o'er",
"o's",
"oa",
"oaf",
"oak",
"oaken",
"oakley",
"oar",
"oases",
"oasis",
"oat",
"oath",
"ob",
"obese",
"obey",
"objet",
"oboe",
"oc",
"occur",
"ocean",
"oct",
"octal",
"octave",
"octet",
"od",
"odd",
"ode",
"odin",
"odium",
"oe",
"of",
"off",
"offal",
"offend",
"offer",
"oft",
"often",
"og",
"ogden",
"ogle",
"ogre",
"oh",
"ohio",
"ohm",
"ohmic",
"oi",
"oil",
"oily",
"oint",
"oj",
"ok",
"okay",
"ol",
"olaf",
"olav",
"old",
"olden",
"oldy",
"olga",
"olin",
"olive",
"olsen",
"olson",
"om",
"omaha",
"oman",
"omega",
"omen",
"omit",
"on",
"once",
"one",
"onion",
"only",
"onset",
"onto",
"onus",
"onward",
"onyx",
"oo",
"ooo",
"oooo",
"ooze",
"op",
"opal",
"opec",
"opel",
"open",
"opera",
"opium",
"opt",
"optic",
"opus",
"oq",
"or",
"oral",
"orate",
"orb",
"orbit",
"orchid",
"ordain",
"order",
"ore",
"organ",
"orgy",
"orin",
"orion",
"ornery",
"orono",
"orr",
"os",
"osaka",
"oscar",
"osier",
"oslo",
"ot",
"other",
"otis",
"ott",
"otter",
"otto",
"ou",
"ouch",
"ought",
"ounce",
"our",
"oust",
"out",
"ouvre",
"ouzel",
"ouzo",
"ov",
"ova",
"oval",
"ovary",
"ovate",
"oven",
"over",
"overt",
"ovid",
"ow",
"owe",
"owens",
"owing",
"owl",
"owly",
"own",
"ox",
"oxen",
"oxeye",
"oxide",
"oxnard",
"oy",
"oz",
"ozark",
"ozone",
"p",
"p's",
"pa",
"pablo",
"pabst",
"pace",
"pack",
"packet",
"pact",
"pad",
"paddy",
"padre",
"paean",
"pagan",
"page",
"paid",
"pail",
"pain",
"paine",
"paint",
"pair",
"pal",
"pale",
"pall",
"palm",
"palo",
"palsy",
"pam",
"pampa",
"pan",
"panama",
"panda",
"pane",
"panel",
"pang",
"panic",
"pansy",
"pant",
"panty",
"paoli",
"pap",
"papa",
"papal",
"papaw",
"paper",
"pappy",
"papua",
"par",
"parch",
"pardon",
"pare",
"pareto",
"paris",
"park",
"parke",
"parks",
"parr",
"parry",
"parse",
"part",
"party",
"pascal",
"pasha",
"paso",
"pass",
"passe",
"past",
"paste",
"pasty",
"pat",
"patch",
"pate",
"pater",
"path",
"patio",
"patsy",
"patti",
"patton",
"patty",
"paul",
"paula",
"pauli",
"paulo",
"pause",
"pave",
"paw",
"pawn",
"pax",
"pay",
"payday",
"payne",
"paz",
"pb",
"pbs",
"pc",
"pd",
"pe",
"pea",
"peace",
"peach",
"peak",
"peaky",
"peal",
"peale",
"pear",
"pearl",
"pease",
"peat",
"pebble",
"pecan",
"peck",
"pecos",
"pedal",
"pedro",
"pee",
"peed",
"peek",
"peel",
"peep",
"peepy",
"peer",
"peg",
"peggy",
"pelt",
"pen",
"penal",
"pence",
"pencil",
"pend",
"penh",
"penn",
"penna",
"penny",
"pent",
"peony",
"pep",
"peppy",
"pepsi",
"per",
"perch",
"percy",
"perez",
"peril",
"perk",
"perky",
"perle",
"perry",
"persia",
"pert",
"perth",
"peru",
"peruse",
"pest",
"peste",
"pet",
"petal",
"pete",
"peter",
"petit",
"petri",
"petty",
"pew",
"pewee",
"pf",
"pg",
"ph",
"ph.d",
"phage",
"phase",
"phd",
"phenol",
"phi",
"phil",
"phlox",
"phon",
"phone",
"phony",
"photo",
"phyla",
"physic",
"pi",
"piano",
"pica",
"pick",
"pickup",
"picky",
"pie",
"piece",
"pier",
"pierce",
"piety",
"pig",
"piggy",
"pike",
"pile",
"pill",
"pilot",
"pimp",
"pin",
"pinch",
"pine",
"ping",
"pinion",
"pink",
"pint",
"pinto",
"pion",
"piotr",
"pious",
"pip",
"pipe",
"piper",
"pique",
"pit",
"pitch",
"pith",
"pithy",
"pitney",
"pitt",
"pity",
"pius",
"pivot",
"pixel",
"pixy",
"pizza",
"pj",
"pk",
"pl",
"place",
"plague",
"plaid",
"plain",
"plan",
"plane",
"plank",
"plant",
"plasm",
"plat",
"plate",
"plato",
"play",
"playa",
"plaza",
"plea",
"plead",
"pleat",
"pledge",
"pliny",
"plod",
"plop",
"plot",
"plow",
"pluck",
"plug",
"plum",
"plumb",
"plume",
"plump",
"plunk",
"plus",
"plush",
"plushy",
"pluto",
"ply",
"pm",
"pn",
"po",
"poach",
"pobox",
"pod",
"podge",
"podia",
"poe",
"poem",
"poesy",
"poet",
"poetry",
"pogo",
"poi",
"point",
"poise",
"poke",
"pol",
"polar",
"pole",
"police",
"polio",
"polis",
"polk",
"polka",
"poll",
"polo",
"pomona",
"pomp",
"ponce",
"pond",
"pong",
"pont",
"pony",
"pooch",
"pooh",
"pool",
"poole",
"poop",
"poor",
"pop",
"pope",
"poppy",
"porch",
"pore",
"pork",
"porous",
"port",
"porte",
"portia",
"porto",
"pose",
"posey",
"posh",
"posit",
"posse",
"post",
"posy",
"pot",
"potts",
"pouch",
"pound",
"pour",
"pout",
"pow",
"powder",
"power",
"pp",
"ppm",
"ppp",
"pppp",
"pq",
"pqr",
"pr",
"prado",
"pram",
"prank",
"pratt",
"pray",
"preen",
"prefix",
"prep",
"press",
"prexy",
"prey",
"priam",
"price",
"prick",
"pride",
"prig",
"prim",
"prima",
"prime",
"primp",
"prince",
"print",
"prior",
"prism",
"prissy",
"privy",
"prize",
"pro",
"probe",
"prod",
"prof",
"prom",
"prone",
"prong",
"proof",
"prop",
"propyl",
"prose",
"proud",
"prove",
"prow",
"prowl",
"proxy",
"prune",
"pry",
"ps",
"psalm",
"psi",
"psych",
"pt",
"pta",
"pu",
"pub",
"puck",
"puddly",
"puerto",
"puff",
"puffy",
"pug",
"pugh",
"puke",
"pull",
"pulp",
"pulse",
"puma",
"pump",
"pun",
"punch",
"punic",
"punish",
"punk",
"punky",
"punt",
"puny",
"pup",
"pupal",
"pupil",
"puppy",
"pure",
"purge",
"purl",
"purr",
"purse",
"pus",
"pusan",
"pusey",
"push",
"pussy",
"put",
"putt",
"putty",
"pv",
"pvc",
"pw",
"px",
"py",
"pygmy",
"pyle",
"pyre",
"pyrex",
"pyrite",
"pz",
"q",
"q's",
"qa",
"qatar",
"qb",
"qc",
"qd",
"qe",
"qed",
"qf",
"qg",
"qh",
"qi",
"qj",
"qk",
"ql",
"qm",
"qn",
"qo",
"qp",
"qq",
"qqq",
"qqqq",
"qr",
"qrs",
"qs",
"qt",
"qu",
"qua",
"quack",
"quad",
"quaff",
"quail",
"quake",
"qualm",
"quark",
"quarry",
"quart",
"quash",
"quasi",
"quay",
"queasy",
"queen",
"queer",
"quell",
"query",
"quest",
"queue",
"quick",
"quid",
"quiet",
"quill",
"quilt",
"quinn",
"quint",
"quip",
"quirk",
"quirt",
"quit",
"quite",
"quito",
"quiz",
"quo",
"quod",
"quota",
"quote",
"qv",
"qw",
"qx",
"qy",
"qz",
"r",
"r&d",
"r's",
"ra",
"rabat",
"rabbi",
"rabbit",
"rabid",
"rabin",
"race",
"rack",
"racy",
"radar",
"radii",
"radio",
"radium",
"radix",
"radon",
"rae",
"rafael",
"raft",
"rag",
"rage",
"raid",
"rail",
"rain",
"rainy",
"raise",
"raj",
"rajah",
"rake",
"rally",
"ralph",
"ram",
"raman",
"ramo",
"ramp",
"ramsey",
"ran",
"ranch",
"rand",
"randy",
"rang",
"range",
"rangy",
"rank",
"rant",
"raoul",
"rap",
"rape",
"rapid",
"rapt",
"rare",
"rasa",
"rascal",
"rash",
"rasp",
"rat",
"rata",
"rate",
"rater",
"ratio",
"rattle",
"raul",
"rave",
"ravel",
"raven",
"raw",
"ray",
"raze",
"razor",
"rb",
"rc",
"rca",
"rd",
"re",
"reach",
"read",
"ready",
"reagan",
"real",
"realm",
"ream",
"reap",
"rear",
"reave",
"reb",
"rebel",
"rebut",
"recipe",
"reck",
"recur",
"red",
"redeem",
"reduce",
"reed",
"reedy",
"reef",
"reek",
"reel",
"reese",
"reeve",
"refer",
"regal",
"regina",
"regis",
"reich",
"reid",
"reign",
"rein",
"relax",
"relay",
"relic",
"reman",
"remedy",
"remit",
"remus",
"rena",
"renal",
"rend",
"rene",
"renown",
"rent",
"rep",
"repel",
"repent",
"resin",
"resort",
"rest",
"ret",
"retch",
"return",
"reub",
"rev",
"reveal",
"revel",
"rever",
"revet",
"revved",
"rex",
"rf",
"rg",
"rh",
"rhea",
"rheum",
"rhine",
"rhino",
"rho",
"rhoda",
"rhode",
"rhyme",
"ri",
"rib",
"rica",
"rice",
"rich",
"rick",
"rico",
"rid",
"ride",
"ridge",
"rifle",
"rift",
"rig",
"riga",
"rigel",
"riggs",
"right",
"rigid",
"riley",
"rill",
"rilly",
"rim",
"rime",
"rimy",
"ring",
"rink",
"rinse",
"rio",
"riot",
"rip",
"ripe",
"ripen",
"ripley",
"rise",
"risen",
"risk",
"risky",
"rite",
"ritz",
"rival",
"riven",
"river",
"rivet",
"riyadh",
"rj",
"rk",
"rl",
"rm",
"rn",
"ro",
"roach",
"road",
"roam",
"roar",
"roast",
"rob",
"robe",
"robin",
"robot",
"rock",
"rocket",
"rocky",
"rod",
"rode",
"rodeo",
"roe",
"roger",
"rogue",
"roil",
"role",
"roll",
"roman",
"rome",
"romeo",
"romp",
"ron",
"rondo",
"rood",
"roof",
"rook",
"rookie",
"rooky",
"room",
"roomy",
"roost",
"root",
"rope",
"rosa",
"rose",
"rosen",
"ross",
"rosy",
"rot",
"rotc",
"roth",
"rotor",
"rouge",
"rough",
"round",
"rouse",
"rout",
"route",
"rove",
"row",
"rowdy",
"rowe",
"roy",
"royal",
"royce",
"rp",
"rpm",
"rq",
"rr",
"rrr",
"rrrr",
"rs",
"rst",
"rsvp",
"rt",
"ru",
"ruanda",
"rub",
"rube",
"ruben",
"rubin",
"rubric",
"ruby",
"ruddy",
"rude",
"rudy",
"rue",
"rufus",
"rug",
"ruin",
"rule",
"rum",
"rumen",
"rummy",
"rump",
"rumpus",
"run",
"rune",
"rung",
"runge",
"runic",
"runt",
"runty",
"rupee",
"rural",
"ruse",
"rush",
"rusk",
"russ",
"russo",
"rust",
"rusty",
"rut",
"ruth",
"rutty",
"rv",
"rw",
"rx",
"ry",
"ryan",
"ryder",
"rye",
"rz",
"s",
"s's",
"sa",
"sabine",
"sable",
"sabra",
"sac",
"sachs",
"sack",
"sad",
"saddle",
"sadie",
"safari",
"safe",
"sag",
"saga",
"sage",
"sago",
"said",
"sail",
"saint",
"sake",
"sal",
"salad",
"sale",
"salem",
"saline",
"salk",
"salle",
"sally",
"salon",
"salt",
"salty",
"salve",
"salvo",
"sam",
"samba",
"same",
"sammy",
"samoa",
"samuel",
"san",
"sana",
"sand",
"sandal",
"sandy",
"sane",
"sang",
"sank",
"sans",
"santa",
"santo",
"sao",
"sap",
"sappy",
"sara",
"sarah",
"saran",
"sari",
"sash",
"sat",
"satan",
"satin",
"satyr",
"sauce",
"saucy",
"saud",
"saudi",
"saul",
"sault",
"saute",
"save",
"savoy",
"savvy",
"saw",
"sawyer",
"sax",
"saxon",
"say",
"sb",
"sc",
"scab",
"scala",
"scald",
"scale",
"scalp",
"scam",
"scamp",
"scan",
"scant",
"scar",
"scare",
"scarf",
"scary",
"scat",
"scaup",
"scene",
"scent",
"school",
"scion",
"scm",
"scoff",
"scold",
"scoop",
"scoot",
"scope",
"scops",
"score",
"scoria",
"scorn",
"scot",
"scott",
"scour",
"scout",
"scowl",
"scram",
"scrap",
"scrape",
"screw",
"scrim",
"scrub",
"scuba",
"scud",
"scuff",
"scull",
"scum",
"scurry",
"sd",
"se",
"sea",
"seal",
"seam",
"seamy",
"sean",
"sear",
"sears",
"season",
"seat",
"sec",
"secant",
"sect",
"sedan",
"seder",
"sedge",
"see",
"seed",
"seedy",
"seek",
"seem",
"seen",
"seep",
"seethe",
"seize",
"self",
"sell",
"selma",
"semi",
"sen",
"send",
"seneca",
"senor",
"sense",
"sent",
"sentry",
"seoul",
"sepal",
"sepia",
"sepoy",
"sept",
"septa",
"sequin",
"sera",
"serf",
"serge",
"serif",
"serum",
"serve",
"servo",
"set",
"seth",
"seton",
"setup",
"seven",
"sever",
"severe",
"sew",
"sewn",
"sex",
"sexy",
"sf",
"sg",
"sh",
"shack",
"shad",
"shade",
"shady",
"shafer",
"shaft",
"shag",
"shah",
"shake",
"shaken",
"shako",
"shaky",
"shale",
"shall",
"sham",
"shame",
"shank",
"shape",
"shard",
"share",
"shari",
"shark",
"sharp",
"shave",
"shaw",
"shawl",
"shay",
"she",
"she'd",
"shea",
"sheaf",
"shear",
"sheath",
"shed",
"sheen",
"sheep",
"sheer",
"sheet",
"sheik",
"shelf",
"shell",
"shied",
"shift",
"shill",
"shim",
"shin",
"shine",
"shinto",
"shiny",
"ship",
"shire",
"shirk",
"shirt",
"shish",
"shiv",
"shoal",
"shock",
"shod",
"shoe",
"shoji",
"shone",
"shoo",
"shook",
"shoot",
"shop",
"shore",
"short",
"shot",
"shout",
"shove",
"show",
"shown",
"showy",
"shrank",
"shred",
"shrew",
"shrike",
"shrub",
"shrug",
"shu",
"shuck",
"shun",
"shunt",
"shut",
"shy",
"si",
"sial",
"siam",
"sian",
"sib",
"sibley",
"sibyl",
"sic",
"sick",
"side",
"sidle",
"siege",
"siena",
"sieve",
"sift",
"sigh",
"sight",
"sigma",
"sign",
"signal",
"signor",
"silas",
"silk",
"silky",
"sill",
"silly",
"silo",
"silt",
"silty",
"sima",
"simon",
"simons",
"sims",
"sin",
"sinai",
"since",
"sine",
"sinew",
"sing",
"singe",
"sinh",
"sink",
"sinus",
"sioux",
"sip",
"sir",
"sire",
"siren",
"sis",
"sisal",
"sit",
"site",
"situ",
"situs",
"siva",
"six",
"sixgun",
"sixth",
"sixty",
"size",
"sj",
"sk",
"skat",
"skate",
"skeet",
"skew",
"ski",
"skid",
"skied",
"skiff",
"skill",
"skim",
"skimp",
"skimpy",
"skin",
"skip",
"skirt",
"skit",
"skulk",
"skull",
"skunk",
"sky",
"skye",
"sl",
"slab",
"slack",
"slag",
"slain",
"slake",
"slam",
"slang",
"slant",
"slap",
"slash",
"slat",
"slate",
"slater",
"slav",
"slave",
"slay",
"sled",
"sleek",
"sleep",
"sleet",
"slept",
"slew",
"slice",
"slick",
"slid",
"slide",
"slim",
"slime",
"slimy",
"sling",
"slip",
"slit",
"sliver",
"sloan",
"slob",
"sloe",
"slog",
"sloop",
"slop",
"slope",
"slosh",
"slot",
"sloth",
"slow",
"slug",
"sluice",
"slum",
"slump",
"slung",
"slur",
"slurp",
"sly",
"sm",
"smack",
"small",
"smart",
"smash",
"smear",
"smell",
"smelt",
"smile",
"smirk",
"smith",
"smithy",
"smog",
"smoke",
"smoky",
"smug",
"smut",
"sn",
"snack",
"snafu",
"snag",
"snail",
"snake",
"snap",
"snare",
"snark",
"snarl",
"snatch",
"sneak",
"sneer",
"snell",
"snick",
"sniff",
"snip",
"snipe",
"snob",
"snook",
"snoop",
"snore",
"snort",
"snout",
"snow",
"snowy",
"snub",
"snuff",
"snug",
"so",
"soak",
"soap",
"soapy",
"soar",
"sob",
"sober",
"social",
"sock",
"sod",
"soda",
"sofa",
"sofia",
"soft",
"soften",
"soggy",
"soil",
"sol",
"solar",
"sold",
"sole",
"solemn",
"solid",
"solo",
"solon",
"solve",
"soma",
"somal",
"some",
"son",
"sonar",
"song",
"sonic",
"sonny",
"sonora",
"sony",
"soon",
"soot",
"sooth",
"sop",
"sora",
"sorb",
"sore",
"sorry",
"sort",
"sos",
"sou",
"sough",
"soul",
"sound",
"soup",
"sour",
"source",
"sousa",
"south",
"sow",
"sown",
"soy",
"soya",
"sp",
"spa",
"space",
"spade",
"spain",
"span",
"spar",
"spare",
"sparge",
"spark",
"spasm",
"spat",
"spate",
"spawn",
"spay",
"speak",
"spear",
"spec",
"speck",
"sped",
"speed",
"spell",
"spend",
"spent",
"sperm",
"sperry",
"spew",
"spica",
"spice",
"spicy",
"spike",
"spiky",
"spill",
"spilt",
"spin",
"spine",
"spiny",
"spire",
"spiro",
"spit",
"spite",
"spitz",
"splat",
"splay",
"spline",
"split",
"spoil",
"spoke",
"spoof",
"spook",
"spooky",
"spool",
"spoon",
"spore",
"sport",
"spot",
"spout",
"sprain",
"spray",
"spree",
"sprig",
"spruce",
"sprue",
"spud",
"spume",
"spun",
"spunk",
"spur",
"spurn",
"spurt",
"spy",
"sq",
"squad",
"squat",
"squaw",
"squibb",
"squid",
"squint",
"sr",
"sri",
"ss",
"sss",
"ssss",
"sst",
"st",
"st.",
"stab",
"stack",
"stacy",
"staff",
"stag",
"stage",
"stagy",
"stahl",
"staid",
"stain",
"stair",
"stake",
"stale",
"stalk",
"stall",
"stamp",
"stan",
"stance",
"stand",
"stank",
"staph",
"star",
"stare",
"stark",
"starr",
"start",
"stash",
"state",
"statue",
"stave",
"stay",
"stead",
"steak",
"steal",
"steam",
"steed",
"steel",
"steele",
"steen",
"steep",
"steer",
"stein",
"stella",
"stem",
"step",
"stern",
"steve",
"stew",
"stick",
"stiff",
"stile",
"still",
"stilt",
"sting",
"stingy",
"stink",
"stint",
"stir",
"stock",
"stoic",
"stoke",
"stole",
"stomp",
"stone",
"stony",
"stood",
"stool",
"stoop",
"stop",
"store",
"storey",
"stork",
"storm",
"story",
"stout",
"stove",
"stow",
"strafe",
"strap",
"straw",
"stray",
"strewn",
"strip",
"stroll",
"strom",
"strop",
"strum",
"strut",
"stu",
"stuart",
"stub",
"stuck",
"stud",
"study",
"stuff",
"stuffy",
"stump",
"stun",
"stung",
"stunk",
"stunt",
"sturm",
"style",
"styli",
"styx",
"su",
"suave",
"sub",
"subtly",
"such",
"suck",
"sud",
"sudan",
"suds",
"sue",
"suey",
"suez",
"sugar",
"suit",
"suite",
"sulfa",
"sulk",
"sulky",
"sully",
"sultry",
"sum",
"sumac",
"summon",
"sun",
"sung",
"sunk",
"sunny",
"sunset",
"suny",
"sup",
"super",
"supra",
"sure",
"surf",
"surge",
"sus",
"susan",
"sushi",
"susie",
"sutton",
"sv",
"sw",
"swab",
"swag",
"swain",
"swam",
"swami",
"swamp",
"swampy",
"swan",
"swank",
"swap",
"swarm",
"swart",
"swat",
"swath",
"sway",
"swear",
"sweat",
"sweaty",
"swede",
"sweep",
"sweet",
"swell",
"swelt",
"swept",
"swift",
"swig",
"swim",
"swine",
"swing",
"swipe",
"swirl",
"swish",
"swiss",
"swoop",
"sword",
"swore",
"sworn",
"swum",
"swung",
"sx",
"sy",
"sybil",
"sykes",
"sylow",
"sylvan",
"synge",
"synod",
"syria",
"syrup",
"sz",
"t",
"t's",
"ta",
"tab",
"table",
"taboo",
"tabu",
"tabula",
"tacit",
"tack",
"tacky",
"tacoma",
"tact",
"tad",
"taffy",
"taft",
"tag",
"tahoe",
"tail",
"taint",
"take",
"taken",
"talc",
"tale",
"talk",
"talky",
"tall",
"tallow",
"tally",
"talon",
"talus",
"tam",
"tame",
"tamp",
"tampa",
"tan",
"tang",
"tango",
"tangy",
"tanh",
"tank",
"tansy",
"tanya",
"tao",
"taos",
"tap",
"tapa",
"tape",
"taper",
"tapir",
"tapis",
"tappa",
"tar",
"tara",
"tardy",
"tariff",
"tarry",
"tart",
"task",
"tass",
"taste",
"tasty",
"tat",
"tate",
"tater",
"tattle",
"tatty",
"tau",
"taunt",
"taut",
"tavern",
"tawny",
"tax",
"taxi",
"tb",
"tc",
"td",
"te",
"tea",
"teach",
"teal",
"team",
"tear",
"tease",
"teat",
"tech",
"tecum",
"ted",
"teddy",
"tee",
"teem",
"teen",
"teensy",
"teet",
"teeth",
"telex",
"tell",
"tempo",
"tempt",
"ten",
"tend",
"tenet",
"tenney",
"tenon",
"tenor",
"tense",
"tensor",
"tent",
"tenth",
"tepee",
"tepid",
"term",
"tern",
"terra",
"terre",
"terry",
"terse",
"tess",
"test",
"testy",
"tete",
"texan",
"texas",
"text",
"tf",
"tg",
"th",
"thai",
"than",
"thank",
"that",
"thaw",
"the",
"thea",
"thee",
"theft",
"their",
"them",
"theme",
"then",
"there",
"these",
"theta",
"they",
"thick",
"thief",
"thigh",
"thin",
"thine",
"thing",
"think",
"third",
"this",
"thong",
"thor",
"thorn",
"thorny",
"those",
"thou",
"thread",
"three",
"threw",
"throb",
"throes",
"throw",
"thrum",
"thud",
"thug",
"thule",
"thumb",
"thump",
"thus",
"thy",
"thyme",
"ti",
"tiber",
"tibet",
"tibia",
"tic",
"tick",
"ticket",
"tid",
"tidal",
"tidbit",
"tide",
"tidy",
"tie",
"tied",
"tier",
"tift",
"tiger",
"tight",
"til",
"tilde",
"tile",
"till",
"tilt",
"tilth",
"tim",
"time",
"timex",
"timid",
"timon",
"tin",
"tina",
"tine",
"tinge",
"tint",
"tiny",
"tioga",
"tip",
"tipoff",
"tippy",
"tipsy",
"tire",
"tit",
"titan",
"tithe",
"title",
"titus",
"tj",
"tk",
"tl",
"tm",
"tn",
"tnt",
"to",
"toad",
"toady",
"toast",
"toby",
"today",
"todd",
"toe",
"tofu",
"tog",
"togo",
"togs",
"toil",
"toilet",
"token",
"tokyo",
"told",
"toll",
"tom",
"tomb",
"tome",
"tommy",
"ton",
"tonal",
"tone",
"tong",
"toni",
"tonic",
"tonk",
"tonsil",
"tony",
"too",
"took",
"tool",
"toot",
"tooth",
"top",
"topaz",
"topic",
"topple",
"topsy",
"tor",
"torah",
"torch",
"tore",
"tori",
"torn",
"torr",
"torso",
"tort",
"torus",
"tory",
"toss",
"tot",
"total",
"tote",
"totem",
"touch",
"tough",
"tour",
"tout",
"tow",
"towel",
"tower",
"town",
"toxic",
"toxin",
"toy",
"tp",
"tq",
"tr",
"trace",
"track",
"tract",
"tracy",
"trade",
"trag",
"trail",
"train",
"trait",
"tram",
"tramp",
"trap",
"trash",
"trawl",
"tray",
"tread",
"treat",
"treble",
"tree",
"trek",
"trench",
"trend",
"tress",
"triad",
"trial",
"tribe",
"trick",
"tried",
"trig",
"trill",
"trim",
"trio",
"trip",
"tripe",
"trite",
"triton",
"trod",
"troll",
"troop",
"trot",
"trout",
"troy",
"truce",
"truck",
"trudge",
"trudy",
"true",
"truly",
"trump",
"trunk",
"truss",
"trust",
"truth",
"trw",
"try",
"ts",
"tsar",
"tt",
"ttl",
"ttt",
"tttt",
"tty",
"tu",
"tub",
"tuba",
"tube",
"tuck",
"tudor",
"tuff",
"tuft",
"tug",
"tulane",
"tulip",
"tulle",
"tulsa",
"tum",
"tun",
"tuna",
"tune",
"tung",
"tunic",
"tunis",
"tunnel",
"tuple",
"turf",
"turin",
"turk",
"turn",
"turvy",
"tusk",
"tussle",
"tutor",
"tutu",
"tuv",
"tv",
"tva",
"tw",
"twa",
"twain",
"tweak",
"tweed",
"twice",
"twig",
"twill",
"twin",
"twine",
"twirl",
"twist",
"twisty",
"twit",
"two",
"twx",
"tx",
"ty",
"tyburn",
"tying",
"tyler",
"type",
"typic",
"typo",
"tyson",
"tz",
"u",
"u's",
"ua",
"ub",
"uc",
"ucla",
"ud",
"ue",
"uf",
"ug",
"ugh",
"ugly",
"uh",
"ui",
"uj",
"uk",
"ul",
"ulan",
"ulcer",
"ultra",
"um",
"umber",
"umbra",
"umpire",
"un",
"unary",
"uncle",
"under",
"unify",
"union",
"unit",
"unite",
"unity",
"unix",
"until",
"uo",
"up",
"upend",
"uphold",
"upon",
"upper",
"uproar",
"upset",
"uptake",
"upton",
"uq",
"ur",
"urban",
"urbane",
"urea",
"urge",
"uri",
"urine",
"uris",
"urn",
"ursa",
"us",
"usa",
"usaf",
"usage",
"usc",
"usda",
"use",
"useful",
"usgs",
"usher",
"usia",
"usn",
"usps",
"ussr",
"usual",
"usurp",
"usury",
"ut",
"utah",
"utica",
"utile",
"utmost",
"utter",
"uu",
"uuu",
"uuuu",
"uv",
"uvw",
"uw",
"ux",
"uy",
"uz",
"v",
"v's",
"va",
"vacua",
"vacuo",
"vade",
"vaduz",
"vague",
"vail",
"vain",
"vale",
"valet",
"valeur",
"valid",
"value",
"valve",
"vamp",
"van",
"vance",
"vane",
"vary",
"vase",
"vast",
"vat",
"vault",
"vb",
"vc",
"vd",
"ve",
"veal",
"veda",
"vee",
"veer",
"veery",
"vega",
"veil",
"vein",
"velar",
"veldt",
"vella",
"vellum",
"venal",
"vend",
"venial",
"venom",
"vent",
"venus",
"vera",
"verb",
"verde",
"verdi",
"verge",
"verity",
"verna",
"verne",
"versa",
"verse",
"verve",
"very",
"vessel",
"vest",
"vet",
"vetch",
"veto",
"vex",
"vf",
"vg",
"vh",
"vi",
"via",
"vial",
"vicar",
"vice",
"vichy",
"vicky",
"vida",
"video",
"vie",
"viet",
"view",
"vigil",
"vii",
"viii",
"vile",
"villa",
"vine",
"vinyl",
"viola",
"violet",
"virgil",
"virgo",
"virus",
"vis",
"visa",
"vise",
"visit",
"visor",
"vista",
"vita",
"vitae",
"vital",
"vito",
"vitro",
"viva",
"vivian",
"vivid",
"vivo",
"vixen",
"viz",
"vj",
"vk",
"vl",
"vm",
"vn",
"vo",
"vocal",
"vogel",
"vogue",
"voice",
"void",
"volt",
"volta",
"volvo",
"vomit",
"von",
"voss",
"vote",
"vouch",
"vow",
"vowel",
"vp",
"vq",
"vr",
"vs",
"vt",
"vu",
"vulcan",
"vv",
"vvv",
"vvvv",
"vw",
"vx",
"vy",
"vying",
"vz",
"w",
"w's",
"wa",
"waals",
"wac",
"wack",
"wacke",
"wacky",
"waco",
"wad",
"wade",
"wadi",
"wafer",
"wag",
"wage",
"waggle",
"wah",
"wahl",
"wail",
"waist",
"wait",
"waite",
"waive",
"wake",
"waken",
"waldo",
"wale",
"walk",
"walkie",
"wall",
"walls",
"wally",
"walsh",
"walt",
"walton",
"waltz",
"wan",
"wand",
"wane",
"wang",
"want",
"war",
"ward",
"ware",
"warm",
"warmth",
"warn",
"warp",
"warren",
"wart",
"warty",
"wary",
"was",
"wash",
"washy",
"wasp",
"wast",
"waste",
"watch",
"water",
"watt",
"watts",
"wave",
"wavy",
"wax",
"waxen",
"waxy",
"way",
"wayne",
"wb",
"wc",
"wd",
"we",
"we'd",
"we'll",
"we're",
"we've",
"weak",
"weal",
"wealth",
"wean",
"wear",
"weary",
"weave",
"web",
"webb",
"weber",
"weco",
"wed",
"wedge",
"wee",
"weed",
"weedy",
"week",
"weeks",
"weep",
"wehr",
"wei",
"weigh",
"weir",
"weird",
"weiss",
"welch",
"weld",
"well",
"wells",
"welsh",
"welt",
"wendy",
"went",
"wept",
"were",
"wert",
"west",
"wet",
"wf",
"wg",
"wh",
"whack",
"whale",
"wham",
"wharf",
"what",
"wheat",
"whee",
"wheel",
"whelk",
"whelm",
"whelp",
"when",
"where",
"whet",
"which",
"whiff",
"whig",
"while",
"whim",
"whine",
"whinny",
"whip",
"whir",
"whirl",
"whisk",
"whit",
"white",
"whiz",
"who",
"who'd",
"whoa",
"whole",
"whom",
"whoop",
"whoosh",
"whop",
"whose",
"whup",
"why",
"wi",
"wick",
"wide",
"widen",
"widow",
"width",
"wield",
"wier",
"wife",
"wig",
"wild",
"wile",
"wiley",
"wilkes",
"will",
"willa",
"wills",
"wilma",
"wilt",
"wily",
"win",
"wince",
"winch",
"wind",
"windy",
"wine",
"wing",
"wink",
"winnie",
"wino",
"winter",
"winy",
"wipe",
"wire",
"wiry",
"wise",
"wish",
"wishy",
"wisp",
"wispy",
"wit",
"witch",
"with",
"withe",
"withy",
"witt",
"witty",
"wive",
"wj",
"wk",
"wl",
"wm",
"wn",
"wo",
"woe",
"wok",
"woke",
"wold",
"wolf",
"wolfe",
"wolff",
"wolve",
"woman",
"womb",
"women",
"won",
"won't",
"wonder",
"wong",
"wont",
"woo",
"wood",
"woods",
"woody",
"wool",
"woozy",
"word",
"wordy",
"wore",
"work",
"world",
"worm",
"wormy",
"worn",
"worry",
"worse",
"worst",
"worth",
"wotan",
"would",
"wound",
"wove",
"woven",
"wow",
"wp",
"wq",
"wr",
"wrack",
"wrap",
"wrath",
"wreak",
"wreck",
"wrest",
"wring",
"wrist",
"writ",
"write",
"writhe",
"wrong",
"wrote",
"wry",
"ws",
"wt",
"wu",
"wuhan",
"wv",
"ww",
"www",
"wwww",
"wx",
"wxy",
"wy",
"wyatt",
"wyeth",
"wylie",
"wyman",
"wyner",
"wynn",
"wz",
"x",
"x's",
"xa",
"xb",
"xc",
"xd",
"xe",
"xenon",
"xerox",
"xf",
"xg",
"xh",
"xi",
"xj",
"xk",
"xl",
"xm",
"xn",
"xo",
"xp",
"xq",
"xr",
"xs",
"xt",
"xu",
"xv",
"xw",
"xx",
"xxx",
"xxxx",
"xy",
"xylem",
"xyz",
"xz",
"y",
"y's",
"ya",
"yacht",
"yah",
"yak",
"yale",
"yalta",
"yam",
"yamaha",
"yang",
"yank",
"yap",
"yaqui",
"yard",
"yarn",
"yates",
"yaw",
"yawl",
"yawn",
"yb",
"yc",
"yd",
"ye",
"yea",
"yeah",
"year",
"yearn",
"yeast",
"yeasty",
"yeats",
"yell",
"yelp",
"yemen",
"yen",
"yet",
"yf",
"yg",
"yh",
"yi",
"yield",
"yin",
"yip",
"yj",
"yk",
"yl",
"ym",
"ymca",
"yn",
"yo",
"yodel",
"yoder",
"yoga",
"yogi",
"yoke",
"yokel",
"yolk",
"yon",
"yond",
"yore",
"york",
"yost",
"you",
"you'd",
"young",
"your",
"youth",
"yow",
"yp",
"yq",
"yr",
"ys",
"yt",
"yu",
"yucca",
"yuck",
"yuh",
"yuki",
"yukon",
"yule",
"yv",
"yves",
"yw",
"ywca",
"yx",
"yy",
"yyy",
"yyyy",
"yz",
"z",
"z's",
"za",
"zag",
"zaire",
"zan",
"zap",
"zazen",
"zb",
"zc",
"zd",
"ze",
"zeal",
"zealot",
"zebra",
"zeiss",
"zen",
"zero",
"zest",
"zesty",
"zeta",
"zeus",
"zf",
"zg",
"zh",
"zi",
"zig",
"zilch",
"zinc",
"zing",
"zion",
"zip",
"zj",
"zk",
"zl",
"zloty",
"zm",
"zn",
"zo",
"zoe",
"zomba",
"zone",
"zoo",
"zoom",
"zorn",
"zp",
"zq",
"zr",
"zs",
"zt",
"zu",
"zurich",
"zv",
"zw",
"zx",
"zy",
"zz",
"zzz",
"zzzz",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"20",
"21",
"22",
"23",
"24",
"25",
"26",
"27",
"28",
"29",
"30",
"31",
"32",
"33",
"34",
"35",
"36",
"37",
"38",
"39",
"40",
"41",
"42",
"43",
"44",
"45",
"46",
"47",
"48",
"49",
"50",
"51",
"52",
"53",
"54",
"55",
"56",
"57",
"58",
"59",
"60",
"61",
"62",
"63",
"64",
"65",
"66",
"67",
"68",
"69",
"70",
"71",
"72",
"73",
"74",
"75",
"76",
"77",
"78",
"79",
"80",
"81",
"82",
"83",
"84",
"85",
"86",
"87",
"88",
"89",
"90",
"91",
"92",
"93",
"94",
"95",
"96",
"97",
"98",
"99",
"100",
"101",
"111",
"123",
"200",
"222",
"234",
"300",
"333",
"345",
"400",
"444",
"456",
"500",
"555",
"567",
"600",
"666",
"678",
"700",
"777",
"789",
"800",
"888",
"900",
"999",
"1000",
"1111",
"1234",
"1492",
"1500",
"1600",
"1700",
"1776",
"1800",
"1812",
"1900",
"1910",
"1920",
"1925",
"1930",
"1935",
"1940",
"1945",
"1950",
"1955",
"1960",
"1965",
"1970",
"1975",
"1980",
"1985",
"1990",
"1991",
"1992",
"1993",
"1994",
"1995",
"1996",
"1997",
"2000",
"2001",
"2020",
"2222",
"2345",
"2468",
"3000",
"3333",
"3456",
"4000",
"4321",
"4444",
"4567",
"5000",
"5555",
"5678",
"6000",
"6666",
"6789",
"7000",
"7777",
"8000",
"8888",
"9000",
"9876",
"9999",
"100th",
"101st",
"10th",
"11th",
"12th",
"13th",
"14th",
"15th",
"16th",
"17th",
"18th",
"19th",
"1st",
"20th",
"21st",
"22nd",
"23rd",
"24th",
"25th",
"26th",
"27th",
"28th",
"29th",
"2nd",
"30th",
"31st",
"32nd",
"33rd",
"34th",
"35th",
"36th",
"37th",
"38th",
"39th",
"3rd",
"40th",
"41st",
"42nd",
"43rd",
"44th",
"45th",
"46th",
"47th",
"48th",
"49th",
"4th",
"50th",
"51st",
"52nd",
"53rd",
"54th",
"55th",
"56th",
"57th",
"58th",
"59th",
"5th",
"60th",
"61st",
"62nd",
"63rd",
"65th",
"66th",
"67th",
"68th",
"69th",
"6th",
"70th",
"71st",
"72nd",
"73rd",
"74th",
"75th",
"76th",
"77th",
"78th",
"79th",
"7th",
"80th",
"81st",
"82nd",
"83rd",
"84th",
"85th",
"86th",
"87th",
"88th",
"89th",
"8th",
"90th",
"91st",
"92nd",
"93rd",
"94th",
"95th",
"96th",
"97th",
"98th",
"99th",
"9th",
"!",
"!!",
"\"",
"#",
"##",
"$",
"$$",
"%",
"%%",
"&",
"(",
"()",
")",
"*",
"**",
"+",
"-",
":",
";",
"=",
"?",
"??",
"@"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate a diceware password.")
parser.add_argument("password_length", metavar="N", type=int, default=6,
nargs="?",
help="The desired number of diceware words for your password")
args = parser.parse_args()
current_password = ""
for x in range(args.password_length):
current_dice_roll = random.randint(0, 7775)
current_password += (words[current_dice_roll] + " ")
print(current_password[:-1])
| changlinli/diceware-generators | diceware.py | Python | unlicense | 64,687 | [
"Amber",
"BLAST",
"Brian",
"DIRAC",
"Elk",
"GULP",
"Galaxy",
"MOE",
"MOOSE",
"NEURON",
"VisIt"
] | 0dd48547ba8856eda9eeaa8b4e3714c3c6e4e17f08dd06850f3a4fc2b944b25d |
#!/usr/bin/env python
"""psi_parse.py -- run psiblast over a directory of fasta files and map hits to taxids.

Python 2 script: uses print statements, xrange and dict.has_key below.
"""
from __future__ import division
import re
import os
import sys
from glob import glob
# BUGFIX: `from getopt import getopt` binds the name `getopt` to the FUNCTION,
# so the original `except getopt.GetoptError:` raised AttributeError instead of
# catching a malformed command line.  Import the exception class directly.
from getopt import getopt, GetoptError

# Command-line option defaults
blastdb=""          # -d: path to the protein BLAST database
gi2taxid_file=""    # -g: gi -> taxid dump file (two whitespace-separated columns)
do_psiblast=True    # -p False: reuse already-prepared .psiblast files
htaxid={}           # gi -> taxid lookup table, filled from gi2taxid_file
taxhash={}
numOfIter=3         # -i: number of psiblast iterations
out=False           # -o: also produce `names` and `taxidlist` output files
rm_psiblast=False   # -r: delete .psiblast files after parsing
psi_path=""         # -l: directory prefix for the psiblast binary if not on PATH

try:
    opts, args = getopt(sys.argv[1:], "hd:g:p:i:orl:", ["help", "database=", "gi2taxid_file=", "psiblast=", "num_of_psiblast_iteations=", "output", "remove_psiblast_files", "psiblast_path="])
except GetoptError:
    # NOTE(review): usage() is defined further down the module, so this call
    # still raises NameError when option parsing fails at import time --
    # the function definitions should be moved above this block.
    usage()
    sys.exit(2)
for opt, arg in opts:
    if opt in ("-h", "--help"):
        usage()
        sys.exit()
    elif opt in ("-d", "--database"):
        blastdb = arg
    elif opt in ("-g", "--gi2taxid_file"):
        gi2taxid_file = arg
    elif opt in ("-p", "--psiblast"):
        if arg=="False" or arg=="false": do_psiblast = False
    elif opt in ("-i", "--num_of_psiblast_iteations"):
        numOfIter = int(arg)
    elif opt in ("-o", "--output"):
        out=True
    elif opt in ("-r", "--remove_psiblast_files"):
        rm_psiblast=True
    elif opt in ("-l", "--psiblast_path"):
        psi_path=arg
def usage():
print "\nusage: ./psi_parse.py [options] fasta_dir\n"
print "-d, --database=path\n\tpath to protein database"
print "-g, --gi2taxid_file=filename\n\tname of gi_to_taxid file including path"
print "-p, --psiblast=boolean\n\tcontrols whether the psiblast will be launched.\n\tIf the psiblast files are already prepared it should be set to False (default is True)"
print "-i, --pnum_of_psiblast_iteations=int\n\tnumber of psiblast iterations (default set to 3)"
print "-o, --output\n\tif present files names and taxidlist will be produced\n"
print "-r, --remove_psiblast_files\n\tif present results of psiblast will be removed\n"
print "-l, --psiblast_path\n\tif the global path for the psiblast in the system is not set, please specify it in this option\n"
sys.exit()
def parse(fastafile, htaxid, names, do_psiblast, out, rm_psiblast):
    """Run (optionally) psiblast on one fasta file and write its taxid list.

    Reads <fastafile>.psiblast (XML, -outfmt 5), parses only the LAST
    psiblast iteration, maps each hit's GI number to a taxid via `htaxid`,
    and writes each distinct taxid once to <fastafile>.taxid.

    Python 2 code (print statements, xrange, has_key).  Relies on the
    module globals psi_path, numOfIter and blastdb.
    """
    if do_psiblast:
        print >> sys.stderr, "PSI_BLAST for %s...\n" %(fastafile)
        # Results are written next to the input as <fastafile>.psiblast.
        os.system("%spsiblast -num_threads 9 -num_iterations %d -db %s -outfmt 5 -evalue 0.001 -query %s -out %s.psiblast" %(psi_path, numOfIter, blastdb, fastafile, fastafile))
    file=open("%s.psiblast" %(fastafile))
    ftaxid=open("%s.taxid" %(fastafile), 'w')
    t=file.read()
    t=t.split('\n')
    # Scan backwards so `iterstart` marks the beginning of the LAST iteration.
    for i in xrange(len(t)-1,-1,-1):
        if re.search("<Iteration_iter-num>", t[i])!=None:
            iterstart=i
            break
    # The query id line sits two lines below the iteration tag; extract the
    # "db|accession|name" token from it.
    query=t[iterstart+2]
    query=re.search("[a-z]{2}\|[^\|]*\|[^ \t\n\r\f\v]+",query)
    if query!=None:
        query=query.group()
    print >> sys.stderr,"Query:", query
    if out:
        # NOTE(review): if the id regex did not match, `query` is None here
        # and this concatenation raises TypeError -- confirm intended.
        names.write(query+"\n")
    taxhash={}       # taxids already written for this file (dedup set)
    querylength=0    # NOTE(review): actually the FIRST HIT's alignment length,
                     # not the query length -- used as the reference below.
    for i in xrange(iterstart, len(t)):
        if re.search("<Hit_num>", t[i])!=None:
            hitname=re.search("<Hit_id>([^<]*)</Hit_id>",t[i+1]).group(1)
            # Find this hit's first HSP alignment length.
            for j in xrange(i, len(t)):
                hitlength=re.search("<Hsp_align-len>",t[j])
                if hitlength!=None:
                    hitlength=int(re.search("<Hsp_align-len>([^<]*)</Hsp_align-len>",t[j]).group(1))
                    # NOTE(review): rebinding the outer loop variable does NOT
                    # advance the outer xrange iterator, so lines i..j are
                    # rescanned by the outer loop -- suspected bug/inefficiency.
                    i=j
                    break
            if querylength==0: querylength=hitlength
            # True division (from __future__ import division): keep only hits
            # whose alignment length is within 20% of the reference length.
            if querylength/hitlength < 0.8 or querylength/hitlength > 1.2: continue
            gi=int(re.search("gi\|(\d+)\|", hitname).group(1))
            # Unknown GIs fall back to the dummy taxid 1 (NCBI "root").
            if not htaxid.has_key(gi): htaxid[gi]=1
            htaxidvar=htaxid[gi]
            if not taxhash.has_key(htaxidvar):
                taxhash[htaxidvar]=1
                ftaxid.write(str(htaxidvar)+"\n")
    file.close()
    ftaxid.close()
    if rm_psiblast:
        os.popen("rm %s.psiblast" %(fastafile))
indir=args
filelist=sorted(glob('%s/*[0-9]' %(indir[0])))
if len(filelist)==0:
filelist=sorted(glob('%s/*.fasta' %(indir)))
if len(filelist)==0:
print >> sys.stderr, "no fasta file in this directory\n"
usage()
sys.exit()
for i in xrange(len(filelist)):
fr=re.search("(%s/*).fasta", filelist[i])
if fr==None:
print >> sys.stderr, "wrong file name: ", filelist[i]
sys.exit()
filelist[i]=fr.group(1)
if out: names=open("names", "w")
else: names=""
print >> sys.stderr, "importing gi_taxid dump file\n\n"
try:
file=open(gi2taxid_file, 'r')
except IOError:
print gi2taxid_file, "not found.\n\n"
sys.exit()
content = file.readline()
while (content !=''):
content=content.rstrip()
gin, taxid=content.split()
htaxid[int(gin)]=int(taxid)
content = file.readline()
file.close()
#run blast
print >> sys.stderr, "running blast\n"
for fastafile in filelist:
parse(fastafile, htaxid, names, do_psiblast, out, rm_psiblast)
if out:
os.popen("cat %s/*.taxid | sort | uniq > taxidlist" %(indir))
names.close()
| agnmyk/simphypro | psi_parse.py | Python | mit | 4,537 | [
"BLAST"
] | 822f52bc986c43b2146157134fda850af91645918d54443a9b9131dbdc52b1dc |
""" SiteInspectorAgent
This agent inspect Sites, and evaluates policies that apply.
"""
__RCSID__ = '$Id$'
import math
import Queue
from DIRAC import S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.ResourceStatusSystem.PolicySystem.PEP import PEP
from DIRAC.ResourceStatusSystem.Utilities import Utils
ResourceManagementClient = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient' ), 'ResourceManagementClient')
AGENT_NAME = 'ResourceStatus/SiteInspectorAgent'
class SiteInspectorAgent( AgentModule ):
  """ SiteInspectorAgent

  The SiteInspectorAgent agent is an agent that is used to get the all the site names
  and trigger PEP to evaluate their status.

  Each agent cycle: fetch all sites and their current status into a Queue,
  size a thread pool proportional to the queue, and have each worker run the
  Policy Enforcement Point (PEP) per site.
  """

  # Max number of worker threads by default
  __maxNumberOfThreads = 15

  # Inspection freqs, defaults, the lower, the higher priority to be checked.
  # Error state usually means there is a glitch somewhere, so it has the highest
  # priority.
  # NOTE(review): this table is not referenced anywhere in this class --
  # presumably consumed elsewhere or kept for configuration; confirm.
  __checkingFreqs = {'Active' : 20,
                     'Degraded' : 20,
                     'Probing' : 20,
                     'Banned' : 15,
                     'Unknown' : 10,
                     'Error' : 5}

  def __init__( self, *args, **kwargs ):
    """Declare members; actual setup is deferred to initialize()."""
    AgentModule.__init__( self, *args, **kwargs )

    # ElementType, to be defined among Site, Resource or Node
    self.sitesToBeChecked = None   # Queue.Queue of site dicts, rebuilt each cycle
    self.threadPool = None         # ThreadPool running the _execute workers
    self.siteClient = None         # SiteStatus client
    self.clients = {}              # clients handed over to the PEP

  def initialize( self ):
    """ Standard initialize.

    Creates the thread pool and the RSS clients used by the policy system.
    Returns an S_OK structure (DIRAC return convention).
    """
    maxNumberOfThreads = self.am_getOption( 'maxNumberOfThreads', self.__maxNumberOfThreads )
    self.threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads )

    self.siteClient = SiteStatus()

    self.clients['SiteStatus'] = self.siteClient
    self.clients['ResourceManagementClient'] = ResourceManagementClient()

    return S_OK()

  def execute( self ):
    """ execute

    This is the main method of the agent. It gets the sites from the Database, calculates how many threads should be
    started and spawns them. Each thread will get a site from the queue until
    it is empty. At the end, the method will join the queue such that the agent
    will not terminate a cycle until all sites have been processed.
    """

    # Gets sites to be checked ( returns a Queue )
    sitesToBeChecked = self.getSitesToBeChecked()
    if not sitesToBeChecked['OK']:
      self.log.error( sitesToBeChecked['Message'] )
      return sitesToBeChecked
    self.sitesToBeChecked = sitesToBeChecked['Value']

    queueSize = self.sitesToBeChecked.qsize()
    pollingTime = self.am_getPollingTime()

    # Assigns number of threads on the fly such that we exhaust the PollingTime
    # without having to spawn too many threads. We assume 10 seconds per element
    # to be processed ( actually, it takes something like 1 sec per element ):
    # numberOfThreads = elements * 10(s/element) / pollingTime
    numberOfThreads = int( math.ceil( queueSize * 10. / pollingTime ) )

    self.log.info( 'Needed %d threads to process %d elements' % ( numberOfThreads, queueSize ) )

    for _x in xrange( numberOfThreads ):
      jobUp = self.threadPool.generateJobAndQueueIt( self._execute )
      if not jobUp['OK']:
        self.log.error( jobUp['Message'] )

    self.log.info( 'blocking until all sites have been processed' )
    # block until all tasks are done
    self.sitesToBeChecked.join()
    self.log.info( 'done')

    return S_OK()

  def getSitesToBeChecked( self ):
    """ getElementsToBeChecked

    This method gets all the site names from the SiteStatus table, after that it get the details of each
    site (status, name, etc..) and adds them to a queue.

    Returns S_OK( Queue.Queue ) or the failing S_ERROR from the client calls.
    """

    toBeChecked = Queue.Queue()

    res = self.siteClient.getSites('All')
    if not res['OK']:
      return res

    # get the current status
    res = self.siteClient.getSiteStatuses( res['Value'] )
    if not res['OK']:
      return res

    # filter elements: one dict per site, shaped the way the PEP expects it
    for site in res['Value']:
      status = res['Value'].get(site, 'Unknown')
      toBeChecked.put( { 'status': status,
                         'name': site,
                         'site' : site,
                         'element' : 'Site',
                         'statusType': 'all',
                         'elementType': 'Site' } )

    return S_OK( toBeChecked )

  # Private methods ............................................................

  def _execute( self ):
    """
    Method run by each of the thread that is in the ThreadPool.
    It enters a loop until there are no sites on the queue.
    On each iteration, it evaluates the policies for such site
    and enforces the necessary actions. If there are no more sites in the
    queue, the loop is finished.
    """

    pep = PEP( clients = self.clients )

    while True:

      try:
        site = self.sitesToBeChecked.get_nowait()
      except Queue.Empty:
        # Queue drained: this worker is done.
        return S_OK()

      resEnforce = pep.enforce( site )
      if not resEnforce['OK']:
        self.log.error( 'Failed policy enforcement', resEnforce['Message'] )
        # task_done must be called for every get() so that join() unblocks,
        # including on the error path.
        self.sitesToBeChecked.task_done()
        continue

      # Used together with join !
      self.sitesToBeChecked.task_done()
#...............................................................................
#EOF
| arrabito/DIRAC | ResourceStatusSystem/Agent/SiteInspectorAgent.py | Python | gpl-3.0 | 5,660 | [
"DIRAC"
] | 90f0b77cb9e6d77e2eca750cce166eff6abcf6be18863268812027521d8efa80 |
#PENDING TASKS FOR FURTHER OPTIMIZATION:
#1. hMat grows 2*int(radMax)+1 for all radiuses; i.e., all "radius" dimensions are growing to max radius size;
# Enlarge each matrix only by its corresponding radius size
#2. A given (a,b) point currently only votes for themselves; try voting for "neighboring" pixels and evaluate performance
#3. Conduct "neighborhood suppression" of local maximas in hMat and evaluate performance
import cv2
import numpy as np
import math

# Hough circle detection via the gradient method: each edge pixel votes for
# circle centres (a, b) along its gradient direction, once per candidate radius.

#Load the three-channel and the grayscale version of the image
# NOTE(review): img_filename is empty -- cv2.imread('') returns None, so a real
# path must be supplied before this script can run.
img_filename = ''
img = cv2.imread(img_filename)
img_gray=cv2.imread(img_filename,0) #Load grayscale version of same image
# NOTE(review): img_smooth is computed but never used below (edges/gradients
# are taken from img_gray) -- confirm whether smoothing was meant to feed in.
img_smooth = cv2.GaussianBlur(img_gray,(7,7),7) #'Smooth' using Gaussian filter of kernel size 7x7 and s.d.=7; use if Gaussian noise is present in image

#Calculate the Sobel matrices, the gradient matrix, and the Canny edge matrix
sobelX = cv2.Sobel(img_gray,cv2.CV_8U,1,0,ksize=7) #Sobel gradient along X axis
sobelY = cv2.Sobel(img_gray,cv2.CV_8U,0,1,ksize=7) #Sobel along Y axis
gradTheta = np.arctan2(sobelY,sobelX) #Gradient direction matrix: arctan(y/x)
img_edges = cv2.Canny(img_gray,50,170) #Detect image edges using canny operator
(threshold, img_bw) = cv2.threshold(img_edges, 25, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) #Convert image to binary

#Get the appropriate matrices for the following operations:
radMin = 10 #Minimun radius for consideration
radMax = 40 #Maximum radius for consideration
radRes = 1 #Resolution or the bin size for the radius
radRange = int(math.ceil(radMax-radMin+1)/radRes) #Gives the number of bins
radMultiplier = np.arange(radMin,radMin+(radRange*radRes),radRes).reshape((radRange,1,1)) #This is to multiply each radius-dimension with seperate radius value
# NOTE(review): this overwrites the 2-D gradTheta computed above with a tiled
# 3-D copy (one slice per radius bin) -- the earlier assignment is redundant.
gradTheta = np.tile(np.arctan2(sobelY,sobelX),(radRange,1,1)) #Tile the gradTheta matrix
voters = np.tile(img_bw>0,(radRange,1,1)) #Boolean matrix to keep track of which y,x cells get to vote; only cells with positive values vote

#Setup X and Y matrices and calculate A and B
index = np.indices(img_bw.shape) #Returns matrices with row and column index values
X = np.tile(index[1],(radRange,1,1)) #Sets column values to represent x-index in the image; tile adds third dimension of value radRange
Y = np.tile(index[0],(radRange,1,1)) #Sets row values
A = X - (radMultiplier * np.cos(gradTheta)) #Calculate A using the standard formula
A = A[voters].astype(int) #Remove non-voters and convert to integer type from float
B = Y - (radMultiplier * np.sin(gradTheta)) #Same as A
B = B[voters].astype(int)

#Setup accumulator matrix and adjust other matrices so they have appropriate (i.e. non-negative) values for matrix indexing
hMat = np.zeros((radRange,img_bw.shape[0]+2*int(radMax)+1,img_bw.shape[1]+2*int(radMax)+1)) #hMat can have (a,b) circle center values "outside" (x,y) range
A = A + (2*radMax) #Positive scaling so that there are no negative values in A; done for indexing in hMat;later reversed
B = B + (2*radMax)
radMultiplier = ((radMultiplier - radMin)/radRes).astype(int) #Adjusting for indexing purposes; re-adjusted to original state below
hIndex = tuple((radMultiplier,B,A)) #The corresponding tuple values in (B,A) will provide (row,col) index in hMat
hMat[hIndex]+=1 #update accumulator

#Locating peaks
peakCount = 10 #Number of Hough peaks we want to detect in the Hough Accumulator
peakIndex = np.argsort(np.ravel(hMat))[::-1][:peakCount] #Get the first peakCount number of "flat" hMat indices of the largest values
peakRYX = np.unravel_index(peakIndex, (hMat.shape)) #Unravel the "ravelled" peakIndex to get the Y,X,R coordinate tuple of the largest n (=peakCount) values

#Undoing the adjustments made above for indexing = final values of radius, y, and x
peakR = (peakRYX[0] + radMin) * radRes #Radius adjustment
peakY = peakRYX[1] - (2*radMax) #Y Adjustment
peakX = peakRYX[2] - (2*radMax) #X Adjustment

#Draw the circles using the located peaks
for i in range(peakCount):
    cv2.circle(img, (peakX[i],peakY[i]), peakR[i], (0,255,0))

cv2.imshow('hMat',img)
cv2.waitKey()
cv2.destroyAllWindows() | ashimb9/hough_transform | hough_circle.py | Python | gpl-3.0 | 4,167 | [
"Gaussian"
] | dfaa4e06f3f97999f745da99192e20639c42b3679658089a2399721d3c67634f |
""" ntsne.py
numpy wrapper for bh_tsne (https://github.com/lvdmaaten/bhtsne)
Brian DeCost bdecost@andrew.cmu.edu
"""
import os
import re
import shutil
import struct
import tempfile
import subprocess
import numpy as np
# vdM's bh_tsne reads and writes from/to these hardcoded paths
# in the directory from which bh_tsne is run
DATAFILE = 'data.dat'
RESULTFILE = 'result.dat'
# Upstream repository that gets cloned/built when no local binary is found.
TSNESOURCE = 'https://github.com/lvdmaaten/bhtsne'
# check to see if bh_tsne in on system PATH or in the CWD;
# if not, clone and build it in ~/.ntsne if necessary
augmented_path = os.getenv('PATH') + ':{}'.format(os.getcwd())
TSNE = shutil.which('bh_tsne', path=augmented_path)
if TSNE is None:
    # Fall back to a per-user install location. The binary may not exist yet;
    # it is built at import time at the bottom of this module if missing.
    TSNEDIR = os.path.expanduser('~/.ntsne')
    TSNE = os.path.join(TSNEDIR, 'bh_tsne')
# set default bh_tsne parameters
THETA = 0.5        # Barnes-Hut approximation parameter
PERPLEXITY = 30    # t-SNE perplexity
MAP_DIMS = 2       # output embedding dimensionality
MAX_ITER = 1000    # maximum optimisation iterations
SEED = None        # random seed (None: omit from the input file)
def build_bhtsne():
    """Clone lvdmaaten's bhtsne repository into TSNEDIR and compile the bh_tsne binary there."""
    subprocess.call(['git', 'clone', TSNESOURCE, TSNEDIR])
    compile_cmd = ['g++', 'sptree.cpp', 'tsne.cpp', '-o', 'bh_tsne', '-O2']
    subprocess.call(compile_cmd, cwd=TSNEDIR)
def write_tsne_input(X, theta=THETA, perplexity=PERPLEXITY, map_dims=MAP_DIMS, max_iter=MAX_ITER, seed=SEED, cwd=''):
    """Serialize a 2D data matrix (numpy array) with t-SNE options to vdM's binary input format.

    Parameters
    ----------
    X : numpy.ndarray
        2D data matrix of shape (n_instances, n_dimensions).
        NOTE(review): bh_tsne reads double-precision data, so X is presumably
        float64 and C-contiguous -- confirm at call sites.
    theta, perplexity, map_dims, max_iter, seed
        bh_tsne options written into the binary header.
    cwd : str
        Directory to write DATAFILE into (bh_tsne reads it from its working
        directory).
    """
    with open(os.path.join(cwd, DATAFILE), 'wb') as f:
        n, d = X.shape
        f.write(struct.pack('=i', n))  # number of instances
        f.write(struct.pack('=i', d))  # initial dimensionality
        f.write(struct.pack('=d', theta))
        f.write(struct.pack('=d', perplexity))
        f.write(struct.pack('=i', map_dims))
        f.write(struct.pack('=i', max_iter))
        f.write(X.tobytes())
        # Bug fix: the optional trailing field is the random seed; the original
        # code mistakenly packed map_dims a second time here.
        if seed is not None:
            f.write(struct.pack('=i', seed))
def read_tsne_results(cwd=''):
    """Load the t-SNE embedding from vdM's binary results file format.

    Returns a numpy array of shape (n_instances, map_dims).
    """
    with open(os.path.join(cwd, RESULTFILE), 'rb') as f:
        n_instances, = struct.unpack('=i', f.read(4))  # number of instances
        map_dims, = struct.unpack('=i', f.read(4))     # map dimensionality
        # Unpack all n*md little-endian doubles in a single call.
        n_values = n_instances * map_dims
        coords = struct.unpack('={}d'.format(n_values),
                               f.read(struct.calcsize('=d') * n_values))
    return np.array(coords).reshape((n_instances, map_dims))
def tsne(X, theta=THETA, perplexity=PERPLEXITY, map_dims=MAP_DIMS, max_iter=MAX_ITER, seed=SEED):
    """Run bh_tsne once on the data matrix X and return the resulting embedding."""
    with tempfile.TemporaryDirectory() as workdir:
        # bh_tsne communicates via hardcoded files in its working directory,
        # so run it inside a throwaway temp dir.
        write_tsne_input(X, theta=theta, perplexity=perplexity,
                         map_dims=map_dims, max_iter=max_iter, seed=seed, cwd=workdir)
        subprocess.call(TSNE, cwd=workdir)
        return read_tsne_results(cwd=workdir)
def tsne_error(results):
    """Return the lowest KL-divergence error reported in bh_tsne's stdout.

    Parameters
    ----------
    results : bytes
        Captured stdout of a bh_tsne run; iterations log lines containing
        'error is <float>'.

    Returns
    -------
    float
        The minimum reported error (likely from the last iteration).

    Raises
    ------
    ValueError
        If no error lines are present in the output.
    """
    errors = [float(e) for e in re.findall(r'error is (\d+\.\d+)', results.decode())]
    # Bug fix: the original took min() over the raw strings and relied on
    # lexicographic ordering, but e.g. 'error is 10.5' < 'error is 9.2' as
    # strings, so the larger error could be returned. Compare as floats.
    return min(errors)
def best_tsne(X, theta=THETA, perplexity=PERPLEXITY, map_dims=MAP_DIMS, max_iter=MAX_ITER, seed=SEED, n_repeats=10):
    """Run bh_tsne {n_repeats} times and return the embedding with the lowest KL divergence."""
    best_embedding = None
    best_error = 1e9
    with tempfile.TemporaryDirectory() as workdir:
        # The input file is identical across repeats, so write it just once.
        write_tsne_input(X, theta=theta, perplexity=perplexity,
                         map_dims=map_dims, max_iter=max_iter, seed=seed, cwd=workdir)
        for _ in range(n_repeats):
            run_output = subprocess.check_output(TSNE, cwd=workdir)
            run_error = tsne_error(run_output)
            if run_error < best_error:
                best_error = run_error
                best_embedding = read_tsne_results(cwd=workdir)
    return best_embedding
# Import-time bootstrap: if no bh_tsne binary was found above (neither on PATH
# nor in ~/.ntsne), fetch and compile it now.
if not os.path.isfile(TSNE):
    print('bh_tsne not found; cloning from {}'.format(TSNESOURCE))
    build_bhtsne()
| bdecost/ntsne | ntsne.py | Python | mit | 4,058 | [
"Brian"
] | 136b0a7ffc0925ea076de9b48785551f87f5df15b4aa5d78e14a8d1e6552cec7 |
import collections.abc
import warnings
from abc import abstractmethod
from collections import defaultdict
from datetime import datetime
from enum import Enum, EnumMeta
from textwrap import dedent
from typing import Any, Callable, Dict, Mapping, Optional, Set, Tuple, Union
from urllib.parse import urlencode
import ciso8601
from ruamel.yaml.timestamp import TimeStamp as RuamelTimeStamp
from eodatasets3.utils import default_utc
class FileFormat(Enum):
    """File formats a dataset's imagery may be stored in."""
    GeoTIFF = 1
    NetCDF = 2
    Zarr = 3
    JPEG2000 = 4
def nest_properties(d: Mapping[str, Any], separator=":") -> Dict[str, Any]:
    """
    Split keys with embedded colons into sub dictionaries.
    Intended for stac-like properties
    >>> nest_properties({'landsat:path':1, 'landsat:row':2, 'clouds':3})
    {'landsat': {'path': 1, 'row': 2}, 'clouds': 3}
    """
    nested: Dict[str, Any] = {}
    for full_key, value in d.items():
        parts = full_key.split(separator, 1)
        if len(parts) == 2:
            prefix, suffix = parts
            nested.setdefault(prefix, {})[suffix] = value
        else:
            nested[parts[0]] = value
    # Recurse so multi-level keys ('a:b:c') are nested all the way down.
    return {
        key: nest_properties(value, separator=separator) if isinstance(value, dict) else value
        for key, value in nested.items()
    }
def datetime_type(value):
    """Normalise a timestamp (ruamel TimeStamp, ISO string, or datetime) to a
    timezone-attached datetime (UTC by default, per the YAML standard)."""
    # Ruamel's TimeZone class can become invalid from the .replace(utc) call.
    # (I think it no longer matches the internal ._yaml fields.)
    # Convert to a regular datetime by round-tripping through its ISO form.
    if isinstance(value, RuamelTimeStamp):
        value = value.isoformat()
    if isinstance(value, str):
        value = ciso8601.parse_datetime(value)
    # Store all dates with a timezone.
    # yaml standard says all dates default to UTC.
    # (and ruamel normalises timezones to UTC itself)
    return default_utc(value)
def of_enum_type(
    vals: Union[EnumMeta, Tuple[str, ...]] = None, lower=False, upper=False, strict=True
) -> Callable[[str], str]:
    """Build a normaliser restricting values to a known set of names.

    ``vals`` may be an Enum class (its member names are used) or a tuple of
    allowed strings. The returned callable optionally upper/lower-cases its
    input, then raises (strict=True) or warns (strict=False) on unknown values.
    """
    if isinstance(vals, EnumMeta):
        vals = tuple(vals.__members__.keys())

    def normalise(v: str):
        # Enum members are normalised via their member name.
        if isinstance(v, Enum):
            v = v.name
        if upper:
            v = v.upper()
        if lower:
            v = v.lower()
        if v not in vals:
            msg = f"Unexpected value {v!r}. Expected one of: {', '.join(vals)},"
            if strict:
                raise ValueError(msg)
            warnings.warn(msg)
        return v

    return normalise
def percent_type(value):
    """Coerce to float and validate it lies in the inclusive range 0..100."""
    percent = float(value)
    if not (0.0 <= percent <= 100.0):
        raise ValueError("Expected percent between 0,100")
    return percent
def normalise_platforms(value: Union[str, list, set]):
    """
    >>> normalise_platforms('LANDSAT_8')
    'landsat-8'
    >>> # Multiple can be comma-separated. They're normalised independently and sorted.
    >>> normalise_platforms('LANDSAT_8,Landsat-5,landsat-7')
    'landsat-5,landsat-7,landsat-8'
    >>> # Can be given as a list.
    >>> normalise_platforms(['sentinel-2b','SENTINEL-2a'])
    'sentinel-2a,sentinel-2b'
    >>> # Deduplicated too
    >>> normalise_platforms('landsat-5,landsat-5,LANDSAT-5')
    'landsat-5'
    """
    if isinstance(value, (list, set, tuple)):
        raw_names = value
    else:
        raw_names = value.split(",")
    # Normalise each name (lowercase, dashes), drop empties, then
    # deduplicate via a set and sort for stable output ordering.
    unique_names = {name.strip().lower().replace("_", "-") for name in raw_names if name}
    if not unique_names:
        return None
    return ",".join(sorted(unique_names))
def degrees_type(value):
    """Coerce to float and validate it is an angle within [-360, +360] degrees."""
    angle = float(value)
    if not (-360.0 <= angle <= 360.0):
        raise ValueError("Expected degrees between -360,+360")
    return angle
def identifier_type(v: str):
    """Normalise dashes to underscores, warning when the result is not a
    lowercase identifier."""
    v = v.replace("-", "_")
    if not (v.isidentifier() and v.islower()):
        warnings.warn(
            f"{v!r} is expected to be an identifier "
            "(alphanumeric with underscores, typically lowercase)"
        )
    return v
def producer_check(value):
    """Pass the producer value through, warning when it doesn't look like a domain name."""
    if "." not in value:
        warnings.warn(
            "Property 'odc:producer' is expected to be a domain name, "
            "eg 'usgs.gov' or 'ga.gov.au'"
        )
    return value
def parsed_sentinel_tile_id(tile_id) -> Tuple[str, Dict]:
    """Extract useful extra fields from a sentinel tile id
    >>> val, props = parsed_sentinel_tile_id("S2B_OPER_MSI_L1C_TL_EPAE_20201011T011446_A018789_T55HFA_N02.09")
    >>> val
    'S2B_OPER_MSI_L1C_TL_EPAE_20201011T011446_A018789_T55HFA_N02.09'
    >>> props
    {'sentinel:datatake_start_datetime': datetime.datetime(2020, 10, 11, 1, 14, 46, tzinfo=datetime.timezone.utc)}
    """
    fields = tile_id.split("_")
    extras = {}
    try:
        # The fourth-from-last underscore-separated field holds the
        # datatake sensing time.
        extras["sentinel:datatake_start_datetime"] = datetime_type(fields[-4])
    except IndexError:
        # Too few fields to be a recognisable tile id: return no extras.
        pass
    # TODO: we could extract other useful fields?
    return tile_id, extras
def parsed_sentinel_datastrip_id(tile_id) -> Tuple[str, Dict]:
    """Extract useful extra fields from a sentinel datastrip id
    >>> val, props = parsed_sentinel_datastrip_id("S2B_OPER_MSI_L1C_DS_EPAE_20201011T011446_S20201011T000244_N02.09")
    >>> val
    'S2B_OPER_MSI_L1C_DS_EPAE_20201011T011446_S20201011T000244_N02.09'
    >>> props
    {'sentinel:datatake_start_datetime': datetime.datetime(2020, 10, 11, 1, 14, 46, tzinfo=datetime.timezone.utc)}
    """
    fields = tile_id.split("_")
    extras = {}
    try:
        # The third-from-last underscore-separated field holds the
        # datatake sensing time.
        extras["sentinel:datatake_start_datetime"] = datetime_type(fields[-3])
    except IndexError:
        # Too few fields to be a recognisable datastrip id: return no extras.
        pass
    # TODO: we could extract other useful fields?
    return tile_id, extras
# The primitive types allowed as stac values.
PrimitiveType = Union[str, int, float, datetime]
ExtraProperties = Dict
# A function to normalise a value.
# (eg. convert to int, or make string lowercase).
# They throw a ValueError if not valid.
NormaliseValueFn = Callable[
    [Any],
    # It returns the normalised value, but can optionally also return extra property values extracted from it.
    Union[PrimitiveType, Tuple[PrimitiveType, ExtraProperties]],
]
# In the tables below, a value of None means the property is stored as-is
# (no normaliser is applied in Eo3Dict.normalise_and_set).
# Extras typically on the ARD product.
_GQA_FMASK_PROPS = {
    "fmask:clear": float,
    "fmask:cloud": float,
    "fmask:cloud_shadow": float,
    "fmask:snow": float,
    "fmask:water": float,
    "gqa:abs_iterative_mean_x": float,
    "gqa:abs_iterative_mean_xy": float,
    "gqa:abs_iterative_mean_y": float,
    "gqa:abs_x": float,
    "gqa:abs_xy": float,
    "gqa:abs_y": float,
    "gqa:cep90": float,
    "gqa:error_message": None,
    "gqa:final_gcp_count": int,
    "gqa:iterative_mean_x": float,
    "gqa:iterative_mean_xy": float,
    "gqa:iterative_mean_y": float,
    "gqa:iterative_stddev_x": float,
    "gqa:iterative_stddev_xy": float,
    "gqa:iterative_stddev_y": float,
    "gqa:mean_x": float,
    "gqa:mean_xy": float,
    "gqa:mean_y": float,
    "gqa:ref_source": None,
    "gqa:stddev_x": float,
    "gqa:stddev_xy": float,
    "gqa:stddev_y": float,
}
# Typically only from LPGS (ie. Level 1 products)
_LANDSAT_EXTENDED_PROPS = {
    "landsat:algorithm_source_surface_reflectance": None,
    "landsat:collection_category": None,
    "landsat:collection_number": int,
    "landsat:data_type": None,
    "landsat:earth_sun_distance": None,
    "landsat:ephemeris_type": None,
    "landsat:geometric_rmse_model": None,
    "landsat:geometric_rmse_model_x": None,
    "landsat:geometric_rmse_model_y": None,
    "landsat:geometric_rmse_verify": None,
    "landsat:ground_control_points_model": None,
    "landsat:ground_control_points_verify": None,
    "landsat:ground_control_points_version": None,
    "landsat:image_quality_oli": None,
    "landsat:image_quality_tirs": None,
    "landsat:processing_software_version": None,
    "landsat:scan_gap_interpolation": float,
    "landsat:station_id": None,
    # Landsat USGS Properties
    "landsat:rmse": None,
    "landsat:rmse_x": None,
    "landsat:rmse_y": None,
    "landsat:wrs_type": None,
    "landsat:correction": None,
    "landsat:cloud_cover_land": None,
}
# Sentinel-specific properties; the id parsers also extract the
# datatake start time as an extra property.
_SENTINEL_EXTENDED_PROPS = {
    "sentinel:sentinel_tile_id": parsed_sentinel_tile_id,
    "sentinel:datatake_start_datetime": datetime_type,
    "sentinel:datastrip_id": parsed_sentinel_datastrip_id,
    "sentinel:datatake_type": None,
    "sentinel:processing_baseline": None,
    "sentinel:processing_center": None,
    "sentinel:product_name": None,
    "sentinel:reception_station": None,
    "sentinel:utm_zone": int,
    "sentinel:latitude_band": None,
    "sentinel:grid_square": None,
    "sinergise_product_id": None,
}
# Miscellaneous Stac extension properties.
_STAC_MISC_PROPS = {
    "providers": None,  # https://github.com/radiantearth/stac-spec/blob/master/item-spec/common-metadata.md#provider,
    # Projection extension
    "proj:epsg": int,
    "proj:shape": None,
    "proj:transform": None,
}
class Eo3Dict(collections.abc.MutableMapping):
    """
    This acts like a dictionary, but will normalise known properties (consistent
    case, types etc) and warn about common mistakes.

    It wraps an inner dictionary. By default it will normalise the fields in
    the input dictionary on creation, but you can disable this with `normalise_input=False`.
    """

    # Every property we've seen or dealt with so far. Feel free to expand with abandon...
    # This is to minimise minor typos, case differences, etc, which plagued previous systems.
    # Keep sorted.
    # (A value of None means the property is stored without normalisation.)
    KNOWN_PROPERTIES: Mapping[str, Optional[NormaliseValueFn]] = {
        "datetime": datetime_type,
        "dea:dataset_maturity": of_enum_type(("final", "interim", "nrt"), lower=True),
        "dea:product_maturity": of_enum_type(("stable", "provisional"), lower=True),
        "dtr:end_datetime": datetime_type,
        "dtr:start_datetime": datetime_type,
        "eo:azimuth": float,
        "eo:cloud_cover": percent_type,
        "eo:epsg": None,
        "eo:gsd": None,
        "eo:instrument": None,
        "eo:off_nadir": float,
        "eo:platform": normalise_platforms,
        "eo:constellation": None,
        "eo:sun_azimuth": degrees_type,
        "eo:sun_elevation": degrees_type,
        "sat:orbit_state": None,
        "sat:relative_orbit": int,
        "sat:absolute_orbit": int,
        "landsat:landsat_product_id": None,
        "landsat:scene_id": None,
        "landsat:landsat_scene_id": None,
        "landsat:wrs_path": int,
        "landsat:wrs_row": int,
        "odc:dataset_version": None,
        "odc:collection_number": int,
        "odc:naming_conventions": None,
        # Not strict as there may be more added in ODC...
        "odc:file_format": of_enum_type(FileFormat, strict=False),
        "odc:processing_datetime": datetime_type,
        "odc:producer": producer_check,
        "odc:product": None,
        "odc:product_family": identifier_type,
        "odc:region_code": None,
        "odc:sat_row": None,  # When a dataset has a range of rows (... telemetry)
        **_LANDSAT_EXTENDED_PROPS,
        **_GQA_FMASK_PROPS,
        **_SENTINEL_EXTENDED_PROPS,
        **_STAC_MISC_PROPS,
    }

    # For backwards compatibility, in case users are extending at runtime.
    KNOWN_STAC_PROPERTIES = KNOWN_PROPERTIES

    def __init__(self, properties: Mapping = None, normalise_input=True) -> None:
        if properties is None:
            properties = {}
        self._props = properties
        # We normalise the properties they gave us.
        for key in list(self._props):
            # We always want to normalise dates as datetime objects rather than strings
            # for consistency.
            if normalise_input or ("datetime" in key):
                self.normalise_and_set(key, self._props[key], expect_override=True)
        self._finished_init_ = True

    def __setattr__(self, name: str, value: Any) -> None:
        """
        Prevent against users accidentally setting new properties (it has happened multiple times).
        """
        if hasattr(self, "_finished_init_") and not hasattr(self, name):
            raise TypeError(
                f"Cannot set new field '{name}' on a dict. "
                f"(Perhaps you meant to set it as a dictionary field??)"
            )
        super().__setattr__(name, value)

    def __getitem__(self, item):
        return self._props[item]

    def __iter__(self):
        return iter(self._props)

    def __len__(self):
        return len(self._props)

    def __delitem__(self, name: str) -> None:
        del self._props[name]

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self._props!r})"

    def __setitem__(self, key, value):
        self.normalise_and_set(
            key,
            value,
            # They can override properties but will receive a warning.
            allow_override=True,
        )

    def normalise_and_set(self, key, value, allow_override=True, expect_override=False):
        """
        Set a property with the usual normalisation.

        This has some options that are not available on normal dictionary item
        setting (``self[key] = val``)

        The default behaviour of this class is very conservative in order to catch common errors
        of users. You can loosen the settings here.

        :argument allow_override: Is it okay to overwrite an existing value? (if not, error will be thrown)
        :argument expect_override: We expect to overwrite a property, so don't produce a warning or error.
        """
        if key not in self.KNOWN_PROPERTIES:
            warnings.warn(
                f"Unknown Stac property {key!r}. "
                f"If this is valid property, please tell us on Github here so we can add it: "
                f"\n\t{_github_suggest_new_property_url(key, value)}"
            )
        if value is not None:
            normalise = self.KNOWN_PROPERTIES.get(key)
            if normalise:
                value = normalise(value)
                # A normaliser can return a (value, extra_properties) pair when
                # it extracts additional fields from the input.
                # (Fix: check against the builtin ``tuple`` -- ``typing.Tuple``
                # is an annotation alias and is deprecated for isinstance use.)
                if isinstance(value, tuple):
                    value, extra_properties = value
                    for k, v in extra_properties.items():
                        if k == key:
                            raise RuntimeError(
                                f"Infinite loop: writing key {k!r} from itself"
                            )
                        self.normalise_and_set(k, v, allow_override=allow_override)
        if key in self._props and value != self[key] and (not expect_override):
            message = (
                f"Overriding property {key!r} " f"(from {self[key]!r} to {value!r})"
            )
            if allow_override:
                warnings.warn(message, category=PropertyOverrideWarning)
            else:
                raise KeyError(message)
        self._props[key] = value

    def nested(self):
        """Return the properties as a nested dict (colon-separated keys split into sub-dicts)."""
        return nest_properties(self._props)
class StacPropertyView(Eo3Dict):
    """
    Backwards compatibility class name. Deprecated.
    Use the identical 'Eo3Dict' instead.
    These were called "StacProperties" in Stac 0.6, but many of them have
    changed in newer versions and we're sticking to the old names for consistency
    and backwards-compatibility. So they're now EO3 Properties.
    (The eo3-to-stac tool to convert EO3 properties to real Stac properties.)
    """
    def __init__(self, properties=None) -> None:
        super().__init__(properties)
        # A DeprecationWarning is emitted on every construction of this class.
        warnings.warn(
            "The class name 'StacPropertyView' is deprecated as it's misleading. "
            "Please change your import to the (identical) 'Eo3Dict'.",
            category=DeprecationWarning,
        )
class PropertyOverrideWarning(UserWarning):
    """A warning that a property was set twice with different values."""
class Eo3Interface:
    """
    These are convenience properties for common metadata fields. They are available
    on DatasetAssemblers and within other naming APIs.
    (This is abstract. If you want one of these of your own, you probably want to create
    an :class:`eodatasets3.DatasetDoc`)
    """

    @property
    @abstractmethod
    def properties(self) -> Eo3Dict:
        """The backing property dictionary (supplied by implementing classes)."""
        raise NotImplementedError

    @property
    def platform(self) -> Optional[str]:
        """
        Unique name of the specific platform the instrument is attached to.
        For satellites this would be the name of the satellite (e.g., ``landsat-8``, ``sentinel-2a``),
        whereas for drones this would be a unique name for the drone.
        In derivative products, multiple platforms can be specified with a comma: ``landsat-5,landsat-7``.
        Shorthand for ``eo:platform`` property
        """
        return self.properties.get("eo:platform")

    @platform.setter
    def platform(self, value: str):
        self.properties["eo:platform"] = value

    @property
    def platforms(self) -> Set[str]:
        """
        Get platform as a set (containing zero or more items).
        In EO3, multiple platforms are specified by comma-separating them.
        """
        if not self.platform:
            return set()
        return set(self.properties.get("eo:platform", "").split(","))

    @platforms.setter
    def platforms(self, value: Set[str]):
        # The normaliser supports sets/lists
        self.properties["eo:platform"] = value

    @property
    def instrument(self) -> str:
        """
        Name of instrument or sensor used (e.g., MODIS, ASTER, OLI, Canon F-1).
        Shorthand for ``eo:instrument`` property
        """
        return self.properties.get("eo:instrument")

    @instrument.setter
    def instrument(self, value: str):
        self.properties["eo:instrument"] = value

    @property
    def constellation(self) -> str:
        """
        Constellation. Eg ``sentinel-2``.
        Shorthand for ``eo:constellation`` property.
        """
        return self.properties.get("eo:constellation")

    @constellation.setter
    def constellation(self, value: str):
        self.properties["eo:constellation"] = value

    @property
    def product_name(self) -> Optional[str]:
        """
        The ODC product name
        Shorthand for ``odc:product`` property.
        """
        return self.properties.get("odc:product")

    @product_name.setter
    def product_name(self, value: str):
        self.properties["odc:product"] = value

    @property
    def producer(self) -> str:
        """
        Organisation that produced the data.
        eg. ``usgs.gov`` or ``ga.gov.au``
        Shorthand for ``odc:producer`` property
        """
        return self.properties.get("odc:producer")

    @producer.setter
    def producer(self, domain: str):
        self.properties["odc:producer"] = domain

    @property
    def datetime_range(self) -> Tuple[datetime, datetime]:
        """
        An optional date range for the dataset.
        The ``datetime`` is still mandatory when this is set.
        This field is a shorthand for reading/setting the datetime-range
        stac 0.6 extension properties: ``dtr:start_datetime`` and ``dtr:end_datetime``
        """
        return (
            self.properties.get("dtr:start_datetime"),
            self.properties.get("dtr:end_datetime"),
        )

    @datetime_range.setter
    def datetime_range(self, val: Tuple[datetime, datetime]):
        # TODO: string type conversion, better validation/errors
        start, end = val
        self.properties["dtr:start_datetime"] = start
        self.properties["dtr:end_datetime"] = end

    @property
    def processed(self) -> datetime:
        """When the dataset was created (Defaults to UTC if not specified)
        Shorthand for the ``odc:processing_datetime`` field
        """
        return self.properties.get("odc:processing_datetime")

    @processed.setter
    def processed(self, value: Union[str, datetime]):
        self.properties["odc:processing_datetime"] = value

    def processed_now(self):
        """
        Shorthand for when the dataset was processed right now on the current system.
        """
        # NOTE(review): datetime.utcnow() returns a *naive* datetime (and is
        # deprecated in Python 3.12); the property normaliser presumably
        # attaches UTC via default_utc -- confirm.
        self.properties["odc:processing_datetime"] = datetime.utcnow()

    @property
    def dataset_version(self) -> str:
        """
        The version of the dataset.
        Typically digits separated by a dot. Eg. `1.0.0`
        The first digit is usually the collection number for
        this 'producer' organisation, such as USGS Collection 1 or
        GA Collection 3.
        """
        return self.properties.get("odc:dataset_version")

    @property
    def collection_number(self) -> int:
        """
        The version of the collection.
        Eg.::
           metadata:
             product_family: wofs
             dataset_version: 1.6.0
             collection_number: 3
        """
        return self.properties.get("odc:collection_number")

    @dataset_version.setter
    def dataset_version(self, value):
        self.properties["odc:dataset_version"] = value

    @collection_number.setter
    def collection_number(self, value):
        self.properties["odc:collection_number"] = value

    @property
    def naming_conventions(self) -> str:
        """Shorthand for the ``odc:naming_conventions`` property."""
        return self.properties.get("odc:naming_conventions")

    @naming_conventions.setter
    def naming_conventions(self, value):
        self.properties["odc:naming_conventions"] = value

    @property
    def product_family(self) -> str:
        """
        The identifier for this "family" of products, such as ``ard``, ``level1`` or ``fc``.
        It's used for grouping similar products together.
        They products in a family are usually produced the same way but have small variations:
        they come from different sensors, or are written in different projections, etc.
        ``ard`` family of products: ``ls7_ard``, ``ls5_ard`` ....
        On older versions of Open Data Cube this was called ``product_type``.
        Shorthand for ``odc:product_family`` property.
        """
        return self.properties.get("odc:product_family")

    @product_family.setter
    def product_family(self, value):
        self.properties["odc:product_family"] = value

    @product_family.deleter
    def product_family(self):
        del self.properties["odc:product_family"]

    @property
    def region_code(self) -> Optional[str]:
        """
        The "region" of acquisition. This is a platform-agnostic representation of things like
        the Landsat Path+Row. Datasets with the same Region Code will *roughly* (but usually
        not *exactly*) cover the same spatial footprint.
        It's generally treated as an opaque string to group datasets and process as stacks.
        For Landsat products it's the concatenated ``{path}{row}`` (both numbers formatted to three digits).
        For Sentinel 2, it's the MGRS grid (TODO presumably?).
        Shorthand for ``odc:region_code`` property.
        """
        return self.properties.get("odc:region_code")

    @region_code.setter
    def region_code(self, value: str):
        self.properties["odc:region_code"] = value

    @property
    def maturity(self) -> str:
        """
        The dataset maturity. The same data may be processed multiple times -- becoming more
        mature -- as new ancillary data becomes available.
        Typical values (from least to most mature): ``nrt`` (near real time), ``interim``, ``final``
        Shorthand for ``dea:dataset_maturity`` property.
        """
        return self.properties.get("dea:dataset_maturity")

    @maturity.setter
    def maturity(self, value):
        self.properties["dea:dataset_maturity"] = value

    @property
    def product_maturity(self) -> str:
        """
        Classification: is this a 'provisional' or 'stable' release of the product?
        Shorthand for ``dea:product_maturity`` property.
        """
        return self.properties.get("dea:product_maturity")

    @product_maturity.setter
    def product_maturity(self, value):
        self.properties["dea:product_maturity"] = value

    # Note that giving a method the name 'datetime' will override the 'datetime' type
    # for class-level declarations (ie, for any types on functions!)
    # So we make an alias:
    from datetime import datetime as datetime_

    @property
    def datetime(self) -> datetime_:
        """
        The searchable date and time of the assets. (Default to UTC if not specified)
        Shorthand for the ``datetime`` property.
        """
        return self.properties.get("datetime")

    @datetime.setter
    def datetime(self, val: datetime_):
        self.properties["datetime"] = val
def _github_suggest_new_property_url(key: str, value: object) -> str:
"""Get a URL to create a Github issue suggesting new properties to be added."""
issue_parameters = urlencode(
dict(
title=f"Include property {key!r}",
labels="known-properties",
body=dedent(
f"""\
Hello! The property {key!r} does not appear to be in the KNOWN_PROPERTIES list,
but I believe it to be valid.
An example value of this property is: {value!r}
Thank you!
"""
),
)
)
return f"https://github.com/GeoscienceAustralia/eo-datasets/issues/new?{issue_parameters}"
| jeremyh/eo-datasets | eodatasets3/properties.py | Python | apache-2.0 | 25,006 | [
"NetCDF"
] | 3600bde7a618594421b8663241595f55e16cbb69f82295b1d7acab910bec6833 |
import json
import requests
import os
from settings import global_settings
_amino_acids_json_path = os.path.join(global_settings['package_path'], 'tools', 'amino_acids.json')
with open(_amino_acids_json_path, 'r') as inf:
amino_acids_dict = json.loads(inf.read())
water_mass = 18.01528
ideal_backbone_bond_lengths = {
# Ideal bond distances from:
# Schulz, G. E, and R. Heiner Schirmer. Principles Of Protein Structure. New York: Springer-Verlag, 1979.
'n_ca': 1.47,
'ca_c': 1.53,
'c_o': 1.24,
# peptide bond length for residues.
'c_n': 1.32,
}
ideal_backbone_bond_angles = {
# Ideal bond angles from:
# Schulz, G. E, and R. Heiner Schirmer. Principles Of Protein Structure. New York: Springer-Verlag, 1979.
'trans': {
'n_ca_c': 110.0,
'ca_c_o': 121.0,
'ca_c_n': 114.0,
'c_n_ca': 123.0,
'o_c_n': 125.0,
},
'cis': {
'n_ca_c': 110.0,
'ca_c_o': 119.0,
'ca_c_n': 118.0,
'c_n_ca': 126.0,
'o_c_n': 123.0
}
}
residue_mwt = {
'A': 71.0779, 'C': 103.1429, 'D': 115.0874, 'E': 129.114, 'F': 147.1739,
'G': 57.0513, 'H': 137.1393, 'I': 113.1576, 'K': 128.1723, 'L': 113.1576,
'M': 131.1961, 'N': 114.1026, 'P': 97.1152, 'Q': 128.1292, 'R': 156.1857,
'S': 87.0773, 'T': 101.1039, 'V': 99.1311, 'W': 186.2099, 'Y': 163.1733,
'X': 57.0513
}
residue_charge = {
'A': 0, 'C': -1, 'D': -1, 'E': -1, 'F': 0,
'G': 0, 'H': +1, 'I': 0, 'K': +1, 'L': 0,
'M': 0, 'N': 0, 'P': 0, 'Q': 0, 'R': +1,
'S': 0, 'T': 0, 'V': 0, 'W': 0, 'Y': -1,
'N-term': +1, 'C-term': -1, 'X': 0
}
residue_pka = {
'A': 0.0, 'C': 8.3, 'D': 3.65, 'E': 4.25, 'F': 0.0,
'G': 0.0, 'H': 6.1, 'I': 0.0, 'K': 10.53, 'L': 0.0,
'M': 0.0, 'N': 0.0, 'P': 0.0, 'Q': 0.0, 'R': 12.48,
'S': 0.0, 'T': 0.0, 'V': 0.0, 'W': 0.0, 'Y': 10.1,
'N-term': 8.0, 'C-term': 3.1, 'X': 0.0
}
residue_ext_280 = {
'A': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0,
'G': 0, 'H': 0, 'I': 0, 'K': 0, 'L': 0,
'M': 0, 'N': 0, 'P': 0, 'Q': 0, 'R': 0,
'S': 0, 'T': 0, 'V': 0, 'W': 5690, 'Y': 1280,
'X': 0
}
standard_amino_acids = {
'A': 'ALA', 'C': 'CYS', 'D': 'ASP', 'E': 'GLU', 'F': 'PHE',
'G': 'GLY', 'H': 'HIS', 'I': 'ILE', 'K': 'LYS', 'L': 'LEU',
'M': 'MET', 'N': 'ASN', 'P': 'PRO', 'Q': 'GLN', 'R': 'ARG',
'S': 'SER', 'T': 'THR', 'V': 'VAL', 'W': 'TRP', 'Y': 'TYR'
}
side_chain_dihedrals = {
'ARG': [
['N', 'CA', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2'],
['CA', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2'],
['CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2'],
['CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2']],
'ASN': [
['N', 'CA', 'CB', 'CG', 'OD1', 'ND2'],
['CA', 'CB', 'CG', 'OD1', 'ND2']],
'ASP': [
['N', 'CA', 'CB', 'CG', 'OD1', 'OD2'],
['CA', 'CB', 'CG', 'OD1', 'OD2']],
'CYS': [['N', 'CA', 'CB', 'SG']],
'GLN': [
['N', 'CA', 'CB', 'CG', 'CD', 'OE1', 'NE2'],
['CA', 'CB', 'CG', 'CD', 'OE1', 'NE2'],
['CB', 'CG', 'CD', 'OE1', 'NE2']],
'GLU': [
['N', 'CA', 'CB', 'CG', 'CD', 'OE1', 'OE2'],
['CA', 'CB', 'CG', 'CD', 'OE1', 'OE2'],
['CB', 'CG', 'CD', 'OE1', 'OE2']],
'HIS': [
['N', 'CA', 'CB', 'CG', 'ND1', 'CE1', 'ND2'],
['CA', 'CB', 'CG', 'ND1', 'CE1', 'ND2']],
'ILE': [
['N', 'CA', 'CB', 'CG1', 'CG2', 'CD1'],
['CA', 'CB', 'CG1', 'CD1', 'CG2']],
'LEU': [
['N', 'CA', 'CB', 'CG', 'CD1', 'CD2'],
['CA', 'CB', 'CG', 'CD1', 'CD2']],
'LYS': [
['N', 'CA', 'CB', 'CG', 'CD', 'CE', 'NZ'],
['CA', 'CB', 'CG', 'CD', 'CE', 'NZ'],
['CB', 'CG', 'CD', 'CE', 'NZ'],
['CG', 'CD', 'CE', 'NZ']],
'MET': [
['N', 'CA', 'CB', 'CG', 'SD', 'CE'],
['CA', 'CB', 'CG', 'SD', 'CE'],
['CB', 'CG', 'SD', 'CE']],
'PHE': [
['N', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ'],
['CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ']],
'PRO': [
['N', 'CA', 'CB', 'CG', 'CD'],
['CA', 'CB', 'CG', 'CD']],
'SER': [['N', 'CA', 'CB', 'OG']],
'THR': [['N', 'CA', 'CB', 'OG1', 'CG2']],
'TRP': [
['N', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE2', 'CE3', 'NE1', 'CZ2', 'CZ3', 'CH2'],
['CA', 'CB', 'CG', 'CD1', 'CD2', 'CE2', 'CE3', 'NE1', 'CZ2', 'CZ3', 'CH2']],
'TYR': [
['N', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH'],
['CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH']],
'VAL': [['N', 'CA', 'CB', 'CG1', 'CG2']]
}
# Data taken from http://web.expasy.org/protscale/ unless otherwise stated. Original reference also given.
# Levitt M. Biochemistry 17:4277-4285(1978)
a_helix_Levitt = {
'A': 1.29, 'C': 1.11, 'D': 1.04, 'E': 1.44, 'F': 1.07,
'G': 0.56, 'H': 1.22, 'I': 0.97, 'K': 1.23, 'L': 1.3,
'M': 1.47, 'N': 0.9, 'P': 0.52, 'Q': 1.27, 'R': 0.96,
'S': 0.82, 'T': 0.82, 'V': 0.91, 'W': 0.99, 'Y': 0.72
}
# Janin J. Nature 277:491-492(1979)
accessibility_Janin = {
'A': 6.6, 'C': 0.9, 'D': 7.7, 'E': 5.7, 'F': 2.4,
'G': 6.7, 'H': 2.5, 'I': 2.8, 'K': 10.3, 'L': 4.8,
'M': 1.0, 'N': 6.7, 'P': 4.8, 'Q': 5.2, 'R': 4.5,
'S': 9.4, 'T': 7.0, 'V': 4.5, 'W': 1.4, 'Y': 5.1
}
# Bhaskaran R., Ponnuswamy P.K. Int. J. Pept. Protein. Res. 32:242-255(1988)
avg_flex_index = {
'A': 0.36, 'C': 0.35, 'D': 0.51, 'E': 0.5, 'F': 0.31,
'G': 0.54, 'H': 0.32, 'I': 0.46, 'K': 0.47, 'L': 0.37,
'M': 0.3, 'N': 0.46, 'P': 0.51, 'Q': 0.49, 'R': 0.53,
'S': 0.51, 'T': 0.44, 'V': 0.39, 'W': 0.31, 'Y': 0.42
}
# Levitt M. Biochemistry 17:4277-4285(1978)
beta_sheet_Levitt = {
'A': 0.9, 'C': 0.74, 'D': 0.72, 'E': 0.75, 'F': 1.32,
'G': 0.92, 'H': 1.08, 'I': 1.45, 'K': 0.77, 'L': 1.02,
'M': 0.97, 'N': 0.76, 'P': 0.64, 'Q': 0.8, 'R': 0.99,
'S': 0.95, 'T': 1.21, 'V': 1.49, 'W': 1.14, 'Y': 1.25
}
# Levitt M. Biochemistry 17:4277-4285(1978)
beta_turn_Levitt = {
'A': 0.77, 'C': 0.81, 'D': 1.41, 'E': 0.99, 'F': 0.59,
'G': 1.64, 'H': 0.68, 'I': 0.51, 'K': 0.96, 'L': 0.58,
'M': 0.41, 'N': 1.28, 'P': 1.91, 'Q': 0.98, 'R': 0.88,
'S': 1.32, 'T': 1.04, 'V': 0.47, 'W': 0.76, 'Y': 1.05
}
# Zimmerman J.M., Eliezer N., Simha R. J. Theor. Biol. 21:170-201(1968)
bulkiness = {
'A': 11.5, 'C': 13.46, 'D': 11.68, 'E': 13.57, 'F': 19.8,
'G': 3.4, 'H': 13.69, 'I': 21.4, 'K': 15.71, 'L': 21.4,
'M': 16.25, 'N': 12.82, 'P': 17.43, 'Q': 14.45, 'R': 14.28,
'S': 9.47, 'T': 15.77, 'V': 21.57, 'W': 21.67, 'Y': 18.03
}
# Kyte J., Doolittle R.F. J. Mol. Biol. 157:105-132(1982)
hydropathicity = {
'A': 1.8, 'C': 2.5, 'D': -3.5, 'E': -3.5, 'F': 2.8,
'G': -0.4, 'H': -3.2, 'I': 4.5, 'K': -3.9, 'L': 3.8,
'M': 1.9, 'N': -3.5, 'P': -1.6, 'Q': -3.5, 'R': -4.5,
'S': -0.8, 'T': -0.7, 'V': 4.2, 'W': -0.9, 'Y': -1.3
}
# http://biopython.org/DIST/docs/api/Bio.PDB.DSSP%27-pysrc.html
# Sander & Rost, (1994), Proteins, 20:216-226
max_asa = {
'A': 106.0, 'C': 135.0, 'D': 163.0, 'E': 194.0, 'F': 197.0,
'G': 84.0, 'H': 184.0, 'I': 169.0, 'K': 205.0, 'L': 164.0,
'M': 188.0, 'N': 157.0, 'P': 136.0, 'Q': 198.0, 'R': 135.0,
'S': 130.0, 'T': 142.0, 'V': 142.0, 'W': 227.0, 'Y': 222.0
}
# Grantham R. Science 185:862-864(1974)
polarity_Grantham = {
'A': 8.1, 'C': 5.5, 'D': 13.0, 'E': 12.3, 'F': 5.2,
'G': 9.0, 'H': 10.4, 'I': 5.2, 'K': 11.3, 'L': 4.9,
'M': 5.7, 'N': 11.6, 'P': 8.0, 'Q': 10.5, 'R': 10.5,
'S': 9.2, 'T': 8.6, 'V': 5.9, 'W': 5.4, 'Y': 6.2
}
# Zimmerman J.M., Eliezer N., Simha R. J. Theor. Biol. 21:170-201(1968)
polarity_Zimmerman = {
'A': 0.0, 'C': 1.48, 'D': 49.7, 'E': 49.9, 'F': 0.35,
'G': 0.0, 'H': 51.6, 'I': 0.13, 'K': 49.5, 'L': 0.13,
'M': 1.43, 'N': 3.38, 'P': 1.58, 'Q': 3.53, 'R': 52.0,
'S': 1.67, 'T': 1.66, 'V': 0.13, 'W': 2.1, 'Y': 1.61
}
# Fraga S. Can. J. Chem. 60:2606-2610(1982)
recognition_factors = {
'A': 78.0, 'C': 89.0, 'D': 81.0, 'E': 78.0, 'F': 81.0,
'G': 84.0, 'H': 84.0, 'I': 88.0, 'K': 87.0, 'L': 85.0,
'M': 80.0, 'N': 94.0, 'P': 91.0, 'Q': 87.0, 'R': 95.0,
'S': 107.0, 'T': 93.0, 'V': 89.0, 'W': 104.0, 'Y': 84.0
}
# Jones. D.D. J. Theor. Biol. 50:167-184(1975)
refractivity = {
'A': 4.34, 'C': 35.77, 'D': 12.0, 'E': 17.26, 'F': 29.4,
'G': 0.0, 'H': 21.81, 'I': 19.06, 'K': 21.29, 'L': 18.78,
'M': 21.64, 'N': 13.28, 'P': 10.93, 'Q': 17.56, 'R': 26.66,
'S': 6.35, 'T': 11.01, 'V': 13.92, 'W': 42.53, 'Y': 31.53
}
# Dayhoff M.O., Schwartz R.M., Orcutt B.C. In "Atlas of Protein Sequence and Structure", Vol.5, Suppl.3 (1978)
relative_mutability = {
'A': 100.0, 'C': 20.0, 'D': 106.0, 'E': 102.0, 'F': 41.0,
'G': 49.0, 'H': 66.0, 'I': 96.0, 'K': 56.0, 'L': 40.0,
'M': 94.0, 'N': 134.0, 'P': 56.0, 'Q': 93.0, 'R': 65.0,
'S': 120.0, 'T': 97.0, 'V': 74.0, 'W': 18.0, 'Y': 41.0
}
# Meek J.L. Proc. Natl. Acad. Sci. USA 77:1632-1636(1980)
retention_coeff_hplc_pH7pt4 = {
'A': 0.5, 'C': -6.8, 'D': -8.2, 'E': -16.9, 'F': 13.2,
'G': 0.0, 'H': -3.5, 'I': 13.9, 'K': 0.1, 'L': 8.8,
'M': 4.8, 'N': 0.8, 'P': 6.1, 'Q': -4.8, 'R': 0.8,
'S': 1.2, 'T': 2.7, 'V': 2.7, 'W': 14.9, 'Y': 6.1
}
# Zhao, G., London E. Protein Sci. 15:1987-2001(2006)
transmembrane_tendancy = {
'A': 0.38, 'C': -0.3, 'D': -3.27, 'E': -2.9, 'F': 1.98,
'G': -0.19, 'H': -1.44, 'I': 1.97, 'K': -3.46, 'L': 1.82,
'M': 1.4, 'N': -1.62, 'P': -1.44, 'Q': -1.84, 'R': -2.57,
'S': -0.53, 'T': -0.32, 'V': 1.46, 'W': 1.53, 'Y': 0.49
}
# Bairoch A. Release notes for UniProtKB/Swiss-Prot release 2013_04 - April 2013
uniprot_composition_2013 = {
'A': 8.25, 'C': 1.37, 'D': 5.45, 'E': 6.75, 'F': 3.86,
'G': 7.07, 'H': 2.27, 'I': 5.96, 'K': 5.84, 'L': 9.66,
'M': 2.42, 'N': 4.06, 'P': 4.7, 'Q': 3.93, 'R': 5.53,
'S': 6.56, 'T': 5.34, 'V': 6.87, 'W': 1.08, 'Y': 2.92
}
number_of_codons = {
'A': 4, 'C': 1, 'D': 2, 'E': 2, 'F': 2,
'G': 4, 'H': 2, 'I': 3, 'K': 2, 'L': 6,
'M': 1, 'N': 2, 'P': 4, 'Q': 2, 'R': 6,
'S': 6, 'T': 4, 'V': 4, 'W': 1, 'Y': 2
}
# pI, pk_COOH, pK_NH3, pK_Rgroup all taken from:
# http://www.sigmaaldrich.com/life-science/metabolomics/learning-center/amino-acid-reference-chart.html#prop'
# D. R. Lide, Handbook of Chemistry and Physics, 72nd Edition, CRC Press, Boca Raton, FL, 1991.
pI = {
'A': 6.0, 'C': 5.07, 'D': 2.77, 'E': 3.22, 'F': 5.48,
'G': 5.97, 'H': 7.59, 'I': 6.02, 'K': 9.74, 'L': 5.98,
'M': 5.74, 'N': 5.41, 'P': 6.3, 'Q': 5.65, 'R': 10.76,
'S': 5.68, 'T': 5.6, 'V': 5.96, 'W': 5.89, 'Y': 5.66
}
pK_COOH = {
'A': 2.34, 'C': 1.96, 'D': 1.88, 'E': 2.19, 'F': 1.83,
'G': 2.34, 'H': 1.82, 'I': 2.36, 'K': 2.18, 'L': 2.36,
'M': 2.28, 'N': 2.02, 'P': 1.99, 'Q': 2.17, 'R': 2.17,
'S': 2.21, 'T': 2.09, 'V': 2.32, 'W': 2.83, 'Y': 2.2
}
pK_NH3 = {
'A': 9.69, 'C': 10.28, 'D': 9.6, 'E': 9.67, 'F': 9.13,
'G': 9.6, 'H': 9.17, 'I': 9.6, 'K': 8.95, 'L': 9.6,
'M': 9.21, 'N': 8.8, 'P': 10.6, 'Q': 9.13, 'R': 9.04,
'S': 9.15, 'T': 9.1, 'V': 9.62, 'W': 9.39, 'Y': 9.11
}
pK_Rgroup = {
'A': None, 'C': 8.18, 'D': 3.65, 'E': 4.25, 'F': None,
'G': None, 'H': 6.0, 'I': None, 'K': 10.53, 'L': None,
'M': None, 'N': None, 'P': None, 'Q': None, 'R': 12.48,
'S': None, 'T': None, 'V': None, 'W': None, 'Y': 10.07
}
def get_aa_code(aa_letter):
    """ Get three-letter aa code if possible. If not, return None.

    If three-letter code is None, will have to find this later from the filesystem.

    Parameters
    ----------
    aa_letter : str
        One-letter amino acid code.

    Returns
    -------
    aa_code : str, or None
        Three-letter aa code.
    """
    if aa_letter == 'X':
        return None
    # standard_amino_acids maps one-letter codes to three-letter codes, so a
    # direct O(1) lookup replaces the original linear scan over .items().
    # .get() returns None for unknown letters, matching the old behaviour.
    return standard_amino_acids.get(aa_letter)
def get_aa_letter(aa_code):
    """ Get one-letter version of aa_code if possible. If not, return 'X'.

    Parameters
    ----------
    aa_code : str
        Three-letter amino acid code.

    Returns
    -------
    aa_letter : str
        One-letter aa code.
        Default value is 'X'.
    """
    # Reverse lookup: stop at the first matching three-letter code instead of
    # scanning the whole dict (one/three-letter codes pair uniquely, so the
    # first match is the only match).
    return next((letter for letter, code in standard_amino_acids.items()
                 if code == aa_code), 'X')
def get_aa_info(code):
    """Get dictionary of information relating to a new amino acid code not currently in the database.

    Notes
    -----
    Use this function to get a dictionary that is then to be sent to the function add_amino_acid_to_json().
    use to fill in rows of amino_acid table for new amino acid code.

    Parameters
    ----------
    code : str
        Three-letter amino acid code.

    Raises
    ------
    IOError
        If unable to locate the page associated with the amino acid name on the PDBE site.

    Returns
    -------
    aa_dict : dict
        Keys are AminoAcidDB field names.
        Values are the str values for the new amino acid, scraped from the PDBE if possible. None if not found.
    """
    # Non-standard residues get the generic one-letter placeholder.
    letter = 'X'
    # Try to get content from PDBE.
    url_string = 'http://www.ebi.ac.uk/pdbe-srv/pdbechem/chemicalCompound/show/{0}'.format(code)
    r = requests.get(url_string)
    # Raise error if content not obtained.
    if not r.ok:
        raise IOError("Could not get to url {0}".format(url_string))
    # Parse r.text in an ugly way to get the required information.
    # NOTE(review): this scrapes raw HTML; the splits/indices below assume the
    # historical PDBE page layout and will break if the markup changes.
    description = r.text.split('<h3>Molecule name')[1].split('</tr>')[0]
    # Third line of the table cell; truncated to 255 chars (presumably a DB
    # column limit -- confirm against the AminoAcidDB schema).
    description = description.strip().split('\n')[3].strip()[:255]
    modified = r.text.split("<h3>Standard parent ")[1].split('</tr>')[0]
    modified = modified.replace(" ", "").replace('\n', '').split('<')[-3].split('>')[-1]
    if modified == "NotAssigned":
        modified = None
    # Add the required information to a dictionary which can then be passed to add_amino_acid_to_json.
    aa_dict = {'code': code, 'description': description, 'modified': modified, 'letter': letter}
    return aa_dict
def add_amino_acid_to_json(code, description, letter='X', modified=None, force_add=False):
    """ Add an amino acid to the amino_acids.json file used to populate the amino_acid table.

    Parameters
    ----------
    code : str
        New code to be added to amino acid table.
    description : str
        Description of the amino acid, e.g. 'amidated terminal carboxy group'.
    letter : str, optional
        One letter code for the amino acid.
        Defaults to 'X'
    modified : str or None, optional
        Code of modified amino acid, e.g. 'ALA', or None.
        Defaults to None
    force_add : bool, optional
        If True, will over-write existing dictionary value for code if already in amino_acids.json.
        If False, then an IOError is raised if code is already in amino_acids.json.

    Raises
    ------
    IOError
        If code is already in amino_acids.json and force_add is False.

    Returns
    -------
    None
    """
    # Refuse to clobber an existing entry unless the caller forces it.
    if code in amino_acids_dict and not force_add:
        raise IOError("{0} is already in the amino_acids dictionary, with values: {1}".format(
            code, amino_acids_dict[code]))
    # Insert (or overwrite) the entry, then persist the whole dictionary.
    amino_acids_dict[code] = {'description': description, 'letter': letter, 'modified': modified}
    with open(_amino_acids_json_path, 'w') as handle:
        json.dump(amino_acids_dict, handle)
    return

__author__ = 'Jack W. Heal'
| woolfson-group/isambard | isambard/tools/amino_acids.py | Python | mit | 15,990 | [
"Biopython"
] | e94df5a0f27765ba2f0c19bd3b478bb1b6a9faea2fc03094025a1c387c7237ed |
"""
:mod:`Mutators` -- mutation methods module
=====================================================================
In this module we have the genetic operators of mutation for each chromosome representation.
"""
import Util
from random import randint as rand_randint, gauss as rand_gauss, uniform as rand_uniform
from random import choice as rand_choice
import Consts
import GTree
#############################
## 1D Binary String ##
#############################

def G1DBinaryStringMutatorSwap(genome, **args):
    """ The 1D Binary String Swap Mutator """
    if args["pmut"] <= 0.0: return 0
    stringLength = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * (stringLength)

    if mutations < 1.0:
        # Fewer than one expected mutation: flip a biased coin per position.
        mutations = 0
        for it in xrange(stringLength):
            if Util.randomFlipCoin(args["pmut"]):
                Util.listSwapElement(genome, it, rand_randint(0, stringLength-1))
                mutations+=1
    else:
        # Otherwise perform round(mutations) swaps of randomly chosen positions.
        for it in xrange(int(round(mutations))):
            Util.listSwapElement(genome, rand_randint(0, stringLength-1),
                                 rand_randint(0, stringLength-1))

    # Number of mutations applied.
    return int(mutations)
def G1DBinaryStringMutatorFlip(genome, **args):
    """ The classical flip mutator for binary strings """
    if args["pmut"] <= 0.0: return 0
    stringLength = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * (stringLength)

    if mutations < 1.0:
        # Fewer than one expected mutation: flip a biased coin per bit.
        mutations = 0
        for it in xrange(stringLength):
            if Util.randomFlipCoin(args["pmut"]):
                if genome[it] == 0: genome[it] = 1
                else: genome[it] = 0
                mutations+=1
    else:
        # Otherwise invert round(mutations) randomly chosen bits.
        for it in xrange(int(round(mutations))):
            which = rand_randint(0, stringLength-1)
            if genome[which] == 0: genome[which] = 1
            else: genome[which] = 0

    return int(mutations)
####################
## 1D List ##
####################

def G1DListMutatorSwap(genome, **args):
    """ The mutator of G1DList, Swap Mutator

    .. note:: this mutator is :term:`Data Type Independent`

    """
    if args["pmut"] <= 0.0: return 0
    listSize = len(genome) - 1
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * (listSize+1)

    if mutations < 1.0:
        # Fewer than one expected mutation: flip a biased coin per gene.
        mutations = 0
        for it in xrange(listSize+1):
            if Util.randomFlipCoin(args["pmut"]):
                Util.listSwapElement(genome, it, rand_randint(0, listSize))
                mutations+=1
    else:
        # Otherwise perform round(mutations) random swaps.
        for it in xrange(int(round(mutations))):
            Util.listSwapElement(genome, rand_randint(0, listSize), rand_randint(0, listSize))

    return int(mutations)
def G1DListMutatorSIM(genome, **args):
    """ The mutator of G1DList, Simple Inversion Mutation

    .. note:: this mutator is :term:`Data Type Independent`

    """
    mutations = 0
    if args["pmut"] <= 0.0: return 0

    # Draw two cut points over the whole genome, then order them.
    cuts = [rand_randint(0, len(genome)), rand_randint(0, len(genome))]

    if cuts[0] > cuts[1]:
        Util.listSwapElement(cuts, 0, 1)

    if (cuts[1]-cuts[0]) <= 0:
        # Empty segment: re-draw the second cut point above the first.
        cuts[1] = rand_randint(cuts[0], len(genome))

    if Util.randomFlipCoin(args["pmut"]):
        # Reverse the slice between the cut points in place.
        part = genome[cuts[0]:cuts[1]]
        if len(part) == 0: return 0
        part.reverse()
        genome[cuts[0]:cuts[1]] = part
        mutations += 1

    return mutations
def G1DListMutatorIntegerRange(genome, **args):
    """ Simple integer range mutator for G1DList

    Accepts the *rangemin* and *rangemax* genome parameters, both optional.

    """
    if args["pmut"] <= 0.0: return 0
    listSize = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * listSize

    if mutations < 1.0:
        # Fewer than one expected mutation: flip a biased coin per gene.
        mutations = 0
        for it in xrange(listSize):
            if Util.randomFlipCoin(args["pmut"]):
                # Replace the gene with a fresh uniform integer draw.
                genome[it] = rand_randint(genome.getParam("rangemin", Consts.CDefRangeMin),
                                          genome.getParam("rangemax", Consts.CDefRangeMax))
                mutations += 1
    else:
        # Otherwise redraw round(mutations) randomly chosen genes.
        for it in xrange(int(round(mutations))):
            which_gene = rand_randint(0, listSize-1)
            genome[which_gene] = rand_randint(genome.getParam("rangemin", Consts.CDefRangeMin),
                                              genome.getParam("rangemax", Consts.CDefRangeMax))

    return int(mutations)
def G1DListMutatorRealRange(genome, **args):
    """ Simple real range mutator for G1DList

    Accepts the *rangemin* and *rangemax* genome parameters, both optional.

    """
    if args["pmut"] <= 0.0: return 0
    listSize = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * (listSize)

    if mutations < 1.0:
        # Fewer than one expected mutation: flip a biased coin per gene.
        mutations = 0
        for it in xrange(listSize):
            if Util.randomFlipCoin(args["pmut"]):
                # Replace the gene with a fresh uniform real draw.
                genome[it] = rand_uniform(genome.getParam("rangemin", Consts.CDefRangeMin),
                                          genome.getParam("rangemax", Consts.CDefRangeMax))
                mutations += 1
    else:
        # Otherwise redraw round(mutations) randomly chosen genes.
        for it in xrange(int(round(mutations))):
            which_gene = rand_randint(0, listSize-1)
            genome[which_gene] = rand_uniform(genome.getParam("rangemin", Consts.CDefRangeMin),
                                              genome.getParam("rangemax", Consts.CDefRangeMax))

    return int(mutations)
def G1DListMutatorIntegerGaussian(genome, **args):
    """ A gaussian mutator for G1DList of Integers

    Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
    accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
    represents the mean and the std. dev. of the random distribution.

    """
    if args["pmut"] <= 0.0: return 0
    listSize = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * (listSize)

    mu = genome.getParam("gauss_mu")
    sigma = genome.getParam("gauss_sigma")

    if mu is None:
        mu = Consts.CDefG1DListMutIntMU
    if sigma is None:
        sigma = Consts.CDefG1DListMutIntSIGMA

    if mutations < 1.0:
        mutations = 0
        for it in xrange(listSize):
            if Util.randomFlipCoin(args["pmut"]):
                # Add integer gaussian noise, clamped to [rangemin, rangemax].
                final_value = genome[it] + int(rand_gauss(mu, sigma))
                final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
                final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
                genome[it] = final_value
                mutations += 1
    else:
        for it in xrange(int(round(mutations))):
            which_gene = rand_randint(0, listSize-1)
            final_value = genome[which_gene] + int(rand_gauss(mu, sigma))
            final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
            final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
            genome[which_gene] = final_value

    return int(mutations)
def G1DListMutatorRealGaussian(genome, **args):
    """ The mutator of G1DList, Gaussian Mutator

    Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
    accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
    represents the mean and the std. dev. of the random distribution.

    """
    if args["pmut"] <= 0.0: return 0
    listSize = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * (listSize)

    mu = genome.getParam("gauss_mu")
    sigma = genome.getParam("gauss_sigma")

    if mu is None:
        mu = Consts.CDefG1DListMutRealMU
    if sigma is None:
        sigma = Consts.CDefG1DListMutRealSIGMA

    if mutations < 1.0:
        mutations = 0
        for it in xrange(listSize):
            if Util.randomFlipCoin(args["pmut"]):
                # Add real gaussian noise, clamped to [rangemin, rangemax].
                final_value = genome[it] + rand_gauss(mu, sigma)
                final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
                final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
                genome[it] = final_value
                mutations += 1
    else:
        for it in xrange(int(round(mutations))):
            which_gene = rand_randint(0, listSize-1)
            final_value = genome[which_gene] + rand_gauss(mu, sigma)
            final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
            final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
            genome[which_gene] = final_value

    return int(mutations)
def G1DListMutatorIntegerBinary(genome, **args):
    """ The mutator of G1DList, the binary mutator

    This mutator will random change the 0 and 1 elements of the 1D List.

    """
    if args["pmut"] <= 0.0: return 0
    listSize = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * (listSize)

    if mutations < 1.0:
        mutations = 0
        for it in xrange(listSize):
            if Util.randomFlipCoin(args["pmut"]):
                # Only genes that are exactly 0 or 1 are toggled; NOTE the
                # counter still increments even when the gene is neither.
                if genome[it] == 0: genome[it] = 1
                elif genome[it] == 1: genome[it] = 0
                mutations += 1
    else:
        for it in xrange(int(round(mutations))):
            which_gene = rand_randint(0, listSize-1)
            if genome[which_gene] == 0: genome[which_gene] = 1
            elif genome[which_gene] == 1: genome[which_gene] = 0

    return int(mutations)
def G1DListMutatorAllele(genome, **args):
    """ The mutator of G1DList, Allele Mutator

    To use this mutator, you must specify the *allele* genome parameter with the
    :class:`GAllele.GAlleles` instance.

    """
    if args["pmut"] <= 0.0: return 0
    listSize = len(genome) - 1
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * (listSize+1)

    allele = genome.getParam("allele", None)
    if allele is None:
        Util.raiseException("to use the G1DListMutatorAllele, you must specify the 'allele' parameter", TypeError)

    if mutations < 1.0:
        mutations = 0
        for it in xrange(listSize+1):
            if Util.randomFlipCoin(args["pmut"]):
                # Draw a replacement from the allele set of this position.
                new_val = allele[it].getRandomAllele()
                genome[it] = new_val
                mutations+=1
    else:
        for it in xrange(int(round(mutations))):
            which_gene = rand_randint(0, listSize)
            new_val = allele[which_gene].getRandomAllele()
            genome[which_gene] = new_val

    return int(mutations)
####################
## 2D List ##
####################

def G2DListMutatorSwap(genome, **args):
    """ The mutator of G1DList, Swap Mutator

    .. note:: this mutator is :term:`Data Type Independent`

    """
    if args["pmut"] <= 0.0: return 0
    height, width = genome.getSize()
    elements = height * width
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements

    if mutations < 1.0:
        mutations = 0
        # Visit every cell; swap it with a random cell with probability pmut.
        for i in xrange(height):
            for j in xrange(width):
                if Util.randomFlipCoin(args["pmut"]):
                    index_b = (rand_randint(0, height-1), rand_randint(0, width-1))
                    Util.list2DSwapElement(genome.genomeList, (i,j), index_b)
                    mutations+=1
    else:
        # Otherwise swap round(mutations) random cell pairs.
        for it in xrange(int(round(mutations))):
            index_a = (rand_randint(0, height-1), rand_randint(0, width-1))
            index_b = (rand_randint(0, height-1), rand_randint(0, width-1))
            Util.list2DSwapElement(genome.genomeList, index_a, index_b)

    return int(mutations)
def G2DListMutatorIntegerRange(genome, **args):
    """ Simple integer range mutator for G2DList

    Accepts the *rangemin* and *rangemax* genome parameters, both optional.

    """
    if args["pmut"] <= 0.0: return 0
    height, width = genome.getSize()
    elements = height * width
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements

    range_min = genome.getParam("rangemin", Consts.CDefRangeMin)
    range_max = genome.getParam("rangemax", Consts.CDefRangeMax)

    if mutations < 1.0:
        mutations = 0
        for i in xrange(genome.getHeight()):
            for j in xrange(genome.getWidth()):
                if Util.randomFlipCoin(args["pmut"]):
                    random_int = rand_randint(range_min, range_max)
                    genome.setItem(i, j, random_int)
                    mutations += 1
    else:
        for it in xrange(int(round(mutations))):
            which_x = rand_randint(0, genome.getWidth()-1)
            which_y = rand_randint(0, genome.getHeight()-1)
            random_int = rand_randint(range_min, range_max)
            # setItem takes (row, col), i.e. (which_y, which_x).
            genome.setItem(which_y, which_x, random_int)

    return int(mutations)
def G2DListMutatorIntegerGaussian(genome, **args):
    """ A gaussian mutator for G2DList of Integers

    Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
    accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
    represents the mean and the std. dev. of the random distribution.

    """
    if args["pmut"] <= 0.0: return 0
    height, width = genome.getSize()
    elements = height * width
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements

    mu = genome.getParam("gauss_mu")
    sigma = genome.getParam("gauss_sigma")

    if mu is None:
        mu = Consts.CDefG2DListMutIntMU
    if sigma is None:
        sigma = Consts.CDefG2DListMutIntSIGMA

    if mutations < 1.0:
        mutations = 0
        for i in xrange(genome.getHeight()):
            for j in xrange(genome.getWidth()):
                if Util.randomFlipCoin(args["pmut"]):
                    # Add integer gaussian noise, clamped to [rangemin, rangemax].
                    final_value = genome[i][j] + int(rand_gauss(mu, sigma))
                    final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
                    final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
                    genome.setItem(i, j, final_value)
                    mutations += 1
    else:
        for it in xrange(int(round(mutations))):
            which_x = rand_randint(0, genome.getWidth()-1)
            which_y = rand_randint(0, genome.getHeight()-1)
            final_value = genome[which_y][which_x] + int(rand_gauss(mu, sigma))
            final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
            final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
            genome.setItem(which_y, which_x, final_value)

    return int(mutations)
def G2DListMutatorAllele(genome, **args):
    """ The mutator of G2DList, Allele Mutator

    To use this mutator, you must specify the *allele* genome parameter with the
    :class:`GAllele.GAlleles` instance.

    .. warning:: the :class:`GAllele.GAlleles` instance must have the homogeneous flag enabled

    """
    if args["pmut"] <= 0.0: return 0
    listSize = genome.getHeight()*genome.getWidth() - 1
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * (listSize+1)

    allele = genome.getParam("allele", None)
    if allele is None:
        Util.raiseException("to use the G2DListMutatorAllele, you must specify the 'allele' parameter", TypeError)

    if allele.homogeneous == False:
        Util.raiseException("to use the G2DListMutatorAllele, the 'allele' must be homogeneous")

    if mutations < 1.0:
        mutations = 0
        for i in xrange(genome.getHeight()):
            for j in xrange(genome.getWidth()):
                if Util.randomFlipCoin(args["pmut"]):
                    # Homogeneous allele set: allele[0] serves every cell.
                    new_val = allele[0].getRandomAllele()
                    genome.setItem(i, j, new_val)
                    mutations+=1
    else:
        for it in xrange(int(round(mutations))):
            # NOTE: despite the names, which_x is drawn over the height and
            # which_y over the width, so setItem(which_x, which_y) is a
            # consistent (row, col) call.
            which_x = rand_randint(0, genome.getHeight()-1)
            which_y = rand_randint(0, genome.getWidth()-1)
            new_val = allele[0].getRandomAllele()
            genome.setItem(which_x, which_y, new_val)

    return int(mutations)
def G2DListMutatorRealGaussian(genome, **args):
    """ A gaussian mutator for G2DList of Real

    Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
    accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
    represents the mean and the std. dev. of the random distribution.

    """
    if args["pmut"] <= 0.0: return 0
    height, width = genome.getSize()
    elements = height * width
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements

    mu = genome.getParam("gauss_mu")
    sigma = genome.getParam("gauss_sigma")

    if mu is None:
        mu = Consts.CDefG2DListMutRealMU
    if sigma is None:
        sigma = Consts.CDefG2DListMutRealSIGMA

    if mutations < 1.0:
        mutations = 0
        for i in xrange(genome.getHeight()):
            for j in xrange(genome.getWidth()):
                if Util.randomFlipCoin(args["pmut"]):
                    # Add real gaussian noise, clamped to [rangemin, rangemax].
                    final_value = genome[i][j] + rand_gauss(mu, sigma)
                    final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
                    final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
                    genome.setItem(i, j, final_value)
                    mutations += 1
    else:
        for it in xrange(int(round(mutations))):
            which_x = rand_randint(0, genome.getWidth()-1)
            which_y = rand_randint(0, genome.getHeight()-1)
            final_value = genome[which_y][which_x] + rand_gauss(mu, sigma)
            final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
            final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
            genome.setItem(which_y, which_x, final_value)

    return int(mutations)
#############################
## 2D Binary String ##
#############################

def G2DBinaryStringMutatorSwap(genome, **args):
    """ The mutator of G2DBinaryString, Swap Mutator

    .. versionadded:: 0.6
       The *G2DBinaryStringMutatorSwap* function

    """
    if args["pmut"] <= 0.0: return 0
    height, width = genome.getSize()
    elements = height * width
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements

    if mutations < 1.0:
        mutations = 0
        # Visit every cell; swap it with a random cell with probability pmut.
        for i in xrange(height):
            for j in xrange(width):
                if Util.randomFlipCoin(args["pmut"]):
                    index_b = (rand_randint(0, height-1), rand_randint(0, width-1))
                    Util.list2DSwapElement(genome.genomeString, (i,j), index_b)
                    mutations+=1
    else:
        # Otherwise swap round(mutations) random cell pairs.
        for it in xrange(int(round(mutations))):
            index_a = (rand_randint(0, height-1), rand_randint(0, width-1))
            index_b = (rand_randint(0, height-1), rand_randint(0, width-1))
            Util.list2DSwapElement(genome.genomeString, index_a, index_b)

    return int(mutations)
def G2DBinaryStringMutatorFlip(genome, **args):
    """ A flip mutator for G2DBinaryString

    .. versionadded:: 0.6
       The *G2DBinaryStringMutatorFlip* function

    Returns the number of mutations applied.
    """
    if args["pmut"] <= 0.0: return 0
    height, width = genome.getSize()
    elements = height * width
    # Expected number of mutations for this genome.
    # (range, not xrange, iterates identically on Python 2 and also runs on 3.)
    mutations = args["pmut"] * elements

    if mutations < 1.0:
        # Fewer than one expected mutation: flip a biased coin per cell.
        mutations = 0
        for i in range(genome.getHeight()):
            for j in range(genome.getWidth()):
                if Util.randomFlipCoin(args["pmut"]):
                    if genome[i][j] == 0: genome.setItem(i, j, 1)
                    else: genome.setItem(i, j, 0)
                    mutations += 1
    else:
        # Otherwise invert round(mutations) randomly chosen cells.
        for it in range(int(round(mutations))):
            which_x = rand_randint(0, genome.getWidth()-1)
            which_y = rand_randint(0, genome.getHeight()-1)
            # BUGFIX: this branch previously read genome[i][j], but i and j
            # are not defined here (the per-cell loops never ran in this
            # branch), raising NameError.  The randomly chosen cell
            # (which_y, which_x) must be inspected instead.
            if genome[which_y][which_x] == 0: genome.setItem(which_y, which_x, 1)
            else: genome.setItem(which_y, which_x, 0)

    return int(mutations)
#################
## Tree ##
#################

def GTreeMutatorSwap(genome, **args):
    """ The mutator of GTree, Swap Mutator

    .. versionadded:: 0.6
       The *GTreeMutatorSwap* function

    """
    if args["pmut"] <= 0.0: return 0
    elements = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements

    if mutations < 1.0:
        mutations = 0
        for i in xrange(len(genome)):
            if Util.randomFlipCoin(args["pmut"]):
                mutations += 1
                # Swap the payloads of two randomly chosen nodes.
                nodeOne = genome.getRandomNode()
                nodeTwo = genome.getRandomNode()
                nodeOne.swapNodeData(nodeTwo)
    else:
        for it in xrange(int(round(mutations))):
            nodeOne = genome.getRandomNode()
            nodeTwo = genome.getRandomNode()
            nodeOne.swapNodeData(nodeTwo)

    return int(mutations)
def GTreeMutatorIntegerRange(genome, **args):
    """ The mutator of GTree, Integer Range Mutator

    Accepts the *rangemin* and *rangemax* genome parameters, both optional.

    .. versionadded:: 0.6
       The *GTreeMutatorIntegerRange* function

    """
    if args["pmut"] <= 0.0: return 0
    elements = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements

    range_min = genome.getParam("rangemin", Consts.CDefRangeMin)
    range_max = genome.getParam("rangemax", Consts.CDefRangeMax)

    if mutations < 1.0:
        mutations = 0
        for i in xrange(len(genome)):
            if Util.randomFlipCoin(args["pmut"]):
                mutations += 1
                # Replace a random node's payload with a uniform integer draw.
                rand_node = genome.getRandomNode()
                random_int = rand_randint(range_min, range_max)
                rand_node.setData(random_int)
    else:
        for it in xrange(int(round(mutations))):
            rand_node = genome.getRandomNode()
            random_int = rand_randint(range_min, range_max)
            rand_node.setData(random_int)

    return int(mutations)
def GTreeMutatorRealRange(genome, **args):
    """ The mutator of GTree, Real Range Mutator

    Accepts the *rangemin* and *rangemax* genome parameters, both optional.

    .. versionadded:: 0.6
       The *GTreeMutatorRealRange* function

    """
    if args["pmut"] <= 0.0: return 0
    elements = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements

    range_min = genome.getParam("rangemin", Consts.CDefRangeMin)
    range_max = genome.getParam("rangemax", Consts.CDefRangeMax)

    if mutations < 1.0:
        mutations = 0
        for i in xrange(len(genome)):
            if Util.randomFlipCoin(args["pmut"]):
                mutations += 1
                # Replace a random node's payload with a uniform real draw.
                rand_node = genome.getRandomNode()
                random_real = rand_uniform(range_min, range_max)
                rand_node.setData(random_real)
    else:
        for it in xrange(int(round(mutations))):
            rand_node = genome.getRandomNode()
            random_real = rand_uniform(range_min, range_max)
            rand_node.setData(random_real)

    return int(mutations)
def GTreeMutatorIntegerGaussian(genome, **args):
    """ A gaussian mutator for GTree of Integers

    Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
    accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
    represents the mean and the std. dev. of the random distribution.

    """
    if args["pmut"] <= 0.0: return 0
    elements = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements

    # Falls back on the 1D-list defaults when the tree sets no parameters.
    mu = genome.getParam("gauss_mu", Consts.CDefG1DListMutIntMU)
    sigma = genome.getParam("gauss_sigma", Consts.CDefG1DListMutIntSIGMA)

    if mutations < 1.0:
        mutations = 0
        for i in xrange(len(genome)):
            if Util.randomFlipCoin(args["pmut"]):
                mutations += 1
                rand_node = genome.getRandomNode()
                # Add integer gaussian noise, clamped to [rangemin, rangemax].
                final_value = rand_node.getData() + int(rand_gauss(mu, sigma))
                final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
                final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
                rand_node.setData(final_value)
    else:
        for it in xrange(int(round(mutations))):
            rand_node = genome.getRandomNode()
            final_value = rand_node.getData() + int(rand_gauss(mu, sigma))
            final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
            final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
            rand_node.setData(final_value)

    return int(mutations)
def GTreeMutatorRealGaussian(genome, **args):
    """ A gaussian mutator for GTree of Real numbers

    Accepts the *rangemin* and *rangemax* genome parameters, both optional. Also
    accepts the parameter *gauss_mu* and the *gauss_sigma* which respectively
    represents the mean and the std. dev. of the random distribution.

    """
    if args["pmut"] <= 0.0: return 0
    elements = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements

    # Falls back on the 1D-list defaults when the tree sets no parameters.
    mu = genome.getParam("gauss_mu", Consts.CDefG1DListMutRealMU)
    sigma = genome.getParam("gauss_sigma", Consts.CDefG1DListMutRealSIGMA)

    if mutations < 1.0:
        mutations = 0
        for i in xrange(len(genome)):
            if Util.randomFlipCoin(args["pmut"]):
                mutations += 1
                rand_node = genome.getRandomNode()
                # Add real gaussian noise, clamped to [rangemin, rangemax].
                final_value = rand_node.getData() + rand_gauss(mu, sigma)
                final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
                final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
                rand_node.setData(final_value)
    else:
        for it in xrange(int(round(mutations))):
            rand_node = genome.getRandomNode()
            final_value = rand_node.getData() + rand_gauss(mu, sigma)
            final_value = min(final_value, genome.getParam("rangemax", Consts.CDefRangeMax))
            final_value = max(final_value, genome.getParam("rangemin", Consts.CDefRangeMin))
            rand_node.setData(final_value)

    return int(mutations)
###################
## Tree GP ##
###################

def GTreeGPMutatorOperation(genome, **args):
    """ The mutator of GTreeGP, Operation Mutator

    .. versionadded:: 0.6
       The *GTreeGPMutatorOperation* function

    """
    if args["pmut"] <= 0.0: return 0
    elements = len(genome)
    # Expected number of mutations for this genome.
    mutations = args["pmut"] * elements
    ga_engine = args["ga_engine"]

    gp_terminals = ga_engine.getParam("gp_terminals")
    assert gp_terminals is not None

    gp_function_set = ga_engine.getParam("gp_function_set")
    assert gp_function_set is not None

    if mutations < 1.0:
        mutations = 0
        for i in xrange(len(genome)):
            if Util.randomFlipCoin(args["pmut"]):
                mutations += 1
                rand_node = genome.getRandomNode()
                assert rand_node is not None
                if rand_node.getType() == Consts.nodeType["TERMINAL"]:
                    # Terminals are replaced by another random terminal.
                    term_operator = rand_choice(gp_terminals)
                else:
                    # Functions are replaced by another function of the SAME
                    # arity so the tree stays structurally valid.
                    op_len = gp_function_set[rand_node.getData()]
                    fun_candidates = []
                    for o, l in gp_function_set.items():
                        if l==op_len:
                            fun_candidates.append(o)

                    if len(fun_candidates) <= 0:
                        continue

                    term_operator = rand_choice(fun_candidates)
                rand_node.setData(term_operator)
    else:
        for it in xrange(int(round(mutations))):
            rand_node = genome.getRandomNode()
            assert rand_node is not None
            if rand_node.getType() == Consts.nodeType["TERMINAL"]:
                term_operator = rand_choice(gp_terminals)
            else:
                # Same same-arity replacement rule as above.
                op_len = gp_function_set[rand_node.getData()]
                fun_candidates = []
                for o, l in gp_function_set.items():
                    if l==op_len:
                        fun_candidates.append(o)

                if len(fun_candidates) <= 0:
                    continue

                term_operator = rand_choice(fun_candidates)
            rand_node.setData(term_operator)

    return int(mutations)
def GTreeGPMutatorSubtree(genome, **args):
    """ The mutator of GTreeGP, Subtree Mutator

    This mutator will recreate random subtree of the tree using the grow algorithm.

    .. versionadded:: 0.6
       The *GTreeGPMutatorSubtree* function

    """
    if args["pmut"] <= 0.0: return 0
    ga_engine = args["ga_engine"]
    max_depth = genome.getParam("max_depth", None)
    mutations = 0

    if max_depth is None:
        Util.raiseException("You must specify the max_depth genome parameter !", ValueError)

    # NOTE(review): the guard only rejects negative values although the
    # message demands >= 1, so max_depth == 0 slips through -- confirm intent.
    if max_depth < 0:
        Util.raiseException("The max_depth must be >= 1, if you want to use GTreeGPMutatorSubtree crossover !", ValueError)

    branch_list = genome.nodes_branch
    elements = len(branch_list)

    for i in xrange(elements):
        node = branch_list[i]
        assert node is not None

        if Util.randomFlipCoin(args["pmut"]):
            depth = genome.getNodeDepth(node)
            mutations += 1

            # Grow a fresh random subtree within the remaining depth budget.
            root_subtree = GTree.buildGTreeGPGrow(ga_engine, 0, max_depth-depth)
            node_parent = node.getParent()

            if node_parent is None:
                # The whole tree was replaced; stop mutating immediately.
                genome.setRoot(root_subtree)
                genome.processNodes()
                return mutations
            else:
                root_subtree.setParent(node_parent)
                node_parent.replaceChild(node, root_subtree)
            genome.processNodes()

    return int(mutations)
| HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/Pyevolve-0.6-py2.7.egg/pyevolve/Mutators.py | Python | gpl-2.0 | 27,943 | [
"Gaussian"
] | 003ed0cb026a3f9ecf532e8edb1efd55359c929c6f449650732912545ffcf612 |
"""
$Id: loggingrli.py,v 1.41 2006/04/24 14:49:23 jp Exp $
"""
import plastk.rl
from plastk.rl import RLI
from plastk.params import Parameter
from plastk import rand
#from Scientific.IO.NetCDF import NetCDFFile
from scipy.io.netcdf import netcdf_file as NetCDFFile
import time,sys,threading,os
NewColumn = 'new column'
class LoggingRLI(RLI):
    # Stem for the generated data file names; __init__ defaults it to self.name.
    filestem = Parameter(default='')
    # Signal numbers to trap during checkpointed runs.
    catch_signals = Parameter(default=[])
    # Extension of the pickled checkpoint file (see ckpt_filename).
    ckpt_extension = Parameter(default = '.ckpt')
    # Checkpoint at most once per this many steps / episodes.
    steps_per_ckpt = Parameter(sys.maxint)
    episodes_per_ckpt = Parameter(sys.maxint)
    # If True, pre-existing data files are renamed aside instead of deleted.
    rename_old_data = Parameter(default=True)
    gui_button_orient = Parameter(default='horizontal')

    # Attribute names pickled by ckpt_save and restored by ckpt_restore_state.
    ckpt_attribs = ['ep_count',
                    'step_count',
                    'steps_per_ckpt',
                    'episodes_per_ckpt',
                    'last_ckpt_step',
                    'last_ckpt_episode',
                    'last_sensation',
                    'next_action',
                    'env',
                    'agent']
    def __init__(self,**args):
        """Initialise logging state; the data files themselves are created
        later, in init()."""
        super(LoggingRLI,self).__init__(**args)
        # name -> (fn, netcdf-type, size) registries, filled via the
        # add_step_variable / add_episode_variable methods.
        self.step_vars = {}
        self.ep_vars = {}
        self.caught_signal = None
        # Default the file stem to the component's name.
        if not self.filestem:
            self.filestem = self.name
        self.episode_filename = self.filestem + '-episodes.cdf'
        self.step_filename = self.filestem + '-steps.cdf'
        self.checkpointing = False
        self.gui_root = False
        self.gui_runstate = None
        self.action = ''
        self.last_sensation = ''
    def init(self,agent,env,**kw):
        """Reset the counters and create the NetCDF episode file (and the
        step file, if any per-step variables were registered)."""
        super(LoggingRLI,self).init(agent,env,**kw)
        self.step_count = self.ep_count = 0

        # Move or delete any data file left over from a previous run.
        if os.access(self.episode_filename,os.F_OK):
            self.remove_or_rename(self.episode_filename)

        self.episode_data = ed = NetCDFFile(self.episode_filename,'w')
        # 'index' is the unlimited record dimension; 'value' holds scalars.
        ed.createDimension('index',None)
        ed.createDimension('value',1)
        ed.createVariable('start','d',('index','value'))
        ed.createVariable('length','d',('index','value'))
        ed.createVariable('reward','f',('index','value'))
        # Each user-registered episode variable gets its own size dimension.
        for name,(fn,type,size) in self.ep_vars.items():
            ed.createDimension(name+'_dim',size)
            ed.createVariable(name,type,('index',name+'_dim'))

        if self.step_vars:
            if os.access(self.step_filename,os.F_OK):
                self.remove_or_rename(self.step_filename)
            self.step_data = sd = NetCDFFile(self.step_filename,'a')
            sd.createDimension('index',None)
            for name,(fn,type,size) in self.step_vars.items():
                sd.createDimension(name+'_dim',size)
                sd.createVariable(name,type,('index',name+'_dim'))

        self.last_ckpt_step = 0
        self.last_ckpt_episode = 0
def remove_or_rename(self,filename):
# if the data file already exists either rename it or delete it
if not self.rename_old_data:
self.warning("Removing old data file:",filename)
os.remove(filename)
else:
i = 0
while True:
stem,ext = filename.split('.cdf')
new_filename = '%s-%d.cdf'%(stem,i)
if os.access(new_filename,os.F_OK):
i += 1
continue
self.warning("Renaming old data file to",new_filename)
os.rename(filename,new_filename)
break
def steps(self,num_steps,max_episodes=sys.maxint):
for i in xrange(num_steps):
if self.ep_count >= max_episodes:
break
super(LoggingRLI,self).steps(1)
    def close(self):
        """Close the NetCDF data files, tolerating partially-initialised state."""
        try:
            self.episode_data.close()
            if self.step_vars:
                self.step_data.close()
        except AttributeError:
            # init() may not have run yet, so the data files may not exist.
            self.warning("Error closing data files.")
    def add_step_variable(self,name,fn,type,size=1):
        """Register a per-step logged variable: fn(rli) is evaluated every
        step and stored under `name` (NetCDF type code `type`, width `size`)
        in the steps file.  Must be called before init()."""
        self.step_vars[name] = (fn,type,size)

    def add_episode_variable(self,name,fn,type,size=1):
        """Register a per-episode logged variable, stored in the episodes
        file at the end of each episode.  Must be called before init()."""
        self.ep_vars[name] = (fn,type,size)
    def start_episode(self):
        """Finalise logging for the episode just ended and set up the record
        for the new one, checkpointing and honouring the GUI run state."""
        # NOTE(review): make_plot/plot_trials are not used in this method --
        # confirm whether the import is needed for its side effects.
        from plastk.rl.data import make_plot,plot_trials
        if (self.checkpointing and self.ep_count - self.last_ckpt_episode >= self.episodes_per_ckpt):
            self.ckpt_save()
        # In single-episode GUI mode, stop and wait until the user resumes.
        if self.gui_runstate == 'Episode':
            self.gui_runstate_control.invoke('Stop')
            self.request_gui_redraw()
        while self.gui_runstate == 'Stop':
            time.sleep(0.1)
        self.message("Starting episode",self.ep_count)
        super(LoggingRLI,self).start_episode()
        if self.step_vars:
            self.step_data.sync()
        epvars = self.episode_data.variables
        # Initialise this episode's record...
        epvars['start'][self.ep_count] = self.step_count
        epvars['length'][self.ep_count] = 0
        epvars['reward'][self.ep_count] = 0
        # ...and write out the user variables of the episode that just ended.
        if self.ep_count > 0:
            for var,(fn,type,size) in self.ep_vars.items():
                epvars[var][self.ep_count-1] = fn(self)
        self.episode_data.sync()
        self.ep_count += 1
def collect_data(self,sensation,action,reward,next_sensation):
    """Record one (s, a, r, s') transition into the NetCDF log files.

    Also honors a pending caught signal (clean shutdown) and the
    step-count-based checkpoint trigger.
    """
    from Numeric import array
    self.sensation = sensation
    self.action = action
    self.reward = reward
    self.next_sensation = next_sensation
    if self.caught_signal:
        self.close()
        # The original ``raise "Caught signal %d" % ...`` used a string
        # exception, which is a TypeError on Python >= 2.6; raise a real
        # exception instead.  (Also dropped an unused ``import sys``.)
        raise RuntimeError("Caught signal %d" % self.caught_signal)
    # Accumulate reward/length for the episode currently in progress.
    epvars = self.episode_data.variables
    epvars['reward'][self.ep_count-1] += array((reward,),'f')
    epvars['length'][self.ep_count-1] += 1
    if self.step_vars:
        stvars = self.step_data.variables
        for var,(fn,type,size) in self.step_vars.items():
            stvars[var][self.step_count] = fn(self)
        # Flush step data periodically, not on every step.
        if self.step_count % 10000 == 0:
            self.step_data.sync()
    if (self.checkpointing and self.step_count - self.last_ckpt_step >= self.steps_per_ckpt):
        self.ckpt_save()
    self.step_count += 1
###################################################
# Checkpointing
def ckpt_steps(self,num_steps,max_episodes=sys.maxint):
    """Like steps(), but with checkpointing and signal handlers enabled.
    num_steps is a *total* target: steps already taken (e.g. restored
    from a checkpoint) are subtracted.  Python 2 only (sys.maxint)."""
    self.checkpointing = True
    self.setup_signals()
    self.steps(num_steps-self.step_count,max_episodes=max_episodes)
    self.clear_signals()
    self.checkpointing = False
def ckpt_episodes(self,num_episodes,timeout):
    """Like episodes(), but with checkpointing and signal handlers
    enabled.  num_episodes is a *total* target: episodes already run
    (e.g. restored from a checkpoint) are subtracted."""
    self.checkpointing = True
    self.setup_signals()
    self.episodes(num_episodes-self.ep_count,timeout)
    self.clear_signals()
    self.checkpointing = False
def ckpt_filename(self):
    """Return the checkpoint file name: <filestem><ckpt_extension>."""
    return '%s%s' % (self.filestem, self.ckpt_extension)
def ckpt_save(self):
    """Pickle the attributes named in self.ckpt_attribs (plus the RNG
    seed) to the checkpoint file and sync the data files.  Skipped when
    ckpt_ok() returns False."""
    from plastk import pkl
    self.verbose("Attempting checkpoint, %d episodes, %d steps."%(self.ep_count,self.step_count))
    if self.ckpt_ok():
        self.last_ckpt_step = self.step_count
        self.last_ckpt_episode = self.ep_count
        # Detach the back-references so env/agent pickle cleanly; they
        # are re-attached below after the dump.
        self.env.sim = self.agent.sim = None
        ckpt = dict(rand_seed = rand.get_seed())
        self.verbose("Checkpointing...")
        for a in self.ckpt_attribs:
            ckpt[a] = getattr(self,a)
            self.verbose(a, ' = ', ckpt[a])
        pkl.dump(ckpt,self.ckpt_filename())
        self.episode_data.sync()
        if self.step_vars:
            self.step_data.sync()
        self.env.sim = self.agent.sim = self
    else:
        self.verbose("No checkpoint, ckpt_ok failed")
    return
def ckpt_restore_state(self,filename):
    """Load a checkpoint pickle, restore the saved attributes and RNG
    seed, and reopen the NetCDF data files in append mode.  Returns the
    raw checkpoint dict."""
    from plastk import pkl
    ckpt = pkl.load(filename)
    self.verbose("Restoring checkpoint state")
    for a in self.ckpt_attribs:
        self.verbose(a,' = ', ckpt[a])
        setattr(self,a,ckpt[a])
    rand.seed(*ckpt['rand_seed'])
    # Re-attach the back-references dropped by ckpt_save().
    self.env.sim = self.agent.sim = self
    self.episode_data = NetCDFFile(self.episode_filename,'a')
    if self.step_vars:
        self.step_data = NetCDFFile(self.step_filename,'a')
    return ckpt
def ckpt_resume(self):
    """Restore state from an existing checkpoint file, if any.

    Returns True when a checkpoint was found and restored, False when
    no checkpoint file exists.
    """
    import os
    filename = self.ckpt_filename()
    if not os.access(filename, os.F_OK):
        return False
    self.message("Found checkpoint file", filename)
    self.ckpt_restore_state(filename)
    return True
def ckpt_ok(self):
    """Hook deciding whether checkpointing is currently safe.

    Subclasses override this to veto a checkpoint (e.g. mid-update);
    the base implementation always allows it.
    """
    return True
def setup_signals(self):
    """Install self.signal_handler for every signal number listed in
    self.catch_signals.  Mutates process-global signal state; undone by
    clear_signals()."""
    import signal
    for sig in self.catch_signals:
        self.verbose("Setting handler for signal",sig)
        signal.signal(sig,self.signal_handler)
def clear_signals(self):
    """Reset every signal in self.catch_signals back to the default
    handler (SIG_DFL), undoing setup_signals()."""
    import signal
    for sig in self.catch_signals:
        self.verbose("Clearing handler for signal",sig)
        signal.signal(sig,signal.SIG_DFL)
def signal_handler(self,signal,frame):
    # Standard (signum, frame) handler signature.  NOTE(review): the
    # parameter named `signal` shadows the `signal` module here; it is
    # the integer signal number.  The flag is only recorded — it is
    # polled in collect_data(), which then closes the files and raises.
    self.caught_signal = signal
#########################################
# GUI
def gui(self,*frame_types):
    """
    Each of frame_types must be either
    (1) the string NewColumn, to start a new column or
    (2) a function that takes (tk_root,rli) and returns a Tkinter widget
    where tk_root is a Tkinter frame and rli is the controlling plastk rli.
    The Tkinter widget must have a redraw method, which takes no arguments.

    Blocks in the Tk mainloop until the window is closed.  The RL run
    loop itself executes on a daemon worker thread (gui_runloop) and
    communicates with the Tk thread via the <<redraw>>/<<destroy>>
    virtual events.  Python 2 / Tkinter+Pmw only.
    """
    import Tkinter as Tk
    from threading import Thread,Event
    self.gui_root = Tk.Tk()
    frame = self.gui_init(self.gui_root,frame_types)
    frame.pack(side='top',expand=1,fill='both')
    self.gui_root.title( self.name )
    # Virtual events let the worker thread schedule work on the Tk thread.
    self.gui_root.bind('<<redraw>>',self.gui_redraw)
    self.gui_root.bind('<<destroy>>', self.gui_destroy)
    self.gui_runloop_thread = Thread(target=self.gui_runloop)
    # Daemon thread: does not keep the process alive after the GUI exits.
    self.gui_runloop_thread.setDaemon(True)
    self.gui_redraw_event = Event()
    def destroy():
        # Window-manager close button => ask the run loop to quit.
        self.gui_runstate = 'Quit'
    self.gui_root.protocol('WM_DELETE_WINDOW',destroy)
    self.gui_running = True
    self.gui_runloop_thread.start()
    self.gui_root.mainloop()
    print "GUI Finished."
    # Falsy sentinel: request_gui_redraw() becomes a no-op from here on.
    self.gui_root = False
def gui_runloop(self):
    """Worker-thread loop: poll the GUI run state (0.1 s), stepping the
    RLI while the state is neither 'Stop' nor 'Quit'.  'Step' mode
    auto-stops after each single step; 'Quit' exits the loop."""
    while True:
        time.sleep(0.1)
        while self.gui_runstate != 'Quit' and self.gui_runstate != 'Stop':
            self.steps(1)
            self.request_gui_redraw()
            if self.gui_runstate == 'Step':
                self.gui_runstate_control.invoke('Stop')
        if self.gui_runstate == 'Quit':
            break
    print "Ending GUI run loop."
def gui_init(self,root,frame_types):
    """Build the control widgets: a Run/Stop/Step/Episode/Quit radio
    control plus one column of user frames per NewColumn marker in
    frame_types.  Returns the top-level Tk frame (not yet packed)."""
    import Tkinter as Tk
    import Pmw
    gui_frame = Tk.Frame(root)
    control_frame = gui_frame
    #control_frame = Tk.Frame(gui_frame)
    #control_frame.pack(side='left',fill='both',expand=1)
    self.gui_runstate_control = Pmw.RadioSelect(control_frame,
                                                labelpos = 'w',
                                                orient = self.gui_button_orient,
                                                command = self.gui_runstate_callback,
                                                label_text = '',
                                                frame_borderwidth = 1,
                                                frame_relief = 'ridge')
    self.gui_runstate_control.pack(side='top',fill='none')
    # Add some buttons to the RadioSelect.
    for text in ('Quit','Run', 'Stop', 'Step','Episode'):
        self.gui_runstate_control.add(text)
    # Start in the stopped state.
    self.gui_runstate_control.invoke('Stop')
    self.subframes = []
    g_frame = Tk.Frame(control_frame)
    g_frame.pack(side='left',expand=1,fill='both')
    for ft in frame_types:
        if ft == NewColumn:
            # Start a new column of user frames.
            g_frame = Tk.Frame(control_frame)
            g_frame.pack(side='left',expand=1,fill='both')
        else:
            # ft is a factory: ft(parent, rli) -> widget with .redraw().
            f = ft(g_frame,self)
            self.subframes.append(f)
            f.pack(side='top',expand=1,fill='both')
    return gui_frame
def request_gui_redraw(self):
    """Ask the Tk thread to redraw and wait (at most 1 s) until the
    redraw has been handled.  No-op when no GUI is attached."""
    if not self.gui_root:
        return
    self.gui_root.event_generate("<<redraw>>", when='tail')
    self.gui_redraw_event.wait(1.0)
    self.gui_redraw_event.clear()
def gui_redraw(self,event):
    """Tk-thread handler for the <<redraw>> virtual event."""
    for f in self.subframes:
        f.redraw()
    # Re-assert the current run state in the radio control.
    self.gui_runstate_control.invoke(self.gui_runstate)
    # Wake the worker thread blocked in request_gui_redraw().
    self.gui_redraw_event.set()
def gui_runstate_callback(self,tag):
    """Record the run state selected in the GUI; a 'Quit' selection also
    queues a <<destroy>> event for the main window."""
    self.gui_runstate = tag
    if tag != 'Quit':
        return
    self.gui_root.event_generate('<<destroy>>',when='tail')
def gui_destroy(self,event):
    """Tk-thread handler for <<destroy>>: stop the mainloop, then tear
    down the root window."""
    root = self.gui_root
    root.quit()
    root.destroy()
try:
import Tkinter as Tk
import Pmw
except ImportError:
pass
else:
class VarPlotter(Tk.Frame):
    """GUI frame plotting a logged variable as a line graph (Pmw/Blt).

    fn(rli, N) must return (xvalues, yvalues) for the last N samples.
    The user can edit the trace length in an entry field.
    NOTE(review): `initial_count` is accepted but never used; the trace
    length is hard-wired to '500' below.
    """
    def __init__(self,root,rli,name,fn,initial_count=500,**args):
        Tk.Frame.__init__(self,root,**args)
        self.fn = fn
        self.rli = rli
        self.trace_len = Tk.StringVar()
        self.trace_len.set('500')
        group = Pmw.Group(self,tag_text=name)
        group.pack(side='top',fill='both',expand=1)
        Pmw.EntryField(group.interior(),
                       label_text='Trace length',
                       labelpos='w',
                       validate='numeric',
                       entry_textvariable=self.trace_len).pack(side='top',fill='x')
        # self.plot = Pmw.Blt.Graph(group.interior())
        self.plot = Pmw.Blt.Graph(group.interior(),height='1i')
        self.plot.pack(side='top',expand=1,fill='both')
        self.plot.line_create('values',label='',symbol='',smooth='step')
        self.plot.grid_on()
        # Caches used to skip redundant replots in redraw().
        self.last_yvalues = []
        self.last_xvalues = []
    def redraw(self):
        # Ignore a non-numeric trace-length entry.
        try:
            N = int(self.trace_len.get())
        except ValueError:
            return
        xvalues,yvalues = self.fn(self.rli, N)
        # Only replot when the data actually changed.
        if yvalues and (yvalues != self.last_yvalues or xvalues != self.last_xvalues):
            # yvalues[:,0] assumes a 2-D (Numeric) array with the
            # variable in column 0 — TODO confirm against the data files.
            ydata = tuple(yvalues[:,0])
            xdata = tuple(xvalues)
            self.plot.element_configure('values',
                                        xdata=xdata,
                                        ydata=ydata)
            self.last_yvalues = yvalues
            self.last_xvalues = xvalues
def StepVarPlotter(name,length=500):
    """Factory returning a (root, rli) -> VarPlotter constructor that
    plots the last N samples of the step variable `name`."""
    def get_values(rli, N):
        values = rli.step_data.variables[name][:]
        total = len(values[:])
        if total <= N:
            return range(total), values[:]
        return range(total - N, total), values[total - N:total]
    return lambda root, rli: VarPlotter(root, rli, name, get_values,
                                        initial_count=length)
def EpisodeVarPlotter(name,length=500):
    """Factory returning a (root, rli) -> VarPlotter constructor that
    plots the last N values of the episode variable `name`.

    The most recent episode is excluded (its variables are only written
    once the episode finishes), hence the M-1 bounds.
    """
    def get_values(rli,N):
        v = rli.episode_data.variables[name][:]
        M = len(v[:])
        if M > N:
            return range(M-N,M-1),v[M-N:M-1]
        else:
            # Fix: the original returned range(M) (M x-values) against
            # v[:M-1] (M-1 y-values); the lengths must match for Blt.
            return range(M-1),v[:M-1]
    return lambda root,rli: VarPlotter(root,rli,name,get_values,initial_count=length)
class TextList(Tk.Frame):
    """Scrolling list of per-step text lines, cleared at episode ends.

    Abstract: subclasses must provide a class attribute `name` (the
    list label) and a get_line() method returning the text to append.
    """
    def __init__(self, root, rli, **args):
        Tk.Frame.__init__(self, root, **args)
        self.rli = rli
        self.textlist = []
        # Episode counter snapshot; used to detect episode boundaries.
        self.ep_count = rli.ep_count
        # `self.name` is supplied by the subclass (e.g. ActionList).
        self.list = Pmw.ComboBox(self, dropdown=0, history=0,
                                 labelpos='nw', label_text=self.name)
        self.list.component('scrolledlist').component('listbox').config(exportselection=0)
        self.list.pack(side='top',fill='both',expand=1)
    def redraw(self):
        self.list.component('listbox').insert(self.rli.step_count, self.get_line())
        listlen = self.list.component('listbox').size()
        # Keep the newest line selected/visible.
        if listlen: self.list.selectitem(listlen - 1)
        # New episode started since last redraw: wipe the list.
        if self.ep_count != self.rli.ep_count:
            self.list.clear()
            self.ep_count = self.rli.ep_count
class ActionList(TextList):
    """TextList showing the most recent action each step."""
    name = 'Actions'
    def get_line(self):
        return self.rli.action
class SensationList(TextList):
    """TextList showing the most recent sensation each step."""
    name = 'Sensations'
    def get_line(self):
        # NOTE(review): collect_data() sets self.sensation, but this
        # reads `last_sensation` — presumably set elsewhere in the
        # class; verify the attribute actually exists at runtime.
        return self.rli.last_sensation
| ronaldahmed/robot-navigation | neural-navigation-with-lstm/MARCO/plastk/rl/loggingrli.py | Python | mit | 16,837 | [
"NetCDF"
] | 341a18bf232268b8d44a9b50e7081e85fc80173c199d75573dc6fbb98f997cab |
#### PATTERN | DE | INFLECT ########################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2012 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
####################################################################################################
# Regular expressions-based rules for German word inflection:
# - pluralization and singularization of nouns and adjectives,
# - conjugation of verbs,
# - attributive and predicative of adjectives,
# - comparative and superlative of adjectives.
# Accuracy (measured on CELEX German morphology word forms):
# 75% for gender()
# 72% for pluralize()
# 84% for singularize() (for nominative)
# 87% for Verbs.find_lemma()
# 87% for Verbs.find_lexeme()
# 98% for predicative
import os
import sys
import re
try:
    # __file__ is undefined when the module source is exec'd directly
    # (e.g. in an interactive session); fall back to "".
    MODULE = os.path.dirname(os.path.abspath(__file__))
except NameError:
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit and any unrelated error.
    MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
from pattern.text import Verbs as _Verbs
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
INDICATIVE, IMPERATIVE, SUBJUNCTIVE,
PROGRESSIVE,
PARTICIPLE, GERUND
)
sys.path.pop(0)
# Penn-style part-of-speech tags used throughout this module.
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"

VOWELS = "aeiouy"
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)

def is_vowel(ch):
    """Return True if ch is a lowercase vowel (case-sensitive, matching
    the original lambda; use re_vowel for case-insensitive matching)."""
    # A def instead of a lambda assignment (PEP 8 / E731); same behavior.
    return ch in VOWELS
#### ARTICLE #######################################################################################
# German inflection depends on gender, role and number + the determiner (if any).

# Inflection gender.
# Masculine is the most common, so it is the default for all functions.
MASCULINE, FEMININE, NEUTER, PLURAL = \
MALE, FEMALE, NEUTRAL, PLURAL = \
M, F, N, PL = "m", "f", "n", "p"

# Inflection role.
# - nom = subject, "Der Hund bellt" (the dog barks).
# - acc = object, "Das Mädchen küsst den Hund" (the girl kisses the dog).
# - dat = object (indirect), "Der Mann gibt einen Knochen zum Hund" (the man gives the dog a bone).
# - gen = property, "die Knochen des Hundes" (the dog's bone).
NOMINATIVE, ACCUSATIVE, DATIVE, GENITIVE = SUBJECT, OBJECT, INDIRECT, PROPERTY = \
    "nominative", "accusative", "dative", "genitive"

# (gender, role) => inflected definite article.
article_definite = {
    ("m", "nom"): "der", ("f", "nom"): "die", ("n", "nom"): "das", ("p", "nom"): "die",
    ("m", "acc"): "den", ("f", "acc"): "die", ("n", "acc"): "das", ("p", "acc"): "die",
    ("m", "dat"): "dem", ("f", "dat"): "der", ("n", "dat"): "dem", ("p", "dat"): "den",
    ("m", "gen"): "des", ("f", "gen"): "der", ("n", "gen"): "des", ("p", "gen"): "der",
}

# (gender, role) => inflected indefinite article.
article_indefinite = {
    ("m", "nom"): "ein" , ("f", "nom"): "eine" , ("n", "nom"): "ein" , ("p", "nom"): "eine",
    ("m", "acc"): "einen", ("f", "acc"): "eine" , ("n", "acc"): "ein" , ("p", "acc"): "eine",
    ("m", "dat"): "einem", ("f", "dat"): "einer", ("n", "dat"): "einem", ("p", "dat"): "einen",
    ("m", "gen"): "eines", ("f", "gen"): "einer", ("n", "gen"): "eines", ("p", "gen"): "einer",
}

def definite_article(word, gender=MALE, role=SUBJECT):
    """ Returns the definite article (der/die/das/die) for a given word.
        Returns None for an unknown gender/role combination.
    """
    return article_definite.get((gender[:1].lower(), role[:3].lower()))

def indefinite_article(word, gender=MALE, role=SUBJECT):
    """ Returns the indefinite article (ein) for a given word.
        Returns None for an unknown gender/role combination.
    """
    return article_indefinite.get((gender[:1].lower(), role[:3].lower()))

DEFINITE = "definite"
INDEFINITE = "indefinite"

def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
    """ Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
    """
    # A real conditional instead of the old "cond and a or b" idiom,
    # which silently falls through to the second branch whenever the
    # first branch's result is falsy (e.g. None for an unknown key).
    if function == DEFINITE:
        return definite_article(word, gender, role)
    return indefinite_article(word, gender, role)

_article = article

def referenced(word, article=INDEFINITE, gender=MALE, role=SUBJECT):
    """ Returns a string with the article + the word.
    """
    return "%s %s" % (_article(word, article, gender, role), word)
#### GENDER #########################################################################################
# Suffix heuristics for grammatical gender, trained on CELEX.
# The three short lists below are high-precision suffixes; the
# majority-vote table is a lower-precision fallback on 2-letter endings.

gender_masculine = (
    "ant", "ast", "ich", "ig", "ismus", "ling", "or", "us"
)
gender_feminine = (
    "a", "anz", "ei", "enz", "heit", "ie", "ik", "in", "keit", "schaf", "sion", "sis",
    u"tät", "tion", "ung", "ur"
)
gender_neuter = (
    "chen", "icht", "il", "it", "lein", "ma", "ment", "tel", "tum", "um","al", "an", "ar",
    u"ät", "ent", "ett", "ier", "iv", "o", "on", "nis", "sal"
)
gender_majority_vote = {
    MASCULINE: (
        "ab", "af", "ag", "ak", "am", "an", "ar", "at", "au", "ch", "ck", "eb", "ef", "eg",
        "el", "er", "es", "ex", "ff", "go", "hn", "hs", "ib", "if", "ig", "ir", "kt", "lf",
        "li", "ll", "lm", "ls", "lt", "mi", "nd", "nk", "nn", "nt", "od", "of", "og", "or",
        "pf", "ph", "pp", "ps", "rb", "rd", "rf", "rg", "ri", "rl", "rm", "rr", "rs", "rt",
        "rz", "ss", "st", "tz", "ub", "uf", "ug", "uh", "un", "us", "ut", "xt", "zt"
    ),
    FEMININE: (
        "be", "ce", "da", "de", "dt", "ee", "ei", "et", "eu", "fe", "ft", "ge", "he", "hr",
        "ht", "ia", "ie", "ik", "in", "it", "iz", "ka", "ke", "la", "le", "me", "na", "ne",
        "ng", "nz", "on", "pe", "ra", "re", "se", "ta", "te", "ue", "ur", "ve", "ze"
    ),
    NEUTER: (
        "ad", "al", "as", "do", "ed", "eh", "em", "en", "hl", "id", "il", "im", "io", "is",
        "iv", "ix", "ld", "lk", "lo", "lz", "ma", "md", "mm", "mt", "no", "ns", "ol", "om",
        "op", "os", "ot", "pt", "rk", "rn", "ro", "to", "tt", "ul", "um", "uz"
    )
}

def gender(word, pos=NOUN):
    """ Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote).
        Returns None for words that are not nouns.
    """
    w = word.lower()
    if pos == NOUN:
        # Default rules (baseline = 32%).
        if w.endswith(gender_masculine):
            return MASCULINE
        if w.endswith(gender_feminine):
            return FEMININE
        if w.endswith(gender_neuter):
            return NEUTER
        # Majority vote.  NOTE: iteration follows dict insertion order
        # (guaranteed only on Python 3.7+ / CPython 3.6), so overlapping
        # endings resolve masculine -> feminine -> neuter.
        for g in gender_majority_vote:
            if w.endswith(gender_majority_vote[g]):
                return g
    # Implicit None for non-nouns and for nouns matching no suffix.
#### PLURALIZE ######################################################################################
# (singular suffix, plural suffix) pairs, most common endings first as
# generated from CELEX.  Order matters: pluralize() applies the first
# matching entry.  Fixed: "äume" below lacked the u"" prefix, so under
# Python 2 the byte string was concatenated with a unicode stem and
# raised UnicodeDecodeError whenever the "aum" rule fired.
plural_inflections = [
    ("aal", u"äle" ), ("aat", "aaten"), ( "abe", "aben" ), ("ach", u"ächer"), ("ade", "aden" ),
    ("age", "agen" ), ("ahn", "ahnen"), ( "ahr", "ahre" ), ("akt", "akte" ), ("ale", "alen" ),
    ("ame", "amen" ), ("amt", u"ämter"), ( "ane", "anen" ), ("ang", u"änge" ), ("ank", u"änke" ),
    ("ann", u"änner" ), ("ant", "anten"), ( "aph", "aphen"), ("are", "aren" ), ("arn", "arne" ),
    ("ase", "asen" ), ("ate", "aten" ), ( "att", u"ätter"), ("atz", u"ätze" ), ("aum", u"äume" ),
    ("aus", u"äuser" ), ("bad", u"bäder"), ( "bel", "bel" ), ("ben", "ben" ), ("ber", "ber" ),
    ("bot", "bote" ), ("che", "chen" ), ( "chs", "chse" ), ("cke", "cken" ), ("del", "del" ),
    ("den", "den" ), ("der", "der" ), ( "ebe", "ebe" ), ("ede", "eden" ), ("ehl", "ehle" ),
    ("ehr", "ehr" ), ("eil", "eile" ), ( "eim", "eime" ), ("eis", "eise" ), ("eit", "eit" ),
    ("ekt", "ekte" ), ("eld", "elder"), ( "ell", "elle" ), ("ene", "enen" ), ("enz", "enzen" ),
    ("erd", "erde" ), ("ere", "eren" ), ( "erk", "erke" ), ("ern", "erne" ), ("ert", "erte" ),
    ("ese", "esen" ), ("ess", "esse" ), ( "est", "este" ), ("etz", "etze" ), ("eug", "euge" ),
    ("eur", "eure" ), ("fel", "fel" ), ( "fen", "fen" ), ("fer", "fer" ), ("ffe", "ffen" ),
    ("gel", "gel" ), ("gen", "gen" ), ( "ger", "ger" ), ("gie", "gie" ), ("hen", "hen" ),
    ("her", "her" ), ("hie", "hien" ), ( "hle", "hlen" ), ("hme", "hmen" ), ("hne", "hnen" ),
    ("hof", u"höfe" ), ("hre", "hren" ), ( "hrt", "hrten"), ("hse", "hsen" ), ("hte", "hten" ),
    ("ich", "iche" ), ("ick", "icke" ), ( "ide", "iden" ), ("ieb", "iebe" ), ("ief", "iefe" ),
    ("ieg", "iege" ), ("iel", "iele" ), ( "ien", "ium" ), ("iet", "iete" ), ("ife", "ifen" ),
    ("iff", "iffe" ), ("ift", "iften"), ( "ige", "igen" ), ("ika", "ikum" ), ("ild", "ilder" ),
    ("ilm", "ilme" ), ("ine", "inen" ), ( "ing", "inge" ), ("ion", "ionen"), ("ise", "isen" ),
    ("iss", "isse" ), ("ist", "isten"), ( "ite", "iten" ), ("itt", "itte" ), ("itz", "itze" ),
    ("ium", "ium" ), ("kel", "kel" ), ( "ken", "ken" ), ("ker", "ker" ), ("lag", u"läge" ),
    ("lan", u"läne" ), ("lar", "lare" ), ( "lei", "leien"), ("len", "len" ), ("ler", "ler" ),
    ("lge", "lgen" ), ("lie", "lien" ), ( "lle", "llen" ), ("mel", "mel" ), ("mer", "mer" ),
    ("mme", "mmen" ), ("mpe", "mpen" ), ( "mpf", "mpfe" ), ("mus", "mus" ), ("mut", "mut" ),
    ("nat", "nate" ), ("nde", "nden" ), ( "nen", "nen" ), ("ner", "ner" ), ("nge", "ngen" ),
    ("nie", "nien" ), ("nis", "nisse"), ( "nke", "nken" ), ("nkt", "nkte" ), ("nne", "nnen" ),
    ("nst", "nste" ), ("nte", "nten" ), ( "nze", "nzen" ), ("ock", u"öcke" ), ("ode", "oden" ),
    ("off", "offe" ), ("oge", "ogen" ), ( "ohn", u"öhne" ), ("ohr", "ohre" ), ("olz", u"ölzer" ),
    ("one", "onen" ), ("oot", "oote" ), ( "opf", u"öpfe" ), ("ord", "orde" ), ("orm", "ormen" ),
    ("orn", u"örner" ), ("ose", "osen" ), ( "ote", "oten" ), ("pel", "pel" ), ("pen", "pen" ),
    ("per", "per" ), ("pie", "pien" ), ( "ppe", "ppen" ), ("rag", u"räge" ), ("rau", u"raün" ),
    ("rbe", "rben" ), ("rde", "rden" ), ( "rei", "reien"), ("rer", "rer" ), ("rie", "rien" ),
    ("rin", "rinnen"), ("rke", "rken" ), ( "rot", "rote" ), ("rre", "rren" ), ("rte", "rten" ),
    ("ruf", "rufe" ), ("rzt", "rzte" ), ( "sel", "sel" ), ("sen", "sen" ), ("ser", "ser" ),
    ("sie", "sien" ), ("sik", "sik" ), ( "sse", "ssen" ), ("ste", "sten" ), ("tag", "tage" ),
    ("tel", "tel" ), ("ten", "ten" ), ( "ter", "ter" ), ("tie", "tien" ), ("tin", "tinnen"),
    ("tiv", "tive" ), ("tor", "toren"), ( "tte", "tten" ), ("tum", "tum" ), ("tur", "turen" ),
    ("tze", "tzen" ), ("ube", "uben" ), ( "ude", "uden" ), ("ufe", "ufen" ), ("uge", "ugen" ),
    ("uhr", "uhren" ), ("ule", "ulen" ), ( "ume", "umen" ), ("ung", "ungen"), ("use", "usen" ),
    ("uss", u"üsse" ), ("ute", "uten" ), ( "utz", "utz" ), ("ver", "ver" ), ("weg", "wege" ),
    ("zer", "zer" ), ("zug", u"züge" ), (u"ück", u"ücke" )
]
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
    """ Returns the plural of a given word.
        The inflection is based on probability rather than gender and role.

        Only nouns are inflected; other parts of speech are returned
        capitalized but otherwise unchanged.  Rule order below is
        significant: table lookup first, then default suffix rules.
        NOTE(review): the `custom` parameter is accepted but unused.
    """
    w = word.lower().capitalize()
    if pos == NOUN:
        # 1) Trained suffix table (first match wins).
        for a, b in plural_inflections:
            if w.endswith(a):
                return w[:-len(a)] + b
        # Default rules (baseline = 69%).
        if w.startswith("ge"):
            return w
        if w.endswith("gie"):
            return w
        if w.endswith("e"):
            return w + "n"
        if w.endswith("ien"):
            return w[:-2] + "um"
        # Endings that are identical in singular and plural.
        if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", u"tät", "tik", "tum", "u")):
            return w
        if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")):
            return w + "en"
        if w.endswith("in"):
            return w + "nen"
        if w.endswith("nis"):
            return w + "se"
        if w.endswith(("eld", "ild", "ind")):
            return w + "er"
        if w.endswith("o"):
            return w + "s"
        if w.endswith("a"):
            return w[:-1] + "en"
        # Inflect common umlaut vowels: Kopf => Köpfe.
        if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")):
            umlaut = w[-3]
            umlaut = umlaut.replace("a", u"ä")
            umlaut = umlaut.replace("o", u"ö")
            umlaut = umlaut.replace("u", u"ü")
            return w[:-3] + umlaut + w[-2:] + "e"
        for a, b in (
          ("ag", u"äge"),
          ("ann", u"änner"),
          ("aum", u"äume"),
          ("aus", u"äuser"),
          ("zug", u"züge")):
            if w.endswith(a):
                return w[:-len(a)] + b
        # Fallback: simply append -e.
        return w + "e"
    return w
#### SINGULARIZE ###################################################################################
# (plural suffix, singular suffix) pairs; order matters — singularize()
# applies the first match.  Several entries mirror deliberate quirks of
# the pluralize table (e.g. u"raün"/u"haün") so that round-trips agree.
# NOTE(review): ("sis", "sen") maps a singular-looking ending to a
# plural-looking one — looks reversed, but is kept as trained on CELEX.

singular_inflections = [
    ( "innen", "in" ), (u"täten", u"tät"), ( "ahnen", "ahn"), ( "enten", "ent"), (u"räser", "ras"),
    ( "hrten", "hrt"), (u"ücher", "uch"), (u"örner", "orn"), (u"änder", "and"), (u"ürmer", "urm"),
    ( "ahlen", "ahl"), ( "uhren", "uhr"), (u"ätter", "att"), ( "suren", "sur"), ( "chten", "cht"),
    ( "kuren", "kur"), ( "erzen", "erz"), (u"güter", "gut"), ( "soren", "sor"), (u"änner", "ann"),
    (u"äuser", "aus"), ( "taten", "tat"), ( "isten", "ist"), (u"bäder", "bad"), (u"ämter", "amt"),
    ( "eiten", "eit"), ( "raten", "rat"), ( "ormen", "orm"), ( "ionen", "ion"), ( "nisse", "nis"),
    (u"ölzer", "olz"), ( "ungen", "ung"), (u"läser", "las"), (u"ächer", "ach"), ( "urten", "urt"),
    ( "enzen", "enz"), ( "aaten", "aat"), ( "aphen", "aph"), (u"öcher", "och"), (u"türen", u"tür"),
    ( "sonen", "son"), (u"ühren", u"ühr"), (u"ühner", "uhn"), ( "toren", "tor"), (u"örter", "ort"),
    ( "anten", "ant"), (u"räder", "rad"), ( "turen", "tur"), (u"äuler", "aul"), ( u"änze", "anz"),
    ( "tten", "tte"), ( "mben", "mbe"), ( u"ädte", "adt"), ( "llen", "lle"), ( "ysen", "yse"),
    ( "rben", "rbe"), ( "hsen", "hse"), ( u"raün", "rau"), ( "rven", "rve"), ( "rken", "rke"),
    ( u"ünge", "ung"), ( u"üten", u"üte"), ( "usen", "use"), ( "tien", "tie"), ( u"läne", "lan"),
    ( "iben", "ibe"), ( "ifen", "ife"), ( "ssen", "sse"), ( "gien", "gie"), ( "eten", "ete"),
    ( "rden", "rde"), ( u"öhne", "ohn"), ( u"ärte", "art"), ( "ncen", "nce"), ( u"ünde", "und"),
    ( "uben", "ube"), ( "lben", "lbe"), ( u"üsse", "uss"), ( "agen", "age"), ( u"räge", "rag"),
    ( "ogen", "oge"), ( "anen", "ane"), ( "sken", "ske"), ( "eden", "ede"), ( u"össe", "oss"),
    ( u"ürme", "urm"), ( "ggen", "gge"), ( u"üren", u"üre"), ( "nten", "nte"), ( u"ühle", u"ühl"),
    ( u"änge", "ang"), ( "mmen", "mme"), ( "igen", "ige"), ( "nken", "nke"), ( u"äcke", "ack"),
    ( "oden", "ode"), ( "oben", "obe"), ( u"ähne", "ahn"), ( u"änke", "ank"), ( "inen", "ine"),
    ( "seen", "see"), ( u"äfte", "aft"), ( "ulen", "ule"), ( u"äste", "ast"), ( "hren", "hre"),
    ( u"öcke", "ock"), ( "aben", "abe"), ( u"öpfe", "opf"), ( "ugen", "uge"), ( "lien", "lie"),
    ( u"ände", "and"), ( u"ücke", u"ück"), ( "asen", "ase"), ( "aden", "ade"), ( "dien", "die"),
    ( "aren", "are"), ( "tzen", "tze"), ( u"züge", "zug"), ( u"üfte", "uft"), ( "hien", "hie"),
    ( "nden", "nde"), ( u"älle", "all"), ( "hmen", "hme"), ( "ffen", "ffe"), ( "rmen", "rma"),
    ( "olen", "ole"), ( "sten", "ste"), ( "amen", "ame"), ( u"höfe", "hof"), ( u"üste", "ust"),
    ( "hnen", "hne"), ( u"ähte", "aht"), ( "umen", "ume"), ( "nnen", "nne"), ( "alen", "ale"),
    ( "mpen", "mpe"), ( "mien", "mie"), ( "rten", "rte"), ( "rien", "rie"), ( u"äute", "aut"),
    ( "uden", "ude"), ( "lgen", "lge"), ( "ngen", "nge"), ( "iden", "ide"), ( u"ässe", "ass"),
    ( "osen", "ose"), ( "lken", "lke"), ( "eren", "ere"), ( u"üche", "uch"), ( u"lüge", "lug"),
    ( "hlen", "hle"), ( "isen", "ise"), ( u"ären", u"äre"), ( u"töne", "ton"), ( "onen", "one"),
    ( "rnen", "rne"), ( u"üsen", u"üse"), ( u"haün", "hau"), ( "pien", "pie"), ( "ihen", "ihe"),
    ( u"ürfe", "urf"), ( "esen", "ese"), ( u"ätze", "atz"), ( "sien", "sie"), ( u"läge", "lag"),
    ( "iven", "ive"), ( u"ämme", "amm"), ( u"äufe", "auf"), ( "ppen", "ppe"), ( "enen", "ene"),
    ( "lfen", "lfe"), ( u"äume", "aum"), ( "nien", "nie"), ( "unen", "une"), ( "cken", "cke"),
    ( "oten", "ote"), ( "mie", "mie"), ( "rie", "rie"), ( "sis", "sen"), ( "rin", "rin"),
    ( "ein", "ein"), ( "age", "age"), ( "ern", "ern"), ( "ber", "ber"), ( "ion", "ion"),
    ( "inn", "inn"), ( "ben", "ben"), ( u"äse", u"äse"), ( "eis", "eis"), ( "hme", "hme"),
    ( "iss", "iss"), ( "hen", "hen"), ( "fer", "fer"), ( "gie", "gie"), ( "fen", "fen"),
    ( "her", "her"), ( "ker", "ker"), ( "nie", "nie"), ( "mer", "mer"), ( "ler", "ler"),
    ( "men", "men"), ( "ass", "ass"), ( "ner", "ner"), ( "per", "per"), ( "rer", "rer"),
    ( "mus", "mus"), ( "abe", "abe"), ( "ter", "ter"), ( "ser", "ser"), ( u"äle", "aal"),
    ( "hie", "hie"), ( "ger", "ger"), ( "tus", "tus"), ( "gen", "gen"), ( "ier", "ier"),
    ( "ver", "ver"), ( "zer", "zer"),
]

def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
    """ Returns the singular of a given word.
        The inflection is based on probability rather than gender and role.

        Only nouns are inflected.  NOTE(review): `custom` is unused.
    """
    w = word.lower().capitalize()
    if pos == NOUN:
        # Trained suffix table, first match wins.
        for a, b in singular_inflections:
            if w.endswith(a):
                return w[:-len(a)] + b
        # Default rule: strip known plural suffixes (baseline = 51%).
        for suffix in ("nen", "en", "n", "e", "er", "s"):
            if w.endswith(suffix):
                return w[:-len(suffix)]
        return w
    return w
#### VERB CONJUGATION ##############################################################################
# The verb table was trained on CELEX and contains the top 2000 most frequent verbs.

# Inseparable prefixes stay attached in all forms (be-finden => befand).
prefix_inseparable = (
    "be", "emp", "ent", "er", "ge", "miss", u"über", "unter", "ver", "voll", "wider", "zer"
)
# Separable prefixes split off in finite forms (auf-stehen => stand auf).
prefix_separable = (
    "ab", "an", "auf", "aus", "bei", "durch", "ein", "fort", "mit", "nach", "vor", "weg",
    u"zurück", "zusammen", "zu", "dabei", "daran", "da", "empor", "entgegen", "entlang",
    "fehl", "fest", u"gegenüber", "gleich", "herab", "heran", "herauf", "heraus", "herum",
    "her", "hinweg", "hinzu", "hin", "los", "nieder", "statt", "umher", "um", "weg",
    "weiter", "wieder", "zwischen"
) + ( # There are many more...
    "dort", "fertig", "frei", "gut", "heim", "hoch", "klein", "klar", "nahe", "offen", "richtig"
)
prefixes = prefix_inseparable + prefix_separable
def encode_sz(s):
    """Normalize the Eszett ligature: every ß becomes 'ss'."""
    return s.replace(u"ß", "ss")

def decode_sz(s):
    """Reverse of encode_sz(): every 'ss' becomes ß (lossy — genuine
    double-s sequences are converted too)."""
    return s.replace("ss", u"ß")
class Verbs(_Verbs):
    """German verb lexicon + rule-based lemmatizer/conjugator.

    Backed by de-verbs.txt (CELEX-derived); find_lemma()/find_lexeme()
    are the rule-based fallbacks used for verbs not in the lexicon.
    """
    def __init__(self):
        # format: column indices of the tenses kept from de-verbs.txt;
        # default: tense column fallbacks (missing 6 => use 4, etc.).
        _Verbs.__init__(self, os.path.join(MODULE, "de-verbs.txt"),
            language = "de",
              format = [0, 1, 2, 3, 4, 5, 8, 17, 18, 19, 20, 21, 24, 52, 54, 53, 55, 56, 58, 59, 67, 68, 70, 71],
             default = {6: 4, 22: 20, 57: 55, 60: 58, 69: 67, 72: 70}
            )
    def find_lemma(self, verb):
        """ Returns the base form of the given inflected verb, using a rule-based approach.
        """
        v = verb.lower()
        # Common prefixes: be-finden and emp-finden probably inflect like finden.
        if not (v.startswith("ge") and v.endswith("t")): # Probably gerund.
            for prefix in prefixes:
                if v.startswith(prefix) and v[len(prefix):] in self.inflections:
                    return prefix + self.inflections[v[len(prefix):]]
        # Common sufixes: setze nieder => niedersetzen.
        b, suffix = " " in v and v.split()[:2] or (v, "")
        # Infinitive -ln: trommeln.
        if b.endswith(("ln", "rn")):
            return b
        # Lemmatize regular inflections (longest endings first).
        for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"):
            if b.endswith(x): b = b[:-len(x)]; break
        # Subjunctive: hielte => halten, schnitte => schneiden.
        for x, y in (
          ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"),
          ("ien", "ein"), ("iess", "ass"), (u"ieß", u"aß" ), ( "iff", "eif" ), ("iss", "eiss"),
         (u"iß", u"eiß"), (  "it", "eid"), ( "oss", "iess"), (u"öss", "iess")):
            if b.endswith(x): b = b[:-len(x)] + y; break
        # Undo double substitutions produced by the table above.
        b = b.replace("eeiss", "eiss")
        b = b.replace("eeid", "eit")
        # Subjunctive: wechselte => wechseln
        if not b.endswith(("e", "l")) and not (b.endswith("er") and not b[-3] in VOWELS):
            b = b + "e"
        # abknallst != abknalln => abknallen
        if b.endswith(("hl", "ll", "ul", "eil")):
            b = b + "e"
        # Strip ge- from (likely) gerund:
        if b.startswith("ge") and v.endswith("t"):
            b = b[2:]
        # Corrections (these add about 1.5% accuracy):
        if b.endswith(("lnde", "rnde")):
            b = b[:-3]
        if b.endswith(("ae", "al", u"öe", u"üe")):
            b = b.rstrip("e") + "te"
        if b.endswith(u"äl"):
            b = b + "e"
        # Reattach a separable prefix and the infinitive -n.
        return suffix + b + "n"
    def find_lexeme(self, verb):
        """ For a regular verb (base form), returns the forms using a rule-based approach.
        """
        v = verb.lower()
        # Stem = infinitive minus -en, -ln, -rn.
        b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v)))
        # Split common prefixes.
        x, x1, x2 = "", "", ""
        for prefix in prefix_separable:
            if v.startswith(prefix):
                b, x = b[len(prefix):], prefix
                x1 = (" " + x).rstrip()
                x2 = x + "ge"
                break
        # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest.
        pl = b.endswith("el") and b[:-2]+"l" or b
        # Present tense 1pl -el: handeln => wir handeln
        pw = v.endswith(("ln", "rn")) and v or b+"en"
        # Present tense ending in -d or -t gets -e:
        pr = b.endswith(("d", "t")) and b+"e" or b
        # Present tense 2sg gets -st, unless stem ends with -s or -z.
        p2 = pr.endswith(("s","z")) and pr+"t" or pr+"st"
        # Present participle: spiel + -end, arbeiten + -d:
        pp = v.endswith(("en", "ln", "rn")) and v+"d" or v+"end"
        # Past tense regular:
        pt = encode_sz(pr) + "t"
        # Past participle: haushalten => hausgehalten
        ge = (v.startswith(prefix_inseparable) or b.endswith(("r","t"))) and pt or "ge"+pt
        ge = x and x+"ge"+pt or ge
        # Present subjunctive: stem + -e, -est, -en, -et:
        s1 = encode_sz(pl)
        # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:
        s2 = encode_sz(pt)
        # Construct the lexeme (order matches the `format` columns):
        lexeme = a = [
            v,
            pl+"e"+x1, p2+x1, pr+"t"+x1, pw+x1, pr+"t"+x1, pp,          # present
            pt+"e"+x1, pt+"est"+x1, pt+"e"+x1, pt+"en"+x1, pt+"et"+x1, ge, # past
            b+"e"+x1, pr+"t"+x1, x+pw,                                  # imperative
            s1+"e"+x1, s1+"est"+x1, s1+"en"+x1, s1+"et"+x1,             # subjunctive I
            s2+"e"+x1, s2+"est"+x1, s2+"en"+x1, s2+"et"+x1              # subjunctive II
        ]
        # Encode Eszett (ß) and attempt to retrieve from the lexicon.
        # Decode Eszett for present and imperative.
        if encode_sz(v) in self:
            a = self[encode_sz(v)]
            a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:]
        # Since the lexicon does not contain imperative for all verbs, don't simply return it.
        # Instead, update the rule-based lexeme with inflections from the lexicon.
        return [a[i] or lexeme[i] for i in range(len(a))]
    def tenses(self, verb, parse=True):
        """ Returns a list of possible tenses for the given inflected verb.
        """
        tenses = _Verbs.tenses(self, verb, parse)
        if len(tenses) == 0:
            # auswirkte => wirkte aus: retry with the separable prefix
            # moved behind the verb.
            for prefix in prefix_separable:
                if verb.startswith(prefix):
                    tenses = _Verbs.tenses(self, verb[len(prefix):] + " " + prefix, parse)
                    break
        return tenses
# Module-level singleton and convenience aliases mirroring the other
# pattern.text language modules.
verbs = Verbs()

conjugate, lemma, lexeme, tenses = \
    verbs.conjugate, verbs.lemma, verbs.lexeme, verbs.tenses
#### ATTRIBUTIVE & PREDICATIVE #####################################################################
# (gender, role) => attributive adjective ending, one table per
# declension class.

# Strong inflection: no article.
adjectives_strong = {
    ("m", "nom"): "er", ("f", "nom"): "e" , ("n", "nom"): "es", ("p", "nom"): "e",
    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "es", ("p", "acc"): "e",
    ("m", "dat"): "em", ("f", "dat"): "er", ("n", "dat"): "em", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "er", ("n", "gen"): "en", ("p", "gen"): "er",
}

# Mixed inflection: after indefinite article ein & kein and possessive determiners.
adjectives_mixed = {
    ("m", "nom"): "er", ("f", "nom"): "e" , ("n", "nom"): "es", ("p", "nom"): "en",
    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "es", ("p", "acc"): "en",
    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
}

# Weak inflection: after definite article.
adjectives_weak = {
    ("m", "nom"): "e", ("f", "nom"): "e" , ("n", "nom"): "e", ("p", "nom"): "en",
    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "e", ("p", "acc"): "en",
    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
}

# Uninflected + exceptions.
adjective_attributive = {
    "etwas" : "etwas",
    "genug" : "genug",
    "viel"  : "viel",
    "wenig" : "wenig"
}
def attributive(adjective, gender=MALE, role=SUBJECT, article=None):
    """ For a predicative adjective, returns the attributive form (lowercase).
        In German, the attributive is formed with -e, -em, -en, -er or -es,
        depending on gender (masculine, feminine, neuter or plural) and role
        (nominative, accusative, dative, genitive).

        `article` selects the declension class: None / pronouns / "ein"-less
        quantifiers => strong, ein-/kein-/possessives => mixed, definite
        articles & demonstratives => weak.
    """
    w, g, c, a = \
        adjective.lower(), gender[:1].lower(), role[:3].lower(), article and article.lower() or None
    if w in adjective_attributive:
        return adjective_attributive[w]
    if a is None \
    or a in ("mir", "dir", "ihm") \
    or a in ("ein", "etwas", "mehr") \
    or a.startswith(("all", "mehrer", "wenig", "viel")):
        return w + adjectives_strong.get((g, c), "")
    if a.startswith(("ein", "kein")) \
    or a.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")):
        return w + adjectives_mixed.get((g, c), "")
    # NOTE(review): "arm"/"alt" in the weak-declension trigger list look
    # odd (they are adjectives, not determiners) — kept as trained.
    if a in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \
    or a.startswith((
       "derselb", "derjenig", "jed", "jeglich", "jen", "manch",
       "dies", "solch", "welch")):
        return w + adjectives_weak.get((g, c), "")
    # Default to strong inflection.
    return w + adjectives_strong.get((g, c), "")
def predicative(adjective):
    """ Returns the predicative (base) form of an inflected adjective, lowercase.
        In German, the attributive form preceding a noun is always inflected
        ("ein kleiner Junge", "eine schöne Frau", "der kleine Prinz");
        stripping the case ending yields the predicative form, which is
        useful for lemmatization.
    """
    word = adjective.lower()
    if len(word) <= 3:
        return word
    for ending in ("em", "en", "er", "es", "e"):
        if not word.endswith(ending):
            continue
        # Strip the ending, but keep at least three characters of stem.
        stem = word[:max(-len(ending), 3 - len(word))]
        if stem.endswith("bl"):  # plausibles => plausibel
            stem = stem[:-1] + "el"
        if stem.endswith("pr"):  # propres => proper
            stem = stem[:-1] + "er"
        return stem
    return word
#### COMPARATIVE & SUPERLATIVE #####################################################################

# Suffixes inserted into the adjective by grade() below.
COMPARATIVE = "er"
SUPERLATIVE = "st"
def grade(adjective, suffix=COMPARATIVE):
    """ Returns the comparative or superlative form of the given (inflected)
        adjective: the suffix is inserted between the stem and the inflection
        ending, e.g. große => großere, schönes => schöneres.
    """
    stem = predicative(adjective)
    # Stems ending in -s or -ß drop the s of the superlative suffix:
    # groß => großt, schön => schönst.
    if suffix == SUPERLATIVE and stem.endswith(("s", u"ß")):
        suffix = suffix[1:]
    n = len(stem)
    return adjective[:n] + suffix + adjective[n:]
def comparative(adjective):
    """ Returns the comparative form ("schönes" => "schöneres"). """
    return grade(adjective, COMPARATIVE)
def superlative(adjective):
    """ Returns the superlative form ("schönes" => "schönstes"). """
    return grade(adjective, SUPERLATIVE)
#print comparative(u"schönes")
#print superlative(u"schönes")
#print superlative(u"große") | EricSchles/pattern | pattern/text/de/inflect.py | Python | bsd-3-clause | 28,754 | [
"ASE"
] | 1019d722f0d1579e6367e28274bb6f9675a2b71a42fa657c0a548267022dff8c |
# -*- coding: utf-8 -*-
"""This module builds mutation functions that are bound to a manager."""
from typing import Callable
from pybel import BELGraph, Manager
from pybel.struct.mutation.expansion.neighborhood import expand_node_neighborhood
from pybel.struct.pipeline import in_place_transformation, uni_in_place_transformation
# Public API of this module.
__all__ = [
    'build_expand_node_neighborhood_by_hash',
    'build_delete_node_by_hash',
]
def build_expand_node_neighborhood_by_hash(manager: Manager) -> Callable[[BELGraph, BELGraph, str], None]:  # noqa: D202
    """Make an expand function that's bound to the manager.

    The returned function resolves a node hash through the given manager,
    then expands ``graph`` with that node's neighborhood from ``universe``.
    """

    @uni_in_place_transformation
    def expand_node_neighborhood_by_hash(universe: BELGraph, graph: BELGraph, node_hash: str) -> None:
        """Expand around the neighborhood of a node, identified by its hash."""
        # Resolve the hash to a DSL node via the bound manager.
        node = manager.get_dsl_by_hash(node_hash)
        return expand_node_neighborhood(universe, graph, node)

    return expand_node_neighborhood_by_hash
def build_delete_node_by_hash(manager: Manager) -> Callable[[BELGraph, str], None]:  # noqa: D202
    """Make a delete function that's bound to the manager.

    The returned function resolves a node hash through the given manager,
    then removes the node from the graph.
    """

    @in_place_transformation
    def delete_node_by_hash(graph: BELGraph, node_hash: str) -> None:
        """Remove a node from the graph, identified by its hash."""
        node = manager.get_dsl_by_hash(node_hash)
        graph.remove_node(node)

    return delete_node_by_hash
| pybel/pybel-tools | src/pybel_tools/mutation/bound.py | Python | mit | 1,398 | [
"Pybel"
] | fb756b684f463593452abc97b7c5d9ca492983b96045a56e5119ec823b8d921f |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Parsers for basis set in Gaussian program format
'''
__all__ = ['parse', 'load']

# Contraction clean-up helpers shared with the NWChem parser; fall back to
# no-ops so this module stays importable on its own.
try:
    from pyscf.gto.basis.parse_nwchem import optimize_contraction
    from pyscf.gto.basis.parse_nwchem import remove_zero
except ImportError:
    optimize_contraction = lambda basis: basis
    remove_zero = lambda basis: basis

MAXL = 12                    # highest angular momentum handled by the sorter
SPDF = 'SPDFGHIJKLMN'        # shell labels ordered by angular momentum l
MAPSPDF = {key: l for l, key in enumerate(SPDF)}   # label -> l
DELIMETER = '****'           # element-section separator in Gaussian files
def parse(string, optimize=True):
    '''Parse the basis text which is in Gaussian program format, return an
    internal basis format which can be assigned to :attr:`Mole.basis`.

    Text following "!" on a line is treated as a comment and ignored, and the
    "****" element-delimiter lines are skipped.

    Args:
        string : basis set text in Gaussian format.
        optimize : whether to merge contractions and drop zero coefficients.
    '''
    raw_basis = []
    for dat in string.splitlines():
        # Strip "!" comments and surrounding whitespace.
        x = dat.split('!', 1)[0].strip()
        if x and x != DELIMETER:
            raw_basis.append(x)
    return _parse(raw_basis, optimize)
def load(basisfile, symb, optimize=True):
    '''Load the basis set of one element ``symb`` from a Gaussian-format file.

    Returns the internal basis format produced by :func:`_parse`; the result
    is empty when the element is not present in the file.
    '''
    raw_basis = search_seg(basisfile, symb)
    #if not raw_basis:
    #    raise BasisNotFoundError('Basis not found for %s in %s' % (symb, basisfile))
    return _parse(raw_basis, optimize)
def search_seg(basisfile, symb):
    '''Extract the raw basis lines for element ``symb`` from a Gaussian-format
    file whose element sections are separated by "****" lines.
    '''
    with open(basisfile, 'r') as fin:

        def _seek(test_str):
            # Read forward until a line containing test_str appears; return
            # (found, non-empty lines read before it).
            raw_basis = []
            dat = fin.readline()
            while dat:
                if test_str in dat:
                    return True, raw_basis
                elif dat.strip():  # Skip empty lines
                    raw_basis.append(dat)
                dat = fin.readline()
            return False, raw_basis

        # Skip everything before the first "****" (file header).
        has_delimeter, raw_basis = _seek(DELIMETER)
        if has_delimeter:
            # Scan each section header for the requested element symbol.
            dat = fin.readline()
            while dat:
                if dat.strip().split(' ', 1)[0].upper() == symb.upper():
                    # Collect this element's lines up to the next "****".
                    raw_basis = _seek(DELIMETER)[1]
                    break
                else:
                    _seek(DELIMETER)
                    dat = fin.readline()
        return raw_basis
def _parse(raw_basis, optimize=True):
    '''Convert pre-filtered Gaussian-format lines into the internal basis
    format: a list of ``[l, [exponent, coeff, ...], ...]`` shells, sorted by
    angular momentum.
    '''
    basis_add = []
    for line in raw_basis:
        dat = line.strip()
        if dat.startswith('!'):
            # Comment line.
            continue
        elif dat[0].isalpha():
            # Shell header, e.g. "S 3 1.00", "SP 3 1.00" or "l=13 3 1.00".
            # NOTE: key is reused below when reading the primitive lines of
            # the current shell.
            key = dat.split()
            if len(key) == 2:
                # skip the line which has only two items. It's the line for
                # element symbol
                continue
            elif key[0] == 'SP':
                # An SP shell expands to one s (l=0) and one p (l=1) shell.
                basis_add.append([0])
                basis_add.append([1])
            elif len(key[0]) > 2 and key[0][:2] in ['l=', 'L=']:
                # Angular momentum defined explicitly
                basis_add.append([int(key[0][2:])])
            else:
                basis_add.append([MAPSPDF[key[0]]])
        else:
            # Primitive line: exponent then coefficient(s); Fortran-style
            # "D" exponents are converted to "e" before parsing.
            line = [float(x) for x in dat.replace('D', 'e').split()]
            if key[0] == 'SP':
                # Column 1 -> s shell, column 2 -> p shell.
                basis_add[-2].append([line[0], line[1]])
                basis_add[-1].append([line[0], line[2]])
            else:
                basis_add[-1].append(line)
    # Stable sort of the shells by angular momentum.
    basis_sorted = []
    for l in range(MAXL):
        basis_sorted.extend([b for b in basis_add if b[0] == l])

    if optimize:
        basis_sorted = optimize_contraction(basis_sorted)

    basis_sorted = remove_zero(basis_sorted)
    return basis_sorted
if __name__ == '__main__':
    # Smoke test: parse carbon from a def2 fitting-basis file in Gaussian format.
    print(load('def2-qzvp-jkfit.gbs', 'C'))
| sunqm/pyscf | pyscf/gto/basis/parse_gaussian.py | Python | apache-2.0 | 3,951 | [
"Gaussian",
"NWChem",
"PySCF"
] | 16a74dea400a7a817238b067559121ec13764474256820d1ff5bd0e8dff5f176 |
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Different FCI solvers are implemented to support different type of symmetry.
Symmetry
File Point group Spin singlet Real hermitian* Alpha/beta degeneracy
direct_spin0_symm Yes Yes Yes Yes
direct_spin1_symm Yes No Yes Yes
direct_spin0 No Yes Yes Yes
direct_spin1 No No Yes Yes
direct_uhf No No Yes No
direct_nosym No No No** Yes
* Real hermitian Hamiltonian implies (ij|kl) = (ji|kl) = (ij|lk) = (ji|lk)
** Hamiltonian is real but not hermitian, (ij|kl) != (ji|kl) ...
'''
import ctypes
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.fci import cistring
from pyscf.fci import direct_spin1
# C backend implementing the heavy CI contraction kernels.
libfci = lib.load_library('libfci')

# When the spin-orbitals do not have degeneracy in the spatial part,
# there is only one version of FCI, which is close to the _spin1 solver.
# The inputs: h1e has two parts (h1e_a, h1e_b),
# h2e has three parts (h2e_aa, h2e_ab, h2e_bb)
def contract_1e(f1e, fcivec, norb, nelec, link_index=None):
    '''Contract the spin-separated one-electron Hamiltonian with the CI vector.

    Args:
        f1e : tuple (f1e_a, f1e_b) of one-electron integral matrices.
        fcivec : CI coefficients, reshapeable to (na, nb).
        norb : number of spatial orbitals.
        nelec : number of electrons (int or (neleca, nelecb)).
        link_index : optional precomputed lookup tables for the C kernels.

    Returns:
        ndarray of the same shape as ``fcivec``.
    '''
    fcivec = numpy.asarray(fcivec, order='C')
    link_indexa, link_indexb = direct_spin1._unpack(norb, nelec, link_index)
    na, nlinka = link_indexa.shape[:2]
    nb, nlinkb = link_indexb.shape[:2]
    assert(fcivec.size == na*nb)
    ci1 = numpy.zeros_like(fcivec)
    # Alpha contribution; the C kernel consumes h1 packed via pack_tril.
    f1e_tril = lib.pack_tril(f1e[0])
    libfci.FCIcontract_a_1e(f1e_tril.ctypes.data_as(ctypes.c_void_p),
                            fcivec.ctypes.data_as(ctypes.c_void_p),
                            ci1.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb),
                            ctypes.c_int(na), ctypes.c_int(nb),
                            ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
                            link_indexa.ctypes.data_as(ctypes.c_void_p),
                            link_indexb.ctypes.data_as(ctypes.c_void_p))
    # Beta contribution accumulates into the same ci1 buffer.
    f1e_tril = lib.pack_tril(f1e[1])
    libfci.FCIcontract_b_1e(f1e_tril.ctypes.data_as(ctypes.c_void_p),
                            fcivec.ctypes.data_as(ctypes.c_void_p),
                            ci1.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb),
                            ctypes.c_int(na), ctypes.c_int(nb),
                            ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
                            link_indexa.ctypes.data_as(ctypes.c_void_p),
                            link_indexb.ctypes.data_as(ctypes.c_void_p))
    return ci1
def contract_2e(eri, fcivec, norb, nelec, link_index=None):
    '''Contract the two-electron Hamiltonian with the CI vector.

    Args:
        eri : tuple (eri_aa, eri_ab, eri_bb); each block is restored to
            4-fold permutation-symmetry packed form before entering the
            C kernel.
        fcivec : CI coefficients, reshapeable to (na, nb).

    Returns:
        ndarray of the same shape as ``fcivec``.
    '''
    fcivec = numpy.asarray(fcivec, order='C')
    g2e_aa = ao2mo.restore(4, eri[0], norb)
    g2e_ab = ao2mo.restore(4, eri[1], norb)
    g2e_bb = ao2mo.restore(4, eri[2], norb)
    link_indexa, link_indexb = direct_spin1._unpack(norb, nelec, link_index)
    na, nlinka = link_indexa.shape[:2]
    nb, nlinkb = link_indexb.shape[:2]
    assert(fcivec.size == na*nb)
    ci1 = numpy.empty_like(fcivec)

    libfci.FCIcontract_uhf2e(g2e_aa.ctypes.data_as(ctypes.c_void_p),
                             g2e_ab.ctypes.data_as(ctypes.c_void_p),
                             g2e_bb.ctypes.data_as(ctypes.c_void_p),
                             fcivec.ctypes.data_as(ctypes.c_void_p),
                             ci1.ctypes.data_as(ctypes.c_void_p),
                             ctypes.c_int(norb),
                             ctypes.c_int(na), ctypes.c_int(nb),
                             ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
                             link_indexa.ctypes.data_as(ctypes.c_void_p),
                             link_indexb.ctypes.data_as(ctypes.c_void_p))
    return ci1
def contract_2e_hubbard(u, fcivec, norb, nelec, opt=None):
    """Apply the on-site Hubbard interaction (u_aa, u_ab, u_bb) to a CI vector.

    Site occupations are read directly from the bits of the alpha/beta
    determinant strings, so no integral tensors are required.
    """
    neleca, nelecb = direct_spin1._unpack_nelec(nelec)
    u_aa, u_ab, u_bb = u
    strings_a = numpy.asarray(cistring.gen_strings4orblist(range(norb), neleca))
    strings_b = numpy.asarray(cistring.gen_strings4orblist(range(norb), nelecb))
    dim_a = cistring.num_strings(norb, neleca)
    dim_b = cistring.num_strings(norb, nelecb)
    civec = fcivec.reshape(dim_a, dim_b)
    out = numpy.zeros_like(civec)

    if u_aa != 0:  # u * n_alpha^+ n_alpha
        for site in range(norb):
            occ_a = (strings_a & (1 << site)) > 0
            out[occ_a] += u_aa * civec[occ_a]
    if u_ab != 0:  # u * (n_alpha^+ n_beta + n_beta^+ n_alpha)
        for site in range(norb):
            occ_a = (strings_a & (1 << site)) > 0
            occ_b = (strings_b & (1 << site)) > 0
            both = occ_a[:, None] & occ_b
            out[both] += 2 * u_ab * civec[both]
    if u_bb != 0:  # u * n_beta^+ n_beta
        for site in range(norb):
            occ_b = (strings_b & (1 << site)) > 0
            out[:, occ_b] += u_bb * civec[:, occ_b]
    return out
def make_hdiag(h1e, eri, norb, nelec):
    '''Diagonal elements of the Hamiltonian in the determinant basis.

    Args:
        h1e : tuple (h1e_a, h1e_b) of one-electron integrals.
        eri : tuple (eri_aa, eri_ab, eri_bb) of two-electron integrals.

    Returns:
        1D ndarray of length na*nb.
    '''
    neleca, nelecb = direct_spin1._unpack_nelec(nelec)
    h1e_a = numpy.ascontiguousarray(h1e[0])
    h1e_b = numpy.ascontiguousarray(h1e[1])
    g2e_aa = ao2mo.restore(1, eri[0], norb)
    g2e_ab = ao2mo.restore(1, eri[1], norb)
    g2e_bb = ao2mo.restore(1, eri[2], norb)

    # Occupation lists; alpha and beta share one list when neleca == nelecb.
    occslsta = occslstb = cistring._gen_occslst(range(norb), neleca)
    if neleca != nelecb:
        occslstb = cistring._gen_occslst(range(norb), nelecb)
    na = len(occslsta)
    nb = len(occslstb)

    hdiag = numpy.empty(na*nb)
    # Coulomb (iijj) and exchange (ijji) diagonals consumed by the C kernel.
    jdiag_aa = numpy.asarray(numpy.einsum('iijj->ij',g2e_aa), order='C')
    jdiag_ab = numpy.asarray(numpy.einsum('iijj->ij',g2e_ab), order='C')
    jdiag_bb = numpy.asarray(numpy.einsum('iijj->ij',g2e_bb), order='C')
    kdiag_aa = numpy.asarray(numpy.einsum('ijji->ij',g2e_aa), order='C')
    kdiag_bb = numpy.asarray(numpy.einsum('ijji->ij',g2e_bb), order='C')
    libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),
                             h1e_a.ctypes.data_as(ctypes.c_void_p),
                             h1e_b.ctypes.data_as(ctypes.c_void_p),
                             jdiag_aa.ctypes.data_as(ctypes.c_void_p),
                             jdiag_ab.ctypes.data_as(ctypes.c_void_p),
                             jdiag_bb.ctypes.data_as(ctypes.c_void_p),
                             kdiag_aa.ctypes.data_as(ctypes.c_void_p),
                             kdiag_bb.ctypes.data_as(ctypes.c_void_p),
                             ctypes.c_int(norb),
                             ctypes.c_int(na), ctypes.c_int(nb),
                             ctypes.c_int(neleca), ctypes.c_int(nelecb),
                             occslsta.ctypes.data_as(ctypes.c_void_p),
                             occslstb.ctypes.data_as(ctypes.c_void_p))
    return numpy.asarray(hdiag)
def absorb_h1e(h1e, eri, norb, nelec, fac=1):
    '''Fold the one-electron integrals into the two-electron integrals so the
    whole Hamiltonian can be applied with a single :func:`contract_2e` call.

    Returns:
        (h2e_aa, h2e_ab, h2e_bb), each in 4-fold packed form, scaled by fac.
    '''
    if not isinstance(nelec, (int, numpy.number)):
        nelec = sum(nelec)
    h1e_a, h1e_b = h1e
    h2e_aa = ao2mo.restore(1, eri[0], norb).copy()
    h2e_ab = ao2mo.restore(1, eri[1], norb).copy()
    h2e_bb = ao2mo.restore(1, eri[2], norb).copy()
    # Effective one-electron part with the exchange contraction removed.
    f1e_a = h1e_a - numpy.einsum('jiik->jk', h2e_aa) * .5
    f1e_b = h1e_b - numpy.einsum('jiik->jk', h2e_bb) * .5
    # Distribute f1e over the electron count (1e-100 guards nelec == 0).
    f1e_a *= 1./(nelec+1e-100)
    f1e_b *= 1./(nelec+1e-100)
    # Embed the one-electron part into the diagonal blocks of h2e.
    for k in range(norb):
        h2e_aa[:,:,k,k] += f1e_a
        h2e_aa[k,k,:,:] += f1e_a
        h2e_ab[:,:,k,k] += f1e_a
        h2e_ab[k,k,:,:] += f1e_b
        h2e_bb[:,:,k,k] += f1e_b
        h2e_bb[k,k,:,:] += f1e_b
    return (ao2mo.restore(4, h2e_aa, norb) * fac,
            ao2mo.restore(4, h2e_ab, norb) * fac,
            ao2mo.restore(4, h2e_bb, norb) * fac)
def pspace(h1e, eri, norb, nelec, hdiag=None, np=400):
    '''Select (at most) ``np`` determinants with the lowest diagonal elements
    and build the dense Hamiltonian over that determinant subspace.

    Returns:
        addr : composite determinant addresses (row-major over alpha/beta).
        h0 : (np, np) Hamiltonian matrix in the selected subspace.
    '''
    neleca, nelecb = direct_spin1._unpack_nelec(nelec)
    h1e_a = numpy.ascontiguousarray(h1e[0])
    h1e_b = numpy.ascontiguousarray(h1e[1])
    g2e_aa = ao2mo.restore(1, eri[0], norb)
    g2e_ab = ao2mo.restore(1, eri[1], norb)
    g2e_bb = ao2mo.restore(1, eri[2], norb)
    if hdiag is None:
        hdiag = make_hdiag(h1e, eri, norb, nelec)
    if hdiag.size < np:
        addr = numpy.arange(hdiag.size)
    else:
        try:
            # O(n) partial selection; argsort fallback for very old numpy.
            addr = numpy.argpartition(hdiag, np-1)[:np]
        except AttributeError:
            addr = numpy.argsort(hdiag)[:np]
    # Split the composite address into alpha/beta string addresses.
    nb = cistring.num_strings(norb, nelecb)
    addra = addr // nb
    addrb = addr % nb
    stra = cistring.addrs2str(norb, neleca, addra)
    strb = cistring.addrs2str(norb, nelecb, addrb)
    np = len(addr)
    h0 = numpy.zeros((np,np))
    libfci.FCIpspace_h0tril_uhf(h0.ctypes.data_as(ctypes.c_void_p),
                                h1e_a.ctypes.data_as(ctypes.c_void_p),
                                h1e_b.ctypes.data_as(ctypes.c_void_p),
                                g2e_aa.ctypes.data_as(ctypes.c_void_p),
                                g2e_ab.ctypes.data_as(ctypes.c_void_p),
                                g2e_bb.ctypes.data_as(ctypes.c_void_p),
                                stra.ctypes.data_as(ctypes.c_void_p),
                                strb.ctypes.data_as(ctypes.c_void_p),
                                ctypes.c_int(norb), ctypes.c_int(np))

    for i in range(np):
        h0[i,i] = hdiag[addr[i]]
    # Complete the Hermitian matrix from the triangular part the C kernel
    # filled (note the "tril" in its name).
    h0 = lib.hermi_triu(h0)
    return addr, h0
# be careful with single determinant initial guess. It may lead to the
# eigvalue of first davidson iter being equal to hdiag
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
           lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
           davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
           ecore=0, **kwargs):
    '''Solve the FCI problem for spin-unrestricted (UHF) integrals.

    h1e is (h1e_a, h1e_b) and eri is (eri_aa, eri_ab, eri_bb); the work is
    delegated to the generic direct_spin1 driver with this module's
    :class:`FCISolver`.

    NOTE: ``orbsym`` and ``wfnsym`` are accepted only for interface
    compatibility with the symmetry-adapted solvers; they are not forwarded.
    '''
    return direct_spin1._kfactory(FCISolver, h1e, eri, norb, nelec, ci0, level_shift,
                                  tol, lindep, max_cycle, max_space, nroots,
                                  davidson_only, pspace_size, ecore=ecore, **kwargs)
def energy(h1e, eri, fcivec, norb, nelec, link_index=None):
    """Compute the CI energy <c|H|c> from UHF integrals and a CI vector."""
    # Fold h1e into the two-electron integrals (factor .5 compensates the
    # double counting), then contract once with the CI vector.
    heff = absorb_h1e(h1e, eri, norb, nelec, .5)
    hc = contract_2e(heff, fcivec, norb, nelec, link_index)
    return numpy.dot(fcivec.reshape(-1), hc.reshape(-1))
# dm_pq = <|p^+ q|>
def make_rdm1s(fcivec, norb, nelec, link_index=None):
    '''Spin-separated one-particle density matrices (dm_alpha, dm_beta).'''
    return direct_spin1.make_rdm1s(fcivec, norb, nelec, link_index)
# spatial part of DM, dm_pq = <|p^+ q|>
def make_rdm1(fcivec, norb, nelec, link_index=None):
    '''Spin-traced one-particle density matrix is not defined for UHF-FCI.

    Raises:
        ValueError: always.  The alpha and beta orbital spaces differ, so a
            spin-traced matrix is ambiguous; use :func:`make_rdm1s` to obtain
            the (dm_alpha, dm_beta) pair instead.
    '''
    raise ValueError('Spin-traced 1-RDM is not available for UHF-FCI; '
                     'use make_rdm1s to get the (alpha, beta) density matrices.')
def make_rdm12s(fcivec, norb, nelec, link_index=None, reorder=True):
    '''Spin-separated one- and two-particle density matrices.'''
    return direct_spin1.make_rdm12s(fcivec, norb, nelec, link_index, reorder)
def trans_rdm1s(cibra, ciket, norb, nelec, link_index=None):
    '''Spin-separated one-particle transition density matrices <bra|p^+ q|ket>.'''
    return direct_spin1.trans_rdm1s(cibra, ciket, norb, nelec, link_index)
# spatial part of DM
def trans_rdm1(cibra, ciket, norb, nelec, link_index=None):
    '''Spin-traced transition density matrix is not defined for UHF-FCI.

    Raises:
        ValueError: always.  Use :func:`trans_rdm1s` for the spin-separated
            transition density matrices.
    '''
    raise ValueError('Spin-traced transition 1-RDM is not available for '
                     'UHF-FCI; use trans_rdm1s instead.')
def trans_rdm12s(cibra, ciket, norb, nelec, link_index=None, reorder=True):
    '''Spin-separated one- and two-particle transition density matrices.'''
    return direct_spin1.trans_rdm12s(cibra, ciket, norb, nelec, link_index, reorder)
###############################################################
# uhf-integral direct-CI driver
###############################################################

class FCISolver(direct_spin1.FCISolver):
    '''Direct-CI solver for spin-unrestricted (UHF) integrals.

    Overrides the integral-handling hooks of direct_spin1.FCISolver so that
    h1e = (h1e_a, h1e_b) and eri = (eri_aa, eri_ab, eri_bb) are accepted.
    '''

    def absorb_h1e(self, h1e, eri, norb, nelec, fac=1):
        return absorb_h1e(h1e, eri, norb, nelec, fac)

    def make_hdiag(self, h1e, eri, norb, nelec):
        return make_hdiag(h1e, eri, norb, nelec)

    def pspace(self, h1e, eri, norb, nelec, hdiag, np=400):
        return pspace(h1e, eri, norb, nelec, hdiag, np)

    def contract_1e(self, f1e, fcivec, norb, nelec, link_index=None, **kwargs):
        return contract_1e(f1e, fcivec, norb, nelec, link_index, **kwargs)

    def contract_2e(self, eri, fcivec, norb, nelec, link_index=None, **kwargs):
        return contract_2e(eri, fcivec, norb, nelec, link_index, **kwargs)

    def spin_square(self, fcivec, norb, nelec):
        # Local import avoids a circular dependency at module load time.
        from pyscf.fci import spin_op
        return spin_op.spin_square(fcivec, norb, nelec)

    # NOTE(review): unlike the usual make_rdm1(fcivec, ...), this signature
    # mirrors trans_rdm1 (bra and ket) and forwards to the module-level
    # trans_rdm1, which raises ValueError for UHF-FCI.
    def make_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
        return trans_rdm1(cibra, ciket, norb, nelec, link_index)

    def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
        return trans_rdm1(cibra, ciket, norb, nelec, link_index)
FCI = FCISolver
if __name__ == '__main__':
    # Self-test: 6 hydrogen atoms with charge +1 (doublet) in a minimal
    # basis; the FCI energy is compared against a stored reference value.
    from functools import reduce
    from pyscf import gto
    from pyscf import scf

    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None#"out_h2o"
    mol.atom = [
        ['H', ( 1.,-1.    , 0.   )],
        ['H', ( 0.,-1.    ,-1.   )],
        ['H', ( 1.,-0.5   ,-1.   )],
        #['H', ( 0.,-0.5  ,-1.   )],
        #['H', ( 0.,-0.5  ,-0.   )],
        ['H', ( 0.,-0.    ,-1.   )],
        ['H', ( 1.,-0.5   , 0.   )],
        ['H', ( 0., 1.    , 1.   )],
    ]
    mol.basis = {'H': 'sto-3g'}
    mol.charge = 1
    mol.spin = 1
    mol.build()

    m = scf.UHF(mol)
    ehf = m.scf()

    cis = FCISolver(mol)
    norb = m.mo_energy[0].size
    nea = (mol.nelectron+1) // 2
    neb = (mol.nelectron-1) // 2
    nelec = (nea, neb)
    mo_a = m.mo_coeff[0]
    mo_b = m.mo_coeff[1]
    # Transform AO integrals into the (different) alpha and beta MO bases.
    h1e_a = reduce(numpy.dot, (mo_a.T, m.get_hcore(), mo_a))
    h1e_b = reduce(numpy.dot, (mo_b.T, m.get_hcore(), mo_b))
    g2e_aa = ao2mo.incore.general(m._eri, (mo_a,)*4, compact=False)
    g2e_aa = g2e_aa.reshape(norb,norb,norb,norb)
    g2e_ab = ao2mo.incore.general(m._eri, (mo_a,mo_a,mo_b,mo_b), compact=False)
    g2e_ab = g2e_ab.reshape(norb,norb,norb,norb)
    g2e_bb = ao2mo.incore.general(m._eri, (mo_b,)*4, compact=False)
    g2e_bb = g2e_bb.reshape(norb,norb,norb,norb)
    h1e = (h1e_a, h1e_b)
    eri = (g2e_aa, g2e_ab, g2e_bb)
    na = cistring.num_strings(norb, nea)
    nb = cistring.num_strings(norb, neb)

    numpy.random.seed(15)
    fcivec = numpy.random.random((na,nb))

    # Compare against the reference FCI energy.
    e = kernel(h1e, eri, norb, nelec)[0]
    print(e, e - -8.65159903476)
| sunqm/pyscf | pyscf/fci/direct_uhf.py | Python | apache-2.0 | 14,488 | [
"PySCF"
] | 0eb1c5e46a86b7e137d60e524cacb648a7f0322e6eea1ef08a12da75b1f265d1 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
import math
import espressopp
import mpi4py.MPI as MPI
import unittest
def vec_pbc(u, v, cell):
    """Return the minimum-image vector u - v in an orthorhombic box ``cell``."""
    deltas = []
    for cu, cv, box_len in zip(u, v, cell):
        d = cu - cv
        # Wrap each component into the nearest periodic image.
        deltas.append(d - round(d / box_len) * box_len)
    return deltas[0], deltas[1], deltas[2]
def abslen(u):
    """Return the Euclidean length of the 3-vector ``u``."""
    return math.sqrt(sum(c * c for c in u[:3]))
def cross(u, v):
    """Return the cross product u x v as a 3-element list."""
    return [u[1]*v[2] - u[2]*v[1],
            u[2]*v[0] - u[0]*v[2],
            u[0]*v[1] - u[1]*v[0]]
def dot(u, v):
    """Return the scalar product of the first three components of u and v."""
    total = 0
    for i in range(3):
        total += u[i] * v[i]
    return total
def calc_dihedral(self, quadrupleslist):
    """Return the torsion angle (radians, in [-pi, pi]) of four particles.

    quadrupleslist contains the particle ids [i, j, k, n].  The sign follows
    the IUPAC convention (same convention as used in espressopp, dlpoly,
    gromacs, vmd, etc.).  Positions are fetched from self.system.storage and
    wrapped in the periodic box self.box.
    """
    positions = []
    for pid in quadrupleslist:
        positions.append(self.system.storage.getParticle(pid).pos)
    # Bond vectors under the minimum-image convention.
    rij = vec_pbc(positions[1], positions[0], self.box)
    rjk = vec_pbc(positions[2], positions[1], self.box)
    rkn = vec_pbc(positions[3], positions[2], self.box)
    # Normals of the (i,j,k) and (j,k,n) planes.
    rijjk = cross(rij, rjk)
    rjkkn = cross(rjk, rkn)
    cos_phi = dot(rijjk, rjkkn)/(abslen(rijjk)*abslen(rjkkn))
    phi = math.acos(cos_phi)
    # acos() gives [0, pi]; recover the sign from the orientation of the
    # two plane normals relative to the central bond.
    rcross = cross(rijjk, rjkkn)
    signcheck = dot(rcross, rjk)
    if signcheck < 0.0:
        phi *= -1.0
    return phi
class TestDihedralHarmonic(unittest.TestCase):
    """Exercise espressopp's DihedralHarmonic potential for several phi0."""

    def setUp(self):
        # Minimal espressopp system: 10^3 orthorhombic box with a domain
        # decomposition matching the number of MPI ranks.
        system = espressopp.System()
        box = (10, 10, 10)
        self.box = box
        cutoff = 2.0
        skin = 1.0
        system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
        system.skin = skin
        system.comm = MPI.COMM_WORLD
        nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size)
        cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, cutoff, skin)
        system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
        self.system = system

    def test_phi0(self):
        # create system with three torsions, and for each one set a potential
        # with a different phi0 value, to check that the interaction potential
        # works for all combinations of positive and negative phi and phi0
        phi0 = [10.0, -170.0, 170.0]
        particle_list = [
            # add particles with initial torsion angle +90
            (1, 0, espressopp.Real3D(2.0, 3.0, 3.0), 1.0),
            (2, 0, espressopp.Real3D(2.0, 2.0, 3.0), 1.0),
            (3, 0, espressopp.Real3D(2.0, 2.0, 2.0), 1.0),
            (4, 0, espressopp.Real3D(3.0, 2.0, 2.0), 1.0),
            # add particles with initial torsion angle 160
            (5, 0, espressopp.Real3D(2.0, 3.0, 3.0), 1.0),
            (6, 0, espressopp.Real3D(2.0, 2.0, 3.0), 1.0),
            (7, 0, espressopp.Real3D(2.0, 2.0, 2.0), 1.0),
            (8, 0, espressopp.Real3D(2.4, 0.8, 2.0), 1.0),
            # add particles with initial torsion angle -161
            ( 9, 0, espressopp.Real3D(2.0, 3.0, 3.0), 1.0),
            (10, 0, espressopp.Real3D(2.0, 2.0, 3.0), 1.0),
            (11, 0, espressopp.Real3D(2.0, 2.0, 2.0), 1.0),
            (12, 0, espressopp.Real3D(1.6, 0.8, 2.0), 1.0),
        ]
        self.system.storage.addParticles(particle_list, 'id', 'type', 'pos', 'mass')
        self.system.storage.decompose()
        quadrupleslist = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
        torsiontuples = [(1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12)]
        bondtuples = [(1, 2), (2, 3), (3, 4), (5, 6), (6, 7), (7, 8), (9, 10), (10, 11), (11, 12)]
        # add torsions
        interactions = []
        for i in xrange(3):
            fql = espressopp.FixedQuadrupleList(self.system.storage)
            fql.addQuadruples([torsiontuples[i]])
            interaction = espressopp.interaction.FixedQuadrupleListDihedralHarmonic(self.system, fql, potential=espressopp.interaction.DihedralHarmonic(K=1.0, phi0=phi0[i]*math.pi/180.0))
            self.system.addInteraction(interaction)
            interactions.append(interaction)
        # add bonds so that atoms in the torsions don't drift too far apart
        fpl = espressopp.FixedPairList(self.system.storage)
        fpl.addBonds(bondtuples)
        interaction = espressopp.interaction.FixedPairListHarmonic(self.system, fpl, potential=espressopp.interaction.Harmonic(K=1.0, r0=1.0))
        self.system.addInteraction(interaction)
        integrator = espressopp.integrator.VelocityVerlet(self.system)
        integrator.run(50)
        # Reference energies and torsion angles after 50 MD steps.
        self.assertAlmostEqual(interactions[0].computeEnergy(), 0.747885, places=5)
        self.assertAlmostEqual(interactions[1].computeEnergy(), 0.099570, places=5)
        self.assertAlmostEqual(interactions[2].computeEnergy(), 0.099570, places=5)
        self.assertAlmostEqual(calc_dihedral(self, quadrupleslist[0]), 1.397549, places=5)
        self.assertAlmostEqual(calc_dihedral(self, quadrupleslist[1]), 2.869874, places=5)
        self.assertAlmostEqual(calc_dihedral(self, quadrupleslist[2]), -2.869874, places=5)
if __name__ == '__main__':
    # Run the unittest suite when executed directly.
    unittest.main()
| acfogarty/espressopp | testsuite/interaction_potentials/dihedral_harmonic/test_dihedralharmonic.py | Python | gpl-3.0 | 5,042 | [
"Gromacs",
"VMD"
] | 106dba4009cef3713639280bd7a156d0ccc0a55a07c71034b710b35abf16bc7e |
# -*- coding: utf-8 -*-
"""Transport functions for Amazon Web Services (AWS).
AWS has a cloud-based file storage service called S3 that can be programatically
accessed using the :mod:`boto3` package. This module provides functions for quickly
wrapping upload/download of BEL graphs using the gzipped Node-Link schema.
"""
import logging
from io import BytesIO
from typing import Any, Optional
from .nodelink import from_nodelink_gz_io, to_nodelink_gz_io
from ..struct import BELGraph
__all__ = [
    "to_s3",
    "from_s3",
]

logger = logging.getLogger(__name__)

# boto3 client objects are generated at runtime and carry no static type;
# alias used in the signatures below.
S3Client = Any
def to_s3(graph: BELGraph, *, bucket: str, key: str, client: Optional[S3Client] = None) -> None:
    """Upload a BEL graph to S3 as gzipped node-link JSON.

    If no instantiated ``client`` is given, PyBEL creates a default one with
    :func:`boto3.client`, as in:

    .. code-block:: python

        import pybel
        from pybel.examples import sialic_acid_graph
        graph = pybel.to_s3(
            sialic_acid_graph,
            bucket='your bucket',
            key='your file name.bel.nodelink.json.gz',
        )

    To configure your own client instead:

    .. code-block:: python

        import boto3
        s3_client = boto3.client('s3')

        import pybel
        from pybel.examples import sialic_acid_graph
        graph = pybel.to_s3(
            sialic_acid_graph,
            client=s3_client,
            bucket='your bucket',
            key='your file name.bel.nodelink.json.gz',
        )

    .. warning:: This assumes you already have credentials set up on your machine

    If you don't already have a bucket, you can create one with ``boto3`` by
    following this tutorial:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-example-creating-buckets.html
    """
    if client is None:
        import boto3

        client = boto3.client("s3")
    # Serialize to an in-memory gzipped node-link buffer, then stream it up.
    buffer = to_nodelink_gz_io(graph)
    client.upload_fileobj(buffer, bucket, key)
def from_s3(*, bucket: str, key: str, client: Optional[S3Client] = None) -> BELGraph:
    """Download a BEL graph stored on Amazon S3 as gzipped node-link JSON.

    If no instantiated ``client`` is given, PyBEL creates a default one with
    :func:`boto3.client`, as in:

    .. code-block:: python

        graph = pybel.from_s3(bucket='your bucket', key='your file name.bel.nodelink.json.gz')

    To configure your own client instead:

    .. code-block:: python

        import boto3
        s3_client = boto3.client('s3')

        import pybel
        graph = pybel.from_s3(
            client=s3_client,
            bucket='your bucket',
            key='your file name.bel.nodelink.json.gz',
        )
    """
    if client is None:
        import boto3

        client = boto3.client("s3")
    # Download into an in-memory buffer, rewind, then deserialize.
    buffer = BytesIO()
    client.download_fileobj(bucket, key, buffer)
    buffer.seek(0)
    return from_nodelink_gz_io(buffer)
| pybel/pybel | src/pybel/io/aws.py | Python | mit | 3,050 | [
"Pybel"
] | d45121418d73f022935b7eac4982b3bbf13ae0c136ffe8f5106591293900589d |
from .base import *
class diffusion(object):
    """
    cytoscape session interface as shown in CyREST's swagger documentation
    for 'diffusion'.

    :param url: a URL of the type 'http://' + host + ':' + str(port) + '/' +
        version + '/'.
    """

    def __init__(self, url):
        # All diffusion commands live under the 'commands/diffusion' endpoint.
        self.__url = url + 'commands/diffusion'

    def diffuse(self, verbose=False):
        """
        Diffusion will send the selected network view and its selected nodes to
        a web-based REST service to calculate network propagation. Results are
        returned and represented by columns in the node table.

        Columns are created for each execution of Diffusion and their names are
        returned in the response.

        The nodes you would like to use as input should be selected. This will
        be used to generate the contents of the diffusion_input column, which
        represents the query vector and corresponds to h in the diffusion
        equation.

        :param verbose: print more

        :returns: the response object returned by the CyREST POST call
        """
        response = api(url=self.__url+"/diffuse", method="POST", verbose=verbose)
        return response

    def diffuse_advanced(self, heatColumnName=None, time=None, verbose=False):
        """
        Diffusion will send the selected network view and its selected nodes to
        a web-based REST service to calculate network propagation. Results are
        returned and represented by columns in the node table.

        Columns are created for each execution of Diffusion and their names are
        returned in the response.

        :param heatColumnName (string, optional): A node column name intended
            to override the default table column 'diffusion_input'. This
            represents the query vector and corresponds to h in the diffusion
            equation. = ['HEKScore', 'JurkatScore', '(Use selected nodes)']
        :param time (string, optional): The extent of spread over the network.
            This corresponds to t in the diffusion equation.
        :param verbose: print more

        :returns: the response object returned by the CyREST POST call
        """
        PARAMS = set_param(["heatColumnName", "time"], [heatColumnName, time])
        response = api(url=self.__url+"/diffuse_advanced", PARAMS=PARAMS, method="POST", verbose=verbose)
        return response
| idekerlab/py2cytoscape | py2cytoscape/cyrest/diffusion.py | Python | mit | 2,230 | [
"Cytoscape"
] | 313a60b32f04e8d3ed0876920b5d44bc794f92b7a32faf5bf7c4c0a9cec82f5f |
'''Simulator independent grid cell network code.
.. currentmodule:: grid_cell_model.models.gc_net
Attractor network setup
-----------------------
This file is a module for the grid cell network. It allows you to create a
network of exponential integrate and fire neurons. There are two populations:
* Stellate cells -- excitatory neurons (E)
* Interneurons -- inhibitory (I)
In the basic version of the model, the connections are only E --> I and I -->
E. That is the main idea behind the model. However, it is possible to also add
E --> E and I --> I connections in various forms, see
`~grid_cell_model.models.parameters`.
Both neuron types can receive AMPA, NMDA and GABA_A events. NMDA is only in the
E --> I direction.
The topology of connections has several characteristics:
- Both neuron models are uniformly placed on a surface of a twisted torus,
the sides of which are scaled as X:Y = 1:sqrt(3)/2, to accomodate the
hexagonal nature of grid-cell receptive fields.
- The user can set the number of neurons in the larger side of the toroidal
sheet (always X dimension)
- The connections follow a center-surround profile, i.e. either E-->I
connections have a surround profile and I-->E connections have a center
profile, or vice-versa. This can be used to test the effect of the type of
excitatory or inhibitory profile on the stability of the attractor.
In addition, it is possible to select whether connections are
distance-dependent, as described in the previous paragraph, or 'flat', which
means that the connection weight does not depend on the distance between the
pair of neurons on the twisted torus. Another variant of connections is the
*probabilistic* synapses. Here, the probability of connection instead of the
weight is scaled according to the centre-surround principle. See the
``probabilistic_synapses`` parameter.
- GABA_A connections (I-->E) can also contain extra, distance-independent,
inhibitory synapses onto stellate cells in order to promote generation of
gamma oscillations.
- Technically, the basic functionality of the model (attractor emergence,
oscillations), shouldn't be very dependent on the spiking neuron type.
After some parameter setups/changes, one should be able to set the
simulation with any kind of spiking model (leaky IaF, Hodgkin-Huxley,
etc.)
Class hierarchy of the attractor networks
-----------------------------------------
.. inheritance-diagram:: grid_cell_model.models.gc_net_nest
grid_cell_model.models.gc_single_neuron
:parts: 2
'''
from __future__ import absolute_import, print_function
import logging
import numpy as np
import time
import copy
from ..analysis.image import Position2D, remapTwistedTorus
from .construction.weights import (IsomorphicConstructor,
ProbabilisticConstructor)
__all__ = ['GridCellNetwork']

# Module-level logger for the network classes.
gcnLogger = logging.getLogger('{0}.{1}'.format(__name__,
                                               "NestGridCellNetwork"))
class GridCellNetwork(object):
'''
This is an interface to the grid cell network. One should be able to set
it up, simulate and record from neurons, quite independently of a specific
simulator. We don't use PyNN here because its generic interface is not
suitable for this purpose
The GridCellNetwork creates two separate populations and connects them
according to the specified connectivity rules.
'''
    def __init__(self, neuronOpts, simulationOpts):
        '''Set up population geometry and timers; option objects are
        deep-copied into ``self.no`` and ``self.so``.

        Parameters
        ----------
        neuronOpts : options object
            Neuron/network parameters; at least ``Ne``, ``Ni`` and
            ``probabilistic_synapses`` are read here.
        simulationOpts : options object
            Simulation parameters.
        '''
        # timers
        self._startT = time.time()
        self._constrEndT = None
        self._simStartT = None
        self._simEndT = None

        self.beginConstruction()

        self.no = copy.deepcopy(neuronOpts)
        self.so = copy.deepcopy(simulationOpts)

        # Setup neuron numbers for each dimension (X, Y)
        # We have a single bump and to get hexagonal receptive fields the X:Y
        # size ratio must be 1:sqrt(3)/2
        self.y_dim = np.sqrt(3) / 2.0
        self.Ne_x = self.no.Ne
        self.Ne_y = int(np.ceil(self.no.Ne * self.y_dim)) // 2 * 2   # forced even
        self.Ni_x = self.no.Ni
        self.Ni_y = int(np.ceil(self.no.Ni * self.y_dim)) // 2 * 2   # forced even

        self.net_Ne = self.Ne_x * self.Ne_y
        self.net_Ni = self.Ni_x * self.Ni_y

        # Preferred directions; populated later by subclasses/setup code.
        self.prefDirs_e = None
        self.prefDirs_i = None

        self._weight_constructor = self._select_weight_constructor(neuronOpts)
    def simulate(self, t, printTime):
        '''Simulate the network for time ``t``, after being set up.

        Abstract: simulator-specific subclasses must override this.
        '''
        raise NotImplementedError()
@staticmethod
def _select_weight_constructor(options):
'''Create an instance of the constructor, based on ``options``.
Parameters
----------
options : dict-like
A mapping that contains options necessary to select the appropriate
constructor.
Returns
-------
constructor : WeightConstructor
Constructor instance.
'''
if options.probabilistic_synapses:
gcnLogger.debug('Selecting probabilistic contructor for weights.')
return ProbabilisticConstructor()
else:
gcnLogger.debug('Selecting isomorphic contructor for weights.')
return IsomorphicConstructor()
def _divergentConnectEE(self, pre, post, weights):
'''Connect a ``pre`` neuron in the E population to all neurons in the E
population in the ``post``, with ``weights``.
'''
raise NotImplementedError()
def _divergentConnectEI(self, pre, post, weights):
'''
Simply connect a 'pre' neuron in the E population to all neurons in
the I population from post, with given weights
'''
raise NotImplementedError()
def _divergentConnectIE(self, pre, post, weights):
'''
Simply connect a 'pre' neuron in the I population to all neurons in
the E population from post, with given weights
'''
raise NotImplementedError()
def _shiftOnTwistedTorus(self, val, shift, dim):
'''Shift a pair of X and Y coordinates on a twisted torus in a specified
direction.
Parameters
----------
val : Position2D
The original coordinates.
shift : Position2D
The vector that determines the shift.
dim : Position2D
Dimensions of the twisted torus.
Returns
-------
new_coord : Position2D
New coordinates on the twisted torus.
'''
ret = Position2D(val.x, val.y)
ret.x += shift.x
ret.y += shift.y
if ret.y < 0 or ret.y >= dim.y:
ret.x += dim.x / 2.0
ret.x %= dim.x
ret.y %= dim.y
return ret
def _generateRinglikeWeights(self, a, others, mu, sigma, prefDir,
prefDirC):
'''Generate ring-like weights.
Here we assume that X coordinates are normalised to <0, 1), and Y
coordinates are normalised to <0, sqrt(3)/2) Y coordinates are twisted,
i.e. X will have additional position shifts when determining minimum.
@param a Neuron center, normalised. A Position2D object.
@param others Positions of postsynaptic neurons. A Position2D object.
@param mu Radius of the circular function
@param sigma Width of the circular function
@param prefDir Preferred direction of the cell. A Position2D object.
@param prefDirC A preferred direction coefficient. A multiplier.
@return An array (1D) of normalized weights.
'''
dim = Position2D()
dim.x = 1.0
dim.y = self.y_dim
# a.x -= prefDirC*prefDir.x
# a.y -= prefDirC*prefDir.y
shift = Position2D(-prefDirC * prefDir.x, -prefDirC * prefDir.y)
a = self._shiftOnTwistedTorus(a, shift, dim)
d = remapTwistedTorus(a, others, dim)
return np.exp(-(d - mu)**2 / 2 / sigma**2)
def _generateGaussianWeights(self, a, others, sigma, prefDir, prefDirC):
'''Generate Gaussian-like weights, i.e. local connections
Here we assume that X coordinates are normalised to <0, 1), and Y
coordinates are normalised to <0, sqrt(3)/2) Y coordinates are twisted,
i.e. X will have additional position shifts when determining minimum.
@param a Neuron center, normalised. A Position2D object.
@param others Positions of postsynaptic neurons. A Position2D object.
@param sigma Std. dev. of the Gaussian (normalised)
@param prefDir Preferred direction of the cell. A Position2D object.
@param prefDirC A preferred direction coefficient. A multiplier.
@return An array (1D) of normalized weights.
'''
dim = Position2D()
dim.x = 1.0
dim.y = self.y_dim
a.x -= prefDirC * prefDir.x
a.y -= prefDirC * prefDir.y
d = remapTwistedTorus(a, others, dim)
return np.exp(-d**2 / 2. / sigma**2)
def _addToConnections(self, conductances, perc_synapses, h):
'''
Picks perc_synapses% of connections from the array and adds h to them
'''
indexes = np.random.choice(
np.arange(len(conductances)),
size=int(perc_synapses / 100.0 * len(conductances)),
replace=False)
conductances[indexes] += h
return conductances
def _connect_network(self):
'''Make network connections according to parameter settings.'''
if self.no.EI_flat:
self._connect_ei_flat()
else:
self._connect_ei_distance(self.no.AMPA_gaussian, self.no.pAMPA_mu,
self.no.pAMPA_sigma)
if self.no.IE_flat:
self._connect_ie_flat()
else:
self._connect_ie_distance(self.no.AMPA_gaussian, self.no.pGABA_mu,
self.no.pGABA_sigma)
if self.no.use_EE:
self._connect_ee(self.no.pEE_sigma)
if self.no.use_II:
self._connect_ii_flat()
def _connect_ee(self, pEE_sigma):
'''Make E-->E connections, according to network options.'''
gcnLogger.info('Connecting E-->E (distance-dependent).')
g_EE_mean = self.no.g_EE_total / self.net_Ne
print("g_EE_mean: %f nS" % g_EE_mean)
others_e = Position2D()
pd_norm_e = Position2D()
a = Position2D()
X, Y = np.meshgrid(np.arange(self.Ne_x), np.arange(self.Ne_y))
X = 1. * X / self.Ne_x
Y = 1. * Y / self.Ne_y * self.y_dim
others_e.x = X.ravel()
others_e.y = Y.ravel()
self.prefDirs_e = np.ndarray((self.net_Ne, 2))
for y in xrange(self.Ne_y):
y_e_norm = float(y) / self.Ne_y * self.y_dim
for x in xrange(self.Ne_x):
it = y * self.Ne_x + x
x_e_norm = float(x) / self.Ne_x
a.x = x_e_norm
a.y = y_e_norm
pd_e = self.getPreferredDirection(x, y)
self.prefDirs_e[it, :] = pd_e
pd_norm_e.x = 1. * pd_e[0] / self.Ne_x
pd_norm_e.y = 1. * pd_e[1] / self.Ne_y * self.y_dim
tmp_templ = self._generateGaussianWeights(
a, others_e, pEE_sigma, pd_norm_e, self.no.prefDirC_ee)
# tmp_templ down here must be in the proper units (e.g. nS)
tmp_templ *= g_EE_mean
tmp_templ[it] = 0. # do not allow autapses
self._divergentConnectEE(it, range(self.net_Ne), tmp_templ)
def _connect_ei_distance(self, AMPA_gaussian, pAMPA_mu, pAMPA_sigma):
'''Make E-->I connections, according to network options.
This doc applies to both connect_ei and connect_ie.
The connections are remapped to [1.0, sqrt(3)/2], whether the topology
is a twisted torus or just a regular torus.
Parameters
----------
AMPA_gaussian : bool
AMPA_gaussian switches between two cases:
true Each exciatory neuron has a 2D excitatory gaussian
profile, while each inhibitory neuron has a ring-like
profile pAMPA_mu, pAMPA_sigma, pGABA_sigma are used,
pGABA_mu is discarded
false Each excitatory neuron has a ring-like profile, while
each inhibitory neuron has a gaussian profile.
pAMPA_sigma, pGABA_mu, pGABA_sigma are used, pAMPA_mu
is discarded
'''
gcnLogger.info('Connecting E-->I (distance-dependent).')
g_AMPA_mean = self.no.g_AMPA_total / self.net_Ne
others_e = Position2D()
pd_norm_e = Position2D()
a = Position2D()
X, Y = np.meshgrid(np.arange(self.Ni_x), np.arange(self.Ni_y))
X = 1. * X / self.Ni_x
Y = 1. * Y / self.Ni_y * self.y_dim
others_e.x = X.ravel()
others_e.y = Y.ravel()
self.prefDirs_e = np.ndarray((self.net_Ne, 2))
for y in xrange(self.Ne_y):
y_e_norm = float(y) / self.Ne_y * self.y_dim
for x in xrange(self.Ne_x):
it = y * self.Ne_x + x
x_e_norm = float(x) / self.Ne_x
a.x = x_e_norm
a.y = y_e_norm
pd_e = self.getPreferredDirection(x, y)
self.prefDirs_e[it, :] = pd_e
pd_norm_e.x = 1. * pd_e[0] / self.Ni_x
pd_norm_e.y = 1. * pd_e[1] / self.Ni_y * self.y_dim
if AMPA_gaussian == 1:
tmp_templ = self._generateGaussianWeights(
a, others_e, pAMPA_sigma, pd_norm_e,
self.no.prefDirC_e)
elif AMPA_gaussian == 0:
tmp_templ = self._generateRinglikeWeights(
a, others_e, pAMPA_mu, pAMPA_sigma, pd_norm_e,
self.no.prefDirC_e)
else:
raise Exception('AMPA_gaussian parameters must be 0 or 1')
tmp_templ = self._weight_constructor.generate_weights(
tmp_templ, g_AMPA_mean)
# tmp_templ down here must be in the proper units (e.g. nS)
self._divergentConnectEI(it, range(self.net_Ni), tmp_templ)
def _connect_ei_flat(self):
'''Make E-->I connections that are distance-independent.'''
gcnLogger.info('Connecting E-->I (flat).')
g_EI_mean = (self.no.g_AMPA_total / self.net_Ne /
self.no.g_EI_uni_density)
n = int(float(self.net_Ni) * self.no.g_EI_uni_density)
self._randomDivergentConnectEI(range(self.net_Ne),
range(self.net_Ni),
n,
g_EI_mean)
def _connect_ie_distance(self, AMPA_gaussian, pGABA_mu, pGABA_sigma):
'''Make I-->E connections, according to network options.
This doc applies to both connect_ei and connect_ie.
The connections are remapped to [1.0, sqrt(3)/2], whether the topology
is a twisted torus or just a regular torus.
Parameters
----------
AMPA_gaussian : bool
AMPA_gaussian switches between two cases:
true Each exciatory neuron has a 2D excitatory gaussian
profile, while each inhibitory neuron has a ring-like
profile pAMPA_mu, pAMPA_sigma, pGABA_sigma are used,
pGABA_mu is discarded
false Each excitatory neuron has a ring-like profile, while
each inhibitory neuron has a gaussian profile.
pAMPA_sigma, pGABA_mu, pGABA_sigma are used, pAMPA_mu
is discarded
'''
gcnLogger.info('Connecting I-->E (distance-dependent).')
g_GABA_mean = self.no.g_GABA_total / self.net_Ni
g_uni_GABA_total = self.no.g_GABA_total * self.no.g_uni_GABA_frac
g_uni_GABA_mean = (g_uni_GABA_total / self.net_Ni /
self.no.uni_GABA_density)
print("g_uni_GABA_total: ", g_uni_GABA_total)
print("g_uni_GABA_mean: ", g_uni_GABA_mean)
others_i = Position2D()
pd_norm_i = Position2D()
a = Position2D()
X, Y = np.meshgrid(np.arange(self.Ne_x), np.arange(self.Ne_y))
X = 1. * X / self.Ne_x
Y = 1. * Y / self.Ne_y * self.y_dim
others_i.x = X.ravel()
others_i.y = Y.ravel()
conn_th = 1e-5
self.prefDirs_i = np.ndarray((self.net_Ni, 2))
for y in xrange(self.Ni_y):
y_i_norm = float(y) / self.Ni_y * self.y_dim
for x in xrange(self.Ni_x):
it = y * self.Ni_x + x
x_i_norm = float(x) / self.Ni_x
a.x = x_i_norm
a.y = y_i_norm
pd_i = self.getPreferredDirection(x, y)
self.prefDirs_i[it, :] = pd_i
pd_norm_i.x = 1. * pd_i[0] / self.Ne_x
pd_norm_i.y = 1. * pd_i[1] / self.Ne_y * self.y_dim
if AMPA_gaussian == 1:
tmp_templ = self._generateRinglikeWeights(
a, others_i, pGABA_mu, pGABA_sigma, pd_norm_i,
self.no.prefDirC_i)
elif AMPA_gaussian == 0:
tmp_templ = self._generateGaussianWeights(
a, others_i, pGABA_sigma, pd_norm_i,
self.no.prefDirC_i)
else:
raise Exception('AMPA_gaussian parameters must be 0 or 1')
# FIXME: ugly: B_GABA is defined only in child classes
tmp_templ = self._weight_constructor.generate_weights(
tmp_templ, self.B_GABA * g_GABA_mean)
self._addToConnections(
tmp_templ, self.no.uni_GABA_density * 100.0,
g_uni_GABA_mean)
E_nid = (tmp_templ > conn_th).nonzero()[0]
self._divergentConnectIE(it, E_nid, tmp_templ[E_nid])
def _connect_ie_flat(self):
'''Make I-->E connections that are distance independent.'''
gcnLogger.info('Connecting I-->E (flat).')
g_IE_mean = (self.no.g_GABA_total / self.net_Ni /
self.no.g_IE_uni_density)
n = int(float(self.net_Ne) * self.no.g_IE_uni_density)
self._randomDivergentConnectIE(range(self.net_Ni),
range(self.net_Ne),
n,
g_IE_mean)
def _connect_ii_flat(self):
'''Make I-->I connections that are distance independent.'''
gcnLogger.info('Connecting I-->I (flat).')
g_II_mean = (self.no.g_II_total / self.net_Ni /
self.no.g_II_uni_density)
gcnLogger.debug('g_II_total: %f, g_II_mean: %f', self.no.g_II_total,
g_II_mean)
n = int(float(self.net_Ne) * self.no.g_IE_uni_density)
self._randomDivergentConnectII(range(self.net_Ni),
range(self.net_Ni),
n,
g_II_mean)
###########################################################################
# External sources definitions
###########################################################################
def setVelocityCurrentInput_e(self, prefDirs_mask=None):
'''
Setup a velocity input to the excitatory population. Current input.
'''
raise NotImplementedError()
def setVelocityCurrentInput_i(self):
'''
Setup a velocity input to the inhibitory population. Current input.
'''
raise NotImplementedError()
def setConstantVelocityCurrent_e(self, vel):
'''
Setup a constant velocity current onto E poputlaion, where vel must be
a list of numbers::
vel = [vel_x, vel_y]
'''
raise NotImplementedError()
def setConstantVelocityCurrent_i(self, vel):
'''
Setup a constant velocity current onto I population, where vel must be
a list of numbers::
vel = [vel_x, vel_y]
'''
raise NotImplementedError()
def getAttrDictionary(self):
'''
Get a dictionary containing all the necessary attributes the user might
need in order to work with data produced by the simulation.
'''
raise NotImplementedError()
###########################################################################
# Other
###########################################################################
def beginConstruction(self):
'''
Mark the beginning of network construction.
'''
self._constrStartT = time.time()
print("Starting network construction")
def endConstruction(self):
'''
Mark the end of network construction.
'''
self._constrEndT = time.time()
print("Network construction finished.")
def constructionTime(self):
'''Compute network construction time'''
assert self._constrStartT is not None
if self._constrEndT is None:
raise RuntimeError("Cannot compute contruction time. End time has "
"not been marked yet.")
else:
return self._constrEndT - self._constrStartT
def beginSimulation(self):
'''Mark beginning of the simulation'''
self._simStartT = time.time()
print("Simulation has started...")
def endSimulation(self):
'''Mark end of the simulation'''
self._simEndT = time.time()
print("Simulation finished")
def simulationTime(self):
'''Compute simulation time'''
assert self._simStartT is not None
if self._simEndT is None:
raise RuntimeError("Cannot compute simulation time. End time has "
"not been marked yet (no simulation has been "
"run?).")
else:
return self._simEndT - self._simStartT
def totalTime(self):
'''
Return elapsed time in seconds, since the network construction start.
'''
return time.time() - self._startT
def _getTimes(self):
'''Get simulation times'''
return (self.constructionTime(), self.simulationTime(),
self.totalTime())
def printTimes(self, constrT=True, simT=True, totalT=True):
'''Print the different elapsed simulation times.'''
constrT, simT, totalT = self._getTimes()
print("Timer statistics:")
print(" Construction: {0} s".format(constrT))
print(" Simulation : {0} s".format(simT))
print(" Total : {0} s".format(totalT))
return constrT, simT, totalT
def getPreferredDirection(self, pos_x, pos_y):
'''
Get a preferred direction for a neuron.
Parameters
----------
pos_x/y : int
Position of neuron in 2d sheet
'''
pos4_x = pos_x % 2
pos2_y = pos_y % 2
if pos4_x == 0:
if pos2_y == 0:
return [-1, 0] # Left
else:
return [0, -1] # Down
else:
if pos2_y == 0:
return [0, 1] # up
else:
return [1, 0] # Right
| MattNolanLab/ei-attractor | grid_cell_model/models/gc_net.py | Python | gpl-3.0 | 23,856 | [
"Gaussian",
"NEURON"
] | b6b4e05b69a1ff00e71643bea3488eff5e0a646569a4eb3b2d30f0b6b694c2f0 |
'''
PathwayGenie (c) GeneGenie Bioinformatics Ltd. 2018
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=too-many-arguments
import sys
from scripts.writer import write
def do_write(in_filename, out_filename, ice_url, ice_username, ice_password,
             group_name=None):
    '''Export strain entries to ICE.

    Delegates to :func:`scripts.writer.write` with the ``STRAIN`` entry type
    and the ``plasmid``/``host`` component columns.
    '''
    write(in_filename, out_filename, ice_url, ice_username, ice_password,
          'STRAIN', ['plasmid', 'host'], group_name)
def main(args):
    '''main method.

    ``args`` is the list of positional command-line arguments, forwarded
    verbatim to :func:`do_write` in its parameter order.
    '''
    do_write(*args)


if __name__ == '__main__':
    # Script entry point: drop argv[0] (the program name).
    main(sys.argv[1:])
| neilswainston/PathwayGenie | scripts/strain_writer.py | Python | mit | 738 | [
"VisIt"
] | 6337092947f89c480b3ef17bef03e717a826bc0543046b523f5fb71db8965a13 |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Use the DSSP program to calculate secondary structure and accessibility.
You need to have a working version of DSSP (and a license, free for academic
use) in order to use this. For DSSP, see U{http://swift.cmbi.ru.nl/gv/dssp/}.
The DSSP codes for secondary structure used here are:
H
Alpha helix (4-12)
B
Isolated beta-bridge residue
E
Strand
G
3-10 helix
I
pi helix
T
Turn
S
Bend
\-
None
The following Accessible surface area (ASA) values can be used, defaulting
to the Sander and Rost values:
Miller
Miller et al. 1987 http://dx.doi.org/10.1016/0022-2836(87)90038-6
Sander
Sander and Rost 1994 http://dx.doi.org/10.1002/prot.340200303
Wilke
Tien et al. 2013 http://dx.doi.org/10.1371/journal.pone.0080635
"""
from __future__ import print_function
import re
from Bio._py3k import StringIO
import subprocess
import warnings
from Bio.Data import SCOPData
from Bio.PDB.AbstractPropertyMap import AbstractResiduePropertyMap
from Bio.PDB.PDBExceptions import PDBException
from Bio.PDB.PDBParser import PDBParser
# Match C in DSSP
# (DSSP renames half-cystines involved in a disulphide bridge to lowercase
# letters a, b, c, ...; this pattern detects such residues.)
_dssp_cys = re.compile('[a-z]')

# Maximal ASA of amino acids
# Used for relative accessibility
# (values presumably in Angstrom^2 -- see the cited papers to confirm)
residue_max_acc = {
    # Miller max acc: Miller et al. 1987 http://dx.doi.org/10.1016/0022-2836(87)90038-6
    # Wilke: Tien et al. 2013 http://dx.doi.org/10.1371/journal.pone.0080635
    # Sander: Sander & Rost 1994 http://dx.doi.org/10.1002/prot.340200303
    'Miller': {
        'ALA': 113.0, 'ARG': 241.0, 'ASN': 158.0, 'ASP': 151.0,
        'CYS': 140.0, 'GLN': 189.0, 'GLU': 183.0, 'GLY': 85.0,
        'HIS': 194.0, 'ILE': 182.0, 'LEU': 180.0, 'LYS': 211.0,
        'MET': 204.0, 'PHE': 218.0, 'PRO': 143.0, 'SER': 122.0,
        'THR': 146.0, 'TRP': 259.0, 'TYR': 229.0, 'VAL': 160.0
    },
    'Wilke': {
        'ALA': 129.0, 'ARG': 274.0, 'ASN': 195.0, 'ASP': 193.0,
        'CYS': 167.0, 'GLN': 225.0, 'GLU': 223.0, 'GLY': 104.0,
        'HIS': 224.0, 'ILE': 197.0, 'LEU': 201.0, 'LYS': 236.0,
        'MET': 224.0, 'PHE': 240.0, 'PRO': 159.0, 'SER': 155.0,
        'THR': 172.0, 'TRP': 285.0, 'TYR': 263.0, 'VAL': 174.0
    },
    'Sander': {
        'ALA': 106.0, 'ARG': 248.0, 'ASN': 157.0, 'ASP': 163.0,
        'CYS': 135.0, 'GLN': 198.0, 'GLU': 194.0, 'GLY': 84.0,
        'HIS': 184.0, 'ILE': 169.0, 'LEU': 164.0, 'LYS': 205.0,
        'MET': 188.0, 'PHE': 197.0, 'PRO': 136.0, 'SER': 130.0,
        'THR': 142.0, 'TRP': 227.0, 'TYR': 222.0, 'VAL': 142.0
    }
}
def ss_to_index(ss):
    """Convert a secondary structure symbol to a numeric index.

    H=0
    E=1
    C=2
    """
    mapping = {'H': 0, 'E': 1, 'C': 2}
    if ss in mapping:
        return mapping[ss]
    # Unknown symbol: preserve the historical behaviour (AssertionError,
    # or a silent None under ``python -O``).
    assert 0
def dssp_dict_from_pdb_file(in_file, DSSP="dssp"):
    """Create a DSSP dictionary from a PDB file by running the DSSP binary.

    Example:
    --------
    >>> dssp_dict=dssp_dict_from_pdb_file("1fat.pdb")
    >>> aa, ss, acc=dssp_dict[('A', 1)]

    Parameters
    ----------
    in_file : string
        pdb file
    DSSP : string
        DSSP executable (argument to subprocess)

    Returns
    -------
    (out_dict, keys) : tuple
        a dictionary that maps (chainid, resid) to
        amino acid type, secondary structure code and
        accessibility, plus the ordered list of its keys.
    """
    # Universal newlines gives unicode handles rather than bytes handles
    # on Python 3.
    process = subprocess.Popen([DSSP, in_file], universal_newlines=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()

    # Surface anything DSSP wrote to stderr as a warning.
    if stderr.strip():
        warnings.warn(stderr)
    if not stdout.strip():
        raise Exception('DSSP failed to produce an output')

    return _make_dssp_dict(StringIO(stdout))
def make_dssp_dict(filename):
    """DSSP dictionary mapping identifiers to properties.

    Return a DSSP dictionary that maps (chainid, resid) to
    aa, ss and accessibility, from a DSSP output file.

    Parameters
    ----------
    filename : string
        the DSSP output file
    """
    handle = open(filename, "r")
    try:
        return _make_dssp_dict(handle)
    finally:
        handle.close()
def _make_dssp_dict(handle):
    """Internal function used by make_dssp_dict (PRIVATE).

    Return a DSSP dictionary that maps (chainid, resid) to an amino acid,
    secondary structure symbol, solvent accessibility value, and hydrogen bond
    information (relative dssp indices and hydrogen bond energies) from an open
    DSSP file object.

    Parameters
    ----------
    handle : file
        the open DSSP output file handle
    """
    dssp = {}
    start = 0
    keys = []
    for l in handle.readlines():
        sl = l.split()
        if len(sl) < 2:
            continue
        if sl[1] == "RESIDUE":
            # Start parsing from here
            start = 1
            continue
        if not start:
            continue
        if l[9] == " ":
            # Skip -- missing residue
            continue
        # DSSP is a fixed-column format; the slices below follow its layout.
        dssp_index = int(l[:5])
        resseq = int(l[5:10])
        icode = l[10]
        chainid = l[11]
        aa = l[13]
        ss = l[16]
        if ss == " ":
            # Blank secondary structure code is normalised to "-".
            ss = "-"
        try:
            NH_O_1_relidx = int(l[38:45])
            NH_O_1_energy = float(l[46:50])
            O_NH_1_relidx = int(l[50:56])
            O_NH_1_energy = float(l[57:61])
            NH_O_2_relidx = int(l[61:67])
            NH_O_2_energy = float(l[68:72])
            O_NH_2_relidx = int(l[72:78])
            O_NH_2_energy = float(l[79:83])
            acc = int(l[34:38])
            phi = float(l[103:109])
            psi = float(l[109:115])
        except ValueError as exc:
            # DSSP output breaks its own format when there are >9999
            # residues, since only 4 digits are allocated to the seq num
            # field. See 3kic chain T res 321, 1vsy chain T res 6077.
            # Here, look for whitespace to figure out the number of extra
            # digits, and shift parsing the rest of the line by that amount.
            if l[34] != ' ':
                shift = l[34:].find(' ')
                NH_O_1_relidx = int(l[38 + shift:45 + shift])
                NH_O_1_energy = float(l[46 + shift:50 + shift])
                O_NH_1_relidx = int(l[50 + shift:56 + shift])
                O_NH_1_energy = float(l[57 + shift:61 + shift])
                NH_O_2_relidx = int(l[61 + shift:67 + shift])
                NH_O_2_energy = float(l[68 + shift:72 + shift])
                O_NH_2_relidx = int(l[72 + shift:78 + shift])
                O_NH_2_energy = float(l[79 + shift:83 + shift])
                acc = int((l[34 + shift:38 + shift]))
                phi = float(l[103 + shift:109 + shift])
                psi = float(l[109 + shift:115 + shift])
            else:
                # Genuinely malformed line -- re-raise.
                raise ValueError(exc)
        # Residue id in Bio.PDB convention: (hetfield, resseq, icode).
        res_id = (" ", resseq, icode)
        dssp[(chainid, res_id)] = (aa, ss, acc, phi, psi, dssp_index,
                NH_O_1_relidx, NH_O_1_energy, O_NH_1_relidx, O_NH_1_energy,
                NH_O_2_relidx, NH_O_2_energy, O_NH_2_relidx, O_NH_2_energy)
        keys.append((chainid, res_id))
    return dssp, keys
class DSSP(AbstractResiduePropertyMap):
    """Run DSSP and parse secondary structure and accessibility.

    Run DSSP on a pdb file, and provide a handle to the
    DSSP secondary structure and accessibility.

    **Note** that DSSP can only handle one model.

    Example:
    --------

    >>> p = PDBParser()
    >>> structure = p.get_structure("1MOT", "1MOT.pdb")
    >>> model = structure[0]
    >>> dssp = DSSP(model, "1MOT.pdb")
    >>> # DSSP data is accessed by a tuple (chain_id, res_id)
    >>> a_key = list(dssp.keys())[2]
    >>> # residue object, secondary structure, solvent accessibility,
    >>> # relative accessiblity, phi, psi
    >>> dssp[a_key]
    (<Residue ALA het=  resseq=251 icode= >,
    'H',
    72,
    0.67924528301886788,
    -61.200000000000003,
    -42.399999999999999)
    """

    def __init__(self, model, in_file, dssp="dssp", acc_array="Sander", file_type='PDB'):
        """Create a DSSP object.

        Parameters
        ----------
        model : Model
            The first model of the structure
        in_file : string
            Either a PDB file or a DSSP file.
        dssp : string
            The dssp executable (ie. the argument to os.system)
        acc_array : string
            Accessible surface area (ASA) from either Miller et al. (1987),
            Sander & Rost (1994), or Wilke: Tien et al. 2013, as string
            Sander/Wilke/Miller. Defaults to Sander.
        file_type: string
            File type switch, either PDB or DSSP with PDB as default.
        """
        self.residue_max_acc = residue_max_acc[acc_array]

        # create DSSP dictionary
        file_type = file_type.upper()
        # NOTE(review): input validation via assert is stripped under
        # ``python -O``; an invalid file_type would then surface as a
        # NameError on dssp_dict below.
        assert(file_type in ['PDB', 'DSSP'])
        # If the input file is a PDB file run DSSP and parse output:
        if file_type == 'PDB':
            dssp_dict, dssp_keys = dssp_dict_from_pdb_file(in_file, dssp)
        # If the input file is a DSSP file just parse it directly:
        elif file_type == 'DSSP':
            dssp_dict, dssp_keys = make_dssp_dict(in_file)

        dssp_map = {}
        dssp_list = []

        def resid2code(res_id):
            """Serialize a residue's resseq and icode for easy comparison."""
            return '%s%s' % (res_id[1], res_id[2])

        # Now create a dictionary that maps Residue objects to
        # secondary structure and accessibility, and a list of
        # (residue, (secondary structure, accessibility)) tuples
        for key in dssp_keys:
            chain_id, res_id = key
            chain = model[chain_id]
            try:
                res = chain[res_id]
            except KeyError:
                # In DSSP, HET field is not considered in residue identifier.
                # Thus HETATM records may cause unnecessary exceptions.
                # (See 3jui chain A res 593.)
                # Try the lookup again with all HETATM other than water
                res_seq_icode = resid2code(res_id)
                for r in chain:
                    if r.id[0] not in (' ', 'W'):
                        # Compare resseq + icode
                        if resid2code(r.id) == res_seq_icode:
                            # Found a matching residue
                            res = r
                            break
                else:
                    # No HETATM matched either -- give up on this key.
                    raise KeyError(res_id)

            # For disordered residues of point mutations, BioPython uses the
            # last one as default, But DSSP takes the first one (alternative
            # location is blank, A or 1). See 1h9h chain E resi 22.
            # Here we select the res in which all atoms have altloc blank, A or
            # 1. If no such residues are found, simply use the first one appears
            # (as DSSP does).
            if res.is_disordered() == 2:
                for rk in res.disordered_get_id_list():
                    # All atoms in the disordered residue should have the same
                    # altloc, so it suffices to check the altloc of the first
                    # atom.
                    altloc = res.child_dict[rk].get_list()[0].get_altloc()
                    if altloc in tuple('A1 '):
                        res.disordered_select(rk)
                        break
                else:
                    # Simply select the first one
                    res.disordered_select(res.disordered_get_id_list()[0])

            # Sometimes point mutations are put into HETATM and ATOM with altloc
            # 'A' and 'B'.
            # See 3piu chain A residue 273:
            #   <Residue LLP het=H_LLP resseq=273 icode= >
            #   <Residue LYS het=  resseq=273 icode= >
            # DSSP uses the HETATM LLP as it has altloc 'A'
            # We check the altloc code here.
            elif res.is_disordered() == 1:
                # Check altloc of all atoms in the DisorderedResidue. If it
                # contains blank, A or 1, then use it.  Otherwise, look for HET
                # residues of the same seq+icode.  If not such HET residues are
                # found, just accept the current one.
                altlocs = set(a.get_altloc() for a in res.get_unpacked_list())
                if altlocs.isdisjoint('A1 '):
                    # Try again with all HETATM other than water
                    res_seq_icode = resid2code(res_id)
                    for r in chain:
                        if r.id[0] not in (' ', 'W'):
                            if resid2code(r.id) == res_seq_icode and \
                               r.get_list()[0].get_altloc() in tuple('A1 '):
                                res = r
                                break

            # Unpack the parsed DSSP record and attach it to the residue's
            # ``xtra`` dictionary for downstream consumers.
            (aa, ss, acc, phi, psi, dssp_index,
                NH_O_1_relidx, NH_O_1_energy,
                O_NH_1_relidx, O_NH_1_energy,
                NH_O_2_relidx, NH_O_2_energy,
                O_NH_2_relidx, O_NH_2_energy) = dssp_dict[key]
            res.xtra["SS_DSSP"] = ss
            res.xtra["EXP_DSSP_ASA"] = acc
            res.xtra["PHI_DSSP"] = phi
            res.xtra["PSI_DSSP"] = psi
            res.xtra["DSSP_INDEX"] = dssp_index
            res.xtra["NH_O_1_RELIDX_DSSP"] = NH_O_1_relidx
            res.xtra["NH_O_1_ENERGY_DSSP"] = NH_O_1_energy
            res.xtra["O_NH_1_RELIDX_DSSP"] = O_NH_1_relidx
            res.xtra["O_NH_1_ENERGY_DSSP"] = O_NH_1_energy
            res.xtra["NH_O_2_RELIDX_DSSP"] = NH_O_2_relidx
            res.xtra["NH_O_2_ENERGY_DSSP"] = NH_O_2_energy
            res.xtra["O_NH_2_RELIDX_DSSP"] = O_NH_2_relidx
            res.xtra["O_NH_2_ENERGY_DSSP"] = O_NH_2_energy

            # Relative accessibility
            resname = res.get_resname()
            try:
                rel_acc = acc / self.residue_max_acc[resname]
            except KeyError:
                # Invalid value for resname
                rel_acc = 'NA'
            else:
                # Clamp: observed ASA can slightly exceed the tabulated max.
                if rel_acc > 1.0:
                    rel_acc = 1.0
            res.xtra["EXP_DSSP_RASA"] = rel_acc
            # Verify if AA in DSSP == AA in Structure
            # Something went wrong if this is not true!
            # NB: DSSP uses X often
            resname = SCOPData.protein_letters_3to1.get(resname, 'X')
            if resname == "C":
                # DSSP renames C in C-bridges to a,b,c,d,...
                # - we rename it back to 'C'
                if _dssp_cys.match(aa):
                    aa = 'C'
            # Take care of HETATM again
            if (resname != aa) and (res.id[0] == ' ' or aa != 'X'):
                raise PDBException("Structure/DSSP mismatch at %s" % res)

            dssp_vals = (dssp_index, aa, ss, rel_acc, phi, psi,
                         NH_O_1_relidx, NH_O_1_energy,
                         O_NH_1_relidx, O_NH_1_energy,
                         NH_O_2_relidx, NH_O_2_energy,
                         O_NH_2_relidx, O_NH_2_energy)

            dssp_map[key] = dssp_vals
            dssp_list.append(dssp_vals)

        AbstractResiduePropertyMap.__init__(self, dssp_map, dssp_keys,
                                            dssp_list)
if __name__ == "__main__":
    # Ad-hoc smoke test: run DSSP on the PDB file given as the first
    # command-line argument and dump per-residue annotations.
    import sys

    p = PDBParser()
    s = p.get_structure('X', sys.argv[1])
    model = s[0]
    d = DSSP(model, sys.argv[1])

    for r in d:
        print(r)
    print("Handled %i residues" % len(d))
    print(sorted(d))
    if ('A', 1) in d:
        print(d[('A', 1)])
        print(s[0]['A'][1].xtra)
        # Secondary structure
        print(''.join(item[1] for item in d))
| zjuchenyuan/BioWeb | Lib/Bio/PDB/DSSP.py | Python | mit | 15,902 | [
"Biopython"
] | bc3050270774554ccdd0665407b488663276699d80953bed22c794f4e5d37294 |
import os
import pytest
import logging
import numpy as np
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.utils import sct_test_path, sct_dir_local_path
from spinalcordtoolbox.scripts import sct_register_multimodal, sct_create_mask
logger = logging.getLogger(__name__)
def test_sct_register_multimodal_mask_files_exist(tmp_path):
    """
    Run registration with a cylinder mask and an init warp, then check which
    output files exist. The registration quality itself is not validated.

    - TODO: Write a check that verifies the registration results.
    - TODO: Parametrize this test to add '-initwarpinv warp_anat2template.nii.gz',
      after the file is added to sct_testing_data:
      https://github.com/spinalcordtoolbox/spinalcordtoolbox/pull/3407#discussion_r646895013
    """
    # Generate a cylinder mask around the cord centerline.
    path_mask = str(tmp_path / 'mask_mt1.nii.gz')
    sct_create_mask.main(['-i', sct_test_path('mt', 'mt1.nii.gz'),
                          '-p', f"centerline,{sct_test_path('mt', 'mt1_seg.nii.gz')}",
                          '-size', '35mm', '-f', 'cylinder', '-o', path_mask])

    # Register the PAM50 template to the mt1 image using the mask.
    sct_register_multimodal.main([
        '-i', sct_dir_local_path('data/PAM50/template/', 'PAM50_t2.nii.gz'),
        '-iseg', sct_dir_local_path('data/PAM50/template/', 'PAM50_cord.nii.gz'),
        '-d', sct_test_path('mt', 'mt1.nii.gz'),
        '-dseg', sct_test_path('mt', 'mt1_seg.nii.gz'),
        '-param', 'step=1,type=seg,algo=centermass:step=2,type=seg,algo=bsplinesyn,slicewise=1,iter=3',
        '-m', path_mask,
        '-initwarp', sct_test_path('t2', 'warp_template2anat.nii.gz'),
        '-ofolder', str(tmp_path)
    ])

    # The forward (src->dest) outputs must be present.
    expected = ["PAM50_t2_reg.nii.gz", "warp_PAM50_t22mt1.nii.gz"]
    for name in expected:
        assert (tmp_path / name).exists()
    # Because `-initwarp` was specified (but `-initwarpinv` wasn't) the
    # dest->src outputs must NOT be generated.
    unexpected = ["mt1_reg.nii.gz", "warp_mt12PAM50_t2.nii.gz"]
    for name in unexpected:
        assert not (tmp_path / name).exists()
@pytest.mark.sct_testing
@pytest.mark.usefixtures("run_in_sct_testing_data_dir")
@pytest.mark.parametrize("use_seg,param,fname_gt", [
    (False, 'step=1,algo=syn,type=im,iter=1,smooth=1,shrink=2,metric=MI', 'mt/mt0_reg_syn_goldstandard.nii.gz'),
    (False, 'step=1,algo=slicereg,type=im,iter=5,smooth=0,metric=MeanSquares', 'mt/mt0_reg_slicereg_goldstandard.nii.gz'),
    (True, 'step=1,algo=centermassrot,type=seg,rot_method=pca', None),
    (True, 'step=1,algo=centermassrot,type=imseg,rot_method=hog', None),
    (True, 'step=1,algo=centermassrot,type=imseg,rot_method=pcahog', None),
    (True, 'step=1,algo=columnwise,type=seg,smooth=1', None),
])
def test_sct_register_multimodal_mt0_image_data_within_threshold(use_seg, param, fname_gt):
    """Run the CLI script and verify that the output image data is close to a reference image (within threshold).

    ``use_seg`` adds the -iseg/-dseg arguments required by seg-based
    algorithms; ``fname_gt`` is the gold-standard image (None when no
    reference exists for the parameter set).
    """
    fname_out = 'mt0_reg.nii.gz'
    argv = ['-i', 'mt/mt0.nii.gz', '-d', 'mt/mt1.nii.gz', '-o', fname_out, '-x', 'linear', '-r', '0', '-param', param]
    seg_argv = ['-iseg', 'mt/mt0_seg.nii.gz', '-dseg', 'mt/mt1_seg.nii.gz']
    sct_register_multimodal.main(argv=(argv + seg_argv) if use_seg else argv)
    # This check is skipped because of https://github.com/neuropoly/spinalcordtoolbox/issues/3372
    #############################################################################################
    # if fname_gt is not None:
    #     im_gt = Image(fname_gt)
    #     im_result = Image(fname_out)
    #     # get dimensions
    #     nx, ny, nz, nt, px, py, pz, pt = im_gt.dim
    #     # set the difference threshold to 1e-3 pe voxel
    #     threshold = 1e-3 * nx * ny * nz * nt
    #     # check if non-zero elements are present when computing the difference of the two images
    #     diff = im_gt.data - im_result.data
    #     # compare images
    #     assert abs(np.sum(diff)) < threshold  # FIXME: Use np.linalg.norm when this test is fixed
def test_sct_register_multimodal_with_softmask(tmp_path):
    """
    Verify that softmask is actually applied during registration.

    NB: For 'gaussian', ANTs binaries can't handle softmasks natively, so SCT should be applying
    the mask directly to the image. Related links:
       * https://github.com/ivadomed/pipeline-hemis/issues/3.
       * https://github.com/spinalcordtoolbox/spinalcordtoolbox/issues/3075
    """
    fname_mask = str(tmp_path/'mask_t2.nii.gz')
    fname_t2 = sct_test_path('t2', 't2.nii.gz')
    fname_t1 = sct_test_path('t1', 't1w.nii.gz')
    fname_warp = str(tmp_path/"warp_t1w2t2.nii.gz")
    # Generate a soft (gaussian) mask centered on the manually-defined centerline
    sct_create_mask.main(['-i', fname_t2, '-p', f"centerline,{sct_test_path('t2', 't2_centerline-manual.nii.gz')}",
                          '-o', fname_mask, '-f', 'gaussian'])
    # Register t1w -> t2 with the soft mask supplied via '-m'
    sct_register_multimodal.main(['-i', fname_t1, '-d', fname_t2, '-dseg', sct_test_path('t2', 't2_seg-manual.nii.gz'),
                                  '-param', "step=1,type=im,algo=slicereg,metric=CC", '-m', fname_mask,
                                  '-ofolder', str(tmp_path), '-r', '0', '-v', '2'])
    # If registration was successful, the warping field should be non-empty
    assert np.any(Image(fname_warp).data)
    # TODO: Find a way to validate the quality of the registration to see if the mask actually has a benefit.
    #       The problem is, adding a mask seems to make the registration _worse_ for this specific data/params.
    #       (Dice score == 0.9 for no mask, and 0.85 for 'gaussian', and 0.7 for 'cylinder'.)
    #       Nonetheless, below is a rough sketch of what a test could look like:
    # from spinalcordtoolbox.math import dice
    # from spinalcordtoolbox.deepseg_sc.core import deep_segmentation_spinalcord
    # fname_t1_reg = str(tmp_path / "t1w_reg.nii.gz")
    # im_t1_reg_seg, _, _ = deep_segmentation_spinalcord(Image(fname_t1_reg), contrast_type='t1', ctr_algo='svm')
    # im_t2_seg, _, _ = deep_segmentation_spinalcord(Image(fname_t2), contrast_type='t2', ctr_algo='svm')
    # dice_score_t1_reg = dice(im_t2_seg.data, im_t1_reg_seg.data)
    # assert dice_score_t1_reg > 0.9
| neuropoly/spinalcordtoolbox | testing/cli/test_cli_sct_register_multimodal.py | Python | mit | 5,959 | [
"Gaussian"
] | 9942bb0a18b7433bfa45d1207287745ac49458cc6e71668105bf1eb47c864dd0 |
import numpy as np
# Set the random seed for reproducibility
seed = np.random.randint(2**16)
print "Seed: ", seed
np.random.seed(seed)
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from optofit.cneuron.compartment import Compartment, SquidCompartment
from optofit.cneuron.channels import LeakChannel, NaChannel, KdrChannel
from optofit.cneuron.simulate import forward_euler
from hips.inference.particle_mcmc import *
from optofit.cinference.pmcmc import *
# Make a simple compartment.
# Classic Hodgkin-Huxley squid-axon hyperparameters: membrane capacitance (C),
# initial voltage (V0), and conductance (g_*) / reversal potential (E_*) pairs
# for the leak, fast sodium (na), and delayed-rectifier potassium (kdr) channels.
hypers = {
    'C' : 1.0,
    'V0' : -60.0,
    'g_leak' : 0.3,
    'E_leak' : -65.0,
    'g_na' : 120.0,
    'E_na' : 50.0,
    'g_kdr' : 36.0,
    'E_kdr' : -77.0
}
def sample_model():
    """Simulate a Hodgkin-Huxley squid compartment and noisy voltage observations.

    Returns:
        t    : (T,) array of time points (ms)
        z    : (T, D) latent state trajectory of a single sampled particle
        x    : (T, D) observation matrix (only the voltage dimension is observed)
        init : initial state distribution (Gaussian around steady state)
        prop : state-transition proposal using Hodgkin-Huxley dynamics
        lkhd : partial-Gaussian observation likelihood on the voltage
    """
    squid_body = SquidCompartment(name='body', hypers=hypers)

    # Initialize the model (D latent dims, I input dims)
    D, I = squid_body.initialize_offsets()

    # Set the recording duration
    t_start = 0
    t_stop = 100.
    dt = 0.01
    t = np.arange(t_start, t_stop, dt)
    T = len(t)

    # Make input with an injected current from 50-60ms.
    # NOTE: 50/dt is a float (5000.0); NumPy >= 1.12 rejects non-integer slice
    # indices, so convert explicitly.
    inpt = np.zeros((T, I))
    inpt[int(round(50 / dt)):int(round(60 / dt)), :] = 7.
    inpt += np.random.randn(T, I)

    # Set the initial distribution to be Gaussian around the steady state
    z0 = np.zeros(D)
    squid_body.steady_state(z0)
    init = GaussianInitialDistribution(z0, 0.1**2 * np.eye(D))

    # Set the proposal distribution using Hodgkin Huxley dynamics
    # TODO: Fix the hack which requires us to know the number of particles
    N = 100
    sigmas = 0.0001*np.ones(D)
    # Set the voltage transition dynamics to be a bit noisier
    sigmas[squid_body.x_offset] = 0.25
    prop = HodgkinHuxleyProposal(T, N, D, squid_body, sigmas, t, inpt)

    # Set the observation model to observe only the voltage
    etas = np.ones(1)
    observed_dims = np.array([squid_body.x_offset]).astype(np.int32)
    lkhd = PartialGaussianLikelihood(observed_dims, etas)

    # Initialize the latent state matrix to sample N=1 particle
    z = np.zeros((T,N,D))
    z[0,0,:] = init.sample()

    # Initialize the output matrix
    x = np.zeros((T,D))

    # Sample the latent state sequence
    for i in np.arange(0,T-1):
        # The interface kinda sucks. We have to tell it that
        # the first particle is always its ancestor
        prop.sample_next(z, i, np.array([0], dtype=np.int32))

    # Sample observations
    for i in np.arange(0,T):
        lkhd.sample(z,x,i,0)

    # Extract the first (and in this case only) particle
    z = z[:,0,:].copy(order='C')

    # Plot the first particle trajectory (black = latent voltage, red = observations)
    plt.ion()
    fig = plt.figure()
    plt.plot(t, z[:,observed_dims[0]], 'k')
    plt.plot(t, x[:,0], 'r')
    plt.show()
    plt.pause(0.01)

    return t, z, x, init, prop, lkhd
# Now run the pMCMC inference
def sample_z_given_x(t, z_curr, x,
                     init, prop, lkhd,
                     N_particles=100,
                     plot=False):
    """Sample latent trajectories given observations via particle Gibbs.

    :param t: (T,) array of time points
    :param z_curr: (T,D) reference trajectory to condition the first sweep on
    :param x: (T,O) observation matrix
    :param init, prop, lkhd: initial distribution, proposal, and likelihood models
    :param N_particles: number of particles per sweep
    :param plot: if True, show the posterior mean +/- 1 std envelope at the end
    :returns: (S,T,D) array of S posterior samples of the latent trajectory
    """
    T,D = z_curr.shape
    T,O = x.shape

    # import pdb; pdb.set_trace()
    pf = ParticleGibbsAncestorSampling(T, N_particles, D)
    pf.initialize(init, prop, lkhd, x, z_curr)

    S = 100
    z_smpls = np.zeros((S,T,D))
    l = plt.plot(t, z_smpls[0,:,0], 'b')
    for s in range(S):
        print "Iteration %d" % s
        # Reinitialize with the previous particle
        # NOTE(review): at s == 0 this conditions on the all-zero z_smpls[0]
        # rather than z_curr, and each later sweep conditions on the (still
        # unfilled) z_smpls[s] -- looks like it should be z_smpls[s-1]; verify.
        pf.initialize(init, prop, lkhd, x, z_smpls[s,:,:])
        z_smpls[s,:,:] = pf.sample()
        l[0].set_data(t, z_smpls[s,:,0])
        plt.pause(0.01)

    # Posterior mean and +/- 1 std envelope (as a closed polygon for fill)
    z_mean = z_smpls.mean(axis=0)
    z_std = z_smpls.std(axis=0)
    z_env = np.zeros((T*2,2))
    z_env[:,0] = np.concatenate((t, t[::-1]))
    z_env[:,1] = np.concatenate((z_mean[:,0] + z_std[:,0], z_mean[::-1,0] - z_std[::-1,0]))

    if plot:
        plt.gca().add_patch(Polygon(z_env, facecolor='b', alpha=0.25, edgecolor='none'))
        plt.plot(t, z_mean[:,0], 'b', lw=1)

        # Plot a few random samples
        # for s in range(10):
        #     si = np.random.randint(S)
        #     plt.plot(t, z_smpls[si,:,0], '-b', lw=0.5)

        plt.ioff()
        plt.show()

    return z_smpls
# Generate synthetic data from the model, then run particle-Gibbs inference on it.
t, z, x, init, prop, lkhd = sample_model()
sample_z_given_x(t, z, x, init, prop, lkhd, plot=True)
"Gaussian"
] | 73faa061f55809978f7ef560cc200b2cb514a77de717b46d9be62f0344c2f0a5 |
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
#File has been modified to check for endless data attack
import gtk, pango
from zeroinstall import _, translation
from zeroinstall.support import tasks, pretty_size
from zeroinstall.injector import model, reader, download
import properties
from zeroinstall.gtkui.icon import load_icon
from zeroinstall import support
from logging import warn, info
import utils
from gui import gobject
from sys import exit
ngettext = translation.ngettext
def _stability(impl):
	"""Return a translated, human-readable stability string for *impl*.

	When the user has overridden the stability rating, show both the override
	and the original upstream rating; otherwise show just the upstream one.
	"""
	assert impl
	upstream = _(str(impl.upstream_stability))
	if impl.user_stability is None:
		return upstream
	values = {'implementation_user_stability': _(str(impl.user_stability)),
		  'implementation_upstream_stability': upstream}
	return _("%(implementation_user_stability)s (was %(implementation_upstream_stability)s)") % values
ICON_SIZE = 20.0  # width/height (pixels) interface icons are scaled to
CELL_TEXT_INDENT = int(ICON_SIZE) + 4  # left offset of the row text so it clears the icon
def get_tooltip_text(mainwindow, interface, main_feed, model_column):
	"""Return the tooltip string for one cell of the components tree.

	:param mainwindow: the main GUI window (used to reach the driver/solver)
	:param interface: the interface displayed on the hovered row
	:param main_feed: the interface's main feed, or None if not cached
	:param model_column: which InterfaceBrowser column is hovered (None = the menu column)
	"""
	assert interface
	if model_column == InterfaceBrowser.INTERFACE_NAME:
		return _("Full name: %s") % interface.uri
	elif model_column == InterfaceBrowser.SUMMARY:
		if main_feed is None or not main_feed.description:
			return _("(no description available)")
		# Only the first paragraph of the description, flattened to one line
		first_para = main_feed.description.split('\n\n', 1)[0]
		return first_para.replace('\n', ' ')
	elif model_column is None:
		return _("Click here for more options...")
	# Remaining columns describe the selected implementation, if any
	impl = mainwindow.driver.solver.selections.get(interface, None)
	if not impl:
		return _("No suitable version was found. Double-click "
			 "here to find out why.")
	if model_column == InterfaceBrowser.VERSION:
		text = _("Currently preferred version: %(version)s (%(stability)s)") % \
			{'version': impl.get_version(), 'stability': _stability(impl)}
		# Mention the previously-used version if a re-solve changed it
		old_impl = mainwindow.original_implementation.get(interface, None)
		if old_impl is not None and old_impl is not impl:
			text += '\n' + _('Previously preferred version: %(version)s (%(stability)s)') % \
				{'version': old_impl.get_version(), 'stability': _stability(old_impl)}
		return text
	assert model_column == InterfaceBrowser.DOWNLOAD_SIZE
	if impl.is_available(mainwindow.driver.config.stores):
		return _("This version is already stored on your computer.")
	else:
		src = mainwindow.driver.config.fetcher.get_best_source(impl)
		if not src:
			return _("No downloads available!")
		return _("Need to download %(pretty_size)s (%(size)s bytes)") % \
			{'pretty_size': support.pretty_size(src.size), 'size': src.size}
import math
angle_right = math.pi / 2
class MenuIconRenderer(gtk.GenericCellRenderer):
def __init__(self):
gtk.GenericCellRenderer.__init__(self)
self.set_property('mode', gtk.CELL_RENDERER_MODE_ACTIVATABLE)
def do_set_property(self, prop, value):
setattr(self, prop.name, value)
def do_get_size(self, widget, cell_area, layout = None):
return (0, 0, 20, 20)
on_get_size = do_get_size # GTK 2
if gtk.pygtk_version >= (2, 90):
# note: if you get "TypeError: Couldn't find conversion for foreign struct 'cairo.Context'", you need "python3-gi-cairo"
def do_render(self, cr, widget, background_area, cell_area, flags): # GTK 3
context = widget.get_style_context()
gtk.render_arrow(context, cr, angle_right,
cell_area.x + 5, cell_area.y + 5, max(cell_area.width, cell_area.height) - 10)
else:
def on_render(self, window, widget, background_area, cell_area, expose_area, flags): # GTK 2
if flags & gtk.CELL_RENDERER_PRELIT:
state = gtk.STATE_PRELIGHT
else:
state = gtk.STATE_NORMAL
widget.style.paint_box(window, state, gtk.SHADOW_OUT, expose_area, widget, None,
cell_area.x, cell_area.y, cell_area.width, cell_area.height)
widget.style.paint_arrow(window, state, gtk.SHADOW_NONE, expose_area, widget, None,
gtk.ARROW_RIGHT, True,
cell_area.x + 5, cell_area.y + 5, cell_area.width - 10, cell_area.height - 10)
class IconAndTextRenderer(gtk.GenericCellRenderer):
__gproperties__ = {
"image": (gobject.TYPE_PYOBJECT, "Image", "Image", gobject.PARAM_READWRITE),
"text": (gobject.TYPE_STRING, "Text", "Text", "-", gobject.PARAM_READWRITE),
}
def do_set_property(self, prop, value):
setattr(self, prop.name, value)
def do_get_size(self, widget, cell_area, layout = None):
if not layout:
layout = widget.create_pango_layout(self.text)
a, rect = layout.get_pixel_extents()
if self.image:
pixmap_height = self.image.get_height()
else:
pixmap_height = 32
if not isinstance(rect, tuple):
rect = (rect.x, rect.y, rect.width, rect.height) # GTK 3
both_height = max(rect[1] + rect[3], pixmap_height)
return (0, 0,
rect[0] + rect[2] + CELL_TEXT_INDENT,
both_height)
on_get_size = do_get_size # GTK 2
if gtk.pygtk_version >= (2, 90):
def do_render(self, cr, widget, background_area, cell_area, flags): # GTK 3
layout = widget.create_pango_layout(self.text)
a, rect = layout.get_pixel_extents()
context = widget.get_style_context()
image_y = int(0.5 * (cell_area.height - self.image.get_height()))
gtk.render_icon(context, cr, self.image, cell_area.x, cell_area.y + image_y)
text_y = int(0.5 * (cell_area.height - (rect.y + rect.height)))
gtk.render_layout(context, cr,
cell_area.x + CELL_TEXT_INDENT,
cell_area.y + text_y,
layout)
else:
def on_render(self, window, widget, background_area, cell_area, expose_area, flags): # GTK 2
layout = widget.create_pango_layout(self.text)
a, rect = layout.get_pixel_extents()
if flags & gtk.CELL_RENDERER_SELECTED:
state = gtk.STATE_SELECTED
elif flags & gtk.CELL_RENDERER_PRELIT:
state = gtk.STATE_PRELIGHT
else:
state = gtk.STATE_NORMAL
image_y = int(0.5 * (cell_area.height - self.image.get_height()))
window.draw_pixbuf(widget.style.white_gc, self.image, 0, 0,
cell_area.x,
cell_area.y + image_y)
text_y = int(0.5 * (cell_area.height - (rect[1] + rect[3])))
widget.style.paint_layout(window, state, True,
expose_area, widget, "cellrenderertext",
cell_area.x + CELL_TEXT_INDENT,
cell_area.y + text_y,
layout)
if gtk.pygtk_version < (2, 8, 0):
# Note sure exactly which versions need this.
# 2.8.0 gives a warning if you include it, though.
gobject.type_register(IconAndTextRenderer)
gobject.type_register(MenuIconRenderer)
def walk(model, it):
	"""Yield every tree iterator in *model* reachable from *it*, depth-first (pre-order).

	*it* may be None/falsy, in which case nothing is yielded. Each node is
	yielded before its subtree (via model.iter_children()), and siblings are
	visited in order via model.iter_next().
	"""
	node = it
	while node:
		yield node
		child = model.iter_children(node)
		if child:
			for descendant in walk(model, child):
				yield descendant
		node = model.iter_next(node)
class InterfaceBrowser(object):
model = None
root = None
cached_icon = None
driver = None
config = None
original_implementation = None
update_icons = False
INTERFACE = 0
INTERFACE_NAME = 1
VERSION = 2
SUMMARY = 3
DOWNLOAD_SIZE = 4
ICON = 5
BACKGROUND = 6
PROBLEM = 7
columns = [(_('Component'), INTERFACE_NAME),
(_('Version'), VERSION),
(_('Fetch'), DOWNLOAD_SIZE),
(_('Description'), SUMMARY),
('', None)]
def __init__(self, driver, widgets):
self.driver = driver
self.config = driver.config
tree_view = widgets.get_widget('components')
tree_view.set_property('has-tooltip', True)
def callback(widget, x, y, keyboard_mode, tooltip):
x, y = tree_view.convert_widget_to_bin_window_coords(x, y)
pos = tree_view.get_path_at_pos(x, y)
if pos:
tree_view.set_tooltip_cell(tooltip, pos[0], pos[1], None)
path = pos[0]
try:
col_index = column_objects.index(pos[1])
except ValueError:
return False
else:
col = self.columns[col_index][1]
row = self.model[path]
iface = row[InterfaceBrowser.INTERFACE]
main_feed = self.config.iface_cache.get_feed(iface.uri)
tooltip.set_text(get_tooltip_text(self, iface, main_feed, col))
return True
else:
return False
tree_view.connect('query-tooltip', callback)
self.cached_icon = {} # URI -> GdkPixbuf
self.default_icon = tree_view.get_style().lookup_icon_set(gtk.STOCK_EXECUTE).render_icon(tree_view.get_style(),
gtk.TEXT_DIR_NONE, gtk.STATE_NORMAL, gtk.ICON_SIZE_SMALL_TOOLBAR, tree_view, None)
self.model = gtk.TreeStore(object, str, str, str, str, gobject.TYPE_PYOBJECT, str, bool)
self.tree_view = tree_view
tree_view.set_model(self.model)
column_objects = []
text = gtk.CellRendererText()
coloured_text = gtk.CellRendererText()
for name, model_column in self.columns:
if model_column == InterfaceBrowser.INTERFACE_NAME:
column = gtk.TreeViewColumn(name, IconAndTextRenderer(),
text = model_column,
image = InterfaceBrowser.ICON)
elif model_column == None:
menu_column = column = gtk.TreeViewColumn('', MenuIconRenderer())
else:
if model_column == InterfaceBrowser.SUMMARY:
text_ellip = gtk.CellRendererText()
try:
text_ellip.set_property('ellipsize', pango.ELLIPSIZE_END)
except:
pass
column = gtk.TreeViewColumn(name, text_ellip, text = model_column)
column.set_expand(True)
elif model_column == InterfaceBrowser.VERSION:
column = gtk.TreeViewColumn(name, coloured_text, text = model_column,
background = InterfaceBrowser.BACKGROUND)
else:
column = gtk.TreeViewColumn(name, text, text = model_column)
tree_view.append_column(column)
column_objects.append(column)
tree_view.set_enable_search(True)
selection = tree_view.get_selection()
def button_press(tree_view, bev):
pos = tree_view.get_path_at_pos(int(bev.x), int(bev.y))
if not pos:
return False
path, col, x, y = pos
if (bev.button == 3 or (bev.button < 4 and col is menu_column)) \
and bev.type == gtk.gdk.BUTTON_PRESS:
selection.select_path(path)
iface = self.model[path][InterfaceBrowser.INTERFACE]
self.show_popup_menu(iface, bev)
return True
if bev.button != 1 or bev.type != gtk.gdk._2BUTTON_PRESS:
return False
properties.edit(driver, self.model[path][InterfaceBrowser.INTERFACE], self.compile, show_versions = True)
tree_view.connect('button-press-event', button_press)
tree_view.connect('destroy', lambda s: driver.watchers.remove(self.build_tree))
driver.watchers.append(self.build_tree)
	def set_root(self, root):
		"""Set the root interface whose dependency tree this browser displays.

		:param root: the top-level interface (must be a model.Interface)
		"""
		assert isinstance(root, model.Interface)
		self.root = root
def set_update_icons(self, update_icons):
if update_icons:
# Clear icons cache to make sure they're really updated
self.cached_icon = {}
self.update_icons = update_icons
def get_icon(self, iface):
"""Get an icon for this interface. If the icon is in the cache, use that.
If not, start a download. If we already started a download (successful or
not) do nothing. Returns None if no icon is currently available."""
try:
# Try the in-memory cache
return self.cached_icon[iface.uri]
except KeyError:
# Try the on-disk cache
iconpath = self.config.iface_cache.get_icon_path(iface)
if iconpath:
icon = load_icon(iconpath, ICON_SIZE, ICON_SIZE)
# (if icon is None, cache the fact that we can't load it)
self.cached_icon[iface.uri] = icon
else:
icon = None
# Download a new icon if we don't have one, or if the
# user did a 'Refresh'
if iconpath is None or self.update_icons:
if self.config.network_use == model.network_offline:
fetcher = None
else:
fetcher = self.config.fetcher.download_icon(iface)
if fetcher:
if iface.uri not in self.cached_icon:
self.cached_icon[iface.uri] = None # Only try once
@tasks.async
def update_display():
yield fetcher
try:
tasks.check(fetcher)
# Try to insert new icon into the cache
# If it fails, we'll be left with None in the cached_icon so
# we don't try again.
iconpath = self.config.iface_cache.get_icon_path(iface)
if iconpath:
self.cached_icon[iface.uri] = load_icon(iconpath, ICON_SIZE, ICON_SIZE)
self.build_tree()
else:
warn("Failed to download icon for '%s'", iface)
except download.DownloadAborted as ex:
info("Icon download aborted: %s", ex)
# Don't report further; the user knows they cancelled
except download.DownloadError as ex:
warn("Icon download failed: %s", ex)
# Not worth showing a dialog box for this
except Exception as ex:
import traceback
traceback.print_exc()
self.config.handler.report_error(ex)
update_display()
# elif fetcher is None: don't store anything in cached_icon
# Note: if no icon is available for downloading,
# more attempts are made later.
# It can happen that no icon is yet available because
# the interface was not downloaded yet, in which case
# it's desireable to try again once the interface is available
return icon
return None
def build_tree(self):
iface_cache = self.config.iface_cache
if self.original_implementation is None:
self.set_original_implementations()
done = {} # Detect cycles
sels = self.driver.solver.selections
self.model.clear()
def add_node(parent, iface, commands, essential):
if iface in done:
return
done[iface] = True
main_feed = iface_cache.get_feed(iface.uri)
if main_feed:
name = main_feed.get_name()
summary = main_feed.summary
else:
name = iface.get_name()
summary = None
iter = self.model.append(parent)
self.model[iter][InterfaceBrowser.INTERFACE] = iface
self.model[iter][InterfaceBrowser.INTERFACE_NAME] = name
self.model[iter][InterfaceBrowser.SUMMARY] = summary or ''
self.model[iter][InterfaceBrowser.ICON] = self.get_icon(iface) or self.default_icon
self.model[iter][InterfaceBrowser.PROBLEM] = False
sel = sels.selections.get(iface.uri, None)
if sel:
impl = sel.impl
old_impl = self.original_implementation.get(iface, None)
version_str = impl.get_version()
if old_impl is not None and old_impl.id != impl.id:
version_str += _(' (was %s)') % old_impl.get_version()
self.model[iter][InterfaceBrowser.VERSION] = version_str
self.model[iter][InterfaceBrowser.DOWNLOAD_SIZE] = utils.get_fetch_info(self.config, impl)
deps = sel.dependencies
for c in commands:
deps += sel.get_command(c).requires
for child in deps:
if isinstance(child, model.InterfaceDependency):
add_node(iter,
iface_cache.get_interface(child.interface),
child.get_required_commands(),
child.importance == model.Dependency.Essential)
elif not isinstance(child, model.InterfaceRestriction):
child_iter = self.model.append(parent)
self.model[child_iter][InterfaceBrowser.INTERFACE_NAME] = '?'
self.model[child_iter][InterfaceBrowser.SUMMARY] = \
_('Unknown dependency type : %s') % child
self.model[child_iter][InterfaceBrowser.ICON] = self.default_icon
else:
self.model[iter][InterfaceBrowser.PROBLEM] = essential
self.model[iter][InterfaceBrowser.VERSION] = _('(problem)') if essential else _('(none)')
try:
if sels.command:
add_node(None, self.root, [sels.command], essential = True)
else:
add_node(None, self.root, [], essential = True)
self.tree_view.expand_all()
except Exception as ex:
warn("Failed to build tree: %s", ex, exc_info = ex)
raise
def show_popup_menu(self, iface, bev):
import bugs
have_source = properties.have_source_for(self.config, iface)
global menu # Fix GC problem in PyGObject
menu = gtk.Menu()
for label, cb in [(_('Show Feeds'), lambda: properties.edit(self.driver, iface, self.compile)),
(_('Show Versions'), lambda: properties.edit(self.driver, iface, self.compile, show_versions = True)),
(_('Report a Bug...'), lambda: bugs.report_bug(self.driver, iface))]:
item = gtk.MenuItem()
item.set_label(label)
if cb:
item.connect('activate', lambda item, cb=cb: cb())
else:
item.set_sensitive(False)
item.show()
menu.append(item)
item = gtk.MenuItem()
item.set_label(_('Compile'))
item.show()
menu.append(item)
if have_source:
compile_menu = gtk.Menu()
item.set_submenu(compile_menu)
item = gtk.MenuItem()
item.set_label(_('Automatic'))
item.connect('activate', lambda item: self.compile(iface, autocompile = True))
item.show()
compile_menu.append(item)
item = gtk.MenuItem()
item.set_label(_('Manual...'))
item.connect('activate', lambda item: self.compile(iface, autocompile = False))
item.show()
compile_menu.append(item)
else:
item.set_sensitive(False)
if gtk.pygtk_version >= (2, 90):
menu.popup(None, None, None, None, bev.button, bev.time)
else:
menu.popup(None, None, None, bev.button, bev.time)
	def compile(self, interface, autocompile = True):
		"""Build *interface* from source with 0compile, then refresh and re-solve.

		:param interface: the source interface to compile
		:param autocompile: if True, build without prompting for build options
		"""
		import compile
		def on_success():
			# A new local feed may have been registered, so reload it from the disk cache
			info(_("0compile command completed successfully. Reloading interface details."))
			reader.update_from_cache(interface)
			for feed in interface.extra_feeds:
				self.config.iface_cache.get_feed(feed.uri, force = True)
			import main
			main.recalculate()
		compile.compile(on_success, interface.uri, autocompile = autocompile)
	def set_original_implementations(self):
		"""Snapshot the current solver selections (once) so later re-solves can
		display "(was X)" next to versions that changed."""
		assert self.original_implementation is None
		self.original_implementation = self.driver.solver.selections.copy()
def update_download_status(self, only_update_visible = False):
"""Called at regular intervals while there are downloads in progress,
and once at the end. Also called when things are added to the store.
Update the TreeView with the interfaces."""
# A download may be for a feed, an interface or an implementation.
# Create the reverse mapping (item -> download)
hints = {}
for dl in self.config.handler.monitored_downloads:
if dl.hint:
if dl.hint not in hints:
hints[dl.hint] = []
hints[dl.hint].append(dl)
selections = self.driver.solver.selections
# Only update currently visible rows
if only_update_visible and self.tree_view.get_visible_range() != None:
firstVisiblePath, lastVisiblePath = self.tree_view.get_visible_range()
firstVisibleIter = self.model.get_iter(firstVisiblePath)
else:
# (or should we just wait until the TreeView has settled enough to tell
# us what is visible?)
firstVisibleIter = self.model.get_iter_root()
lastVisiblePath = None
solver = self.driver.solver
requirements = self.driver.requirements
iface_cache = self.config.iface_cache
for it in walk(self.model, firstVisibleIter):
row = self.model[it]
iface = row[InterfaceBrowser.INTERFACE]
# Is this interface the download's hint?
downloads = hints.get(iface, []) # The interface itself
downloads += hints.get(iface.uri, []) # The main feed
arch = solver.get_arch_for(requirements, iface)
for feed in iface_cache.usable_feeds(iface, arch):
downloads += hints.get(feed.uri, []) # Other feeds
impl = selections.get(iface, None)
if impl:
downloads += hints.get(impl, []) # The chosen implementation
if downloads:
so_far = 0
expected = None
for dl in downloads:
if dl.expected_size:
expected = (expected or 0) + dl.expected_size
so_far += dl.get_bytes_downloaded_so_far()
if expected:
summary = ngettext("(downloading %(downloaded)s/%(expected)s [%(percentage).2f%%])",
"(downloading %(downloaded)s/%(expected)s [%(percentage).2f%%] in %(number)d cdownloads)",downloads)
values_dict = {'downloaded': pretty_size(so_far), 'expected': pretty_size(expected), 'percentage': 100 * so_far / float(expected), 'number': len(downloads)}
else:
summary = ngettext("(downloading %(downloaded)s/unknown)",
"(downloading %(downloaded)s/unknown in %(number)d downloads)",
downloads)
values_dict = {'downloaded': pretty_size(so_far), 'number': len(downloads)}
row[InterfaceBrowser.SUMMARY] = summary % values_dict
else:
feed = iface_cache.get_feed(iface.uri)
row[InterfaceBrowser.DOWNLOAD_SIZE] = utils.get_fetch_info(self.config, impl)
row[InterfaceBrowser.SUMMARY] = feed.summary if feed else "-"
if self.model.get_path(it) == lastVisiblePath:
break
	def highlight_problems(self):
		"""Called when the solve finishes. Highlight any missing implementations."""
		for it in walk(self.model, self.model.get_iter_root()):
			row = self.model[it]
			iface = row[InterfaceBrowser.INTERFACE]
			sel = self.driver.solver.selections.selections.get(iface.uri, None)
			if sel is None and row[InterfaceBrowser.PROBLEM]:
				# Essential dependency with no chosen implementation: tint the row red
				row[InterfaceBrowser.BACKGROUND] = '#f88'
| AlexanderRyzhko/0install-TUF | zeroinstall/0launch-gui/iface_browser.py | Python | lgpl-2.1 | 20,425 | [
"VisIt"
] | 0ff2436214edcdb87b8a586c869919cdb680b77a514b7586b5eaa754f4d8f4ea |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pyscf import lib
from pyscf import gto, scf
try:
from pyscf.dftd3 import dftd3
except ImportError:
dftd3 = False
mol = gto.M(atom='''
O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587
''', symmetry=True)
def tearDownModule():
    # Release the module-level Mole fixture once all tests have run.
    global mol
    del mol
@unittest.skipIf(not dftd3, "library dftd3 not found.")
class KnownValues(unittest.TestCase):
def test_dftd3_scf(self):
mf = dftd3(scf.RHF(mol))
self.assertAlmostEqual(mf.kernel(), -74.96757204541478, 0)
def test_dftd3_scf_grad(self):
mf = dftd3(scf.RHF(mol)).run()
mfs = mf.as_scanner()
e1 = mfs(''' O 0. 0. 0.0001; H 0. -0.757 0.587; H 0. 0.757 0.587 ''')
e2 = mfs(''' O 0. 0. -0.0001; H 0. -0.757 0.587; H 0. 0.757 0.587 ''')
ref = (e1 - e2)/0.0002 * lib.param.BOHR
g = mf.nuc_grad_method().kernel()
# DFTD3 does not show high agreement between analytical gradients and
# numerical gradients. not sure whether libdftd3 analytical gradients
# have bug
self.assertAlmostEqual(ref, g[0,2], 5)
if __name__ == "__main__":
print("Tests for dftd3")
unittest.main()
| gkc1000/pyscf | pyscf/dftd3/test/test_dftd3.py | Python | apache-2.0 | 1,865 | [
"PySCF"
] | 751e4ba5d402aa73c5a09c6a771551dbe3ae2b5894a59e77904d1fa039b2cddb |
# -*- coding: utf-8 -*-
"""Tests for graph operations."""
import unittest
from pybel import BELGraph
from pybel.dsl import protein
from pybel.struct.operations import (
left_full_join,
left_node_intersection_join,
left_outer_join,
node_intersection,
union,
)
from pybel.testing.utils import n
p1, p2, p3, p4, p5, p6, p7, p8 = (protein(namespace="HGNC", name=n()) for _ in range(8))
class TestLeftFullJoin(unittest.TestCase):
    """Tests the variants of the left full join, including the exhaustive vs. hash algorithms and calling by function
    or magic functions"""

    def setUp(self):
        """Set up tests for the left full join with two example graphs."""
        g = BELGraph()
        g.add_increases(p1, p2, citation="PMID1", evidence="Evidence 1")

        # h shares one edge with g (same citation/evidence) plus two new ones
        h = BELGraph()
        h.add_increases(p1, p2, citation="PMID1", evidence="Evidence 1")
        h.add_increases(p1, p2, citation="PMID2", evidence="Evidence 2")
        h.add_increases(p1, p3, citation="PMID1", evidence="Evidence 3")

        self.g = g
        self.h = h

        self._help_check_initial_g(self.g)
        self._help_check_initial_h(self.h)

    def _help_check_initial_g(self, graph: BELGraph):
        """Test the initial G graph."""
        self.assertEqual(2, graph.number_of_nodes(), msg="initial graph G had wrong number of nodes")
        self.assertEqual(1, graph.number_of_edges(), msg="initial graph G had wrong number of edges")

    def _help_check_initial_h(self, graph: BELGraph):
        """Test the initial H graph."""
        self.assertEqual(3, graph.number_of_nodes(), msg="initial graph H had wrong number of nodes")
        self.assertEqual(3, graph.number_of_edges(), msg="initial graph H had wrong number of edges")

    def _help_check_result(self, j: BELGraph):
        """Help check the result of left joining H into G.

        :param j: The resulting graph from G += H
        """
        # the duplicate (p1, p2, PMID1) edge must be deduplicated on join
        self.assertEqual(3, j.number_of_nodes())
        self.assertEqual(
            3,
            j.number_of_edges(),
            msg="G edges:\n{}".format("\n".join(map(str, j.edges(data=True)))),
        )

    def test_function(self):
        """Test full joining two networks using the function."""
        left_full_join(self.g, self.h)
        self._help_check_result(self.g)
        self._help_check_initial_h(self.h)

    def test_full_join_with_isolated_nodes(self):
        """Test what happens when there are isolated nodes."""
        a = BELGraph()
        a.add_increases(p1, p2, citation=n(), evidence=n())
        a.add_node_from_data(p4)

        b = BELGraph()
        b.add_increases(p2, p3, citation=n(), evidence=n())
        b.add_node_from_data(p5)

        left_full_join(a, b)
        # isolated nodes from both graphs must survive the join
        for node in p1, p2, p3, p4, p5:
            self.assertIn(node, a)

    def test_in_place_operator_failure(self):
        """Test that using the wrong type with the in-place addition operator raises an error."""
        with self.assertRaises(TypeError):
            self.g += None

    def test_in_place_operator(self):
        """Test full joining two networks using the BELGraph in-place addition operator."""
        self.g += self.h
        self._help_check_result(self.g)
        self._help_check_initial_h(self.h)

    def test_operator_failure(self):
        """Test that using the wrong type with the addition operator raises an error."""
        with self.assertRaises(TypeError):
            self.g + None

    def test_operator(self):
        """Test full joining two networks using the BELGraph addition operator."""
        # '+' must not mutate either operand, unlike '+='
        j = self.g + self.h
        self._help_check_result(j)
        self._help_check_initial_g(self.g)
        self._help_check_initial_h(self.h)

    def test_union_failure(self):
        """Test that the union of no graphs raises a value error."""
        with self.assertRaises(ValueError):
            union([])

    def test_union_trivial(self):
        """Test that the union of a single graph returns that graph."""
        res = union([self.g])
        self.assertEqual(self.g, res)

    def test_union(self):
        """Test that the union of a pair of graphs is the same as the full join."""
        j = union([self.g, self.h])
        self._help_check_result(j)
        self._help_check_initial_g(self.g)
        self._help_check_initial_h(self.h)
class TestLeftFullOuterJoin(unittest.TestCase):
    """Tests the left full outer join (graph '&' operator): keep G plus the parts
    of H connected to nodes already in G."""

    def setUp(self):
        # G: single edge p1 -> p2
        g = BELGraph()
        g.add_edge(p1, p2)

        # H: two components -- one touching p1 (shared with G), one disjoint
        h = BELGraph()
        h.add_edge(p1, p3)
        h.add_edge(p1, p4)
        h.add_edge(p5, p6)
        h.add_node(p7)

        self.g = g
        self.h = h

    def _help_check_initial_g(self, g):
        self.assertEqual(2, g.number_of_nodes())
        self.assertEqual({p1, p2}, set(g))
        self.assertEqual(1, g.number_of_edges())
        self.assertEqual({(p1, p2)}, set(g.edges()))

    def _help_check_initial_h(self, h):
        self.assertEqual(6, h.number_of_nodes())
        self.assertEqual({p1, p3, p4, p5, p6, p7}, set(h))
        self.assertEqual(3, h.number_of_edges())
        self.assertEqual({(p1, p3), (p1, p4), (p5, p6)}, set(h.edges()))

    def _help_check_result(self, j):
        """After H has been full outer joined into G, this is what it should be"""
        # p5/p6/p7 are dropped: they don't connect to any node in G
        self.assertEqual(4, j.number_of_nodes())
        self.assertEqual({p1, p2, p3, p4}, set(j))
        self.assertEqual(3, j.number_of_edges())
        self.assertEqual({(p1, p2), (p1, p3), (p1, p4)}, set(j.edges()))

    def test_in_place_type_failure(self):
        with self.assertRaises(TypeError):
            self.g &= None

    def test_type_failure(self):
        with self.assertRaises(TypeError):
            self.g & None

    def test_magic(self):
        # left_outer_join(g, h) via the in-place operator
        self.g &= self.h
        self._help_check_initial_h(self.h)
        self._help_check_result(self.g)

    def test_operator(self):
        # left_outer_join(g, h) via the binary operator; neither operand mutates
        j = self.g & self.h
        self._help_check_initial_h(self.h)
        self._help_check_initial_g(self.g)
        self._help_check_result(j)

    def test_left_outer_join(self):
        left_outer_join(self.g, self.h)
        self._help_check_initial_h(self.h)
        self._help_check_result(self.g)

    def test_left_outer_exhaustive_join(self):
        # joining twice must be idempotent
        self.g &= self.h
        left_outer_join(self.g, self.h)
        self._help_check_initial_h(self.h)
        self._help_check_result(self.g)
class TestInnerJoin(unittest.TestCase):
    """Tests various graph merging procedures based on node intersection."""

    def setUp(self):
        """Build two graphs whose only shared nodes are p1 and p3."""
        g = BELGraph()
        g.add_edge(p1, p2)
        g.add_edge(p1, p3)
        g.add_edge(p8, p3)

        h = BELGraph()
        h.add_edge(p1, p3)
        h.add_edge(p1, p4)
        h.add_edge(p5, p6)
        h.add_node(p7)

        self.g = g
        self.h = h

    def _help_check_initialize_g(self, graph):
        """Assert that the left graph is unmodified."""
        self.assertEqual(4, graph.number_of_nodes())
        self.assertEqual(3, graph.number_of_edges())

    def _help_check_initialize_h(self, graph):
        """Assert that the right graph is unmodified."""
        self.assertEqual(6, graph.number_of_nodes())
        self.assertEqual({p1, p3, p4, p5, p6, p7}, set(graph))
        self.assertEqual(3, graph.number_of_edges())
        self.assertEqual({(p1, p3), (p1, p4), (p5, p6)}, set(graph.edges()))

    def test_initialize(self):
        self._help_check_initialize_g(self.g)
        self._help_check_initialize_h(self.h)

    def _help_check_join(self, j):
        """Assert the join contains exactly the shared nodes and edge."""
        self.assertEqual(2, j.number_of_nodes())
        self.assertEqual({p1, p3}, set(j))
        self.assertEqual(1, j.number_of_edges())
        self.assertEqual({(p1, p3)}, set(j.edges()))

    def test_in_place_type_failure(self):
        # FIX: this test previously exercised the binary ``^`` operator;
        # the in-place ``^=`` is what the name promises (matches the
        # convention used by TestLeftFullOuterJoin).
        with self.assertRaises(TypeError):
            self.g ^= None

    def test_type_failure(self):
        # FIX: this test previously exercised ``^=``; the plain binary
        # ``^`` operator is what the name promises.
        with self.assertRaises(TypeError):
            self.g ^ None

    def test_magic(self):
        j = self.g ^ self.h
        self._help_check_join(j)
        self._help_check_initialize_h(self.h)
        self._help_check_initialize_g(self.g)

    def test_left_node_intersection_join(self):
        j = left_node_intersection_join(self.g, self.h)
        self._help_check_join(j)
        self._help_check_initialize_h(self.h)
        self._help_check_initialize_g(self.g)

    def test_node_intersection(self):
        j = node_intersection([self.h, self.g])
        self._help_check_join(j)
        self._help_check_initialize_h(self.h)
        self._help_check_initialize_g(self.g)

    def test_intersection_failure(self):
        """Intersecting no graphs raises a ValueError."""
        with self.assertRaises(ValueError):
            node_intersection([])

    def test_intersection_trivial(self):
        """Intersecting a single graph returns that graph."""
        res = node_intersection([self.g])
        self.assertEqual(self.g, res)
| pybel/pybel | tests/test_struct/test_struct_operations.py | Python | mit | 8,752 | [
"Pybel"
] | a49dbd7f44b835c17f6345677a5fe529e1f41a85e7d6c0d75d08f7f1f38fc8ac |
#!/usr/bin/env python
import argparse
import shutil
import icqsol_utils
# Parse Command Line.
parser = argparse.ArgumentParser()
parser.add_argument('--input', dest='input', help='Shape dataset selected from history')
parser.add_argument('--input_file_format_and_type', dest='input_file_format_and_type', help='Input file format and type')
parser.add_argument('--input_dataset_type', dest='input_dataset_type', help='Input dataset_type')
parser.add_argument('--field_name', dest='field_name', help='Surface field name')
parser.add_argument('--location', dest='location', help='Location of field within cell, either point or cell')
parser.add_argument('--expression', dest='expression', help='Expression for applying surface field to shape')
parser.add_argument('--time_point', dest='time_points', type=float, action='append', nargs=1, help='Points in time')
parser.add_argument('--max_edge_length', dest='max_edge_length', type=float, default=float('inf'), help='Maximum edge length')
parser.add_argument('--output', dest='output', help='Output dataset')
parser.add_argument('--output_vtk_type', dest='output_vtk_type', help='Output VTK type')
args = parser.parse_args()
input_format, input_file_type = icqsol_utils.get_format_and_type(args.input_file_format_and_type)
time_points = [tp[0] for tp in args.time_points]
tmp_dir = icqsol_utils.get_temp_dir()
# Instantiate a ShapeManager for loading the input.
shape_mgr = icqsol_utils.get_shape_manager(input_format, args.input_dataset_type)
# Get the vtkPolyData object.
pdata = shape_mgr.loadAsVtkPolyData(args.input)
# Add surface field to shape data.
vtk_poly_data = shape_mgr.addSurfaceFieldFromExpressionToVtkPolyData(pdata,
args.field_name,
args.expression,
time_points,
args.max_edge_length,
args.location)
# Write min/max field values and surface integral.
for comp in range(len(time_points)):
minVal, maxVal = shape_mgr.getFieldRange(vtk_poly_data, args.field_name, comp)
surfIntegral = shape_mgr.integrateSurfaceField(vtk_poly_data, args.field_name, comp)
print 'component {2} min/max values of {3}: {0}/{1} surf integral: {4}'.format(minVal, maxVal, comp, args.field_name, surfIntegral)
# Define the output file format and type (the outpur_format can only be 'vtk').
output_format, output_file_type = icqsol_utils.get_format_and_type(args.output_vtk_type)
tmp_output_path = icqsol_utils.get_temporary_file_path(tmp_dir, output_format)
# Make sure the ShapeManager's writer is vtk.
shape_mgr.setWriter(file_format=icqsol_utils.VTK, vtk_dataset_type=icqsol_utils.POLYDATA)
# Save the output.
shape_mgr.saveVtkPolyData(vtk_poly_data=vtk_poly_data, file_name=tmp_output_path, file_type=output_file_type)
shutil.move(tmp_output_path, args.output)
| pletzer/galaxy-csg | tools/icqsol_add_surface_field_from_expression/icqsol_add_surface_field_from_expression.py | Python | mit | 3,070 | [
"VTK"
] | 3b07b52c9d91e052e220df85cbbd152b2674f3f268ef409b929f0c81574f0977 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Driver for Microsoft Azure Virtual Machines service.
http://azure.microsoft.com/en-us/services/virtual-machines/
"""
import re
import time
import collections
import random
import sys
import copy
import base64
from datetime import datetime
from xml.dom import minidom
from xml.sax.saxutils import escape as xml_escape
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.common.azure import AzureServiceManagementConnection
from libcloud.common.azure import AzureRedirectException
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume
from libcloud.compute.types import NodeState
from libcloud.common.types import LibcloudError
from libcloud.utils.py3 import _real_unicode
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import ensure_string
from libcloud.utils.py3 import urlquote as url_quote
from libcloud.utils.misc import ReprMixin
# Short alias used by the driver for raw HTTPS requests.
HTTPSConnection = httplib.HTTPSConnection

# Python 2/3 compatibility shim: `_str` coerces a value to the interpreter's
# native `str` type and `_unicode_type` names the text type.
if sys.version_info < (3,):
    _unicode_type = unicode

    def _str(value):
        """Return *value* as a Python 2 byte string, UTF-8 encoding unicode."""
        if isinstance(value, unicode):
            return value.encode('utf-8')
        return str(value)
else:
    # On Python 3 the built-in str already is the text type.
    _str = str
    _unicode_type = str
# Azure Service Management endpoint and the API version sent with requests.
AZURE_SERVICE_MANAGEMENT_HOST = 'management.core.windows.net'
X_MS_VERSION = '2013-08-01'

# Heuristic for recognizing Windows images by their image id.
# NOTE(review): this pattern is compiled WITHOUT re.IGNORECASE, so call
# sites that want case-insensitive matching must pass the flag to a
# module-level re.search themselves.
WINDOWS_SERVER_REGEX = re.compile(
    r'Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk'
)
"""
Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them
From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx
Prices are for Linux instances in East US data center. To see what pricing will
actually be, visit:
http://azure.microsoft.com/en-gb/pricing/details/virtual-machines/
"""
AZURE_COMPUTE_INSTANCE_TYPES = {
'A0': {
'id': 'ExtraSmall',
'name': 'Extra Small Instance',
'ram': 768,
'disk': 127,
'bandwidth': None,
'price': '0.0211',
'max_data_disks': 1,
'cores': 'Shared'
},
'A1': {
'id': 'Small',
'name': 'Small Instance',
'ram': 1792,
'disk': 127,
'bandwidth': None,
'price': '0.0633',
'max_data_disks': 2,
'cores': 1
},
'A2': {
'id': 'Medium',
'name': 'Medium Instance',
'ram': 3584,
'disk': 127,
'bandwidth': None,
'price': '0.1266',
'max_data_disks': 4,
'cores': 2
},
'A3': {
'id': 'Large',
'name': 'Large Instance',
'ram': 7168,
'disk': 127,
'bandwidth': None,
'price': '0.2531',
'max_data_disks': 8,
'cores': 4
},
'A4': {
'id': 'ExtraLarge',
'name': 'Extra Large Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.5062',
'max_data_disks': 16,
'cores': 8
},
'A5': {
'id': 'A5',
'name': 'Memory Intensive Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.2637',
'max_data_disks': 4,
'cores': 2
},
'A6': {
'id': 'A6',
'name': 'A6 Instance',
'ram': 28672,
'disk': 127,
'bandwidth': None,
'price': '0.5273',
'max_data_disks': 8,
'cores': 4
},
'A7': {
'id': 'A7',
'name': 'A7 Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '1.0545',
'max_data_disks': 16,
'cores': 8
},
'A8': {
'id': 'A8',
'name': 'A8 Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '2.0774',
'max_data_disks': 16,
'cores': 8
},
'A9': {
'id': 'A9',
'name': 'A9 Instance',
'ram': 114688,
'disk': 127,
'bandwidth': None,
'price': '4.7137',
'max_data_disks': 16,
'cores': 16
},
'A10': {
'id': 'A10',
'name': 'A10 Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '1.2233',
'max_data_disks': 16,
'cores': 8
},
'A11': {
'id': 'A11',
'name': 'A11 Instance',
'ram': 114688,
'disk': 127,
'bandwidth': None,
'price': '2.1934',
'max_data_disks': 16,
'cores': 16
},
'D1': {
'id': 'Standard_D1',
'name': 'D1 Faster Compute Instance',
'ram': 3584,
'disk': 127,
'bandwidth': None,
'price': '0.0992',
'max_data_disks': 2,
'cores': 1
},
'D2': {
'id': 'Standard_D2',
'name': 'D2 Faster Compute Instance',
'ram': 7168,
'disk': 127,
'bandwidth': None,
'price': '0.1983',
'max_data_disks': 4,
'cores': 2
},
'D3': {
'id': 'Standard_D3',
'name': 'D3 Faster Compute Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.3965',
'max_data_disks': 8,
'cores': 4
},
'D4': {
'id': 'Standard_D4',
'name': 'D4 Faster Compute Instance',
'ram': 28672,
'disk': 127,
'bandwidth': None,
'price': '0.793',
'max_data_disks': 16,
'cores': 8
},
'D11': {
'id': 'Standard_D11',
'name': 'D11 Faster Compute Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.251',
'max_data_disks': 4,
'cores': 2
},
'D12': {
'id': 'Standard_D12',
'name': 'D12 Faster Compute Instance',
'ram': 28672,
'disk': 127,
'bandwidth': None,
'price': '0.502',
'max_data_disks': 8,
'cores': 4
},
'D13': {
'id': 'Standard_D13',
'name': 'D13 Faster Compute Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '0.9038',
'max_data_disks': 16,
'cores': 8
},
'D14': {
'id': 'Standard_D14',
'name': 'D14 Faster Compute Instance',
'ram': 114688,
'disk': 127,
'bandwidth': None,
'price': '1.6261',
'max_data_disks': 32,
'cores': 16
}
}
# Irregular snake_case -> XML element-name translations (presumably consumed
# by the XML serializer elsewhere in this module for names a generic
# camel-casing rule cannot produce -- e.g. acronyms like 'OS' and 'FQDN';
# verify against AzureXmlSerializer).
_KNOWN_SERIALIZATION_XFORMS = {
    'include_apis': 'IncludeAPIs',
    'message_id': 'MessageId',
    'content_md5': 'Content-MD5',
    'last_modified': 'Last-Modified',
    'cache_control': 'Cache-Control',
    'account_admin_live_email_id': 'AccountAdminLiveEmailId',
    'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
    'subscription_id': 'SubscriptionID',
    'fqdn': 'FQDN',
    'private_id': 'PrivateID',
    'os_virtual_hard_disk': 'OSVirtualHardDisk',
    'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
    'logical_size_in_gb': 'LogicalSizeInGB',
    'os': 'OS',
    'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
    'copy_id': 'CopyId',
    'os_disk_configuration': 'OSDiskConfiguration',
    'is_dns_programmed': 'IsDnsProgrammed'
}
class AzureNodeDriver(NodeDriver):
    """libcloud driver for (classic) Azure Service Management virtual machines."""
    connectionCls = AzureServiceManagementConnection
    name = 'Azure Virtual machines'
    website = 'http://azure.microsoft.com/en-us/services/virtual-machines/'
    type = Provider.AZURE
    _instance_types = AZURE_COMPUTE_INSTANCE_TYPES
    _blob_url = ".blob.core.windows.net"
    # Password is the only create_node credential type this driver supports.
    features = {'create_node': ['password']}
    # (is_affinity_group, service_location) pair describing where a cloud
    # service lives; returned by _get_cloud_service_location.
    service_location = collections.namedtuple(
        'service_location',
        ['is_affinity_group', 'service_location']
    )
    # Maps Azure role-instance status strings onto libcloud NodeState values.
    NODE_STATE_MAP = {
        'RoleStateUnknown': NodeState.UNKNOWN,
        'CreatingVM': NodeState.PENDING,
        'StartingVM': NodeState.PENDING,
        'Provisioning': NodeState.PENDING,
        'CreatingRole': NodeState.PENDING,
        'StartingRole': NodeState.PENDING,
        'ReadyRole': NodeState.RUNNING,
        'BusyRole': NodeState.PENDING,
        'StoppingRole': NodeState.PENDING,
        'StoppingVM': NodeState.PENDING,
        'DeletingVM': NodeState.PENDING,
        'StoppedVM': NodeState.STOPPED,
        'RestartingRole': NodeState.REBOOTING,
        'CyclingRole': NodeState.TERMINATED,
        'FailedStartingRole': NodeState.TERMINATED,
        'FailedStartingVM': NodeState.TERMINATED,
        'UnresponsiveRole': NodeState.TERMINATED,
        'StoppedDeallocated': NodeState.TERMINATED,
    }
def __init__(self, subscription_id=None, key_file=None, **kwargs):
    """
    subscription_id contains the Azure subscription id in the form of GUID
    key_file contains the Azure X509 certificate in .pem form

    :param subscription_id: Azure subscription id (GUID).
    :type subscription_id: ``str``
    :param key_file: Path to the Azure X509 certificate (.pem).
    :type key_file: ``str``
    :keyword follow_redirects: Whether to follow HTTP redirects
                               (defaults to ``True``).
    :type follow_redirects: ``bool``
    """
    self.subscription_id = subscription_id
    self.key_file = key_file
    self.follow_redirects = kwargs.get('follow_redirects', True)
    # subscription_id/key_file play the role of user_id/key in the base
    # NodeDriver constructor.
    super(AzureNodeDriver, self).__init__(
        self.subscription_id,
        self.key_file,
        secure=True,
        **kwargs
    )
def list_sizes(self):
    """
    Lists all sizes

    :rtype: ``list`` of :class:`NodeSize`
    """
    # Deep-copy each entry so callers cannot mutate the shared
    # AZURE_COMPUTE_INSTANCE_TYPES table through the returned sizes.
    # (Idiom: comprehension over .values() replaces the manual
    # loop-and-append that discarded the dict keys.)
    return [
        self._to_node_size(copy.deepcopy(value))
        for value in self._instance_types.values()
    ]
def list_images(self, location=None):
    """
    Lists all images

    :rtype: ``list`` of :class:`NodeImage`
    """
    # Platform images and user-captured VM images come from two
    # different list operations.
    platform_images = self._perform_get(self._get_image_path(), Images)
    captured_images = self._perform_get(
        self._get_vmimage_path(),
        VMImages
    )

    images = [self._to_image(item) for item in platform_images]
    images += [self._vm_to_image(item) for item in captured_images]

    if location is None:
        return images

    # Keep only images advertised for the requested location.
    return [
        image
        for image in images
        if location in image.extra["location"]
    ]
def list_locations(self):
    """
    Lists all locations

    :rtype: ``list`` of :class:`NodeLocation`
    """
    path = '/%s/locations' % (self.subscription_id,)
    raw_locations = self._perform_get(path, Locations)
    return [self._to_location(entry) for entry in raw_locations]
def list_nodes(self, ex_cloud_service_name):
    """
    List all nodes

    ex_cloud_service_name parameter is used to scope the request
    to a specific Cloud Service. This is a required parameter as
    nodes cannot exist outside of a Cloud Service nor be shared
    between a Cloud Service within Azure.

    :param ex_cloud_service_name: Cloud Service name
    :type ex_cloud_service_name: ``str``

    :rtype: ``list`` of :class:`Node`
    """
    response = self._perform_get(
        self._get_hosted_service_path(ex_cloud_service_name) +
        '?embed-detail=True',
        None
    )
    self.raise_for_response(response, 200)
    data = self._parse_response(response, HostedService)
    # Collect the first deployment's virtual IPs (if any) so they can be
    # surfaced as the nodes' public IPs.
    vips = None
    if (len(data.deployments) > 0 and
            data.deployments[0].virtual_ips is not None):
        vips = [vip.address for vip in data.deployments[0].virtual_ips]
    try:
        # Only the first deployment is inspected; a service without any
        # deployment yields an empty node list via the IndexError below.
        return [
            self._to_node(n, ex_cloud_service_name, vips)
            for n in data.deployments[0].role_instance_list
        ]
    except IndexError:
        return []
def reboot_node(self, node, ex_cloud_service_name=None,
                ex_deployment_slot=None):
    """
    Reboots a node.

    ex_cloud_service_name parameter is used to scope the request
    to a specific Cloud Service. This is a required parameter as
    nodes cannot exist outside of a Cloud Service nor be shared
    between a Cloud Service within Azure.

    :param ex_cloud_service_name: Cloud Service name
    :type ex_cloud_service_name: ``str``

    :param ex_deployment_slot: Options are "production" (default)
                               or "Staging". (Optional)
    :type ex_deployment_slot: ``str``

    :rtype: ``bool``
    """
    # Fall back to the service name recorded on the node's extra dict
    # (set by create_node / _to_node).
    if ex_cloud_service_name is None:
        if node.extra is not None:
            ex_cloud_service_name = node.extra.get(
                'ex_cloud_service_name'
            )
    if not ex_cloud_service_name:
        raise ValueError("ex_cloud_service_name is required.")
    if not ex_deployment_slot:
        ex_deployment_slot = "Production"
    _deployment_name = self._get_deployment(
        service_name=ex_cloud_service_name,
        deployment_slot=ex_deployment_slot
    ).name
    try:
        response = self._perform_post(
            self._get_deployment_path_using_name(
                ex_cloud_service_name,
                _deployment_name
            ) + '/roleinstances/' + _str(node.id) + '?comp=reboot',
            ''
        )
        self.raise_for_response(response, 202)
        # The reboot is asynchronous; success is reported only after the
        # async operation is confirmed.
        if self._parse_response_for_async_op(response):
            return True
        else:
            return False
    except Exception:
        # NOTE(review): any failure (HTTP error, parse error, ...) is
        # collapsed into a False return; callers cannot distinguish the
        # cause. Deliberate best-effort style -- kept as-is.
        return False
def list_volumes(self, node=None):
    """
    Lists volumes of the disks in the image repository that are
    associated with the specified subscription.

    Pass Node object to scope the list of volumes to a single
    instance.

    :rtype: ``list`` of :class:`StorageVolume`
    """
    disks = self._perform_get(self._get_disk_path(), Disks)
    return [self._to_volume(volume=disk, node=node) for disk in disks]
def create_node(self, name, size, image, ex_cloud_service_name,
                ex_storage_service_name=None, ex_new_deployment=False,
                ex_deployment_slot="Production", ex_deployment_name=None,
                ex_admin_user_id="azureuser", ex_custom_data=None,
                ex_virtual_network_name=None, ex_network_config=None,
                auth=None, **kwargs):
    """
    Create Azure Virtual Machine

    Reference: http://bit.ly/1fIsCb7
    [www.windowsazure.com/en-us/documentation/]

    We default to:

    + 3389/TCP - RDP - 1st Microsoft instance.
    + RANDOM/TCP - RDP - All succeeding Microsoft instances.

    + 22/TCP - SSH - 1st Linux instance
    + RANDOM/TCP - SSH - All succeeding Linux instances.

    The above replicates the standard behavior of the Azure UI.
    You can retrieve the assigned ports to each instance by
    using the following private function:

    _get_endpoint_ports(service_name)
    Returns public,private port key pair.

    @inherits: :class:`NodeDriver.create_node`

    :keyword image: The image to use when creating this node
    :type image: `NodeImage`

    :keyword size: The size of the instance to create
    :type size: `NodeSize`

    :keyword ex_cloud_service_name: Required.
                                    Name of the Azure Cloud Service.
    :type ex_cloud_service_name: ``str``

    :keyword ex_storage_service_name: Optional:
                                      Name of the Azure Storage Service.
    :type ex_storage_service_name: ``str``

    :keyword ex_new_deployment: Optional. Tells azure to create a new
                                deployment rather than add to an existing
                                one.
    :type ex_new_deployment: ``boolean``

    :keyword ex_deployment_slot: Optional: Valid values: production|
                                 staging. Defaults to production.
    :type ex_deployment_slot: ``str``

    :keyword ex_deployment_name: Optional. The name of the deployment.
                                 If this is not passed in we default
                                 to using the Cloud Service name.
    :type ex_deployment_name: ``str``

    :type ex_custom_data: ``str``
    :keyword ex_custom_data: Optional script or other data which is
                             injected into the VM when it's beginning
                             provisioned.

    :keyword ex_admin_user_id: Optional. Defaults to 'azureuser'.
    :type ex_admin_user_id: ``str``

    :keyword ex_virtual_network_name: Optional. If this is not passed
                                      in no virtual network is used.
    :type ex_virtual_network_name: ``str``

    :keyword ex_network_config: Optional. The ConfigurationSet to use for
                                network configuration
    :type ex_network_config: `ConfigurationSet`
    """
    # TODO: Refactor this method to make it more readable, split it into
    # multiple smaller methods
    auth = self._get_and_check_auth(auth)
    password = auth.password

    if not isinstance(size, NodeSize):
        raise ValueError('Size must be an instance of NodeSize')

    if not isinstance(image, NodeImage):
        raise ValueError(
            "Image must be an instance of NodeImage, "
            "produced by list_images()"
        )

    # Retrieve a list of currently available nodes for the provided cloud
    # service
    node_list = self.list_nodes(
        ex_cloud_service_name=ex_cloud_service_name
    )

    if ex_network_config is None:
        network_config = ConfigurationSet()
    else:
        network_config = ex_network_config
    network_config.configuration_set_type = 'NetworkConfiguration'

    # Base64 encode custom data if provided
    if ex_custom_data:
        ex_custom_data = self._encode_base64(data=ex_custom_data)

    # We do this because we need to pass a Configuration to the
    # method. This will be either Linux or Windows.
    #
    # BUGFIX: the original code called
    # WINDOWS_SERVER_REGEX.search(image.id, re.I). On a *compiled*
    # pattern the second positional argument is the start position
    # ("pos"), not a flags value -- re.I (== 2) silently skipped the
    # first two characters of the image id and case-insensitive matching
    # was never applied. The module-level re.search with an explicit
    # flags argument gives the intended behavior.
    if re.search(WINDOWS_SERVER_REGEX.pattern, image.id, re.I):
        machine_config = WindowsConfigurationSet(
            computer_name=name,
            admin_password=password,
            admin_user_name=ex_admin_user_id
        )

        machine_config.domain_join = None

        # First Windows instance gets the standard RDP port; any further
        # instance gets a random, unused public port.
        if not node_list or ex_new_deployment:
            port = "3389"
        else:
            port = random.randint(41952, 65535)
            endpoints = self._get_deployment(
                service_name=ex_cloud_service_name,
                deployment_slot=ex_deployment_slot
            )

            for instances in endpoints.role_instance_list:
                ports = [ep.public_port for ep in
                         instances.instance_endpoints]

                while port in ports:
                    port = random.randint(41952, 65535)

        endpoint = ConfigurationSetInputEndpoint(
            name='Remote Desktop',
            protocol='tcp',
            port=port,
            local_port='3389',
            load_balanced_endpoint_set_name=None,
            enable_direct_server_return=False
        )
    else:
        # Linux: same scheme, but for SSH.
        if not node_list or ex_new_deployment:
            port = "22"
        else:
            port = random.randint(41952, 65535)
            endpoints = self._get_deployment(
                service_name=ex_cloud_service_name,
                deployment_slot=ex_deployment_slot
            )

            for instances in endpoints.role_instance_list:
                ports = []
                if instances.instance_endpoints is not None:
                    for ep in instances.instance_endpoints:
                        ports += [ep.public_port]

                while port in ports:
                    port = random.randint(41952, 65535)

        endpoint = ConfigurationSetInputEndpoint(
            name='SSH',
            protocol='tcp',
            port=port,
            local_port='22',
            load_balanced_endpoint_set_name=None,
            enable_direct_server_return=False
        )
        machine_config = LinuxConfigurationSet(
            name,
            ex_admin_user_id,
            password,
            False,
            ex_custom_data
        )

    network_config.input_endpoints.items.append(endpoint)

    _storage_location = self._get_cloud_service_location(
        service_name=ex_cloud_service_name
    )

    if ex_storage_service_name is None:
        # Derive a legal storage account name from the cloud service name
        # (lowercase alphanumerics only).
        ex_storage_service_name = ex_cloud_service_name
        ex_storage_service_name = re.sub(
            r'[\W_-]+',
            '',
            ex_storage_service_name.lower(),
            flags=re.UNICODE
        )

        if self._is_storage_service_unique(
                service_name=ex_storage_service_name):
            self._create_storage_account(
                service_name=ex_storage_service_name,
                location=_storage_location.service_location,
                is_affinity_group=_storage_location.is_affinity_group
            )

    # OK, bit annoying here. You must create a deployment before
    # you can create an instance; however, the deployment function
    # creates the first instance, but all subsequent instances
    # must be created using the add_role function.
    #
    # So, yeah, annoying.
    if not node_list or ex_new_deployment:
        # This is the first node in this cloud service.

        if not ex_deployment_name:
            ex_deployment_name = ex_cloud_service_name

        vm_image_id = None
        disk_config = None

        if image.extra.get('vm_image', False):
            vm_image_id = image.id
            #  network_config = None
        else:
            blob_url = "http://%s.blob.core.windows.net" % (
                ex_storage_service_name)

            # Azure's pattern in the UI.
            disk_name = "%s-%s-%s.vhd" % (
                ex_cloud_service_name,
                name,
                time.strftime("%Y-%m-%d")
            )

            media_link = "%s/vhds/%s" % (blob_url, disk_name)

            disk_config = OSVirtualHardDisk(image.id, media_link)

        response = self._perform_post(
            self._get_deployment_path_using_name(ex_cloud_service_name),
            AzureXmlSerializer.virtual_machine_deployment_to_xml(
                ex_deployment_name,
                ex_deployment_slot,
                name,
                name,
                machine_config,
                disk_config,
                'PersistentVMRole',
                network_config,
                None,
                None,
                size.id,
                ex_virtual_network_name,
                vm_image_id
            )
        )
        self.raise_for_response(response, 202)
        self._ex_complete_async_azure_operation(response)
    else:
        _deployment_name = self._get_deployment(
            service_name=ex_cloud_service_name,
            deployment_slot=ex_deployment_slot
        ).name

        vm_image_id = None
        disk_config = None

        if image.extra.get('vm_image', False):
            vm_image_id = image.id
            #  network_config = None
        else:
            blob_url = "http://%s.blob.core.windows.net" % (
                ex_storage_service_name
            )
            disk_name = "%s-%s-%s.vhd" % (
                ex_cloud_service_name,
                name,
                time.strftime("%Y-%m-%d")
            )
            media_link = "%s/vhds/%s" % (blob_url, disk_name)
            disk_config = OSVirtualHardDisk(image.id, media_link)

        path = self._get_role_path(ex_cloud_service_name, _deployment_name)
        body = AzureXmlSerializer.add_role_to_xml(
            name,  # role_name
            machine_config,  # system_config
            disk_config,  # os_virtual_hard_disk
            'PersistentVMRole',  # role_type
            network_config,  # network_config
            None,  # availability_set_name
            None,  # data_virtual_hard_disks
            vm_image_id,  # vm_image
            size.id  # role_size
        )

        response = self._perform_post(path, body)
        self.raise_for_response(response, 202)
        self._ex_complete_async_azure_operation(response)

    return Node(
        id=name,
        name=name,
        state=NodeState.PENDING,
        public_ips=[],
        private_ips=[],
        driver=self.connection.driver,
        extra={
            'ex_cloud_service_name': ex_cloud_service_name
        }
    )
def destroy_node(self, node, ex_cloud_service_name=None,
                 ex_deployment_slot="Production"):
    """
    Remove Azure Virtual Machine

    This removes the instance, but does not
    remove the disk. You will need to use destroy_volume.
    Azure sometimes has an issue where it will hold onto
    a blob lease for an extended amount of time.

    :keyword ex_cloud_service_name: Required.
                                    Name of the Azure Cloud Service.
    :type ex_cloud_service_name: ``str``

    :keyword ex_deployment_slot: Optional: The name of the deployment
                                 slot. If this is not passed in we
                                 default to production.
    :type ex_deployment_slot: ``str``
    """
    if not isinstance(node, Node):
        raise ValueError("A libcloud Node object is required.")

    # Prefer the service name recorded on the node at creation time.
    if ex_cloud_service_name is None and node.extra is not None:
        ex_cloud_service_name = node.extra.get('ex_cloud_service_name')

    if not ex_cloud_service_name:
        raise ValueError("Unable to get ex_cloud_service_name from Node.")

    _deployment = self._get_deployment(
        service_name=ex_cloud_service_name,
        deployment_slot=ex_deployment_slot
    )

    _deployment_name = _deployment.name

    _server_deployment_count = len(_deployment.role_instance_list)

    # If this is the only role, delete the whole deployment instead of
    # just the role.
    if _server_deployment_count > 1:
        path = self._get_role_path(
            ex_cloud_service_name,
            _deployment_name,
            node.id
        )
    else:
        path = self._get_deployment_path_using_name(
            ex_cloud_service_name,
            _deployment_name
        )

    # NOTE(review): '?comp=media' asks Azure to also delete the attached
    # media, yet the docstring above claims the disk is kept -- confirm
    # which statement is accurate before relying on either.
    path += '?comp=media'

    self._perform_delete(path)

    return True
def ex_list_cloud_services(self):
    """Return all cloud (hosted) services under the subscription."""
    path = self._get_hosted_service_path()
    return self._perform_get(path, HostedServices)
def ex_create_cloud_service(self, name, location, description=None,
                            extended_properties=None):
    """
    Create an azure cloud service.

    :param name: Name of the service to create
    :type name: ``str``

    :param location: Standard azure location string
    :type location: ``str``

    :param description: Optional description
    :type description: ``str``

    :param extended_properties: Optional extended_properties
    :type extended_properties: ``dict``

    :rtype: ``bool``
    """
    # The service label is the base64-encoded service name.
    body = AzureXmlSerializer.create_hosted_service_to_xml(
        name,
        self._encode_base64(name),
        description,
        location,
        None,
        extended_properties
    )
    response = self._perform_cloud_service_create(
        self._get_hosted_service_path(),
        body
    )

    self.raise_for_response(response, 201)

    return True
def ex_destroy_cloud_service(self, name):
    """
    Delete an azure cloud service.

    :param name: Name of the cloud service to destroy.
    :type name: ``str``

    :rtype: ``bool``
    """
    # A 200 response confirms deletion; anything else raises.
    path = self._get_hosted_service_path(name)
    response = self._perform_cloud_service_delete(path)
    self.raise_for_response(response, 200)
    return True
def ex_add_instance_endpoints(self, node, endpoints,
                              ex_deployment_slot="Production"):
    """Append *endpoints* to the node's already-configured endpoints."""
    combined = []
    for existing in node.extra['instance_endpoints']:
        combined.append({
            "name": existing.name,
            "protocol": existing.protocol,
            "port": existing.public_port,
            "local_port": existing.local_port,
        })
    combined.extend(endpoints)

    return self.ex_set_instance_endpoints(node, combined,
                                          ex_deployment_slot)
def ex_set_instance_endpoints(self, node, endpoints,
                              ex_deployment_slot="Production"):
    """
    Replace the node's input endpoints with *endpoints*.

    Each endpoint is a dict whose keys match the
    ConfigurationSetInputEndpoint constructor arguments.

    For example::

        endpoint = ConfigurationSetInputEndpoint(
            name='SSH',
            protocol='tcp',
            port=port,
            local_port='22',
            load_balanced_endpoint_set_name=None,
            enable_direct_server_return=False
        )
        {
            'name': 'SSH',
            'protocol': 'tcp',
            'port': port,
            'local_port': '22'
        }

    NOTE(review): unlike the other ``ex_`` mutators this method returns
    ``None`` on success (no ``return True``) -- kept as-is for
    backward compatibility.
    """
    ex_cloud_service_name = node.extra['ex_cloud_service_name']
    vm_role_name = node.name

    network_config = ConfigurationSet()
    network_config.configuration_set_type = 'NetworkConfiguration'

    for endpoint in endpoints:
        new_endpoint = ConfigurationSetInputEndpoint(**endpoint)
        network_config.input_endpoints.items.append(new_endpoint)

    _deployment_name = self._get_deployment(
        service_name=ex_cloud_service_name,
        deployment_slot=ex_deployment_slot
    ).name

    # Only the network configuration element of the role is updated; all
    # other add_role_to_xml arguments are left unset.
    response = self._perform_put(
        self._get_role_path(
            ex_cloud_service_name,
            _deployment_name,
            vm_role_name
        ),
        AzureXmlSerializer.add_role_to_xml(
            None,  # role_name
            None,  # system_config
            None,  # os_virtual_hard_disk
            'PersistentVMRole',  # role_type
            network_config,  # network_config
            None,  # availability_set_name
            None,  # data_virtual_hard_disks
            None,  # vm_image
            None  # role_size
        )
    )

    self.raise_for_response(response, 202)
def ex_create_storage_service(self, name, location,
                              description=None, affinity_group=None,
                              extended_properties=None):
    """
    Create an azure storage service.

    :param name: Name of the service to create
    :type name: ``str``

    :param location: Standard azure location string
    :type location: ``str``

    :param description: (Optional) Description of storage service.
    :type description: ``str``

    :param affinity_group: (Optional) Azure affinity group.
    :type affinity_group: ``str``

    :param extended_properties: (Optional) Additional configuration
                                options support by Azure.
    :type extended_properties: ``dict``

    :rtype: ``bool``
    """
    # The service label is the base64-encoded service name.
    body = AzureXmlSerializer.create_storage_service_to_xml(
        service_name=name,
        label=self._encode_base64(name),
        description=description,
        location=location,
        affinity_group=affinity_group,
        extended_properties=extended_properties
    )
    response = self._perform_storage_service_create(
        self._get_storage_service_path(),
        body
    )

    # Creation is asynchronous; Azure acknowledges with 202 Accepted.
    self.raise_for_response(response, 202)

    return True
def ex_destroy_storage_service(self, name):
    """
    Destroy storage service. Storage service must not have any active
    blobs. Sometimes Azure likes to hold onto volumes after they are
    deleted for an inordinate amount of time, so sleep before calling
    this method after volume deletion.

    :param name: Name of storage service.
    :type name: ``str``

    :rtype: ``bool``
    """
    path = self._get_storage_service_path(name)
    response = self._perform_storage_service_delete(path)
    self.raise_for_response(response, 200)
    return True
"""
Functions not implemented
"""
def create_volume_snapshot(self):
    """Unsupported on this driver; always raises NotImplementedError."""
    message = 'You cannot create snapshots of Azure VMs at this time.'
    raise NotImplementedError(message)
def attach_volume(self):
    """Unsupported on this driver; always raises NotImplementedError."""
    message = 'attach_volume is not supported at this time.'
    raise NotImplementedError(message)
def create_volume(self):
    """Unsupported on this driver; always raises NotImplementedError."""
    message = 'create_volume is not supported at this time.'
    raise NotImplementedError(message)
def detach_volume(self):
    """Unsupported on this driver; always raises NotImplementedError."""
    message = 'detach_volume is not supported at this time.'
    raise NotImplementedError(message)
def destroy_volume(self):
    """Unsupported on this driver; always raises NotImplementedError."""
    message = 'destroy_volume is not supported at this time.'
    raise NotImplementedError(message)
"""
Private Functions
"""
def _perform_cloud_service_create(self, path, data):
    """POST *data* to *path* on the service management endpoint."""
    request = AzureHTTPRequest()
    request.method = 'POST'
    request.host = AZURE_SERVICE_MANAGEMENT_HOST
    request.path = path
    request.body = data
    request.path, request.query = self._update_request_uri_query(request)
    request.headers = self._update_management_header(request)
    return self._perform_request(request)
def _perform_cloud_service_delete(self, path):
    """Issue a DELETE for *path* on the service management endpoint."""
    request = AzureHTTPRequest()
    request.method = 'DELETE'
    request.host = AZURE_SERVICE_MANAGEMENT_HOST
    request.path = path
    request.path, request.query = self._update_request_uri_query(request)
    request.headers = self._update_management_header(request)
    return self._perform_request(request)
def _perform_storage_service_create(self, path, data):
    """POST *data* to *path* to create a storage service."""
    request = AzureHTTPRequest()
    request.method = 'POST'
    request.host = AZURE_SERVICE_MANAGEMENT_HOST
    request.path = path
    request.body = data
    request.path, request.query = self._update_request_uri_query(request)
    request.headers = self._update_management_header(request)
    return self._perform_request(request)
def _perform_storage_service_delete(self, path):
    """Issue a DELETE for *path* to remove a storage service."""
    request = AzureHTTPRequest()
    request.method = 'DELETE'
    request.host = AZURE_SERVICE_MANAGEMENT_HOST
    request.path = path
    request.path, request.query = self._update_request_uri_query(request)
    request.headers = self._update_management_header(request)
    return self._perform_request(request)
def _to_node(self, data, ex_cloud_service_name=None, virtual_ips=None):
    """
    Convert an Azure role-instance response object into a Node.

    The first instance endpoint's VIP (when present) replaces any
    *virtual_ips* passed in; Remote Desktop / SSH public ports are
    surfaced in ``extra``.
    """
    rdp_port = ''
    ssh_port = ''
    public_ips = virtual_ips or []
    endpoints = data.instance_endpoints
    if endpoints is not None:
        if len(endpoints) >= 1:
            public_ips = [endpoints[0].vip]
        for endpoint in endpoints:
            if endpoint.name == 'Remote Desktop':
                rdp_port = endpoint.public_port
            if endpoint.name == "SSH":
                ssh_port = endpoint.public_port
    node_state = self.NODE_STATE_MAP.get(
        data.instance_status,
        NodeState.UNKNOWN
    )
    extra = {
        'instance_endpoints': endpoints,
        'remote_desktop_port': rdp_port,
        'ssh_port': ssh_port,
        'power_state': data.power_state,
        'instance_size': data.instance_size,
        'ex_cloud_service_name': ex_cloud_service_name
    }
    return Node(
        id=data.role_name,
        name=data.role_name,
        state=node_state,
        public_ips=public_ips,
        private_ips=[data.ip_address],
        driver=self.connection.driver,
        extra=extra
    )
def _to_location(self, data):
    """
    Convert an Azure location response object into an AzureNodeLocation.

    The country is derived from substrings of the display name; when
    several match, the later entry in the sequence wins (same precedence
    as the original cascade of checks).
    """
    country = data.display_name
    for region in ("Asia", "Europe", "US", "Japan", "Brazil"):
        if region in data.display_name:
            country = region
    vm_role_sizes = data.compute_capabilities.virtual_machines_role_sizes
    return AzureNodeLocation(
        id=data.name,
        name=data.display_name,
        country=country,
        driver=self.connection.driver,
        available_services=data.available_services,
        virtual_machine_role_sizes=vm_role_sizes
    )
def _to_node_size(self, data):
    """Convert one AZURE_COMPUTE_INSTANCE_TYPES entry into a NodeSize."""
    extra = {
        'max_data_disks': data["max_data_disks"],
        'cores': data["cores"]
    }
    return NodeSize(
        id=data["id"],
        name=data["name"],
        ram=data["ram"],
        disk=data["disk"],
        bandwidth=data["bandwidth"],
        price=data["price"],
        driver=self.connection.driver,
        extra=extra
    )
def _to_image(self, data):
    """Convert an OS image response object into a NodeImage."""
    extra = {
        'os': data.os,
        'category': data.category,
        'description': data.description,
        'location': data.location,
        'affinity_group': data.affinity_group,
        'media_link': data.media_link,
        'vm_image': False
    }
    return NodeImage(
        id=data.name,
        name=data.label,
        driver=self.connection.driver,
        extra=extra
    )
def _vm_to_image(self, data):
    """Convert a VM image response object into a NodeImage."""
    extra = {
        'os': data.os_disk_configuration.os,
        'category': data.category,
        'location': data.location,
        'media_link': data.os_disk_configuration.media_link,
        'affinity_group': data.affinity_group,
        'deployment_name': data.deployment_name,
        'vm_image': True
    }
    return NodeImage(
        id=data.name,
        name=data.label,
        driver=self.connection.driver,
        extra=extra
    )
def _to_volume(self, volume, node):
    """
    Convert an Azure disk response object into a StorageVolume.

    When *node* is given, a volume is returned only if it is attached to
    that node's role; otherwise None is returned (matching the original
    implicit fall-through).
    """
    extra = {
        'affinity_group': volume.affinity_group,
        'os': volume.os,
        'location': volume.location,
        'media_link': volume.media_link,
        'source_image_name': volume.source_image_name
    }
    attached = volume.attached_to
    role_name = getattr(attached, 'role_name', None)
    hosted_service_name = getattr(attached, 'hosted_service_name', None)
    deployment_name = getattr(attached, 'deployment_name', None)
    # Only record attachment details that are actually present.
    for key, value in (('role_name', role_name),
                       ('hosted_service_name', hosted_service_name),
                       ('deployment_name', deployment_name)):
        if value is not None:
            extra[key] = value
    if node and not (role_name is not None and role_name == node.id):
        # Attached to a different role than the requested node.
        return None
    return StorageVolume(
        id=volume.name,
        name=volume.name,
        size=int(volume.logical_disk_size_in_gb),
        driver=self.connection.driver,
        extra=extra
    )
def _get_deployment(self, **kwargs):
    """
    Fetch the Deployment identified by kwargs['service_name'] and
    kwargs['deployment_slot'].

    :raises LibcloudError: (via raise_for_response) on non-200 replies.
    """
    service_name = kwargs['service_name']
    deployment_slot = kwargs['deployment_slot']
    path = self._get_deployment_path_using_slot(
        service_name,
        deployment_slot
    )
    response = self._perform_get(path, None)
    self.raise_for_response(response, 200)
    return self._parse_response(response, Deployment)
def _get_cloud_service_location(self, service_name=None):
    """
    Resolve the location of a cloud service.

    Returns ``self.service_location(is_affinity_group, value)`` where the
    first field is True when the service is placed via an affinity group,
    or None when neither an affinity group nor a location is set.

    :raises ValueError: if *service_name* is falsy.
    """
    if not service_name:
        raise ValueError("service_name is required.")
    res = self._perform_get(
        '%s?embed-detail=False' % (
            self._get_hosted_service_path(service_name)
        ),
        HostedService
    )
    _affinity_group = res.hosted_service_properties.affinity_group
    _cloud_service_location = res.hosted_service_properties.location
    # Fixed: the original compared with "is not ''" — an identity check
    # against a string literal, which is implementation-dependent and a
    # SyntaxWarning on modern CPython. Equality is what was intended.
    if _affinity_group is not None and _affinity_group != '':
        return self.service_location(True, _affinity_group)
    elif _cloud_service_location is not None:
        return self.service_location(False, _cloud_service_location)
    else:
        return None
def _is_storage_service_unique(self, service_name=None):
    """
    Ask Azure whether *service_name* is an available storage account name.

    :raises ValueError: if *service_name* is falsy.
    """
    if not service_name:
        raise ValueError("service_name is required.")
    path = '%s/operations/isavailable/%s' % (
        self._get_storage_service_path(),
        _str(service_name)
    )
    _check_availability = self._perform_get(path, AvailabilityResponse)
    self.raise_for_response(_check_availability, 200)
    return _check_availability.result
def _create_storage_account(self, **kwargs):
    """
    Create a storage account and wait for the asynchronous Azure
    operation to finish before returning.

    Expected kwargs: ``service_name``, ``location`` and
    ``is_affinity_group`` (bool). When ``is_affinity_group`` is True the
    ``location`` value is passed to the serializer in the affinity-group
    position; otherwise it is passed as the plain location.
    """
    if kwargs['is_affinity_group'] is True:
        response = self._perform_post(
            self._get_storage_service_path(),
            AzureXmlSerializer.create_storage_service_input_to_xml(
                kwargs['service_name'],
                kwargs['service_name'],
                self._encode_base64(kwargs['service_name']),
                kwargs['location'],  # affinity group slot
                None,  # Location
                True,  # geo_replication_enabled
                None  # extended_properties
            )
        )
        self.raise_for_response(response, 202)
    else:
        response = self._perform_post(
            self._get_storage_service_path(),
            AzureXmlSerializer.create_storage_service_input_to_xml(
                kwargs['service_name'],
                kwargs['service_name'],
                self._encode_base64(kwargs['service_name']),
                None,  # Affinity Group
                kwargs['location'],  # Location
                True,  # geo_replication_enabled
                None  # extended_properties
            )
        )
        self.raise_for_response(response, 202)
    # We need to wait for this to be created before we can
    # create the storage container and the instance.
    self._ex_complete_async_azure_operation(
        response,
        "create_storage_account"
    )
def _get_operation_status(self, request_id):
    """GET the Operation status record for the given async *request_id*."""
    path = '/' + self.subscription_id + '/operations/' + _str(request_id)
    return self._perform_get(path, Operation)
def _perform_get(self, path, response_type):
    """GET *path*; parse the body into *response_type* when one is given."""
    req = AzureHTTPRequest()
    req.method = 'GET'
    req.host = AZURE_SERVICE_MANAGEMENT_HOST
    req.path = path
    req.path, req.query = self._update_request_uri_query(req)
    req.headers = self._update_management_header(req)
    response = self._perform_request(req)
    if response_type is None:
        return response
    return self._parse_response(response, response_type)
def _perform_post(self, path, body, response_type=None, is_async=False):
    """
    POST *body* (serialized via _get_request_body) to *path*.

    The last parameter was renamed from ``async`` to ``is_async``:
    ``async`` became a reserved keyword in Python 3.7, making the
    original signature a SyntaxError. ``response_type`` and ``is_async``
    are not used here; they are kept for signature parity with the other
    ``_perform_*`` helpers.
    """
    request = AzureHTTPRequest()
    request.method = 'POST'
    request.host = AZURE_SERVICE_MANAGEMENT_HOST
    request.path = path
    request.body = ensure_string(self._get_request_body(body))
    request.path, request.query = self._update_request_uri_query(request)
    request.headers = self._update_management_header(request)
    response = self._perform_request(request)
    return response
def _perform_put(self, path, body, response_type=None, is_async=False):
    """
    PUT *body* (serialized via _get_request_body) to *path*.

    The last parameter was renamed from ``async`` to ``is_async``:
    ``async`` became a reserved keyword in Python 3.7, making the
    original signature a SyntaxError. ``response_type`` and ``is_async``
    are not used here; they are kept for signature parity with the other
    ``_perform_*`` helpers.
    """
    request = AzureHTTPRequest()
    request.method = 'PUT'
    request.host = AZURE_SERVICE_MANAGEMENT_HOST
    request.path = path
    request.body = ensure_string(self._get_request_body(body))
    request.path, request.query = self._update_request_uri_query(request)
    request.headers = self._update_management_header(request)
    response = self._perform_request(request)
    return response
def _perform_delete(self, path, is_async=False):
    """
    DELETE *path*; expects a 202 (Accepted) reply.

    The parameter was renamed from ``async`` to ``is_async``: ``async``
    became a reserved keyword in Python 3.7, making the original
    signature a SyntaxError. When *is_async* is True the parsed
    AsynchronousOperationResult is returned, otherwise None.

    :raises LibcloudError: (via raise_for_response) on non-202 replies.
    """
    request = AzureHTTPRequest()
    request.method = 'DELETE'
    request.host = AZURE_SERVICE_MANAGEMENT_HOST
    request.path = path
    request.path, request.query = self._update_request_uri_query(request)
    request.headers = self._update_management_header(request)
    response = self._perform_request(request)
    self.raise_for_response(response, 202)
    if is_async:
        return self._parse_response_for_async_op(response)
def _perform_request(self, request):
    """
    Execute *request* over the driver connection.

    An AzureRedirectException is handled by retrying the same request
    against the host Azure redirected to. The original also had an
    ``except Exception as e: raise e`` clause, which was a no-op that
    only truncated tracebacks on Python 2 — it has been removed.
    """
    try:
        return self.connection.request(
            action=request.path,
            data=request.body,
            headers=request.headers,
            method=request.method
        )
    except AzureRedirectException as e:
        # Follow the management-endpoint redirect and retry.
        parsed_url = urlparse.urlparse(e.location)
        request.host = parsed_url.netloc
        return self._perform_request(request)
def _update_request_uri_query(self, request):
    """
    pulls the query string out of the URI and moves it into
    the query portion of the request object. If there are already
    query parameters on the request the parameters in the URI will
    appear after the existing parameters

    Returns the (path, query) pair that the caller assigns back onto
    the request.
    """
    if '?' in request.path:
        # Split off the query string and fold each name=value pair into
        # request.query; parameters without '=' are dropped.
        request.path, _, query_string = request.path.partition('?')
        if query_string:
            query_params = query_string.split('&')
            for query in query_params:
                if '=' in query:
                    name, _, value = query.partition('=')
                    request.query.append((name, value))
    request.path = url_quote(request.path, '/()$=\',')
    # add encoded queries to request.path.
    if request.query:
        request.path += '?'
        for name, value in request.query:
            if value is not None:
                # Each pair is appended with a trailing '&'; the final
                # '&' is stripped below by the [:-1] slice.
                request.path += '%s=%s%s' % (
                    name,
                    url_quote(value, '/()$=\','),
                    '&'
                )
        request.path = request.path[:-1]
    return request.path, request.query
def _update_management_header(self, request):
    """
    Add additional headers for management.

    Returns the (possibly updated) request.headers mapping.
    """
    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
        request.headers['Content-Length'] = str(len(request.body))
    # append additional headers base on the service
    # request.headers.append(('x-ms-version', X_MS_VERSION))
    # if it is not GET or HEAD request, must set content-type.
    if request.method not in ['GET', 'HEAD']:
        # for/else: the else arm runs only when no existing header key
        # matched 'content-type', i.e. the loop was not broken out of.
        for key in request.headers:
            if 'content-type' == key.lower():
                break
        else:
            request.headers['Content-Type'] = 'application/xml'
    return request.headers
def _parse_response(self, response, return_type):
    """
    Parse the HTTPResponse's body and fill all the data into a class of
    return_type.
    """
    return self._parse_response_body_from_xml_text(response, return_type)
def _parse_response_body_from_xml_text(self, response, return_type):
    """
    parse the xml and fill all the data into a class of return_type

    The XML root element is expected to be named after the return type's
    class name; each matching direct child of the document is folded
    into a single return_obj instance.
    """
    respbody = response.body
    doc = minidom.parseString(respbody)
    return_obj = return_type()
    for node in self._get_child_nodes(doc, return_type.__name__):
        self._fill_data_to_return_object(node, return_obj)
    # Note: We always explicitly assign status code to the custom return
    # type object
    return_obj.status = response.status
    return return_obj
def _get_child_nodes(self, node, tag_name):
return [childNode for childNode in node.getElementsByTagName(tag_name)
if childNode.parentNode == node]
def _fill_data_to_return_object(self, node, return_obj):
    """
    Populate *return_obj* from the DOM *node*.

    Each attribute of *return_obj* acts as a template: its current value's
    type (e.g. _ListOf, ScalarListOf, _DictOf, WindowsAzureData subclass,
    dict, _Base64String, or a plain scalar) selects how the matching XML
    child is deserialized and assigned back onto the attribute.
    """
    # Snapshot the attributes first: setattr below mutates return_obj.
    members = dict(vars(return_obj))
    for name, value in members.items():
        if isinstance(value, _ListOf):
            setattr(
                return_obj,
                name,
                self._fill_list_of(
                    node,
                    value.list_type,
                    value.xml_element_name
                )
            )
        elif isinstance(value, ScalarListOf):
            setattr(
                return_obj,
                name,
                self._fill_scalar_list_of(
                    node,
                    value.list_type,
                    self._get_serialization_name(name),
                    value.xml_element_name
                )
            )
        elif isinstance(value, _DictOf):
            setattr(
                return_obj,
                name,
                self._fill_dict_of(
                    node,
                    self._get_serialization_name(name),
                    value.pair_xml_element_name,
                    value.key_xml_element_name,
                    value.value_xml_element_name
                )
            )
        elif isinstance(value, WindowsAzureData):
            setattr(
                return_obj,
                name,
                self._fill_instance_child(node, name, value.__class__)
            )
        elif isinstance(value, dict):
            setattr(
                return_obj,
                name,
                self._fill_dict(
                    node,
                    self._get_serialization_name(name)
                )
            )
        elif isinstance(value, _Base64String):
            value = self._fill_data_minidom(node, name, '')
            if value is not None:
                value = self._decode_base64_to_text(value)
            # always set the attribute,
            # so we don't end up returning an object
            # with type _Base64String
            setattr(return_obj, name, value)
        else:
            # Plain scalar: keep the existing value when the element is
            # absent (only overwrite on a non-None parse result).
            value = self._fill_data_minidom(node, name, value)
            if value is not None:
                setattr(return_obj, name, value)
def _fill_list_of(self, xmldoc, element_type, xml_element_name):
    """
    Deserialize every direct child of *xmldoc* named *xml_element_name*
    into an *element_type* instance; returns the resulting list.
    """
    xmlelements = self._get_child_nodes(xmldoc, xml_element_name)
    return [
        self._parse_response_body_from_xml_node(xmlelement, element_type)
        for xmlelement in xmlelements
    ]
def _parse_response_body_from_xml_node(self, node, return_type):
    """
    parse the xml and fill all the data into a class of return_type
    """
    instance = return_type()
    self._fill_data_to_return_object(node, instance)
    return instance
def _fill_scalar_list_of(self,
                         xmldoc,
                         element_type,
                         parent_xml_element_name,
                         xml_element_name):
    """
    Collect scalar values of *element_type* from
    <parent_xml_element_name><xml_element_name>…</…></…> children.

    Returns [] when the parent element is absent (xmlelements stays an
    empty list and the comprehension yields nothing).
    """
    xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name)
    if xmlelements:
        xmlelements = self._get_child_nodes(
            xmlelements[0],
            xml_element_name
        )
        return [
            self._get_node_value(xmlelement, element_type)
            for xmlelement in xmlelements
        ]
def _get_node_value(self, xmlelement, data_type):
value = xmlelement.firstChild.nodeValue
if data_type is datetime:
return self._to_datetime(value)
elif data_type is bool:
return value.lower() != 'false'
else:
return data_type(value)
def _get_serialization_name(self, element_name):
    """
    Converts a Python name into a serializable name.

    Resolution order: explicit override table, 'x_ms_*' header style
    (underscores become hyphens), '*_id' suffix (becomes 'ID'), special
    hyphenated prefixes, then CamelCase of the underscore-split parts.
    """
    known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
    if known is not None:
        return known
    if element_name.startswith('x_ms_'):
        return element_name.replace('_', '-')
    if element_name.endswith('_id'):
        element_name = element_name.replace('_id', 'ID')
    for name in ['content_', 'last_modified', 'if_', 'cache_control']:
        if element_name.startswith(name):
            # '-_' keeps a '-' in front of each capitalized part after
            # the split('_') + capitalize pass below.
            element_name = element_name.replace('_', '-_')
    return ''.join(name.capitalize() for name in element_name.split('_'))
def _fill_dict_of(self, xmldoc, parent_xml_element_name,
                  pair_xml_element_name, key_xml_element_name,
                  value_xml_element_name):
    """
    Build a dict from XML of the shape
    <parent><pair><key>k</key><value>v</value></pair>…</parent>.

    Pairs missing either the key or the value element are skipped;
    returns {} when the parent element is absent.
    """
    return_obj = {}
    xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name)
    if xmlelements:
        xmlelements = self._get_child_nodes(
            xmlelements[0],
            pair_xml_element_name
        )
        for pair in xmlelements:
            keys = self._get_child_nodes(pair, key_xml_element_name)
            values = self._get_child_nodes(pair, value_xml_element_name)
            if keys and values:
                key = keys[0].firstChild.nodeValue
                value = values[0].firstChild.nodeValue
                return_obj[key] = value
    return return_obj
def _fill_instance_child(self, xmldoc, element_name, return_type):
    """
    Converts a child of the current dom element to the specified type.

    Returns None when no child matches the serialized *element_name*;
    otherwise only the first matching child is deserialized.
    """
    xmlelements = self._get_child_nodes(
        xmldoc,
        self._get_serialization_name(element_name)
    )
    if not xmlelements:
        return None
    return_obj = return_type()
    self._fill_data_to_return_object(xmlelements[0], return_obj)
    return return_obj
def _fill_dict(self, xmldoc, element_name):
    """
    Map child-node names to their text under the first *element_name*
    child; returns None when no such element exists.
    """
    containers = self._get_child_nodes(xmldoc, element_name)
    if not containers:
        return None
    result = {}
    for child in containers[0].childNodes:
        if child.firstChild:
            result[child.nodeName] = child.firstChild.nodeValue
    return result
def _encode_base64(self, data):
    """Base64-encode *data* (text is UTF-8 encoded first); returns text."""
    raw = data.encode('utf-8') if isinstance(data, _unicode_type) else data
    return base64.b64encode(raw).decode('utf-8')
def _decode_base64_to_bytes(self, data):
    """Base64-decode *data* (text is UTF-8 encoded first); returns bytes."""
    raw = data.encode('utf-8') if isinstance(data, _unicode_type) else data
    return base64.b64decode(raw)
def _decode_base64_to_text(self, data):
    """Base64-decode *data* and interpret the result as UTF-8 text."""
    return self._decode_base64_to_bytes(data).decode('utf-8')
def _fill_data_minidom(self, xmldoc, element_name, data_member):
    """
    Extract the text of the first child named after *element_name*
    (serialized form) and coerce it to the type of *data_member*.

    Returns None when the element is absent or empty; when *data_member*
    is None the raw text is returned unconverted.
    """
    xmlelements = self._get_child_nodes(
        xmldoc,
        self._get_serialization_name(element_name)
    )
    if not xmlelements or not xmlelements[0].childNodes:
        return None
    value = xmlelements[0].firstChild.nodeValue
    if data_member is None:
        return value
    elif isinstance(data_member, datetime):
        return self._to_datetime(value)
    elif type(data_member) is bool:
        # Only the literal string 'false' (any case) maps to False.
        return value.lower() != 'false'
    elif type(data_member) is str:
        return _real_unicode(value)
    else:
        # Fall back to the template value's own type as a converter.
        return type(data_member)(value)
def _to_datetime(self, strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
def _get_request_body(self, request_body):
    """
    Normalize *request_body* to bytes for the wire.

    WindowsAzureData instances are serialized to XML first; text is
    UTF-8 encoded; bytes pass through; anything else goes through str().
    None becomes an empty byte string.
    """
    if request_body is None:
        return b''
    if isinstance(request_body, WindowsAzureData):
        request_body = self._convert_class_to_xml(request_body)
    if isinstance(request_body, bytes):
        return request_body
    if isinstance(request_body, _unicode_type):
        return request_body.encode('utf-8')
    request_body = str(request_body)
    # On Python 2, str(...) can still be unicode-typed; re-check.
    if isinstance(request_body, _unicode_type):
        return request_body.encode('utf-8')
    return request_body
def _convert_class_to_xml(self, source, xml_prefix=True):
    """
    Serialize *source* (a WindowsAzureData tree) to an XML string.

    NOTE(review): ``ET.Element()`` is called with no tag argument, which
    raises TypeError on CPython — this path looks dead or broken;
    confirm before relying on it. ``xml_prefix`` is unused.
    """
    root = ET.Element()
    doc = self._construct_element_tree(source, root)
    result = ensure_string(ET.tostring(doc, encoding='utf-8',
                                       method='xml'))
    return result
def _construct_element_tree(self, source, etree):
    """
    Recursively append XML elements representing *source* onto *etree*
    and return *etree*.

    NOTE(review): for WindowsAzureData instances this appends an element
    named after the class both before and after the members, and child
    members are appended to *etree* rather than nested inside that
    element — presumably intentional for the consumer's format, but it
    does not produce a conventional nested tree; verify against callers.
    NOTE(review): ``ET.Element()`` with no tag (the None branch) raises
    TypeError on CPython.
    """
    if source is None:
        return ET.Element()
    if isinstance(source, list):
        for value in source:
            etree.append(self._construct_element_tree(value, etree))
    elif isinstance(source, WindowsAzureData):
        class_name = source.__class__.__name__
        etree.append(ET.Element(class_name))
        for name, value in vars(source).items():
            if value is not None:
                if (isinstance(value, list) or
                        isinstance(value, WindowsAzureData)):
                    etree.append(
                        self._construct_element_tree(value, etree)
                    )
                else:
                    ele = ET.Element(self._get_serialization_name(name))
                    ele.text = xml_escape(str(value))
                    etree.append(ele)
        etree.append(ET.Element(class_name))
    return etree
def _parse_response_for_async_op(self, response):
    """
    Extract the 'x-ms-request-id' header from *response* into an
    AsynchronousOperationResult; returns None for a None response.
    """
    if response is None:
        return None
    result = AsynchronousOperationResult()
    headers = response.headers or {}
    for name, value in headers.items():
        if name.lower() == 'x-ms-request-id':
            result.request_id = value
    return result
def _get_deployment_path_using_name(self, service_name,
                                    deployment_name=None):
    """Path to a hosted service's deployments, optionally by name."""
    resource = 'services/hostedservices/%s/deployments' % (
        _str(service_name)
    )
    return self._get_path(resource, deployment_name)
def _get_path(self, resource, name):
path = '/' + self.subscription_id + '/' + resource
if name is not None:
path += '/' + _str(name)
return path
def _get_image_path(self, image_name=None):
    """Path to the OS images collection, optionally a single image."""
    return self._get_path('services/images', image_name)
def _get_vmimage_path(self, image_name=None):
    """Path to the VM images collection, optionally a single image."""
    return self._get_path('services/vmimages', image_name)
def _get_hosted_service_path(self, service_name=None):
    """Path to the hosted services collection, optionally one service."""
    return self._get_path('services/hostedservices', service_name)
def _get_deployment_path_using_slot(self, service_name, slot=None):
    """Path to a hosted service's deployment slots, optionally one slot."""
    resource = 'services/hostedservices/%s/deploymentslots' % (
        _str(service_name)
    )
    return self._get_path(resource, slot)
def _get_disk_path(self, disk_name=None):
    """Path to the disks collection, optionally a single disk."""
    return self._get_path('services/disks', disk_name)
def _get_role_path(self, service_name, deployment_name, role_name=None):
    """Path to a deployment's roles, optionally a single role."""
    resource = 'services/hostedservices/%s/deployments/%s/roles' % (
        _str(service_name),
        deployment_name
    )
    return self._get_path(resource, role_name)
def _get_storage_service_path(self, service_name=None):
    """Path to the storage services collection, optionally one service."""
    return self._get_path('services/storageservices', service_name)
def _ex_complete_async_azure_operation(self, response=None,
                                       operation_type='create_node'):
    """
    Poll an asynchronous Azure operation until it leaves the
    'InProgress' state or a 5-minute timeout elapses.

    :raises LibcloudError: if the operation finishes in the
                           'Failed' state.
    """
    request_id = self._parse_response_for_async_op(response)
    operation_status = self._get_operation_status(request_id.request_id)
    timeout = 60 * 5
    waittime = 0
    interval = 5
    while operation_status.status == "InProgress" and waittime < timeout:
        # Fixed: the original polled with the AsynchronousOperationResult
        # object itself instead of its request_id attribute, so every
        # poll after the first queried a bogus operation id.
        operation_status = self._get_operation_status(
            request_id.request_id
        )
        if operation_status.status == "Succeeded":
            break
        waittime += interval
        time.sleep(interval)
    if operation_status.status == 'Failed':
        raise LibcloudError(
            'Message: Async request for operation %s has failed' %
            operation_type,
            driver=self.connection.driver
        )
def raise_for_response(self, response, valid_response):
    """Raise LibcloudError unless response.status equals *valid_response*."""
    if response.status == valid_response:
        return
    message = 'Message: %s, Body: %s, Status code: %s' % (
        response.error, response.body, response.status
    )
    raise LibcloudError(message, driver=self)
"""
XML Serializer
Borrowed from the Azure SDK for Python which is licensed under Apache 2.0.
https://github.com/Azure/azure-sdk-for-python
"""
def _lower(text):
return text.lower()
class AzureXmlSerializer(object):
@staticmethod
def create_storage_service_input_to_xml(service_name,
                                        description,
                                        label,
                                        affinity_group,
                                        location,
                                        geo_replication_enabled,
                                        extended_properties):
    """Build the CreateStorageServiceInput request document."""
    return AzureXmlSerializer.doc_from_data(
        'CreateStorageServiceInput',
        [
            ('ServiceName', service_name),
            ('Description', description),
            ('Label', label),
            ('AffinityGroup', affinity_group),
            ('Location', location),
            ('GeoReplicationEnabled', geo_replication_enabled, _lower)
        ],
        extended_properties
    )
@staticmethod
def update_storage_service_input_to_xml(description,
                                        label,
                                        geo_replication_enabled,
                                        extended_properties):
    """Build the UpdateStorageServiceInput request document."""
    return AzureXmlSerializer.doc_from_data(
        'UpdateStorageServiceInput',
        [
            ('Description', description),
            # Label is transported base64-encoded.
            ('Label', label, AzureNodeDriver._encode_base64),
            ('GeoReplicationEnabled', geo_replication_enabled, _lower)
        ],
        extended_properties
    )
@staticmethod
def regenerate_keys_to_xml(key_type):
    """Build the RegenerateKeys request document."""
    return AzureXmlSerializer.doc_from_data(
        'RegenerateKeys',
        [('KeyType', key_type)]
    )
@staticmethod
def update_hosted_service_to_xml(label, description, extended_properties):
    """Build the UpdateHostedService request document."""
    return AzureXmlSerializer.doc_from_data(
        'UpdateHostedService',
        [
            # Label is transported base64-encoded.
            ('Label', label, AzureNodeDriver._encode_base64),
            ('Description', description)
        ],
        extended_properties
    )
@staticmethod
def create_hosted_service_to_xml(service_name,
                                 label,
                                 description,
                                 location,
                                 affinity_group=None,
                                 extended_properties=None):
    """
    Build the CreateHostedService request document.

    When *affinity_group* is given it takes the place of *location*
    (the two are mutually exclusive in the Azure API).
    """
    if affinity_group:
        return AzureXmlSerializer.doc_from_data(
            'CreateHostedService',
            [
                ('ServiceName', service_name),
                ('Label', label),
                ('Description', description),
                ('AffinityGroup', affinity_group),
            ],
            extended_properties
        )
    return AzureXmlSerializer.doc_from_data(
        'CreateHostedService',
        [
            ('ServiceName', service_name),
            ('Label', label),
            ('Description', description),
            ('Location', location),
        ],
        extended_properties
    )
@staticmethod
def create_storage_service_to_xml(service_name,
                                  label,
                                  description,
                                  location,
                                  affinity_group,
                                  extended_properties=None):
    """Build the CreateStorageServiceInput request document."""
    return AzureXmlSerializer.doc_from_data(
        'CreateStorageServiceInput',
        [
            ('ServiceName', service_name),
            ('Label', label),
            ('Description', description),
            ('Location', location),
            ('AffinityGroup', affinity_group)
        ],
        extended_properties
    )
@staticmethod
def create_deployment_to_xml(name,
                             package_url,
                             label,
                             configuration,
                             start_deployment,
                             treat_warnings_as_error,
                             extended_properties):
    """Build the CreateDeployment request document."""
    return AzureXmlSerializer.doc_from_data(
        'CreateDeployment',
        [
            ('Name', name),
            ('PackageUrl', package_url),
            # Label is transported base64-encoded.
            ('Label', label, AzureNodeDriver._encode_base64),
            ('Configuration', configuration),
            ('StartDeployment', start_deployment, _lower),
            ('TreatWarningsAsError', treat_warnings_as_error, _lower)
        ],
        extended_properties
    )
@staticmethod
def swap_deployment_to_xml(production, source_deployment):
    """Build the Swap (VIP swap) request document."""
    return AzureXmlSerializer.doc_from_data(
        'Swap',
        [
            ('Production', production),
            ('SourceDeployment', source_deployment)
        ]
    )
@staticmethod
def update_deployment_status_to_xml(status):
    """Build the UpdateDeploymentStatus request document."""
    return AzureXmlSerializer.doc_from_data(
        'UpdateDeploymentStatus',
        [('Status', status)]
    )
@staticmethod
def change_deployment_to_xml(configuration,
                             treat_warnings_as_error,
                             mode,
                             extended_properties):
    """Build the ChangeConfiguration request document."""
    return AzureXmlSerializer.doc_from_data(
        'ChangeConfiguration',
        [
            ('Configuration', configuration),
            ('TreatWarningsAsError', treat_warnings_as_error, _lower),
            ('Mode', mode)
        ],
        extended_properties
    )
@staticmethod
def upgrade_deployment_to_xml(mode,
                              package_url,
                              configuration,
                              label,
                              role_to_upgrade,
                              force,
                              extended_properties):
    """Build the UpgradeDeployment request document."""
    return AzureXmlSerializer.doc_from_data(
        'UpgradeDeployment',
        [
            ('Mode', mode),
            ('PackageUrl', package_url),
            ('Configuration', configuration),
            # Label is transported base64-encoded.
            ('Label', label, AzureNodeDriver._encode_base64),
            ('RoleToUpgrade', role_to_upgrade),
            ('Force', force, _lower)
        ],
        extended_properties
    )
@staticmethod
def rollback_upgrade_to_xml(mode, force):
    """Build the RollbackUpdateOrUpgrade request document."""
    return AzureXmlSerializer.doc_from_data(
        'RollbackUpdateOrUpgrade',
        [
            ('Mode', mode),
            ('Force', force, _lower)
        ]
    )
@staticmethod
def walk_upgrade_domain_to_xml(upgrade_domain):
    """Build the WalkUpgradeDomain request document."""
    return AzureXmlSerializer.doc_from_data(
        'WalkUpgradeDomain',
        [('UpgradeDomain', upgrade_domain)]
    )
@staticmethod
def certificate_file_to_xml(data, certificate_format, password):
    """Build the CertificateFile request document."""
    return AzureXmlSerializer.doc_from_data(
        'CertificateFile',
        [
            ('Data', data),
            ('CertificateFormat', certificate_format),
            ('Password', password)
        ]
    )
@staticmethod
def create_affinity_group_to_xml(name, label, description, location):
    """Build the CreateAffinityGroup request document."""
    return AzureXmlSerializer.doc_from_data(
        'CreateAffinityGroup',
        [
            ('Name', name),
            # Label is transported base64-encoded.
            ('Label', label, AzureNodeDriver._encode_base64),
            ('Description', description),
            ('Location', location)
        ]
    )
@staticmethod
def update_affinity_group_to_xml(label, description):
    """Build the UpdateAffinityGroup request document."""
    return AzureXmlSerializer.doc_from_data(
        'UpdateAffinityGroup',
        [
            # Label is transported base64-encoded.
            ('Label', label, AzureNodeDriver._encode_base64),
            ('Description', description)
        ]
    )
@staticmethod
def subscription_certificate_to_xml(public_key, thumbprint, data):
    """Build the SubscriptionCertificate request document."""
    return AzureXmlSerializer.doc_from_data(
        'SubscriptionCertificate',
        [
            ('SubscriptionCertificatePublicKey', public_key),
            ('SubscriptionCertificateThumbprint', thumbprint),
            ('SubscriptionCertificateData', data)
        ]
    )
@staticmethod
def os_image_to_xml(label, media_link, name, os):
    """Build the OSImage request document."""
    return AzureXmlSerializer.doc_from_data(
        'OSImage',
        [
            ('Label', label),
            ('MediaLink', media_link),
            ('Name', name),
            ('OS', os)
        ]
    )
@staticmethod
def data_virtual_hard_disk_to_xml(host_caching,
                                  disk_label,
                                  disk_name,
                                  lun,
                                  logical_disk_size_in_gb,
                                  media_link,
                                  source_media_link):
    """Build the DataVirtualHardDisk request document."""
    return AzureXmlSerializer.doc_from_data(
        'DataVirtualHardDisk',
        [
            ('HostCaching', host_caching),
            ('DiskLabel', disk_label),
            ('DiskName', disk_name),
            ('Lun', lun),
            ('LogicalDiskSizeInGB', logical_disk_size_in_gb),
            ('MediaLink', media_link),
            ('SourceMediaLink', source_media_link)
        ]
    )
@staticmethod
def disk_to_xml(has_operating_system, label, media_link, name, os):
    """Build the Disk request document."""
    return AzureXmlSerializer.doc_from_data(
        'Disk',
        [
            ('HasOperatingSystem', has_operating_system, _lower),
            ('Label', label),
            ('MediaLink', media_link),
            ('Name', name),
            ('OS', os)
        ]
    )
@staticmethod
def restart_role_operation_to_xml():
    """XML body for the RestartRoleOperation management call."""
    op = ET.Element("OperationType")
    op.text = "RestartRoleOperation"
    doc = AzureXmlSerializer.doc_from_xml('RestartRoleOperation', op)
    return ensure_string(ET.tostring(doc, encoding='utf-8'))
@staticmethod
def shutdown_role_operation_to_xml():
    """XML body for the ShutdownRoleOperation management call."""
    op = ET.Element("OperationType")
    op.text = "ShutdownRoleOperation"
    doc = AzureXmlSerializer.doc_from_xml('ShutdownRoleOperation', op)
    return ensure_string(ET.tostring(doc, encoding='utf-8'))
@staticmethod
def start_role_operation_to_xml():
    """XML body for the StartRoleOperation management call."""
    op = ET.Element("OperationType")
    op.text = "StartRoleOperation"
    doc = AzureXmlSerializer.doc_from_xml('StartRoleOperation', op)
    return ensure_string(ET.tostring(doc, encoding='utf-8'))
@staticmethod
def windows_configuration_to_xml(configuration, xml):
    """
    Serialize a WindowsConfigurationSet into *xml* (an ElementTree
    element) and return *xml*.

    Fixes two defects in the DomainJoin branch: ``ET.xml(...)`` is not a
    callable (it must be ``ET.Element``) and ``appemnd`` was a typo for
    ``append`` — with either defect the branch raised AttributeError
    whenever ``configuration.domain_join`` was set.
    """
    AzureXmlSerializer.data_to_xml(
        [('ConfigurationSetType', configuration.configuration_set_type)],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [('ComputerName', configuration.computer_name)],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [('AdminPassword', configuration.admin_password)],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [
            (
                'ResetPasswordOnFirstLogon',
                configuration.reset_password_on_first_logon,
                _lower
            )
        ],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [
            (
                'EnableAutomaticUpdates',
                configuration.enable_automatic_updates,
                _lower
            )
        ],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [('TimeZone', configuration.time_zone)],
        xml
    )
    if configuration.domain_join is not None:
        # Fixed: ET.Element (was ET.xml) and append (was appemnd).
        domain = ET.Element("DomainJoin")
        creds = ET.Element("Credentials")
        domain.append(creds)
        xml.append(domain)
        AzureXmlSerializer.data_to_xml(
            [('Domain', configuration.domain_join.credentials.domain)],
            creds
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'Username',
                    configuration.domain_join.credentials.username
                )
            ],
            creds
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'Password',
                    configuration.domain_join.credentials.password
                )
            ],
            creds
        )
        AzureXmlSerializer.data_to_xml(
            [('JoinDomain', configuration.domain_join.join_domain)],
            domain
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'MachineObjectOU',
                    configuration.domain_join.machine_object_ou
                )
            ],
            domain
        )
    if configuration.stored_certificate_settings is not None:
        cert_settings = ET.Element("StoredCertificateSettings")
        xml.append(cert_settings)
        for cert in configuration.stored_certificate_settings:
            cert_setting = ET.Element("CertificateSetting")
            cert_settings.append(cert_setting)
            cert_setting.append(AzureXmlSerializer.data_to_xml(
                [('StoreLocation', cert.store_location)])
            )
            AzureXmlSerializer.data_to_xml(
                [('StoreName', cert.store_name)],
                cert_setting
            )
            AzureXmlSerializer.data_to_xml(
                [('Thumbprint', cert.thumbprint)],
                cert_setting
            )
    AzureXmlSerializer.data_to_xml(
        [('AdminUsername', configuration.admin_user_name)],
        xml
    )
    return xml
@staticmethod
def linux_configuration_to_xml(configuration, xml):
    """
    Serialize a LinuxConfigurationSet into *xml* (an ElementTree
    element) and return *xml*.

    Emits host/user credentials, the SSH public-key and key-pair
    sections when configured, and optional CustomData.
    """
    AzureXmlSerializer.data_to_xml(
        [('ConfigurationSetType', configuration.configuration_set_type)],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [('HostName', configuration.host_name)],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [('UserName', configuration.user_name)],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [('UserPassword', configuration.user_password)],
        xml
    )
    AzureXmlSerializer.data_to_xml(
        [
            (
                'DisableSshPasswordAuthentication',
                configuration.disable_ssh_password_authentication,
                _lower
            )
        ],
        xml
    )
    if configuration.ssh is not None:
        # SSH block: PublicKeys and KeyPairs are emitted even if empty.
        ssh = ET.Element("SSH")
        pkeys = ET.Element("PublicKeys")
        kpairs = ET.Element("KeyPairs")
        ssh.append(pkeys)
        ssh.append(kpairs)
        xml.append(ssh)
        for key in configuration.ssh.public_keys:
            pkey = ET.Element("PublicKey")
            pkeys.append(pkey)
            AzureXmlSerializer.data_to_xml(
                [('Fingerprint', key.fingerprint)],
                pkey
            )
            AzureXmlSerializer.data_to_xml([('Path', key.path)], pkey)
        for key in configuration.ssh.key_pairs:
            kpair = ET.Element("KeyPair")
            kpairs.append(kpair)
            AzureXmlSerializer.data_to_xml(
                [('Fingerprint', key.fingerprint)],
                kpair
            )
            AzureXmlSerializer.data_to_xml([('Path', key.path)], kpair)
    if configuration.custom_data is not None:
        AzureXmlSerializer.data_to_xml(
            [('CustomData', configuration.custom_data)],
            xml
        )
    return xml
@staticmethod
def network_configuration_to_xml(configuration, xml):
    """
    Serialize a network ConfigurationSet (input endpoints and subnet
    names) into *xml* (an ElementTree element) and return *xml*.
    """
    AzureXmlSerializer.data_to_xml(
        [('ConfigurationSetType', configuration.configuration_set_type)],
        xml
    )
    input_endpoints = ET.Element("InputEndpoints")
    xml.append(input_endpoints)
    for endpoint in configuration.input_endpoints:
        input_endpoint = ET.Element("InputEndpoint")
        input_endpoints.append(input_endpoint)
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'LoadBalancedEndpointSetName',
                    endpoint.load_balanced_endpoint_set_name
                )
            ],
            input_endpoint
        )
        AzureXmlSerializer.data_to_xml(
            [('LocalPort', endpoint.local_port)],
            input_endpoint
        )
        AzureXmlSerializer.data_to_xml(
            [('Name', endpoint.name)],
            input_endpoint
        )
        AzureXmlSerializer.data_to_xml(
            [('Port', endpoint.port)],
            input_endpoint
        )
        # Emit a LoadBalancerProbe only when at least one probe field
        # is set.
        if (endpoint.load_balancer_probe.path or
                endpoint.load_balancer_probe.port or
                endpoint.load_balancer_probe.protocol):
            load_balancer_probe = ET.Element("LoadBalancerProbe")
            input_endpoint.append(load_balancer_probe)
            AzureXmlSerializer.data_to_xml(
                [('Path', endpoint.load_balancer_probe.path)],
                load_balancer_probe
            )
            AzureXmlSerializer.data_to_xml(
                [('Port', endpoint.load_balancer_probe.port)],
                load_balancer_probe
            )
            AzureXmlSerializer.data_to_xml(
                [('Protocol', endpoint.load_balancer_probe.protocol)],
                load_balancer_probe
            )
        AzureXmlSerializer.data_to_xml(
            [('Protocol', endpoint.protocol)],
            input_endpoint
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'EnableDirectServerReturn',
                    endpoint.enable_direct_server_return,
                    _lower
                )
            ],
            input_endpoint
        )
    subnet_names = ET.Element("SubnetNames")
    xml.append(subnet_names)
    for name in configuration.subnet_names:
        AzureXmlSerializer.data_to_xml(
            [('SubnetName', name)],
            subnet_names
        )
    return xml
@staticmethod
def role_to_xml(availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
vm_image_name,
role_name,
role_size,
role_type,
system_configuration_set,
xml):
AzureXmlSerializer.data_to_xml([('RoleName', role_name)], xml)
AzureXmlSerializer.data_to_xml([('RoleType', role_type)], xml)
config_sets = ET.Element("ConfigurationSets")
xml.append(config_sets)
if system_configuration_set is not None:
config_set = ET.Element("ConfigurationSet")
config_sets.append(config_set)
if isinstance(system_configuration_set, WindowsConfigurationSet):
AzureXmlSerializer.windows_configuration_to_xml(
system_configuration_set,
config_set
)
elif isinstance(system_configuration_set, LinuxConfigurationSet):
AzureXmlSerializer.linux_configuration_to_xml(
system_configuration_set,
config_set
)
if network_configuration_set is not None:
config_set = ET.Element("ConfigurationSet")
config_sets.append(config_set)
AzureXmlSerializer.network_configuration_to_xml(
network_configuration_set,
config_set
)
if availability_set_name is not None:
AzureXmlSerializer.data_to_xml(
[('AvailabilitySetName', availability_set_name)],
xml
)
if data_virtual_hard_disks is not None:
vhds = ET.Element("DataVirtualHardDisks")
xml.append(vhds)
for hd in data_virtual_hard_disks:
vhd = ET.Element("DataVirtualHardDisk")
vhds.append(vhd)
AzureXmlSerializer.data_to_xml(
[('HostCaching', hd.host_caching)],
vhd
)
AzureXmlSerializer.data_to_xml(
[('DiskLabel', hd.disk_label)],
vhd
)
AzureXmlSerializer.data_to_xml(
[('DiskName', hd.disk_name)],
vhd
)
AzureXmlSerializer.data_to_xml(
[('Lun', hd.lun)],
vhd
)
AzureXmlSerializer.data_to_xml(
[('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb)],
vhd
)
AzureXmlSerializer.data_to_xml(
[('MediaLink', hd.media_link)],
vhd
)
if os_virtual_hard_disk is not None:
hd = ET.Element("OSVirtualHardDisk")
xml.append(hd)
AzureXmlSerializer.data_to_xml(
[('HostCaching', os_virtual_hard_disk.host_caching)],
hd
)
AzureXmlSerializer.data_to_xml(
[('DiskLabel', os_virtual_hard_disk.disk_label)],
hd
)
AzureXmlSerializer.data_to_xml(
[('DiskName', os_virtual_hard_disk.disk_name)],
hd
)
AzureXmlSerializer.data_to_xml(
[('MediaLink', os_virtual_hard_disk.media_link)],
hd
)
AzureXmlSerializer.data_to_xml(
[('SourceImageName', os_virtual_hard_disk.source_image_name)],
hd
)
if vm_image_name is not None:
AzureXmlSerializer.data_to_xml(
[('VMImageName', vm_image_name)],
xml
)
if role_size is not None:
AzureXmlSerializer.data_to_xml([('RoleSize', role_size)], xml)
return xml
@staticmethod
def add_role_to_xml(role_name,
system_configuration_set,
os_virtual_hard_disk,
role_type,
network_configuration_set,
availability_set_name,
data_virtual_hard_disks,
vm_image_name,
role_size):
doc = AzureXmlSerializer.doc_from_xml('PersistentVMRole')
xml = AzureXmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
vm_image_name,
role_name,
role_size,
role_type,
system_configuration_set,
doc
)
result = ensure_string(ET.tostring(xml, encoding='utf-8'))
return result
@staticmethod
def update_role_to_xml(role_name,
os_virtual_hard_disk,
role_type,
network_configuration_set,
availability_set_name,
data_virtual_hard_disks,
vm_image_name,
role_size):
doc = AzureXmlSerializer.doc_from_xml('PersistentVMRole')
AzureXmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
vm_image_name,
role_name,
role_size,
role_type,
None,
doc
)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def capture_role_to_xml(post_capture_action,
target_image_name,
target_image_label,
provisioning_configuration):
xml = AzureXmlSerializer.data_to_xml(
[('OperationType', 'CaptureRoleOperation')]
)
AzureXmlSerializer.data_to_xml(
[('PostCaptureAction', post_capture_action)],
xml
)
if provisioning_configuration is not None:
provisioning_config = ET.Element("ProvisioningConfiguration")
xml.append(provisioning_config)
if isinstance(provisioning_configuration, WindowsConfigurationSet):
AzureXmlSerializer.windows_configuration_to_xml(
provisioning_configuration,
provisioning_config
)
elif isinstance(provisioning_configuration, LinuxConfigurationSet):
AzureXmlSerializer.linux_configuration_to_xml(
provisioning_configuration,
provisioning_config
)
AzureXmlSerializer.data_to_xml(
[('TargetImageLabel', target_image_label)],
xml
)
AzureXmlSerializer.data_to_xml(
[('TargetImageName', target_image_name)],
xml
)
doc = AzureXmlSerializer.doc_from_xml('CaptureRoleOperation', xml)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def virtual_machine_deployment_to_xml(deployment_name,
deployment_slot,
label,
role_name,
system_configuration_set,
os_virtual_hard_disk,
role_type,
network_configuration_set,
availability_set_name,
data_virtual_hard_disks,
role_size,
virtual_network_name,
vm_image_name):
doc = AzureXmlSerializer.doc_from_xml('Deployment')
AzureXmlSerializer.data_to_xml([('Name', deployment_name)], doc)
AzureXmlSerializer.data_to_xml(
[('DeploymentSlot', deployment_slot)],
doc
)
AzureXmlSerializer.data_to_xml([('Label', label)], doc)
role_list = ET.Element("RoleList")
role = ET.Element("Role")
role_list.append(role)
doc.append(role_list)
AzureXmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
vm_image_name,
role_name,
role_size,
role_type,
system_configuration_set,
role
)
if virtual_network_name is not None:
doc.append(
AzureXmlSerializer.data_to_xml(
[('VirtualNetworkName', virtual_network_name)]
)
)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def data_to_xml(data, xml=None):
"""
Creates an xml fragment from the specified data.
data: Array of tuples, where first: xml element name
second: xml element text
third: conversion function
"""
for element in data:
name = element[0]
val = element[1]
if len(element) > 2:
converter = element[2]
else:
converter = None
if val is not None:
if converter is not None:
text = _str(converter(_str(val)))
else:
text = _str(val)
entry = ET.Element(name)
entry.text = text
if xml is not None:
xml.append(entry)
else:
return entry
return xml
@staticmethod
def doc_from_xml(document_element_name, inner_xml=None):
"""
Wraps the specified xml in an xml root element with default azure
namespaces
"""
# Note: Namespaces don't work consistency in Python 2 and 3.
"""
nsmap = {
None: "http://www.w3.org/2001/XMLSchema-instance",
"i": "http://www.w3.org/2001/XMLSchema-instance"
}
xml.attrib["xmlns:i"] = "http://www.w3.org/2001/XMLSchema-instance"
xml.attrib["xmlns"] = "http://schemas.microsoft.com/windowsazure"
"""
xml = ET.Element(document_element_name)
xml.set("xmlns", "http://schemas.microsoft.com/windowsazure")
if inner_xml is not None:
xml.append(inner_xml)
return xml
@staticmethod
def doc_from_data(document_element_name, data, extended_properties=None):
doc = AzureXmlSerializer.doc_from_xml(document_element_name)
AzureXmlSerializer.data_to_xml(data, doc)
if extended_properties is not None:
doc.append(
AzureXmlSerializer.extended_properties_dict_to_xml_fragment(
extended_properties
)
)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def extended_properties_dict_to_xml_fragment(extended_properties):
if extended_properties is not None and len(extended_properties) > 0:
xml = ET.Element("ExtendedProperties")
for key, val in extended_properties.items():
extended_property = ET.Element("ExtendedProperty")
name = ET.Element("Name")
name.text = _str(key)
value = ET.Element("Value")
value.text = _str(val)
extended_property.append(name)
extended_property.append(value)
xml.append(extended_property)
return xml
"""
Data Classes
Borrowed from the Azure SDK for Python.
"""
class WindowsAzureData(object):
    """
    Marker base class for all Azure data-transfer objects.

    Carries no behavior of its own; it exists so (de)serialization code
    can use isinstance() checks to recognize Azure data classes.
    """
    pass
class WindowsAzureDataTypedList(WindowsAzureData):
    """
    Base class for typed collections of Azure data objects.

    Subclasses set `list_type` (the element class) and optionally
    `xml_element_name` (the XML tag of each element); instances delegate
    iteration, length and indexing to the wrapped `_ListOf`.
    """
    # Element class stored in the list; set by subclasses.
    list_type = None
    # XML tag per element; _ListOf defaults it to the class name when None.
    xml_element_name = None
    def __init__(self):
        self.items = _ListOf(self.list_type, self.xml_element_name)
    def __iter__(self):
        return iter(self.items)
    def __len__(self):
        return len(self.items)
    def __getitem__(self, index):
        return self.items[index]
class OSVirtualHardDisk(WindowsAzureData):
    """Describes the operating-system disk attached to a role."""
    def __init__(self, source_image_name=None, media_link=None,
                 host_caching=None, disk_label=None, disk_name=None):
        self.disk_name = disk_name
        self.disk_label = disk_label
        self.host_caching = host_caching
        self.media_link = media_link
        self.source_image_name = source_image_name
        # Undocumented by the API; not used when adding a role.
        self.os = ''
class LinuxConfigurationSet(WindowsAzureData):
    """Provisioning configuration for a Linux role (user, host name, SSH)."""
    def __init__(self,
                 host_name=None,
                 user_name=None,
                 user_password=None,
                 disable_ssh_password_authentication=None,
                 custom_data=None):
        # Discriminator the serializer uses to pick the element layout.
        self.configuration_set_type = 'LinuxProvisioningConfiguration'
        self.custom_data = custom_data
        self.disable_ssh_password_authentication = \
            disable_ssh_password_authentication
        self.host_name = host_name
        self.ssh = SSH()
        self.user_name = user_name
        self.user_password = user_password
class WindowsConfigurationSet(WindowsAzureData):
    """Provisioning configuration for a Windows role."""
    def __init__(self,
                 computer_name=None,
                 admin_password=None,
                 reset_password_on_first_logon=None,
                 enable_automatic_updates=None,
                 time_zone=None,
                 admin_user_name=None):
        # Discriminator the serializer uses to pick the element layout.
        self.configuration_set_type = 'WindowsProvisioningConfiguration'
        self.admin_password = admin_password
        self.admin_user_name = admin_user_name
        self.computer_name = computer_name
        self.enable_automatic_updates = enable_automatic_updates
        self.reset_password_on_first_logon = reset_password_on_first_logon
        self.time_zone = time_zone
        # Sub-objects populated separately by the caller / deserializer.
        self.domain_join = DomainJoin()
        self.stored_certificate_settings = StoredCertificateSettings()
class DomainJoin(WindowsAzureData):
    """Domain-join settings for a Windows role."""
    def __init__(self):
        self.join_domain = self.machine_object_ou = ''
        self.credentials = Credentials()
class Credentials(WindowsAzureData):
    """Account used to join the machine to a domain."""
    def __init__(self):
        # All fields are empty strings until deserialized/assigned.
        self.domain = self.username = self.password = ''
class CertificateSetting(WindowsAzureData):
    """
    A certificate to be provisioned onto the virtual machine.

    thumbprint:
        Thumbprint of an existing service certificate to provision.
    store_name:
        Name of the certificate store the certificate is retrieved from.
    store_location:
        Target certificate store location on the virtual machine; only
        LocalMachine is supported.
    """
    def __init__(self, thumbprint='', store_name='', store_location=''):
        (self.thumbprint,
         self.store_name,
         self.store_location) = thumbprint, store_name, store_location
class StoredCertificateSettings(WindowsAzureDataTypedList):
    # Typed list of CertificateSetting entries.
    list_type = CertificateSetting
    _repr_attributes = [
        'items'
    ]
class SSH(WindowsAzureData):
    """SSH provisioning data: authorized public keys and key pairs."""
    def __init__(self):
        self.key_pairs = KeyPairs()
        self.public_keys = PublicKeys()
class PublicKey(WindowsAzureData):
    """An SSH public key identified by fingerprint and target path."""
    def __init__(self, fingerprint='', path=''):
        self.fingerprint, self.path = fingerprint, path
class PublicKeys(WindowsAzureDataTypedList):
    # Typed list of PublicKey entries.
    list_type = PublicKey
    _repr_attributes = [
        'items'
    ]
class AzureKeyPair(WindowsAzureData):
    """An SSH key pair identified by fingerprint and target path."""
    def __init__(self, fingerprint='', path=''):
        self.fingerprint, self.path = fingerprint, path
class KeyPairs(WindowsAzureDataTypedList):
    # Typed list of AzureKeyPair entries.
    list_type = AzureKeyPair
    _repr_attributes = [
        'items'
    ]
class LoadBalancerProbe(WindowsAzureData):
    """Health-probe settings for a load-balanced endpoint."""
    def __init__(self):
        self.path = self.port = self.protocol = ''
class ConfigurationSet(WindowsAzureData):
    """Generic configuration set: endpoints plus subnet membership."""
    def __init__(self):
        self.configuration_set_type = self.role_type = ''
        self.input_endpoints = ConfigurationSetInputEndpoints()
        self.subnet_names = ScalarListOf(str, 'SubnetName')
class ConfigurationSets(WindowsAzureDataTypedList):
    # Typed list of ConfigurationSet entries.
    list_type = ConfigurationSet
    _repr_attributes = [
        'items'
    ]
class ConfigurationSetInputEndpoint(WindowsAzureData):
    """One exposed endpoint of a configuration set, optionally load-balanced."""
    def __init__(self,
                 name='',
                 protocol='',
                 port='',
                 local_port='',
                 load_balanced_endpoint_set_name='',
                 enable_direct_server_return=False):
        self.name = name
        self.protocol = protocol
        self.port = port
        self.local_port = local_port
        self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name
        self.enable_direct_server_return = enable_direct_server_return
        # Probe settings are filled in separately after construction.
        self.load_balancer_probe = LoadBalancerProbe()
class ConfigurationSetInputEndpoints(WindowsAzureDataTypedList):
    # Typed list of endpoints; serialized as <InputEndpoint> elements.
    list_type = ConfigurationSetInputEndpoint
    xml_element_name = 'InputEndpoint'
    _repr_attributes = [
        'items'
    ]
class Location(WindowsAzureData):
    """A datacenter location with its available services and role sizes."""
    def __init__(self):
        self.name = self.display_name = ''
        self.available_services = ScalarListOf(str, 'AvailableService')
        self.compute_capabilities = ComputeCapability()
class Locations(WindowsAzureDataTypedList):
    # Typed list of Location entries.
    list_type = Location
    _repr_attributes = [
        'items'
    ]
class ComputeCapability(WindowsAzureData):
    # Role sizes available for virtual machines at a given location.
    def __init__(self):
        self.virtual_machines_role_sizes = ScalarListOf(str, 'RoleSize')
class VirtualMachinesRoleSizes(WindowsAzureData):
    # Scalar list of role-size names (<RoleSize> elements).
    def __init__(self):
        self.role_size = ScalarListOf(str, 'RoleSize')
class OSImage(WindowsAzureData):
    """Metadata for an OS image in the image repository."""
    def __init__(self):
        # String fields all default to empty until deserialized.
        self.affinity_group = self.category = self.location = ''
        self.label = self.media_link = self.name = ''
        self.os = self.eula = self.description = ''
        self.logical_size_in_gb = 0
class Images(WindowsAzureDataTypedList):
    # Typed list of OSImage entries.
    list_type = OSImage
    _repr_attributes = [
        'items'
    ]
class VMImage(WindowsAzureData):
    """A captured VM image together with its source deployment details."""
    def __init__(self):
        self.name = self.label = self.category = ''
        self.service_name = self.deployment_name = self.role_name = ''
        self.location = self.affinity_group = ''
        self.os_disk_configuration = OSDiskConfiguration()
class VMImages(WindowsAzureDataTypedList):
    # Typed list of VMImage entries.
    list_type = VMImage
    _repr_attributes = [
        'items'
    ]
class VirtualIP(WindowsAzureData):
    """A virtual IP address assigned to a deployment."""
    def __init__(self):
        self.address = self.is_dns_programmed = self.name = ''
class VirtualIPs(WindowsAzureDataTypedList):
    # Typed list of VirtualIP entries.
    list_type = VirtualIP
    _repr_attributes = [
        'items'
    ]
class HostedService(WindowsAzureData, ReprMixin):
    """A cloud service together with its properties and deployments."""
    _repr_attributes = [
        'service_name',
        'url'
    ]
    def __init__(self):
        self.url = self.service_name = ''
        self.hosted_service_properties = HostedServiceProperties()
        self.deployments = Deployments()
class HostedServices(WindowsAzureDataTypedList, ReprMixin):
    # Typed list of HostedService entries.
    list_type = HostedService
    _repr_attributes = [
        'items'
    ]
class HostedServiceProperties(WindowsAzureData):
    """Descriptive properties of a hosted (cloud) service."""
    def __init__(self):
        self.description = self.location = self.affinity_group = ''
        self.status = self.date_created = self.date_last_modified = ''
        # The service returns the label base64-encoded.
        self.label = _Base64String()
        self.extended_properties = _DictOf(
            'ExtendedProperty',
            'Name',
            'Value'
        )
class Deployment(WindowsAzureData):
    """Full state of a deployment: roles, instances, endpoints, metadata."""
    def __init__(self):
        self.name = self.deployment_slot = self.private_id = ''
        self.status = self.url = self.upgrade_domain_count = ''
        self.sdk_version = self.created_time = self.last_modified_time = ''
        # Label and configuration arrive base64-encoded from the service.
        self.label = _Base64String()
        self.configuration = _Base64String()
        self.role_instance_list = RoleInstanceList()
        self.upgrade_status = UpgradeStatus()
        self.role_list = RoleList()
        self.input_endpoint_list = InputEndpoints()
        self.locked = self.rollback_allowed = False
        self.persistent_vm_downtime_info = PersistentVMDowntimeInfo()
        self.extended_properties = _DictOf(
            'ExtendedProperty',
            'Name',
            'Value'
        )
        self.virtual_ips = VirtualIPs()
class Deployments(WindowsAzureDataTypedList):
    # Typed list of Deployment entries.
    list_type = Deployment
    _repr_attributes = [
        'items'
    ]
class UpgradeStatus(WindowsAzureData):
    """Progress information for an in-place deployment upgrade."""
    def __init__(self):
        self.upgrade_type = ''
        self.current_upgrade_domain_state = self.current_upgrade_domain = ''
class RoleInstance(WindowsAzureData):
    """Runtime state of one role instance (status, placement, endpoints)."""
    def __init__(self):
        self.role_name = self.instance_name = self.instance_status = ''
        self.instance_size = self.instance_state_details = ''
        self.instance_error_code = self.ip_address = ''
        self.power_state = self.fqdn = self.host_name = ''
        # Fault/upgrade domain indices default to zero.
        self.instance_upgrade_domain = self.instance_fault_domain = 0
        self.instance_endpoints = InstanceEndpoints()
class RoleInstanceList(WindowsAzureDataTypedList):
    # Typed list of RoleInstance entries.
    list_type = RoleInstance
    _repr_attributes = [
        'items'
    ]
class InstanceEndpoint(WindowsAzureData):
    """A network endpoint exposed by a single role instance."""
    def __init__(self):
        self.name = self.vip = ''
        self.public_port = self.local_port = self.protocol = ''
class InstanceEndpoints(WindowsAzureDataTypedList):
    # Typed list of InstanceEndpoint entries.
    list_type = InstanceEndpoint
    _repr_attributes = [
        'items'
    ]
class InputEndpoint(WindowsAzureData):
    """A deployment-level input endpoint (role, virtual IP and port)."""
    def __init__(self):
        self.role_name = self.vip = self.port = ''
class InputEndpoints(WindowsAzureDataTypedList):
    # Typed list of InputEndpoint entries.
    list_type = InputEndpoint
    _repr_attributes = [
        'items'
    ]
class Role(WindowsAzureData):
    # Minimal role record as returned in a deployment's RoleList.
    def __init__(self):
        self.role_name = ''
        self.os_version = ''
class RoleList(WindowsAzureDataTypedList):
    # Typed list of Role entries.
    list_type = Role
    _repr_attributes = [
        'items'
    ]
class PersistentVMDowntimeInfo(WindowsAzureData):
    """Scheduled downtime window reported for a persistent VM."""
    def __init__(self):
        self.start_time = self.end_time = self.status = ''
class AsynchronousOperationResult(WindowsAzureData):
    # Wraps the request id returned by asynchronous management operations.
    def __init__(self, request_id=None):
        self.request_id = request_id
class Disk(WindowsAzureData):
    """A disk in the disk repository, with optional attachment info."""
    def __init__(self):
        self.affinity_group = self.has_operating_system = ''
        self.is_corrupted = self.location = self.label = ''
        self.media_link = self.name = self.os = ''
        self.source_image_name = ''
        self.logical_disk_size_in_gb = 0
        self.attached_to = AttachedTo()
class Disks(WindowsAzureDataTypedList):
    # Typed list of Disk entries.
    list_type = Disk
    _repr_attributes = [
        'items'
    ]
class AttachedTo(WindowsAzureData):
    """Identifies the service/deployment/role a disk is attached to."""
    def __init__(self):
        self.hosted_service_name = self.deployment_name = self.role_name = ''
class OperationError(WindowsAzureData):
    """Error code/message pair attached to a failed operation."""
    def __init__(self):
        self.code = self.message = ''
class Operation(WindowsAzureData):
    """Status record of an asynchronous management operation."""
    def __init__(self):
        self.id = self.status = self.http_status_code = ''
        self.error = OperationError()
class OperatingSystem(WindowsAzureData):
    """A guest operating-system version entry."""
    def __init__(self):
        self.version = ''
        self.is_default = self.is_active = True
        self.family = 0
        # Labels arrive base64-encoded from the service.
        self.label = _Base64String()
        self.family_label = _Base64String()
class OSDiskConfiguration(WindowsAzureData):
    """OS-disk details of a captured VM image."""
    def __init__(self):
        self.name = self.host_caching = self.os_state = ''
        self.os = self.media_link = ''
        self.logical_disk_size_in_gb = 0
class OperatingSystems(WindowsAzureDataTypedList):
    # Typed list of OperatingSystem entries.
    list_type = OperatingSystem
    _repr_attributes = [
        'items'
    ]
class OperatingSystemFamily(WindowsAzureData):
    """A guest OS family and the OS versions it contains."""
    def __init__(self):
        self.name = ''
        self.operating_systems = OperatingSystems()
        self.label = _Base64String()
class OperatingSystemFamilies(WindowsAzureDataTypedList):
    # Typed list of OperatingSystemFamily entries.
    list_type = OperatingSystemFamily
    _repr_attributes = [
        'items'
    ]
class Subscription(WindowsAzureData):
    """Subscription details plus quota and current-usage counters."""
    def __init__(self):
        self.subscription_id = self.subscription_name = ''
        self.subscription_status = ''
        self.account_admin_live_email_id = ''
        self.service_admin_live_email_id = ''
        # Quota maxima and usage counters all default to zero.
        self.max_core_count = self.max_storage_accounts = 0
        self.max_hosted_services = self.current_core_count = 0
        self.current_hosted_services = self.current_storage_accounts = 0
        self.max_virtual_network_sites = self.max_local_network_sites = 0
        self.max_dns_servers = 0
class AvailabilityResponse(WindowsAzureData):
    # Result of a name-availability check; True when the name is free.
    def __init__(self):
        self.result = False
class SubscriptionCertificate(WindowsAzureData):
    """A management certificate registered with the subscription."""
    def __init__(self):
        self.subscription_certificate_public_key = ''
        self.subscription_certificate_thumbprint = ''
        self.subscription_certificate_data = self.created = ''
class SubscriptionCertificates(WindowsAzureDataTypedList):
    # Typed list of SubscriptionCertificate entries.
    list_type = SubscriptionCertificate
    _repr_attributes = [
        'items'
    ]
class AzureHTTPRequest(object):
    """Plain container describing one HTTP request to the Azure endpoint."""
    def __init__(self):
        self.host = self.method = self.path = self.body = ''
        self.query = []    # list of (name, value) pairs
        self.headers = {}  # header name -> header value
        # When set, overrides the connection's default protocol.
        self.protocol_override = None
class AzureHTTPResponse(object):
    """Plain container for the status, headers and body of a response."""
    def __init__(self, status, message, headers, body):
        (self.status,
         self.message,
         self.headers,
         self.body) = status, message, headers, body
"""
Helper classes and functions.
"""
class _Base64String(str):
    # Marker subclass: values of this type hold base64-encoded labels.
    pass
class _ListOf(list):
"""
A list which carries with it the type that's expected to go in it.
Used for deserializaion and construction of the lists
"""
def __init__(self, list_type, xml_element_name=None):
self.list_type = list_type
if xml_element_name is None:
self.xml_element_name = list_type.__name__
else:
self.xml_element_name = xml_element_name
super(_ListOf, self).__init__()
class ScalarListOf(list):
    """
    A list of scalar values that remembers the expected element type and
    the XML tag each element is serialized with.
    """
    def __init__(self, list_type, xml_element_name):
        super(ScalarListOf, self).__init__()
        self.list_type = list_type
        self.xml_element_name = xml_element_name
class _DictOf(dict):
"""
A dict which carries with it the xml element names for key,val.
Used for deserializaion and construction of the lists
"""
def __init__(self,
pair_xml_element_name,
key_xml_element_name,
value_xml_element_name):
self.pair_xml_element_name = pair_xml_element_name
self.key_xml_element_name = key_xml_element_name
self.value_xml_element_name = value_xml_element_name
super(_DictOf, self).__init__()
class AzureNodeLocation(NodeLocation):
    """
    NodeLocation extended with the services and VM role sizes that
    Azure exposes per location on top of the generic attributes.
    """
    def __init__(self, id, name, country, driver, available_services,
                 virtual_machine_role_sizes):
        super(AzureNodeLocation, self).__init__(id, name, country, driver)
        self.available_services = available_services
        self.virtual_machine_role_sizes = virtual_machine_role_sizes
    def __repr__(self):
        return (
            '<AzureNodeLocation: id=%s, name=%s, country=%s, '
            'driver=%s services=%s virtualMachineRoleSizes=%s >'
            % (self.id,
               self.name,
               self.country,
               self.driver.name,
               ','.join(self.available_services),
               ','.join(self.virtual_machine_role_sizes))
        )
| illfelder/libcloud | libcloud/compute/drivers/azure.py | Python | apache-2.0 | 113,249 | [
"VisIt"
] | c6005998ac007ac4fbd04438589d2e0e1e7bae9702458d765ba0f97d993c0961 |
# -*- coding: utf-8 -*-
#
# brunel_alpha_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Random balanced network (alpha synapses) connected with NEST
------------------------------------------------------------
This script simulates an excitatory and an inhibitory population on
the basis of the network used in
Brunel N, Dynamics of Sparsely Connected Networks of Excitatory and
Inhibitory Spiking Neurons, Journal of Computational Neuroscience 8,
183–208 (2000).
In contrast to brunel-alpha-numpy.py, this variant uses NEST's builtin
connection routines to draw the random connections instead of NumPy.
When connecting the network customary synapse models are used, which
allow for querying the number of created synapses. Using spike
detectors the average firing rates of the neurons in the populations
are established. The building as well as the simulation time of the
network are recorded.
'''
'''
Importing all necessary modules for simulation, analysis and plotting.
'''
from scipy.optimize import fsolve
import nest
import nest.raster_plot
import time
from numpy import exp
'''
Definition of functions used in this example. First, define the
Lambert W function implemented in SLI. The second function
computes the maximum of the postsynaptic potential for a synaptic
input current of unit amplitude (1 pA) using the Lambert W
function. This function will later be used to calibrate the synaptic
weights.
'''
def LambertWm1(x):
    """Evaluate the Lambert W function (branch -1) via NEST's SLI interpreter."""
    nest.sli_push(x)
    nest.sli_run('LambertWm1')
    return nest.sli_pop()
def ComputePSPnorm(tauMem, CMem, tauSyn):
    """Return the maximum of the PSP evoked by a unit-amplitude (1 pA)
    alpha-shaped synaptic current, given membrane time constant *tauMem*,
    membrane capacitance *CMem* and synaptic time constant *tauSyn*."""
    a = tauMem / tauSyn
    b = 1.0 / tauSyn - 1.0 / tauMem
    # Time at which the PSP reaches its maximum.
    t_max = 1.0 / b * (-LambertWm1(-exp(-1.0 / a) / a) - 1.0 / a)
    # PSP maximum for a current of unit amplitude.
    return (exp(1.0) / (tauSyn * CMem * b)
            * ((exp(-t_max / tauMem) - exp(-t_max / tauSyn)) / b
               - t_max * exp(-t_max / tauSyn)))
# Start from a clean kernel so repeated runs are reproducible.
nest.ResetKernel()
'''
Assigning the current time to a variable in order to determine the
build time of the network.
'''
startbuild = time.time()
'''
Assigning the simulation parameters to variables.
'''
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
'''
Definition of the parameters crucial for asynchronous irregular firing
of the neurons.
'''
g = 5.0 # ratio inhibitory weight/excitatory weight
eta = 2.0 # external rate relative to threshold rate
epsilon = 0.1 # connection probability
'''
Definition of the number of neurons in the network and the number of
neuron recorded from
'''
order = 2500
NE = 4*order # number of excitatory neurons
NI = 1*order # number of inhibitory neurons
N_neurons = NE+NI # number of neurons in total
N_rec = 50 # record from 50 neurons
'''
Definition of connectivity parameter
'''
CE = int(epsilon*NE) # number of excitatory synapses per neuron
CI = int(epsilon*NI) # number of inhibitory synapses per neuron
C_tot = int(CI+CE) # total number of synapses per neuron
'''
Initialization of the parameters of the integrate and fire neuron and
the synapses. The parameter of the neuron are stored in a dictionary.
The synaptic currents are normalized such that the amplitude of the
PSP is J.
'''
tauSyn = 0.5 # synaptic time constant in ms
tauMem = 20.0 # time constant of membrane potential in ms
CMem = 250.0 # capacitance of membrane in in pF
theta = 20.0 # membrane threshold potential in mV
neuron_params= {"C_m": CMem,
                "tau_m": tauMem,
                "tau_syn_ex": tauSyn,
                "tau_syn_in": tauSyn,
                "t_ref": 2.0,
                "E_L": 0.0,
                "V_reset": 0.0,
                "V_m": 0.0,
                "V_th": theta}
J = 0.1 # postsynaptic amplitude in mV
# Convert the desired PSP amplitude (mV) into a synaptic current
# amplitude (pA) using the peak of the alpha-shaped unit PSP.
J_unit = ComputePSPnorm(tauMem, CMem, tauSyn)
J_ex = J / J_unit # amplitude of excitatory postsynaptic current
J_in = -g*J_ex # amplitude of inhibitory postsynaptic current
'''
Definition of threshold rate, which is the external rate needed to fix
the membrane potential around its threshold, the external firing rate
and the rate of the poisson generator which is multiplied by the
in-degree CE and converted to Hz by multiplication by 1000.
'''
nu_th = (theta * CMem) / (J_ex*CE*exp(1)*tauMem*tauSyn)
nu_ex = eta*nu_th
p_rate = 1000.0*nu_ex*CE
'''
Configuration of the simulation kernel by the previously defined time
resolution used in the simulation. Setting "print_time" to True prints
the already processed simulation time as well as its percentage of the
total simulation time.
'''
nest.SetKernelStatus({"resolution": dt, "print_time": True, "overwrite_files": True})
print("Building network")
'''
Configuration of the model `iaf_psc_alpha` and `poisson_generator`
using SetDefaults(). This function expects the model to be the
inserted as a string and the parameter to be specified in a
dictionary. All instances of theses models created after this point
will have the properties specified in the dictionary by default.
'''
nest.SetDefaults("iaf_psc_alpha", neuron_params)
nest.SetDefaults("poisson_generator",{"rate": p_rate})
'''
Creation of the nodes using `Create`. We store the returned handles in
variables for later reference. Here the excitatory and inhibitory, as
well as the poisson generator and two spike detectors. The spike
detectors will later be used to record excitatory and inhibitory
spikes.
'''
nodes_ex = nest.Create("iaf_psc_alpha",NE)
nodes_in = nest.Create("iaf_psc_alpha",NI)
noise = nest.Create("poisson_generator")
espikes = nest.Create("spike_detector")
ispikes = nest.Create("spike_detector")
'''
Configuration of the spike detectors recording excitatory and
inhibitory spikes using `SetStatus`, which expects a list of node
handles and a list of parameter dictionaries. Setting the variable
"to_file" to True ensures that the spikes will be recorded in a .gdf
file starting with the string assigned to label. Setting "withtime"
and "withgid" to True ensures that each spike is saved to file by
stating the gid of the spiking neuron and the spike time in one line.
'''
nest.SetStatus(espikes,[{"label": "brunel-py-ex",
                         "withtime": True,
                         "withgid": True,
                         "to_file": True}])
nest.SetStatus(ispikes,[{"label": "brunel-py-in",
                         "withtime": True,
                         "withgid": True,
                         "to_file": True}])
print("Connecting devices")
'''
Definition of a synapse using `CopyModel`, which expects the model
name of a pre-defined synapse, the name of the customary synapse and
an optional parameter dictionary. The parameters defined in the
dictionary will be the default parameter for the customary
synapse. Here we define one synapse for the excitatory and one for the
inhibitory connections giving the previously defined weights and equal
delays.
'''
nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay})
nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay})
'''
Connecting the previously defined poisson generator to the excitatory
and inhibitory neurons using the excitatory synapse. Since the poisson
generator is connected to all neurons in the population the default
rule ('all_to_all') of Connect() is used. The synaptic properties are
inserted via syn_spec which expects a dictionary when defining
multiple variables or a string when simply using a pre-defined
synapse.
'''
nest.Connect(noise,nodes_ex, syn_spec="excitatory")
nest.Connect(noise,nodes_in, syn_spec="excitatory")
'''
Connecting the first N_rec nodes of the excitatory and inhibitory
population to the associated spike detectors using excitatory
synapses. Here the same shortcut for the specification of the synapse
as defined above is used.
'''
nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")
print("Connecting network")
print("Excitatory connections")
'''
Connecting the excitatory population to all neurons using the
pre-defined excitatory synapse. Beforehand, the connection parameter
are defined in a dictionary. Here we use the connection rule
'fixed_indegree', which requires the definition of the indegree. Since
the synapse specification is reduced to assigning the pre-defined
excitatory synapse it suffices to insert a string.
'''
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex+nodes_in, conn_params_ex, "excitatory")
print("Inhibitory connections")
'''
Connecting the inhibitory population to all neurons using the
pre-defined inhibitory synapse. The connection parameter as well as
the synapse paramtere are defined analogously to the connection from
the excitatory population defined above.
'''
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex+nodes_in, conn_params_in, "inhibitory")
'''
Storage of the time point after the buildup of the network in a
variable.
'''
endbuild=time.time()
'''
Simulation of the network.
'''
print("Simulating")
nest.Simulate(simtime)
'''
Storage of the time point after the simulation of the network in a
variable.
'''
endsimulate= time.time()
'''
Reading out the total number of spikes received from the spike
detector connected to the excitatory population and the inhibitory
population.
'''
events_ex = nest.GetStatus(espikes,"n_events")[0]
events_in = nest.GetStatus(ispikes,"n_events")[0]
'''
Calculation of the average firing rate of the excitatory and the
inhibitory neurons by dividing the total number of recorded spikes by
the number of neurons recorded from and the simulation time. The
multiplication by 1000.0 converts the unit 1/ms to 1/s=Hz.
'''
rate_ex = events_ex/simtime*1000.0/N_rec
rate_in = events_in/simtime*1000.0/N_rec
'''
Reading out the number of connections established using the excitatory
and inhibitory synapse model. The numbers are summed up resulting in
the total number of synapses.
'''
num_synapses = nest.GetDefaults("excitatory")["num_connections"]+\
               nest.GetDefaults("inhibitory")["num_connections"]
'''
Establishing the time it took to build and simulate the network by
taking the difference of the pre-defined time variables.
'''
build_time = endbuild-startbuild
sim_time = endsimulate-endbuild
'''
Printing the network properties, firing rates and building times.
'''
print("Brunel network simulation (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
# NOTE(review): "Exitatory" typo in the output string below — left
# untouched here since it is runtime output, not a comment.
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
'''
Plot a raster of the excitatory neurons and a histogram.
'''
nest.raster_plot.from_device(espikes, hist=True)
"NEURON"
] | 9a0943f1258171c92e8b1e44814246ed714d43f14ca36546f543ce50d6dd5713 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**********************************************
**espresso.interaction.LennardJonesAutoBonds**
**********************************************
"""
from espresso import pmi, infinity
from espresso.esutil import *
from espresso.Exceptions import MissingFixedPairList
from espresso.interaction.Potential import *
from espresso.interaction.Interaction import *
from _espresso import interaction_LennardJonesAutoBonds, \
interaction_VerletListLennardJonesAutoBonds, \
interaction_VerletListAdressLennardJonesAutoBonds, \
interaction_VerletListHadressLennardJonesAutoBonds, \
interaction_CellListLennardJonesAutoBonds, \
interaction_FixedPairListLennardJonesAutoBonds
class LennardJonesAutoBondsLocal(PotentialLocal, interaction_LennardJonesAutoBonds):
    """The (local) Lennard-Jones auto bond potential.

    :param epsilon: LJ well depth.
    :param sigma: LJ zero-crossing distance.
    :param cutoff: interaction cutoff radius (default: infinity).
    :param bondlist: FixedPairList that receives newly created bonds (required).
    :param maxcrosslinks: maximum number of new bonds created per particle.
    :raises MissingFixedPairList: if no ``bondlist`` is supplied.
    """
    def __init__(self, epsilon=1.0, sigma=1.0,
                 cutoff=infinity, bondlist=None, maxcrosslinks=2):
        """Initialize the local Lennard Jones auto bonds object."""
        # Construct the C++ backend only on worker ranks that take part in
        # the PMI call (or everywhere when PMI is not active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Bug fix: identity comparison (was ``bondlist == None``).
            if bondlist is None:
                # Typo fix in the message: was "LennardsJonesAutoBonds".
                raise MissingFixedPairList('LennardJonesAutoBonds needs a FixedPairList to be able to create new bonds')
            cxxinit(self, interaction_LennardJonesAutoBonds, epsilon, sigma, cutoff, bondlist, maxcrosslinks)
class VerletListLennardJonesAutoBondsLocal(InteractionLocal, interaction_VerletListLennardJonesAutoBonds):
    """Worker-side Lennard-Jones auto-bonds interaction on a Verlet list."""

    def __init__(self, vl):
        # Run only on participating worker ranks (or everywhere when PMI
        # is inactive).
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListLennardJonesAutoBonds, vl)

    def setPotential(self, type1, type2, potential):
        """Assign the pair potential for the (type1, type2) combination."""
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)

    def getPotential(self, type1, type2):
        """Return the pair potential registered for (type1, type2)."""
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getPotential(self, type1, type2)

    def getVerletListLocal(self):
        """Return the underlying Verlet list of this local interaction."""
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getVerletList(self)
class VerletListAdressLennardJonesAutoBondsLocal(InteractionLocal, interaction_VerletListAdressLennardJonesAutoBonds):
    """Worker-side AdResS variant of the Verlet-list auto-bonds interaction."""

    def __init__(self, vl, fixedtupleList):
        # Run only on participating worker ranks (or everywhere when PMI
        # is inactive).
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListAdressLennardJonesAutoBonds, vl, fixedtupleList)

    def setPotential(self, type1, type2, potential):
        """Assign the pair potential for the (type1, type2) combination."""
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)
class VerletListHadressLennardJonesAutoBondsLocal(InteractionLocal, interaction_VerletListHadressLennardJonesAutoBonds):
    """Worker-side H-AdResS variant of the Verlet-list auto-bonds interaction."""

    def __init__(self, vl, fixedtupleList):
        # Run only on participating worker ranks (or everywhere when PMI
        # is inactive).
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListHadressLennardJonesAutoBonds, vl, fixedtupleList)

    def setPotential(self, type1, type2, potential):
        """Assign the pair potential for the (type1, type2) combination."""
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)
class CellListLennardJonesAutoBondsLocal(InteractionLocal, interaction_CellListLennardJonesAutoBonds):
    """Worker-side Lennard-Jones auto-bonds interaction on cell lists."""

    def __init__(self, stor):
        # Run only on participating worker ranks (or everywhere when PMI
        # is inactive).
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_CellListLennardJonesAutoBonds, stor)

    def setPotential(self, type1, type2, potential):
        """Assign the pair potential for the (type1, type2) combination."""
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListLennardJonesAutoBondsLocal(InteractionLocal, interaction_FixedPairListLennardJonesAutoBonds):
    """Worker-side Lennard-Jones auto-bonds interaction on a FixedPairList."""

    def __init__(self, system, vl, potential):
        # Run only on participating worker ranks (or everywhere when PMI
        # is inactive).
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedPairListLennardJonesAutoBonds, system, vl, potential)

    def setPotential(self, potential):
        """Assign the bonded pair potential."""
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)
if pmi.isController:
    # Controller-side proxy classes.  The pmi.Proxy metaclass consumes
    # ``pmiproxydefs`` at class-creation time: ``cls`` names the worker-side
    # *Local class, while ``pmicall``/``pmiproperty`` list the methods and
    # properties that are transparently broadcast to all workers.
    class LennardJonesAutoBonds(Potential):
        'The Lennard-Jones auto bonds potential.'
        # NOTE(review): no explicit ``__metaclass__`` here, unlike the
        # Interaction proxies below -- presumably the Potential base class
        # already provides the proxy metaclass; confirm against Potential.py.
        pmiproxydefs = dict(
            cls = 'espresso.interaction.LennardJonesAutoBondsLocal',
            pmiproperty = ['epsilon', 'sigma']
            )

    class VerletListLennardJonesAutoBonds(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.VerletListLennardJonesAutoBondsLocal',
            pmicall = ['setPotential','getPotential','getVerletList']
            )

    class VerletListAdressLennardJonesAutoBonds(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.VerletListAdressLennardJonesAutoBondsLocal',
            pmicall = ['setPotentialAT', 'setPotentialCG']
            )

    class VerletListHadressLennardJonesAutoBonds(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.VerletListHadressLennardJonesAutoBondsLocal',
            pmicall = ['setPotentialAT', 'setPotentialCG']
            )

    class CellListLennardJonesAutoBonds(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.CellListLennardJonesAutoBondsLocal',
            pmicall = ['setPotential']
            )

    class FixedPairListLennardJonesAutoBonds(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.FixedPairListLennardJonesAutoBondsLocal',
            pmicall = ['setPotential']
            )
| BackupTheBerlios/espressopp | src/interaction/LennardJonesAutoBonds.py | Python | gpl-3.0 | 7,807 | [
"ESPResSo"
] | d9acd84347aa39b513379f03c724b092500d4f2b503e800aa5cf92de7bdba816 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns ``(matching, non_matching)``, each preserving input order.
    """
    matching: List[Any] = []
    non_matching: List[Any] = []
    for element in iterator:
        if predicate(element):
            matching.append(element)
        else:
            non_matching.append(element)
    # Returns trueList, falseList
    return matching, non_matching
class gatewayCallTransformer(cst.CSTTransformer):
    # Control-plane parameters that stay as real keyword arguments instead
    # of being folded into the request dict.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # For each flattened API method: its request-field parameter names, in
    # declaration order (positional args are matched against this order).
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'delete_resource': ('content_type', 'data', 'extensions', ),
        'get_resource': ('content_type', 'data', 'extensions', ),
        'patch_resource': ('content_type', 'data', 'extensions', ),
        'post_resource': ('content_type', 'data', 'extensions', ),
        'put_resource': ('content_type', 'data', 'extensions', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite ``client.method(a, b, retry=r)`` into
        ``client.method(request={'field': a, ...}, retry=r)``."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Split request-field kwargs from control-plane kwargs.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )

        # Positional control args can only appear after all request fields;
        # turn them back into keyword args.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        # Collect every request field into one dict literal keyed by name.
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=gatewayCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    for root, _, files in os.walk(in_dir):
        for filename in files:
            # Only python sources are transformed.
            if os.path.splitext(filename)[1] != ".py":
                continue
            source_path = pathlib.Path(os.path.join(root, filename))
            with open(source_path, 'r') as handle:
                original_source = handle.read()

            # Parse the code and insert method call fixes.
            rewritten = cst.parse_module(original_source).visit(transformer)

            # Mirror the relative directory layout under out_dir and write
            # the updated source there.
            destination = out_dir.joinpath(source_path.relative_to(in_dir))
            destination.parent.mkdir(parents=True, exist_ok=True)
            with open(destination, 'w') as handle:
                handle.write(rewritten.code)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the gateway client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)

    def _fail(message):
        # Same behaviour as before: message on stderr, exit code -1.
        print(message, file=sys.stderr)
        sys.exit(-1)

    # Validate preconditions before touching anything (order preserved).
    if not input_dir.is_dir():
        _fail(f"input directory '{input_dir}' does not exist or is not a directory")
    if not output_dir.is_dir():
        _fail(f"output directory '{output_dir}' does not exist or is not a directory")
    if os.listdir(output_dir):
        _fail(f"output directory '{output_dir}' is not empty")

    fix_files(input_dir, output_dir)
| googleapis/python-gke-connect-gateway | scripts/fixup_gateway_v1beta1_keywords.py | Python | apache-2.0 | 6,237 | [
"VisIt"
] | aa17e8b517b7edaf7b00fd70f9141bf7bd56b55c0e8ae71a917e88789e05fd66 |
###############################################################################
# Name: s.py #
# Purpose: Define S and R syntax for highlighting and other features #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: s.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for the S and R statistical languages
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _s.py 70466 2012-01-26 20:55:16Z CJP $"
__revision__ = "$Revision: 70466 $"
#-----------------------------------------------------------------------------#
# Imports
from pygments.token import Token
from pygments.lexers import get_lexer_by_name
import wx
import wx.stc as stc
#Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
# Style Id's
# Style ids used by the container lexer (wx < 2.9), in Pygments-token order.
(STC_S_DEFAULT,
 STC_S_COMMENT,
 STC_S_NUMBER,
 STC_S_STRING,
 STC_S_STRINGEOL,
 STC_S_OPERATOR,
 STC_S_KEYWORD) = range(7)
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
# S language keywords (used with the container lexer on wx < 2.9).
KEYWORDS = ("all array break call else exp for function if length library list "
            "match max mean min while return try NULL NA TRUE FALSE")

# R reserved words (primary keyword set for the native R lexer, wx >= 2.9).
R_KEYWORDS = "if else repeat while function for in next break TRUE FALSE NULL NA Inf NaN"

# R base-package function names (secondary keyword set).
R_KEYWORDS2 = ("abbreviate abline abs acf acos acosh addmargins aggregate agrep "
               "alarm alias alist all anova any aov aperm append apply approx "
               "approxfun apropos ar args arima array arrows asin asinh assign "
               "assocplot atan atanh attach attr attributes autoload autoloader "
               "ave axis backsolve barplot basename beta bindtextdomain binomial "
               "biplot bitmap bmp body box boxplot bquote break browser builtins "
               "bxp by bzfile c call cancor capabilities casefold cat category "
               "cbind ccf ceiling character charmatch chartr chol choose chull "
               "citation class close cm cmdscale codes coef coefficients col "
               "colnames colors colorspaces colours comment complex confint "
               "conflicts contour contrasts contributors convolve cophenetic "
               "coplot cor cos cosh cov covratio cpgram crossprod cummax cummin "
               "cumprod cumsum curve cut cutree cycle data dataentry date dbeta "
               "dbinom dcauchy dchisq de debug debugger decompose delay deltat "
               "demo dendrapply density deparse deriv det detach determinant "
               "deviance dexp df dfbeta dfbetas dffits dgamma dgeom dget dhyper "
               "diag diff diffinv difftime digamma dim dimnames dir dirname "
               "dist dlnorm dlogis dmultinom dnbinom dnorm dotchart double "
               "dpois dput drop dsignrank dt dump dunif duplicated dweibull "
               "dwilcox eapply ecdf edit effects eigen emacs embed end "
               "environment eval evalq example exists exp expression factanal "
               "factor factorial family fft fifo file filter find fitted fivenum "
               "fix floor flush for force formals format formula forwardsolve "
               "fourfoldplot frame frequency ftable function gamma gaussian gc "
               "gcinfo gctorture get getenv geterrmessage gettext gettextf getwd "
               "gl glm globalenv gray grep grey grid gsub gzcon gzfile hat "
               "hatvalues hcl hclust head heatmap help hist history hsv "
               "httpclient iconv iconvlist identical identify if ifelse image "
               "influence inherits integer integrate interaction interactive "
               "intersect invisible isoreg jitter jpeg julian kappa kernapply "
               "kernel kmeans knots kronecker ksmooth labels lag lapply layout "
               "lbeta lchoose lcm legend length letters levels lfactorial "
               "lgamma library licence license line lines list lm load "
               "loadhistory loadings local locator loess log logb logical "
               "loglin lowess ls lsfit machine mad mahalanobis makepredictcall "
               "manova mapply match matlines matplot matpoints matrix max mean "
               "median medpolish menu merge message methods mget min missing "
               "mode monthplot months mosaicplot mtext mvfft names napredict "
               "naprint naresid nargs nchar ncol next nextn ngettext nlevels nlm "
               "nls noquote nrow numeric objects offset open optim optimise "
               "optimize options order ordered outer pacf page pairlist pairs "
               "palette par parse paste pbeta pbinom pbirthday pcauchy pchisq "
               "pdf pentagamma person persp pexp pf pgamma pgeom phyper pi pico "
               "pictex pie piechart pipe plclust plnorm plogis plot pmatch pmax "
               "pmin pnbinom png pnorm points poisson poly polygon polym "
               "polyroot postscript power ppoints ppois ppr prcomp predict "
               "preplot pretty princomp print prmatrix prod profile profiler "
               "proj promax prompt provide psigamma psignrank pt ptukey punif "
               "pweibull pwilcox q qbeta qbinom qbirthday qcauchy qchisq qexp qf "
               "qgamma qgeom qhyper qlnorm qlogis qnbinom qnorm qpois qqline "
               "qqnorm qqplot qr qsignrank qt qtukey quantile quarters quasi "
               "quasibinomial quasipoisson quit qunif quote qweibull qwilcox "
               "rainbow range rank raw rbeta rbind rbinom rcauchy rchisq "
               "readline real recover rect reformulate regexpr relevel remove "
               "reorder rep repeat replace replicate replications require "
               "reshape resid residuals restart return rev rexp rf rgamma rgb "
               "rgeom rhyper rle rlnorm rlogis rm rmultinom rnbinom rnorm round "
               "row rownames rowsum rpois rsignrank rstandard rstudent rt rug "
               "runif runmed rweibull rwilcox sample sapply save savehistory "
               "scale scan screen screeplot sd search searchpaths seek segments "
               "seq sequence serialize setdiff setequal setwd shell sign signif "
               "sin single sinh sink smooth solve sort source spectrum spline "
               "splinefun split sprintf sqrt stack stars start stderr stdin "
               "stdout stem step stepfun stl stop stopifnot str strftime "
               "strheight stripchart strptime strsplit strtrim structure "
               "strwidth strwrap sub subset substitute substr substring sum "
               "summary sunflowerplot supsmu svd sweep switch symbols symnum "
               "system t table tabulate tail tan tanh tapply tempdir tempfile "
               "termplot terms tetragamma text time title toeplitz tolower "
               "topenv toupper trace traceback transform trigamma trunc truncate "
               "try ts tsdiag tsp typeof unclass undebug union unique uniroot "
               "unix unlink unlist unname unserialize unsplit unstack untrace "
               "unz update upgrade url var varimax vcov vector version vi "
               "vignette warning warnings weekdays weights which while window "
               "windows with write wsbrowser xedit xemacs xfig xinch xor xtabs "
               "xyinch yinch zapsmall")

# Names from common R datasets/packages (tertiary keyword set).
R_KEYWORDS3 = ("acme aids aircondit amis aml banking barchart barley beaver "
               "bigcity boot brambles breslow bs bwplot calcium cane "
               "capability cav censboot channing city claridge cloth cloud "
               "coal condense contourplot control corr darwin densityplot "
               "dogs dotplot ducks empinf envelope environmental ethanol fir "
               "frets gpar grav gravity grob hirose histogram islay knn "
               "larrows levelplot llines logit lpoints lsegments lset ltext "
               "lvqinit lvqtest manaus melanoma melanoma motor multiedit "
               "neuro nitrofen nodal ns nuclear oneway parallel paulsen "
               "poisons polar qq qqmath remission rfs saddle salinity shingle "
               "simplex singer somgrid splom stripplot survival tau tmd "
               "tsboot tuna unit urine viewport wireframe wool xyplot")
#---- Syntax Style Specs ----#
# wx 2.9+ ships a native R lexer (STC_LEX_R); older versions fall back to
# the module-local STC_S_* ids driven by the container lexer below.
if wx.VERSION >= (2, 9, 0, 0, ''):
    SYNTAX_ITEMS = [ (stc.STC_R_BASEKWORD, 'class_style'), #TODO
                     (stc.STC_R_COMMENT, 'comment_style'),
                     (stc.STC_R_DEFAULT, 'default_style'),
                     (stc.STC_R_IDENTIFIER, 'default_style'),
                     (stc.STC_R_INFIX, 'default_style'), #TODO
                     (stc.STC_R_INFIXEOL, 'default_style'), #TODO
                     (stc.STC_R_KWORD, 'keyword_style'),
                     (stc.STC_R_NUMBER, 'number_style'),
                     (stc.STC_R_OPERATOR, 'operator_style'),
                     (stc.STC_R_OTHERKWORD, 'keyword2_style'),
                     (stc.STC_R_STRING, 'string_style'),
                     (stc.STC_R_STRING2, 'char_style')] #TODO
else:
    SYNTAX_ITEMS = [ (STC_S_DEFAULT, 'default_style'),
                     (STC_S_COMMENT, 'comment_style'),
                     (STC_S_NUMBER, 'number_style'),
                     (STC_S_STRING, 'string_style'),
                     (STC_S_STRINGEOL, 'stringeol_style'),
                     (STC_S_OPERATOR, 'operator_style'),
                     (STC_S_KEYWORD, 'keyword_style') ]
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
    """SyntaxData object for the R and S statistical languages."""

    def __init__(self, langid):
        super(SyntaxData, self).__init__(langid)

        # Setup: wx 2.9+ has a native R lexer; older versions use a
        # container lexer driven by the Pygments-based StyleText below.
        if wx.VERSION >= (2, 9, 0, 0, ''):
            self.SetLexer(stc.STC_LEX_R)
        else:
            self.SetLexer(stc.STC_LEX_CONTAINER)
            self.RegisterFeature(synglob.FEATURE_STYLETEXT, StyleText)

    def GetKeywords(self):
        """Returns Specified Keywords List """
        if wx.VERSION < (2, 9, 0, 0, ''):
            return [(1, KEYWORDS)]
        return [(0, R_KEYWORDS), (1, R_KEYWORDS2), (2, R_KEYWORDS3)]

    def GetSyntaxSpec(self):
        """Syntax Specifications """
        return SYNTAX_ITEMS

    def GetCommentPattern(self):
        """Returns a list of characters used to comment a block of code """
        return [u"#"]
#-----------------------------------------------------------------------------#
def StyleText(_stc, start, end):
    """Style the text
    @param _stc: Styled text control instance
    @param start: Start position
    @param end: end position
    @todo: performance improvements
    @todo: style errors caused by unicode characters (related to internal utf8)
    """
    # NOTE(review): styling always restarts from position 0 rather than from
    # ``start`` -- the whole document up to ``end`` is re-tokenized each call.
    cpos = 0
    _stc.StartStyling(cpos, 0x1f)  # 0x1f = style-bit mask
    lexer = get_lexer_by_name("s")
    is_wineol = _stc.GetEOLMode() == stc.STC_EOL_CRLF
    for token, txt in lexer.get_tokens(_stc.GetTextRange(0, end)):
        # Unknown Pygments tokens fall back to the default style.
        style = TOKEN_MAP.get(token, STC_S_DEFAULT)
        tlen = len(txt)

        # Account for \r\n end of line characters
        # (Pygments yields "\n" only, but the buffer holds two bytes per EOL)
        if is_wineol and "\n" in txt:
            tlen += txt.count("\n")

        if tlen:
            _stc.SetStyling(tlen, style)
        cpos += tlen
        _stc.StartStyling(cpos, 0x1f)
#-----------------------------------------------------------------------------#
# Map Pygments token types onto this module's STC_S_* style ids; used by
# StyleText above when running with the container lexer (wx < 2.9).
TOKEN_MAP = { Token.Literal.String : STC_S_STRING,
              Token.Comment : STC_S_COMMENT,
              Token.Comment.Single : STC_S_COMMENT,
              Token.Operator : STC_S_OPERATOR,
              Token.Punctuation : STC_S_OPERATOR,
              Token.Number : STC_S_NUMBER,
              Token.Literal.Number : STC_S_NUMBER,
              Token.Keyword : STC_S_KEYWORD,
              Token.Keyword.Constant: STC_S_KEYWORD }
| garrettcap/Bulletproof-Backup | wx/tools/Editra/src/syntax/_s.py | Python | gpl-2.0 | 12,071 | [
"Gaussian"
] | 1d75aa1e5e706e6ef2475b544a2d93e496c10586c23ce79f34031d5aa4c62408 |
# Copyright (C) 2011-2012 CRS4.
#
# This file is part of Seal.
#
# Seal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Seal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Seal. If not, see <http://www.gnu.org/licenses/>.
"""
How to use this test:
You need to provide as command line arguments:
1. The fasta reference sequence file name
2, 3. The two reads and mates sequence files
use LOG_LEVEL = logging.DEBUG; N_ITER = 1 to see details.
use LOG_LEVEL = logging.INFO; N_ITER = 10 to check for leaks.
"""
import sys, gc, logging
#print gc.isenabled()
# Report objects the collector cannot free -- useful when hunting leaks in
# the C extension wrappers exercised below.
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)

import itertools as it

import seal.lib.aligner.bwa as bwa
import Bio.SeqIO

from seal.lib.util.meminfo import meminfo

MB = float(2**20)  # bytes per MiB, for pretty-printing memory figures
def print_meminfo(status, log_function=logging.debug):
    """Log the process' size/resident memory (in MiB), tagged with *status*."""
    info = meminfo()
    log_function("%s: size = %.1fm; resident = %.1fm"
                 % (status, info["size"] / MB, info["resident"] / MB))
def run_bwa_py_sampe_alloc_only(refseq_fname, read_fname, mate_fname):
    """Exercise only the allocation/deallocation path of the bwa wrappers.

    Loads the read pairs, restores index and reference, then frees
    everything again without running any alignment step -- a baseline for
    spotting leaks in the C bindings.
    """
    read_flow = Bio.SeqIO.parse(open(read_fname), 'fastq-illumina')
    mate_flow = Bio.SeqIO.parse(open(mate_fname), 'fastq-illumina')
    pairs = [x for x in it.izip(read_flow, mate_flow)]
    print_meminfo("AFTER READING PAIRS")
    bwsa = bwa.build_bws_array(pairs)
    print_meminfo("AFTER BUILDING BWSA")
    bwts = bwa.restore_index(refseq_fname)
    print_meminfo("AFTER RESTORING INDEX")
    bnsp, pacseq = bwa.restore_reference(refseq_fname)
    print_meminfo("AFTER RESTORING REFERENCE")
    gopt, popt = bwa.gap_init_opt(), bwa.pe_init_opt()
    ii, last_ii = bwa.isize_info_t(), bwa.isize_info_t()
    last_ii.avg = -1.0  # sentinel: no previous insert-size estimate
    l = len(pairs)
    print_meminfo("AFTER INIT OPT & II")

    # deallocate seq & ref data
    for i in 0, 1:
        bwa.free_seq(l, bwsa[i])
        bwa.bwt_destroy(bwts[i])
    bwa.bns_destroy(bnsp)
    print_meminfo("AFTER DEALLOC")

    # Drop the remaining Python references and force a collection so any
    # uncollectable cycles show up via gc.DEBUG_UNCOLLECTABLE.
    del pacseq
    n_unreachable = gc.collect()
    logging.debug("n_unreachable = %d" % n_unreachable)
    print_meminfo("AFTER DEL PACSEQ")
    del pairs
    n_unreachable = gc.collect()
    logging.debug("n_unreachable = %d" % n_unreachable)
    print_meminfo("AFTER DEL PAIRS")
def run_bwa_py_sampe(refseq_fname, read_fname, mate_fname):
    """Run the full paired-end (sampe-style) pipeline through the bwa
    wrappers, logging memory usage after every stage.

    Stages mirror bwa sampe: gapped alignment per end, paired position
    calculation, paired Smith-Waterman rescue, gapped refinement, and hit
    analysis, followed by explicit deallocation of the C-side data.
    """
    read_flow = Bio.SeqIO.parse(open(read_fname), 'fastq-illumina')
    mate_flow = Bio.SeqIO.parse(open(mate_fname), 'fastq-illumina')
    pairs = [x for x in it.izip(read_flow, mate_flow)]
    print_meminfo("AFTER READING PAIRS")
    bwsa = bwa.build_bws_array(pairs)
    print_meminfo("AFTER BUILDING BWSA")
    bwts = bwa.restore_index(refseq_fname)
    print_meminfo("AFTER RESTORING INDEX")
    bnsp, pacseq = bwa.restore_reference(refseq_fname)
    print_meminfo("AFTER RESTORING REFERENCE")
    gopt, popt = bwa.gap_init_opt(), bwa.pe_init_opt()
    ii, last_ii = bwa.isize_info_t(), bwa.isize_info_t()
    last_ii.avg = -1.0  # sentinel: no previous insert-size estimate
    l = len(pairs)
    print_meminfo("AFTER INIT OPT & II")
    # Align each end of the pairs against the two BWTs.
    bwa.cal_sa_reg_gap(0, bwts, l, bwsa[0], gopt)
    bwa.cal_sa_reg_gap(0, bwts, l, bwsa[1], gopt)
    print_meminfo("AFTER CAL_SA_REG_GAP")
    cnt_chg = bwa.cal_pac_pos_pe(bwts, l, bwsa, ii, popt, gopt, last_ii)
    print_meminfo("AFTER CAL_PAC_POS_PE")
    bwa.paired_sw(bnsp, pacseq, l, bwsa, popt, ii)
    print_meminfo("AFTER PAIRED_SW")
    bwa.refine_gapped(bnsp, l, bwsa[0], pacseq)
    bwa.refine_gapped(bnsp, l, bwsa[1], pacseq)
    print_meminfo("AFTER REFINE_GAPPED")
    # Analyze each hit in both read/mate orientations.
    for k in xrange(l):
        v1 = bwa.analyze_hit(gopt[0], bnsp, bwsa[0][k], bwsa[1][k])
        v2 = bwa.analyze_hit(gopt[0], bnsp, bwsa[1][k], bwsa[0][k])
    print_meminfo("AFTER ANALYZE_HIT")

    # deallocate seq & ref data
    for i in 0, 1:
        bwa.free_seq(l, bwsa[i])
        bwa.bwt_destroy(bwts[i])
    bwa.bns_destroy(bnsp)
    print_meminfo("AFTER DEALLOC")

    # Drop the remaining Python references and force a collection so any
    # uncollectable cycles show up via gc.DEBUG_UNCOLLECTABLE.
    del pacseq
    n_unreachable = gc.collect()
    logging.debug("n_unreachable = %d" % n_unreachable)
    print_meminfo("AFTER DEL PACSEQ")
    del pairs
    n_unreachable = gc.collect()
    logging.debug("n_unreachable = %d" % n_unreachable)
    print_meminfo("AFTER DEL PAIRS")
def main(argv):
    """Entry point: parse CLI arguments and run the chosen pipeline N_ITER
    times (see the module docstring for the two canned configurations)."""
    # DEBUG + 1 iteration shows per-stage detail; INFO + 10 iterations is
    # better for spotting a steadily growing footprint (leaks).
    LOG_LEVEL = logging.DEBUG; N_ITER = 1
    #LOG_LEVEL = logging.INFO; N_ITER = 10
    logging.basicConfig(level=LOG_LEVEL)
    #fun = run_bwa_py_sampe_alloc_only
    fun = run_bwa_py_sampe
    print_meminfo("START", logging.info)
    try:
        refseq_fn = argv[1]
        read_fn = argv[2]
        mate_fn = argv[3]
    except IndexError:
        sys.exit("Usage: %s REFSEQ_FN READ_FN MATE_FN" % sys.argv[0] + __doc__)
    #u.build_index(refseq_fn)
    for i in xrange(N_ITER):
        fun(refseq_fn, read_fn, mate_fn)
        print_meminfo("END ITERATION %d" % i, logging.info)


if __name__ == "__main__":
    main(sys.argv)
| crs4/seal | tests/tseal/lib/aligner/bwa/test_bwa_memory.py | Python | gpl-3.0 | 4,962 | [
"BWA"
] | 2a087ac3bf5647732a66ab1bebb79ee6667ce9502a7a6090b9441bf9adb76b5f |
import torch
import numpy as np
from torch import nn
def identity(x):
    """No-op activation: return the input unchanged."""
    return x


# Registry mapping activation names to callables (modules or functions).
_str_to_activation = dict(
    identity=identity,
    relu=nn.ReLU(),
    tanh=nn.Tanh(),
    leaky_relu=nn.LeakyReLU(),
    sigmoid=nn.Sigmoid(),
    selu=nn.SELU(),
    softplus=nn.Softplus(),
)


def activation_from_string(string):
    """Look up an activation callable by its registered name."""
    return _str_to_activation[string]
def soft_update_from_to(source, target, tau):
    """Polyak-average source params into target: t <- (1 - tau)*t + tau*s."""
    for tgt, src in zip(target.parameters(), source.parameters()):
        blended = tgt.data * (1.0 - tau) + src.data * tau
        tgt.data.copy_(blended)
def copy_model_params_from_to(source, target):
    """Overwrite each target parameter with the matching source parameter."""
    for tgt, src in zip(target.parameters(), source.parameters()):
        tgt.data.copy_(src.data)
def maximum_2d(t1, t2):
    """Element-wise maximum of two same-shape 2-D tensors.

    Bug fix: the original stacked the inputs, reduced with
    ``torch.max(..., dim=2)`` and then called ``squeeze(2)`` on the result;
    since ``torch.max`` no longer keeps the reduced dim by default, the
    trailing ``squeeze(2)`` raised on the 2-D values tensor.  The two-tensor
    form of ``torch.max`` computes the element-wise maximum directly.
    """
    return torch.max(t1, t2)
def kronecker_product(t1, t2):
    """
    Computes the Kronecker product between two tensors
    See https://en.wikipedia.org/wiki/Kronecker_product
    """
    rows1, cols1 = t1.size()
    rows2, cols2 = t2.size()
    out_rows = rows1 * rows2
    out_cols = cols1 * cols2

    # Tile t2 over the full output grid ...
    # TODO(vitchyr): see if you can use expand instead of repeat
    t2_tiled = t2.repeat(rows1, cols1)
    # ... and blow up each t1 entry into a constant (rows2 x cols2) patch.
    t1_patches = (
        t1.unsqueeze(2)
          .unsqueeze(3)
          .repeat(1, rows2, cols2, 1)
          .view(out_rows, out_cols)
    )
    return t1_patches * t2_tiled
def alpha_dropout(
        x,
        p=0.05,
        alpha=-1.7580993408473766,
        fixedPointMean=0,
        fixedPointVar=1,
        training=False,
):
    """Alpha dropout (the SELU-compatible dropout of Klambauer et al. 2017).

    Dropped units are set to ``alpha`` (the SELU saturation value) instead of
    zero, and the output is affinely rescaled so the activations keep the
    given fixed-point mean and variance.

    :param x: input tensor.
    :param p: drop probability.
    :param alpha: value assigned to dropped units.
    :param fixedPointMean: target mean to preserve.
    :param fixedPointVar: target variance to preserve.
    :param training: when False (or p == 0), ``x`` is returned unchanged.
    """
    keep_prob = 1 - p
    if keep_prob == 1 or not training:
        return x
    # Affine correction (a, b) restoring the fixed-point moments after units
    # are replaced by alpha with probability p.
    a = np.sqrt(fixedPointVar / (keep_prob * (
        (1 - keep_prob) * pow(alpha - fixedPointMean, 2) + fixedPointVar)))
    b = fixedPointMean - a * (
        keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
    # (Fix: the original recomputed ``keep_prob = 1 - p`` a second time here.)
    # NOTE(review): the mask is drawn without a device argument, so non-CPU
    # inputs would fail here -- behaviour unchanged from the original.
    random_tensor = keep_prob + torch.rand(x.size())
    binary_tensor = torch.floor(random_tensor)  # 1 = keep, 0 = drop
    kept = x.mul(binary_tensor)
    ret = kept + alpha * (1 - binary_tensor)
    ret.mul_(a).add_(b)
    return ret


def alpha_selu(x, training=False):
    """Apply SELU to ``x`` and then alpha dropout.

    Bug fix: the original called ``nn.SELU(x)``, which *constructs* a SELU
    module (interpreting ``x`` as the ``inplace`` flag) instead of applying
    SELU to ``x``.
    """
    return alpha_dropout(nn.functional.selu(x), training=training)
def double_moments(x, y):
    """
    Returns the first two moments between x and y.

    Specifically, for each vector x_i and y_i in x and y, compute their
    outer-product. Flatten this resulting matrix and return it.

    The first moments (i.e. x_i and y_i) are included by appending a `1` to x_i
    and y_i before taking the outer product.
    :param x: Shape [batch_size, feature_x_dim]
    :param y: Shape [batch_size, feature_y_dim]
    :return: Shape [batch_size, (feature_x_dim + 1) * (feature_y_dim + 1)
    """
    batch_size, x_dim = x.size()
    # Bug fix: y_dim was read from x.size(), which broke whenever the two
    # feature dimensions differed.
    _, y_dim = y.size()
    # Append a constant 1 so the outer product also carries the first moments.
    x = torch.cat((x, torch.ones(batch_size, 1)), dim=1)
    y = torch.cat((y, torch.ones(batch_size, 1)), dim=1)
    x_dim += 1
    y_dim += 1
    x = x.unsqueeze(2)
    y = y.unsqueeze(1)

    outer_prod = (
        x.expand(batch_size, x_dim, y_dim) * y.expand(batch_size, x_dim,
                                                      y_dim)
    )
    return outer_prod.view(batch_size, -1)
def batch_diag(diag_values, diag_mask=None):
    """Build a batch of diagonal matrices from per-row diagonal entries."""
    batch_size, dim = diag_values.size()
    if diag_mask is None:
        diag_mask = torch.diag(torch.ones(dim))
    mask_expanded = diag_mask.unsqueeze(0).expand(batch_size, dim, dim)
    values_expanded = diag_values.unsqueeze(1).expand(batch_size, dim, dim)
    return values_expanded * mask_expanded
def batch_square_vector(vector, M):
    """
    Compute x^T M x
    """
    col = vector.unsqueeze(2)        # (batch, dim, 1)
    row = col.transpose(2, 1)        # (batch, 1, dim)
    return torch.bmm(torch.bmm(row, M), col).squeeze(2)
def fanin_init(tensor):
    """In-place uniform fan-in init: U(-1/sqrt(fan_in), 1/sqrt(fan_in))."""
    shape = tensor.size()
    if len(shape) == 2:
        fan_in = shape[0]
    elif len(shape) > 2:
        fan_in = np.prod(shape[1:])
    else:
        raise Exception("Shape must be have dimension at least 2.")
    limit = 1. / np.sqrt(fan_in)
    return tensor.data.uniform_(-limit, limit)
def fanin_init_weights_like(tensor):
    """Return a new tensor shaped like ``tensor`` with uniform fan-in init."""
    shape = tensor.size()
    if len(shape) == 2:
        fan_in = shape[0]
    elif len(shape) > 2:
        fan_in = np.prod(shape[1:])
    else:
        raise Exception("Shape must be have dimension at least 2.")
    limit = 1. / np.sqrt(fan_in)
    fresh = FloatTensor(tensor.size())
    fresh.uniform_(-limit, limit)
    return fresh
def almost_identity_weights_like(tensor):
    """
    Return a new FloatTensor shaped like ``tensor``, set to W = I + 0.01 * noise.

    NOTE(review): the original docstring said "Gaussian", but the noise is
    drawn with ``np.random.rand`` (uniform on [0, 1)) -- confirm intent.
    :param tensor: tensor whose 2-D shape is copied (np.eye takes rows, cols).
    :return: new FloatTensor with the near-identity values.
    """
    shape = tensor.size()
    init_value = np.eye(*shape)
    init_value += 0.01 * np.random.rand(*shape)
    return FloatTensor(init_value)
def clip1(x):
    """Clamp every element of ``x`` into the interval [-1, 1]."""
    return torch.clamp(x, min=-1, max=1)
def compute_conv_output_size(h_in, w_in, kernel_size, stride, padding=0):
    """Output (height, width) of a square-kernel convolution layer."""
    h_out = (h_in + 2 * padding - (kernel_size - 1) - 1) / stride + 1
    w_out = (w_in + 2 * padding - (kernel_size - 1) - 1) / stride + 1
    return int(np.floor(h_out)), int(np.floor(w_out))


def compute_deconv_output_size(h_in, w_in, kernel_size, stride, padding=0):
    """Output (height, width) of a square-kernel transposed-conv layer."""
    # All-integer arithmetic for int inputs: the original's int(np.floor(...))
    # round-trip through float was a no-op; a plain int() suffices.
    h_out = (h_in - 1) * stride - 2 * padding + kernel_size
    w_out = (w_in - 1) * stride - 2 * padding + kernel_size
    return int(h_out), int(w_out)


def compute_conv_layer_sizes(h_in, w_in, kernel_sizes, strides, paddings=None):
    """Print the output size after each conv layer in a stack."""
    if paddings is None:  # bug fix: identity comparison (was ``== None``)
        paddings = [0] * len(kernel_sizes)
    for kernel, stride, padding in zip(kernel_sizes, strides, paddings):
        h_in, w_in = compute_conv_output_size(h_in, w_in, kernel, stride,
                                              padding=padding)
        print('Output Size:', (h_in, w_in))


def compute_deconv_layer_sizes(h_in, w_in, kernel_sizes, strides,
                               paddings=None):
    """Print the output size after each transposed-conv layer in a stack."""
    if paddings is None:  # bug fix: identity comparison (was ``== None``)
        paddings = [0] * len(kernel_sizes)
    for kernel, stride, padding in zip(kernel_sizes, strides, paddings):
        h_in, w_in = compute_deconv_output_size(h_in, w_in, kernel, stride,
                                                padding=padding)
        print('Output Size:', (h_in, w_in))
"""
GPU wrappers
"""
_use_gpu = False
device = None
def set_gpu_mode(mode, gpu_id=0):
    """Select the module-wide device for all tensor helpers.

    When ``mode`` is truthy the device becomes ``cuda:<gpu_id>``,
    otherwise the CPU.
    """
    global _use_gpu
    global device
    global _gpu_id
    _gpu_id = gpu_id
    _use_gpu = mode
    if _use_gpu:
        device = torch.device("cuda:" + str(gpu_id))
    else:
        device = torch.device("cpu")
def gpu_enabled():
    """Return the mode flag last passed to set_gpu_mode() (False by default)."""
    return _use_gpu
def set_device(gpu_id):
    """Make ``gpu_id`` the current CUDA device (requires a CUDA-enabled build)."""
    torch.cuda.set_device(gpu_id)
# noinspection PyPep8Naming
def FloatTensor(*args, torch_device=None, **kwargs):
    """Build a torch.FloatTensor, defaulting to the module-level device."""
    target = device if torch_device is None else torch_device
    return torch.FloatTensor(*args, device=target, **kwargs)
def from_numpy(*args, **kwargs):
    """Convert a numpy array to a float tensor on the module-level device."""
    converted = torch.from_numpy(*args, **kwargs)
    return converted.float().to(device)
def get_numpy(tensor):
    """Detach ``tensor``, move it to host memory and return a numpy array."""
    return tensor.detach().cpu().numpy()
def randint(*sizes, torch_device=None, **kwargs):
    """torch.randint on ``torch_device`` (module-level device when None)."""
    target = device if torch_device is None else torch_device
    return torch.randint(*sizes, device=target, **kwargs)
def zeros(*sizes, torch_device=None, **kwargs):
    """torch.zeros on ``torch_device`` (module-level device when None)."""
    target = device if torch_device is None else torch_device
    return torch.zeros(*sizes, device=target, **kwargs)
def ones(*sizes, torch_device=None, **kwargs):
    """torch.ones on ``torch_device`` (module-level device when None)."""
    target = device if torch_device is None else torch_device
    return torch.ones(*sizes, device=target, **kwargs)
def ones_like(*args, torch_device=None, **kwargs):
    """torch.ones_like on ``torch_device`` (module-level device when None)."""
    target = device if torch_device is None else torch_device
    return torch.ones_like(*args, device=target, **kwargs)
def randn(*args, torch_device=None, **kwargs):
    """torch.randn on ``torch_device`` (module-level device when None)."""
    target = device if torch_device is None else torch_device
    return torch.randn(*args, device=target, **kwargs)
def zeros_like(*args, torch_device=None, **kwargs):
    """torch.zeros_like on ``torch_device`` (module-level device when None)."""
    target = device if torch_device is None else torch_device
    return torch.zeros_like(*args, device=target, **kwargs)
def tensor(*args, torch_device=None, **kwargs):
    """torch.tensor on ``torch_device`` (module-level device when None)."""
    target = device if torch_device is None else torch_device
    return torch.tensor(*args, device=target, **kwargs)
def normal(*args, **kwargs):
    """Sample torch.normal, then move the result to the module-level device."""
    sample = torch.normal(*args, **kwargs)
    return sample.to(device)
| vitchyr/rlkit | rlkit/torch/pytorch_util.py | Python | mit | 8,657 | [
"Gaussian"
] | c85164180ac091b0f962c28d4659b39afe5e2fd5f6765b6e7999b359099a8595 |
#!/usr/bin/env python
# This file is part of fast-tab and licensed under The MIT License (MIT).
import threading
import urllib
import re
import time
from config import *
class HttpError(Exception):
    """Raised when an HTTP fetch returns a non-success status code.

    ``value`` holds the HTTP status code (as a string).
    """

    # Fix: subclass Exception instead of BaseException. BaseException is
    # reserved for system-exiting events (KeyboardInterrupt, SystemExit);
    # an application error must be catchable by ``except Exception``.
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
class CrawlerThread(threading.Thread):
    """Worker thread that crawls Google Play pages from a shared queue.

    Discovered app identifiers are enqueued back onto ``crawling_queue``;
    app-detail pages are handed to ``processing_queue`` as
    ``(identifier, url, html)`` tuples.

    NOTE(review): this is Python 2 code (``urllib.urlopen``,
    ``dict.has_key``). ``self.discovered_dict``, ``self.config`` and
    ``self.find_identifiers`` are used below but never defined in this
    class -- confirm they were meant to be set in __init__ / a subclass.
    """

    def __init__(self, logger, crawling_queue, crawled_dict, processing_queue, processed_dict):
        threading.Thread.__init__(self)
        self.logger = logger
        self.crawling_queue = crawling_queue  # URLs still to fetch
        self.crawled_dict = crawled_dict  # URLs already fetched
        self.processing_queue = processing_queue  # (id, url, html) items awaiting processing
        self.processed_dict = processed_dict
        # self.config = Config()
        # self.config.read_config()

    def __del__(self):
        self.logger.info("crawled died")
        # NOTE(review): ``self.db_conn`` is never assigned in this class, so
        # this line raises AttributeError on destruction -- verify whether a
        # DB connection was meant to be opened in __init__.
        self.db_conn.close()

    def run(self):
        """Main loop: fetch pages, harvest identifiers, enqueue detail pages."""
        self.logger.info("crawler spawned")
        while True:
            url = self.crawling_queue.get()
            self.logger.debug("crawling " + url)
            try:
                html = self.fetch_page(url)
            except HttpError as e:
                self.logger.info("HttpError: Code " + e.value + " at " + url)
                # TODO rewrite for normal URLs
                # TODO continue here ->
                if (re.match('https:\/\/play.google.com\/store\/apps\/details\?id=[^&"?#<>()]*', url) != None):
                    urls = re.findall('\/store\/apps\/details\?id=([^&"?#<>()]*)', url)[0]
                continue
            # add found urls to queue
            identifiers = self.find_identifiers(html)
            for identifier in identifiers:
                self.logger.debug("found " + identifier + " at " + url)
                if (self.discovered_dict.has_key(identifier) is False):
                    url_app = self.config.app_url + identifier + "&hl=en"
                    self.crawling_queue.put(url_app)
                    self.discovered_dict[identifier] = url_app
                    self.logger.debug("added for visit: " + url_app)
            # check if this page is to be processed
            # TODO: use escaped self.config.app_url
            if (re.match('https:\/\/play.google.com\/store\/apps\/details\?id=[^&"?#<>()]*', url) != None):
                identifier = re.findall('\/store\/apps\/details\?id=([^&"?#<>()]*)', url)[0]
                if (self.discovered_dict.has_key(identifier) is False):
                    self.discovered_dict[identifier] = url
                    self.logger.debug("added for processing: " + identifier + " from " + url)
                    item = (identifier, url, html)
                    self.processing_queue.put(item)
            self.crawled_dict[url] = "crawled"
            self.crawling_queue.task_done()
            time.sleep(1)

    def fetch_page(self, url):
        """Fetch ``url`` and return the response body as a single string.

        Raises HttpError for any status outside the 2xx/3xx ranges.
        """
        f = urllib.urlopen(url)
        code = str(f.getcode())
        if (re.findall('(2\\d\\d|3\\d\\d)', code)):
            return ''.join(f.readlines())
        else:
            raise HttpError(str(f.getcode()))
| Tie-fighter/fast-tab | CrawlerThread.py | Python | mit | 3,039 | [
"VisIt"
] | 5daf225f48ffd20ffd073cceb7d34557bde49b2c94de7506d04c98afcf955c12 |
""" Queries BDII for unknown CE.
Queries BDII for CE information and puts it to CS.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities.Grid import ldapSite, ldapCluster, ldapCE, ldapCEState
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.Core.Security.ProxyInfo import getProxyInfo, formatProxyInfoAsString
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import getVO
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
class CE2CSAgent( AgentModule ):
def __init__(self):
""" c'tor
"""
self.addressTo = ''
self.addressFrom = ''
self.voName = ''
self.subject = "CE2CSAgent"
self.alternativeBDIIs = []
self.csAPI = None
    def initialize( self ):
        """Read agent options (mail addresses, alternative BDII hosts,
        shifter proxy, VOs) and prepare the CS API client.

        Returns S_ERROR when no VO can be determined, otherwise the result
        of CSAPI.initialize().
        """
        # TODO: Have no default and if no mail is found then use the diracAdmin group
        # and resolve all associated mail addresses.
        self.addressTo = self.am_getOption( 'MailTo', self.addressTo )
        self.addressFrom = self.am_getOption( 'MailFrom', self.addressFrom )
        # Create a list of alternative bdii urls
        self.alternativeBDIIs = self.am_getOption( 'AlternativeBDIIs', [] )
        # Check if the bdii url is appended by a port number, if not append the default 2170
        for index, url in enumerate( self.alternativeBDIIs ):
            if not url.split( ':' )[-1].isdigit():
                self.alternativeBDIIs[index] += ':2170'
        if self.addressTo and self.addressFrom:
            self.log.info( "MailTo", self.addressTo )
            self.log.info( "MailFrom", self.addressFrom )
        if self.alternativeBDIIs :
            self.log.info( "AlternativeBDII URLs:", self.alternativeBDIIs )
        self.subject = "CE2CSAgent"
        # This sets the Default Proxy to used as that defined under
        # /Operations/Shifter/TestManager
        # the shifterProxy option in the Configuration can be used to change this default.
        self.am_setOption( 'shifterProxy', 'TestManager' )
        # VO list: explicit option first, then the VO of this installation.
        self.voName = self.am_getOption( 'VirtualOrganization', [] )
        if not self.voName:
            vo = getVO()
            if vo:
                self.voName = [ vo ]
        if self.voName:
            self.log.info( "Agent will manage VO(s) %s" % self.voName )
        else:
            self.log.fatal( "VirtualOrganization option not defined for agent" )
            return S_ERROR()
        self.csAPI = CSAPI()
        return self.csAPI.initialize()
def execute( self ):
self.log.info( "Start Execution" )
result = getProxyInfo()
if not result['OK']:
return result
infoDict = result[ 'Value' ]
self.log.info( formatProxyInfoAsString( infoDict ) )
# Get a "fresh" copy of the CS data
result = self.csAPI.downloadCSData()
if not result['OK']:
self.log.warn( "Could not download a fresh copy of the CS data", result[ 'Message' ] )
self.__lookForCE()
self.__infoFromCE()
self.log.info( "End Execution" )
return S_OK()
def __checkAlternativeBDIISite( self, fun, *args ):
if self.alternativeBDIIs:
self.log.warn( "Trying to use alternative BDII sites" )
for site in self.alternativeBDIIs :
self.log.info( "Trying to contact alternative BDII", site )
if len( args ) == 1 :
result = fun( args[0], host = site )
elif len( args ) == 2 :
result = fun( args[0], vo = args[1], host = site )
if not result['OK'] :
self.log.error ( "Problem contacting alternative BDII", result['Message'] )
elif result['OK'] :
return result
self.log.warn( "Also checking alternative BDII sites failed" )
return result
    def __lookForCE( self ):
        """Query BDII for CEs not yet registered in the CS and mail a summary.

        CEs listed under the 'BannedCEs' option or already attached to a
        site in /Resources/Sites are treated as known and skipped; the
        remaining ones are reported (with their OS and queue states) when at
        least one queue is in 'Production'.
        """
        knownCEs = self.am_getOption( 'BannedCEs', [] )
        resources = Resources( self.voName )
        result = resources.getEligibleResources( 'Computing', {'CEType':['LCG','CREAM'] } )
        if not result['OK']:
            return
        grids = result['Value']
        # Collect every CE already registered under /Resources/Sites.
        for grid in grids:
            result = gConfig.getSections( '/Resources/Sites/%s' % grid )
            if not result['OK']:
                return
            sites = result['Value']
            for site in sites:
                opt = gConfig.getOptionsDict( '/Resources/Sites/%s/%s' % ( grid, site ) )['Value']
                ces = List.fromChar( opt.get( 'CE', '' ) )
                knownCEs += ces
        response = ''
        for vo in self.voName:
            self.log.info( "Check for available CEs for VO", vo )
            response = ldapCEState( '', vo )
            if not response['OK']:
                self.log.error( "Error during BDII request", response['Message'] )
                response = self.__checkAlternativeBDIISite( ldapCEState, '', vo )
                # NOTE(review): returning here aborts the scan for every
                # remaining VO after the first BDII failure -- confirm intended.
                return response
            # Any CE seen in BDII but absent from the CS is a candidate.
            newCEs = {}
            for queue in response['Value']:
                try:
                    queueName = queue['GlueCEUniqueID']
                except:
                    continue
                ceName = queueName.split( ":" )[0]
                if not ceName in knownCEs:
                    newCEs[ceName] = None
                    self.log.debug( "New CE", ceName )
            body = ""
            possibleNewSites = []
            for ce in newCEs.iterkeys():
                response = ldapCluster( ce )
                if not response['OK']:
                    self.log.warn( "Error during BDII request", response['Message'] )
                    response = self.__checkAlternativeBDIISite( ldapCluster, ce )
                    continue
                clusters = {}
                clusters = response['Value']
                if len( clusters ) != 1:
                    self.log.warn( "Error in cluster length", " CE %s Length %d" % ( ce, len( clusters ) ) )
                if len( clusters ) == 0:
                    continue
                cluster = clusters[0]
                # Resolve the GOCDB site name from the cluster's foreign keys.
                fkey = cluster.get( 'GlueForeignKey', [] )
                if type( fkey ) == type( '' ):
                    fkey = [fkey]
                nameBDII = None
                for entry in fkey:
                    if entry.count( 'GlueSiteUniqueID' ):
                        nameBDII = entry.split( '=' )[1]
                        break
                if not nameBDII:
                    continue
                ceString = "CE: %s, GOCDB Name: %s" % ( ce, nameBDII )
                self.log.info( ceString )
                response = ldapCE( ce )
                if not response['OK']:
                    self.log.warn( "Error during BDII request", response['Message'] )
                    response = self.__checkAlternativeBDIISite( ldapCE, ce )
                    continue
                ceInfos = response['Value']
                if len( ceInfos ):
                    ceInfo = ceInfos[0]
                    systemName = ceInfo.get( 'GlueHostOperatingSystemName', 'Unknown' )
                    systemVersion = ceInfo.get( 'GlueHostOperatingSystemVersion', 'Unknown' )
                    systemRelease = ceInfo.get( 'GlueHostOperatingSystemRelease', 'Unknown' )
                else:
                    systemName = "Unknown"
                    systemVersion = "Unknown"
                    systemRelease = "Unknown"
                osString = "SystemName: %s, SystemVersion: %s, SystemRelease: %s" % ( systemName, systemVersion, systemRelease )
                self.log.info( osString )
                response = ldapCEState( ce, vo )
                if not response['OK']:
                    self.log.warn( "Error during BDII request", response['Message'] )
                    response = self.__checkAlternativeBDIISite( ldapCEState, ce, vo )
                    continue
                # Report the CE only if at least one queue is in Production.
                newCEString = "\n\n%s\n%s" % ( ceString, osString )
                usefull = False
                ceStates = response['Value']
                for ceState in ceStates:
                    queueName = ceState.get( 'GlueCEUniqueID', 'UnknownName' )
                    queueStatus = ceState.get( 'GlueCEStateStatus', 'UnknownStatus' )
                    queueString = "%s %s" % ( queueName, queueStatus )
                    self.log.info( queueString )
                    newCEString += "\n%s" % queueString
                    if queueStatus.count( 'Production' ):
                        usefull = True
                if usefull:
                    body += newCEString
                    possibleNewSites.append( 'dirac-admin-add-site DIRACSiteName %s %s' % ( nameBDII, ce ) )
            if body:
                body = "We are glad to inform You about new CE(s) possibly suitable for %s:\n" % vo + body
                body += "\n\nTo suppress information about CE add its name to BannedCEs list."
                for possibleNewSite in possibleNewSites:
                    body = "%s\n%s" % ( body, possibleNewSite )
                self.log.info( body )
                if self.addressTo and self.addressFrom:
                    notification = NotificationClient()
                    result = notification.sendMail( self.addressTo, self.subject, body, self.addressFrom, localAttempt = False )
        return S_OK()
def __infoFromCE( self ):
sitesSection = cfgPath( 'Resources', 'Sites' )
result = gConfig.getSections( sitesSection )
if not result['OK']:
return
grids = result['Value']
changed = False
body = ""
for grid in grids:
gridSection = cfgPath( sitesSection, grid )
result = gConfig.getSections( gridSection )
if not result['OK']:
return
sites = result['Value']
for site in sites:
siteSection = cfgPath( gridSection, site )
opt = gConfig.getOptionsDict( siteSection )['Value']
name = opt.get( 'Name', '' )
if name:
coor = opt.get( 'Coordinates', 'Unknown' )
mail = opt.get( 'Mail', 'Unknown' )
result = ldapSite( name )
if not result['OK']:
self.log.warn( "BDII site %s: %s" % ( name, result['Message'] ) )
result = self.__checkAlternativeBDIISite( ldapSite, name )
if result['OK']:
bdiiSites = result['Value']
if len( bdiiSites ) == 0:
self.log.warn( name, "Error in BDII: leng = 0" )
else:
if not len( bdiiSites ) == 1:
self.log.warn( name, "Warning in BDII: leng = %d" % len( bdiiSites ) )
bdiiSite = bdiiSites[0]
try:
longitude = bdiiSite['GlueSiteLongitude']
latitude = bdiiSite['GlueSiteLatitude']
newcoor = "%s:%s" % ( longitude, latitude )
except:
self.log.warn( "Error in BDII coordinates" )
newcoor = "Unknown"
try:
newmail = bdiiSite['GlueSiteSysAdminContact'].split( ":" )[-1].strip()
except:
self.log.warn( "Error in BDII mail" )
newmail = "Unknown"
self.log.debug( "%s %s %s" % ( name, newcoor, newmail ) )
if newcoor != coor:
self.log.info( "%s" % ( name ), "%s -> %s" % ( coor, newcoor ) )
if coor == 'Unknown':
self.csAPI.setOption( cfgPath( siteSection, 'Coordinates' ), newcoor )
else:
self.csAPI.modifyValue( cfgPath( siteSection, 'Coordinates' ), newcoor )
changed = True
if newmail != mail:
self.log.info( "%s" % ( name ), "%s -> %s" % ( mail, newmail ) )
if mail == 'Unknown':
self.csAPI.setOption( cfgPath( siteSection, 'Mail' ), newmail )
else:
self.csAPI.modifyValue( cfgPath( siteSection, 'Mail' ), newmail )
changed = True
ceList = List.fromChar( opt.get( 'CE', '' ) )
if not ceList:
self.log.warn( site, 'Empty site list' )
continue
# result = gConfig.getSections( cfgPath( siteSection,'CEs' )
# if not result['OK']:
# self.log.debug( "Section CEs:", result['Message'] )
for ce in ceList:
ceSection = cfgPath( siteSection, 'CEs', ce )
result = gConfig.getOptionsDict( ceSection )
if not result['OK']:
self.log.debug( "Section CE", result['Message'] )
wnTmpDir = 'Unknown'
arch = 'Unknown'
os = 'Unknown'
si00 = 'Unknown'
pilot = 'Unknown'
ceType = 'Unknown'
else:
ceopt = result['Value']
wnTmpDir = ceopt.get( 'wnTmpDir', 'Unknown' )
arch = ceopt.get( 'architecture', 'Unknown' )
os = ceopt.get( 'OS', 'Unknown' )
si00 = ceopt.get( 'SI00', 'Unknown' )
pilot = ceopt.get( 'Pilot', 'Unknown' )
ceType = ceopt.get( 'CEType', 'Unknown' )
result = ldapCE( ce )
if not result['OK']:
self.log.warn( 'Error in BDII for %s' % ce, result['Message'] )
result = self.__checkAlternativeBDIISite( ldapCE, ce )
continue
try:
bdiiCE = result['Value'][0]
except:
self.log.warn( 'Error in BDII for %s' % ce, result )
bdiiCE = None
if bdiiCE:
try:
newWNTmpDir = bdiiCE['GlueSubClusterWNTmpDir']
except:
newWNTmpDir = 'Unknown'
if wnTmpDir != newWNTmpDir and newWNTmpDir != 'Unknown':
section = cfgPath( ceSection, 'wnTmpDir' )
self.log.info( section, " -> ".join( ( wnTmpDir, newWNTmpDir ) ) )
if wnTmpDir == 'Unknown':
self.csAPI.setOption( section, newWNTmpDir )
else:
self.csAPI.modifyValue( section, newWNTmpDir )
changed = True
try:
newArch = bdiiCE['GlueHostArchitecturePlatformType']
except:
newArch = 'Unknown'
if arch != newArch and newArch != 'Unknown':
section = cfgPath( ceSection, 'architecture' )
self.log.info( section, " -> ".join( ( arch, newArch ) ) )
if arch == 'Unknown':
self.csAPI.setOption( section, newArch )
else:
self.csAPI.modifyValue( section, newArch )
changed = True
try:
newOS = '_'.join( ( bdiiCE['GlueHostOperatingSystemName'],
bdiiCE['GlueHostOperatingSystemVersion'],
bdiiCE['GlueHostOperatingSystemRelease'] ) )
except:
newOS = 'Unknown'
if os != newOS and newOS != 'Unknown':
section = cfgPath( ceSection, 'OS' )
self.log.info( section, " -> ".join( ( os, newOS ) ) )
if os == 'Unknown':
self.csAPI.setOption( section, newOS )
else:
self.csAPI.modifyValue( section, newOS )
changed = True
body = body + "OS was changed %s -> %s for %s at %s\n" % ( os, newOS, ce, site )
try:
newSI00 = bdiiCE['GlueHostBenchmarkSI00']
except:
newSI00 = 'Unknown'
if si00 != newSI00 and newSI00 != 'Unknown':
section = cfgPath( ceSection, 'SI00' )
self.log.info( section, " -> ".join( ( si00, newSI00 ) ) )
if si00 == 'Unknown':
self.csAPI.setOption( section, newSI00 )
else:
self.csAPI.modifyValue( section, newSI00 )
changed = True
try:
rte = bdiiCE['GlueHostApplicationSoftwareRunTimeEnvironment']
for vo in self.voName:
if vo.lower() == 'lhcb':
if 'VO-lhcb-pilot' in rte:
newPilot = 'True'
else:
newPilot = 'False'
else:
newPilot = 'Unknown'
except:
newPilot = 'Unknown'
if pilot != newPilot and newPilot != 'Unknown':
section = cfgPath( ceSection, 'Pilot' )
self.log.info( section, " -> ".join( ( pilot, newPilot ) ) )
if pilot == 'Unknown':
self.csAPI.setOption( section, newPilot )
else:
self.csAPI.modifyValue( section, newPilot )
changed = True
newVO = ''
for vo in self.voName:
result = ldapCEState( ce, vo ) #getBDIICEVOView
if not result['OK']:
self.log.warn( 'Error in BDII for queue %s' % ce, result['Message'] )
result = self.__checkAlternativeBDIISite( ldapCEState, ce, vo )
continue
try:
queues = result['Value']
except:
self.log.warn( 'Error in BDII for queue %s' % ce, result['Massage'] )
continue
newCEType = 'Unknown'
for queue in queues:
try:
queueType = queue['GlueCEImplementationName']
except:
queueType = 'Unknown'
if newCEType == 'Unknown':
newCEType = queueType
else:
if queueType != newCEType:
self.log.warn( 'Error in BDII for CE %s ' % ce, 'different CE types %s %s' % ( newCEType, queueType ) )
if newCEType=='ARC-CE':
newCEType = 'ARC'
if ceType != newCEType and newCEType != 'Unknown':
section = cfgPath( ceSection, 'CEType' )
self.log.info( section, " -> ".join( ( ceType, newCEType ) ) )
if ceType == 'Unknown':
self.csAPI.setOption( section, newCEType )
else:
self.csAPI.modifyValue( section, newCEType )
changed = True
for queue in queues:
try:
queueName = queue['GlueCEUniqueID'].split( '/' )[-1]
except:
self.log.warn( 'Error in queueName ', queue )
continue
try:
newMaxCPUTime = queue['GlueCEPolicyMaxCPUTime']
except:
newMaxCPUTime = None
newSI00 = None
try:
caps = queue['GlueCECapability']
if type( caps ) == type( '' ):
caps = [caps]
for cap in caps:
if cap.count( 'CPUScalingReferenceSI00' ):
newSI00 = cap.split( '=' )[-1]
except:
newSI00 = None
queueSection = cfgPath( ceSection, 'Queues', queueName )
result = gConfig.getOptionsDict( queueSection )
if not result['OK']:
self.log.warn( "Section Queues", result['Message'] )
maxCPUTime = 'Unknown'
si00 = 'Unknown'
allowedVOs = ['']
else:
queueOpt = result['Value']
maxCPUTime = queueOpt.get( 'maxCPUTime', 'Unknown' )
si00 = queueOpt.get( 'SI00', 'Unknown' )
if newVO == '': # Remember previous iteration, if none - read from conf
allowedVOs = queueOpt.get( 'VO', '' ).split( "," )
else: # Else use newVO, as it can contain changes, which aren't in conf yet
allowedVOs = newVO.split( "," )
if newMaxCPUTime and ( maxCPUTime != newMaxCPUTime ):
section = cfgPath( queueSection, 'maxCPUTime' )
self.log.info( section, " -> ".join( ( maxCPUTime, newMaxCPUTime ) ) )
if maxCPUTime == 'Unknown':
self.csAPI.setOption( section, newMaxCPUTime )
else:
self.csAPI.modifyValue( section, newMaxCPUTime )
changed = True
if newSI00 and ( si00 != newSI00 ):
section = cfgPath( queueSection, 'SI00' )
self.log.info( section, " -> ".join( ( si00, newSI00 ) ) )
if si00 == 'Unknown':
self.csAPI.setOption( section, newSI00 )
else:
self.csAPI.modifyValue( section, newSI00 )
changed = True
modifyVO = True # Flag saying if we need VO option to change
newVO = ''
if allowedVOs != ['']:
for allowedVO in allowedVOs:
allowedVO = allowedVO.strip() # Get rid of spaces
newVO += allowedVO
if allowedVO == vo: # Current VO has been already in list
newVO = ''
modifyVO = False # Don't change anything
break # Skip next 'if', proceed to next VO
newVO += ', '
if modifyVO:
section = cfgPath( queueSection, 'VO' )
newVO += vo
self.log.info( section, " -> ".join( ( '%s' % allowedVOs, newVO ) ) )
if allowedVOs == ['']:
self.csAPI.setOption( section, newVO )
else:
self.csAPI.modifyValue( section, newVO )
changed = True
if changed:
self.log.info( body )
if body and self.addressTo and self.addressFrom:
notification = NotificationClient()
result = notification.sendMail( self.addressTo, self.subject, body, self.addressFrom, localAttempt = False )
return self.csAPI.commit()
else:
self.log.info( "No changes found" )
return S_OK()
| coberger/DIRAC | ConfigurationSystem/Agent/CE2CSAgent.py | Python | gpl-3.0 | 21,288 | [
"DIRAC"
] | a9957a2f23f09b9aaf80b9c9f3a0d1e7e34a682429594d17810ef9dcbf18dd88 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Scour
#
# Copyright 2010 Jeff Schiller
# Copyright 2010 Louis Simard
# Copyright 2013-2014 Tavendo GmbH
#
# This file is part of Scour, http://www.codedread.com/scour/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Notes:
# rubys' path-crunching ideas here: http://intertwingly.net/code/svgtidy/spec.rb
# (and implemented here: http://intertwingly.net/code/svgtidy/svgtidy.rb )
# Yet more ideas here: http://wiki.inkscape.org/wiki/index.php/Save_Cleaned_SVG
#
# * Process Transformations
# * Collapse all group based transformations
# Even more ideas here: http://esw.w3.org/topic/SvgTidy
# * analysis of path elements to see if rect can be used instead?
# (must also need to look at rounded corners)
# Next Up:
# - why are marker-start, -end not removed from the style attribute?
# - why are only overflow style properties considered and not attributes?
# - only remove unreferenced elements if they are not children of a referenced element
# - add an option to remove ids if they match the Inkscape-style of IDs
# - investigate point-reducing algorithms
# - parse transform attribute
# - if a <g> has only one element in it, collapse the <g> (ensure transform, etc are carried down)
from __future__ import division # use "true" division instead of integer division in Python 2 (see PEP 238)
from __future__ import print_function # use print() as a function in Python 2 (see PEP 3105)
from __future__ import absolute_import # use absolute imports by default in Python 2 (see PEP 328)
import math
import optparse
import os
import re
import sys
import time
import xml.dom.minidom
from xml.dom import Node, NotFoundErr
from collections import namedtuple, defaultdict
from decimal import Context, Decimal, InvalidOperation, getcontext
import six
from six.moves import range, urllib
from scour.svg_regex import svg_parser
from scour.svg_transform import svg_transform_parser
from scour.yocto_css import parseCssString
from scour import __version__
APP = u'scour'
VER = __version__
COPYRIGHT = u'Copyright Jeff Schiller, Louis Simard, 2010'
XML_ENTS_NO_QUOTES = {'<': '<', '>': '>', '&': '&'}
XML_ENTS_ESCAPE_APOS = XML_ENTS_NO_QUOTES.copy()
XML_ENTS_ESCAPE_APOS["'"] = '''
XML_ENTS_ESCAPE_QUOT = XML_ENTS_NO_QUOTES.copy()
XML_ENTS_ESCAPE_QUOT['"'] = '"'
# Used to split values where "x y" or "x,y" or a mix of the two is allowed
RE_COMMA_WSP = re.compile(r"\s*[\s,]\s*")
NS = {'SVG': 'http://www.w3.org/2000/svg',
'XLINK': 'http://www.w3.org/1999/xlink',
'SODIPODI': 'http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd',
'INKSCAPE': 'http://www.inkscape.org/namespaces/inkscape',
'ADOBE_ILLUSTRATOR': 'http://ns.adobe.com/AdobeIllustrator/10.0/',
'ADOBE_GRAPHS': 'http://ns.adobe.com/Graphs/1.0/',
'ADOBE_SVG_VIEWER': 'http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/',
'ADOBE_VARIABLES': 'http://ns.adobe.com/Variables/1.0/',
'ADOBE_SFW': 'http://ns.adobe.com/SaveForWeb/1.0/',
'ADOBE_EXTENSIBILITY': 'http://ns.adobe.com/Extensibility/1.0/',
'ADOBE_FLOWS': 'http://ns.adobe.com/Flows/1.0/',
'ADOBE_IMAGE_REPLACEMENT': 'http://ns.adobe.com/ImageReplacement/1.0/',
'ADOBE_CUSTOM': 'http://ns.adobe.com/GenericCustomNamespace/1.0/',
'ADOBE_XPATH': 'http://ns.adobe.com/XPath/1.0/',
'SKETCH': 'http://www.bohemiancoding.com/sketch/ns'
}
unwanted_ns = [NS['SODIPODI'], NS['INKSCAPE'], NS['ADOBE_ILLUSTRATOR'],
NS['ADOBE_GRAPHS'], NS['ADOBE_SVG_VIEWER'], NS['ADOBE_VARIABLES'],
NS['ADOBE_SFW'], NS['ADOBE_EXTENSIBILITY'], NS['ADOBE_FLOWS'],
NS['ADOBE_IMAGE_REPLACEMENT'], NS['ADOBE_CUSTOM'],
NS['ADOBE_XPATH'], NS['SKETCH']]
# A list of all SVG presentation properties
#
# Sources for this list:
# https://www.w3.org/TR/SVG/propidx.html (implemented)
# https://www.w3.org/TR/SVGTiny12/attributeTable.html (implemented)
# https://www.w3.org/TR/SVG2/propidx.html (not yet implemented)
#
svgAttributes = [
# SVG 1.1
'alignment-baseline',
'baseline-shift',
'clip',
'clip-path',
'clip-rule',
'color',
'color-interpolation',
'color-interpolation-filters',
'color-profile',
'color-rendering',
'cursor',
'direction',
'display',
'dominant-baseline',
'enable-background',
'fill',
'fill-opacity',
'fill-rule',
'filter',
'flood-color',
'flood-opacity',
'font',
'font-family',
'font-size',
'font-size-adjust',
'font-stretch',
'font-style',
'font-variant',
'font-weight',
'glyph-orientation-horizontal',
'glyph-orientation-vertical',
'image-rendering',
'kerning',
'letter-spacing',
'lighting-color',
'marker',
'marker-end',
'marker-mid',
'marker-start',
'mask',
'opacity',
'overflow',
'pointer-events',
'shape-rendering',
'stop-color',
'stop-opacity',
'stroke',
'stroke-dasharray',
'stroke-dashoffset',
'stroke-linecap',
'stroke-linejoin',
'stroke-miterlimit',
'stroke-opacity',
'stroke-width',
'text-anchor',
'text-decoration',
'text-rendering',
'unicode-bidi',
'visibility',
'word-spacing',
'writing-mode',
# SVG 1.2 Tiny
'audio-level',
'buffered-rendering',
'display-align',
'line-increment',
'solid-color',
'solid-opacity',
'text-align',
'vector-effect',
'viewport-fill',
'viewport-fill-opacity',
]
colors = {
'aliceblue': 'rgb(240, 248, 255)',
'antiquewhite': 'rgb(250, 235, 215)',
'aqua': 'rgb( 0, 255, 255)',
'aquamarine': 'rgb(127, 255, 212)',
'azure': 'rgb(240, 255, 255)',
'beige': 'rgb(245, 245, 220)',
'bisque': 'rgb(255, 228, 196)',
'black': 'rgb( 0, 0, 0)',
'blanchedalmond': 'rgb(255, 235, 205)',
'blue': 'rgb( 0, 0, 255)',
'blueviolet': 'rgb(138, 43, 226)',
'brown': 'rgb(165, 42, 42)',
'burlywood': 'rgb(222, 184, 135)',
'cadetblue': 'rgb( 95, 158, 160)',
'chartreuse': 'rgb(127, 255, 0)',
'chocolate': 'rgb(210, 105, 30)',
'coral': 'rgb(255, 127, 80)',
'cornflowerblue': 'rgb(100, 149, 237)',
'cornsilk': 'rgb(255, 248, 220)',
'crimson': 'rgb(220, 20, 60)',
'cyan': 'rgb( 0, 255, 255)',
'darkblue': 'rgb( 0, 0, 139)',
'darkcyan': 'rgb( 0, 139, 139)',
'darkgoldenrod': 'rgb(184, 134, 11)',
'darkgray': 'rgb(169, 169, 169)',
'darkgreen': 'rgb( 0, 100, 0)',
'darkgrey': 'rgb(169, 169, 169)',
'darkkhaki': 'rgb(189, 183, 107)',
'darkmagenta': 'rgb(139, 0, 139)',
'darkolivegreen': 'rgb( 85, 107, 47)',
'darkorange': 'rgb(255, 140, 0)',
'darkorchid': 'rgb(153, 50, 204)',
'darkred': 'rgb(139, 0, 0)',
'darksalmon': 'rgb(233, 150, 122)',
'darkseagreen': 'rgb(143, 188, 143)',
'darkslateblue': 'rgb( 72, 61, 139)',
'darkslategray': 'rgb( 47, 79, 79)',
'darkslategrey': 'rgb( 47, 79, 79)',
'darkturquoise': 'rgb( 0, 206, 209)',
'darkviolet': 'rgb(148, 0, 211)',
'deeppink': 'rgb(255, 20, 147)',
'deepskyblue': 'rgb( 0, 191, 255)',
'dimgray': 'rgb(105, 105, 105)',
'dimgrey': 'rgb(105, 105, 105)',
'dodgerblue': 'rgb( 30, 144, 255)',
'firebrick': 'rgb(178, 34, 34)',
'floralwhite': 'rgb(255, 250, 240)',
'forestgreen': 'rgb( 34, 139, 34)',
'fuchsia': 'rgb(255, 0, 255)',
'gainsboro': 'rgb(220, 220, 220)',
'ghostwhite': 'rgb(248, 248, 255)',
'gold': 'rgb(255, 215, 0)',
'goldenrod': 'rgb(218, 165, 32)',
'gray': 'rgb(128, 128, 128)',
'grey': 'rgb(128, 128, 128)',
'green': 'rgb( 0, 128, 0)',
'greenyellow': 'rgb(173, 255, 47)',
'honeydew': 'rgb(240, 255, 240)',
'hotpink': 'rgb(255, 105, 180)',
'indianred': 'rgb(205, 92, 92)',
'indigo': 'rgb( 75, 0, 130)',
'ivory': 'rgb(255, 255, 240)',
'khaki': 'rgb(240, 230, 140)',
'lavender': 'rgb(230, 230, 250)',
'lavenderblush': 'rgb(255, 240, 245)',
'lawngreen': 'rgb(124, 252, 0)',
'lemonchiffon': 'rgb(255, 250, 205)',
'lightblue': 'rgb(173, 216, 230)',
'lightcoral': 'rgb(240, 128, 128)',
'lightcyan': 'rgb(224, 255, 255)',
'lightgoldenrodyellow': 'rgb(250, 250, 210)',
'lightgray': 'rgb(211, 211, 211)',
'lightgreen': 'rgb(144, 238, 144)',
'lightgrey': 'rgb(211, 211, 211)',
'lightpink': 'rgb(255, 182, 193)',
'lightsalmon': 'rgb(255, 160, 122)',
'lightseagreen': 'rgb( 32, 178, 170)',
'lightskyblue': 'rgb(135, 206, 250)',
'lightslategray': 'rgb(119, 136, 153)',
'lightslategrey': 'rgb(119, 136, 153)',
'lightsteelblue': 'rgb(176, 196, 222)',
'lightyellow': 'rgb(255, 255, 224)',
'lime': 'rgb( 0, 255, 0)',
'limegreen': 'rgb( 50, 205, 50)',
'linen': 'rgb(250, 240, 230)',
'magenta': 'rgb(255, 0, 255)',
'maroon': 'rgb(128, 0, 0)',
'mediumaquamarine': 'rgb(102, 205, 170)',
'mediumblue': 'rgb( 0, 0, 205)',
'mediumorchid': 'rgb(186, 85, 211)',
'mediumpurple': 'rgb(147, 112, 219)',
'mediumseagreen': 'rgb( 60, 179, 113)',
'mediumslateblue': 'rgb(123, 104, 238)',
'mediumspringgreen': 'rgb( 0, 250, 154)',
'mediumturquoise': 'rgb( 72, 209, 204)',
'mediumvioletred': 'rgb(199, 21, 133)',
'midnightblue': 'rgb( 25, 25, 112)',
'mintcream': 'rgb(245, 255, 250)',
'mistyrose': 'rgb(255, 228, 225)',
'moccasin': 'rgb(255, 228, 181)',
'navajowhite': 'rgb(255, 222, 173)',
'navy': 'rgb( 0, 0, 128)',
'oldlace': 'rgb(253, 245, 230)',
'olive': 'rgb(128, 128, 0)',
'olivedrab': 'rgb(107, 142, 35)',
'orange': 'rgb(255, 165, 0)',
'orangered': 'rgb(255, 69, 0)',
'orchid': 'rgb(218, 112, 214)',
'palegoldenrod': 'rgb(238, 232, 170)',
'palegreen': 'rgb(152, 251, 152)',
'paleturquoise': 'rgb(175, 238, 238)',
'palevioletred': 'rgb(219, 112, 147)',
'papayawhip': 'rgb(255, 239, 213)',
'peachpuff': 'rgb(255, 218, 185)',
'peru': 'rgb(205, 133, 63)',
'pink': 'rgb(255, 192, 203)',
'plum': 'rgb(221, 160, 221)',
'powderblue': 'rgb(176, 224, 230)',
'purple': 'rgb(128, 0, 128)',
'red': 'rgb(255, 0, 0)',
'rosybrown': 'rgb(188, 143, 143)',
'royalblue': 'rgb( 65, 105, 225)',
'saddlebrown': 'rgb(139, 69, 19)',
'salmon': 'rgb(250, 128, 114)',
'sandybrown': 'rgb(244, 164, 96)',
'seagreen': 'rgb( 46, 139, 87)',
'seashell': 'rgb(255, 245, 238)',
'sienna': 'rgb(160, 82, 45)',
'silver': 'rgb(192, 192, 192)',
'skyblue': 'rgb(135, 206, 235)',
'slateblue': 'rgb(106, 90, 205)',
'slategray': 'rgb(112, 128, 144)',
'slategrey': 'rgb(112, 128, 144)',
'snow': 'rgb(255, 250, 250)',
'springgreen': 'rgb( 0, 255, 127)',
'steelblue': 'rgb( 70, 130, 180)',
'tan': 'rgb(210, 180, 140)',
'teal': 'rgb( 0, 128, 128)',
'thistle': 'rgb(216, 191, 216)',
'tomato': 'rgb(255, 99, 71)',
'turquoise': 'rgb( 64, 224, 208)',
'violet': 'rgb(238, 130, 238)',
'wheat': 'rgb(245, 222, 179)',
'white': 'rgb(255, 255, 255)',
'whitesmoke': 'rgb(245, 245, 245)',
'yellow': 'rgb(255, 255, 0)',
'yellowgreen': 'rgb(154, 205, 50)',
}
# A list of default poperties that are safe to remove
#
# Sources for this list:
# https://www.w3.org/TR/SVG/propidx.html (implemented)
# https://www.w3.org/TR/SVGTiny12/attributeTable.html (implemented)
# https://www.w3.org/TR/SVG2/propidx.html (not yet implemented)
#
default_properties = { # excluded all properties with 'auto' as default
# SVG 1.1 presentation attributes
'baseline-shift': 'baseline',
'clip-path': 'none',
'clip-rule': 'nonzero',
'color': '#000',
'color-interpolation-filters': 'linearRGB',
'color-interpolation': 'sRGB',
'direction': 'ltr',
'display': 'inline',
'enable-background': 'accumulate',
'fill': '#000',
'fill-opacity': '1',
'fill-rule': 'nonzero',
'filter': 'none',
'flood-color': '#000',
'flood-opacity': '1',
'font-size-adjust': 'none',
'font-size': 'medium',
'font-stretch': 'normal',
'font-style': 'normal',
'font-variant': 'normal',
'font-weight': 'normal',
'glyph-orientation-horizontal': '0deg',
'letter-spacing': 'normal',
'lighting-color': '#fff',
'marker': 'none',
'marker-start': 'none',
'marker-mid': 'none',
'marker-end': 'none',
'mask': 'none',
'opacity': '1',
'pointer-events': 'visiblePainted',
'stop-color': '#000',
'stop-opacity': '1',
'stroke': 'none',
'stroke-dasharray': 'none',
'stroke-dashoffset': '0',
'stroke-linecap': 'butt',
'stroke-linejoin': 'miter',
'stroke-miterlimit': '4',
'stroke-opacity': '1',
'stroke-width': '1',
'text-anchor': 'start',
'text-decoration': 'none',
'unicode-bidi': 'normal',
'visibility': 'visible',
'word-spacing': 'normal',
'writing-mode': 'lr-tb',
# SVG 1.2 tiny properties
'audio-level': '1',
'solid-color': '#000',
'solid-opacity': '1',
'text-align': 'start',
'vector-effect': 'none',
'viewport-fill': 'none',
'viewport-fill-opacity': '1',
}
def is_same_sign(a, b):
    """Return True when *a* and *b* do not have opposite signs.

    Zero is compatible with either sign, so is_same_sign(0, x) is always True.
    """
    both_non_positive = a <= 0 and b <= 0
    both_non_negative = a >= 0 and b >= 0
    return both_non_positive or both_non_negative
def is_same_direction(x1, y1, x2, y2):
    """Return True when the vectors (x1, y1) and (x2, y2) point the same way.

    Both axes must agree in sign and the slopes must be equal within the
    precision of scouringContext (presumably a decimal.Context -- TODO confirm).

    NOTE(review): x1 == 0 or x2 == 0 can pass the sign check yet raise
    ZeroDivisionError on the slope computation below.
    """
    if not (is_same_sign(x1, x2) and is_same_sign(y1, y2)):
        return False
    slope_difference = y1 / x1 - y2 / x2
    return scouringContext.plus(1 + slope_difference) == 1
# Matches a number in scientific notation, e.g. "1e5", "-3.2E-4".
scinumber = re.compile(r"[-+]?(\d*\.?)?\d+[eE][-+]?\d+")
# Matches a plain (optionally signed, optionally fractional) decimal number.
number = re.compile(r"[-+]?(\d*\.?)?\d+")
# Captures the signed exponent digits following an "e"/"E" marker.
sciExponent = re.compile(r"[eE]([-+]?\d+)")
# Matches a trailing CSS/SVG length unit (or "%") at the end of a string.
unit = re.compile("(em|ex|px|pt|pc|cm|mm|in|%){1,1}$")
class Unit(object):
    """Integer constants for CSS/SVG length units, plus conversions
    between unit strings (e.g. 'px') and those constants.
    """

    # Integer constants for units.
    INVALID = -1
    NONE = 0
    PCT = 1
    PX = 2
    PT = 3
    PC = 4
    EM = 5
    EX = 6
    CM = 7
    MM = 8
    IN = 9

    # String to Unit. Basically, converts unit strings to their integer constants.
    s2u = {
        '': NONE,
        '%': PCT,
        'px': PX,
        'pt': PT,
        'pc': PC,
        'em': EM,
        'ex': EX,
        'cm': CM,
        'mm': MM,
        'in': IN,
    }

    # Unit to String. Basically, converts unit integer constants to their corresponding strings.
    u2s = {
        NONE: '',
        PCT: '%',
        PX: 'px',
        PT: 'pt',
        PC: 'pc',
        EM: 'em',
        EX: 'ex',
        CM: 'cm',
        MM: 'mm',
        IN: 'in',
    }

    # Modernized: use @staticmethod decorators instead of the legacy
    # "name = staticmethod(name)" rebinding idiom.
    @staticmethod
    def get(unitstr):
        """Return the unit constant for *unitstr* (NONE for None, INVALID if unknown)."""
        if unitstr is None:
            return Unit.NONE
        try:
            return Unit.s2u[unitstr]
        except KeyError:
            return Unit.INVALID

    @staticmethod
    def str(unitint):
        """Return the unit string for *unitint*, or 'INVALID' if unknown."""
        try:
            return Unit.u2s[unitint]
        except KeyError:
            return 'INVALID'
class SVGLength(object):
    """Parses an SVG/CSS length string (e.g. "5", "3.2px", "1e-2cm", "50%")
    into a numeric .value (int or float) and a .units constant from Unit.
    An unparseable string yields value 0 and units Unit.INVALID.
    """
    def __init__(self, str):
        # NOTE(review): the parameter name shadows the builtin str().
        try: # simple unitless and no scientific notation
            self.value = float(str)
            # normalize whole-number floats to int (e.g. 5.0 -> 5)
            if int(self.value) == self.value:
                self.value = int(self.value)
            self.units = Unit.NONE
        except ValueError:
            # we know that the length string has an exponent, a unit, both or is invalid
            # parse out number, exponent and unit
            self.value = 0
            unitBegin = 0
            scinum = scinumber.match(str)
            if scinum is not None:
                # this will always match, no need to check it
                numMatch = number.match(str)
                expMatch = sciExponent.search(str, numMatch.start(0))
                self.value = (float(numMatch.group(0)) *
                              10 ** float(expMatch.group(1)))
                unitBegin = expMatch.end(1)
            else:
                # unit or invalid
                numMatch = number.match(str)
                if numMatch is not None:
                    self.value = float(numMatch.group(0))
                    unitBegin = numMatch.end(0)
            # normalize whole-number floats to int
            if int(self.value) == self.value:
                self.value = int(self.value)
            if unitBegin != 0:
                unitMatch = unit.search(str, unitBegin)
                if unitMatch is not None:
                    self.units = Unit.get(unitMatch.group(0))
                # NOTE(review): if a number parsed but the trailing text is
                # not a recognized unit (unitMatch is None), self.units is
                # never assigned and later access raises AttributeError --
                # confirm callers only pass well-formed lengths.
            # invalid
            else:
                # TODO: this needs to set the default for the given attribute (how?)
                self.value = 0
                self.units = Unit.INVALID
def findElementsWithId(node, elems=None):
    """Collect every element in the subtree rooted at *node* that carries
    an ``id`` attribute.

    Returns a dict mapping each id string to its declaring element.
    """
    if elems is None:
        elems = {}
    node_id = node.getAttribute('id')
    if node_id != '':
        elems[node_id] = node
    # Only element nodes (nodeType 1) can carry ids or element children;
    # text/comment nodes are skipped.
    for child in node.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            findElementsWithId(child, elems)
    return elems
# Attribute names whose values may reference other elements via url(#id);
# scanned by findReferencedElements/findReferencingProperty below.
referencingProps = ['fill', 'stroke', 'filter', 'clip-path', 'mask', 'marker-start', 'marker-end', 'marker-mid']
def findReferencedElements(node, ids=None):
    """
    Returns IDs of all referenced elements
    - node is the node at which to start the search.
    - returns a map which has the id as key and
      each value is a set of nodes referencing that id
    Currently looks at 'xlink:href' and all attributes in 'referencingProps',
    plus url(#id) references inside <style> stylesheets and style attributes.
    """
    global referencingProps
    if ids is None:
        ids = {}
    # TODO: input argument ids is clunky here (see below how it is called)
    # GZ: alternative to passing dict, use **kwargs
    # if this node is a style element, parse its text into CSS
    if node.nodeName == 'style' and node.namespaceURI == NS['SVG']:
        # one stretch of text, please! (we could use node.normalize(), but
        # this actually modifies the node, and we don't want to keep
        # whitespace around if there's any)
        stylesheet = "".join(child.nodeValue for child in node.childNodes)
        if stylesheet != '':
            cssRules = parseCssString(stylesheet)
            for rule in cssRules:
                for propname in rule['properties']:
                    propval = rule['properties'][propname]
                    findReferencingProperty(node, propname, propval, ids)
        # a <style> element has no other references and no element children:
        # stop here instead of falling through to the attribute scan
        return ids
    # else if xlink:href is set, then grab the id
    href = node.getAttributeNS(NS['XLINK'], 'href')
    if href != '' and len(href) > 1 and href[0] == '#':
        # we remove the hash mark from the beginning of the id
        id = href[1:]
        if id in ids:
            ids[id].add(node)
        else:
            ids[id] = {node}
    # now get all style properties and the fill, stroke, filter attributes
    styles = node.getAttribute('style').split(';')
    for style in styles:
        propval = style.split(':')
        if len(propval) == 2:
            prop = propval[0].strip()
            val = propval[1].strip()
            findReferencingProperty(node, prop, val, ids)
    for attr in referencingProps:
        val = node.getAttribute(attr).strip()
        if not val:
            continue
        findReferencingProperty(node, attr, val, ids)
    # recurse into element children
    if node.hasChildNodes():
        for child in node.childNodes:
            if child.nodeType == Node.ELEMENT_NODE:
                findReferencedElements(child, ids)
    return ids
def findReferencingProperty(node, prop, val, ids):
    """Record into *ids* any element ID referenced by a url(...) value.

    Handles the url(#id), url("#id") and url('#id') forms. *ids* maps an
    ID string to the set of nodes referencing it; only properties listed
    in the module-level referencingProps are considered.
    """
    global referencingProps
    if prop not in referencingProps or val == '':
        return
    referenced = None
    if len(val) >= 7 and val.startswith('url(#'):
        referenced = val[5:val.find(')')]
    # if the url has a quote in it, we need to compensate
    elif len(val) >= 8:
        # double-quote
        if val.startswith('url("#'):
            referenced = val[6:val.find('")')]
        # single-quote
        elif val.startswith("url('#"):
            referenced = val[6:val.find("')")]
    if referenced is not None:
        ids.setdefault(referenced, set()).add(node)
def removeUnusedDefs(doc, defElem, elemsToRemove=None, referencedIDs=None):
    """Collect children of *defElem* (a <defs>) that are unreferenced and
    therefore removable.  The DOM itself is not modified here; the caller
    removes the returned elements.

    Returns *elemsToRemove* extended with the removable elements.
    """
    if elemsToRemove is None:
        elemsToRemove = []
    # removeUnusedDefs do not change the XML itself; therefore there is no point in
    # recomputing findReferencedElements when we recurse into child nodes.
    if referencedIDs is None:
        referencedIDs = findReferencedElements(doc.documentElement)
    # element names that are always kept even when unreferenced
    keepTags = ['font', 'style', 'metadata', 'script', 'title', 'desc']
    for elem in defElem.childNodes:
        # only look at it if an element and not referenced anywhere else
        if elem.nodeType != Node.ELEMENT_NODE:
            continue
        elem_id = elem.getAttribute('id')
        if elem_id == '' or elem_id not in referencedIDs:
            # we only inspect the children of a group in a defs if the group
            # is not referenced anywhere else
            if elem.nodeName == 'g' and elem.namespaceURI == NS['SVG']:
                elemsToRemove = removeUnusedDefs(doc, elem, elemsToRemove, referencedIDs=referencedIDs)
            # we only remove if it is not one of our tags we always keep (see above)
            elif elem.nodeName not in keepTags:
                elemsToRemove.append(elem)
    return elemsToRemove
def removeUnreferencedElements(doc, keepDefs):
    """
    Removes all unreferenced elements except for <svg>, <font>, <metadata>, <title>, and <desc>.
    Also vacuums the defs of any non-referenced renderable elements.
    Returns the number of unreferenced elements removed from the document.
    """
    global _num_elements_removed
    num = 0
    # Remove certain unreferenced elements outside of defs
    removeTags = ['linearGradient', 'radialGradient', 'pattern']
    identifiedElements = findElementsWithId(doc.documentElement)
    referencedIDs = findReferencedElements(doc.documentElement)
    if not keepDefs:
        # Remove most unreferenced elements inside defs
        defs = doc.documentElement.getElementsByTagName('defs')
        for aDef in defs:
            elemsToRemove = removeUnusedDefs(doc, aDef, referencedIDs=referencedIDs)
            for elem in elemsToRemove:
                elem.parentNode.removeChild(elem)
                _num_elements_removed += 1
                num += 1
    # Outside of defs: only remove unreferenced gradients/patterns
    # (elements inside a defs were already handled above)
    for id in identifiedElements:
        if id not in referencedIDs:
            goner = identifiedElements[id]
            if (goner is not None and goner.nodeName in removeTags
                    and goner.parentNode is not None
                    and goner.parentNode.tagName != 'defs'):
                goner.parentNode.removeChild(goner)
                num += 1
                _num_elements_removed += 1
    return num
def shortenIDs(doc, prefix, options):
    """
    Shortens ID names used in the document. ID names referenced the most often are assigned the
    shortest ID names.
    Returns the number of bytes saved by shortening ID names in the document.
    """
    num = 0
    identifiedElements = findElementsWithId(doc.documentElement)
    # This maps the (original) ID to the nodes referencing it.
    # At the end of this function, it will no longer be valid and while we
    # could keep it up to date, it will complicate the code for no gain
    # (as we do not reuse the data structure beyond this function).
    referencedIDs = findReferencedElements(doc.documentElement)
    # Make idList (list of idnames) sorted by reference count
    # descending, so the highest reference count is first.
    # First check that there's actually a defining element for the current ID name.
    # (Cyn: I've seen documents with #id references but no element with that ID!)
    idList = [(len(referencedIDs[rid]), rid) for rid in referencedIDs
              if rid in identifiedElements]
    idList.sort(reverse=True)
    idList = [rid for count, rid in idList]
    # Add unreferenced IDs to end of idList in arbitrary order
    idList.extend([rid for rid in identifiedElements if rid not in idList])
    # Ensure we do not reuse a protected ID by accident
    protectedIDs = protected_ids(identifiedElements, options)
    # IDs that have been allocated and should not be remapped.
    consumedIDs = set()
    # List of IDs that need to be assigned a new ID. The list is ordered
    # such that earlier entries will be assigned a shorter ID than those
    # later in the list. IDs in this list *can* obtain an ID that is
    # longer than they already are.
    need_new_id = []
    id_allocations = list(compute_id_lengths(len(idList) + 1))
    # Reverse so we can use it as a stack and still work from "shortest to
    # longest" ID.
    id_allocations.reverse()
    # Here we loop over all current IDs (that we /might/ want to remap)
    # and group them into two. 1) The IDs that already have a perfect
    # length (these are added to consumedIDs) and 2) the IDs that need
    # to change length (these are appended to need_new_id).
    optimal_id_length, id_use_limit = 0, 0
    for current_id in idList:
        # If we are out of IDs of the current length, then move on
        # to the next length
        if id_use_limit < 1:
            optimal_id_length, id_use_limit = id_allocations.pop()
        # Reserve an ID from this length
        id_use_limit -= 1
        # We check for strictly equal to optimal length because our ID
        # remapping may have to assign one node a longer ID because
        # another node needs a shorter ID.
        if len(current_id) == optimal_id_length:
            # This rid is already of optimal length - lets just keep it.
            consumedIDs.add(current_id)
        else:
            # Needs a new (possibly longer) ID.
            need_new_id.append(current_id)
    curIdNum = 1
    for old_id in need_new_id:
        new_id = intToID(curIdNum, prefix)
        # Skip ahead if the new ID has already been used or is protected.
        while new_id in protectedIDs or new_id in consumedIDs:
            curIdNum += 1
            new_id = intToID(curIdNum, prefix)
        # Now that we have found the first available ID, do the remap.
        num += renameID(old_id, new_id, identifiedElements, referencedIDs.get(old_id))
        curIdNum += 1
    return num
def compute_id_lengths(highest):
    """Compute how many IDs are available of a given size
    Example:
    >>> lengths = list(compute_id_lengths(512))
    >>> lengths
    [(1, 26), (2, 676)]
    >>> total_limit = sum(x[1] for x in lengths)
    >>> total_limit
    702
    >>> intToID(total_limit, '')
    'zz'
    Which tells us that we got 26 IDs of length 1 and up to 676 IDs of length two
    if we need to allocate 512 IDs.
    :param highest: Highest ID that need to be allocated
    :return: An iterator that returns tuples of (id-length, use-limit). The
      use-limit applies only to the given id-length (i.e. it is excluding IDs
      of shorter length). Note that the sum of the use-limit values is always
      equal to or greater than the highest param.
    """
    step = 26  # size of the ID alphabet ('a'..'z')
    id_length = 0
    use_limit = 1
    while highest:
        id_length += 1
        use_limit *= step
        yield (id_length, use_limit)
        # Use floor division to stay exact for arbitrarily large counts;
        # int((highest - 1) / step) rounds through a float and loses
        # precision above 2**53.
        highest = (highest - 1) // step
def intToID(idnum, prefix):
    """
    Returns the ID name for the given ID number, spreadsheet-style, i.e. from a to z,
    then from aa to az, ba to bz, etc., until zz.

    idnum is 1-based (1 -> 'a'); the result is prefixed with *prefix*.
    """
    rid = ''
    while idnum > 0:
        idnum -= 1
        rid = chr((idnum % 26) + ord('a')) + rid
        # Floor division keeps the arithmetic exact for very large idnum;
        # int(idnum / 26) rounds through a float and loses precision
        # above 2**53.
        idnum //= 26
    return prefix + rid
def renameID(idFrom, idTo, identifiedElements, referringNodes):
    """
    Changes the ID name from idFrom to idTo, on the declaring element
    as well as all nodes in referringNodes.
    Updates identifiedElements.
    Returns the number of bytes saved by this replacement.
    """
    num = 0
    definingNode = identifiedElements[idFrom]
    definingNode.setAttribute("id", idTo)
    num += len(idFrom) - len(idTo)
    # Update references to renamed node
    if referringNodes is not None:
        # Look for the idFrom ID name in each of the referencing elements,
        # exactly like findReferencedElements would.
        # Cyn: Duplicated processing!
        for node in referringNodes:
            # if this node is a style element, parse its text into CSS
            if node.nodeName == 'style' and node.namespaceURI == NS['SVG']:
                # node.firstChild will be either a CDATA or a Text node now
                if node.firstChild is not None:
                    # concatenate the value of all children, in case
                    # there's a CDATASection node surrounded by whitespace
                    # nodes
                    # (node.normalize() will NOT work here, it only acts on Text nodes)
                    oldValue = "".join(child.nodeValue for child in node.childNodes)
                    # not going to reparse the whole thing
                    newValue = oldValue.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
                    # BUGFIX: the quoted forms are url('#id') / url("#id")
                    # (quote before the hash), matching the patterns used for
                    # style/presentation attributes below; previously these
                    # searched for url(#'id') / url(#"id") and never matched.
                    newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
                    newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
                    # and now replace all the children with this new stylesheet.
                    # again, this is in case the stylesheet was a CDATASection
                    node.childNodes[:] = [node.ownerDocument.createTextNode(newValue)]
                    num += len(oldValue) - len(newValue)
            # if xlink:href is set to #idFrom, then change the id
            href = node.getAttributeNS(NS['XLINK'], 'href')
            if href == '#' + idFrom:
                node.setAttributeNS(NS['XLINK'], 'href', '#' + idTo)
                num += len(idFrom) - len(idTo)
            # if the style has url(#idFrom), then change the id
            styles = node.getAttribute('style')
            if styles != '':
                newValue = styles.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
                newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
                newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
                node.setAttribute('style', newValue)
                num += len(styles) - len(newValue)
            # now try the fill, stroke, filter attributes
            for attr in referencingProps:
                oldValue = node.getAttribute(attr)
                if oldValue != '':
                    newValue = oldValue.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
                    newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
                    newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
                    node.setAttribute(attr, newValue)
                    num += len(oldValue) - len(newValue)
    return num
def protected_ids(seenIDs, options):
    """Return the subset of *seenIDs* that the options mark as protected
    (must not be renamed or removed).

    Protection sources: an explicit comma-separated list, comma-separated
    prefixes, and the "non-Inkscape" rule (IDs not ending in a digit).
    """
    result = []
    wants_protection = (options.protect_ids_prefix or
                        options.protect_ids_noninkscape or
                        options.protect_ids_list)
    if not wants_protection:
        return result
    explicit = options.protect_ids_list.split(",") if options.protect_ids_list else []
    prefixes = options.protect_ids_prefix.split(",") if options.protect_ids_prefix else []
    for candidate in seenIDs:
        if options.protect_ids_noninkscape and not candidate[-1].isdigit():
            result.append(candidate)
        elif explicit and candidate in explicit:
            result.append(candidate)
        elif any(candidate.startswith(prefix) for prefix in prefixes):
            result.append(candidate)
    return result
def unprotected_ids(doc, options):
    """Return the id -> element mapping for every ID in *doc* that is not
    protected by *options*."""
    elements = findElementsWithId(doc.documentElement)
    for protected in protected_ids(elements, options):
        del elements[protected]
    return elements
def removeUnreferencedIDs(referencedIDs, identifiedElements):
    """
    Removes the id attribute from every identified element whose ID is not
    referenced anywhere (except on tags we always keep, e.g. <font>).
    Returns the number of ID attributes removed.
    """
    global _num_ids_removed
    keepTags = ['font']
    num = 0
    for elem_id, elem in identifiedElements.items():
        if elem_id in referencedIDs or elem.nodeName in keepTags:
            continue
        elem.removeAttribute('id')
        _num_ids_removed += 1
        num += 1
    return num
def removeNamespacedAttributes(node, namespaces):
    """Strip from *node* (recursively) every attribute whose namespace URI
    appears in *namespaces*.

    Returns the number of attributes removed.
    """
    num = 0
    if node.nodeType == Node.ELEMENT_NODE:
        # collect first, then remove, so we never mutate the attribute
        # map while scanning it
        attrs = node.attributes
        doomed = []
        for idx in range(attrs.length):
            candidate = attrs.item(idx)
            if candidate is not None and candidate.namespaceURI in namespaces:
                doomed.append(candidate.nodeName)
        for attrName in doomed:
            node.removeAttribute(attrName)
        num += len(doomed)
        # recurse into the children
        for child in node.childNodes:
            num += removeNamespacedAttributes(child, namespaces)
    return num
def removeNamespacedElements(node, namespaces):
    """Remove from *node* (recursively) every child node whose namespace
    URI appears in *namespaces*.

    Returns the number of nodes removed.
    """
    num = 0
    if node.nodeType == Node.ELEMENT_NODE:
        # collect first, then remove, so we never mutate childNodes
        # while scanning it
        doomed = [child for child in node.childNodes
                  if child is not None and child.namespaceURI in namespaces]
        for child in doomed:
            node.removeChild(child)
        num += len(doomed)
        # recurse into the surviving children
        for child in node.childNodes:
            num += removeNamespacedElements(child, namespaces)
    return num
def removeDescriptiveElements(doc, options):
    """Remove <title>, <desc> and/or <metadata> elements as requested by
    *options*.

    Returns the number of elements removed, or None when the options
    request no removal at all.
    """
    doomedTags = []
    if options.remove_descriptive_elements:
        doomedTags += ["title", "desc", "metadata"]
    else:
        if options.remove_titles:
            doomedTags.append("title")
        if options.remove_descriptions:
            doomedTags.append("desc")
        if options.remove_metadata:
            doomedTags.append("metadata")
    if not doomedTags:
        return
    global _num_elements_removed
    doomed = []
    for tag in doomedTags:
        doomed.extend(doc.documentElement.getElementsByTagName(tag))
    num = 0
    for element in doomed:
        element.parentNode.removeChild(element)
        num += 1
        _num_elements_removed += 1
    return num
def removeNestedGroups(node):
    """
    This walks further and further down the tree, removing groups
    which do not have any attributes or a title/desc child and
    promoting their children up one level
    """
    global _num_elements_removed
    num = 0
    groupsToRemove = []
    # Only consider <g> elements for promotion if this element isn't a <switch>.
    # (partial fix for bug 594930, required by the SVG spec however)
    if not (node.nodeType == Node.ELEMENT_NODE and node.nodeName == 'switch'):
        for child in node.childNodes:
            if child.nodeName == 'g' and child.namespaceURI == NS['SVG'] and len(child.attributes) == 0:
                # only collapse group if it does not have a title or desc as a direct descendant,
                # (for-else: only runs the else when the loop found no such child)
                for grandchild in child.childNodes:
                    if grandchild.nodeType == Node.ELEMENT_NODE and grandchild.namespaceURI == NS['SVG'] and \
                            grandchild.nodeName in ['title', 'desc']:
                        break
                else:
                    groupsToRemove.append(child)
    for g in groupsToRemove:
        # promote each child of the group to the group's position, then drop the group
        while g.childNodes.length > 0:
            g.parentNode.insertBefore(g.firstChild, g)
        g.parentNode.removeChild(g)
        _num_elements_removed += 1
        num += 1
    # now recurse for children
    for child in node.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            num += removeNestedGroups(child)
    return num
def moveCommonAttributesToParentGroup(elem, referencedElements):
    """
    This recursively calls this function on all children of the passed in element
    and then iterates over all child elements and removes common inheritable attributes
    from the children and places them in the parent group. But only if the parent contains
    nothing but element children and whitespace. The attributes are only removed from the
    children if the children are not referenced by other elements in the document.
    """
    num = 0
    childElements = []
    # recurse first into the children (depth-first)
    for child in elem.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            # only add and recurse if the child is not referenced elsewhere
            if not child.getAttribute('id') in referencedElements:
                childElements.append(child)
                num += moveCommonAttributesToParentGroup(child, referencedElements)
        # else if the parent has non-whitespace text children, do not
        # try to move common attributes
        elif child.nodeType == Node.TEXT_NODE and child.nodeValue.strip():
            return num
    # only process the children if there are more than one element
    if len(childElements) <= 1:
        return num
    commonAttrs = {}
    # add all inheritable properties of the first child element
    # FIXME: Note there is a chance that the first child is a set/animate in which case
    # its fill attribute is not what we want to look at, we should look for the first
    # non-animate/set element
    attrList = childElements[0].attributes
    for index in range(attrList.length):
        attr = attrList.item(index)
        # this is most of the inheritable properties from http://www.w3.org/TR/SVG11/propidx.html
        # and http://www.w3.org/TR/SVGTiny12/attributeTable.html
        if attr.nodeName in ['clip-rule',
                             'display-align',
                             'fill', 'fill-opacity', 'fill-rule',
                             'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
                             'font-style', 'font-variant', 'font-weight',
                             'letter-spacing',
                             'pointer-events', 'shape-rendering',
                             'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
                             'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
                             'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
                             'word-spacing', 'writing-mode']:
            # we just add all the attributes from the first child
            commonAttrs[attr.nodeName] = attr.nodeValue
    # for each subsequent child element
    for childNum in range(len(childElements)):
        # skip first child
        if childNum == 0:
            continue
        child = childElements[childNum]
        # if we are on an animateXXX/set element, ignore it (due to the 'fill' attribute)
        if child.localName in ['set', 'animate', 'animateColor', 'animateTransform', 'animateMotion']:
            continue
        distinctAttrs = []
        # loop through all current 'common' attributes
        for name in commonAttrs:
            # if this child doesn't match that attribute, schedule it for removal
            if child.getAttribute(name) != commonAttrs[name]:
                distinctAttrs.append(name)
        # remove those attributes which are not common
        for name in distinctAttrs:
            del commonAttrs[name]
    # commonAttrs now has all the inheritable attributes which are common among all child elements
    # NOTE(review): animate/set children were skipped in the comparison above
    # but still have the common attributes removed in this loop -- confirm intended.
    for name in commonAttrs:
        for child in childElements:
            child.removeAttribute(name)
        elem.setAttribute(name, commonAttrs[name])
    # update our statistic (we remove N*M attributes and add back in M attributes)
    num += (len(childElements) - 1) * len(commonAttrs)
    return num
def mergeSiblingGroupsWithCommonAttributes(elem):
    """
    Merge two or more sibling <g> elements with the identical attributes.
    This function acts recursively on the given element.

    NOTE(review): merges performed at this level are not counted; *num*
    only aggregates the counts returned by the recursive calls -- confirm
    whether callers rely on the return value.
    """
    num = 0
    # walk the children from last to first so index arithmetic stays valid
    i = elem.childNodes.length - 1
    while i >= 0:
        currentNode = elem.childNodes.item(i)
        if currentNode.nodeType != Node.ELEMENT_NODE or currentNode.nodeName != 'g' or \
                currentNode.namespaceURI != NS['SVG']:
            i -= 1
            continue
        attributes = {a.nodeName: a.nodeValue for a in currentNode.attributes.values()}
        if not attributes:
            i -= 1
            continue
        # scan backwards for a run of <g> siblings carrying identical attributes
        runStart, runEnd = i, i
        runElements = 1
        while runStart > 0:
            nextNode = elem.childNodes.item(runStart - 1)
            if nextNode.nodeType == Node.ELEMENT_NODE:
                if nextNode.nodeName != 'g' or nextNode.namespaceURI != NS['SVG']:
                    break
                nextAttributes = {a.nodeName: a.nodeValue for a in nextNode.attributes.values()}
                # a group holding a title/desc must not be merged away
                hasNoMergeTags = (True for n in nextNode.childNodes
                                  if n.nodeType == Node.ELEMENT_NODE
                                  and n.nodeName in ('title', 'desc')
                                  and n.namespaceURI == NS['SVG'])
                if attributes != nextAttributes or any(hasNoMergeTags):
                    break
                else:
                    runElements += 1
                    runStart -= 1
            else:
                # non-element nodes (text/comments) do not break the run
                runStart -= 1
        # Next loop will start from here
        i = runStart - 1
        if runElements < 2:
            continue
        # Find the <g> entry that starts the run (we might have run
        # past it into a text node or a comment node).
        while True:
            node = elem.childNodes.item(runStart)
            if node.nodeType == Node.ELEMENT_NODE and node.nodeName == 'g' and node.namespaceURI == NS['SVG']:
                break
            runStart += 1
        primaryGroup = elem.childNodes.item(runStart)
        runStart += 1
        nodes = elem.childNodes[runStart:runEnd+1]
        # NOTE(review): the lists below are minidom's childNodes manipulated
        # directly; parentNode of the moved nodes is not updated here --
        # confirm downstream serialization tolerates this.
        for node in nodes:
            if node.nodeType == Node.ELEMENT_NODE and node.nodeName == 'g' and node.namespaceURI == NS['SVG']:
                # Merge
                primaryGroup.childNodes.extend(node.childNodes)
                node.childNodes = []
            else:
                primaryGroup.childNodes.append(node)
            elem.childNodes.remove(node)
    # each child gets the same treatment, recursively
    for childNode in elem.childNodes:
        if childNode.nodeType == Node.ELEMENT_NODE:
            num += mergeSiblingGroupsWithCommonAttributes(childNode)
    return num
def createGroupsForCommonAttributes(elem):
    """
    Creates <g> elements to contain runs of 3 or more
    consecutive child elements having at least one common attribute.
    Common attributes are not promoted to the <g> by this function.
    This is handled by moveCommonAttributesToParentGroup.
    If all children have a common attribute, an extra <g> is not created.
    This function acts recursively on the given element.
    """
    num = 0
    global _num_elements_removed
    # TODO perhaps all of the Presentation attributes in http://www.w3.org/TR/SVG/struct.html#GElement
    # could be added here
    # Cyn: These attributes are the same as in moveCommonAttributesToParentGroup,
    # and must always be kept in sync with that list.
    for curAttr in ['clip-rule',
                    'display-align',
                    'fill', 'fill-opacity', 'fill-rule',
                    'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
                    'font-style', 'font-variant', 'font-weight',
                    'letter-spacing',
                    'pointer-events', 'shape-rendering',
                    'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
                    'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
                    'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
                    'word-spacing', 'writing-mode']:
        # Iterate through the children in reverse order, so item(i) for
        # items we have yet to visit still returns the correct nodes.
        curChild = elem.childNodes.length - 1
        while curChild >= 0:
            childNode = elem.childNodes.item(curChild)
            if (
                childNode.nodeType == Node.ELEMENT_NODE and
                childNode.getAttribute(curAttr) != '' and
                childNode.nodeName in [
                    # only attempt to group elements that the content model allows to be children of a <g>
                    # SVG 1.1 (see https://www.w3.org/TR/SVG/struct.html#GElement)
                    'animate', 'animateColor', 'animateMotion', 'animateTransform', 'set',  # animation elements
                    'desc', 'metadata', 'title',  # descriptive elements
                    'circle', 'ellipse', 'line', 'path', 'polygon', 'polyline', 'rect',  # shape elements
                    'defs', 'g', 'svg', 'symbol', 'use',  # structural elements
                    'linearGradient', 'radialGradient',  # gradient elements
                    'a', 'altGlyphDef', 'clipPath', 'color-profile', 'cursor', 'filter',
                    'font', 'font-face', 'foreignObject', 'image', 'marker', 'mask',
                    'pattern', 'script', 'style', 'switch', 'text', 'view',
                    # SVG 1.2 (see https://www.w3.org/TR/SVGTiny12/elementTable.html)
                    'animation', 'audio', 'discard', 'handler', 'listener',
                    'prefetch', 'solidColor', 'textArea', 'video'
                ]
            ):
                # We're in a possible run! Track the value and run length.
                value = childNode.getAttribute(curAttr)
                runStart, runEnd = curChild, curChild
                # Run elements includes only element tags, no whitespace/comments/etc.
                # Later, we calculate a run length which includes these.
                runElements = 1
                # Backtrack to get all the nodes having the same
                # attribute value, preserving any nodes in-between.
                while runStart > 0:
                    nextNode = elem.childNodes.item(runStart - 1)
                    if nextNode.nodeType == Node.ELEMENT_NODE:
                        if nextNode.getAttribute(curAttr) != value:
                            break
                        else:
                            runElements += 1
                            runStart -= 1
                    else:
                        runStart -= 1
                if runElements >= 3:
                    # Include whitespace/comment/etc. nodes in the run.
                    while runEnd < elem.childNodes.length - 1:
                        if elem.childNodes.item(runEnd + 1).nodeType == Node.ELEMENT_NODE:
                            break
                        else:
                            runEnd += 1
                    runLength = runEnd - runStart + 1
                    if runLength == elem.childNodes.length:  # Every child has this
                        # If the current parent is a <g> already,
                        if elem.nodeName == 'g' and elem.namespaceURI == NS['SVG']:
                            # do not act altogether on this attribute; all the
                            # children have it in common.
                            # Let moveCommonAttributesToParentGroup do it.
                            curChild = -1
                            continue
                        # otherwise, it might be an <svg> element, and
                        # even if all children have the same attribute value,
                        # it's going to be worth making the <g> since
                        # <svg> doesn't support attributes like 'stroke'.
                        # Fall through.
                    # Create a <g> element from scratch.
                    # We need the Document for this.
                    document = elem.ownerDocument
                    group = document.createElementNS(NS['SVG'], 'g')
                    # Move the run of elements to the group.
                    # a) ADD the nodes to the new group.
                    group.childNodes[:] = elem.childNodes[runStart:runEnd + 1]
                    for child in group.childNodes:
                        child.parentNode = group
                    # b) REMOVE the nodes from the element.
                    elem.childNodes[runStart:runEnd + 1] = []
                    # Include the group in elem's children.
                    elem.childNodes.insert(runStart, group)
                    group.parentNode = elem
                    num += 1
                    curChild = runStart - 1
                    # the new <g> offsets the element-count statistic by one
                    _num_elements_removed -= 1
                else:
                    curChild -= 1
            else:
                curChild -= 1
    # each child gets the same treatment, recursively
    for childNode in elem.childNodes:
        if childNode.nodeType == Node.ELEMENT_NODE:
            num += createGroupsForCommonAttributes(childNode)
    return num
def removeUnusedAttributesOnParent(elem):
    """Drop inheritable presentation attributes from *elem* when every one
    of its child elements overrides them (so no child actually inherits).

    Acts recursively on all descendant elements first.
    Returns the number of attributes removed.
    """
    num = 0
    children = []
    # depth-first: handle descendants before this element
    for candidate in elem.childNodes:
        if candidate.nodeType == Node.ELEMENT_NODE:
            children.append(candidate)
            num += removeUnusedAttributesOnParent(candidate)
    # nothing to decide with zero or one child element
    if len(children) <= 1:
        return num
    # inheritable presentation attributes, per the SVG 1.1 property index
    inheritable_names = ('clip-rule',
                         'display-align',
                         'fill', 'fill-opacity', 'fill-rule',
                         'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
                         'font-style', 'font-variant', 'font-weight',
                         'letter-spacing',
                         'pointer-events', 'shape-rendering',
                         'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
                         'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
                         'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
                         'word-spacing', 'writing-mode')
    # start with every inheritable attribute present on the parent
    candidates = {}
    attrs = elem.attributes
    for idx in range(attrs.length):
        attr = attrs.item(idx)
        if attr.nodeName in inheritable_names:
            candidates[attr.nodeName] = attr.nodeValue
    # keep an attribute on the parent as soon as one child inherits it
    for child in children:
        inherited = []
        for name in candidates:
            val = child.getAttribute(name)
            if val == '' or val is None or val == 'inherit':
                inherited.append(name)
        for name in inherited:
            del candidates[name]
    # whatever is left is overridden by every child: drop it
    for name in candidates:
        elem.removeAttribute(name)
        num += 1
    return num
def removeDuplicateGradientStops(doc):
    """Remove gradient <stop> elements that repeat the offset, color,
    opacity and style of the immediately preceding stop at that offset.
    Offsets are normalized (percentages to fractions, whole floats to ints)
    as a side effect.  Returns the number of stops removed.
    """
    global _num_elements_removed
    num = 0
    for gradType in ['linearGradient', 'radialGradient']:
        for grad in doc.getElementsByTagName(gradType):
            stops = {}
            stopsToRemove = []
            for stop in grad.getElementsByTagName('stop'):
                # convert percentages into a floating point number
                offsetU = SVGLength(stop.getAttribute('offset'))
                if offsetU.units == Unit.PCT:
                    offset = offsetU.value / 100.0
                elif offsetU.units == Unit.NONE:
                    offset = offsetU.value
                else:
                    offset = 0
                # set the stop offset value to the integer or floating point equivalent
                if int(offset) == offset:
                    stop.setAttribute('offset', str(int(offset)))
                else:
                    stop.setAttribute('offset', str(offset))
                color = stop.getAttribute('stop-color')
                opacity = stop.getAttribute('stop-opacity')
                style = stop.getAttribute('style')
                # NOTE(review): only the most recent stop at this offset is
                # compared (the dict entry is overwritten below)
                if offset in stops:
                    oldStop = stops[offset]
                    if oldStop[0] == color and oldStop[1] == opacity and oldStop[2] == style:
                        stopsToRemove.append(stop)
                stops[offset] = [color, opacity, style]
            for stop in stopsToRemove:
                stop.parentNode.removeChild(stop)
                num += 1
                _num_elements_removed += 1
    return num
def collapseSinglyReferencedGradients(doc):
    """Inlines gradients that are referenced by exactly one other gradient.

    When gradient A is referenced (via xlink:href) only by gradient B, A's
    stops — and any geometry attributes B leaves unspecified — are moved onto
    B, and A is deleted from the document.

    Returns the number of gradients collapsed.
    """
    global _num_elements_removed
    num = 0
    identifiedElements = findElementsWithId(doc.documentElement)
    # make sure to reset the ref'ed ids for when we are running this in testscour
    for rid, nodes in six.iteritems(findReferencedElements(doc.documentElement)):
        # Make sure that there's actually a defining element for the current ID name.
        # (Cyn: I've seen documents with #id references but no element with that ID!)
        if len(nodes) == 1 and rid in identifiedElements:
            elem = identifiedElements[rid]
            if (
                elem is not None and
                elem.nodeType == Node.ELEMENT_NODE and
                elem.nodeName in ['linearGradient', 'radialGradient'] and
                elem.namespaceURI == NS['SVG']
            ):
                # found a gradient that is referenced by only 1 other element
                refElem = nodes.pop()
                # only collapse when the single referencer is itself a gradient
                if refElem.nodeType == Node.ELEMENT_NODE and refElem.nodeName in ['linearGradient', 'radialGradient'] \
                        and refElem.namespaceURI == NS['SVG']:
                    # elem is a gradient referenced by only one other gradient (refElem)
                    # add the stops to the referencing gradient (this removes them from elem)
                    # but only if refElem has no stops of its own (its stops win)
                    if len(refElem.getElementsByTagName('stop')) == 0:
                        stopsToAdd = elem.getElementsByTagName('stop')
                        for stop in stopsToAdd:
                            refElem.appendChild(stop)
                    # adopt the gradientUnits, spreadMethod, gradientTransform attributes if
                    # they are unspecified on refElem
                    for attr in ['gradientUnits', 'spreadMethod', 'gradientTransform']:
                        if refElem.getAttribute(attr) == '' and not elem.getAttribute(attr) == '':
                            refElem.setAttributeNS(None, attr, elem.getAttribute(attr))
                    # if both are radialGradients, adopt elem's fx,fy,cx,cy,r attributes if
                    # they are unspecified on refElem
                    if elem.nodeName == 'radialGradient' and refElem.nodeName == 'radialGradient':
                        for attr in ['fx', 'fy', 'cx', 'cy', 'r']:
                            if refElem.getAttribute(attr) == '' and not elem.getAttribute(attr) == '':
                                refElem.setAttributeNS(None, attr, elem.getAttribute(attr))
                    # if both are linearGradients, adopt elem's x1,y1,x2,y2 attributes if
                    # they are unspecified on refElem
                    if elem.nodeName == 'linearGradient' and refElem.nodeName == 'linearGradient':
                        for attr in ['x1', 'y1', 'x2', 'y2']:
                            if refElem.getAttribute(attr) == '' and not elem.getAttribute(attr) == '':
                                refElem.setAttributeNS(None, attr, elem.getAttribute(attr))
                    target_href = elem.getAttributeNS(NS['XLINK'], 'href')
                    if target_href:
                        # If the elem node had an xlink:href, then the
                        # refElem have to point to it as well to
                        # preserve the semantics of the image.
                        refElem.setAttributeNS(NS['XLINK'], 'href', target_href)
                    else:
                        # The elem node had no xlink:href reference,
                        # so we can simply remove the attribute.
                        refElem.removeAttributeNS(NS['XLINK'], 'href')
                    # now delete elem
                    elem.parentNode.removeChild(elem)
                    _num_elements_removed += 1
                    num += 1
    return num
def computeGradientBucketKey(grad):
    """Builds an opaque string key for a gradient element such that two
    gradients are duplicates of each other exactly when their keys compare
    equal.
    """
    # Geometry/placement attributes that distinguish gradients.  A
    # linearGradient can never collide with a radialGradient because their
    # geometry attribute sets differ.
    geometryAttrs = ('gradientUnits', 'spreadMethod', 'gradientTransform',
                     'x1', 'y1', 'x2', 'y2', 'cx', 'cy', 'fx', 'fy', 'r')
    stopAttrs = ('offset', 'stop-color', 'stop-opacity', 'style')
    parts = [grad.getAttribute(name) for name in geometryAttrs]
    parts.append(grad.getAttributeNS(NS['XLINK'], 'href'))
    stops = grad.getElementsByTagName('stop')
    for index in range(stops.length):
        stop = stops.item(index)
        parts.extend(stop.getAttribute(name) for name in stopAttrs)
    # The raw ASCII "record separator" control character is vanishingly
    # unlikely to appear in any of these attribute values, so no escaping
    # is required when joining.
    return "\x1e".join(parts)
def detect_duplicate_gradients(*grad_lists):
    """Detects duplicate gradients in each iterable/generator given as argument.

    Yields (master_id, duplicates_ids, duplicates) tuples where:
      * master_id: ID attribute of the element kept as the canonical copy.
        Non-empty as long as at least one gradient in the group has an ID.
      * duplicates_ids: IDs of the redundant gradient elements (entries may be
        empty strings when a gradient carried no ID attribute).
      * duplicates: the redundant elements themselves. Never contains the
        canonical element and is index-aligned with duplicates_ids, i.e.
        duplicates[X].getAttribute("id") == duplicates_ids[X].
    """
    for gradients in grad_lists:
        # bucket gradients by their content key; same bucket == duplicates
        buckets = defaultdict(list)
        for gradient in gradients:
            buckets[computeGradientBucketKey(gradient)].append(gradient)
        for group in six.itervalues(buckets):
            if len(group) < 2:
                # a lone gradient in its bucket has no duplicates
                continue
            keeper = group[0]
            redundant = group[1:]
            redundant_ids = [g.getAttribute('id') for g in redundant]
            keeper_id = keeper.getAttribute('id')
            if not keeper_id:
                # The chosen canonical copy has no ID; swap it with the first
                # duplicate that does have one, otherwise references would
                # break like we saw in GH#203.
                for pos, candidate_id in enumerate(redundant_ids):
                    if candidate_id:
                        # the keeper element itself is not tracked any further,
                        # so only the ID and the element slot need swapping
                        keeper_id = candidate_id
                        redundant[pos] = keeper
                        # blank the old id to avoid a redundant remapping
                        redundant_ids[pos] = ""
                        break
            yield keeper_id, redundant_ids, redundant
def dedup_gradient(master_id, duplicates_ids, duplicates, referenced_ids):
    """Removes duplicate gradients and re-points all their references at the
    master gradient.

    Arguments:
      master_id      -- ID of the gradient element that is kept
      duplicates_ids -- IDs of the duplicate gradients (entries may be empty
                        strings for gradients without an ID)
      duplicates     -- the duplicate gradient elements, index-aligned with
                        duplicates_ids
      referenced_ids -- mapping of element ID -> collection of referencing
                        elements; updated in place so later calls see the
                        remapped references without a rescan
    """
    func_iri = None
    for dup_id, dup_grad in zip(duplicates_ids, duplicates):
        # if the duplicate gradient no longer has a parent that means it was
        # already re-mapped to another master gradient
        if not dup_grad.parentNode:
            continue
        # With --keep-unreferenced-defs, we can end up with
        # unreferenced gradients. See GH#156.
        if dup_id in referenced_ids:
            if func_iri is None:
                # matches url(#<ANY_DUP_ID>), url('#<ANY_DUP_ID>') and url("#<ANY_DUP_ID>")
                # BUG FIX: each ID is passed through re.escape() because valid
                # XML IDs may contain regex metacharacters (e.g. "."), which
                # would otherwise corrupt the alternation and cause false
                # matches or a broken pattern.
                dup_id_regex = "|".join(re.escape(d) for d in duplicates_ids)
                func_iri = re.compile('url\\([\'"]?#(?:' + dup_id_regex + ')[\'"]?\\)')
            for elem in referenced_ids[dup_id]:
                # find out which attribute referenced the duplicate gradient
                for attr in ['fill', 'stroke']:
                    v = elem.getAttribute(attr)
                    (v_new, n) = func_iri.subn('url(#' + master_id + ')', v)
                    if n > 0:
                        elem.setAttribute(attr, v_new)
                if elem.getAttributeNS(NS['XLINK'], 'href') == '#' + dup_id:
                    elem.setAttributeNS(NS['XLINK'], 'href', '#' + master_id)
                # references may also hide inside the style attribute
                styles = _getStyle(elem)
                for style in styles:
                    v = styles[style]
                    (v_new, n) = func_iri.subn('url(#' + master_id + ')', v)
                    if n > 0:
                        styles[style] = v_new
                _setStyle(elem, styles)
        # now that all referencing elements have been re-mapped to the master
        # it is safe to remove this gradient from the document
        dup_grad.parentNode.removeChild(dup_grad)
    # If the gradients have an ID, we update referenced_ids to match the newly remapped IDs.
    # This enables us to avoid calling findReferencedElements once per loop, which is helpful as it is
    # one of the slowest functions in scour.
    if master_id:
        try:
            master_references = referenced_ids[master_id]
        except KeyError:
            master_references = set()
        for dup_id in duplicates_ids:
            references = referenced_ids.pop(dup_id, None)
            if references is None:
                continue
            master_references.update(references)
        # Only necessary but needed if the master gradient did
        # not have any references originally
        referenced_ids[master_id] = master_references
def removeDuplicateGradients(doc):
    """Collapses duplicate linear/radial gradients down to a single copy and
    remaps every reference to the surviving element.

    Iterates until a fixed point, because removing duplicates can make
    previously distinct gradients identical (e.g. via xlink:href chains).

    Returns the number of gradient elements removed.
    """
    removed = 0
    previous = -1
    # all referenced IDs mapped to the elements referencing them; kept up to
    # date in place by dedup_gradient()
    referenced_ids = findReferencedElements(doc.documentElement)
    while removed != previous:
        previous = removed
        gradient_lists = (doc.getElementsByTagName('linearGradient'),
                          doc.getElementsByTagName('radialGradient'))
        for master_id, dup_ids, dups in detect_duplicate_gradients(*gradient_lists):
            dedup_gradient(master_id, dup_ids, dups, referenced_ids)
            removed += len(dups)
    return removed
def _getStyle(node):
    u"""Returns the style attribute of a node as a dictionary."""
    if node.nodeType != Node.ELEMENT_NODE:
        return {}
    raw = node.getAttribute('style')
    if not raw:
        return {}
    parsed = {}
    for declaration in raw.split(';'):
        parts = declaration.split(':')
        # silently skip malformed declarations (no colon, or more than one)
        if len(parts) == 2:
            parsed[parts[0].strip()] = parts[1].strip()
    return parsed
def _setStyle(node, styleMap):
    u"""Sets the style attribute of a node to the dictionary ``styleMap``."""
    serialized = ';'.join(name + ':' + value for name, value in styleMap.items())
    if serialized:
        node.setAttribute('style', serialized)
    elif node.getAttribute('style'):
        # the map is empty: drop a now-useless existing style attribute
        node.removeAttribute('style')
    return node
def repairStyle(node, options):
    """Cleans up the inline style of 'node' and, recursively, of all its
    children.

    Removes properties that can have no visual effect (e.g. stroke-* when
    stroke is 'none' or fully transparent), strips font properties from
    elements that cannot contain text, drops inapplicable 'overflow' values,
    and — when options.style_to_xml is set — converts style properties into
    the equivalent presentation attributes.

    Returns the number of repairs/removals performed.
    """
    num = 0
    styleMap = _getStyle(node)
    if styleMap:
        # I've seen this enough to know that I need to correct it:
        # fill: url(#linearGradient4918) rgb(0, 0, 0);
        for prop in ['fill', 'stroke']:
            if prop in styleMap:
                chunk = styleMap[prop].split(') ')
                if (len(chunk) == 2
                        and (chunk[0][:5] == 'url(#' or chunk[0][:6] == 'url("#' or chunk[0][:6] == "url('#")
                        and chunk[1] == 'rgb(0, 0, 0)'):
                    styleMap[prop] = chunk[0] + ')'
                    num += 1
        # Here is where we can weed out unnecessary styles like:
        #  opacity:1
        if 'opacity' in styleMap:
            opacity = float(styleMap['opacity'])
            # if opacity='0' then all fill and stroke properties are useless, remove them
            # NOTE(review): 'stroke-opacity' appears twice in this list; the
            # duplicate is harmless (second pass finds it already deleted).
            for uselessStyle in ['fill', 'fill-opacity', 'fill-rule', 'stroke', 'stroke-linejoin',
                                 'stroke-opacity', 'stroke-miterlimit', 'stroke-linecap', 'stroke-dasharray',
                                 'stroke-dashoffset', 'stroke-opacity']:
                if uselessStyle in styleMap and not styleInheritedByChild(node, uselessStyle):
                    del styleMap[uselessStyle]
                    num += 1
        # if stroke:none, then remove all stroke-related properties (stroke-width, etc)
        # TODO: should also detect if the computed value of this element is stroke="none"
        if 'stroke' in styleMap and styleMap['stroke'] == 'none':
            for strokestyle in ['stroke-width', 'stroke-linejoin', 'stroke-miterlimit',
                                'stroke-linecap', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-opacity']:
                if strokestyle in styleMap and not styleInheritedByChild(node, strokestyle):
                    del styleMap[strokestyle]
                    num += 1
            # we need to properly calculate computed values
            # 'stroke:none' itself is only removable when no child inherits it
            # and the inherited value is already none/absent
            if not styleInheritedByChild(node, 'stroke'):
                if styleInheritedFromParent(node, 'stroke') in [None, 'none']:
                    del styleMap['stroke']
                    num += 1
        # if fill:none, then remove all fill-related properties (fill-rule, etc)
        if 'fill' in styleMap and styleMap['fill'] == 'none':
            for fillstyle in ['fill-rule', 'fill-opacity']:
                if fillstyle in styleMap and not styleInheritedByChild(node, fillstyle):
                    del styleMap[fillstyle]
                    num += 1
        # fill-opacity: 0
        if 'fill-opacity' in styleMap:
            fillOpacity = float(styleMap['fill-opacity'])
            if fillOpacity == 0.0:
                for uselessFillStyle in ['fill', 'fill-rule']:
                    if uselessFillStyle in styleMap and not styleInheritedByChild(node, uselessFillStyle):
                        del styleMap[uselessFillStyle]
                        num += 1
        # stroke-opacity: 0
        if 'stroke-opacity' in styleMap:
            strokeOpacity = float(styleMap['stroke-opacity'])
            if strokeOpacity == 0.0:
                for uselessStrokeStyle in ['stroke', 'stroke-width', 'stroke-linejoin', 'stroke-linecap',
                                           'stroke-dasharray', 'stroke-dashoffset']:
                    if uselessStrokeStyle in styleMap and not styleInheritedByChild(node, uselessStrokeStyle):
                        del styleMap[uselessStrokeStyle]
                        num += 1
        # stroke-width: 0
        if 'stroke-width' in styleMap:
            strokeWidth = SVGLength(styleMap['stroke-width'])
            if strokeWidth.value == 0.0:
                for uselessStrokeStyle in ['stroke', 'stroke-linejoin', 'stroke-linecap',
                                           'stroke-dasharray', 'stroke-dashoffset', 'stroke-opacity']:
                    if uselessStrokeStyle in styleMap and not styleInheritedByChild(node, uselessStrokeStyle):
                        del styleMap[uselessStrokeStyle]
                        num += 1
        # remove font properties for non-text elements
        # I've actually observed this in real SVG content
        if not mayContainTextNodes(node):
            for fontstyle in ['font-family', 'font-size', 'font-stretch', 'font-size-adjust',
                              'font-style', 'font-variant', 'font-weight',
                              'letter-spacing', 'line-height', 'kerning',
                              'text-align', 'text-anchor', 'text-decoration',
                              'text-rendering', 'unicode-bidi',
                              'word-spacing', 'writing-mode']:
                if fontstyle in styleMap:
                    del styleMap[fontstyle]
                    num += 1
        # remove inkscape-specific styles
        # TODO: need to get a full list of these
        for inkscapeStyle in ['-inkscape-font-specification']:
            if inkscapeStyle in styleMap:
                del styleMap[inkscapeStyle]
                num += 1
        if 'overflow' in styleMap:
            # remove overflow from elements to which it does not apply,
            # see https://www.w3.org/TR/SVG/masking.html#OverflowProperty
            if node.nodeName not in ['svg', 'symbol', 'image', 'foreignObject', 'marker', 'pattern']:
                del styleMap['overflow']
                num += 1
            # if the node is not the root <svg> element the SVG's user agent style sheet
            # overrides the initial (i.e. default) value with the value 'hidden', which can consequently be removed
            # (see last bullet point in the link above)
            elif node != node.ownerDocument.documentElement:
                if styleMap['overflow'] == 'hidden':
                    del styleMap['overflow']
                    num += 1
            # on the root <svg> element the CSS2 default overflow="visible" is the initial value and we can remove it
            elif styleMap['overflow'] == 'visible':
                del styleMap['overflow']
                num += 1
        # now if any of the properties match known SVG attributes we prefer attributes
        # over style so emit them and remove them from the style map
        if options.style_to_xml:
            for propName in list(styleMap):
                if propName in svgAttributes:
                    node.setAttribute(propName, styleMap[propName])
                    del styleMap[propName]
        _setStyle(node, styleMap)
    # recurse for our child elements
    for child in node.childNodes:
        num += repairStyle(child, options)
    return num
def styleInheritedFromParent(node, style):
    """
    Returns the value of 'style' that is inherited from the parents of the passed-in node
    Warning: This method only considers presentation attributes and inline styles,
    any style sheets are ignored!
    """
    parent = node.parentNode
    # stop as soon as we reach the Document node itself
    if parent.nodeType == Node.DOCUMENT_NODE:
        return None
    # inline styles win over presentation attributes, so consult them first
    inline = _getStyle(parent)
    if style in inline and inline[style] != 'inherit':
        return inline[style]
    # fall back to the presentation attribute
    attrValue = parent.getAttribute(style)
    if attrValue not in ('', 'inherit'):
        return attrValue
    # nothing defined at this level; keep walking up the tree
    return styleInheritedFromParent(parent, style)
def styleInheritedByChild(node, style, nodeIsChild=False):
    """
    Returns whether 'style' is inherited by any children of the passed-in node
    If False is returned, it is guaranteed that 'style' can safely be removed
    from the passed-in node without influencing visual output of it's children
    If True is returned, the passed-in node should not have its text-based
    attributes removed.
    Warning: This method only considers presentation attributes and inline styles,
    any style sheets are ignored!
    """
    # comment/text/CDATA nodes carry neither attributes nor children, so they
    # can never inherit anything
    if node.nodeType != Node.ELEMENT_NODE:
        return False
    if nodeIsChild:
        # a child that redefines 'style' shields its entire subtree, so the
        # search can stop for this branch of the DOM tree
        if node.getAttribute(style) not in ('', 'inherit'):
            return False
        ownStyles = _getStyle(node)
        if style in ownStyles and ownStyles[style] != 'inherit':
            return False
    elif not node.childNodes:
        # the passed-in node has no children, so nothing can inherit from it
        return False
    # recursively check every child branch
    for child in node.childNodes:
        if styleInheritedByChild(child, style, True):
            return True
    # Container elements do not render anything themselves, so an inherited
    # value that no child uses (checked above) is meaningless on them.
    if node.nodeName in ['a', 'defs', 'glyph', 'g', 'marker', 'mask',
                         'missing-glyph', 'pattern', 'svg', 'switch', 'symbol']:
        return False
    # In all other cases (leaf elements at the end of the DOM tree, unknown
    # elements, ...) assume the inherited value is meaningful and keep it.
    return True
def mayContainTextNodes(node):
    """
    Returns True if the passed-in node is probably a text element, or at least
    one of its descendants is probably a text element.
    If False is returned, it is guaranteed that the passed-in node has no
    business having text-based attributes.
    If True is returned, the passed-in node should not have its text-based
    attributes removed.
    """
    # return a previously cached answer if we have one
    try:
        return node.mayContainTextNodes
    except AttributeError:
        pass
    verdict = True  # assume text is possible unless proven otherwise
    if node.nodeType != Node.ELEMENT_NODE:
        # comments, text and CDATA nodes have neither attributes nor children
        verdict = False
    elif node.namespaceURI != NS['SVG']:
        # non-SVG elements are unknown to us, so keep the safe default
        verdict = True
    elif node.nodeName in ['rect', 'circle', 'ellipse', 'line', 'polygon',
                           'polyline', 'path', 'image', 'stop']:
        # blacklisted shapes and friends are guaranteed text-free
        verdict = False
    elif node.nodeName in ['g', 'clipPath', 'marker', 'mask', 'pattern',
                           'linearGradient', 'radialGradient', 'symbol']:
        # Containers hold text only if one of their children may. Deliberately
        # visit every child without short-circuiting so each one gets its
        # cached attribute populated as a side effect.
        verdict = False
        for child in node.childNodes:
            if mayContainTextNodes(child):
                verdict = True
    # Anything else should be considered a future SVG-version text element at
    # best, or an unknown element at worst: verdict stays True.
    # Cache the result before returning it.
    node.mayContainTextNodes = verdict
    return verdict
# A list of default attributes that are safe to remove if all conditions are fulfilled
#
# Each default attribute is an object of type 'DefaultAttribute' with the following fields:
# name - name of the attribute to be matched
# value - default value of the attribute
# units - the unit(s) for which 'value' is valid (see 'Unit' class for possible specifications)
# elements - name(s) of SVG element(s) for which the attribute specification is valid
# conditions - additional conditions that have to be fulfilled for removal of the specified default attribute
# implemented as lambda functions with one argument (an xml.dom.minidom node)
# evaluating to either True or False
# When not specifying a field value, it will be ignored (i.e. always matches)
#
# Sources for this list:
# https://www.w3.org/TR/SVG/attindex.html (mostly implemented)
# https://www.w3.org/TR/SVGTiny12/attributeTable.html (not yet implemented)
# https://www.w3.org/TR/SVG2/attindex.html (not yet implemented)
#
# 'DefaultAttribute' record type; unspecified fields default to None and are
# treated as "always matches" by removeDefaultAttributeValue().
DefaultAttribute = namedtuple('DefaultAttribute', ['name', 'value', 'units', 'elements', 'conditions'])
DefaultAttribute.__new__.__defaults__ = (None,) * len(DefaultAttribute._fields)
default_attributes = [
    # unit systems
    DefaultAttribute('clipPathUnits', 'userSpaceOnUse', elements=['clipPath']),
    DefaultAttribute('filterUnits', 'objectBoundingBox', elements=['filter']),
    DefaultAttribute('gradientUnits', 'objectBoundingBox', elements=['linearGradient', 'radialGradient']),
    DefaultAttribute('maskUnits', 'objectBoundingBox', elements=['mask']),
    DefaultAttribute('maskContentUnits', 'userSpaceOnUse', elements=['mask']),
    DefaultAttribute('patternUnits', 'objectBoundingBox', elements=['pattern']),
    DefaultAttribute('patternContentUnits', 'userSpaceOnUse', elements=['pattern']),
    DefaultAttribute('primitiveUnits', 'userSpaceOnUse', elements=['filter']),
    DefaultAttribute('externalResourcesRequired', 'false',
                     elements=['a', 'altGlyph', 'animate', 'animateColor',
                               'animateMotion', 'animateTransform', 'circle', 'clipPath', 'cursor', 'defs', 'ellipse',
                               'feImage', 'filter', 'font', 'foreignObject', 'g', 'image', 'line', 'linearGradient',
                               'marker', 'mask', 'mpath', 'path', 'pattern', 'polygon', 'polyline', 'radialGradient',
                               'rect', 'script', 'set', 'svg', 'switch', 'symbol', 'text', 'textPath', 'tref', 'tspan',
                               'use', 'view']),
    # svg elements
    DefaultAttribute('width', 100, Unit.PCT, elements=['svg']),
    DefaultAttribute('height', 100, Unit.PCT, elements=['svg']),
    DefaultAttribute('baseProfile', 'none', elements=['svg']),
    DefaultAttribute('preserveAspectRatio', 'xMidYMid meet',
                     elements=['feImage', 'image', 'marker', 'pattern', 'svg', 'symbol', 'view']),
    # common attributes / basic types
    DefaultAttribute('x', 0, elements=['cursor', 'fePointLight', 'feSpotLight', 'foreignObject',
                                       'image', 'pattern', 'rect', 'svg', 'text', 'use']),
    DefaultAttribute('y', 0, elements=['cursor', 'fePointLight', 'feSpotLight', 'foreignObject',
                                       'image', 'pattern', 'rect', 'svg', 'text', 'use']),
    DefaultAttribute('z', 0, elements=['fePointLight', 'feSpotLight']),
    DefaultAttribute('x1', 0, elements=['line']),
    DefaultAttribute('y1', 0, elements=['line']),
    DefaultAttribute('x2', 0, elements=['line']),
    DefaultAttribute('y2', 0, elements=['line']),
    DefaultAttribute('cx', 0, elements=['circle', 'ellipse']),
    DefaultAttribute('cy', 0, elements=['circle', 'ellipse']),
    # markers
    DefaultAttribute('markerUnits', 'strokeWidth', elements=['marker']),
    DefaultAttribute('refX', 0, elements=['marker']),
    DefaultAttribute('refY', 0, elements=['marker']),
    DefaultAttribute('markerHeight', 3, elements=['marker']),
    DefaultAttribute('markerWidth', 3, elements=['marker']),
    DefaultAttribute('orient', 0, elements=['marker']),
    # text / textPath / tspan / tref
    DefaultAttribute('lengthAdjust', 'spacing', elements=['text', 'textPath', 'tref', 'tspan']),
    DefaultAttribute('startOffset', 0, elements=['textPath']),
    DefaultAttribute('method', 'align', elements=['textPath']),
    DefaultAttribute('spacing', 'exact', elements=['textPath']),
    # filters and masks
    # NOTE(review): the unit-less variants below test 'gradientUnits' on
    # filter/mask elements, which actually use filterUnits/maskUnits —
    # confirm whether these conditions should check those attributes instead.
    DefaultAttribute('x', -10, Unit.PCT, ['filter', 'mask']),
    DefaultAttribute('x', -0.1, Unit.NONE, ['filter', 'mask'],
                     conditions=lambda node: node.getAttribute('gradientUnits') != 'userSpaceOnUse'),
    DefaultAttribute('y', -10, Unit.PCT, ['filter', 'mask']),
    DefaultAttribute('y', -0.1, Unit.NONE, ['filter', 'mask'],
                     conditions=lambda node: node.getAttribute('gradientUnits') != 'userSpaceOnUse'),
    DefaultAttribute('width', 120, Unit.PCT, ['filter', 'mask']),
    DefaultAttribute('width', 1.2, Unit.NONE, ['filter', 'mask'],
                     conditions=lambda node: node.getAttribute('gradientUnits') != 'userSpaceOnUse'),
    DefaultAttribute('height', 120, Unit.PCT, ['filter', 'mask']),
    DefaultAttribute('height', 1.2, Unit.NONE, ['filter', 'mask'],
                     conditions=lambda node: node.getAttribute('gradientUnits') != 'userSpaceOnUse'),
    # gradients
    DefaultAttribute('x1', 0, elements=['linearGradient']),
    DefaultAttribute('y1', 0, elements=['linearGradient']),
    DefaultAttribute('y2', 0, elements=['linearGradient']),
    DefaultAttribute('x2', 100, Unit.PCT, elements=['linearGradient']),
    DefaultAttribute('x2', 1, Unit.NONE, elements=['linearGradient'],
                     conditions=lambda node: node.getAttribute('gradientUnits') != 'userSpaceOnUse'),
    # remove fx/fy before cx/cy to catch the case where fx = cx = 50% or fy = cy = 50% respectively
    DefaultAttribute('fx', elements=['radialGradient'],
                     conditions=lambda node: node.getAttribute('fx') == node.getAttribute('cx')),
    DefaultAttribute('fy', elements=['radialGradient'],
                     conditions=lambda node: node.getAttribute('fy') == node.getAttribute('cy')),
    DefaultAttribute('r', 50, Unit.PCT, elements=['radialGradient']),
    DefaultAttribute('r', 0.5, Unit.NONE, elements=['radialGradient'],
                     conditions=lambda node: node.getAttribute('gradientUnits') != 'userSpaceOnUse'),
    DefaultAttribute('cx', 50, Unit.PCT, elements=['radialGradient']),
    DefaultAttribute('cx', 0.5, Unit.NONE, elements=['radialGradient'],
                     conditions=lambda node: node.getAttribute('gradientUnits') != 'userSpaceOnUse'),
    DefaultAttribute('cy', 50, Unit.PCT, elements=['radialGradient']),
    DefaultAttribute('cy', 0.5, Unit.NONE, elements=['radialGradient'],
                     conditions=lambda node: node.getAttribute('gradientUnits') != 'userSpaceOnUse'),
    DefaultAttribute('spreadMethod', 'pad', elements=['linearGradient', 'radialGradient']),
    # filter effects
    # TODO: Some numerical attributes allow an optional second value ("number-optional-number")
    #       and are currently handled as strings to avoid an exception in 'SVGLength', see
    #       https://github.com/scour-project/scour/pull/192
    DefaultAttribute('amplitude', 1, elements=['feFuncA', 'feFuncB', 'feFuncG', 'feFuncR']),
    DefaultAttribute('azimuth', 0, elements=['feDistantLight']),
    # NOTE(review): per SVG 1.1, 'baseFrequency' is an attribute of
    # feTurbulence, not of the feFunc* transfer elements — confirm this
    # element list (as written, the entry can never match anything).
    DefaultAttribute('baseFrequency', '0', elements=['feFuncA', 'feFuncB', 'feFuncG', 'feFuncR']),
    DefaultAttribute('bias', 1, elements=['feConvolveMatrix']),
    DefaultAttribute('diffuseConstant', 1, elements=['feDiffuseLighting']),
    DefaultAttribute('edgeMode', 'duplicate', elements=['feConvolveMatrix']),
    DefaultAttribute('elevation', 0, elements=['feDistantLight']),
    DefaultAttribute('exponent', 1, elements=['feFuncA', 'feFuncB', 'feFuncG', 'feFuncR']),
    DefaultAttribute('intercept', 0, elements=['feFuncA', 'feFuncB', 'feFuncG', 'feFuncR']),
    DefaultAttribute('k1', 0, elements=['feComposite']),
    DefaultAttribute('k2', 0, elements=['feComposite']),
    DefaultAttribute('k3', 0, elements=['feComposite']),
    DefaultAttribute('k4', 0, elements=['feComposite']),
    DefaultAttribute('mode', 'normal', elements=['feBlend']),
    DefaultAttribute('numOctaves', 1, elements=['feTurbulence']),
    DefaultAttribute('offset', 0, elements=['feFuncA', 'feFuncB', 'feFuncG', 'feFuncR']),
    DefaultAttribute('operator', 'over', elements=['feComposite']),
    DefaultAttribute('operator', 'erode', elements=['feMorphology']),
    DefaultAttribute('order', '3', elements=['feConvolveMatrix']),
    DefaultAttribute('pointsAtX', 0, elements=['feSpotLight']),
    DefaultAttribute('pointsAtY', 0, elements=['feSpotLight']),
    DefaultAttribute('pointsAtZ', 0, elements=['feSpotLight']),
    DefaultAttribute('preserveAlpha', 'false', elements=['feConvolveMatrix']),
    DefaultAttribute('radius', '0', elements=['feMorphology']),
    DefaultAttribute('scale', 0, elements=['feDisplacementMap']),
    DefaultAttribute('seed', 0, elements=['feTurbulence']),
    DefaultAttribute('specularConstant', 1, elements=['feSpecularLighting']),
    DefaultAttribute('specularExponent', 1, elements=['feSpecularLighting', 'feSpotLight']),
    DefaultAttribute('stdDeviation', '0', elements=['feGaussianBlur']),
    DefaultAttribute('stitchTiles', 'noStitch', elements=['feTurbulence']),
    DefaultAttribute('surfaceScale', 1, elements=['feDiffuseLighting', 'feSpecularLighting']),
    DefaultAttribute('type', 'matrix', elements=['feColorMatrix']),
    DefaultAttribute('type', 'turbulence', elements=['feTurbulence']),
    DefaultAttribute('xChannelSelector', 'A', elements=['feDisplacementMap']),
    DefaultAttribute('yChannelSelector', 'A', elements=['feDisplacementMap'])
]
# split to increase lookup performance
# TODO: 'default_attributes_universal' is actually empty right now - will we ever need it?
default_attributes_universal = []  # list containing attributes valid for all elements
default_attributes_per_element = defaultdict(list)  # dict containing lists of attributes valid for individual elements
for default_attribute in default_attributes:
    if default_attribute.elements is None:
        default_attributes_universal.append(default_attribute)
    else:
        for element in default_attribute.elements:
            default_attributes_per_element[element].append(default_attribute)
def taint(taintedSet, taintedAttribute):
    u"""Adds an attribute to a set of attributes.
    Related attributes are also included."""
    taintedSet.add(taintedAttribute)
    positional = ('marker-start', 'marker-mid', 'marker-end')
    if taintedAttribute == 'marker':
        # the shorthand taints all three positional marker properties
        taintedSet.update(positional)
    elif taintedAttribute in positional:
        # any positional marker property taints the shorthand as well
        taintedSet.add('marker')
    return taintedSet
def removeDefaultAttributeValue(node, attribute):
    """
    Removes the DefaultAttribute 'attribute' from 'node' if specified conditions are fulfilled
    Warning: Does NOT check if the attribute is actually valid for the passed element type for increased performance!

    Returns 1 if the attribute was removed, 0 otherwise.
    """
    if not node.hasAttribute(attribute.name):
        return 0
    # differentiate between text and numeric values
    if isinstance(attribute.value, str):
        # textual default: a simple string comparison suffices
        matches = node.getAttribute(attribute.name) == attribute.value
    else:
        # numeric default: parse as an SVG length and compare value and
        # unit separately (a None default value matches any value)
        parsed = SVGLength(node.getAttribute(attribute.name))
        matches = (attribute.value is None
                   or (parsed.value == attribute.value and parsed.units != Unit.INVALID))
        if matches:
            # 'units' may be unspecified, a single unit, or a list of units
            matches = (attribute.units is None
                       or parsed.units == attribute.units
                       or (isinstance(attribute.units, list) and parsed.units in attribute.units))
    if matches and ((attribute.conditions is None) or attribute.conditions(node)):
        node.removeAttribute(attribute.name)
        return 1
    return 0
def removeDefaultAttributeValues(node, options, tainted=None):
    u"""Recursively removes attributes and style properties whose values equal
    the SVG defaults.

    'tainted' keeps a set of attributes defined in parent nodes.
    For such attributes, we don't delete attributes with default values.

    Returns the number of attributes/properties removed in the subtree.
    """
    # BUG FIX: the default used to be 'tainted=set()'. Because taint() mutates
    # its argument in place, that single shared default set accumulated
    # attribute names across independent top-level calls (the classic
    # mutable-default-argument pitfall), which could wrongly prevent removals
    # in later documents processed by the same interpreter. Callers that pass
    # their own set are unaffected.
    if tainted is None:
        tainted = set()
    num = 0
    if node.nodeType != Node.ELEMENT_NODE:
        return 0
    # Conditionally remove all default attributes defined in 'default_attributes' (a list of 'DefaultAttribute's)
    #
    # For increased performance do not iterate the whole list for each element but run only on valid subsets
    # - 'default_attributes_universal' (attributes valid for all elements)
    # - 'default_attributes_per_element' (attributes specific to one specific element type)
    for attribute in default_attributes_universal:
        num += removeDefaultAttributeValue(node, attribute)
    if node.nodeName in default_attributes_per_element:
        for attribute in default_attributes_per_element[node.nodeName]:
            num += removeDefaultAttributeValue(node, attribute)
    # Summarily get rid of default properties
    attributes = [node.attributes.item(i).nodeName for i in range(node.attributes.length)]
    for attribute in attributes:
        if attribute not in tainted:
            if attribute in default_properties:
                if node.getAttribute(attribute) == default_properties[attribute]:
                    node.removeAttribute(attribute)
                    num += 1
            else:
                # a non-default value here shadows defaults below this node
                tainted = taint(tainted, attribute)
    # Properties might also occur as styles, remove them too
    styles = _getStyle(node)
    for attribute in list(styles):
        if attribute not in tainted:
            if attribute in default_properties:
                if styles[attribute] == default_properties[attribute]:
                    del styles[attribute]
                    num += 1
            else:
                tainted = taint(tainted, attribute)
    _setStyle(node, styles)
    # recurse for our child elements; each child receives its own copy so
    # that siblings do not taint each other
    for child in node.childNodes:
        num += removeDefaultAttributeValues(child, options, tainted.copy())
    return num
# matches e.g. "rgb(255, 0, 0)" — integer color components
rgb = re.compile(r"\s*rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)\s*")
# matches e.g. "rgb(100%, 0%, 0%)" — percentage color components
rgbp = re.compile(r"\s*rgb\(\s*(\d*\.?\d+)%\s*,\s*(\d*\.?\d+)%\s*,\s*(\d*\.?\d+)%\s*\)\s*")
def convertColor(value):
    """
    Converts the input color string and returns a #RRGGBB (or #RGB if possible) string
    """
    s = value
    # map a recognized color keyword to its hex equivalent
    if s in colors:
        s = colors[s]
    # rgb(R%, G%, B%) -> #RRGGBB
    match = rgbp.match(s)
    if match is not None:
        channels = tuple(int(float(match.group(i)) * 255.0 / 100.0) for i in (1, 2, 3))
        s = '#%02x%02x%02x' % channels
    else:
        # rgb(R, G, B) -> #RRGGBB
        match = rgb.match(s)
        if match is not None:
            s = '#%02x%02x%02x' % (int(match.group(1)),
                                   int(match.group(2)),
                                   int(match.group(3)))
    if s[0] == '#':
        s = s.lower()
        # collapse #RRGGBB into #RGB when every channel repeats its digit
        if len(s) == 7 and s[1] == s[2] and s[3] == s[4] and s[5] == s[6]:
            s = '#' + s[1] + s[3] + s[5]
    return s
def convertColors(element):
    """
    Recursively converts all color properties into #RRGGBB format if shorter

    Returns the number of bytes saved.
    """
    saved = 0
    if element.nodeType != Node.ELEMENT_NODE:
        return 0
    # determine which attributes may carry colors on this element type
    if element.nodeName in ['rect', 'circle', 'ellipse', 'polygon',
                            'line', 'polyline', 'path', 'g', 'a']:
        colorAttrs = ['fill', 'stroke']
    elif element.nodeName in ['stop']:
        colorAttrs = ['stop-color']
    elif element.nodeName in ['solidColor']:
        colorAttrs = ['solid-color']
    else:
        colorAttrs = []
    styles = _getStyle(element)
    for attr in colorAttrs:
        # convert the presentation attribute, if present
        original = element.getAttribute(attr)
        if original != '':
            converted = convertColor(original)
            if len(original) > len(converted):
                element.setAttribute(attr, converted)
                saved += len(original) - len(element.getAttribute(attr))
        # the same properties may also hide inside the style attribute
        if attr in styles:
            original = styles[attr]
            converted = convertColor(original)
            if len(original) > len(converted):
                styles[attr] = converted
                saved += len(original) - len(converted)
    _setStyle(element, styles)
    # now recurse for our child elements
    for child in element.childNodes:
        saved += convertColors(child)
    return saved
# TODO: go over what this method does and see if there is a way to optimize it
# TODO: go over the performance of this method and see if I can save memory/speed by
# reusing data structures, etc
def cleanPath(element, options):
    """
    Cleans the path string (d attribute) of the element.

    The path data is processed in several in-place passes:
      1. all absolute commands are converted to relative ones
      2. empty/no-op segments are removed (unless round/square linecaps
         would actually render them)
      3. straight cubic curves are converted to lines
      4. consecutive commands of the same type are collapsed
      5. commands are converted to shorthand forms (h/v/s/t) where possible
      6. collinear runs are merged (unless intermediate markers exist)
    The re-serialized string is only used if it is not longer than the
    original.
    """
    global _num_bytes_saved_in_path_data
    global _num_path_segments_removed
    # this gets the parser object from svg_regex.py
    oldPathStr = element.getAttribute('d')
    path = svg_parser.parse(oldPathStr)
    style = _getStyle(element)
    # This determines whether the stroke has round or square linecaps. If it does, we do not want to collapse empty
    # segments, as they are actually rendered (as circles or squares with diameter/dimension matching the path-width).
    has_round_or_square_linecaps = (
        element.getAttribute('stroke-linecap') in ['round', 'square']
        or 'stroke-linecap' in style and style['stroke-linecap'] in ['round', 'square']
    )
    # This determines whether the stroke has intermediate markers. If it does, we do not want to collapse
    # straight segments running in the same direction, as markers are rendered on the intermediate nodes.
    has_intermediate_markers = (
        element.hasAttribute('marker')
        or element.hasAttribute('marker-mid')
        or 'marker' in style
        or 'marker-mid' in style
    )
    # The first command must be a moveto, and whether it's relative (m)
    # or absolute (M), the first set of coordinates *is* absolute. So
    # the first iteration of the loop below will get x,y and startx,starty.
    # convert absolute coordinates into relative ones.
    # Reuse the data structure 'path', since we're not adding or removing subcommands.
    # Also reuse the coordinate lists since we're not adding or removing any.
    x = y = 0
    for pathIndex in range(len(path)):
        cmd, data = path[pathIndex]  # Changes to cmd don't get through to the data structure
        i = 0
        # adjust abs to rel
        # only the A command has some values that we don't want to adjust (radii, rotation, flags)
        if cmd == 'A':
            for i in range(i, len(data), 7):
                data[i + 5] -= x
                data[i + 6] -= y
                x += data[i + 5]
                y += data[i + 6]
            path[pathIndex] = ('a', data)
        elif cmd == 'a':
            x += sum(data[5::7])
            y += sum(data[6::7])
        elif cmd == 'H':
            for i in range(i, len(data)):
                data[i] -= x
                x += data[i]
            path[pathIndex] = ('h', data)
        elif cmd == 'h':
            x += sum(data)
        elif cmd == 'V':
            for i in range(i, len(data)):
                data[i] -= y
                y += data[i]
            path[pathIndex] = ('v', data)
        elif cmd == 'v':
            y += sum(data)
        elif cmd == 'M':
            startx, starty = data[0], data[1]
            # If this is a path starter, don't convert its first
            # coordinate to relative; that would just make it (0, 0)
            if pathIndex != 0:
                data[0] -= x
                data[1] -= y
            x, y = startx, starty
            i = 2
            for i in range(i, len(data), 2):
                data[i] -= x
                data[i + 1] -= y
                x += data[i]
                y += data[i + 1]
            path[pathIndex] = ('m', data)
        elif cmd in ['L', 'T']:
            for i in range(i, len(data), 2):
                data[i] -= x
                data[i + 1] -= y
                x += data[i]
                y += data[i + 1]
            path[pathIndex] = (cmd.lower(), data)
        elif cmd in ['m']:
            if pathIndex == 0:
                # START OF PATH - this is an absolute moveto
                # followed by relative linetos
                startx, starty = data[0], data[1]
                x, y = startx, starty
                i = 2
            else:
                startx = x + data[0]
                starty = y + data[1]
            for i in range(i, len(data), 2):
                x += data[i]
                y += data[i + 1]
        elif cmd in ['l', 't']:
            x += sum(data[0::2])
            y += sum(data[1::2])
        elif cmd in ['S', 'Q']:
            for i in range(i, len(data), 4):
                data[i] -= x
                data[i + 1] -= y
                data[i + 2] -= x
                data[i + 3] -= y
                x += data[i + 2]
                y += data[i + 3]
            path[pathIndex] = (cmd.lower(), data)
        elif cmd in ['s', 'q']:
            x += sum(data[2::4])
            y += sum(data[3::4])
        elif cmd == 'C':
            for i in range(i, len(data), 6):
                data[i] -= x
                data[i + 1] -= y
                data[i + 2] -= x
                data[i + 3] -= y
                data[i + 4] -= x
                data[i + 5] -= y
                x += data[i + 4]
                y += data[i + 5]
            path[pathIndex] = ('c', data)
        elif cmd == 'c':
            x += sum(data[4::6])
            y += sum(data[5::6])
        elif cmd in ['z', 'Z']:
            # closepath returns the pen to the start of the current subpath
            x, y = startx, starty
            path[pathIndex] = ('z', data)
    # remove empty segments and redundant commands
    # Reuse the data structure 'path' and the coordinate lists, even if we're
    # deleting items, because these deletions are relatively cheap.
    if not has_round_or_square_linecaps:
        # remove empty path segments
        for pathIndex in range(len(path)):
            cmd, data = path[pathIndex]
            i = 0
            if cmd in ['m', 'l', 't']:
                if cmd == 'm':
                    # It might be tempting to rewrite "m0 0 ..." into
                    # "l..." here. However, this is an unsound
                    # optimization in general as "m0 0 ... z" is
                    # different from "l...z".
                    #
                    # To do such a rewrite, we need to understand the
                    # full subpath. This logic happens after this
                    # loop.
                    i = 2
                while i < len(data):
                    if data[i] == data[i + 1] == 0:
                        del data[i:i + 2]
                        _num_path_segments_removed += 1
                    else:
                        i += 2
            elif cmd == 'c':
                while i < len(data):
                    if data[i] == data[i + 1] == data[i + 2] == data[i + 3] == data[i + 4] == data[i + 5] == 0:
                        del data[i:i + 6]
                        _num_path_segments_removed += 1
                    else:
                        i += 6
            elif cmd == 'a':
                while i < len(data):
                    if data[i + 5] == data[i + 6] == 0:
                        del data[i:i + 7]
                        _num_path_segments_removed += 1
                    else:
                        i += 7
            elif cmd == 'q':
                while i < len(data):
                    if data[i] == data[i + 1] == data[i + 2] == data[i + 3] == 0:
                        del data[i:i + 4]
                        _num_path_segments_removed += 1
                    else:
                        i += 4
            elif cmd in ['h', 'v']:
                oldLen = len(data)
                path[pathIndex] = (cmd, [coord for coord in data if coord != 0])
                # NOTE(review): this adds a non-positive delta (new length minus
                # old length) to the removal counter; 'oldLen - len(...)' looks
                # like what was intended -- the statistic may undercount. Verify.
                _num_path_segments_removed += len(path[pathIndex][1]) - oldLen
        # remove no-op commands
        pathIndex = len(path)
        subpath_needs_anchor = False
        # NB: We can never rewrite the first m/M command (except if it
        # is the only command)
        while pathIndex > 1:
            pathIndex -= 1
            cmd, data = path[pathIndex]
            if cmd == 'z':
                next_cmd, next_data = path[pathIndex - 1]
                if next_cmd == 'm' and len(next_data) == 2:
                    # mX Yz -> mX Y
                    # note the len check on next_data as it is not
                    # safe to rewrite "m0 0 1 1z" in general (it is a
                    # question of where the "pen" ends - you can
                    # continue a draw on the same subpath after a
                    # "z").
                    del path[pathIndex]
                    _num_path_segments_removed += 1
                else:
                    # it is not safe to rewrite "m0 0 ..." to "l..."
                    # because of this "z" command.
                    subpath_needs_anchor = True
            elif cmd == 'm':
                if len(path) - 1 == pathIndex and len(data) == 2:
                    # Ends with an empty move (but no line/draw
                    # following it)
                    del path[pathIndex]
                    _num_path_segments_removed += 1
                    continue
                if subpath_needs_anchor:
                    subpath_needs_anchor = False
                elif data[0] == data[1] == 0:
                    # unanchored, i.e. we can replace "m0 0 ..." with
                    # "l..." as there is no "z" after it.
                    path[pathIndex] = ('l', data[2:])
                    _num_path_segments_removed += 1
    # fixup: Delete subcommands having no coordinates.
    path = [elem for elem in path if len(elem[1]) > 0 or elem[0] == 'z']
    # convert straight curves into lines
    newPath = [path[0]]
    for (cmd, data) in path[1:]:
        i = 0
        newData = data
        if cmd == 'c':
            newData = []
            while i < len(data):
                # since all commands are now relative, we can think of previous point as (0,0)
                # and new point (dx,dy) is (data[i+4],data[i+5])
                # eqn of line will be y = (dy/dx)*x or if dx=0 then eqn of line is x=0
                (p1x, p1y) = (data[i], data[i + 1])
                (p2x, p2y) = (data[i + 2], data[i + 3])
                dx = data[i + 4]
                dy = data[i + 5]
                foundStraightCurve = False
                if dx == 0:
                    if p1x == 0 and p2x == 0:
                        foundStraightCurve = True
                else:
                    m = dy / dx
                    if p1y == m * p1x and p2y == m * p2x:
                        foundStraightCurve = True
                if foundStraightCurve:
                    # flush any existing curve coords first
                    if newData:
                        newPath.append((cmd, newData))
                        newData = []
                    # now create a straight line segment
                    newPath.append(('l', [dx, dy]))
                else:
                    newData.extend(data[i:i + 6])
                i += 6
        if newData or cmd == 'z' or cmd == 'Z':
            newPath.append((cmd, newData))
    path = newPath
    # collapse all consecutive commands of the same type into one command
    prevCmd = ''
    prevData = []
    newPath = []
    for (cmd, data) in path:
        if prevCmd == '':
            # initialize with current path cmd and data
            prevCmd = cmd
            prevData = data
        else:
            # collapse if
            # - cmd is not moveto (explicit moveto commands are not drawn)
            # - the previous and current commands are the same type,
            # - the previous command is moveto and the current is lineto
            #   (subsequent moveto pairs are treated as implicit lineto commands)
            if cmd != 'm' and (cmd == prevCmd or (cmd == 'l' and prevCmd == 'm')):
                prevData.extend(data)
            # else flush the previous command if it is not the same type as the current command
            else:
                newPath.append((prevCmd, prevData))
                prevCmd = cmd
                prevData = data
    # flush last command and data
    newPath.append((prevCmd, prevData))
    path = newPath
    # convert to shorthand path segments where possible
    newPath = []
    for (cmd, data) in path:
        # convert line segments into h,v where possible
        if cmd == 'l':
            i = 0
            lineTuples = []
            while i < len(data):
                if data[i] == 0:
                    # vertical
                    if lineTuples:
                        # flush the existing line command
                        newPath.append(('l', lineTuples))
                        lineTuples = []
                    # append the v and then the remaining line coords
                    newPath.append(('v', [data[i + 1]]))
                    _num_path_segments_removed += 1
                elif data[i + 1] == 0:
                    if lineTuples:
                        # flush the line command, then append the h and then the remaining line coords
                        newPath.append(('l', lineTuples))
                        lineTuples = []
                    newPath.append(('h', [data[i]]))
                    _num_path_segments_removed += 1
                else:
                    lineTuples.extend(data[i:i + 2])
                i += 2
            if lineTuples:
                newPath.append(('l', lineTuples))
        # also handle implied relative linetos
        elif cmd == 'm':
            i = 2
            lineTuples = [data[0], data[1]]
            while i < len(data):
                if data[i] == 0:
                    # vertical
                    if lineTuples:
                        # flush the existing m/l command
                        newPath.append((cmd, lineTuples))
                        lineTuples = []
                        cmd = 'l'  # dealing with linetos now
                    # append the v and then the remaining line coords
                    newPath.append(('v', [data[i + 1]]))
                    _num_path_segments_removed += 1
                elif data[i + 1] == 0:
                    if lineTuples:
                        # flush the m/l command, then append the h and then the remaining line coords
                        newPath.append((cmd, lineTuples))
                        lineTuples = []
                        cmd = 'l'  # dealing with linetos now
                    newPath.append(('h', [data[i]]))
                    _num_path_segments_removed += 1
                else:
                    lineTuples.extend(data[i:i + 2])
                i += 2
            if lineTuples:
                newPath.append((cmd, lineTuples))
        # convert Bézier curve segments into s where possible
        elif cmd == 'c':
            # set up the assumed bezier control point as the current point,
            # i.e. (0,0) since we're using relative coords
            bez_ctl_pt = (0, 0)
            # however if the previous command was 's'
            # the assumed control point is a reflection of the previous control point at the current point
            if len(newPath):
                (prevCmd, prevData) = newPath[-1]
                if prevCmd == 's':
                    bez_ctl_pt = (prevData[-2] - prevData[-4], prevData[-1] - prevData[-3])
            i = 0
            curveTuples = []
            while i < len(data):
                # rotate by 180deg means negate both coordinates
                # if the previous control point is equal then we can substitute a
                # shorthand bezier command
                if bez_ctl_pt[0] == data[i] and bez_ctl_pt[1] == data[i + 1]:
                    if curveTuples:
                        newPath.append(('c', curveTuples))
                        curveTuples = []
                    # append the s command
                    newPath.append(('s', [data[i + 2], data[i + 3], data[i + 4], data[i + 5]]))
                    _num_path_segments_removed += 1
                else:
                    j = 0
                    while j <= 5:
                        curveTuples.append(data[i + j])
                        j += 1
                # set up control point for next curve segment
                bez_ctl_pt = (data[i + 4] - data[i + 2], data[i + 5] - data[i + 3])
                i += 6
            if curveTuples:
                newPath.append(('c', curveTuples))
        # convert quadratic curve segments into t where possible
        elif cmd == 'q':
            quad_ctl_pt = (0, 0)
            i = 0
            curveTuples = []
            while i < len(data):
                if quad_ctl_pt[0] == data[i] and quad_ctl_pt[1] == data[i + 1]:
                    if curveTuples:
                        newPath.append(('q', curveTuples))
                        curveTuples = []
                    # append the t command
                    newPath.append(('t', [data[i + 2], data[i + 3]]))
                    _num_path_segments_removed += 1
                else:
                    j = 0
                    while j <= 3:
                        curveTuples.append(data[i + j])
                        j += 1
                quad_ctl_pt = (data[i + 2] - data[i], data[i + 3] - data[i + 1])
                i += 4
            if curveTuples:
                newPath.append(('q', curveTuples))
        else:
            newPath.append((cmd, data))
    path = newPath
    # For each m, l, h or v, collapse unnecessary coordinates that run in the same direction
    # i.e. "h-100-100" becomes "h-200" but "h300-100" does not change.
    # If the path has intermediate markers we have to preserve intermediate nodes, though.
    # Reuse the data structure 'path', since we're not adding or removing subcommands.
    # Also reuse the coordinate lists, even if we're deleting items, because these
    # deletions are relatively cheap.
    if not has_intermediate_markers:
        for pathIndex in range(len(path)):
            cmd, data = path[pathIndex]
            # h / v expects only one parameter and we start drawing with the first (so we need at least 2)
            if cmd in ['h', 'v'] and len(data) >= 2:
                coordIndex = 0
                while coordIndex+1 < len(data):
                    if is_same_sign(data[coordIndex], data[coordIndex+1]):
                        data[coordIndex] += data[coordIndex+1]
                        del data[coordIndex+1]
                        _num_path_segments_removed += 1
                    else:
                        coordIndex += 1
            # l expects two parameters and we start drawing with the first (so we need at least 4)
            elif cmd == 'l' and len(data) >= 4:
                coordIndex = 0
                while coordIndex+2 < len(data):
                    if is_same_direction(*data[coordIndex:coordIndex+4]):
                        data[coordIndex] += data[coordIndex+2]
                        data[coordIndex+1] += data[coordIndex+3]
                        del data[coordIndex+2]  # delete the next two elements
                        del data[coordIndex+2]
                        _num_path_segments_removed += 1
                    else:
                        coordIndex += 2
            # m expects two parameters but we have to skip the first pair as it's not drawn (so we need at least 6)
            elif cmd == 'm' and len(data) >= 6:
                coordIndex = 2
                while coordIndex+2 < len(data):
                    if is_same_direction(*data[coordIndex:coordIndex+4]):
                        data[coordIndex] += data[coordIndex+2]
                        data[coordIndex+1] += data[coordIndex+3]
                        del data[coordIndex+2]  # delete the next two elements
                        del data[coordIndex+2]
                        _num_path_segments_removed += 1
                    else:
                        coordIndex += 2
    # it is possible that we have consecutive h, v, c, t commands now
    # so again collapse all consecutive commands of the same type into one command
    prevCmd = ''
    prevData = []
    newPath = [path[0]]
    for (cmd, data) in path[1:]:
        # flush the previous command if it is not the same type as the current command
        if prevCmd != '':
            if cmd != prevCmd or cmd == 'm':
                newPath.append((prevCmd, prevData))
                prevCmd = ''
                prevData = []
        # if the previous and current commands are the same type, collapse
        if cmd == prevCmd and cmd != 'm':
            prevData.extend(data)
        # save last command and data
        else:
            prevCmd = cmd
            prevData = data
    # flush last command and data
    if prevCmd != '':
        newPath.append((prevCmd, prevData))
    path = newPath
    newPathStr = serializePath(path, options)
    # if for whatever reason we actually made the path longer don't use it
    # TODO: maybe we could compare path lengths after each optimization step and use the shortest
    if len(newPathStr) <= len(oldPathStr):
        _num_bytes_saved_in_path_data += (len(oldPathStr) - len(newPathStr))
        element.setAttribute('d', newPathStr)
def parseListOfPoints(s):
    """
    Parse a 'points' attribute string into a list of Decimal coordinates.

    Returns a list containing an even number of Decimal values, or an
    empty list if the input is malformed (odd coordinate count, a unit
    suffix, or an otherwise invalid number).
    """
    # (wsp)? comma-or-wsp-separated coordinate pairs (wsp)?
    # coordinate-pair = coordinate comma-or-wsp coordinate
    # comma-wsp: (wsp+ comma? wsp*) | (comma wsp*)
    ws_nums = RE_COMMA_WSP.split(s.strip())
    nums = []
    # A minus sign may also act as a separator, so "100-100" is two
    # coordinates, e.g.:
    # <polygon points="100,-100,100-100,100-100-100,-100-100" />
    for token in ws_nums:
        negcoords = token.split("-")
        if len(negcoords) == 1:
            # this token didn't contain any negative coordinates
            nums.append(negcoords[0])
            continue
        # the piece before the first '-' may be a positive number
        # (empty when the token itself started with '-')
        if negcoords[0] != '':
            nums.append(negcoords[0])
        # all remaining pieces are negative numbers ...
        for piece in negcoords[1:]:
            # ... unless we accidentally split a number that was in scientific
            # notation with a negative exponent (e.g. 500.00e-1): in that case
            # re-attach the piece to the previous number.
            prev = nums[-1] if nums else ""
            if prev and prev[-1] in ('e', 'E'):
                nums[-1] = prev + '-' + piece
            else:
                nums.append('-' + piece)
    # coordinates must come in pairs; reject an odd count
    if len(nums) % 2 != 0:
        return []
    # resolve the strings into Decimal values
    try:
        return [getcontext().create_decimal(num) for num in nums]
    except InvalidOperation:  # a coordinate had a unit or is an invalid number
        return []
def cleanPolygon(elem, options):
    """
    Remove an unnecessary closing point from a polygon's points attribute
    (SVG polygons are implicitly closed), then re-serialize the points.

    Modifies the element in place and updates the global removal counter.
    """
    global _num_points_removed_from_polygon
    pts = parseListOfPoints(elem.getAttribute('points'))
    # A redundant closing point requires at least two points (4 coordinates).
    # (The original computed 'N = len(pts) / 2', a float under true division;
    # comparing the coordinate count directly avoids that.)
    if len(pts) >= 4:
        (startx, starty) = pts[:2]
        (endx, endy) = pts[-2:]
        if startx == endx and starty == endy:
            del pts[-2:]
            _num_points_removed_from_polygon += 1
    elem.setAttribute('points', scourCoordinates(pts, options, True))
def cleanPolyline(elem, options):
    """
    Scour the points attribute of a polyline and write it back.
    """
    points = parseListOfPoints(elem.getAttribute('points'))
    elem.setAttribute('points', scourCoordinates(points, options, True))
def controlPoints(cmd, data):
    """
    Return the indices of all values in *data* that are control points
    for the given path command (empty list for commands without any).
    """
    lowered = cmd.lower()
    # c: (x1 y1 x2 y2 x y)+ -- first four of every six values
    if lowered == 'c':
        return [idx for idx in range(len(data)) if idx % 6 < 4]
    # s: (x2 y2 x y)+  q: (x1 y1 x y)+ -- first two of every four values
    if lowered in ('s', 'q'):
        return [idx for idx in range(len(data)) if idx % 4 < 2]
    return []
def flags(cmd, data):
    """
    Return the indices of all values in *data* that are flags for the
    given path command (empty list for commands without any).
    """
    # only elliptical arcs carry flags:
    # a: (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+
    if cmd.lower() != 'a':
        return []
    return [idx for idx in range(len(data)) if idx % 7 in (3, 4)]
def serializePath(pathObj, options):
    """
    Re-serializes parsed path data back into a 'd' string with cleanups.
    """
    # elliptical arc commands must have comma/wsp separating the coordinates
    # this fixes an issue outlined in Fix https://bugs.launchpad.net/scour/+bug/412754
    parts = []
    for cmd, data in pathObj:
        parts.append(cmd)
        parts.append(scourCoordinates(data, options,
                                      control_points=controlPoints(cmd, data),
                                      flags=flags(cmd, data)))
    return ''.join(parts)
def serializeTransform(transformObj):
    """
    Re-serializes parsed transform data into attribute form with cleanups.
    """
    commands = []
    for command, numbers in transformObj:
        args = ' '.join(scourUnitlessLength(number) for number in numbers)
        commands.append(command + '(' + args + ')')
    return ' '.join(commands)
def scourCoordinates(data, options, force_whitespace=False, control_points=(), flags=()):
    """
    Serializes coordinate data with some cleanups:
       - removes all trailing zeros after the decimal
       - integerize coordinates if possible
       - removes extraneous whitespace
       - adds spaces between values in a subcommand if required (or if force_whitespace is True)

    'control_points' and 'flags' are index collections produced by
    controlPoints() / flags().  (Defaults changed from mutable lists to
    tuples; they are only used for membership tests.)
    """
    if data is None:
        return ''
    newData = []
    previousCoord = ''
    for c, coord in enumerate(data):
        scouredCoord = scourUnitlessLength(coord,
                                           renderer_workaround=options.renderer_workaround,
                                           is_control_point=(c in control_points))
        # don't output a space if this number starts with a dot (.) or minus sign (-); we only need a space if
        #   - this number starts with a digit
        #   - this number starts with a dot but the previous number had *no* dot or exponent
        #     i.e. '1.3 0.5' -> '1.3.5' or '1e3 0.5' -> '1e3.5' is fine but '123 0.5' -> '123.5' is obviously not
        #   - 'force_whitespace' is explicitly set to 'True'
        # we never need a space after flags (occurring in elliptical arcs), but librsvg struggles without it
        if (c > 0
                and (force_whitespace
                     or scouredCoord[0].isdigit()
                     or (scouredCoord[0] == '.' and not ('.' in previousCoord or 'e' in previousCoord)))
                and ((c - 1 not in flags) or options.renderer_workaround)):
            newData.append(' ')
        # add the scoured coordinate to the path string
        newData.append(scouredCoord)
        previousCoord = scouredCoord
    # What we need to do to work around GNOME bugs 548494, 563933 and 620565, is to make sure that a dot doesn't
    # immediately follow a command (so 'h50' and 'h0.5' are allowed, but not 'h.5').
    # Then, we need to add a space character after any coordinates having an 'e' (scientific notation),
    # so as to have the exponent separate from the next number.
    # TODO: Check whether this is still required (bugs all marked as fixed, might be time to phase it out)
    if options.renderer_workaround:
        for i in range(1, len(newData)):
            if newData[i][0] == '-' and 'e' in newData[i - 1]:
                newData[i - 1] += ' '
    return ''.join(newData)
def scourLength(length):
    """
    Scours a length value while preserving its unit. Accepts units.
    """
    parsed = SVGLength(length)
    return scourUnitlessLength(parsed.value) + Unit.str(parsed.units)
def scourUnitlessLength(length, renderer_workaround=False, is_control_point=False):  # length is of a numeric type
    """
    Scours the numeric part of a length only. Does not accept units.
    This is faster than scourLength on elements guaranteed not to
    contain units.

    Rounds to the active scouring context's precision (a reduced
    precision for control points), strips trailing zeroes and the
    leading zero of a pure fraction, and returns whichever of the
    plain or scientific-notation renderings is shorter.
    """
    # normalize the input into a Decimal so context arithmetic applies
    if not isinstance(length, Decimal):
        length = getcontext().create_decimal(str(length))
    initial_length = length
    # reduce numeric precision
    # plus() corresponds to the unary prefix plus operator and applies context precision and rounding
    if is_control_point:
        length = scouringContextC.plus(length)
    else:
        length = scouringContext.plus(length)
    # remove trailing zeroes as we do not care for significance
    intLength = length.to_integral_value()
    if length == intLength:
        length = Decimal(intLength)
    else:
        length = length.normalize()
    # Gather the non-scientific notation version of the coordinate.
    # Re-quantize from the initial value to prevent unnecessary loss of precision
    # (e.g. 123.4 should become 123, not 120 or even 100)
    nonsci = '{0:f}'.format(length)
    nonsci = '{0:f}'.format(initial_length.quantize(Decimal(nonsci)))
    if not renderer_workaround:
        # drop the leading zero of pure fractions ('0.5' -> '.5'),
        # which some old renderers cannot parse (hence the guard above)
        if len(nonsci) > 2 and nonsci[:2] == '0.':
            nonsci = nonsci[1:]  # remove the 0, leave the dot
        elif len(nonsci) > 3 and nonsci[:3] == '-0.':
            nonsci = '-' + nonsci[2:]  # remove the 0, leave the minus and dot
    return_value = nonsci
    # Gather the scientific notation version of the coordinate which
    # can only be shorter if the length of the number is at least 4 characters (e.g. 1000 = 1e3).
    if len(nonsci) > 3:
        # We have to implement this ourselves since both 'normalize()' and 'to_sci_string()'
        # don't handle negative exponents in a reasonable way (e.g. 0.000001 remains unchanged)
        exponent = length.adjusted()  # how far do we have to shift the dot?
        length = length.scaleb(-exponent).normalize()  # shift the dot and remove potential trailing zeroes
        sci = six.text_type(length) + 'e' + six.text_type(exponent)
        if len(sci) < len(nonsci):
            return_value = sci
    return return_value
def reducePrecision(element):
    """
    Because opacities, letter spacings, stroke widths and all that don't need
    to be preserved in SVG files with 9 digits of precision.
    Takes all of these attributes, in the given element node and its children,
    and reduces their precision to the current Decimal context's precision.
    Also checks for the attributes actually being lengths, not 'inherit', 'none'
    or anything that isn't an SVGLength.
    Returns the number of bytes saved after performing these reductions.
    """
    num = 0
    styles = _getStyle(element)
    # numeric presentation attributes worth rounding down
    for lengthAttr in ['opacity', 'flood-opacity', 'fill-opacity',
                       'stroke-opacity', 'stop-opacity', 'stroke-miterlimit',
                       'stroke-dashoffset', 'letter-spacing', 'word-spacing',
                       'kerning', 'font-size-adjust', 'font-size',
                       'stroke-width']:
        val = element.getAttribute(lengthAttr)
        if val != '':
            valLen = SVGLength(val)
            if valLen.units != Unit.INVALID:  # not an absolute/relative size or inherit, can be % though
                newVal = scourLength(val)
                # only apply the shortened value if it actually saves bytes
                if len(newVal) < len(val):
                    num += len(val) - len(newVal)
                    element.setAttribute(lengthAttr, newVal)
        # repeat for attributes hidden in styles
        if lengthAttr in styles:
            val = styles[lengthAttr]
            valLen = SVGLength(val)
            if valLen.units != Unit.INVALID:
                newVal = scourLength(val)
                if len(newVal) < len(val):
                    num += len(val) - len(newVal)
                    styles[lengthAttr] = newVal
                    # write the modified style dict back immediately
                    _setStyle(element, styles)
    # recurse into child elements
    for child in element.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            num += reducePrecision(child)
    return num
def optimizeAngle(angle):
    """
    Because any rotation can be expressed within 360 degrees
    of any given number, and since negative angles sometimes
    are one character longer than corresponding positive angle,
    we shorten the number to one in the range to [-90, 270[.
    """
    # Put the angle in the range ]-360, 360[ first. Matching the sign
    # of the divisor to the sign of the angle keeps the result's sign
    # stable regardless of the numeric type's modulo convention.
    angle = angle % -360 if angle < 0 else angle % 360
    # 720 degrees is unnecessary, as 360 covers all angles.
    # As "-x" is shorter than "35x" and "-xxx" one character
    # longer than positive angles <= 260, we constrain angle
    # range to [-90, 270[ (or, equally valid: ]-100, 260]).
    if angle >= 270:
        return angle - 360
    if angle < -90:
        return angle + 360
    return angle
def optimizeTransform(transform):
    """
    Optimises a series of transformations parsed from a single
    transform="" attribute.
    The transformation list is modified in-place:
      * a lone matrix() is rewritten as translate/scale/rotate when possible
      * optional arguments (translate y, rotate centre, scale y) are dropped
      * consecutive transforms of the same kind are coalesced
      * identity transforms are removed
    """
    # FIXME: reordering these would optimize even more cases:
    #   first: Fold consecutive runs of the same transformation
    #   extra: Attempt to cast between types to create sameness:
    #          "matrix(0 1 -1 0 0 0) rotate(180) scale(-1)" all
    #          are rotations (90, 180, 180) -- thus "rotate(90)"
    #  second: Simplify transforms where numbers are optional.
    #   third: Attempt to simplify any single remaining matrix()
    #
    # if there's only one transformation and it's a matrix,
    # try to make it a shorter non-matrix transformation
    # NOTE: as matrix(a b c d e f) in SVG means the matrix:
    # |¯  a  c  e  ¯|   make constants   |¯  A1  A2  A3  ¯|
    # |   b  d  f   |  translating them  |   B1  B2  B3   |
    # |_  0  0  1  _|  to more readable  |_  0   0   1   _|
    if len(transform) == 1 and transform[0][0] == 'matrix':
        matrix = A1, B1, A2, B2, A3, B3 = transform[0][1]
        # |¯  1  0  0  ¯|
        # |   0  1  0   |  Identity matrix (no transformation)
        # |_  0  0  1  _|
        if matrix == [1, 0, 0, 1, 0, 0]:
            del transform[0]
        # |¯  1  0  X  ¯|
        # |   0  1  Y   |  Translation by (X, Y).
        # |_  0  0  1  _|
        elif (A1 == 1 and A2 == 0
              and B1 == 0 and B2 == 1):
            transform[0] = ('translate', [A3, B3])
        # |¯  X  0  0  ¯|
        # |   0  Y  0   |  Scaling by (X, Y).
        # |_  0  0  1  _|
        elif (A2 == 0 and A3 == 0
              and B1 == 0 and B3 == 0):
            transform[0] = ('scale', [A1, B2])
        # |¯  cos(A) -sin(A)  0  ¯|  Rotation by angle A,
        # |   sin(A)  cos(A)  0   |  clockwise, about the origin.
        # |_  0       0       1  _|  A is in degrees, [-180...180].
        elif (A1 == B2 and -1 <= A1 <= 1 and A3 == 0
              and -B1 == A2 and -1 <= B1 <= 1 and B3 == 0
              # as cos² A + sin² A == 1 and as decimal trig is approximate:
              # FIXME: the "epsilon" term here should really be some function
              # of the precision of the (sin|cos)_A terms, not 1e-15:
              and abs((B1 ** 2) + (A1 ** 2) - 1) < Decimal("1e-15")):
            sin_A, cos_A = B1, A1
            # while asin(A) and acos(A) both only have an 180° range
            # the sign of sin(A) and cos(A) varies across quadrants,
            # letting us hone in on the angle the matrix represents:
            # -- => < -90 | -+ => -90..0 | ++ => 0..90 | +- => >= 90
            #
            # http://en.wikipedia.org/wiki/File:Sine_cosine_plot.svg
            # shows asin has the correct angle the middle quadrants:
            A = Decimal(str(math.degrees(math.asin(float(sin_A)))))
            if cos_A < 0:  # otherwise needs adjusting from the edges
                if sin_A < 0:
                    A = -180 - A
                else:
                    A = 180 - A
            transform[0] = ('rotate', [A])
    # Simplify transformations where numbers are optional.
    for type, args in transform:
        if type == 'translate':
            # Only the X coordinate is required for translations.
            # If the Y coordinate is unspecified, it's 0.
            if len(args) == 2 and args[1] == 0:
                del args[1]
        elif type == 'rotate':
            args[0] = optimizeAngle(args[0])  # angle
            # Only the angle is required for rotations.
            # If the coordinates are unspecified, it's the origin (0, 0).
            if len(args) == 3 and args[1] == args[2] == 0:
                del args[1:]
        elif type == 'scale':
            # Only the X scaling factor is required.
            # If the Y factor is unspecified, it's the same as X.
            if len(args) == 2 and args[0] == args[1]:
                del args[1]
    # Attempt to coalesce runs of the same transformation.
    # Translations followed immediately by other translations,
    # rotations followed immediately by other rotations,
    # scaling followed immediately by other scaling,
    # are safe to add.
    # Identity skewX/skewY are safe to remove, but how do they accrete?
    # |¯  1       0  0  ¯|
    # |   tan(A)  1  0   |  skews X coordinates by angle A
    # |_  0       0  1  _|
    #
    # |¯  1  tan(A)  0  ¯|
    # |   0  1       0   |  skews Y coordinates by angle A
    # |_  0  0       1  _|
    #
    # FIXME: A matrix followed immediately by another matrix
    #        would be safe to multiply together, too.
    i = 1
    while i < len(transform):
        currType, currArgs = transform[i]
        prevType, prevArgs = transform[i - 1]
        if currType == prevType == 'translate':
            prevArgs[0] += currArgs[0]  # x
            # for y, only add if the second translation has an explicit y
            if len(currArgs) == 2:
                if len(prevArgs) == 2:
                    prevArgs[1] += currArgs[1]  # y
                elif len(prevArgs) == 1:
                    prevArgs.append(currArgs[1])  # y
            del transform[i]
            # NOTE(review): prevArgs may still have only one element here (y
            # omitted on both); indexing prevArgs[1] would then raise
            # IndexError -- verify whether earlier passes guarantee two args.
            if prevArgs[0] == prevArgs[1] == 0:
                # Identity translation!
                i -= 1
                del transform[i]
        elif (currType == prevType == 'rotate'
                and len(prevArgs) == len(currArgs) == 1):
            # Only coalesce if both rotations are from the origin.
            prevArgs[0] = optimizeAngle(prevArgs[0] + currArgs[0])
            del transform[i]
        elif currType == prevType == 'scale':
            prevArgs[0] *= currArgs[0]  # x
            # handle an implicit y
            if len(prevArgs) == 2 and len(currArgs) == 2:
                # y1 * y2
                prevArgs[1] *= currArgs[1]
            elif len(prevArgs) == 1 and len(currArgs) == 2:
                # create y2 = uniformscalefactor1 * y2
                # NOTE(review): prevArgs[0] was already multiplied by
                # currArgs[0] above, so this appears to over-scale y
                # (expected: original uniform factor * currArgs[1]) -- verify.
                prevArgs.append(prevArgs[0] * currArgs[1])
            elif len(prevArgs) == 2 and len(currArgs) == 1:
                # y1 * uniformscalefactor2
                prevArgs[1] *= currArgs[0]
            del transform[i]
            # if prevArgs is [1] or [1, 1], then it is effectively an
            # identity matrix and can be removed.
            if prevArgs[0] == 1 and (len(prevArgs) == 1 or prevArgs[1] == 1):
                # Identity scale!
                i -= 1
                del transform[i]
        else:
            i += 1
    # Some fixups are needed for single-element transformation lists, since
    # the loop above was to coalesce elements with their predecessors in the
    # list, and thus it required 2 elements.
    i = 0
    while i < len(transform):
        currType, currArgs = transform[i]
        if ((currType == 'skewX' or currType == 'skewY')
                and len(currArgs) == 1 and currArgs[0] == 0):
            # Identity skew!
            del transform[i]
        elif ((currType == 'rotate')
                and len(currArgs) == 1 and currArgs[0] == 0):
            # Identity rotation!
            del transform[i]
        else:
            i += 1
def optimizeTransforms(element, options):
    """
    Attempts to optimise transform specifications on the given node and its children.
    Returns the number of bytes saved after performing these reductions.
    """
    num = 0
    for transformAttr in ('transform', 'patternTransform', 'gradientTransform'):
        oldVal = element.getAttribute(transformAttr)
        if oldVal == '':
            continue
        transform = svg_transform_parser.parse(oldVal)
        optimizeTransform(transform)
        newVal = serializeTransform(transform)
        # only apply the optimized value when it is actually shorter
        if len(newVal) >= len(oldVal):
            continue
        if newVal:
            element.setAttribute(transformAttr, newVal)
        else:
            # everything optimized away -- drop the attribute entirely
            element.removeAttribute(transformAttr)
        num += len(oldVal) - len(newVal)
    for child in element.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            num += optimizeTransforms(child, options)
    return num
def removeComments(element):
    """
    Removes comment nodes from the element and its descendants.
    Returns the number of comments removed.
    """
    global _num_bytes_saved_in_comments
    if isinstance(element, xml.dom.minidom.Comment):
        # track the byte savings, then detach the comment node
        _num_bytes_saved_in_comments += len(element.data)
        element.parentNode.removeChild(element)
        return 1
    removed = 0
    # iterate over a copy since removals mutate childNodes
    for subelement in element.childNodes[:]:
        removed += removeComments(subelement)
    return removed
def embedRasters(element, options):
    """
    Convert a raster file reference (xlink:href to a png/jpg/gif) into an
    inline base64-encoded 'data:' URI on the element.

    NOTE: there are size limits to base64-encoding handling in browsers
    """
    # moved below the docstring: a statement before the string literal would
    # otherwise prevent it from being the function's docstring
    import base64

    global _num_rasters_embedded

    href = element.getAttributeNS(NS['XLINK'], 'href')

    # if xlink:href is set, then grab the id
    if href != '' and len(href) > 1:
        ext = os.path.splitext(os.path.basename(href))[1].lower()[1:]

        # only operate on files with 'png', 'jpg', and 'gif' file extensions
        if ext not in ('png', 'jpg', 'gif'):
            return

        # fix common issues with file paths
        # TODO: should we warn the user instead of trying to correct those invalid URIs?
        # convert backslashes to slashes
        href_fixed = href.replace('\\', '/')
        # absolute 'file:' URIs have to use three slashes (unless specifying a host which I've never seen)
        href_fixed = re.sub('file:/+', 'file:///', href_fixed)

        # parse the URI to get scheme and path
        # in principle it would make sense to work only with this ParseResult and call 'urlunparse()' in the end
        # however 'urlunparse(urlparse(file:raster.png))' -> 'file:///raster.png' which is nonsense
        parsed_href = urllib.parse.urlparse(href_fixed)

        # assume locations without protocol point to local files (and should use the 'file:' protocol)
        if parsed_href.scheme == '':
            parsed_href = parsed_href._replace(scheme='file')
            if href_fixed[0] == '/':
                href_fixed = 'file://' + href_fixed
            else:
                href_fixed = 'file:' + href_fixed

        # relative local paths are relative to the input file, therefore temporarily change the working dir
        working_dir_old = None
        # startswith() instead of path[0] so an empty path (e.g. 'file:') cannot raise IndexError
        if parsed_href.scheme == 'file' and not parsed_href.path.startswith('/'):
            if options.infilename:
                working_dir_old = os.getcwd()
                working_dir_new = os.path.abspath(os.path.dirname(options.infilename))
                os.chdir(working_dir_new)

        # open/download the file
        # initialize to empty *bytes*: urlopen().read() returns bytes, and the
        # previous str sentinel ('') compared unequal to ANY bytes value in
        # Python 3, so empty/failed reads were still embedded
        rasterdata = b''
        try:
            file = urllib.request.urlopen(href_fixed)
            rasterdata = file.read()
            file.close()
        except Exception as e:
            print("WARNING: Could not open file '" + href + "' for embedding. "
                  "The raster image will be kept as a reference but might be invalid. "
                  "(Exception details: " + str(e) + ")", file=options.ensure_value("stdout", sys.stdout))
        finally:
            # always restore initial working directory if we changed it above
            if working_dir_old is not None:
                os.chdir(working_dir_old)

        # TODO: should we remove all images which don't resolve?
        # then we also have to consider unreachable remote locations (i.e. if there is no internet connection)
        if rasterdata:
            # base64-encode raster
            b64eRaster = base64.b64encode(rasterdata)

            # set href attribute to base64-encoded equivalent
            if b64eRaster:
                # PNG and GIF both have MIME Type 'image/[ext]', but
                # JPEG has MIME Type 'image/jpeg'
                if ext == 'jpg':
                    ext = 'jpeg'

                element.setAttributeNS(NS['XLINK'], 'href',
                                       'data:image/' + ext + ';base64,' + b64eRaster.decode())
                _num_rasters_embedded += 1
def properlySizeDoc(docElement, options):
    """
    Replace a static document width/height with an equivalent viewBox,
    when this can be done without changing the rendered result.
    """
    # get doc width and height
    w = SVGLength(docElement.getAttribute('width'))
    h = SVGLength(docElement.getAttribute('height'))

    # Rewriting sizes with units other than px into a viewBox may be fine for
    # web browsers and vector editors, but not for librsvg.
    if options.renderer_workaround:
        px_like = (Unit.NONE, Unit.PX)
        if w.units not in px_like or h.units not in px_like:
            return

    # else we have a statically sized image and we should try to remedy that
    # parse any pre-existing viewBox attribute
    vbSep = RE_COMMA_WSP.split(docElement.getAttribute('viewBox'))
    if len(vbSep) == 4:
        try:
            # a non-zero origin must not be overwritten
            if float(vbSep[0]) != 0 or float(vbSep[1]) != 0:
                return
            # a viewBox size differing from the doc size must not be overwritten
            if float(vbSep[2]) != w.value or float(vbSep[3]) != h.value:
                return
        except ValueError:
            # the viewBox did not parse properly, so it is invalid and safe to overwrite
            pass

    # at this point it's safe to set the viewBox and remove width/height
    docElement.setAttribute('viewBox', '0 0 %s %s' % (w.value, h.value))
    docElement.removeAttribute('width')
    docElement.removeAttribute('height')
def remapNamespacePrefix(node, oldprefix, newprefix):
    """
    Rename the namespace prefix of an element (and, recursively, of its
    descendants) from *oldprefix* to *newprefix*.  An empty *newprefix*
    moves the element into the default (prefixless) namespace.
    """
    if node is None or node.nodeType != Node.ELEMENT_NODE:
        return

    if node.prefix == oldprefix:
        doc = node.ownerDocument
        parent = node.parentNode

        # minidom cannot rename an element in place, so build a replacement
        if newprefix != '':
            newNode = doc.createElementNS(node.namespaceURI,
                                          newprefix + ":" + node.localName)
        else:
            newNode = doc.createElement(node.localName)

        # copy all the attributes over
        attrs = node.attributes
        for index in range(attrs.length):
            attr = attrs.item(index)
            newNode.setAttributeNS(attr.namespaceURI, attr.name, attr.nodeValue)

        # deep-copy all the children
        for child in node.childNodes:
            newNode.appendChild(child.cloneNode(True))

        # swap the new element in and continue the recursion on it
        parent.replaceChild(newNode, node)
        node = newNode

    # now do all child nodes
    for child in node.childNodes:
        remapNamespacePrefix(child, oldprefix, newprefix)
def make_well_formed(text, quote_dict=None):
    """
    Escape every character listed in *quote_dict* (a char -> entity mapping).
    Defaults to the module-wide XML_ENTS_NO_QUOTES table.
    """
    if quote_dict is None:
        quote_dict = XML_ENTS_NO_QUOTES

    # Quotable characters are quite rare in SVG (they mostly only occur in
    # text elements in practice), so a scan-only fast path pays off on average.
    for ch in quote_dict:
        if ch in text:
            break
    else:
        return text

    return ''.join(quote_dict.get(c, c) for c in text)
def choose_quote_character(value):
    """
    Pick the quote character for an attribute value together with the
    matching escape table: prefer double quotes unless the value contains
    strictly more of them than single quotes.
    """
    quot_count = value.count('"')
    # The zero check short-circuits the (usually pointless) count of
    # '-symbols in the common no-quotes case.
    if quot_count and quot_count > value.count("'"):
        return "'", XML_ENTS_ESCAPE_APOS
    return '"', XML_ENTS_ESCAPE_QUOT
# Elements whose text content is whitespace-sensitive; the serializer must not
# re-indent or consolidate whitespace of text inside them.
TEXT_CONTENT_ELEMENTS = ['text', 'tspan', 'tref', 'textPath', 'altGlyph',
                         'flowDiv', 'flowPara', 'flowSpan', 'flowTref', 'flowLine']
# Attributes written first, in this fixed order; anything else follows alphabetically.
KNOWN_ATTRS = [
    # TODO: Maybe update with full list from https://www.w3.org/TR/SVG/attindex.html
    # (but should be kept intuitively ordered)
    'id', 'xml:id', 'class',
    'transform',
    'x', 'y', 'z', 'width', 'height', 'x1', 'x2', 'y1', 'y2',
    'dx', 'dy', 'rotate', 'startOffset', 'method', 'spacing',
    'cx', 'cy', 'r', 'rx', 'ry', 'fx', 'fy',
    'd', 'points',
] + sorted(svgAttributes) + [
    'style',
]
# Maps attribute name -> output rank; unknown names all share the rank just
# past the end of KNOWN_ATTRS (ties are then broken alphabetically).
KNOWN_ATTRS_ORDER_BY_NAME = defaultdict(lambda: len(KNOWN_ATTRS),
                                        {name: order for order, name in enumerate(KNOWN_ATTRS)})
# use custom order for known attributes and alphabetical order for the rest
def _attribute_sort_key_function(attribute):
name = attribute.name
order_value = KNOWN_ATTRS_ORDER_BY_NAME[name]
return order_value, name
def attributes_ordered_for_output(element):
    """
    Return the element's attribute nodes in the order they should be
    serialized (known attributes first, the rest alphabetically).
    """
    if not element.hasAttributes():
        return []
    attribute = element.attributes
    # The .item(i) call is painfully slow (bpo#40689), so invoke it exactly
    # once per attribute.  (attribute.values() would be faster still, but it
    # is an "experimental" minidom interface.)
    nodes = [attribute.item(index) for index in range(attribute.length)]
    nodes.sort(key=_attribute_sort_key_function)
    return nodes
# hand-rolled serialization function that has the following benefits:
# - pretty printing
# - somewhat judicious use of whitespace
# - ensure id attributes are first
def serializeXML(element, options, indent_depth=0, preserveWhitespace=False):
    """
    Serialize *element* and its subtree to a string.

    Honours the newline/indentation settings from *options* and the
    xml:space attribute; attributes are emitted in the order produced by
    attributes_ordered_for_output(), with style declarations sorted.

    BUGFIX: the whitespace consolidation loop previously replaced a single
    space with a single space ("while ' ' in text: replace(' ', ' ')"),
    which never changes the string and therefore looped forever on any text
    containing a space.  It now collapses runs of two spaces into one.
    """
    outParts = []
    # resolve the per-level indent string and the line separator once
    indent_type = ''
    newline = ''
    if options.newlines:
        if options.indent_type == 'tab':
            indent_type = '\t'
        elif options.indent_type == 'space':
            indent_type = ' '
        indent_type *= options.indent_depth
        newline = '\n'
    outParts.extend([(indent_type * indent_depth), '<', element.nodeName])
    # now serialize the other attributes
    attrs = attributes_ordered_for_output(element)
    for attr in attrs:
        attrValue = attr.nodeValue
        quote, xml_ent = choose_quote_character(attrValue)
        attrValue = make_well_formed(attrValue, xml_ent)
        if attr.nodeName == 'style':
            # sort declarations
            attrValue = ';'.join(sorted(attrValue.split(';')))
        outParts.append(' ')
        # preserve xmlns: if it is a namespace prefix declaration
        if attr.prefix is not None:
            outParts.extend([attr.prefix, ':'])
        elif attr.namespaceURI is not None:
            if attr.namespaceURI == 'http://www.w3.org/2000/xmlns/' and attr.nodeName.find('xmlns') == -1:
                outParts.append('xmlns:')
            elif attr.namespaceURI == 'http://www.w3.org/1999/xlink':
                outParts.append('xlink:')
        outParts.extend([attr.localName, '=', quote, attrValue, quote])
        # xml:space toggles whitespace handling for this subtree
        if attr.nodeName == 'xml:space':
            if attrValue == 'preserve':
                preserveWhitespace = True
            elif attrValue == 'default':
                preserveWhitespace = False
    children = element.childNodes
    if children.length == 0:
        outParts.append('/>')
    else:
        outParts.append('>')
        onNewLine = False
        for child in element.childNodes:
            # element node
            if child.nodeType == Node.ELEMENT_NODE:
                # do not indent inside text content elements as in SVG there's a difference between
                # "text1\ntext2" and
                # "text1\n  text2"
                # see https://www.w3.org/TR/SVG/text.html#WhiteSpace
                if preserveWhitespace or element.nodeName in TEXT_CONTENT_ELEMENTS:
                    outParts.append(serializeXML(child, options, 0, preserveWhitespace))
                else:
                    outParts.extend([newline, serializeXML(child, options, indent_depth + 1, preserveWhitespace)])
                    onNewLine = True
            # text node
            elif child.nodeType == Node.TEXT_NODE:
                text_content = child.nodeValue
                if not preserveWhitespace:
                    # strip / consolidate whitespace according to spec, see
                    # https://www.w3.org/TR/SVG/text.html#WhiteSpace
                    if element.nodeName in TEXT_CONTENT_ELEMENTS:
                        text_content = text_content.replace('\n', '')
                        text_content = text_content.replace('\t', ' ')
                        if child == element.firstChild:
                            text_content = text_content.lstrip()
                        elif child == element.lastChild:
                            text_content = text_content.rstrip()
                        # collapse runs of spaces down to a single space
                        while '  ' in text_content:
                            text_content = text_content.replace('  ', ' ')
                    else:
                        text_content = text_content.strip()
                outParts.append(make_well_formed(text_content))
            # CDATA node
            elif child.nodeType == Node.CDATA_SECTION_NODE:
                outParts.extend(['<![CDATA[', child.nodeValue, ']]>'])
            # Comment node
            elif child.nodeType == Node.COMMENT_NODE:
                outParts.extend([newline, indent_type * (indent_depth+1), '<!--', child.nodeValue, '-->'])
            # TODO: entities, processing instructions, what else?
            else:  # ignore the rest
                pass
        if onNewLine:
            outParts.append(newline)
            outParts.append(indent_type * indent_depth)
        outParts.extend(['</', element.nodeName, '>'])
    return "".join(outParts)
# this is the main method
# input is a string representation of the input XML
# returns a string representation of the output XML
def scourString(in_string, options=None):
    """
    Scour the SVG document given as a string and return the scoured SVG
    as a string.

    This is the main entry point of the optimization pipeline; the passes
    below are strictly order-dependent (e.g. group creation must precede
    attribute hoisting, which must precede group collapsing).

    ``options`` may be an optparse-style values object or None; missing
    attributes are filled in from the defaults and unknown ones discarded.
    """
    # sanitize options (take missing attributes from defaults, discard unknown attributes)
    options = sanitizeOptions(options)
    # default or invalid value
    if(options.cdigits < 0):
        options.cdigits = options.digits
    # create decimal contexts with reduced precision for scouring numbers
    # calculations should be done in the default context (precision defaults to 28 significant digits)
    # to minimize errors
    global scouringContext
    global scouringContextC  # even more reduced precision for control points
    scouringContext = Context(prec=options.digits)
    scouringContextC = Context(prec=options.cdigits)
    # globals for tracking statistics
    # TODO: get rid of these globals...
    global _num_elements_removed
    global _num_attributes_removed
    global _num_ids_removed
    global _num_comments_removed
    global _num_style_properties_fixed
    global _num_rasters_embedded
    global _num_path_segments_removed
    global _num_points_removed_from_polygon
    global _num_bytes_saved_in_path_data
    global _num_bytes_saved_in_colors
    global _num_bytes_saved_in_comments
    global _num_bytes_saved_in_ids
    global _num_bytes_saved_in_lengths
    global _num_bytes_saved_in_transforms
    _num_elements_removed = 0
    _num_attributes_removed = 0
    _num_ids_removed = 0
    _num_comments_removed = 0
    _num_style_properties_fixed = 0
    _num_rasters_embedded = 0
    _num_path_segments_removed = 0
    _num_points_removed_from_polygon = 0
    _num_bytes_saved_in_path_data = 0
    _num_bytes_saved_in_colors = 0
    _num_bytes_saved_in_comments = 0
    _num_bytes_saved_in_ids = 0
    _num_bytes_saved_in_lengths = 0
    _num_bytes_saved_in_transforms = 0
    doc = xml.dom.minidom.parseString(in_string)
    # determine number of flowRoot elements in input document
    # flowRoot elements don't render at all on current browsers (04/2016)
    cnt_flowText_el = len(doc.getElementsByTagName('flowRoot'))
    if cnt_flowText_el:
        errmsg = "SVG input document uses {} flow text elements, " \
                 "which won't render on browsers!".format(cnt_flowText_el)
        if options.error_on_flowtext:
            raise Exception(errmsg)
        else:
            print("WARNING: {}".format(errmsg), file=sys.stderr)
    # remove descriptive elements
    removeDescriptiveElements(doc, options)
    # remove unneeded namespaced elements/attributes added by common editors
    if options.keep_editor_data is False:
        _num_elements_removed += removeNamespacedElements(doc.documentElement,
                                                          unwanted_ns)
        _num_attributes_removed += removeNamespacedAttributes(doc.documentElement,
                                                              unwanted_ns)
        # remove the xmlns: declarations now
        xmlnsDeclsToRemove = []
        attrList = doc.documentElement.attributes
        for index in range(attrList.length):
            if attrList.item(index).nodeValue in unwanted_ns:
                xmlnsDeclsToRemove.append(attrList.item(index).nodeName)
        for attr in xmlnsDeclsToRemove:
            doc.documentElement.removeAttribute(attr)
            _num_attributes_removed += 1
    # ensure namespace for SVG is declared
    # TODO: what if the default namespace is something else (i.e. some valid namespace)?
    if doc.documentElement.getAttribute('xmlns') != 'http://www.w3.org/2000/svg':
        doc.documentElement.setAttribute('xmlns', 'http://www.w3.org/2000/svg')
        # TODO: throw error or warning?
    # check for redundant and unused SVG namespace declarations
    def xmlnsUnused(prefix, namespace):
        # a namespace is in use if any element lives in it, or any element
        # carries an attribute with its prefix
        if doc.getElementsByTagNameNS(namespace, "*"):
            return False
        else:
            for element in doc.getElementsByTagName("*"):
                for attribute in element.attributes.values():
                    if attribute.name.startswith(prefix):
                        return False
        return True
    attrList = doc.documentElement.attributes
    xmlnsDeclsToRemove = []
    redundantPrefixes = []
    for i in range(attrList.length):
        attr = attrList.item(i)
        name = attr.nodeName
        val = attr.nodeValue
        if name[0:6] == 'xmlns:':
            if val == 'http://www.w3.org/2000/svg':
                redundantPrefixes.append(name[6:])
                xmlnsDeclsToRemove.append(name)
            elif xmlnsUnused(name[6:], val):
                xmlnsDeclsToRemove.append(name)
    for attrName in xmlnsDeclsToRemove:
        doc.documentElement.removeAttribute(attrName)
        _num_attributes_removed += 1
    for prefix in redundantPrefixes:
        remapNamespacePrefix(doc.documentElement, prefix, '')
    if options.strip_comments:
        _num_comments_removed = removeComments(doc)
    if options.strip_xml_space_attribute and doc.documentElement.hasAttribute('xml:space'):
        doc.documentElement.removeAttribute('xml:space')
        _num_attributes_removed += 1
    # repair style (remove unnecessary style properties and change them into XML attributes)
    _num_style_properties_fixed = repairStyle(doc.documentElement, options)
    # convert colors to #RRGGBB format
    if options.simple_colors:
        _num_bytes_saved_in_colors = convertColors(doc.documentElement)
    # remove unreferenced gradients/patterns outside of defs
    # and most unreferenced elements inside of defs
    while removeUnreferencedElements(doc, options.keep_defs) > 0:
        pass
    # remove empty defs, metadata, g
    # NOTE: these elements will be removed if they just have whitespace-only text nodes
    for tag in ['defs', 'title', 'desc', 'metadata', 'g']:
        for elem in doc.documentElement.getElementsByTagName(tag):
            removeElem = not elem.hasChildNodes()
            if removeElem is False:
                for child in elem.childNodes:
                    if child.nodeType in [Node.ELEMENT_NODE, Node.CDATA_SECTION_NODE, Node.COMMENT_NODE]:
                        break
                    elif child.nodeType == Node.TEXT_NODE and not child.nodeValue.isspace():
                        break
                else:
                    # loop completed without break: only whitespace text inside
                    removeElem = True
            if removeElem:
                elem.parentNode.removeChild(elem)
                _num_elements_removed += 1
    if options.strip_ids:
        referencedIDs = findReferencedElements(doc.documentElement)
        identifiedElements = unprotected_ids(doc, options)
        removeUnreferencedIDs(referencedIDs, identifiedElements)
    while removeDuplicateGradientStops(doc) > 0:
        pass
    # remove gradients that are only referenced by one other gradient
    while collapseSinglyReferencedGradients(doc) > 0:
        pass
    # remove duplicate gradients
    _num_elements_removed += removeDuplicateGradients(doc)
    if options.group_collapse:
        _num_elements_removed += mergeSiblingGroupsWithCommonAttributes(doc.documentElement)
    # create <g> elements if there are runs of elements with the same attributes.
    # this MUST be before moveCommonAttributesToParentGroup.
    if options.group_create:
        createGroupsForCommonAttributes(doc.documentElement)
    # move common attributes to parent group
    # NOTE: the if the <svg> element's immediate children
    # all have the same value for an attribute, it must not
    # get moved to the <svg> element. The <svg> element
    # doesn't accept fill=, stroke= etc.!
    referencedIds = findReferencedElements(doc.documentElement)
    for child in doc.documentElement.childNodes:
        _num_attributes_removed += moveCommonAttributesToParentGroup(child, referencedIds)
    # remove unused attributes from parent
    _num_attributes_removed += removeUnusedAttributesOnParent(doc.documentElement)
    # Collapse groups LAST, because we've created groups. If done before
    # moveAttributesToParentGroup, empty <g>'s may remain.
    if options.group_collapse:
        while removeNestedGroups(doc.documentElement) > 0:
            pass
    # remove unnecessary closing point of polygons and scour points
    for polygon in doc.documentElement.getElementsByTagName('polygon'):
        cleanPolygon(polygon, options)
    # scour points of polyline
    for polyline in doc.documentElement.getElementsByTagName('polyline'):
        cleanPolyline(polyline, options)
    # clean path data
    for elem in doc.documentElement.getElementsByTagName('path'):
        if elem.getAttribute('d') == '':
            elem.parentNode.removeChild(elem)
        else:
            cleanPath(elem, options)
    # shorten ID names as much as possible
    if options.shorten_ids:
        _num_bytes_saved_in_ids += shortenIDs(doc, options.shorten_ids_prefix, options)
    # scour lengths (including coordinates)
    for type in ['svg', 'image', 'rect', 'circle', 'ellipse', 'line',
                 'linearGradient', 'radialGradient', 'stop', 'filter']:
        for elem in doc.getElementsByTagName(type):
            for attr in ['x', 'y', 'width', 'height', 'cx', 'cy', 'r', 'rx', 'ry',
                         'x1', 'y1', 'x2', 'y2', 'fx', 'fy', 'offset']:
                if elem.getAttribute(attr) != '':
                    elem.setAttribute(attr, scourLength(elem.getAttribute(attr)))
    viewBox = doc.documentElement.getAttribute('viewBox')
    if viewBox:
        lengths = RE_COMMA_WSP.split(viewBox)
        lengths = [scourUnitlessLength(length) for length in lengths]
        doc.documentElement.setAttribute('viewBox', ' '.join(lengths))
    # more length scouring in this function
    _num_bytes_saved_in_lengths = reducePrecision(doc.documentElement)
    # remove default values of attributes
    _num_attributes_removed += removeDefaultAttributeValues(doc.documentElement, options)
    # reduce the length of transformation attributes
    _num_bytes_saved_in_transforms = optimizeTransforms(doc.documentElement, options)
    # convert rasters references to base64-encoded strings
    if options.embed_rasters:
        for elem in doc.documentElement.getElementsByTagName('image'):
            embedRasters(elem, options)
    # properly size the SVG document (ideally width/height should be 100% with a viewBox)
    if options.enable_viewboxing:
        properlySizeDoc(doc.documentElement, options)
    # output the document as a pretty string with a single space for indent
    # NOTE: removed pretty printing because of this problem:
    # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-and-silly-whitespace/
    # rolled our own serialize function here to save on space, put id first, customize indentation, etc
    # out_string = doc.documentElement.toprettyxml(' ')
    out_string = serializeXML(doc.documentElement, options) + '\n'
    # return the string with its XML prolog and surrounding comments
    if options.strip_xml_prolog is False:
        total_output = '<?xml version="1.0" encoding="UTF-8"'
        if doc.standalone:
            total_output += ' standalone="yes"'
        total_output += '?>\n'
    else:
        total_output = ""
    for child in doc.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            total_output += out_string
        else:  # doctypes, entities, comments
            total_output += child.toxml() + '\n'
    return total_output
# used mostly by unit tests
# input is a filename
# returns the minidom doc representation of the SVG
def scourXmlFile(filename, options=None):
    """
    Scour the SVG file *filename* and return the scoured result as an
    xml.dom.minidom document.  Used mostly by unit tests.
    """
    # sanitize options (take missing attributes from defaults, discard unknown attributes)
    options = sanitizeOptions(options)
    # infilename must be set so relative references inside the SVG resolve
    options.ensure_value("infilename", filename)

    # scour the raw bytes of the file
    with open(filename, "rb") as svg_file:
        scoured = scourString(svg_file.read(), options)

    # re-parse the scoured output
    doc = xml.dom.minidom.parseString(scoured.encode('utf-8'))

    # minidom does not seem to parse DTDs properly, so manually declare every
    # attribute named "id" to be of type ID (otherwise things like
    # doc.getElementById() won't work)
    for node in doc.getElementsByTagName("*"):
        try:
            node.setIdAttribute('id')
        except NotFoundErr:
            pass

    return doc
# GZ: Seems most other commandline tools don't do this, is it really wanted?
class HeaderedFormatter(optparse.IndentedHelpFormatter):
    """
    Show application name, version number, and copyright statement
    above usage information.
    """
    def format_usage(self, usage):
        # prepend the application banner to the stock usage text
        header = "%s %s\n%s\n" % (APP, VER, COPYRIGHT)
        return header + optparse.IndentedHelpFormatter.format_usage(self, usage)
# GZ: would prefer this to be in a function or class scope, but tests etc need
# access to the defaults anyway
# Command-line interface definition; the parser doubles as the source of the
# default option values used by sanitizeOptions()/generateDefaultOptions().
_options_parser = optparse.OptionParser(
    usage="%prog [INPUT.SVG [OUTPUT.SVG]] [OPTIONS]",
    description=("If the input/output files are not specified, stdin/stdout are used. "
                 "If the input/output files are specified with a svgz extension, "
                 "then compressed SVG is assumed."),
    formatter=HeaderedFormatter(max_help_position=33),
    version=VER)
# legacy options (kept around for backwards compatibility, should not be used in new code)
# -p is the hidden legacy spelling of --set-precision (both share dest="digits")
_options_parser.add_option("-p", action="store", type=int, dest="digits", help=optparse.SUPPRESS_HELP)
# general options
_options_parser.add_option("-q", "--quiet",
                           action="store_true", dest="quiet", default=False,
                           help="suppress non-error output")
_options_parser.add_option("-v", "--verbose",
                           action="store_true", dest="verbose", default=False,
                           help="verbose output (statistics, etc.)")
_options_parser.add_option("-i",
                           action="store", dest="infilename", metavar="INPUT.SVG",
                           help="alternative way to specify input filename")
_options_parser.add_option("-o",
                           action="store", dest="outfilename", metavar="OUTPUT.SVG",
                           help="alternative way to specify output filename")
_option_group_optimization = optparse.OptionGroup(_options_parser, "Optimization")
_option_group_optimization.add_option("--set-precision",
                                      action="store", type=int, dest="digits", default=5, metavar="NUM",
                                      help="set number of significant digits (default: %default)")
# -1 is a sentinel meaning "same as --set-precision" (resolved in scourString)
_option_group_optimization.add_option("--set-c-precision",
                                      action="store", type=int, dest="cdigits", default=-1, metavar="NUM",
                                      help="set number of significant digits for control points "
                                           "(default: same as '--set-precision')")
_option_group_optimization.add_option("--disable-simplify-colors",
                                      action="store_false", dest="simple_colors", default=True,
                                      help="won't convert colors to #RRGGBB format")
_option_group_optimization.add_option("--disable-style-to-xml",
                                      action="store_false", dest="style_to_xml", default=True,
                                      help="won't convert styles into XML attributes")
_option_group_optimization.add_option("--disable-group-collapsing",
                                      action="store_false", dest="group_collapse", default=True,
                                      help="won't collapse <g> elements")
_option_group_optimization.add_option("--create-groups",
                                      action="store_true", dest="group_create", default=False,
                                      help="create <g> elements for runs of elements with identical attributes")
_option_group_optimization.add_option("--keep-editor-data",
                                      action="store_true", dest="keep_editor_data", default=False,
                                      help="won't remove Inkscape, Sodipodi, Adobe Illustrator "
                                           "or Sketch elements and attributes")
_option_group_optimization.add_option("--keep-unreferenced-defs",
                                      action="store_true", dest="keep_defs", default=False,
                                      help="won't remove elements within the defs container that are unreferenced")
_option_group_optimization.add_option("--renderer-workaround",
                                      action="store_true", dest="renderer_workaround", default=True,
                                      help="work around various renderer bugs (currently only librsvg) (default)")
_option_group_optimization.add_option("--no-renderer-workaround",
                                      action="store_false", dest="renderer_workaround", default=True,
                                      help="do not work around various renderer bugs (currently only librsvg)")
_options_parser.add_option_group(_option_group_optimization)
_option_group_document = optparse.OptionGroup(_options_parser, "SVG document")
_option_group_document.add_option("--strip-xml-prolog",
                                  action="store_true", dest="strip_xml_prolog", default=False,
                                  help="won't output the XML prolog (<?xml ?>)")
_option_group_document.add_option("--remove-titles",
                                  action="store_true", dest="remove_titles", default=False,
                                  help="remove <title> elements")
_option_group_document.add_option("--remove-descriptions",
                                  action="store_true", dest="remove_descriptions", default=False,
                                  help="remove <desc> elements")
_option_group_document.add_option("--remove-metadata",
                                  action="store_true", dest="remove_metadata", default=False,
                                  help="remove <metadata> elements "
                                       "(which may contain license/author information etc.)")
_option_group_document.add_option("--remove-descriptive-elements",
                                  action="store_true", dest="remove_descriptive_elements", default=False,
                                  help="remove <title>, <desc> and <metadata> elements")
_option_group_document.add_option("--enable-comment-stripping",
                                  action="store_true", dest="strip_comments", default=False,
                                  help="remove all comments (<!-- -->)")
_option_group_document.add_option("--disable-embed-rasters",
                                  action="store_false", dest="embed_rasters", default=True,
                                  help="won't embed rasters as base64-encoded data")
_option_group_document.add_option("--enable-viewboxing",
                                  action="store_true", dest="enable_viewboxing", default=False,
                                  help="changes document width/height to 100%/100% and creates viewbox coordinates")
_options_parser.add_option_group(_option_group_document)
_option_group_formatting = optparse.OptionGroup(_options_parser, "Output formatting")
_option_group_formatting.add_option("--indent",
                                    action="store", type="string", dest="indent_type", default="space", metavar="TYPE",
                                    help="indentation of the output: none, space, tab (default: %default)")
_option_group_formatting.add_option("--nindent",
                                    action="store", type=int, dest="indent_depth", default=1, metavar="NUM",
                                    help="depth of the indentation, i.e. number of spaces/tabs: (default: %default)")
_option_group_formatting.add_option("--no-line-breaks",
                                    action="store_false", dest="newlines", default=True,
                                    help="do not create line breaks in output"
                                         "(also disables indentation; might be overridden by xml:space=\"preserve\")")
_option_group_formatting.add_option("--strip-xml-space",
                                    action="store_true", dest="strip_xml_space_attribute", default=False,
                                    help="strip the xml:space=\"preserve\" attribute from the root SVG element")
_options_parser.add_option_group(_option_group_formatting)
_option_group_ids = optparse.OptionGroup(_options_parser, "ID attributes")
_option_group_ids.add_option("--enable-id-stripping",
                             action="store_true", dest="strip_ids", default=False,
                             help="remove all unreferenced IDs")
_option_group_ids.add_option("--shorten-ids",
                             action="store_true", dest="shorten_ids", default=False,
                             help="shorten all IDs to the least number of letters possible")
_option_group_ids.add_option("--shorten-ids-prefix",
                             action="store", type="string", dest="shorten_ids_prefix", default="", metavar="PREFIX",
                             help="add custom prefix to shortened IDs")
_option_group_ids.add_option("--protect-ids-noninkscape",
                             action="store_true", dest="protect_ids_noninkscape", default=False,
                             help="don't remove IDs not ending with a digit")
_option_group_ids.add_option("--protect-ids-list",
                             action="store", type="string", dest="protect_ids_list", metavar="LIST",
                             help="don't remove IDs given in this comma-separated list")
_option_group_ids.add_option("--protect-ids-prefix",
                             action="store", type="string", dest="protect_ids_prefix", metavar="PREFIX",
                             help="don't remove IDs starting with the given prefix")
_options_parser.add_option_group(_option_group_ids)
_option_group_compatibility = optparse.OptionGroup(_options_parser, "SVG compatibility checks")
_option_group_compatibility.add_option("--error-on-flowtext",
                                       action="store_true", dest="error_on_flowtext", default=False,
                                       help="exit with error if the input SVG uses non-standard flowing text "
                                            "(only warn by default)")
_options_parser.add_option_group(_option_group_compatibility)
def parse_args(args=None, ignore_additional_args=False):
    """
    Parse command-line arguments (defaults to sys.argv) and return the
    validated options object.  Positional arguments are treated as the
    input and output file names, in that order.
    """
    options, rargs = _options_parser.parse_args(args)

    # map positional arguments onto -i / -o when those were not given
    if rargs:
        if not options.infilename:
            options.infilename = rargs.pop(0)
        if not options.outfilename and rargs:
            options.outfilename = rargs.pop(0)
    if not ignore_additional_args and rargs:
        _options_parser.error("Additional arguments not handled: %r, see --help" % rargs)

    if options.digits < 1:
        _options_parser.error("Number of significant digits has to be larger than zero, see --help")

    # control-point precision may not exceed the general precision
    if options.cdigits > options.digits:
        options.cdigits = -1
        print("WARNING: The value for '--set-c-precision' should be lower than the value for '--set-precision'. "
              "Number of significant digits for control points reset to default value, see --help", file=sys.stderr)

    if options.indent_type not in ['tab', 'space', 'none']:
        _options_parser.error("Invalid value for --indent, see --help")
    if options.indent_depth < 0:
        _options_parser.error("Value for --nindent should be positive (or zero), see --help")

    if options.infilename and options.outfilename and options.infilename == options.outfilename:
        _options_parser.error("Input filename is the same as output filename")

    return options
# this function was replaced by 'sanitizeOptions()' and is only kept for backwards compatibility
# TODO: delete this at some point or continue to keep it around?
def generateDefaultOptions():
    """Return a fresh options object populated with all default values."""
    return sanitizeOptions()
# sanitizes options by updating attributes in a set of defaults options while discarding unknown attributes
def sanitizeOptions(options=None):
    """
    Update a set of default options with the attributes of *options*,
    silently discarding any attribute the parser does not know about.
    Passing None yields the pure defaults.
    """
    supplied = {name: getattr(options, name)
                for name in dir(options) if not name.startswith('__')}
    sanitized = _options_parser.get_default_values()
    # _update_careful() only touches attributes the defaults already define
    sanitized._update_careful(supplied)
    return sanitized
def maybe_gziped_file(filename, mode="r"):
    """
    Open *filename*, transparently using gzip when its extension is
    '.svgz' or '.gz'.
    """
    extension = os.path.splitext(filename)[1].lower()
    if extension in (".svgz", ".gz"):
        import gzip
        return gzip.GzipFile(filename, mode)
    return open(filename, mode)
def getInOut(options):
    """
    Resolve the input and output streams from the parsed options.

    Falls back to stdin/stdout (their binary buffers where available) when
    no file names were given.  Returns [infile, outfile].
    """
    if options.infilename:
        infile = maybe_gziped_file(options.infilename, "rb")
        # GZ: could catch a raised IOError here and report
    else:
        # GZ: could sniff for gzip compression here
        #
        # use stdin's binary buffer (when it has one) and let the XML
        # parser handle decoding
        infile = getattr(sys.stdin, 'buffer', sys.stdin)
        # the user probably does not want to manually enter SVG code into the terminal...
        if sys.stdin.isatty():
            _options_parser.error("No input file specified, see --help for detailed usage information")

    if options.outfilename:
        outfile = maybe_gziped_file(options.outfilename, "wb")
    else:
        # stdout's binary buffer, as the output is already encoded
        outfile = getattr(sys.stdout, 'buffer', sys.stdout)
        # redirect informational output to stderr when SVG is output to stdout
        options.stdout = sys.stderr

    return [infile, outfile]
def getReport():
    """Return a multi-line, human-readable summary of everything scour changed.

    The ``_num_*`` counters are module-level globals that are updated while
    the document is processed.
    """
    return (
        ' Number of elements removed: ' + str(_num_elements_removed) + os.linesep +
        ' Number of attributes removed: ' + str(_num_attributes_removed) + os.linesep +
        ' Number of unreferenced IDs removed: ' + str(_num_ids_removed) + os.linesep +
        ' Number of comments removed: ' + str(_num_comments_removed) + os.linesep +
        ' Number of style properties fixed: ' + str(_num_style_properties_fixed) + os.linesep +
        ' Number of raster images embedded: ' + str(_num_rasters_embedded) + os.linesep +
        ' Number of path segments reduced/removed: ' + str(_num_path_segments_removed) + os.linesep +
        ' Number of points removed from polygons: ' + str(_num_points_removed_from_polygon) + os.linesep +
        ' Number of bytes saved in path data: ' + str(_num_bytes_saved_in_path_data) + os.linesep +
        ' Number of bytes saved in colors: ' + str(_num_bytes_saved_in_colors) + os.linesep +
        ' Number of bytes saved in comments: ' + str(_num_bytes_saved_in_comments) + os.linesep +
        ' Number of bytes saved in IDs: ' + str(_num_bytes_saved_in_ids) + os.linesep +
        ' Number of bytes saved in lengths: ' + str(_num_bytes_saved_in_lengths) + os.linesep +
        ' Number of bytes saved in transformations: ' + str(_num_bytes_saved_in_transforms)
    )
def start(options, input, output):
    """Scour *input* and write the result to *output*, reporting statistics.

    *input* and *output* are open binary file objects (see getInOut());
    both are closed afterwards unless they wrap stdin/stdout.
    """
    # sanitize options (take missing attributes from defaults, discard unknown attributes)
    options = sanitizeOptions(options)
    start = time.time()  # NOTE: local shadows this function's own name; harmless here
    # do the work
    in_string = input.read()
    out_string = scourString(in_string, options).encode("UTF-8")
    output.write(out_string)
    # Close input and output files (but do not attempt to close stdin/stdout!)
    if not ((input is sys.stdin) or (hasattr(sys.stdin, 'buffer') and input is sys.stdin.buffer)):
        input.close()
    if not ((output is sys.stdout) or (hasattr(sys.stdout, 'buffer') and output is sys.stdout.buffer)):
        output.close()
    end = time.time()
    # run-time in ms
    duration = int(round((end - start) * 1000.))
    oldsize = len(in_string)
    newsize = len(out_string)
    # size of the result as a percentage of the original
    sizediff = (newsize / oldsize) * 100.
    if not options.quiet:
        print('Scour processed file "{}" in {} ms: {}/{} bytes new/orig -> {:.1f}%'.format(
            input.name,
            duration,
            newsize,
            oldsize,
            sizediff), file=options.ensure_value("stdout", sys.stdout))
        if options.verbose:
            print(getReport(), file=options.ensure_value("stdout", sys.stdout))
def run():
    """Command-line entry point: parse arguments, then scour input to output."""
    options = parse_args()
    (input, output) = getInOut(options)
    start(options, input, output)
if __name__ == '__main__':
run()
| codedread/scour | scour/scour.py | Python | apache-2.0 | 178,584 | [
"VisIt"
] | ba723e8a69a43210c6ddab6a359e181245b8a4976505f41e7a789d895349b405 |
#!/usr/bin/env python
#
# LSST Data Management System
# Copyright 2012 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
import os
import unittest
import lsst.utils.tests as utilsTests
from lsst.pex.policy import Policy
import lsst.daf.persistence as dafPersist
from lsst.obs.decam import DecamMapper
import lsst.afw.display.ds9 as ds9
import lsst.afw.display.utils as displayUtils
import lsst.afw.cameraGeom as cameraGeom
import lsst.afw.cameraGeom.utils as cameraGeomUtils
try:
type(display)
except NameError:
display = False
def getButler(datadir):
    """Return a data butler for the DECam test data rooted at *datadir*.

    Raw data is expected under ``datadir/DATA`` and calibrations under
    ``datadir/CALIB``.
    """
    bf = dafPersist.ButlerFactory(mapper=DecamMapper(root=os.path.join(datadir, "DATA"),
                                                     calibRoot=os.path.join(datadir, "CALIB")))
    return bf.create()
class GetRawTestCase(unittest.TestCase):
    """Testing butler raw image retrieval"""
    def setUp(self):
        # Requires the testdata_decam package to be set up (eups); skipped
        # via assertion failure otherwise.
        self.datadir = os.getenv("TESTDATA_DECAM_DIR")
        assert self.datadir, "testdata_decam is not setup"
        self.butler = getButler(self.datadir)
        self.size = (2160, 4146)  # expected raw dimensions (width, height)
        self.dataId = {'visit': 135635}
        self.filter = "r"
    def tearDown(self):
        # Drop the butler reference so MemoryTestCase sees no leaks.
        del self.butler
    def assertExposure(self, exp, side, ccd):
        # Verify dimensions, filter and detector name of one exposure.
        print "dataId: ", self.dataId
        print "ccd: ", ccd
        print "width: ", exp.getWidth()
        print "height: ", exp.getHeight()
        print "detector name: ", exp.getDetector().getId().getName()
        self.assertEqual(exp.getWidth(), self.size[0])
        self.assertEqual(exp.getHeight(), self.size[1])
        self.assertEqual(exp.getFilter().getFilterProperty().getName(), self.filter)
        self.assertEqual(exp.getDetector().getId().getName(), "%s%d" % (side.upper(), ccd))
    def testRaw(self):
        """Test retrieval of raw image"""
        frame = 0
        if display:
            cameraGeomUtils.showCamera(self.butler.mapper.camera, frame=frame)
        # Iterate over both focal-plane halves and all 31 CCDs per side.
        for side in ("N", "S"):
            for ccd in range(1, 32, 1):
                raw = self.butler.get("raw", self.dataId, side=side, ccd=ccd)
                self.assertExposure(raw, side, ccd)
                if display:
                    frame += 1
                    ccd = cameraGeom.cast_Ccd(raw.getDetector())
                    for amp in ccd:
                        amp = cameraGeom.cast_Amp(amp)
                        print ccd.getId(), amp.getId(), amp.getDataSec().toString(), \
                            amp.getBiasSec().toString(), amp.getElectronicParams().getGain()
                    cameraGeomUtils.showCcd(ccd, ccdImage=raw, frame=frame)
# def testFlat(self):
# """Test retrieval of flat image"""
# for ccd in range(12):
# flat = self.butler.get("flat", self.dataId, ccd=ccd)
#
# self.assertExposure(flat, ccd)
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def suite():
    """Returns a suite containing all the test cases in this module."""
    utilsTests.init()
    case_classes = [GetRawTestCase, utilsTests.MemoryTestCase]
    return unittest.TestSuite([unittest.makeSuite(cls) for cls in case_classes])
def run(shouldExit = False):
    """Run the tests"""
    # shouldExit=True makes the process exit with a failure status if any
    # test fails (used when run as a script).
    utilsTests.run(suite(), shouldExit)
if __name__ == "__main__":
run(True)
| LSST-nonproject/obs_omegacam | tests/getRaw.py | Python | gpl-3.0 | 4,058 | [
"VisIt"
] | 3e4622187211ba145830a0910c43a065f71dfacb4473009a5ae02bfdf2871b6e |
#!/usr/bin/env python
#
# Copyright 2002-2003 by Michael Hoffman. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.DocSQL: easy access to DB API databases.
>>> import os
>>> import MySQLdb
>>> from Bio import DocSQL
>>> db=MySQLdb.connect(passwd='', db='test')
>>> class CreatePeople(DocSQL.Create):
... '''
... CREATE TEMPORARY TABLE people
... (id INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT,
... last_name TINYTEXT,
... first_name TINYTEXT)
... '''
...
>>> CreatePeople(connection=db)
CreatePeople(message=Success)
"""
from __future__ import print_function
import sys
from Bio import MissingPythonDependencyError
# MySQLdb is a hard dependency of this module; translate its absence into
# Biopython's standard "missing dependency" error.  Only ImportError should
# trigger the hint -- the original bare "except:" also masked unrelated
# errors raised during MySQLdb's own import.
try:
    import MySQLdb
except ImportError:
    raise MissingPythonDependencyError("Install MySQLdb if you want to use "
                                       "Bio.DocSQL.")
__docformat__ = "restructuredtext en"
connection = None
class NoInsertionError(Exception):
    """Raised when an Insert query affects zero rows."""
    pass
def _check_is_public(name):
if name[:6] == "_names":
raise AttributeError
class QueryRow(list):
    """One result row; columns are available by index and as attributes."""
    def __init__(self, cursor):
        try:
            row = cursor.fetchone()
            super(QueryRow, self).__init__(row)
        except TypeError:
            # fetchone() returned None: the result set is exhausted.
            raise StopIteration
        # Bypass our own __setattr__ while the name tables are being built.
        object.__setattr__(self, "_names", [x[0] for x in cursor.description]) # FIXME: legacy
        object.__setattr__(self, "_names_hash", {})
        for i, name in enumerate(self._names):
            self._names_hash[name] = i
    def __getattr__(self, name):
        # Map attribute access onto the column of the same name.
        _check_is_public(name)
        try:
            return self[self._names_hash[name]]
        except (KeyError, AttributeError):
            raise AttributeError("'%s' object has no attribute '%s'"
                                 % (self.__class__.__name__, name))
    def __setattr__(self, name, value):
        # Before the name tables exist, fall back to normal assignment.
        try:
            self._names_hash
        except AttributeError:
            return object.__setattr__(self, name, value)
        _check_is_public(name)
        try:
            # Known column name: update the list slot in place.
            index = self._names_hash[name]
            self[index] = value
        except KeyError:
            return object.__setattr__(self, name, value)
class Query(object):
    """
    SHOW TABLES
    """
    # NOTE: the class docstring above is not documentation -- it IS the SQL
    # statement executed by this query (see __init__).  Subclasses provide
    # their own statement by overriding the docstring; do not add ordinary
    # documentation strings to Query subclasses.
    MSG_FAILURE = "Failure"
    MSG_SUCCESS = "Success"
    message = "not executed"
    error_message = ""
    prefix = ""
    suffix = ""
    row_class = QueryRow
    def __init__(self, *args, **keywds):
        # Fall back to the module-level connection when none is supplied.
        try:
            self.connection = keywds['connection']
        except KeyError:
            self.connection = connection
        try:
            self.diagnostics = keywds['diagnostics']
        except KeyError:
            self.diagnostics = 0
        # The SQL text is the class docstring, optionally wrapped.
        self.statement = self.prefix + self.__doc__ + self.suffix
        self.params = args
    def __iter__(self):
        # Each iteration opens a fresh cursor and executes the statement.
        return IterationCursor(self, self.connection)
    def __repr__(self):
        return "%s(message=%s)" % (self.__class__.__name__, self.message)
    def cursor(self):
        return iter(self).cursor
    def dump(self):
        # Print every row; debugging aid.
        for item in self:
            print(item)
class QueryGeneric(Query):
    """Query whose SQL statement is supplied at construction time.

    NOTE: this docstring is also required for correctness: Query.__init__
    evaluates ``prefix + __doc__ + suffix``, which raised TypeError when
    the class had no docstring (``__doc__`` was None).  The computed value
    is immediately replaced by *statement* below.
    """
    def __init__(self, statement, *args, **keywds):
        Query.__init__(self, *args, **keywds)
        # Assign the string itself; the original trailing comma turned the
        # statement into a 1-tuple, which DB API cursors cannot execute.
        self.statement = statement
class IterationCursor(object):
    """Iterator over a query's result set; executes the query on creation."""
    def __init__(self, query, connection=connection):
        if connection is None:
            raise TypeError("database connection is None")
        self.cursor = connection.cursor()
        self.row_class = query.row_class
        if query.diagnostics:
            # Echo the statement and parameters for debugging.
            sys.stderr.write("Query statement: %s\n" % query.statement)
            sys.stderr.write("Query params: %s\n" % query.params)
        self.cursor.execute(query.statement, query.params)
    def __next__(self):
        # row_class raises StopIteration when the result set is exhausted.
        return self.row_class(self.cursor)
    if sys.version_info[0] < 3:
        def next(self):
            """Python 2 style alias for Python 3 style __next__ method."""
            return self.__next__()
class QuerySingle(Query, QueryRow):
    # A query expected to return exactly one row; the instance itself is
    # initialized from that row.  Intended to be subclassed with the SQL
    # statement as the subclass docstring (no docstring here on purpose:
    # it would become the SQL statement -- see Query).
    ignore_warnings = 0
    def __init__(self, *args, **keywds):
        # NOTE(review): this local 'message' is never used; it looks like
        # 'self.message' was intended -- verify before changing.
        message = self.MSG_FAILURE
        Query.__init__(self, *args, **keywds)
        try:
            self.single_cursor = Query.cursor(self)
        except MySQLdb.Warning:
            if not self.ignore_warnings:
                raise
        self.row_class.__init__(self, self.cursor())
        # Bypass QueryRow.__setattr__, which maps attributes to columns.
        object.__setattr__(self, "message", self.MSG_SUCCESS)
    def cursor(self):
        # Reuse the cursor opened in __init__ rather than re-executing.
        return self.single_cursor
class QueryAll(list, Query):
    # Executes the query and loads every row into this list, each passed
    # through process_row().  Subclasses supply the SQL as their docstring
    # (no docstring here on purpose -- see Query).
    def __init__(self, *args, **keywds):
        Query.__init__(self, *args, **keywds)
        list.__init__(self, [self.process_row(r) for r in self.cursor().fetchall()])
    def process_row(self, row):
        # Identity by default; hook for subclasses.
        return row
class QueryAllFirstItem(QueryAll):
    # Like QueryAll, but keeps only the first column of each row.
    def process_row(self, row):
        return row[0]
class Create(QuerySingle):
    # DDL/DML statement: no result rows are expected, so the StopIteration
    # raised by QueryRow on an empty result set is treated as success.
    def __init__(self, *args, **keywds):
        try:
            QuerySingle.__init__(self, *args, **keywds)
        except StopIteration:
            self.message = self.MSG_SUCCESS
class Update(Create):
    # UPDATE statements behave exactly like Create (no rows expected).
    pass
class Insert(Create):
    # INSERT statement; records the last insert id and the cumulative
    # affected-row count across re-raised integrity failures.
    MSG_INTEGRITY_ERROR = "Couldn't insert: %s. "
    def __init__(self, *args, **keywds):
        try:
            Create.__init__(self, *args, **keywds)
        except MySQLdb.IntegrityError as error_data:
            # NOTE(review): indexing the exception (error_data[1]) is
            # Python 2-only; Python 3 would need error_data.args[1].
            self.error_message += self.MSG_INTEGRITY_ERROR % error_data[1]
            try:
                self.total_count
            except AttributeError:
                self.total_count = 0
            # Re-raise with the accumulated, human-readable message.
            raise MySQLdb.IntegrityError(self.error_message)
        self.id = self.cursor().insert_id()
        try:
            self.total_count += self.cursor().rowcount
        except AttributeError:
            self.total_count = self.cursor().rowcount
        if self.cursor().rowcount == 0:
            raise NoInsertionError
def _test(*args, **keywds):
    """Run this module's doctests (requires a local MySQL test database)."""
    import doctest
    doctest.testmod(sys.modules[__name__], *args, **keywds)
if __name__ == "__main__":
if __debug__:
_test()
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/DocSQL.py | Python | gpl-2.0 | 6,126 | [
"Biopython"
] | a84f9ba6816192dad44bee4e785d205878b6c3ca626b1c3b01456b7ad4eba06c |
########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import os
import json
from stat import S_IRUSR, S_IWUSR
import yaml
from ansible import constants as C
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.urls import open_url
from ansible.utils.display import Display
display = Display()
class NoTokenSentinel(object):
    """ Represents an ansible.cfg server with not token defined (will ignore cmdline and GALAXY_TOKEN_PATH. """
    def __new__(cls, *args, **kwargs):
        # Always return the class object itself, so "is NoTokenSentinel"
        # identity checks work regardless of "instantiation".
        return cls
class KeycloakToken(object):
    '''A token granted by a Keycloak server.
    Like sso.redhat.com as used by cloud.redhat.com
    ie Automation Hub'''
    token_type = 'Bearer'
    def __init__(self, access_token=None, auth_url=None, validate_certs=True):
        # access_token is the long-lived offline/refresh token from config;
        # _token caches the short-lived bearer token obtained from auth_url.
        self.access_token = access_token
        self.auth_url = auth_url
        self._token = None
        self.validate_certs = validate_certs
    def _form_payload(self):
        """Return the form-encoded body for the token refresh request."""
        return 'grant_type=refresh_token&client_id=cloud-services&refresh_token=%s' % self.access_token
    def get(self):
        """Return a bearer token, exchanging the refresh token on first use."""
        if self._token:
            return self._token
        # - build a request to POST to auth_url
        #  - body is form encoded
        #  - 'request_token' is the offline token stored in ansible.cfg
        #  - 'grant_type' is 'refresh_token'
        #  - 'client_id' is 'cloud-services'
        #       - should probably be based on the contents of the
        #         offline_ticket's JWT payload 'aud' (audience)
        #         or 'azp' (Authorized party - the party to which the ID Token was issued)
        payload = self._form_payload()
        resp = open_url(to_native(self.auth_url),
                        data=payload,
                        validate_certs=self.validate_certs,
                        method='POST',
                        http_agent=user_agent())
        # TODO: handle auth errors
        data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
        # - extract 'access_token'
        self._token = data.get('access_token')
        return self._token
    def headers(self):
        """Return an Authorization header dict for this token."""
        headers = {}
        headers['Authorization'] = '%s %s' % (self.token_type, self.get())
        return headers
class GalaxyToken(object):
    '''Class for storing and retrieving a local galaxy token.'''
    token_type = 'Token'

    def __init__(self, token=None):
        self.b_file = to_bytes(C.GALAXY_TOKEN_PATH, errors='surrogate_or_strict')
        # Done so the config file is only opened when set/get/save is called
        self._config = None
        self._token = token

    @property
    def config(self):
        """Lazily-loaded contents of the token file, as a dict."""
        if self._config is None:
            self._config = self._read()
        # Prioritise the token passed into the constructor
        if self._token:
            self._config['token'] = None if self._token is NoTokenSentinel else self._token
        return self._config

    def _read(self):
        """Load the token file, creating it (owner read/write only) if absent."""
        action = 'Opened'
        if not os.path.isfile(self.b_file):
            # token file not found, create and chmod u+rw
            open(self.b_file, 'w').close()
            os.chmod(self.b_file, S_IRUSR | S_IWUSR)  # owner has +rw
            action = 'Created'
        with open(self.b_file, 'r') as f:
            config = yaml.safe_load(f)
        display.vvv('%s %s' % (action, to_text(self.b_file)))
        return config or {}

    def set(self, token):
        """Store *token* and persist it to the token file."""
        self._token = token
        self.save()

    def get(self):
        """Return the configured token, or None if there is none."""
        return self.config.get('token', None)

    def save(self):
        """Write the current configuration back to the token file."""
        with open(self.b_file, 'w') as f:
            yaml.safe_dump(self.config, f, default_flow_style=False)

    def headers(self):
        """Return an Authorization header dict, empty when no token is set."""
        headers = {}
        token = self.get()
        if token:
            # Reuse the token fetched above; the original re-invoked
            # self.get(), re-deriving the config lookup a second time.
            headers['Authorization'] = '%s %s' % (self.token_type, token)
        return headers
class BasicAuthToken(object):
    """HTTP Basic authentication token built from a username/password pair."""
    token_type = 'Basic'

    def __init__(self, username, password=None):
        self.username = username
        self.password = password
        self._token = None

    @staticmethod
    def _encode_token(username, password):
        """Return the base64-encoded ``user:pass`` pair as text."""
        user_part = to_text(username, errors='surrogate_or_strict')
        pass_part = to_text(password, errors='surrogate_or_strict',
                            nonstring='passthru') or ''
        credentials = "%s:%s" % (user_part, pass_part)
        b64_val = base64.b64encode(
            to_bytes(credentials, encoding='utf-8', errors='surrogate_or_strict'))
        return to_text(b64_val)

    def get(self):
        """Return the cached encoded token, computing it on first use."""
        if not self._token:
            self._token = self._encode_token(self.username, self.password)
        return self._token

    def headers(self):
        """Return an Authorization header dict for this token."""
        return {'Authorization': '%s %s' % (self.token_type, self.get())}
| azaghal/ansible | lib/ansible/galaxy/token.py | Python | gpl-3.0 | 5,736 | [
"Galaxy"
] | b6eada297fb0c43b903bdce96055d68d3d6e20122d673c92b8ad5c39032d77e1 |
import urllib
import requests
from requests import ConnectionError
from inbox.util.url import url_concat
from inbox.log import get_logger
log = get_logger()
from inbox.config import config
from inbox.basicauth import AuthError
# Google OAuth app credentials
GOOGLE_OAUTH_CLIENT_ID = config.get_required('GOOGLE_OAUTH_CLIENT_ID')
GOOGLE_OAUTH_CLIENT_SECRET = config.get_required('GOOGLE_OAUTH_CLIENT_SECRET')
REDIRECT_URI = config.get_required('GOOGLE_OAUTH_REDIRECT_URI')
OAUTH_AUTHENTICATE_URL = 'https://accounts.google.com/o/oauth2/auth'
OAUTH_ACCESS_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
OAUTH_TOKEN_VALIDATION_URL = 'https://www.googleapis.com/oauth2/v1/tokeninfo'
USER_INFO_URL = 'https://www.googleapis.com/oauth2/v1/userinfo'
OAUTH_SCOPE = ' '.join([
'https://www.googleapis.com/auth/userinfo.email', # email address
'https://www.googleapis.com/auth/userinfo.profile', # G+ profile
'https://mail.google.com/', # email
'https://www.google.com/m8/feeds', # contacts
'https://www.googleapis.com/auth/calendar' # calendar
])
class OAuthError(AuthError):
pass
class InvalidOAuthGrantError(OAuthError):
pass
def validate_token(access_token):
    """ Helper function which will validate an access token.

    Returns the token-info dict from Google, or None if the token is
    invalid or the validation endpoint could not be reached.
    """
    log.info('Validating oauth token...')
    try:
        response = requests.get(OAUTH_TOKEN_VALIDATION_URL +
                                '?access_token=' + access_token)
    except ConnectionError, e:
        log.error('Validation failed.')
        log.error(e)
        return None  # TODO better error handling here
    validation_dict = response.json()
    if 'error' in validation_dict:
        # Google only reports 'invalid_token' from this endpoint.
        assert validation_dict['error'] == 'invalid_token'
        log.error('{0} - {1}'.format(validation_dict['error'],
                                     validation_dict['error_description']))
        return None
    return validation_dict
def new_token(refresh_token):
    """ Helper function which gets a new access token from a refresh token.

    Returns an (access_token, expires_in_seconds) tuple; raises
    InvalidOAuthGrantError if the refresh token has been revoked, or
    OAuthError for any other error reported by Google.
    """
    assert refresh_token is not None, 'refresh_token required'
    log.info('Getting new oauth token...')
    args = {
        'refresh_token': refresh_token,
        'client_id': GOOGLE_OAUTH_CLIENT_ID,
        'client_secret': GOOGLE_OAUTH_CLIENT_SECRET,
        'grant_type': 'refresh_token'
    }
    try:
        headers = {'Content-type': 'application/x-www-form-urlencoded',
                   'Accept': 'text/plain'}
        data = urllib.urlencode(args)
        response = requests.post(OAUTH_ACCESS_TOKEN_URL, data=data,
                                 headers=headers)
    except requests.exceptions.HTTPError, e:
        log.error(e)  # TODO better error handling here
        raise e
    session_dict = response.json()
    if u'error' in session_dict:
        if session_dict['error'] == 'invalid_grant':
            # The refresh token was revoked or expired; re-auth is required.
            log.error('Refresh token is invalid.')
            raise InvalidOAuthGrantError('Could not get new token')
        else:
            raise OAuthError(session_dict['error'])
    return session_dict['access_token'], session_dict['expires_in']
# ------------------------------------------------------------------
# Console Support for providing link and reading response from user
# ------------------------------------------------------------------
def _show_authorize_link(email_address=None):
    """ Show authorization link.

    Prints out a message to the console containing a link that the user can
    click on that will bring them to a page that allows them to authorize
    access to their account.
    """
    args = {
        'redirect_uri': REDIRECT_URI,
        'client_id': GOOGLE_OAUTH_CLIENT_ID,
        'response_type': 'code',
        'scope': OAUTH_SCOPE,
        'access_type': 'offline',  # to get a refresh token
    }
    if email_address:
        # Pre-fill the account chooser on Google's consent page.
        args['login_hint'] = email_address
    # DEBUG
    args['approval_prompt'] = 'force'
    # Prompt user for authorization + get auth_code
    url = url_concat(OAUTH_AUTHENTICATE_URL, args)
    print ("To authorize Inbox, visit this url and follow the directions:"
           "\n\n{}").format(url)
def _user_info(access_token):
""" retrieves additional information about the user to store in the db"""
log.info('Fetching user info...')
try:
response = requests.get(USER_INFO_URL +
'?access_token=' + access_token)
except Exception, e:
log.error(e)
return None # TODO better error handling here
userinfo_dict = response.json()
if 'error' in userinfo_dict:
assert userinfo_dict['error'] == 'invalid_token'
log.error('%s - %s' % (userinfo_dict['error'],
userinfo_dict['error_description']))
return None
return userinfo_dict
def _get_authenticated_user(authorization_code):
    """Exchange an authorization code for tokens plus user/validation info.

    Returns a single dict merging the token response, the token validation
    response and the userinfo response.
    """
    log.info('Getting oauth authenticated user...')
    args = {
        'client_id': GOOGLE_OAUTH_CLIENT_ID,
        'code': authorization_code,
        'client_secret': GOOGLE_OAUTH_CLIENT_SECRET,
        'grant_type': 'authorization_code',
        'redirect_uri': REDIRECT_URI,
    }
    headers = {'Content-type': 'application/x-www-form-urlencoded',
               'Accept': 'text/plain'}
    data = urllib.urlencode(args)
    resp = requests.post(OAUTH_ACCESS_TOKEN_URL, data=data, headers=headers)
    session_dict = resp.json()
    if u'error' in session_dict:
        raise OAuthError(session_dict['error'])
    access_token = session_dict['access_token']
    validation_dict = validate_token(access_token)
    userinfo_dict = _user_info(access_token)
    # Merge all three responses into one flat dict (later keys win).
    z = session_dict.copy()
    z.update(validation_dict)
    z.update(userinfo_dict)
    return z
def oauth_authorize_console(email_address):
    """ Console I/O and checking for a user to authorize their account.

    Loops until the user pastes a valid authorization code, then returns
    the merged auth response dict from _get_authenticated_user().
    """
    _show_authorize_link(email_address)
    while True:
        auth_code = raw_input('Enter authorization code: ').strip()
        try:
            auth_response = _get_authenticated_user(auth_code)
            return auth_response
        except OAuthError:
            print "\nInvalid authorization code, try again...\n"
| abhishekgahlot/inbox | inbox/oauth.py | Python | agpl-3.0 | 6,163 | [
"VisIt"
] | d648ad9b618d5af7928102dd3e937c935c7dd15f66efb57682cadc2748bae660 |
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import zeroinstall
from zeroinstall import _
from zeroinstall.cmd import slave
from zeroinstall.support import tasks, unicode
from zeroinstall.injector.model import Interface
from zeroinstall.gtkui import help_box
import gtk
from logging import warning
from zeroinstall.gui.dialog import DialogResponse, Template
from zeroinstall.gui.impl_list import ImplementationList
import time
from zeroinstall.gui import dialog
_dialogs = {} # Interface -> Properties
class Description(object):
	"""Text widget showing a feed's description, with clickable links."""
	def __init__(self, widgets):
		description = widgets.get_widget('description')
		description.connect('button-press-event', self.button_press)
		self.buffer = description.get_buffer()
		self.heading_style = self.buffer.create_tag(underline = True, scale = 1.2)
		self.link_style = self.buffer.create_tag(underline = True, foreground = 'blue')
		description.set_size_request(-1, 100)
	def button_press(self, tv, bev):
		# On left click inside link-styled text, open the link target
		# (the text between the surrounding tag toggles) in a browser.
		if bev.type == gtk.gdk.BUTTON_PRESS and bev.button == 1:
			x, y = tv.window_to_buffer_coords(tv.get_window_type(bev.window),
							int(bev.x), int(bev.y))
			itr = tv.get_iter_at_location(x, y)
			if itr and self.link_style in itr.get_tags():
				if not itr.begins_tag(self.link_style):
					itr.backward_to_tag_toggle(self.link_style)
				end = itr.copy()
				end.forward_to_tag_toggle(self.link_style)
				target = itr.get_text(end).strip()
				from zeroinstall.gui import browser
				browser.open_in_browser(target)
	def strtime(self, secs):
		"""Format a timestamp using the locale's date/time format when available."""
		try:
			from locale import nl_langinfo, D_T_FMT
			return time.strftime(nl_langinfo(D_T_FMT), time.localtime(secs))
		except (ImportError, ValueError):
			return time.ctime(secs)
	def set_details(self, details):
		"""Replace the buffer contents with styled (style, text) pairs.

		If *details* is an exception, display its message as plain text.
		"""
		buffer = self.buffer
		heading_style = self.heading_style
		buffer.delete(buffer.get_start_iter(), buffer.get_end_iter())
		iter = buffer.get_start_iter()
		if isinstance(details, Exception):
			buffer.insert(iter, unicode(details))
			return
		for (style, text) in details:
			if style == 'heading':
				buffer.insert_with_tags(iter, text, heading_style)
			elif style == 'link':
				buffer.insert_with_tags(iter, text, self.link_style)
			else:
				buffer.insert(iter, text)
class Feeds(object):
	"""The Feeds tab: list of feed sources plus a description pane."""
	# Column indices in the list model.
	URI = 0
	ARCH = 1
	USER = 2
	def __init__(self, config, interface, widgets):
		self.config = config
		self.interface = interface
		self.model = gtk.ListStore(str, str, bool)
		self.description = Description(widgets)
		add_remote_feed_button = widgets.get_widget('add_remote_feed')
		add_remote_feed_button.connect('clicked', lambda b: add_remote_feed(config, widgets.get_widget(), interface))
		add_local_feed_button = widgets.get_widget('add_local_feed')
		add_local_feed_button.connect('clicked', lambda b: add_local_feed(config, interface))
		self.remove_feed_button = widgets.get_widget('remove_feed')
		@tasks.async
		def remove_feed(button):
			# Ask the backend to forget the selected feed, then refresh.
			try:
				model, iter = self.tv.get_selection().get_selected()
				feed_uri = model[iter][Feeds.URI]
				blocker = slave.remove_feed(interface.uri, feed_uri)
				yield blocker
				tasks.check(blocker)
				from zeroinstall.gui import main
				main.recalculate()
			except Exception as ex:
				import traceback
				traceback.print_exc()
				config.handler.report_error(ex)
		self.remove_feed_button.connect('clicked', remove_feed)
		self.tv = widgets.get_widget('feeds_list')
		self.tv.set_model(self.model)
		text = gtk.CellRendererText()
		self.tv.append_column(gtk.TreeViewColumn(_('Source'), text, text = Feeds.URI))
		self.tv.append_column(gtk.TreeViewColumn(_('Arch'), text, text = Feeds.ARCH))
		sel = self.tv.get_selection()
		sel.set_mode(gtk.SELECTION_BROWSE)
		sel.connect('changed', self.sel_changed)
		sel.select_path((0,))
		self.lines = []
	def build_model(self, details):
		"""Convert backend feed details into (uri, arch, user-registered) rows."""
		feeds = details['feeds']
		return [(feed['url'], feed['arch'], feed['type'] == 'user-registered') for feed in feeds]
	@tasks.async
	def sel_changed(self, sel):
		# Update the description pane and Remove button for the selection.
		model, miter = sel.get_selected()
		if not miter: return	# build in progress
		# Only enable removing user_override feeds
		enable_remove = model[miter][Feeds.USER]
		self.remove_feed_button.set_sensitive(enable_remove)
		feed_url = model[miter][Feeds.URI]
		try:
			blocker = slave.get_feed_description(feed_url)
			yield blocker
			tasks.check(blocker)
			self.description.set_details(blocker.result)
		except Exception as ex:
			warning("sel_changed", exc_info = ex)
			self.description.set_details(ex)
	def updated(self, details):
		"""Rebuild the feed list if the backend's feed set changed."""
		new_lines = self.build_model(details)
		if new_lines != self.lines:
			self.lines = new_lines
			self.model.clear()
			for line in self.lines:
				self.model.append(line)
			self.tv.get_selection().select_path((0,))
		else:
			# Same feeds: just refresh the description pane.
			self.sel_changed(self.tv.get_selection())
stability_to_combo_index = { None: 0, "stable": 1, "testing": 2, "developer": 3 }
class Properties(object):
	"""The per-interface properties dialog (Feeds and Versions tabs)."""
	interface = None
	use_list = None
	window = None
	driver = None
	# Suppress set_stability_policy() while we programmatically set the combo.
	ignore_stability_change = True
	def __init__(self, driver, interface, iface_name, compile, show_versions = False):
		self.driver = driver
		widgets = Template('interface_properties')
		self.interface = interface
		window = widgets.get_widget('interface_properties')
		self.window = window
		window.set_title(_('Properties for %s') % iface_name)
		window.set_default_size(-1, gtk.gdk.screen_height() / 3)
		self.compile_button = widgets.get_widget('compile')
		self.compile_button.connect('clicked', lambda b: compile(interface))
		window.set_default_response(gtk.RESPONSE_CANCEL)
		def response(dialog, resp):
			if resp == gtk.RESPONSE_CANCEL:
				window.destroy()
			elif resp == gtk.RESPONSE_HELP:
				properties_help.display()
		window.connect('response', response)
		notebook = widgets.get_widget('interface_notebook')
		assert notebook
		self.feeds = Feeds(driver.config, interface, widgets)
		stability = widgets.get_widget('preferred_stability')
		self.stability = stability
		stability.connect('changed', lambda *args: self.ignore_stability_change or self.set_stability_policy())
		self.use_list = ImplementationList(driver, interface, widgets)
		self.feeds.tv.grab_focus()
		# Keep the dialog in sync with solver results; detach on close.
		window.connect('destroy', lambda s: driver.watchers.remove(self.update))
		driver.watchers.append(self.update)
		self.update()
		if show_versions:
			notebook.next_page()
	@tasks.async
	def set_stability_policy(self):
		"""Send the user's chosen stability policy to the backend."""
		try:
			i = self.stability.get_active()
			if i == 0:
				# First combo entry means "use the default policy".
				new_stability = None
			else:
				new_stability = ['stable', 'testing', 'developer'][i-1]
			blocker = slave.invoke_master(["set-stability-policy", self.interface.uri, new_stability])
			yield blocker
			tasks.check(blocker)
			from zeroinstall.gui import main
			main.recalculate()
		except Exception as ex:
			warning("set_stability_policy", exc_info = ex)
	def show(self):
		self.window.show()
	def destroy(self):
		self.window.destroy()
	@tasks.async
	def update(self):
		"""Refresh all widgets from the backend's component details."""
		try:
			blocker = slave.get_component_details(self.interface.uri)
			yield blocker
			tasks.check(blocker)
			self.details = blocker.result
			i = stability_to_combo_index[self.details['stability-policy']]
			# Setting the combo fires 'changed'; don't echo it back.
			self.ignore_stability_change = True
			self.stability.set_active(i)
			self.ignore_stability_change = False
			self.use_list.update(self.details)
			self.feeds.updated(self.details)
			self.compile_button.set_sensitive(self.details['may-compile'])
		except:
			warning("update failed", exc_info = True)
@tasks.async
def add_remote_feed(config, parent, interface):
	"""Prompt for a feed URL and register it with *interface*.

	Shows a modal dialog; on OK, asks the backend to fetch and register the
	feed, displaying any error inline and re-prompting until cancelled.
	"""
	try:
		d = gtk.MessageDialog(parent, 0, gtk.MESSAGE_QUESTION, gtk.BUTTONS_CANCEL,
					_('Enter the URL of the new source of implementations of this interface:'))
		d.add_button(gtk.STOCK_ADD, gtk.RESPONSE_OK)
		d.set_default_response(gtk.RESPONSE_OK)
		entry = gtk.Entry()
		align = gtk.VBox(False, 0)
		align.set_border_width(4)
		align.add(entry)
		d.vbox.pack_start(align)
		entry.set_activates_default(True)
		entry.set_text('')
		d.vbox.show_all()
		error_label = gtk.Label('')
		error_label.set_padding(4, 4)
		align.pack_start(error_label)
		d.show()
		def error(message):
			# Show or hide the inline error label.
			if message:
				error_label.set_text(message)
				error_label.show()
			else:
				error_label.hide()
		while True:
			got_response = DialogResponse(d)
			yield got_response
			tasks.check(got_response)
			resp = got_response.response
			error(None)
			if resp == gtk.RESPONSE_OK:
				try:
					url = entry.get_text()
					if not url:
						raise zeroinstall.SafeException(_('Enter a URL'))
					fetch = slave.add_remote_feed(interface.uri, url)
					if fetch:
						# Grey the dialog out while the download runs.
						d.set_sensitive(False)
						yield fetch
						d.set_sensitive(True)
						tasks.check(fetch)
						d.destroy()
						from zeroinstall.gui import main
						main.recalculate()
				except zeroinstall.SafeException as ex:
					error(str(ex))
			else:
				d.destroy()
				return
	except Exception as ex:
		import traceback
		traceback.print_exc()
		config.handler.report_error(ex)
def add_local_feed(config, interface):
	"""Let the user pick a local XML feed file and register it with *interface*."""
	chooser = gtk.FileChooserDialog(_('Select XML feed file'), action=gtk.FILE_CHOOSER_ACTION_OPEN, buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
	@tasks.async
	def ok(feed, config = config, interface = interface, chooser = chooser):
		# Register the chosen file with the backend, then refresh the GUI.
		try:
			blocker = slave.add_local_feed(interface.uri, feed)
			yield blocker
			tasks.check(blocker)
			chooser.destroy()
			from zeroinstall.gui import main
			main.recalculate()
		except Exception as ex:
			dialog.alert(None, _("Error in feed file '%(feed)s':\n\n%(exception)s") % {'feed': feed, 'exception': str(ex)})
	def check_response(widget, response, ok = ok):
		if response == gtk.RESPONSE_OK:
			ok(widget.get_filename())
		elif response == gtk.RESPONSE_CANCEL:
			widget.destroy()
	chooser.connect('response', check_response)
	chooser.show()
def edit(driver, interface, iface_name, compile, show_versions = False):
    """Open (or re-open) the Properties dialog for *interface*.

    Only one dialog per interface is kept: an existing one is destroyed
    before the replacement is created and recorded in _dialogs.
    """
    assert isinstance(interface, Interface)
    if interface in _dialogs:
        _dialogs[interface].destroy()
    dialog = Properties(driver, interface, iface_name, compile, show_versions = show_versions)
    _dialogs[interface] = dialog
    dialog.show()
# Help text shown by the Properties dialog's Help button.  Each entry is a
# (title, body) pair rendered by help_box.HelpBox.  The string content below
# is user-visible (and translated via _), so edit with care.
properties_help = help_box.HelpBox(_("Injector Properties Help"),
(_('Interface properties'), '\n' +
_("""This window displays information about an interface. There are two tabs at the top: \
Feeds shows the places where the injector looks for implementations of the interface, while \
Versions shows the list of implementations found (from all feeds) in order of preference.""")),
(_('The Feeds tab'), '\n' +
_("""At the top is a list of feeds. By default, the injector uses the full name of the interface \
as the default feed location (so if you ask it to run the program "http://foo/bar.xml" then it will \
by default get the list of versions by downloading "http://foo/bar.xml".
You can add and remove feeds using the buttons on the right. The main feed may also add \
some extra feeds itself. If you've checked out a developer version of a program, you can use \
the 'Add Local Feed...' button to let the injector know about it, for example.
Below the list of feeds is a box describing the selected one:
- At the top is its short name.
- Below that is the address (a URL or filename).
- 'Last upstream change' shows the version of the cached copy of the interface file.
- 'Last checked' is the last time a fresh copy of the upstream interface file was \
downloaded.
- Then there is a longer description of the interface.""")),
(_('The Versions tab'), '\n' +
_("""This tab shows a list of all known implementations of the interface, from all the feeds. \
The columns have the following meanings:
Version gives the version number. High-numbered versions are considered to be \
better than low-numbered ones.
Released gives the date this entry was added to the feed.
Stability is 'stable' if the implementation is believed to be stable, 'buggy' if \
it is known to contain serious bugs, and 'testing' if its stability is not yet \
known. This information is normally supplied and updated by the author of the \
software, but you can override their rating by right-clicking here (overridden \
values are shown in upper-case). You can also use the special level 'preferred'.
Fetch indicates how much data needs to be downloaded to get this version if you don't \
have it. If the implementation has already been downloaded to your computer, \
it will say (cached). (local) means that you installed this version manually and \
told Zero Install about it by adding a feed. (package) means that this version \
is provided by your distribution's package manager, not by Zero Install. \
In off-line mode, only cached implementations are considered for use.
Arch indicates what kind of computer system the implementation is for, or 'any' \
if it works with all types of system.
If you want to know why a particular version wasn't chosen, right-click over it \
and choose "Explain this decision" from the popup menu.
""") + '\n'),
(_('Sort order'), '\n' +
_("""The implementations are ordered by version number (highest first), with the \
currently selected one in bold. This is the "best" usable version.
Unusable ones are those for incompatible \
architectures, those marked as 'buggy' or 'insecure', versions explicitly marked as incompatible with \
another interface you are using and, in off-line mode, uncached implementations. Unusable \
implementations are shown crossed out.
For the usable implementations, the order is as follows:
- Preferred implementations come first.
- Then, if network use is set to 'Minimal', cached implementations come before \
non-cached.
- Then, implementations at or above the selected stability level come before all others.
- Then, higher-numbered versions come before low-numbered ones.
- Then cached come before non-cached (for 'Full' network use mode).""") + '\n'),
(_('Compiling'), '\n' +
_("""If there is no binary available for your system then you may be able to compile one from \
source by clicking on the Compile button. If no source is available, the Compile button will \
be shown shaded.""") + '\n'))
| afb/0install | zeroinstall/gui/properties.py | Python | lgpl-2.1 | 14,097 | [
"VisIt"
] | 9cde02e313ac7aa19ee99e0095514500d26d08ea8bd2adf6b3be6f6c21cd1211 |
# Copyright (c) 2003-2014 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""some various utilities and helper classes, most of them used in the
main pylint class
"""
from __future__ import print_function
import collections
import os
import re
import sys
import tokenize
import warnings
from os.path import dirname, basename, splitext, exists, isdir, join, normpath
import six
from six.moves import zip # pylint: disable=redefined-builtin
from logilab.common.interface import implements
from logilab.common.textutils import normalize_text
from logilab.common.configuration import rest_format_section
from logilab.common.ureports import Section
from astroid import nodes, Module
from astroid.modutils import modpath_from_file, get_module_files, \
file_from_modpath, load_module_from_file
from pylint.interfaces import IRawChecker, ITokenChecker, UNDEFINED
class UnknownMessage(Exception):
    """Raised when an unregistered message id is encountered."""
class EmptyReport(Exception):
    """Raised when a report is empty and so should not be displayed."""
# Mapping from one-letter message category id to its long name.
MSG_TYPES = {
    'I' : 'info',
    'C' : 'convention',
    'R' : 'refactor',
    'W' : 'warning',
    'E' : 'error',
    'F' : 'fatal'
}

# Reverse mapping: long category name (lower case) -> one-letter id.
MSG_TYPES_LONG = {v: k for k, v in six.iteritems(MSG_TYPES)}

# Bit flag OR-ed into the process exit status for each emitted category.
MSG_TYPES_STATUS = {
    'I' : 0,
    'C' : 16,
    'R' : 8,
    'W' : 4,
    'E' : 2,
    'F' : 1
}

# Display order of message categories (used as a sort key).
_MSG_ORDER = 'EWRCIF'

# Scopes at which a message may have been enabled/disabled; see
# MessagesHandlerMixIn.get_message_state_scope.
MSG_STATE_SCOPE_CONFIG = 0
MSG_STATE_SCOPE_MODULE = 1
MSG_STATE_CONFIDENCE = 2

# Matches inline "# pylint: ..." control comments.
OPTION_RGX = re.compile(r'\s*#.*\bpylint:(.*)')

# The line/node distinction does not apply to fatal errors and reports.
_SCOPE_EXEMPT = 'FR'


class WarningScope(object):
    # Whether a message definition is anchored to a single source line or
    # to an AST node (node-based messages can cover a whole block).
    LINE = 'line-based-msg'
    NODE = 'node-based-msg'


# Field layout shared by the Message namedtuple below.
_MsgBase = collections.namedtuple(
    '_MsgBase',
    ['msg_id', 'symbol', 'msg', 'C', 'category', 'confidence',
     'abspath', 'path', 'module', 'obj', 'line', 'column'])
class Message(_MsgBase):
    """This class represent a message to be issued by the reporters"""

    def __new__(cls, msg_id, symbol, location, msg, confidence):
        # *location* is the (abspath, path, module, obj, line, column)
        # tuple; it fills the trailing namedtuple fields.
        return _MsgBase.__new__(
            cls, msg_id, symbol, msg, msg_id[0], MSG_TYPES[msg_id[0]],
            confidence, *location)

    def format(self, template):
        """Format the message according to the given template.

        The template format is the one of the format method :
        cf. http://docs.python.org/2/library/string.html#formatstrings
        """
        # For some reason, _asdict on derived namedtuples does not work with
        # Python 3.4. Needs some investigation.
        return template.format(**dict(zip(self._fields, self)))
def get_module_and_frameid(node):
    """return the module name and the frame id in the module"""
    scopes = []
    modname = ''
    current = node.frame()
    # Climb the chain of enclosing frames, recording each scope's name;
    # the top-most Module frame supplies the module name instead.
    while current:
        if isinstance(current, Module):
            modname = current.name
        else:
            scopes.append(getattr(current, 'name', '<lambda>'))
        try:
            current = current.parent.frame()
        except AttributeError:
            current = None
    # Names were collected innermost-first; emit them outermost-first.
    return modname, '.'.join(reversed(scopes))
def category_id(cid):
    """Return the one-letter category id for *cid*, or None if unknown.

    *cid* may be either a one-letter id ('W') or a long category name
    ('warning'); matching is case-insensitive.
    """
    cid = cid.upper()
    if cid in MSG_TYPES:
        return cid
    # MSG_TYPES_LONG is keyed by the lower-case long names ('warning', ...),
    # so the lookup must use lower case; the previous upper-cased lookup
    # could never match, making long category names unusable in
    # enable/disable directives.
    return MSG_TYPES_LONG.get(cid.lower())
def tokenize_module(module):
    """Return the list of tokens for *module*'s source stream."""
    stream = module.file_stream
    stream.seek(0)
    if sys.version_info >= (3, 0):
        # Python 3: tokenize() reads raw bytes and handles the encoding
        # declaration itself.
        return list(tokenize.tokenize(stream.readline))
    # Python 2: decode each raw line ourselves when an encoding is known.
    if module.file_encoding is not None:
        encoding = module.file_encoding
        readline = lambda: stream.readline().decode(encoding, 'replace')
    else:
        readline = stream.readline
    return list(tokenize.generate_tokens(readline))
def build_message_def(checker, msgid, msg_tuple):
    """Build a MessageDefinition from one entry of a checker's msgs dict.

    *msg_tuple* is (msg, symbol, descr[, options]); legacy two-element
    tuples without a symbolic name are still accepted, with a
    DeprecationWarning.
    """
    # Raw/token checkers report by line; AST checkers report by node.
    if implements(checker, (IRawChecker, ITokenChecker)):
        default_scope = WarningScope.LINE
    else:
        default_scope = WarningScope.NODE
    options = {}
    if len(msg_tuple) > 3:
        (msg, symbol, descr, options) = msg_tuple
    elif len(msg_tuple) > 2:
        (msg, symbol, descr) = msg_tuple[:3]
    else:
        # messages should have a symbol, but for backward compatibility
        # they may not.
        (msg, descr) = msg_tuple
        warnings.warn("[pylint 0.26] description of message %s doesn't include "
                      "a symbolic name" % msgid, DeprecationWarning)
        symbol = None
    options.setdefault('scope', default_scope)
    return MessageDefinition(checker, msgid, msg, descr, symbol, **options)
class MessageDefinition(object):
    """Static description of one pylint message: id, text, scope and the
    Python-version constraints under which it can be emitted."""

    def __init__(self, checker, msgid, msg, descr, symbol, scope,
                 minversion=None, maxversion=None, old_names=None):
        self.checker = checker
        assert len(msgid) == 5, 'Invalid message id %s' % msgid
        assert msgid[0] in MSG_TYPES, \
            'Bad message type %s in %r' % (msgid[0], msgid)
        self.msgid = msgid
        self.msg = msg
        self.descr = descr
        self.symbol = symbol
        self.scope = scope
        # min/max interpreter versions (sys.version_info-style tuples)
        # between which this message may be emitted.
        self.minversion = minversion
        self.maxversion = maxversion
        self.old_names = old_names or []

    def may_be_emitted(self):
        """return True if message may be emitted using the current interpreter"""
        if self.minversion is not None and self.minversion > sys.version_info:
            return False
        if self.maxversion is not None and self.maxversion <= sys.version_info:
            return False
        return True

    def format_help(self, checkerref=False):
        """return the help string for the given message id"""
        desc = self.descr
        if checkerref:
            desc += ' This message belongs to the %s checker.' % \
                self.checker.name
        title = self.msg
        if self.symbol:
            msgid = '%s (%s)' % (self.symbol, self.msgid)
        else:
            msgid = self.msgid
        if self.minversion or self.maxversion:
            restr = []
            if self.minversion:
                restr.append('< %s' % '.'.join([str(n) for n in self.minversion]))
            if self.maxversion:
                restr.append('>= %s' % '.'.join([str(n) for n in self.maxversion]))
            restr = ' or '.join(restr)
            if checkerref:
                desc += " It can't be emitted when using Python %s." % restr
            else:
                desc += " This message can't be emitted when using Python %s." % restr
        desc = normalize_text(' '.join(desc.split()), indent=' ')
        # A '%s' title means the message text is fully dynamic; omit it.
        if title != '%s':
            title = title.splitlines()[0]
            return ':%s: *%s*\n%s' % (msgid, title, desc)
        return ':%s:\n%s' % (msgid, desc)
class MessagesHandlerMixIn(object):
    """a mix-in class containing all the messages related methods for the main
    lint class
    """

    def __init__(self):
        # message id -> enabled? at configuration (package) scope;
        # module-level state lives in self.file_state instead.
        self._msgs_state = {}
        self.msg_status = 0

    def disable(self, msgid, scope='package', line=None, ignore_unknown=False):
        """don't output message of the given id"""
        assert scope in ('package', 'module')
        # handle disable=all by disabling all categories
        if msgid == 'all':
            for msgid in MSG_TYPES:
                self.disable(msgid, scope, line)
            return
        # msgid is a category?
        catid = category_id(msgid)
        if catid is not None:
            for _msgid in self.msgs_store._msgs_by_category.get(catid):
                self.disable(_msgid, scope, line)
            return
        # msgid is a checker name?
        if msgid.lower() in self._checkers:
            msgs_store = self.msgs_store
            for checker in self._checkers[msgid.lower()]:
                for _msgid in checker.msgs:
                    if _msgid in msgs_store._alternative_names:
                        self.disable(_msgid, scope, line)
            return
        # msgid is report id?
        if msgid.lower().startswith('rp'):
            self.disable_report(msgid)
            return
        try:
            # msgid is a symbolic or numeric msgid.
            msg = self.msgs_store.check_message_id(msgid)
        except UnknownMessage:
            if ignore_unknown:
                return
            raise
        if scope == 'module':
            self.file_state.set_msg_status(msg, line, False)
            # avoid emitting locally-disabled about disabling itself
            if msg.symbol != 'locally-disabled':
                self.add_message('locally-disabled', line=line,
                                 args=(msg.symbol, msg.msgid))
        else:
            msgs = self._msgs_state
            msgs[msg.msgid] = False
            # sync configuration object
            self.config.disable_msg = [mid for mid, val in six.iteritems(msgs)
                                       if not val]

    def enable(self, msgid, scope='package', line=None, ignore_unknown=False):
        """reenable message of the given id"""
        assert scope in ('package', 'module')
        catid = category_id(msgid)
        # msgid is a category?
        if catid is not None:
            for msgid in self.msgs_store._msgs_by_category.get(catid):
                self.enable(msgid, scope, line)
            return
        # msgid is a checker name?
        if msgid.lower() in self._checkers:
            for checker in self._checkers[msgid.lower()]:
                for msgid_ in checker.msgs:
                    self.enable(msgid_, scope, line)
            return
        # msgid is report id?
        if msgid.lower().startswith('rp'):
            self.enable_report(msgid)
            return
        try:
            # msgid is a symbolic or numeric msgid.
            msg = self.msgs_store.check_message_id(msgid)
        except UnknownMessage:
            if ignore_unknown:
                return
            raise
        if scope == 'module':
            self.file_state.set_msg_status(msg, line, True)
            self.add_message('locally-enabled', line=line, args=(msg.symbol, msg.msgid))
        else:
            msgs = self._msgs_state
            msgs[msg.msgid] = True
            # sync configuration object
            self.config.enable = [mid for mid, val in six.iteritems(msgs) if val]

    def get_message_state_scope(self, msgid, line=None, confidence=UNDEFINED):
        """Returns the scope at which a message was enabled/disabled."""
        if self.config.confidence and confidence.name not in self.config.confidence:
            return MSG_STATE_CONFIDENCE
        try:
            if line in self.file_state._module_msgs_state[msgid]:
                return MSG_STATE_SCOPE_MODULE
        except (KeyError, TypeError):
            return MSG_STATE_SCOPE_CONFIG

    def is_message_enabled(self, msg_descr, line=None, confidence=None):
        """return true if the message associated to the given message id is
        enabled

        msgid may be either a numeric or symbolic message id.
        """
        if self.config.confidence and confidence:
            if confidence.name not in self.config.confidence:
                return False
        try:
            msgid = self.msgs_store.check_message_id(msg_descr).msgid
        except UnknownMessage:
            # The linter checks for messages that are not registered
            # due to version mismatch, just treat them as message IDs
            # for now.
            msgid = msg_descr
        if line is None:
            return self._msgs_state.get(msgid, True)
        try:
            return self.file_state._module_msgs_state[msgid][line]
        except KeyError:
            # no module-level override for this line: fall back to the
            # configuration-scope state (enabled by default).
            return self._msgs_state.get(msgid, True)

    def add_message(self, msg_descr, line=None, node=None, args=None, confidence=UNDEFINED):
        """Adds a message given by ID or name.

        If provided, the message string is expanded using args

        AST checkers should must the node argument (but may optionally
        provide line if the line number is different), raw and token checkers
        must provide the line argument.
        """
        msg_info = self.msgs_store.check_message_id(msg_descr)
        msgid = msg_info.msgid
        # backward compatibility, message may not have a symbol
        symbol = msg_info.symbol or msgid
        # Fatal messages and reports are special, the node/scope distinction
        # does not apply to them.
        if msgid[0] not in _SCOPE_EXEMPT:
            if msg_info.scope == WarningScope.LINE:
                assert node is None and line is not None, (
                    'Message %s must only provide line, got line=%s, node=%s' % (msgid, line, node))
            elif msg_info.scope == WarningScope.NODE:
                # Node-based warnings may provide an override line.
                # NOTE(review): the assertion text below is never %-formatted,
                # so the '%s' placeholder is shown literally when it fires.
                assert node is not None, 'Message %s must provide Node, got None'
        if line is None and node is not None:
            line = node.fromlineno
        if hasattr(node, 'col_offset'):
            col_offset = node.col_offset # XXX measured in bytes for utf-8, divide by two for chars?
        else:
            col_offset = None
        # should this message be displayed
        if not self.is_message_enabled(msgid, line, confidence):
            self.file_state.handle_ignored_message(
                self.get_message_state_scope(msgid, line, confidence),
                msgid, line, node, args, confidence)
            return
        # update stats
        msg_cat = MSG_TYPES[msgid[0]]
        self.msg_status |= MSG_TYPES_STATUS[msgid[0]]
        self.stats[msg_cat] += 1
        self.stats['by_module'][self.current_name][msg_cat] += 1
        try:
            self.stats['by_msg'][symbol] += 1
        except KeyError:
            self.stats['by_msg'][symbol] = 1
        # expand message ?
        msg = msg_info.msg
        if args:
            msg %= args
        # get module and object
        if node is None:
            module, obj = self.current_name, ''
            abspath = self.current_file
        else:
            module, obj = get_module_and_frameid(node)
            abspath = node.root().file
        path = abspath.replace(self.reporter.path_strip_prefix, '')
        # add the message
        self.reporter.handle_message(
            Message(msgid, symbol,
                    (abspath, path, module, obj, line or 1, col_offset or 0), msg, confidence))

    def print_full_documentation(self):
        """output a full documentation in ReST format"""
        print("Pylint global options and switches")
        print("----------------------------------")
        print("")
        print("Pylint provides global options and switches.")
        print("")
        by_checker = {}
        for checker in self.get_checkers():
            if checker.name == 'master':
                # 'master' holds the global options, printed up front.
                if checker.options:
                    for section, options in checker.options_by_section():
                        if section is None:
                            title = 'General options'
                        else:
                            title = '%s options' % section.capitalize()
                        print(title)
                        print('~' * len(title))
                        rest_format_section(sys.stdout, None, options)
                        print("")
            else:
                # merge entries for checkers registered under the same name
                try:
                    by_checker[checker.name][0] += checker.options_and_values()
                    by_checker[checker.name][1].update(checker.msgs)
                    by_checker[checker.name][2] += checker.reports
                except KeyError:
                    by_checker[checker.name] = [list(checker.options_and_values()),
                                                dict(checker.msgs),
                                                list(checker.reports)]
        print("Pylint checkers' options and switches")
        print("-------------------------------------")
        print("")
        print("Pylint checkers can provide three set of features:")
        print("")
        print("* options that control their execution,")
        print("* messages that they can raise,")
        print("* reports that they can generate.")
        print("")
        print("Below is a list of all checkers and their features.")
        print("")
        for checker, (options, msgs, reports) in six.iteritems(by_checker):
            title = '%s checker' % (checker.replace("_", " ").title())
            print(title)
            print('~' * len(title))
            print("")
            print("Verbatim name of the checker is ``%s``." % checker)
            print("")
            if options:
                title = 'Options'
                print(title)
                print('^' * len(title))
                rest_format_section(sys.stdout, None, options)
                print("")
            if msgs:
                title = 'Messages'
                print(title)
                print('~' * len(title))
                for msgid, msg in sorted(six.iteritems(msgs),
                                         key=lambda kv: (_MSG_ORDER.index(kv[0][0]), kv[1])):
                    msg = build_message_def(checker, msgid, msg)
                    print(msg.format_help(checkerref=False))
                print("")
            if reports:
                title = 'Reports'
                print(title)
                print('~' * len(title))
                for report in reports:
                    print(':%s: %s' % report[:2])
                print("")
            print("")
class FileState(object):
    """Hold internal state specific to the currently analyzed file"""

    def __init__(self, modname=None):
        self.base_name = modname
        # msgid -> {line: enabled?} after block expansion
        self._module_msgs_state = {}
        # snapshot of the per-line state before block expansion
        self._raw_module_msgs_state = {}
        # (msgid, disabling line) -> lines actually suppressed by it
        self._ignored_msgs = collections.defaultdict(set)
        # (msgid, line) -> line of the pragma that disabled it
        self._suppression_mapping = {}

    def collect_block_lines(self, msgs_store, module_node):
        """Walk the AST to collect block level options line numbers."""
        for msg, lines in six.iteritems(self._module_msgs_state):
            self._raw_module_msgs_state[msg] = lines.copy()
        orig_state = self._module_msgs_state.copy()
        self._module_msgs_state = {}
        self._suppression_mapping = {}
        self._collect_block_lines(msgs_store, module_node, orig_state)

    def _collect_block_lines(self, msgs_store, node, msg_state):
        """Recursivly walk (depth first) AST to collect block level options line
        numbers.
        """
        for child in node.get_children():
            self._collect_block_lines(msgs_store, child, msg_state)
        first = node.fromlineno
        last = node.tolineno
        # first child line number used to distinguish between disable
        # which are the first child of scoped node with those defined later.
        # For instance in the code below:
        #
        # 1.   def meth8(self):
        # 2.        """test late disabling"""
        # 3.        # pylint: disable=E1102
        # 4.        print self.blip
        # 5.        # pylint: disable=E1101
        # 6.        print self.bla
        #
        # E1102 should be disabled from line 1 to 6 while E1101 from line 5 to 6
        #
        # this is necessary to disable locally messages applying to class /
        # function using their fromlineno
        if isinstance(node, (nodes.Module, nodes.Class, nodes.Function)) and node.body:
            firstchildlineno = node.body[0].fromlineno
        else:
            firstchildlineno = last
        for msgid, lines in six.iteritems(msg_state):
            for lineno, state in list(lines.items()):
                original_lineno = lineno
                if first <= lineno <= last:
                    # Set state for all lines for this block, if the
                    # warning is applied to nodes.
                    if msgs_store.check_message_id(msgid).scope == WarningScope.NODE:
                        if lineno > firstchildlineno:
                            state = True
                        first_, last_ = node.block_range(lineno)
                    else:
                        first_ = lineno
                        last_ = last
                    for line in range(first_, last_+1):
                        # do not override existing entries
                        if not line in self._module_msgs_state.get(msgid, ()):
                            if line in lines: # state change in the same block
                                state = lines[line]
                                original_lineno = line
                            if not state:
                                self._suppression_mapping[(msgid, line)] = original_lineno
                            try:
                                self._module_msgs_state[msgid][line] = state
                            except KeyError:
                                self._module_msgs_state[msgid] = {line: state}
                    del lines[lineno]

    def set_msg_status(self, msg, line, status):
        """Set status (enabled/disable) for a given message at a given line"""
        assert line > 0
        try:
            self._module_msgs_state[msg.msgid][line] = status
        except KeyError:
            self._module_msgs_state[msg.msgid] = {line: status}

    def handle_ignored_message(self, state_scope, msgid, line,
                               node, args, confidence): # pylint: disable=unused-argument
        """Report an ignored message.

        state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG,
        depending on whether the message was disabled locally in the module,
        or globally. The other arguments are the same as for add_message.
        """
        if state_scope == MSG_STATE_SCOPE_MODULE:
            try:
                orig_line = self._suppression_mapping[(msgid, line)]
                self._ignored_msgs[(msgid, orig_line)].add(line)
            except KeyError:
                pass

    def iter_spurious_suppression_messages(self, msgs_store):
        # yield useless-suppression for disables that never suppressed
        # anything, then suppressed-message for each actual suppression.
        for warning, lines in six.iteritems(self._raw_module_msgs_state):
            for line, enable in six.iteritems(lines):
                if not enable and (warning, line) not in self._ignored_msgs:
                    yield 'useless-suppression', line, \
                        (msgs_store.get_msg_display_string(warning),)
        # don't use iteritems here, _ignored_msgs may be modified by add_message
        for (warning, from_), lines in list(self._ignored_msgs.items()):
            for line in lines:
                yield 'suppressed-message', line, \
                    (msgs_store.get_msg_display_string(warning), from_)
class MessagesStore(object):
    """The messages store knows information about every possible message but has
    no particular state during analysis.
    """

    def __init__(self):
        # Primary registry for all active messages (i.e. all messages
        # that can be emitted by pylint for the underlying Python
        # version). It contains the 1:1 mapping from symbolic names
        # to message definition objects.
        self._messages = {}
        # Maps alternative names (numeric IDs, deprecated names) to
        # message definitions. May contain several names for each definition
        # object.
        self._alternative_names = {}
        # one-letter category id -> list of message ids in that category
        self._msgs_by_category = collections.defaultdict(list)

    @property
    def messages(self):
        """The list of all active messages."""
        return six.itervalues(self._messages)

    def add_renamed_message(self, old_id, old_symbol, new_symbol):
        """Register the old ID and symbol for a warning that was renamed.

        This allows users to keep using the old ID/symbol in suppressions.
        """
        msg = self.check_message_id(new_symbol)
        msg.old_names.append((old_id, old_symbol))
        self._alternative_names[old_id] = msg
        self._alternative_names[old_symbol] = msg

    def register_messages(self, checker):
        """register a dictionary of messages

        Keys are message ids, values are a 2-uple with the message type and the
        message itself

        message ids should be a string of len 4, where the two first characters
        are the checker id and the two last the message id in this checker
        """
        chkid = None
        for msgid, msg_tuple in six.iteritems(checker.msgs):
            msg = build_message_def(checker, msgid, msg_tuple)
            assert msg.symbol not in self._messages, \
                'Message symbol %r is already defined' % msg.symbol
            # avoid duplicate / malformed ids
            assert msg.msgid not in self._alternative_names, \
                'Message id %r is already defined' % msgid
            assert chkid is None or chkid == msg.msgid[1:3], \
                'Inconsistent checker part in message id %r' % msgid
            chkid = msg.msgid[1:3]
            self._messages[msg.symbol] = msg
            self._alternative_names[msg.msgid] = msg
            for old_id, old_symbol in msg.old_names:
                self._alternative_names[old_id] = msg
                self._alternative_names[old_symbol] = msg
            self._msgs_by_category[msg.msgid[0]].append(msg.msgid)

    def check_message_id(self, msgid):
        """returns the Message object for this message.

        msgid may be either a numeric or symbolic id.

        Raises UnknownMessage if the message id is not defined.
        """
        # numeric ids are case-insensitive; symbols are not
        if msgid[1:].isdigit():
            msgid = msgid.upper()
        for source in (self._alternative_names, self._messages):
            try:
                return source[msgid]
            except KeyError:
                pass
        raise UnknownMessage('No such message id %s' % msgid)

    def get_msg_display_string(self, msgid):
        """Generates a user-consumable representation of a message.

        Can be just the message ID or the ID and the symbol.
        """
        return repr(self.check_message_id(msgid).symbol)

    def help_message(self, msgids):
        """display help messages for the given message identifiers"""
        for msgid in msgids:
            try:
                print(self.check_message_id(msgid).format_help(checkerref=True))
                print("")
            except UnknownMessage as ex:
                print(ex)
                print("")
                continue

    def list_messages(self):
        """output full messages list documentation in ReST format"""
        msgs = sorted(six.itervalues(self._messages), key=lambda msg: msg.msgid)
        for msg in msgs:
            if not msg.may_be_emitted():
                continue
            print(msg.format_help(checkerref=False))
        print("")
class ReportsHandlerMixIn(object):
    """a mix-in class containing all the reports and stats manipulation
    related methods for the main lint class
    """

    def __init__(self):
        # checker -> list of (reportid, title, callback)
        self._reports = collections.defaultdict(list)
        # reportid -> enabled? (absent means enabled)
        self._reports_state = {}

    def report_order(self):
        """ Return a list of reports, sorted in the order
        in which they must be called.
        """
        return list(self._reports)

    def register_report(self, reportid, r_title, r_cb, checker):
        """register a report

        reportid is the unique identifier for the report
        r_title the report's title
        r_cb the method to call to make the report
        checker is the checker defining the report
        """
        reportid = reportid.upper()
        self._reports[checker].append((reportid, r_title, r_cb))

    def enable_report(self, reportid):
        """enable the report of the given id"""
        reportid = reportid.upper()
        self._reports_state[reportid] = True

    def disable_report(self, reportid):
        """disable the report of the given id"""
        reportid = reportid.upper()
        self._reports_state[reportid] = False

    def report_is_enabled(self, reportid):
        """return true if the report associated to the given identifier is
        enabled
        """
        return self._reports_state.get(reportid, True)

    def make_reports(self, stats, old_stats):
        """render registered reports"""
        sect = Section('Report',
                       '%s statements analysed.'% (self.stats['statement']))
        for checker in self.report_order():
            for reportid, r_title, r_cb in self._reports[checker]:
                if not self.report_is_enabled(reportid):
                    continue
                report_sect = Section(r_title)
                try:
                    r_cb(report_sect, stats, old_stats)
                except EmptyReport:
                    # a callback raising EmptyReport drops its section
                    continue
                report_sect.report_id = reportid
                sect.append(report_sect)
        return sect

    def add_stats(self, **kwargs):
        """add some stats entries to the statistic dictionary
        raise an AssertionError if there is a key conflict
        """
        for key, value in six.iteritems(kwargs):
            # a trailing underscore lets callers pass reserved words
            if key[-1] == '_':
                key = key[:-1]
            assert key not in self.stats
            self.stats[key] = value
        return self.stats
def expand_modules(files_or_modules, black_list):
    """take a list of files/modules/packages and return the list of tuple
    (file, module name) which have to be actually checked

    Returns (result, errors) where each result entry is a dict with
    'path', 'name', 'isarg', 'basepath' and 'basename' keys, and each
    error entry identifies a module that could not be resolved.
    """
    result = []
    errors = []
    for something in files_or_modules:
        if exists(something):
            # this is a file or a directory
            try:
                modname = '.'.join(modpath_from_file(something))
            except ImportError:
                modname = splitext(basename(something))[0]
            if isdir(something):
                filepath = join(something, '__init__.py')
            else:
                filepath = something
        else:
            # suppose it's a module or package
            modname = something
            try:
                filepath = file_from_modpath(modname.split('.'))
                if filepath is None:
                    errors.append({'key' : 'ignored-builtin-module', 'mod': modname})
                    continue
            except (ImportError, SyntaxError) as ex:
                # FIXME p3k : the SyntaxError is a Python bug and should be
                # removed as soon as possible http://bugs.python.org/issue10588
                errors.append({'key': 'fatal', 'mod': modname, 'ex': ex})
                continue
        filepath = normpath(filepath)
        result.append({'path': filepath, 'name': modname, 'isarg': True,
                       'basepath': filepath, 'basename': modname})
        # for a package, also schedule every non-blacklisted submodule
        if not (modname.endswith('.__init__') or modname == '__init__') \
                and '__init__.py' in filepath:
            for subfilepath in get_module_files(dirname(filepath), black_list):
                if filepath == subfilepath:
                    continue
                submodname = '.'.join(modpath_from_file(subfilepath))
                result.append({'path': subfilepath, 'name': submodname,
                               'isarg': False,
                               'basepath': filepath, 'basename': modname})
    return result, errors
class PyLintASTWalker(object):
    """Walk an astroid tree, dispatching the visit_<node>/leave_<node>
    callbacks registered by checkers to each node."""

    def __init__(self, linter):
        # callbacks per node types
        self.nbstatements = 1
        self.visit_events = collections.defaultdict(list)
        self.leave_events = collections.defaultdict(list)
        self.linter = linter

    def _is_method_enabled(self, method):
        # A visit/leave method is worth calling only if at least one of
        # the messages it declares (checks_msgs) is currently enabled.
        if not hasattr(method, 'checks_msgs'):
            return True
        for msg_desc in method.checks_msgs:
            if self.linter.is_message_enabled(msg_desc):
                return True
        return False

    def add_checker(self, checker):
        """walk to the checker's dir and collect visit and leave methods"""
        # XXX : should be possible to merge needed_checkers and add_checker
        vcids = set()
        lcids = set()
        visits = self.visit_events
        leaves = self.leave_events
        for member in dir(checker):
            # both 'visit_' and 'leave_' prefixes are 6 characters long
            cid = member[6:]
            if cid == 'default':
                continue
            if member.startswith('visit_'):
                v_meth = getattr(checker, member)
                # don't use visit_methods with no activated message:
                if self._is_method_enabled(v_meth):
                    visits[cid].append(v_meth)
                    vcids.add(cid)
            elif member.startswith('leave_'):
                l_meth = getattr(checker, member)
                # don't use leave_methods with no activated message:
                if self._is_method_enabled(l_meth):
                    leaves[cid].append(l_meth)
                    lcids.add(cid)
        visit_default = getattr(checker, 'visit_default', None)
        if visit_default:
            # visit_default applies to every node type without a
            # dedicated visit_<node> callback
            for cls in nodes.ALL_NODE_CLASSES:
                cid = cls.__name__.lower()
                if cid not in vcids:
                    visits[cid].append(visit_default)
        # for now we have no "leave_default" method in Pylint

    def walk(self, astroid):
        """call visit events of astroid checkers for the given node, recurse on
        its children, then leave events.
        """
        cid = astroid.__class__.__name__.lower()
        if astroid.is_statement:
            self.nbstatements += 1
        # generate events for this node on each checker
        for cb in self.visit_events.get(cid, ()):
            cb(astroid)
        # recurse on children
        for child in astroid.get_children():
            self.walk(child)
        for cb in self.leave_events.get(cid, ()):
            cb(astroid)
# File extensions that may hold an importable plugin module.
PY_EXTS = ('.py', '.pyc', '.pyo', '.pyw', '.so', '.dll')


def register_plugins(linter, directory):
    """load all module and package in the given directory, looking for a
    'register' function in each one, used to register pylint checkers
    """
    imported = {}
    for filename in os.listdir(directory):
        base, extension = splitext(filename)
        if base in imported or base == '__pycache__':
            continue
        # a candidate is either a Python/extension module or a package dir
        if extension in PY_EXTS and base != '__init__' or (
                not extension and isdir(join(directory, base))):
            try:
                module = load_module_from_file(join(directory, filename))
            except ValueError:
                # empty module name (usually emacs auto-save files)
                continue
            except ImportError as exc:
                print("Problem importing module %s: %s" % (filename, exc),
                      file=sys.stderr)
            else:
                if hasattr(module, 'register'):
                    module.register(linter)
                imported[base] = 1
def get_global_option(checker, option, default=None):
    """Look up *option* for the given *checker*.

    The checker's own configuration wins; failing that, every registered
    options provider is searched in order. When no provider declares the
    option, *default* is returned.
    """
    attr = option.replace("-", "_")
    # the checker's own config takes precedence over all providers
    try:
        return getattr(checker.config, attr)
    except AttributeError:
        pass
    # fall back to the first provider that declares this option
    for provider in checker.linter.options_providers:
        for declared in provider.options:
            if declared[0] == option:
                return getattr(provider.config, attr)
    return default
| HackFisher/depot_tools | third_party/pylint/utils.py | Python | bsd-3-clause | 36,111 | [
"VisIt"
] | 3a6f4ff71e5f4b63dc01008f8aee7abde72caadda45fb78571ddfe89d3883531 |
(S'eb620a20b854ca69f2419f6edb7859f3'
p1
(ihappydoclib.parseinfo.moduleinfo
ModuleInfo
p2
(dp3
S'_namespaces'
p4
((dp5
S'Atom'
p6
(ihappydoclib.parseinfo.classinfo
ClassInfo
p7
(dp8
g4
((dp9
(dp10
S'__init__'
p11
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p12
(dp13
g4
((dp14
(dp15
tp16
sS'_exception_info'
p17
(dp18
sS'_parameter_names'
p19
(S'self'
p20
S'elem'
p21
S'r'
S'v'
S'f'
tp22
sS'_parameter_info'
p23
(dp24
g20
(NNNtp25
sS'r'
(I1
S'array([ 0., 0., 0.] )'
Ntp26
sg21
(NNNtp27
sS'f'
(I1
S'array([ 0., 0., 0.] )'
Ntp28
sS'v'
(I1
S'array([ 0., 0., 0.] )'
Ntp29
ssS'_filename'
p30
S'box/md.py'
p31
sS'_docstring'
p32
S'\n Initialize atom information.\n \n * elem -- the element of atom\n * r -- position-vector\n * v -- velocity-vector\n * f -- force-vector\n '
p33
sS'_name'
p34
g11
sS'_parent'
p35
g7
sS'_comment_info'
p36
(dp37
sS'_configuration_values'
p38
(dp39
sS'_class_info'
p40
g14
sS'_function_info'
p41
g15
sS'_comments'
p42
S''
sbstp43
sg30
g31
sg32
S"\n Class for atoms. \n \n Atom consists the element information, using the class\n 'Element', but contains also position, velocity etc. variable information.\n "
p44
sS'_class_member_info'
p45
(lp46
sg34
g6
sg35
g2
sg36
g37
sS'_base_class_info'
p47
(lp48
sg38
(dp49
sg40
g9
sg41
g10
sg42
S''
sbsS'Molecule'
p50
(ihappydoclib.parseinfo.classinfo
ClassInfo
p51
(dp52
g4
((dp53
(dp54
S'input_xyz'
p55
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p56
(dp57
g4
((dp58
(dp59
tp60
sg17
(dp61
sg19
(S'self'
p62
S'file'
p63
tp64
sg23
(dp65
g62
(NNNtp66
sg63
(NNNtp67
ssg30
g31
sg32
S'\n Read molecule from xyz-file. \n \n * file -- the given input xyz-file. It can be either a file name\n or a file object. If file is a file object the next "frame" is read\n without rewinding the file.\n '
p68
sg34
g55
sg35
g51
sg36
g37
sg38
(dp69
sg40
g58
sg41
g59
sg42
S''
sbsS'koe'
p70
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p71
(dp72
g4
((dp73
(dp74
tp75
sg17
(dp76
sg19
(S'self'
p77
S'kk'
p78
tp79
sg23
(dp80
g78
(NNNtp81
sg77
(NNNtp82
ssg30
g31
sg32
S''
sg34
g70
sg35
g51
sg36
g37
sg38
(dp83
sg40
g73
sg41
g74
sg42
S''
sbsS'av_bond_length'
p84
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p85
(dp86
g4
((dp87
(dp88
tp89
sg17
(dp90
sg19
(S'self'
p91
tp92
sg23
(dp93
g91
(NNNtp94
ssg30
g31
sg32
S'\n Return the average bond length using estimates\n from pair distribution function.\n '
p95
sg34
g84
sg35
g51
sg36
g37
sg38
(dp96
sg40
g87
sg41
g88
sg42
S''
sbsS'construct_bonds'
p97
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p98
(dp99
g4
((dp100
(dp101
tp102
sg17
(dp103
sg19
(S'self'
p104
tp105
sg23
(dp106
g104
(NNNtp107
ssg30
g31
sg32
S'\n Make the bonding list for the molecule.\n \n Use estimates for bond lengths from van der Waals radii.\n Make bond if R<(R_cov,1+R_cov,2)*1.2\n '
p108
sg34
g97
sg35
g51
sg36
g37
sg38
(dp109
sg40
g100
sg41
g101
sg42
S''
sbsS'output_xyz'
p110
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p111
(dp112
g4
((dp113
(dp114
tp115
sg17
(dp116
sg19
(S'self'
p117
S'file'
p118
tp119
sg23
(dp120
g117
(NNNtp121
sg118
(NNNtp122
ssg30
g31
sg32
S'\n Write the molecule into a xyz-file. \n \n * file -- the output file name or file object. If it is an file\n object, the molecule is simpy appended as next "frame".\n '
p123
sg34
g110
sg35
g51
sg36
g37
sg38
(dp124
sg40
g113
sg41
g114
sg42
S''
sbsS'__init__'
p125
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p126
(dp127
g4
((dp128
(dp129
tp130
sg17
(dp131
sg19
(S'self'
p132
S'file'
p133
S'format'
p134
S'efile'
p135
tp136
sg23
(dp137
g132
(NNNtp138
sg135
(I1
S'None'
Ntp139
sg133
(I1
S'None'
Ntp140
sg134
(I1
S"'xyz'"
Ntp141
ssg30
g31
sg32
S"\n Initialize molecule.\n \n * file -- if present, read molecule from this file\n * format -- format of the given file ('xyz','I_info', or 'dat')\n * efile -- the path for elements.dat -file used for element info\n "
p142
sg34
g125
sg35
g51
sg36
g37
sg38
(dp143
sg40
g128
sg41
g129
sg42
S''
sbsS'pair_distr_list'
p144
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p145
(dp146
g4
((dp147
(dp148
tp149
sg17
(dp150
sg19
(S'self'
p151
tp152
sg23
(dp153
g151
(NNNtp154
ssg30
g31
sg32
S'\n Return the array r_ij=|r_i-r_j| for all pairs a 1-D array.\n '
p155
sg34
g144
sg35
g51
sg36
g37
sg38
(dp156
sg40
g147
sg41
g148
sg42
S''
sbsS'add_molecule'
p157
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p158
(dp159
g4
((dp160
(dp161
tp162
sg17
(dp163
sg19
(S'self'
p164
S'mol2'
p165
tp166
sg23
(dp167
g164
(NNNtp168
sg165
(NNNtp169
ssg30
g31
sg32
S' \n Adds the atoms of another molecule to the present one.\n \n * mol2 -- the molecule the atoms of which will be added\n '
p170
sg34
g157
sg35
g51
sg36
g37
sg38
(dp171
sg40
g160
sg41
g161
sg42
S''
sbsS'r2array'
p172
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p173
(dp174
g4
((dp175
(dp176
tp177
sg17
(dp178
sg19
(S'self'
p179
tp180
sg23
(dp181
g179
(NNNtp182
ssg30
g31
sg32
S'\n Return the atom locations as an array for faster \n computations.\n '
p183
sg34
g172
sg35
g51
sg36
g37
sg38
(dp184
sg40
g175
sg41
g176
sg42
S''
sbsS'add'
p185
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p186
(dp187
g4
((dp188
(dp189
tp190
sg17
(dp191
sg19
(S'self'
p192
S'atom'
p193
tp194
sg23
(dp195
g192
(NNNtp196
sg193
(NNNtp197
ssg30
g31
sg32
S'\n Add atom into the molecule.\n \n * atom -- the atom to be added \n '
p198
sg34
g185
sg35
g51
sg36
g37
sg38
(dp199
sg40
g188
sg41
g189
sg42
S''
sbsS'output_atoms_dat_old'
p200
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p201
(dp202
g4
((dp203
(dp204
tp205
sg17
(dp206
sg19
(S'self'
p207
S'file'
p208
tp209
sg23
(dp210
g207
(NNNtp211
sg208
(NNNtp212
ssg30
g31
sg32
S'\n Writes the molecule into an atoms.dat file.\n \n This is for the older (<5.3 2007) version of atoms.dat.\n \n * file -- output file name\n '
p213
sg34
g200
sg35
g51
sg36
g37
sg38
(dp214
sg40
g203
sg41
g204
sg42
S''
sbsS'scale_r'
p215
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p216
(dp217
g4
((dp218
(dp219
tp220
sg17
(dp221
sg19
(S'self'
p222
S'x'
tp223
sg23
(dp224
S'x'
(NNNtp225
sg222
(NNNtp226
ssg30
g31
sg32
S'\n Scale all the coordinates.\n \n * x -- scaling factor.\n '
p227
sg34
g215
sg35
g51
sg36
g37
sg38
(dp228
sg40
g218
sg41
g219
sg42
S''
sbsS'__call__'
p229
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p230
(dp231
g4
((dp232
(dp233
tp234
sg17
(dp235
sg19
(S'self'
p236
tp237
sg23
(dp238
g236
(NNNtp239
ssg30
g31
sg32
S'\n Print some molecule data.\n \n Prints locations.\n '
p240
sg34
g229
sg35
g51
sg36
g37
sg38
(dp241
sg40
g232
sg41
g233
sg42
S''
sbsS'translate'
p242
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p243
(dp244
g4
((dp245
(dp246
tp247
sg17
(dp248
sg19
(S'self'
p249
S'v'
tp250
sg23
(dp251
g249
(NNNtp252
sS'v'
(NNNtp253
ssg30
g31
sg32
S'\n Translate the molecule.\n \n * v -- translate molecule by v\n '
p254
sg34
g242
sg35
g51
sg36
g37
sg38
(dp255
sg40
g245
sg41
g246
sg42
S''
sbsS'r_cm'
p256
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p257
(dp258
g4
((dp259
(dp260
tp261
sg17
(dp262
sg19
(S'self'
p263
tp264
sg23
(dp265
g263
(NNNtp266
ssg30
g31
sg32
S'\n Return the center of mass vector of the molecule.\n '
p267
sg34
g256
sg35
g51
sg36
g37
sg38
(dp268
sg40
g259
sg41
g260
sg42
S''
sbsS'energy_atoms_separated'
p269
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p270
(dp271
g4
((dp272
(dp273
tp274
sg17
(dp275
sg19
(S'self'
p276
tp277
sg23
(dp278
g276
(NNNtp279
ssg30
g31
sg32
S'\n Return the total energy of separate atoms.\n \n Sum of sp energies.\n '
p280
sg34
g269
sg35
g51
sg36
g37
sg38
(dp281
sg40
g272
sg41
g273
sg42
S''
sbsS'output_atoms_dat'
p282
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p283
(dp284
g4
((dp285
(dp286
tp287
sg17
(dp288
sg19
(S'self'
p289
S'file'
p290
tp291
sg23
(dp292
g289
(NNNtp293
sg290
(NNNtp294
ssg30
g31
sg32
S'\n Write the molecule into atoms.dat-file.\n \n * file -- output file name\n '
p295
sg34
g282
sg35
g51
sg36
g37
sg38
(dp296
sg40
g285
sg41
g286
sg42
S''
sbsS'nr_bonds'
p297
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p298
(dp299
g4
((dp300
(dp301
tp302
sg17
(dp303
sg19
(S'self'
p304
tp305
sg23
(dp306
g304
(NNNtp307
ssg30
g31
sg32
S'\n Return the number of bonds (for homonuclear molecule).\n '
p308
sg34
g297
sg35
g51
sg36
g37
sg38
(dp309
sg40
g300
sg41
g301
sg42
S''
sbsS'input_I_info'
p310
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p311
(dp312
g4
((dp313
(dp314
tp315
sg17
(dp316
sg19
(S'self'
p317
S'file'
p318
tp319
sg23
(dp320
g317
(NNNtp321
sg318
(NNNtp322
ssg30
g31
sg32
S'\n Reads molecule from I_info -file, used for Cmdft program.\n \n * file -- the I_info file for input\n '
p323
sg34
g310
sg35
g51
sg36
g37
sg38
(dp324
sg40
g313
sg41
g314
sg42
S''
sbsS'input_atoms_dat'
p325
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p326
(dp327
g4
((dp328
(dp329
tp330
sg17
(dp331
sg19
(S'self'
p332
S'file'
p333
tp334
sg23
(dp335
g332
(NNNtp336
sg333
(NNNtp337
ssg30
g31
sg32
S'\n Read molecule from atoms.dat-file.\n \n * file -- the given atoms.dat -file\n '
p338
sg34
g325
sg35
g51
sg36
g37
sg38
(dp339
sg40
g328
sg41
g329
sg42
S''
sbsS'output_I_info'
p340
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p341
(dp342
g4
((dp343
(dp344
tp345
sg17
(dp346
sg19
(S'self'
p347
S'file'
p348
tp349
sg23
(dp350
g347
(NNNtp351
sg348
(NNNtp352
ssg30
g31
sg32
S'\n Write the molecule into a I_info-file for Cmdft.\n \n * file -- the output file name.\n '
p353
sg34
g340
sg35
g51
sg36
g37
sg38
(dp354
sg40
g343
sg41
g344
sg42
S''
sbsS'vtk_output'
p355
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p356
(dp357
g4
((dp358
(dp359
tp360
sg17
(dp361
sg19
(S'self'
p362
S'fn'
p363
tp364
sg23
(dp365
g362
(NNNtp366
sg363
(NNNtp367
ssg30
g31
sg32
S"\n Make a vtk-file of the current molecule.\n \n Output of coordinates, charges, velocities, forces, bonds, etc.\n \n * fn -- the output file name (e.g. 'molecule.vtk')\n "
p368
sg34
g355
sg35
g51
sg36
g37
sg38
(dp369
sg40
g358
sg41
g359
sg42
S''
sbsS'move_atom'
p370
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p371
(dp372
g4
((dp373
(dp374
tp375
sg17
(dp376
sg19
(S'self'
p377
S'atom'
p378
S'dr'
p379
tp380
sg23
(dp381
g377
(NNNtp382
sg379
(NNNtp383
sg378
(NNNtp384
ssg30
g31
sg32
S'\n Translate atom by the vector dr.\n \n * atom -- the atom index to be translated. First atom=0.\n * dr -- the translation vector\n '
p385
sg34
g370
sg35
g51
sg36
g37
sg38
(dp386
sg40
g373
sg41
g374
sg42
S''
sbsS'pair_distr_function'
p387
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p388
(dp389
g4
((dp390
(dp391
tp392
sg17
(dp393
sg19
(S'self'
p394
S'rmin'
p395
S'rmax'
p396
S'sigma'
p397
tp398
sg23
(dp399
g397
(I1
S'0.7'
Ntp400
sg394
(NNNtp401
sg396
(NNNtp402
sg395
(NNNtp403
ssg30
g31
sg32
S'\n Return the pair distribution function.\n \n * rmin -- the minimum of the function\n * rmax -- the maximum of the function\n * sigma -- Gaussian used in the broadening, this is its sigma\n '
p404
sg34
g387
sg35
g51
sg36
g37
sg38
(dp405
sg40
g390
sg41
g391
sg42
S''
sbstp406
sg30
g31
sg32
S"\n Class for molecules. \n \n Consists of many atoms (using class 'Atom'), and\n hosts also other additional information such as number of atoms,\n electrons in the whole molecule, binding info.\n "
p407
sg45
(lp408
sg34
g50
sg35
g2
sg36
g37
sg47
(lp409
sg38
(dp410
sg40
g53
sg41
g54
sg42
S''
sbsS'Element'
p411
(ihappydoclib.parseinfo.classinfo
ClassInfo
p412
(dp413
g4
((dp414
(dp415
S'energies'
p416
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p417
(dp418
g4
((dp419
(dp420
tp421
sg17
(dp422
sg19
(S'self'
p423
S'ret'
p424
tp425
sg23
(dp426
g423
(NNNtp427
sg424
(NNNtp428
ssg30
g31
sg32
S'\n Return some characterisics of the electronic structure.\n \n * ret -- the returned energy\n \n * IE -- ionization energy\n * EA -- electron affinity\n '
p429
sg34
g416
sg35
g412
sg36
g37
sg38
(dp430
sg40
g419
sg41
g420
sg42
S''
sbsS'read_element_info'
p431
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p432
(dp433
g4
((dp434
(dp435
tp436
sg17
(dp437
sg19
(S'self'
p438
S'file'
p439
tp440
sg23
(dp441
g438
(NNNtp442
sg439
(NNNtp443
ssg30
g31
sg32
S'\n Read data for the element from elements.dat-file.\n \n * file -- elements.dat -file\n '
p444
sg34
g431
sg35
g412
sg36
g37
sg38
(dp445
sg40
g434
sg41
g435
sg42
S''
sbsS'sp_occupations'
p446
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p447
(dp448
g4
((dp449
(dp450
tp451
sg17
(dp452
sg19
(S'self'
p453
S'excess_el'
p454
tp455
sg23
(dp456
g453
(NNNtp457
sg454
(I1
S'0'
Ntp458
ssg30
g31
sg32
S'\n Return the occupations for single particle states with given N_el.\n \n * excess_el -- number of excess electrons on element (wrt. neutral)\n '
p459
sg34
g446
sg35
g412
sg36
g37
sg38
(dp460
sg40
g449
sg41
g450
sg42
S''
sbsS'energy_as_separated'
p461
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p462
(dp463
g4
((dp464
(dp465
tp466
sg17
(dp467
sg19
(S'self'
p468
S'excess_el'
p469
tp470
sg23
(dp471
g468
(NNNtp472
sg469
(I1
S'0'
Ntp473
ssg30
g31
sg32
S'\n Return the energy of isolated atom .\n \n Return sum_i occ_i e_i.\n \n * excess_el -- number of excess electrons on element (wrt. neutral)\n '
p474
sg34
g461
sg35
g412
sg36
g37
sg38
(dp475
sg40
g464
sg41
g465
sg42
S''
sbsS'__call__'
p476
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p477
(dp478
g4
((dp479
(dp480
tp481
sg17
(dp482
sg19
(S'self'
p483
tp484
sg23
(dp485
g483
(NNNtp486
ssg30
g31
sg32
S'\n Print the element data (__dict__)\n '
p487
sg34
g476
sg35
g412
sg36
g37
sg38
(dp488
sg40
g479
sg41
g480
sg42
S''
sbsS'__init__'
p489
(ihappydoclib.parseinfo.functioninfo
FunctionInfo
p490
(dp491
g4
((dp492
(dp493
tp494
sg17
(dp495
sg19
(S'self'
p496
S'element'
p497
S'fil'
p498
tp499
sg23
(dp500
g496
(NNNtp501
sg498
(I1
S'None'
Ntp502
sg497
(NNNtp503
ssg30
g31
sg32
S'\n Initialize the element object.\n \n At least symbol given, and if file (=elements.dat used for HOTBIT)\n is present, read more element info from there.\n \n Parameters\n \n * element -- element symbol\n \n * fil -- the path of elements.dat -file\n \n '
p504
sg34
g489
sg35
g412
sg36
g37
sg38
(dp505
sg40
g492
sg41
g493
sg42
S''
sbstp506
sg30
g31
sg32
S'\n Class for elements. \n \n Contains element-specific information such\n as mass, name, ionization potentials etc.\n '
p507
sg45
(lp508
sg34
g411
sg35
g2
sg36
g37
sg47
(lp509
sg38
(dp510
sg40
g414
sg41
g415
sg42
S''
sbs(dp511
tp512
sS'_import_info'
p513
(ihappydoclib.parseinfo.imports
ImportInfo
p514
(dp515
S'_named_imports'
p516
(dp517
sS'_straight_imports'
p518
(lp519
sbsg30
g31
sg32
S'"""\n Contain classes related to molecular calculations. \n \n Atomic units used throughout unless otherwise stated.\n \n Author P. Koskinen 15.9 2006\n \n"""'
p520
sg34
S'md'
p521
sg35
Nsg36
g37
sg38
(dp522
S'include_comments'
p523
I1
sS'cacheFilePrefix'
p524
S'.happydoc.'
p525
sS'useCache'
p526
I1
sS'docStringFormat'
p527
S'StructuredText'
p528
ssg40
g5
sg41
g511
sg42
S''
sbt. | pekkosk/hotbit | box/.happydoc.md.py | Python | gpl-2.0 | 15,727 | [
"Gaussian",
"VTK"
] | 551bcc91a2f2a3db00481f7a7d6eb68af1fa407a5843d1e64c2a08f4f653efaa |
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 1: Fundamental Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example uses discrete simulated annealing with the Traveling Salesman Problem (TSP). The cities are placed
in a circle, so the ideal path is known. Because the cities are in a circle they should be visited in order
around the edge for the absolute optimal path.
http://en.wikipedia.org/wiki/Traveling_salesman_problem
Running the program produces the following output. Simulated annealing always performs the maximum number of
iterations, unless stopped early. Here we do not specify a stopping score, so the full 500 iterations are used.
Each line of output shows the iteration number, the score, the k (also the iteration number), kmax (the max
number of iterations), t (the temperature), and prob (the probability of accepting a worse solution than the
current). You can see that an optimal solution was not found, but we are close! There are large ranges of
numbers in order. We stay close to the edge of the circle.
/Users/jheaton/anaconda/bin/python /Users/jheaton/projects/aifh/vol1/python-examples/examples/example_anneal_disc_tsp.py
Iteration #1, Score: 649.0,k=1,kMax=500,t=388.021572487,prob=0.979593675247,699.0
Iteration #2, Score: 649.0,k=2,kMax=500,t=376.401851787,prob=0.938228607478,809.0
Iteration #3, Score: 649.0,k=3,kMax=500,t=365.130096043,prob=0.967669187719,826.0
Iteration #4, Score: 637.0,k=4,kMax=500,t=354.195885072,prob=0.99156587898,746.0
Iteration #5, Score: 637.0,k=5,kMax=500,t=343.589110735,prob=0.937976889178,752.0
Iteration #6, Score: 637.0,k=6,kMax=500,t=333.299967592,prob=0.96463681706,770.0
Iteration #7, Score: 637.0,k=7,kMax=500,t=323.318943837,prob=1.0,897.0
Iteration #8, Score: 637.0,k=8,kMax=500,t=313.636812505,prob=1.0,952.0
Iteration #9, Score: 637.0,k=9,kMax=500,t=304.244622945,prob=0.993447901548,738.0
Iteration #10, Score: 637.0,k=10,kMax=500,t=295.133692539,prob=0.934479210491,849.0
...
Iteration #497, Score: 70.0,k=497,kMax=500,t=0.000109549994463,prob=0.0,70.0
Iteration #498, Score: 70.0,k=498,kMax=500,t=0.000106269402794,prob=0.0,70.0
Iteration #499, Score: 70.0,k=499,kMax=500,t=0.000103087051948,prob=0.0,70.0
Iteration #500, Score: 70.0,k=500,kMax=500,t=0.0001,prob=0.0,70.0
Finished after 501 iterations, final score is 70.0
Final distance: 70.0
Final path: [28, 29, 30, 31, 32, 33, 34, 35, 36, 38, 40, 42, 43, 44, 45, 47, 48, 49, 46, 41, 39, 37, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
Process finished with exit code 0
"""
__author__ = 'jheaton'
import sys
import os
import numpy as np
from scipy.spatial import distance
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from train import TrainAnneal
class AnnealTSP(TrainAnneal):
    """
    Discrete simulated annealing applied to the Traveling Salesman Problem
    (TSP). The cities lie on a circle, so the optimal tour is known: visit
    them in order around the edge.

    http://en.wikipedia.org/wiki/Traveling_salesman_problem
    """

    def perform_randomization(self, vec):
        """
        Randomize the candidate path by exchanging two distinct cities.

        @param vec: The path to randomize (modified in place).
        """
        first = second = 0
        # keep drawing until two different positions come up
        while first == second:
            first = np.random.randint(0, CITY_COUNT)
            second = np.random.randint(0, CITY_COUNT)
        # exchange the two cities on the path
        vec[first], vec[second] = vec[second], vec[first]
def score_funct(x):
    """
    Score a candidate path by the total distance covered.

    @param x: The path to evaluate: a list of indices into the global
        ``cities`` table, one entry per city.
    @return: The summed Euclidean distance between consecutive cities
        on the path.
    """
    # Bug fix: the original computed distance.euclidean(x[i], x[i + 1]),
    # i.e. the distance between the *index values* themselves, not between
    # the cities they name. The entries must be looked up in ``cities``.
    # (The sample output quoted in the module docstring was produced by the
    # old index-based score and will differ after this fix.)
    result = 0
    for i in range(CITY_COUNT - 1):
        result += distance.euclidean(cities[x[i]], cities[x[i + 1]])
    return result
CITY_COUNT = 50
MAP_SIZE = 10

# Place the cities evenly around a circle of diameter MAP_SIZE, centred in
# the map; the optimal tour is therefore to visit them in index order.
# (range() replaces the Python-2-only xrange so the script also runs on
# Python 3; the file already uses the print() function form.)
cities = []
ratio = (2.0 * np.pi) / CITY_COUNT
for i in range(0, CITY_COUNT):
    x = int(np.cos(ratio * i) * (MAP_SIZE / 2.0) + (MAP_SIZE / 2.0))
    y = int(np.sin(ratio * i) * (MAP_SIZE / 2.0) + (MAP_SIZE / 2.0))
    cities.append([x, y])

# Pick a random city order. Sampling without replacement (each city visited
# exactly once) is exactly a random permutation, so draw one directly
# instead of rejection-sampling random integers. tolist() keeps the path a
# plain list of Python ints, as before.
current_path = np.random.permutation(CITY_COUNT).tolist()

# Run the annealing.
train = AnnealTSP()
train.display_iteration = True
train.max_iterations = 500
train.train(current_path, score_funct)

# Display results.
print("Final distance: " + str(score_funct(train.position)))
print("Final path: " + str(train.position))
| PeterLauris/aifh | vol1/python-examples/examples/example_anneal_disc_tsp.py | Python | apache-2.0 | 6,012 | [
"VisIt"
] | dd49133df65a51a400e1786c4d3d405b4a0a1091875d4945a7030803fca95b68 |
"""
This script runs an unbiased LAMMPS MD simulation, and stores the values of CVs along with
their corresponding potential and kinetic energies.
After that, the CVs are meshed at a given precision and the thermodynamical observables
such as F, E_min, TS and more are calculated from the values of PotEng.
"""
from __future__ import division, print_function
import scipy as sp
import scipy.linalg as LA
import os
from ase import units
from ase import Atoms
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
import pickle as pkl
dihedral_atoms_phi = [4, 6, 8, 14]  # C(O)-N-C(a)-C(O)
dihedral_atoms_psi = [6, 8, 14, 16]  # N-C(a)-C(O)-N
# Atom indices that move rigidly when the corresponding dihedral rotates.
# Fix: the original used ``range(6) + [7]``, which is Python-2-only
# (range objects do not concatenate with lists on Python 3); the explicit
# lists below are value-identical and work on both versions.
fun_group_phi = [0, 1, 2, 3, 4, 5, 7]
fun_group_psi = list(range(15, 22))

#############################################################
##### Utility functions to be added to ase.Atoms Class #####

def phi_(self, dihedral_list=dihedral_atoms_phi):
    """Return the phi backbone dihedral (C(O)-N-Ca-C(O)) of the molecule."""
    return self.get_dihedral(dihedral_list)

def psi_(self, dihedral_list=dihedral_atoms_psi):
    """Return the psi backbone dihedral (N-Ca-C(O)-N) of the molecule."""
    return self.get_dihedral(dihedral_list)

def set_phi_(self, phi):
    """Set the phi dihedral to *phi*, rotating the phi functional group."""
    self.set_dihedral(dihedral_atoms_phi, phi, indices=fun_group_phi)

def set_psi_(self, psi):
    """Set the psi dihedral to *psi*, rotating the psi functional group."""
    self.set_dihedral(dihedral_atoms_psi, psi, indices=fun_group_psi)

def colvars(self):
    """Return the collective variables (phi, psi) as a 2-D array row."""
    s = sp.atleast_2d(sp.array([self.phi(), self.psi()]))
    return s
def grid(x, y, z, resX=100, resY=100):
    """Convert three columns of scattered (x, y, z) data into the meshed
    form matplotlib expects: regular X/Y coordinate grids plus linearly
    interpolated Z values."""
    axis_x = sp.linspace(min(x), max(x), resX)
    axis_y = sp.linspace(min(y), max(y), resY)
    X, Y = sp.meshgrid(axis_x, axis_y)
    Z = griddata(x, y, z, axis_x, axis_y, interp='linear')
    return X, Y, Z
def round_vector(vec, precision=0.05):
    """Snap every component of *vec* to the nearest multiple of *precision*."""
    shifted = (vec + 0.5 * precision) / precision
    return shifted.astype('int') * precision
### CODE STARTS HERE ###
run_from_scratch = True  # True: run LAMMPS and harvest CVs; False: reuse saved CSV
T = 300  # temperature (K) used in the Boltzmann weights
if run_from_scratch:
    # Monkey-patch the CV helpers onto ase.Atoms so each trajectory frame
    # can report its own dihedrals via atoms.colvars().
    setattr(Atoms, 'phi', phi_)
    setattr(Atoms, 'psi', psi_)
    setattr(Atoms, 'colvars', colvars)
    os.system('lmp_mpi < input_md')
    # load trajectory and get atomic positions into adata
    print("Reading positions from trajectory file...")
    data = []
    with open('lmp_md.xyz', 'r') as file:
        for i, line in enumerate(file.readlines()):
            # assumes each frame is 31 lines, with atom records (x, y, z in
            # columns 2:5) on lines 9..30 of the frame -- TODO confirm
            # against the actual lmp_md.xyz layout
            if i % 31 > 8:
                data.append(line.split()[2:5])
    n_atoms = 22
    print("Converting data...")
    data = sp.asarray(data).astype('float')
    # NOTE(review): true division is active (__future__ import), so
    # len(data)/n_atoms is a float; newer numpy rejects non-integer reshape
    # dimensions -- presumably // was intended. Verify.
    data = data.reshape((len(data)/n_atoms, n_atoms, 3))
    # write potential energies to file
    print("Reading potential energies...")
    os.system('grep PotEng log.lammps | awk {\'print $3\'} > PotEng.md')
    energies = sp.loadtxt('PotEng.md')
    os.system('grep KinEng log.lammps | awk {\'print $6\'} > KinEng.md')
    kineng = sp.loadtxt('KinEng.md')
    # convert from kcal/mol (LAMMPS units) to ASE internal units
    energies *= units.kcal / units.mol
    kineng *= units.kcal / units.mol
    # now extract CVs from positions
    colvars = []
    print("Converting positions into collective variables...")
    for positions in data:
        # element symbols are irrelevant for the dihedral geometry, so a
        # dummy 'H' is used for every atom
        atoms = Atoms(['H']*n_atoms, positions)
        colvars.append(atoms.colvars().flatten())
    colvars = sp.asarray(colvars)
    phipsi_pot_kin = sp.hstack((colvars, energies[:,None], kineng[:,None]))
    print("Saving data...")
    sp.savetxt('phi_psi_pot_kin_md300.csv', phipsi_pot_kin)
else:
    # try:
    #     with open('energies.pkl', 'r') as file:
    #         energies_r = pkl.load(file)
    # except:
    # NOTE(review): this branch loads 'phi_psi_pot_md300.csv' while the
    # branch above saves 'phi_psi_pot_kin_md300.csv' (4 columns), yet
    # column index 4 below requires at least 5 columns -- verify the file
    # name and column layout.
    data = sp.loadtxt('phi_psi_pot_md300.csv')
    colvars = data[:,:2]
    energies = data[:,2]
    kineng = data[:,4]
# Mesh the CVs on a regular (phi, psi) grid by rounding to the default
# precision of round_vector.
colvars_r = round_vector(colvars)
phimin, phimax = 0, 2*sp.pi
psimin, psimax = 0, 2*sp.pi
phirange = phimax - phimin
psirange = psimax - psimin
aspect_ratio = psirange/phirange
first = True
# for imax in range(445200, len(energies), 100):
# imax = 111
# NOTE(review): ``imax`` is only assigned in the commented-out lines above,
# so the next statement raises NameError as written -- either restore the
# loop or drop this print.
print("%09d" % imax)
if first:
    energies_r = {}
    kineng_r = {}
    first = False
# Bucket per-frame potential/kinetic energies by their rounded (phi, psi)
# grid cell, keyed by the string '<phi>-<psi>'.
for i, s in enumerate(colvars_r):
    if ('%f-%f' % (s[0], s[1])) in energies_r.keys():
        energies_r['%f-%f' % (s[0], s[1])].append(energies[i])
        kineng_r['%f-%f' % (s[0], s[1])].append(kineng[i])
    else:
        energies_r['%f-%f' % (s[0], s[1])] = [energies[i]]
        kineng_r['%f-%f' % (s[0], s[1])] = [kineng[i]]
colvars_2 = []
energies_mean = []
energies_min = []
n_confs = []
free_energies = []
meanpot = []
zeta = []
zeta_reduced = []
# Accumulate the per-cell Boltzmann sums.
# NOTE(review): dict.iteritems() is Python-2-only.
for s, energy in energies_r.iteritems():
    kin = sp.array(kineng_r[s])
    energy = sp.array(energy)
    # recover the (phi, psi) cell centre from the dict key; assumes both
    # values are non-negative ('-' would also appear inside negatives)
    colvars_2.append(sp.array(s.split('-')).astype('float'))
    meanpot.append(sp.exp(- kin / (units.kB * T)).sum())
    zeta.append(sp.exp(- (energy + kin) / (units.kB * T)).sum())
    zeta_reduced.append(sp.exp(- energy / (units.kB * T)).sum())
#
colvars_2 = sp.array(colvars_2)
# n_confs = sp.array(n_confs)
# energies_min = sp.array(energies_min)
# energies_mean = sp.array(energies_mean)
# F = -kT log Z per cell, using the potential-energy-only partition sum
free_energies = - units.kB * T * sp.log(zeta_reduced)
meanpot = units.kB * T * sp.log(meanpot)
free_en_approx = meanpot / zeta
free_en_approx -= free_en_approx.mean() # shift zero value
colvars_2 = sp.array(colvars_2)
phi, psi = colvars_2[:,0], colvars_2[:,1]
phimin, phimax = phi.min(), phi.max()
psimin, psimax = psi.min(), psi.max()
phirange = phimax - phimin
psirange = psimax - psimin
aspect_ratio = psirange / phirange
print("Plotting trajectory...")
# fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
# sc = ax.scatter(phi, psi, c=energies_mean, marker = 's', s = 120,
#                 cmap = 'RdBu', alpha = .8, edgecolors='none')
# ax.set_xlim(phimin, phimax)
# ax.set_ylim(psimin, psimax)
# plt.colorbar(sc, format='%.3e')
# fig.savefig('energy_mean-%09d.png' %imax)
#
# plt.close()
# fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
# sc = ax.scatter(phi, psi, c=energies_min, marker = 's', s = 120,
#                 cmap = 'RdBu', alpha = .8, edgecolors='none')
# ax.set_xlim(phimin, phimax)
# ax.set_ylim(psimin, psimax)
# plt.colorbar(sc, format='%.3e')
# fig.savefig('energy_min-%09d.png' % imax)
#
# plt.close()
# Scatter plot of the exact free-energy surface on the meshed CV grid.
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=free_energies, marker = 's', s = 120,
                cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('free_energy.png')
plt.close()
# Same plot for the approximate (kinetic-energy-weighted) free energy.
fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
sc = ax.scatter(phi, psi, c=free_en_approx, marker = 's', s = 120,
                cmap = 'RdBu', alpha = .8, edgecolors='none')
ax.set_xlim(phimin, phimax)
ax.set_ylim(psimin, psimax)
plt.colorbar(sc, format='%.3e')
fig.savefig('free_energy_sandro.png')
#
# plt.close()
# fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
# sc = ax.scatter(phi, psi, c=energies_min-free_energies, marker = 's', s = 120,
#                 cmap = 'RdBu', alpha = .8, edgecolors='none')
# ax.set_xlim(phimin, phimax)
# ax.set_ylim(psimin, psimax)
# plt.colorbar(sc, format='%.3e')
# fig.savefig('TS_min-%09d.png' % imax)
#
# plt.close()
# fig, ax = plt.subplots(1,1,figsize=(10,10*aspect_ratio))
# sc = ax.scatter(phi, psi, c=energies_mean-free_energies, marker = 's', s = 120,
#                 cmap = 'RdBu', alpha = .8, edgecolors='none')
# ax.set_xlim(phimin, phimax)
# ax.set_ylim(psimin, psimax)
# plt.colorbar(sc, format='%.3e')
# fig.savefig('TS_mean-%09d.png' %imax)
# plt.close()
#
| marcocaccin/LearningMetaDynamics | MD_unconstrained/md_db0_build2.py | Python | gpl-2.0 | 7,243 | [
"ASE",
"LAMMPS"
] | 4806c1e46fe9155325857b3164b9e94cc8574a17ecc1aeac98d8a807a9e6426c |
'''
Copyright 2009, 2010 Brian S. Eastwood.
This file is part of Synctity.
Synctity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Synctity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Synctity. If not, see <http://www.gnu.org/licenses/>.
Created on Nov 18, 2009
'''
from collections import deque
import os
import shelve
import sys
from PyQt4 import QtCore, QtGui
import command
import rsync
import about_ui
import synctity_ui
# Application identity shown in the UI (about dialog etc.).
APPLICATION_NAME="Synctity"
APPLICATION_VERSION="1.03"
APPLICATION_WEBSITE="https://github.com/beastwood/synctity"
# Default shelve database used to persist profiles between sessions.
DEFAULT_CONFIG=os.path.expanduser("~/synctity.db")
class ProfileModel(QtCore.QAbstractListModel):
    '''
    A model that presents a set of profiles as a list.

    Each item shown by an attached view is the name of one profile; the
    profile objects themselves are held in a plain Python list.
    '''
    def __init__(self, parent=None):
        QtCore.QAbstractListModel.__init__(self, parent)
        self.profiles = list()  # backing store for the model

    def setProfiles(self, profiles):
        '''
        Replace the entire set of profiles presented by this model.
        '''
        self.profiles = profiles
        # reset() forces attached views to re-query the model from scratch
        self.reset()

    def getProfiles(self):
        '''
        Get the list of profiles backing this model.
        '''
        return self.profiles

    def isValid(self, index):
        '''
        Determines if an index references a profile in this model. Index
        is a single number, not a QModelIndex.
        '''
        return index >= 0 and index < len(self.profiles)

    def data(self, index, role=QtCore.Qt.DisplayRole):
        '''
        Return the representation of data in this model at a given index.
        Index is a QModelIndex. If the role is Qt.DisplayRole and the index
        is valid, returns the name of the profile.
        '''
        if (role == QtCore.Qt.DisplayRole and
            index.isValid() and
            self.isValid(index.row())):
            data = self.profiles[index.row()].getName()
        else:
            # any other role, or an invalid index, yields "no data"
            data = QtCore.QVariant()
        return data

    def rowCount(self, parent=QtCore.QModelIndex()):
        '''
        Gets the number of profiles stored in this model
        '''
        return len(self.profiles)

    def append(self, profile):
        '''
        Add a profile to this model, and return the new item's QModelIndex
        '''
        # find the index of inserting and notify any views
        newIdx = len(self.profiles)
        self.beginInsertRows(QtCore.QModelIndex(), newIdx, newIdx)
        # add the profile to the model
        self.profiles.append(profile)
        # alert any views, and return a QModelIndex of the new element
        self.endInsertRows()
        return self.index(newIdx, 0)

    def remove(self, index):
        '''
        Remove the profile at the given index. Index is a single number,
        not a QModelIndex. Invalid indices are silently ignored.
        '''
        if self.isValid(index):
            # notify any views and remove the profile
            self.beginRemoveRows(QtCore.QModelIndex(), index, index)
            # NOTE(review): list.remove() deletes the first profile that
            # compares equal, which matches ``index`` only if profiles are
            # unique in the list -- confirm duplicates cannot occur.
            self.profiles.remove(self.profiles[index])
            self.endRemoveRows()

    def get(self, index):
        '''
        Get a reference to the profile at the given index. Index is a single
        number, not a QModelIndex. Returns None for an invalid index.
        '''
        if self.isValid(index):
            return self.profiles[index]

    def update(self, profile):
        '''
        Notify any views that the given profile has been modified.
        '''
        # NOTE(review): list.index() raises ValueError when the profile is
        # not part of this model; callers must pass a managed profile.
        idx = self.profiles.index(profile)
        if self.isValid(idx):
            # build the QModelIndex, and emit the changed signal
            # (old-style PyQt4 SIGNAL syntax)
            modelIdx = self.index(idx, 0)
            self.emit(QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"),
                      modelIdx, modelIdx)
class CommandModel(QtCore.QAbstractListModel):
    '''
    A list model for a set of rsync commands attached to a profile.

    The model proxies the command list of a single profile; call
    setProfile() to choose which profile's commands are exposed to views.
    '''
    def __init__(self, parent=None):
        QtCore.QAbstractListModel.__init__(self, parent)
        # no profile is attached until setProfile() is called
        self.profile = None
    def setProfile(self, profile):
        '''
        Set the profile that contains the commands this model represents
        '''
        self.profile = profile
        # notify any views that this model has completely changed
        self.reset()
    def isValid(self, index):
        '''
        Determines whether a single-value index is valid for this model.
        False whenever no profile is attached.
        '''
        return (self.profile != None and
                index >= 0 and
                index < len(self.profile.getCommands()))
    def rowCount(self, parent=QtCore.QModelIndex()):
        '''
        Gets the number of commands in this model (0 if no profile is set)
        '''
        if self.profile == None:
            count = 0
        else:
            count = len(self.profile.getCommands())
        return count
    def data(self, index, role=QtCore.Qt.DisplayRole):
        '''
        Gets the model item at a given QModelIndex index. If the index is
        valid for this model and role is Qt.DisplayRole, return the description
        of the command at the index. Otherwise an empty QVariant is returned.
        '''
        if (role == QtCore.Qt.DisplayRole and
            index.isValid() and
            self.isValid(index.row())):
            data = self.profile.getCommands()[index.row()].getDescription()
        else:
            data = QtCore.QVariant()
        return data
    def append(self, command):
        '''
        Add a command to this model. Returns the QModelIndex where the
        addition was made.
        '''
        # get the index for the new addition and notify any views
        newIdx = len(self.profile.getCommands())
        self.beginInsertRows(QtCore.QModelIndex(), newIdx, newIdx)
        # add the command to the underlying data
        self.profile.add(command)
        # notify views that the addition is complete
        self.endInsertRows()
        return self.index(newIdx, 0)
    def remove(self, index):
        '''
        Removes the command at the given single-value index from this model.
        Out-of-range indices are ignored.
        '''
        if self.isValid(index):
            # if the index is valid, notify views and remove data
            self.beginRemoveRows(QtCore.QModelIndex(), index, index)
            self.profile.remove(index)
            self.endRemoveRows()
    def edit(self, index):
        '''
        Edit the command at the given single-value index by launching a
        modal CommandForm dialog, then signal views that the row changed.
        '''
        if self.isValid(index):
            # build a command form for editing the command
            # NOTE(review): relies on a module-level 'command' import -- confirm
            selected = self.profile.get(index)
            dialog = command.CommandForm()
            dialog.setCommand(selected)
            # launch as a modal dialog
            dialog.exec_()
            # notify any views that we have changed
            self.emit(QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"),
                      self.index(index, 0), self.index(index, 0))
class ProfileRunner:
    '''
    ProfileRunner is responsible for running commands as separate processes
    and sending the output to a text window. Commands are queued and started
    as prior commands finish.
    '''
    def __init__(self, textEdit):
        '''
        Initialize a ProfileRunner. textEdit ought to be a QTextEdit.
        '''
        # output is sent to a text box
        self.textConsole = textEdit;
        # commands are held in a queue until ready to be run.
        self.commands = deque()
        # commands are run through a QProcess
        self.process = QtCore.QProcess()
        # connect the QProcess signals to our slots; onFinished drives the
        # queue by launching the next command when the current one exits
        QtGui.qApp.connect(self.process, QtCore.SIGNAL("started()"), self.onStarted)
        QtGui.qApp.connect(self.process, QtCore.SIGNAL("readyReadStandardOutput()"), self.onStdout)
        QtGui.qApp.connect(self.process, QtCore.SIGNAL("readyReadStandardError()"), self.onStderr)
        QtGui.qApp.connect(self.process, QtCore.SIGNAL("finished(int)"), self.onFinished)
    def onStarted(self):
        '''
        Do nothing when the process launches. Could print out the pid.
        '''
        pass
    def onStdout(self):
        '''
        When the process generates standard output, print it to the text box.
        '''
        self.textConsole.insertPlainText(str(self.process.readAllStandardOutput()))
        # auto scroll
        scroll = self.textConsole.verticalScrollBar()
        scroll.setValue(scroll.maximum())
    def onStderr(self):
        '''
        When the process generates standard error, print it to the text box in red.
        '''
        # remember the current color so it can be restored afterwards
        color = self.textConsole.textColor()
        self.textConsole.setTextColor(QtGui.QColor.fromHsvF(0.0, 0.9, 0.7))
        self.textConsole.insertPlainText(str(self.process.readAllStandardError()))
        self.textConsole.setTextColor(color)
        # auto scroll
        scroll = self.textConsole.verticalScrollBar()
        scroll.setValue(scroll.maximum())
    def onFinished(self, exitCode):
        '''
        Report the result of a finished command, and launch the next one.
        A non-zero exit code is reported as a possible transfer error.
        '''
        if exitCode != 0:
            message = "There may have been an error with the transfer."
        else:
            message = ""
        self.textConsole.append("Finished (%d)\n%s" % (exitCode, message))
        # launch the next command
        self.runNext()
    def runProfile(self, profile, reverse=False):
        '''
        Queue up all commands in a profile and start running them.
        Pre- and post-sync commands only run in the forward direction.
        '''
        if not reverse:
            # forward direction runs any pre-sync and post-sync commands
            if profile.getPreSync() != '':
                self.commands.append(profile.getPreSync())
            for command in profile:
                self.commands.append(command.forward())
            if profile.getPostSync() != '':
                self.commands.append(profile.getPostSync())
        else:
            for command in profile:
                self.commands.append(command.reverse())
        # launch the next command
        self.runNext()
    def runNext(self):
        '''
        Run the next command in the queue, if any, provided no process
        is currently running. Called again from onFinished to drain
        the queue one command at a time.
        '''
        # check if there are commands to run and that there is not a process
        # currently running.
        if (len(self.commands) > 0 and
            self.process.state() == QtCore.QProcess.NotRunning):
            # remove a command from the queue and start it
            command = self.commands.popleft()
            self.textConsole.append(command + '\n')
            self.process.start(command)
class SynctityWindow(QtGui.QMainWindow):
    '''
    The main Synctity window, which enables editing and running profiles of
    rsync commands.
    '''
    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)
        # set up user interface
        self.ui = synctity_ui.Ui_Synctity()
        self.ui.setupUi(self)
        self.setWindowTitle("{0} {1}".format(APPLICATION_NAME, APPLICATION_VERSION))
        # set up profile and command views
        self.profileModel = ProfileModel(self)
        self.ui.listProfiles.setModel(self.profileModel)
        self.commandModel = CommandModel(self)
        self.ui.listCommands.setModel(self.commandModel)
        # initially disable profile editing
        self.ui.groupProfile.setEnabled(False)
        # setup process for running profiles
        self.runner = ProfileRunner(self.ui.textConsole)
        # filename used to store profiles; auto-load the default config
        # file when it exists
        if os.path.exists(DEFAULT_CONFIG):
            self.filename = DEFAULT_CONFIG
            self.loadProfiles()
        else:
            self.filename = None
        # add About menu item
        # Note that on Mac OS, this menu item gets placed in the application menu, not the help menu
        about = QtGui.QAction("About", self)
        self.ui.menuHelp.addAction(about)
        self.connect(about, QtCore.SIGNAL("triggered()"), self.onAbout)
        # add Help menu item
        # NOTE(review): 'help' shadows the builtin of the same name (local only)
        help = QtGui.QAction("Manual", self)
        self.ui.menuHelp.addAction(help)
        self.connect(help, QtCore.SIGNAL("triggered()"), self.onHelp)
    def currentProfile(self):
        '''
        Returns a reference to the currently-selected profile,
        or None when nothing is selected.
        '''
        profIdx = self.ui.listProfiles.currentIndex()
        return self.profileModel.get(profIdx.row())
    def onAddProfile(self):
        '''
        Adds a new default profile to the list of profiles. The new
        profile is selected.
        '''
        # add a profile to the model
        index = self.profileModel.append(rsync.Profile())
        # select profile and set focus to edit the profile name
        self.ui.listProfiles.setCurrentIndex(index)
        self.onSelectProfile(index)
        self.ui.textProfileName.setFocus()
        self.ui.textProfileName.selectAll()
    def onRemoveProfile(self):
        '''
        Removes the currently selected profile from the list of profiles.
        '''
        # remove the selected profile
        profIdx = self.ui.listProfiles.currentIndex()
        self.profileModel.remove(profIdx.row())
        # update profile editing for next selected profile
        newIdx = self.ui.listProfiles.currentIndex()
        self.onSelectProfile(newIdx)
    def onSelectProfile(self, index):
        '''
        Responds to a profile being selected from the list. If a valid
        profile is selected, updates the profile edit area to hold that
        profile's data. If no valid profile is selected, disables the
        profile edit area.
        '''
        profile = self.profileModel.get(index.row())
        if profile != None:
            self.ui.groupProfile.setEnabled(True)
            self.ui.textProfileName.setText(profile.getName())
            self.ui.textPreSync.setText(profile.getPreSync())
            self.ui.textPostSync.setText(profile.getPostSync())
            # point the command model at the newly selected profile
            self.commandModel.setProfile(profile)
        else:
            self.ui.groupProfile.setEnabled(False)
    def onProfileName(self):
        '''
        Renames the currently selected profile according to the text in the
        profile name field.
        '''
        # get the currently selected profile
        profile = self.currentProfile()
        if profile != None:
            # update the profile name
            profile.setName(self.ui.textProfileName.text())
            # notify the model that underlying data has changed
            self.profileModel.update(profile)
    def onTextSync(self):
        '''
        Updates the currently selected profile's pre and post sync commands.
        '''
        # get the currently selected profile
        profile = self.currentProfile()
        if profile != None:
            # update the command
            profile.setPreSync(self.ui.textPreSync.text())
            profile.setPostSync(self.ui.textPostSync.text())
            # notify the model that underlying data has changed
            self.profileModel.update(profile)
    def onPreSync(self):
        '''
        Prompts for a pre-sync command file and stores it in the profile.
        '''
        qfile = QtGui.QFileDialog.getOpenFileName(self, "Select pre-sync command...")
        if qfile != None and qfile != '':
            self.ui.textPreSync.setText(qfile)
            self.onTextSync()
    def onPostSync(self):
        '''
        Prompts for a post-sync command file and stores it in the profile.
        '''
        qfile = QtGui.QFileDialog.getOpenFileName(self, "Select post-sync command...")
        if qfile != None and qfile != '':
            self.ui.textPostSync.setText(qfile)
            self.onTextSync()
    def onAddCommand(self):
        '''
        Adds a new rsync command to the profile currently being edited.
        '''
        # add a command to the command model
        index = self.commandModel.append(rsync.Command())
        # edit the command
        self.commandModel.edit(index.row())
    def onRemoveCommand(self):
        '''
        Removes the currently selected command from the profile currently being
        edited.
        '''
        index = self.ui.listCommands.currentIndex()
        self.commandModel.remove(index.row())
    def onEditCommand(self, index):
        '''
        Edits the command at the given QModelIndex in the command model.
        '''
        self.commandModel.edit(index.row())
    def onForward(self):
        '''
        Runs the currently selected rsync profile in the forward direction,
        source -> destination.
        '''
        profile = self.currentProfile()
        if profile != None:
            self.runner.runProfile(profile, False)
    def onReverse(self):
        '''
        Runs the currently selected rsync profile in the reverse direction,
        destination -> source.
        '''
        profile = self.currentProfile()
        if profile != None:
            self.runner.runProfile(profile, True)
    def loadProfiles(self):
        '''
        Loads a set of profiles from a file (a shelve database keyed
        by "profiles"). Warns the user on any failure.
        '''
        try:
            # open the shelve file, and grab an object called profiles
            store = shelve.open(self.filename)
            if "profiles" in store:
                self.profileModel.setProfiles(store["profiles"])
            else:
                QtGui.QMessageBox.warning(self, "Cannot read file",
                    "Sorry, this file is not a valid Synctus file:\n" + self.filename)
            store.close()
            self.ui.statusbar.showMessage("Loaded profiles from " + self.filename)
        except:
            # opening shelve databases can easily throw an error if the file
            # is invalid
            QtGui.QMessageBox.warning(self, "Cannot read file",
                "Sorry, this file is not a valid Synctus file:\n" + self.filename)
    def writeProfiles(self):
        '''
        Saves the set of profiles to a file (a shelve database).
        Warns the user on any failure.
        '''
        try:
            # open the shelve file, and store the profiles
            store = shelve.open(self.filename)
            store["profiles"] = self.profileModel.getProfiles()
            store.close()
            self.ui.statusbar.showMessage("Wrote profiles to " + self.filename)
        except:
            # exceptions are common when dealing with file IO
            QtGui.QMessageBox.warning(self, "Cannot write file",
                "Sorry, this file is not a valid Synctus file:\n" + self.filename)
    def onLoad(self):
        '''
        Prompts the user for a file to load Synctus profiles from.
        '''
        qfile = QtGui.QFileDialog.getOpenFileName(self, "Load from...", DEFAULT_CONFIG)
        if qfile != None and qfile != '':
            self.filename = str(qfile)
            self.loadProfiles()
            # select the first profile that was loaded
            if self.profileModel.rowCount() > 0:
                index = self.profileModel.index(0, 0)
            else:
                index = QtCore.QModelIndex()
            self.ui.listProfiles.setCurrentIndex(index)
            self.onSelectProfile(index)
    def onSave(self):
        '''
        Saves the Synctus profiles to a file. Falls back to Save As
        when no filename has been chosen yet.
        '''
        if self.filename == None:
            self.onSaveAs()
        else:
            self.writeProfiles()
    def onSaveAs(self):
        '''
        Prompts the user for a file to save Synctus profiles in.
        '''
        qfile = QtGui.QFileDialog.getSaveFileName(self, "Save to...", DEFAULT_CONFIG)
        if qfile != None and qfile != '':
            self.filename = str(qfile)
            self.writeProfiles()
    def onAbout(self):
        '''
        Displays information about the application.
        '''
        # build the help dialog
        dialog = QtGui.QDialog(self)
        ui = about_ui.Ui_Dialog()
        ui.setupUi(dialog)
        # set dynamic content
        today = QtCore.QDate.currentDate()
        ui.labelApplication.setText(APPLICATION_NAME)
        ui.labelCopyright.setText("© 2010 - {0}".format(today.year()))
        ui.labelVersion.setText("version {0}".format(APPLICATION_VERSION))
        ui.labelWebsite.setText("<a href={0}>{0}</a>".format(APPLICATION_WEBSITE))
        ui.labelWebsite.setOpenExternalLinks(True)
        ui.labelIcon.setPixmap(QtGui.QPixmap(":/synctity"))
        dialog.setWindowTitle("About {0}".format(APPLICATION_NAME))
        dialog.show()
    def onHelp(self):
        '''
        Opens the help manual in the system browser, looking first in a
        Mac app bundle's Resources directory and then next to this file.
        '''
        # find the manual index page
        basepath = os.getenv("RESOURCEPATH")
        if basepath is not None:
            # in a Mac app bundle look in the Resources directory
            helpfile = os.path.join(basepath, "html/index.html")
        else:
            # otherwise use the current file directory
            basepath = os.path.split(os.path.abspath(__file__))[0]
            helpfile = os.path.join(basepath, "doc/html/index.html")
        # check whether the index file exists
        if os.path.exists(helpfile):
            helpfile = "file://" + helpfile
            print "Opening", helpfile
            QtGui.QDesktopServices.openUrl(QtCore.QUrl(helpfile))
        else:
            QtGui.QMessageBox.warning(self, "Could not find manual", "Could not find the help manual in\n{0}".format(helpfile))
    def onQuit(self):
        '''
        Exit the application event loop.
        '''
        QtGui.qApp.exit(0)
def runSynctity():
    '''
    Create the Qt application, show the main Synctity window, and block
    until the event loop exits.
    '''
    app = QtGui.QApplication(sys.argv)
    app.setApplicationName(APPLICATION_NAME)
    app.setApplicationVersion(APPLICATION_VERSION)
    # build the main window and bring it to the front
    window = SynctityWindow()
    window.show()
    window.raise_()
    window.setFocus()
    # propagate the event-loop exit status to the shell
    sys.exit(app.exec_())
if __name__ == "__main__":
runSynctity() | beastwood/synctity | synctity.py | Python | gpl-3.0 | 22,018 | [
"Brian"
] | 8aed7fc913986f953df6cbf2204e9718ff13d5932d63699d3d7000c1638659e0 |
#!/usr/bin/python
# script for computing overlap signatures from a bowtie output
# Christophe Antoniewski <drosofff@gmail.com>
# Usage signature.py <1:input> <2:format of input> <3:minsize query> <4:maxsize query> <5:minsize target> <6:maxsize target>
# <7:minscope> <8:maxscope> <9:output> <10:bowtie index> <11:procedure option> <12: graph (global or lattice)>
# <13: R code>
# version 2.0.0
import subprocess
import argparse
from smRtools import HandleSmRNAwindows
def Parser():
    '''
    Build the command-line interface for the signature script and return
    the parsed argument namespace.
    '''
    p = argparse.ArgumentParser()
    p.add_argument('--input', action="store", type=str,
                   help="input alignment file")
    p.add_argument('--inputFormat', action="store", type=str,
                   choices=["tabular", "bam", "sam"],
                   help="format of alignment file (tabular/bam/sam)")
    # read-size windows for query and target reads
    p.add_argument('--minquery', type=int,
                   help="Minimum readsize of query reads (nt) - must be an integer")
    p.add_argument('--maxquery', type=int,
                   help="Maximum readsize of query reads (nt) - must be an integer")
    p.add_argument('--mintarget', type=int,
                   help="Minimum readsize of target reads (nt) - must be an integer")
    p.add_argument('--maxtarget', type=int,
                   help="Maximum readsize of target reads (nt) - must be an integer")
    # overlap window analyzed between read pairs
    p.add_argument('--minscope', type=int,
                   help="Minimum overlap analyzed (nt) - must be an integer")
    p.add_argument('--maxscope', type=int,
                   help="Maximum overlap analyzed (nt) - must be an integer")
    p.add_argument('--outputOverlapDataframe', action="store", type=str,
                   help="Overlap dataframe")
    p.add_argument('--referenceGenome', action='store',
                   help="path to the bowtie-indexed or fasta reference")
    p.add_argument('--extract_index', action='store_true',
                   help="specify if the reference is an indexed Bowtie reference")
    p.add_argument('--graph', action='store', choices=["global", "lattice"],
                   help="small RNA signature is computed either globally or by item (global-lattice)")
    p.add_argument('--rcode', type=str,
                   help="R code to be passed to the python script")
    return p.parse_args()
# Parse the command line and build the per-item small RNA read windows.
args = Parser()

# the reference is either a prebuilt bowtie index or a fasta source
if args.extract_index:
    GenomeFormat = "bowtieIndex"
else:
    GenomeFormat = "fastaSource"

# HandleSmRNAwindows was previously called with identical arguments in
# three separate branches (tabular / sam / else); a single call replaces
# them since the format is already carried by args.inputFormat.
Genome = HandleSmRNAwindows(
    args.input, args.inputFormat, args.referenceGenome, GenomeFormat)
# objDic maps each reference item to its small RNA window object
objDic = Genome.instanceDict

# make the upper bound inclusive when used with range()
args.maxscope += 1

# accumulators over all items, keyed by overlap size
general_frequency_table = dict(
    [(i, 0) for i in range(args.minscope, args.maxscope)])
general_percent_table = dict(
    [(i, 0) for i in range(args.minscope, args.maxscope)])

OUT = open(args.outputOverlapDataframe, "w")

if args.graph == "global":
    # for normalized summing of local_percent_table(s)
    readcount_dic = {}
    Total_read_in_objDic = 0
    for item in objDic:
        readcount_dic[item] = objDic[item].readcount(
            args.minquery, args.maxquery)
        Total_read_in_objDic += readcount_dic[item]
    ######
    for x in (objDic):
        local_frequency_table = objDic[x].signature(
            args.minquery, args.maxquery, args.mintarget, args.maxtarget, range(args.minscope, args.maxscope))
        local_percent_table = objDic[x].hannon_signature(
            args.minquery, args.maxquery, args.mintarget, args.maxtarget, range(args.minscope, args.maxscope))
        # best effort: items whose signature cannot be accumulated are skipped
        try:
            for overlap in local_frequency_table.keys():
                general_frequency_table[overlap] = general_frequency_table.get(
                    overlap, 0) + local_frequency_table[overlap]
        except:
            pass
        try:
            # weight each item's percentages by its share of the total reads
            for overlap in local_percent_table.keys():
                general_percent_table[overlap] = general_percent_table.get(
                    overlap, 0) + (1. / Total_read_in_objDic * readcount_dic[x] * local_percent_table[overlap])
        except:
            pass
    print >> OUT, "overlap\tnum of pairs\tprobability"
    for classe in sorted(general_frequency_table):
        print >> OUT, "%i\t%i\t%f" % (
            classe, general_frequency_table[classe], general_percent_table[classe])
else:
    # lattice mode: one row per (overlap, item) pair instead of a global sum
    print >> OUT, "overlap\tnum of pairs\tprobability\titem"
    for x in (objDic):
        local_frequency_table = objDic[x].signature(
            args.minquery, args.maxquery, args.mintarget, args.maxtarget, range(args.minscope, args.maxscope))
        local_percent_table = objDic[x].hannon_signature(
            args.minquery, args.maxquery, args.mintarget, args.maxtarget, range(args.minscope, args.maxscope))
        for classe in range(args.minscope, args.maxscope):
            print >> OUT, "%i\t%i\t%f\t%s" % (
                classe, local_frequency_table[classe], local_percent_table[classe], x)
OUT.close()

# Run the R script that is defined in the xml using the Rscript binary
# provided with R.
R_command = "Rscript " + args.rcode
process = subprocess.Popen(R_command.split())
| drosofff/tools-artbio | deprecated/msp_sr_signature/signature.py | Python | mit | 5,451 | [
"Bowtie"
] | 83e6feb2f06ea1e7262787c9aaf3ef872a30f7227c0391236ce05d33ca54a0b5 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import string_types
from copy import deepcopy
import matplotlib.pyplot as plt
from IPython.core.pylabtools import print_figure
from IPython.core.display import Image, SVG
import numpy as np
import pandas as pd
from scipy.spatial.distance import squareform
from skbio._base import SkbioObject
from skbio.stats._misc import _pprint_strs
from skbio.util import find_duplicates
from skbio.util._decorator import experimental
from skbio.util._misc import resolve_key
class DissimilarityMatrixError(Exception):
    """General error for dissimilarity matrix validation failures.

    Base class for all errors raised by this module's validation code.
    """
class DistanceMatrixError(DissimilarityMatrixError):
    """General error for distance matrix validation failures.

    Raised for constraints specific to distance matrices (e.g. symmetry).
    """
class MissingIDError(DissimilarityMatrixError):
    """Error for ID lookup that doesn't exist in the dissimilarity matrix."""

    @experimental(as_of="0.4.0")
    def __init__(self, missing_id):
        super(MissingIDError, self).__init__()
        # store the formatted message as the exception args tuple
        message = "The ID '%s' is not in the dissimilarity matrix." % missing_id
        self.args = (message,)
class DissimilarityMatrix(SkbioObject):
    """Store dissimilarities between objects.

    A `DissimilarityMatrix` instance stores a square, hollow, two-dimensional
    matrix of dissimilarities between objects. Objects could be, for example,
    samples or DNA sequences. A sequence of IDs accompanies the
    dissimilarities.

    Methods are provided to load and save dissimilarity matrices from/to disk,
    as well as perform common operations such as extracting dissimilarities
    based on object ID.

    Parameters
    ----------
    data : array_like or DissimilarityMatrix
        Square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
        (floats), or a structure that can be converted to a ``numpy.ndarray``
        using ``numpy.asarray``. Can instead be a `DissimilarityMatrix` (or
        subclass) instance, in which case the instance's data will be used.
        Data will be converted to a float ``dtype`` if necessary. A copy will
        *not* be made if already a ``numpy.ndarray`` with a float ``dtype``.
    ids : sequence of str, optional
        Sequence of strings to be used as object IDs. Must match the number of
        rows/cols in `data`. If ``None`` (the default), IDs will be
        monotonically-increasing integers cast as strings, with numbering
        starting from zero, e.g., ``('0', '1', '2', '3', ...)``.

    See Also
    --------
    DistanceMatrix

    Notes
    -----
    The dissimilarities are stored in redundant (square-form) format [1]_.

    The data are not checked for symmetry, nor guaranteed/assumed to be
    symmetric.

    References
    ----------
    .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html

    """
    default_write_format = 'lsmat'
    # Used in __str__
    _matrix_element_name = 'dissimilarity'

    @experimental(as_of="0.4.0")
    def __init__(self, data, ids=None):
        if isinstance(data, DissimilarityMatrix):
            # unwrap another matrix instance: reuse its ndarray and, unless
            # explicitly overridden, its IDs
            ids = data.ids if ids is None else ids
            data = data.data
        data = np.asarray(data, dtype='float')

        if ids is None:
            # default IDs are stringified row indices: '0', '1', ...
            ids = (str(i) for i in range(data.shape[0]))
        ids = tuple(ids)

        # validate before assigning so a failed construction leaves no
        # partially-initialized instance state
        self._validate(data, ids)

        self._data = data
        self._ids = ids
        self._id_index = self._index_list(self._ids)

    @property
    @experimental(as_of="0.4.0")
    def data(self):
        """Array of dissimilarities.

        A square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
        (floats). A copy is *not* returned.

        Notes
        -----
        This property is not writeable.

        """
        return self._data

    @property
    @experimental(as_of="0.4.0")
    def ids(self):
        """Tuple of object IDs.

        A tuple of strings, one for each object in the dissimilarity matrix.

        Notes
        -----
        This property is writeable, but the number of new IDs must match the
        number of objects in `data`.

        """
        return self._ids

    @ids.setter
    def ids(self, ids_):
        ids_ = tuple(ids_)
        # re-validate against the existing data before replacing the IDs
        # and rebuilding the ID -> index lookup
        self._validate(self.data, ids_)
        self._ids = ids_
        self._id_index = self._index_list(self._ids)

    @property
    @experimental(as_of="0.4.0")
    def dtype(self):
        """Data type of the dissimilarities."""
        return self.data.dtype

    @property
    @experimental(as_of="0.4.0")
    def shape(self):
        """Two-element tuple containing the dissimilarity matrix dimensions.

        Notes
        -----
        As the dissimilarity matrix is guaranteed to be square, both tuple
        entries will always be equal.

        """
        return self.data.shape

    @property
    @experimental(as_of="0.4.0")
    def size(self):
        """Total number of elements in the dissimilarity matrix.

        Notes
        -----
        Equivalent to ``self.shape[0] * self.shape[1]``.

        """
        return self.data.size

    @property
    @experimental(as_of="0.4.0")
    def T(self):
        """Transpose of the dissimilarity matrix.

        See Also
        --------
        transpose

        """
        return self.transpose()

    @experimental(as_of="0.4.0")
    def transpose(self):
        """Return the transpose of the dissimilarity matrix.

        Notes
        -----
        A deep copy is returned.

        Returns
        -------
        DissimilarityMatrix
            Transpose of the dissimilarity matrix. Will be the same type as
            `self`.

        """
        return self.__class__(self.data.T.copy(), deepcopy(self.ids))

    @experimental(as_of="0.4.0")
    def index(self, lookup_id):
        """Return the index of the specified ID.

        Parameters
        ----------
        lookup_id : str
            ID whose index will be returned.

        Returns
        -------
        int
            Row/column index of `lookup_id`.

        Raises
        ------
        MissingIDError
            If `lookup_id` is not in the dissimilarity matrix.

        """
        if lookup_id in self:
            return self._id_index[lookup_id]
        else:
            raise MissingIDError(lookup_id)

    @experimental(as_of="0.4.0")
    def redundant_form(self):
        """Return an array of dissimilarities in redundant format.

        As this is the native format that the dissimilarities are stored in,
        this is simply an alias for `data`.

        Returns
        -------
        ndarray
            Two-dimensional ``numpy.ndarray`` of dissimilarities in redundant
            format.

        Notes
        -----
        Redundant format is described in [1]_.

        Does *not* return a copy of the data.

        References
        ----------
        .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html

        """
        return self.data

    @experimental(as_of="0.4.0")
    def copy(self):
        """Return a deep copy of the dissimilarity matrix.

        Returns
        -------
        DissimilarityMatrix
            Deep copy of the dissimilarity matrix. Will be the same type as
            `self`.

        """
        # We deepcopy IDs in case the tuple contains mutable objects at some
        # point in the future.
        return self.__class__(self.data.copy(), deepcopy(self.ids))

    @experimental(as_of="0.4.0")
    def filter(self, ids, strict=True):
        """Filter the dissimilarity matrix by IDs.

        Parameters
        ----------
        ids : iterable of str
            IDs to retain. May not contain duplicates or be empty. Each ID must
            be present in the dissimilarity matrix.
        strict : bool, optional
            If `strict` is ``True`` and an ID that is not found in the distance
            matrix is found in `ids`, a ``MissingIDError`` exception will be
            raised, otherwise the ID will be ignored.

        Returns
        -------
        DissimilarityMatrix
            Filtered dissimilarity matrix containing only the IDs specified in
            `ids`. IDs will be in the same order as they appear in `ids`.

        Raises
        ------
        MissingIDError
            If an ID in `ids` is not in the object's list of IDs.

        """
        if strict:
            idxs = [self.index(id_) for id_ in ids]
        else:
            # get the indices to slice the inner numpy array
            idxs = []
            # save the IDs that were found in the distance matrix
            found_ids = []
            for id_ in ids:
                try:
                    idxs.append(self.index(id_))
                    found_ids.append(id_)
                except MissingIDError:
                    pass
            ids = found_ids

        # fancy-index rows then columns to produce the requested submatrix
        filtered_data = self._data[idxs][:, idxs]
        return self.__class__(filtered_data, ids)

    @experimental(as_of="0.4.0")
    def plot(self, cmap=None, title=""):
        """Creates a heatmap of the dissimilarity matrix

        Parameters
        ----------
        cmap: str or matplotlib.colors.Colormap, optional
            Sets the color scheme of the heatmap
            If ``None``, defaults to the colormap specified in the matplotlib
            rc file.

        title: str, optional
            Sets the title label of the heatmap
            (Default is blank)

        Returns
        -------
        matplotlib.figure.Figure
            Figure containing the heatmap and colorbar of the plotted
            dissimilarity matrix.

        Examples
        --------
        .. plot::

           Define a dissimilarity matrix with five objects labeled A-E:

           >>> from skbio.stats.distance import DissimilarityMatrix
           >>> dm = DissimilarityMatrix([[0, 1, 2, 3, 4], [1, 0, 1, 2, 3],
           ...                           [2, 1, 0, 1, 2], [3, 2, 1, 0, 1],
           ...                           [4, 3, 2, 1, 0]],
           ...                          ['A', 'B', 'C', 'D', 'E'])

           Plot the dissimilarity matrix as a heatmap:

           >>> fig = dm.plot(cmap='Reds', title='Example heatmap')

        """
        # based on http://stackoverflow.com/q/14391959/3776794
        fig, ax = plt.subplots()

        # use pcolormesh instead of pcolor for performance
        heatmap = ax.pcolormesh(self.data, cmap=cmap)
        fig.colorbar(heatmap)

        # center labels within each cell
        ticks = np.arange(0.5, self.shape[0])
        ax.set_xticks(ticks, minor=False)
        ax.set_yticks(ticks, minor=False)

        # Ensure there is no white border around the heatmap by manually
        # setting the limits
        ax.set_ylim(0, len(self.ids))
        ax.set_xlim(0, len(self.ids))

        # display data as it is stored in the dissimilarity matrix
        # (default is to have y-axis inverted)
        ax.invert_yaxis()

        ax.set_xticklabels(self.ids, rotation=90, minor=False)
        ax.set_yticklabels(self.ids, minor=False)

        ax.set_title(title)

        return fig

    def _repr_png_(self):
        # IPython rich-display hook: render the heatmap as PNG bytes
        return self._figure_data('png')

    def _repr_svg_(self):
        # IPython rich-display hook: render the heatmap as SVG text
        return self._figure_data('svg')

    @property
    @experimental(as_of="0.4.0")
    def png(self):
        """Display heatmap in IPython Notebook as PNG.

        """
        return Image(self._repr_png_(), embed=True)

    @property
    @experimental(as_of="0.4.0")
    def svg(self):
        """Display heatmap in IPython Notebook as SVG.

        """
        return SVG(self._repr_svg_())

    def _figure_data(self, format):
        # Render the heatmap figure to the requested image format.
        fig = self.plot()
        data = print_figure(fig, format)
        # We MUST close the figure, otherwise IPython's display machinery
        # will pick it up and send it as output, resulting in a double display
        plt.close(fig)
        return data

    @experimental(as_of="0.4.0")
    def __str__(self):
        """Return a string representation of the dissimilarity matrix.

        Summary includes matrix dimensions, a (truncated) list of IDs, and
        (truncated) array of dissimilarities.

        Returns
        -------
        str
            String representation of the dissimilarity matrix.

        """
        return '%dx%d %s matrix\nIDs:\n%s\nData:\n' % (
            self.shape[0], self.shape[1], self._matrix_element_name,
            _pprint_strs(self.ids)) + str(self.data)

    @experimental(as_of="0.4.0")
    def __eq__(self, other):
        """Compare this dissimilarity matrix to another for equality.

        Two dissimilarity matrices are equal if they have the same shape, IDs
        (in the same order!), and have data arrays that are equal.

        Checks are *not* performed to ensure that `other` is a
        `DissimilarityMatrix` instance.

        Parameters
        ----------
        other : DissimilarityMatrix
            Dissimilarity matrix to compare to for equality.

        Returns
        -------
        bool
            ``True`` if `self` is equal to `other`, ``False`` otherwise.

        """
        equal = True

        # The order these checks are performed in is important to be as
        # efficient as possible. The check for shape equality is not strictly
        # necessary as it should be taken care of in np.array_equal, but I'd
        # rather explicitly bail before comparing IDs or data. Use array_equal
        # instead of (a == b).all() because of this issue:
        # http://stackoverflow.com/a/10582030
        try:
            if self.shape != other.shape:
                equal = False
            elif self.ids != other.ids:
                equal = False
            elif not np.array_equal(self.data, other.data):
                equal = False
        except AttributeError:
            # `other` lacks a required attribute; not comparable, not equal
            equal = False

        return equal

    @experimental(as_of="0.4.0")
    def __ne__(self, other):
        """Determine whether two dissimilarity matrices are not equal.

        Parameters
        ----------
        other : DissimilarityMatrix
            Dissimilarity matrix to compare to.

        Returns
        -------
        bool
            ``True`` if `self` is not equal to `other`, ``False`` otherwise.

        See Also
        --------
        __eq__

        """
        return not self == other

    @experimental(as_of="0.4.0")
    def __contains__(self, lookup_id):
        """Check if the specified ID is in the dissimilarity matrix.

        Parameters
        ----------
        lookup_id : str
            ID to search for.

        Returns
        -------
        bool
            ``True`` if `lookup_id` is in the dissimilarity matrix, ``False``
            otherwise.

        See Also
        --------
        index

        """
        return lookup_id in self._id_index

    @experimental(as_of="0.4.0")
    def __getitem__(self, index):
        """Slice into dissimilarity data by object ID or numpy indexing.

        Extracts data from the dissimilarity matrix by object ID, a pair of
        IDs, or numpy indexing/slicing.

        Parameters
        ----------
        index : str, two-tuple of str, or numpy index
            `index` can be one of the following forms: an ID, a pair of IDs, or
            a numpy index.

            If `index` is a string, it is assumed to be an ID and a
            ``numpy.ndarray`` row vector is returned for the corresponding ID.
            Note that the ID's row of dissimilarities is returned, *not* its
            column. If the matrix is symmetric, the two will be identical, but
            this makes a difference if the matrix is asymmetric.

            If `index` is a two-tuple of strings, each string is assumed to be
            an ID and the corresponding matrix element is returned that
            represents the dissimilarity between the two IDs. Note that the
            order of lookup by ID pair matters if the matrix is asymmetric: the
            first ID will be used to look up the row, and the second ID will be
            used to look up the column. Thus, ``dm['a', 'b']`` may not be the
            same as ``dm['b', 'a']`` if the matrix is asymmetric.

            Otherwise, `index` will be passed through to
            ``DissimilarityMatrix.data.__getitem__``, allowing for standard
            indexing of a ``numpy.ndarray`` (e.g., slicing).

        Returns
        -------
        ndarray or scalar
            Indexed data, where return type depends on the form of `index` (see
            description of `index` for more details).

        Raises
        ------
        MissingIDError
            If the ID(s) specified in `index` are not in the dissimilarity
            matrix.

        Notes
        -----
        The lookup based on ID(s) is quick.

        """
        if isinstance(index, string_types):
            # single ID -> that ID's row of dissimilarities
            return self.data[self.index(index)]
        elif self._is_id_pair(index):
            # (row ID, column ID) -> single matrix element
            return self.data[self.index(index[0]), self.index(index[1])]
        else:
            # anything else is delegated to numpy indexing
            return self.data.__getitem__(index)

    def _validate(self, data, ids):
        """Validate the data array and IDs.

        Checks that the data is at least 1x1 in size, 2D, square, hollow, and
        contains only floats. Also checks that IDs are unique and that the
        number of IDs matches the number of rows/cols in the data array.

        Subclasses can override this method to perform different/more specific
        validation (e.g., see `DistanceMatrix`).

        Notes
        -----
        Accepts arguments instead of inspecting instance attributes to avoid
        creating an invalid dissimilarity matrix before raising an error.
        Otherwise, the invalid dissimilarity matrix could be used after the
        exception is caught and handled.

        """
        if 0 in data.shape:
            raise DissimilarityMatrixError("Data must be at least 1x1 in "
                                           "size.")
        if len(data.shape) != 2:
            raise DissimilarityMatrixError("Data must have exactly two "
                                           "dimensions.")
        if data.shape[0] != data.shape[1]:
            raise DissimilarityMatrixError("Data must be square (i.e., have "
                                           "the same number of rows and "
                                           "columns).")
        if data.dtype != np.double:
            raise DissimilarityMatrixError("Data must contain only floating "
                                           "point values.")
        # hollow check: a zero trace implies an all-zero diagonal because
        # negative dissimilarities do not cancel in practice
        if np.trace(data) != 0:
            raise DissimilarityMatrixError("Data must be hollow (i.e., the "
                                           "diagonal can only contain zeros).")
        duplicates = find_duplicates(ids)
        if duplicates:
            formatted_duplicates = ', '.join(repr(e) for e in duplicates)
            raise DissimilarityMatrixError("IDs must be unique. Found the "
                                           "following duplicate IDs: %s" %
                                           formatted_duplicates)
        if len(ids) != data.shape[0]:
            raise DissimilarityMatrixError("The number of IDs (%d) must match "
                                           "the number of rows/columns in the "
                                           "data (%d)." %
                                           (len(ids), data.shape[0]))

    def _index_list(self, list_):
        # Map each ID to its row/column position for O(1) lookups.
        return {id_: idx for idx, id_ in enumerate(list_)}

    def _is_id_pair(self, index):
        # True when `index` is a 2-tuple of strings (an ID pair lookup).
        return (isinstance(index, tuple) and
                len(index) == 2 and
                all(map(lambda e: isinstance(e, string_types), index)))
class DistanceMatrix(DissimilarityMatrix):
    """Store distances between objects.
    A `DistanceMatrix` is a `DissimilarityMatrix` with the additional
    requirement that the matrix data is symmetric. There are additional methods
    made available that take advantage of this symmetry.
    See Also
    --------
    DissimilarityMatrix
    Notes
    -----
    The distances are stored in redundant (square-form) format [1]_. To
    facilitate use with other scientific Python routines (e.g., scipy), the
    distances can be retrieved in condensed (vector-form) format using
    `condensed_form`.
    `DistanceMatrix` only requires that the distances it stores are symmetric.
    Checks are *not* performed to ensure the other three metric properties
    hold (non-negativity, identity of indiscernibles, and triangle inequality)
    [2]_. Thus, a `DistanceMatrix` instance can store distances that are not
    metric.
    References
    ----------
    .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
    .. [2] http://planetmath.org/metricspace
    """
    # Override here, used in superclass __str__
    _matrix_element_name = 'distance'
    @classmethod
    @experimental(as_of="0.4.0-dev")
    def from_iterable(cls, iterable, metric, key=None, keys=None):
        """Create DistanceMatrix from all pairs in an iterable given a metric.
        Parameters
        ----------
        iterable : iterable
            Iterable containing objects to compute pairwise distances on.
        metric : callable
            A function that takes two arguments and returns a float
            representing the distance between the two arguments.
        key : callable or metadata key, optional
            A function that takes one argument and returns a string
            representing the id of the element in the distance matrix.
            Alternatively, a key to a `metadata` property if it exists for
            each element in the `iterable`. If None, then default ids will be
            used.
        keys : iterable, optional
            An iterable of the same length as `iterable`. Each element will be
            used as the respective key.
        Returns
        -------
        DistanceMatrix
            The `metric` applied to all pairwise elements in the `iterable`.
        Raises
        ------
        ValueError
            If `key` and `keys` are both provided.
        Notes
        -----
        Symmetry and hollowness are assumed when calculating the distances via
        `metric`. Therefore, distances are only computed for the strictly
        upper/lower triangle.
        """
        # Materialize once: the iterable is traversed multiple times below.
        iterable = list(iterable)
        if key is not None and keys is not None:
            raise ValueError("Cannot use both `key` and `keys` at the same"
                             " time.")
        keys_ = None
        if key is not None:
            keys_ = [resolve_key(e, key) for e in iterable]
        elif keys is not None:
            keys_ = keys
        dm = np.zeros((len(iterable),) * 2)
        # Compute only the strictly-lower triangle and mirror each value;
        # the diagonal is left at zero (hollow). `metric` is never called
        # on identical positions.
        for i, a in enumerate(iterable):
            for j, b in enumerate(iterable[:i]):
                dm[i, j] = dm[j, i] = metric(a, b)
        return cls(dm, keys_)
    @experimental(as_of="0.4.0")
    def condensed_form(self):
        """Return an array of distances in condensed format.
        Returns
        -------
        ndarray
            One-dimensional ``numpy.ndarray`` of distances in condensed format.
        Notes
        -----
        Condensed format is described in [1]_.
        The conversion is not a constant-time operation, though it should be
        relatively quick to perform.
        References
        ----------
        .. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
        """
        # checks=False: symmetry/hollowness were already enforced by _validate.
        return squareform(self._data, force='tovector', checks=False)
    @experimental(as_of="0.4.0")
    def permute(self, condensed=False):
        """Randomly permute both rows and columns in the matrix.
        Randomly permutes the ordering of rows and columns in the matrix. The
        same permutation is applied to both rows and columns in order to
        maintain symmetry and hollowness. Only the rows/columns in the distance
        matrix are permuted; the IDs are *not* permuted.
        Parameters
        ----------
        condensed : bool, optional
            If ``True``, return the permuted distance matrix in condensed
            format. Otherwise, return the permuted distance matrix as a new
            ``DistanceMatrix`` instance.
        Returns
        -------
        DistanceMatrix or ndarray
            Permuted distances as a new ``DistanceMatrix`` or as a ``ndarray``
            in condensed format.
        See Also
        --------
        condensed_form
        Notes
        -----
        This method does not modify the distance matrix that it is called on.
        It is more efficient to pass ``condensed=True`` than permuting the
        distance matrix and then converting to condensed format.
        """
        order = np.random.permutation(self.shape[0])
        # Apply the same ordering to rows and columns to preserve symmetry.
        permuted = self._data[order][:, order]
        if condensed:
            return squareform(permuted, force='tovector', checks=False)
        else:
            return self.__class__(permuted, self.ids)
    def _validate(self, data, ids):
        """Validate the data array and IDs.
        Overrides the superclass `_validate`. Performs a check for symmetry in
        addition to the checks performed in the superclass.
        """
        super(DistanceMatrix, self)._validate(data, ids)
        # Element-wise comparison with the transpose detects any asymmetry.
        if (data.T != data).any():
            raise DistanceMatrixError("Data must be symmetric.")
@experimental(as_of="0.4.0")
def randdm(num_objects, ids=None, constructor=None, random_fn=None):
    """Generate a distance matrix populated with random distances.

    With the default `random_fn`, entries are drawn uniformly from ``[0, 1)``.
    Whatever generator is used, the result is forced to be symmetric and
    hollow.

    Parameters
    ----------
    num_objects : int
        Number of rows/columns in the resulting matrix (e.g., 3 gives a 3x3
        matrix).
    ids : sequence of str or None, optional
        IDs to attach; ``len(ids)`` must equal `num_objects`. When omitted,
        monotonically-increasing integers cast as strings are used,
        numbering from 1 (e.g., ``('1', '2', '3')``).
    constructor : type, optional
        `DissimilarityMatrix` or subclass to instantiate; defaults to
        `DistanceMatrix` when ``None``.
    random_fn : function, optional
        Callable accepting (number of rows, number of columns) and returning
        a 2D ``numpy.ndarray`` of floats (or values castable to float);
        defaults to ``numpy.random.rand`` when ``None``.

    Returns
    -------
    DissimilarityMatrix
        Instance of `constructor` filled with random distances.

    See Also
    --------
    numpy.random.rand
    """
    matrix_cls = DistanceMatrix if constructor is None else constructor
    draw = np.random.rand if random_fn is None else random_fn
    # Keep only the strictly-lower triangle of the random draw, then mirror
    # it: the sum is symmetric and its diagonal is exactly zero (hollow).
    lower = np.tril(draw(num_objects, num_objects), -1)
    data = lower + lower.T
    if not ids:
        ids = map(str, range(1, num_objects + 1))
    return matrix_cls(data, ids)
# helper functions for anosim and permanova
def _preprocess_input(distance_matrix, grouping, column):
    """Compute intermediate results not affected by permutations.
    These intermediate results can be computed a single time for efficiency,
    regardless of grouping vector permutations (i.e., when calculating the
    p-value). These intermediate results are used by both ANOSIM and PERMANOVA.
    Also validates and normalizes input (e.g., converting ``DataFrame`` column
    into grouping vector).
    """
    if not isinstance(distance_matrix, DistanceMatrix):
        raise TypeError("Input must be a DistanceMatrix.")
    # `column` is only meaningful together with a DataFrame `grouping`, and
    # a DataFrame `grouping` requires a `column` -- reject any mismatch.
    if isinstance(grouping, pd.DataFrame):
        if column is None:
            raise ValueError(
                "Must provide a column name if supplying a DataFrame.")
        else:
            grouping = _df_to_vector(distance_matrix, grouping, column)
    elif column is not None:
        raise ValueError(
            "Must provide a DataFrame if supplying a column name.")
    sample_size = distance_matrix.shape[0]
    if len(grouping) != sample_size:
        raise ValueError(
            "Grouping vector size must match the number of IDs in the "
            "distance matrix.")
    # Find the group labels and convert grouping to an integer vector
    # (factor).
    groups, grouping = np.unique(grouping, return_inverse=True)
    num_groups = len(groups)
    # Degenerate cases: every object in its own group (no 'within'
    # distances) or everything in a single group (no 'between' distances).
    if num_groups == len(grouping):
        raise ValueError(
            "All values in the grouping vector are unique. This method cannot "
            "operate on a grouping vector with only unique values (e.g., "
            "there are no 'within' distances because each group of objects "
            "contains only a single object).")
    if num_groups == 1:
        raise ValueError(
            "All values in the grouping vector are the same. This method "
            "cannot operate on a grouping vector with only a single group of "
            "objects (e.g., there are no 'between' distances because there is "
            "only a single group).")
    # Upper-triangle indices (k=1 skips the diagonal) pair element-wise with
    # the condensed-form distances returned below.
    tri_idxs = np.triu_indices(sample_size, k=1)
    distances = distance_matrix.condensed_form()
    return sample_size, num_groups, grouping, tri_idxs, distances
def _df_to_vector(distance_matrix, df, column):
    """Return a grouping vector from a ``DataFrame`` column.

    Parameters
    ----------
    distance_matrix : DistanceMatrix
        Distance matrix whose IDs will be mapped to group labels.
    df : pandas.DataFrame
        ``DataFrame`` (indexed by distance matrix ID).
    column : str
        Column name in `df` containing group labels.

    Returns
    -------
    list
        Group labels ordered by the IDs in `distance_matrix`; each ID's
        label is looked up in `df` under `column`.

    Raises
    ------
    ValueError
        If `column` is not in the ``DataFrame``, or a distance matrix ID is
        not in the ``DataFrame``.
    """
    if column not in df:
        raise ValueError("Column '%s' not in DataFrame." % column)
    labels = df.loc[distance_matrix.ids, column]
    # .loc inserts NaN for IDs missing from the frame's index; any null
    # therefore indicates an unmatched distance-matrix ID.
    if labels.isnull().any():
        raise ValueError(
            "One or more IDs in the distance matrix are not in the data "
            "frame.")
    return labels.tolist()
def _run_monte_carlo_stats(test_stat_function, grouping, permutations):
    """Run stat test and compute significance with Monte Carlo permutations."""
    if permutations < 0:
        raise ValueError(
            "Number of permutations must be greater than or equal to zero.")
    observed = test_stat_function(grouping)
    # Zero permutations: the p-value is undefined.
    if permutations == 0:
        return observed, np.nan
    perm_stats = np.empty(permutations, dtype=np.float64)
    for trial in range(permutations):
        shuffled = np.random.permutation(grouping)
        perm_stats[trial] = test_stat_function(shuffled)
    # +1 in numerator and denominator counts the observed statistic itself,
    # so the p-value can never be exactly zero.
    p_value = ((perm_stats >= observed).sum() + 1) / (permutations + 1)
    return observed, p_value
def _build_results(method_name, test_stat_name, sample_size, num_groups, stat,
                   p_value, permutations):
    """Package the results of a statistical test as a labelled ``pandas.Series``."""
    labels = ['method name', 'test statistic name', 'sample size',
              'number of groups', 'test statistic', 'p-value',
              'number of permutations']
    values = [method_name, test_stat_name, sample_size, num_groups, stat,
              p_value, permutations]
    return pd.Series(data=values, index=labels,
                     name='%s results' % method_name)
| corburn/scikit-bio | skbio/stats/distance/_base.py | Python | bsd-3-clause | 32,394 | [
"scikit-bio"
] | a9c0fdec1e7481973d388cd1070bb191f4ecb45970ea6b12cfd4f654ee001c7f |
#!/usr/bin/env python
# Translated from Hanoi.cxx.
import vtk
class GV(object):
    """
    Holds the global state shared by the Towers of Hanoi animation.
    """

    def __init__(self, numberOfPucks=5, numberOfSteps=5, puckResolution=48, configuration=0):
        self.numberOfPucks = numberOfPucks
        self.numberOfSteps = numberOfSteps
        self.puckResolution = puckResolution
        self.configuration = configuration
        # Set to True when configuration == 2 so the recursion can bail out.
        self.gotFigure2 = False
        self.L = 1.0                                 # Height of a single puck.
        self.R = 0.5                                 # Radius of a peg.
        self.H = 1.1 * self.numberOfPucks * self.L   # Height of a peg.
        self.rMin = 4.0 * self.R                     # Smallest allowed disk radius.
        self.rMax = 12.0 * self.R                    # Largest allowed disk radius.
        self.D = 1.1 * 1.25 * self.rMax              # Spacing between pegs.
        self.numberOfMoves = 0

    def update(self, numberOfPucks, numberOfSteps, puckResolution, configuration):
        """Re-apply user parameters and refresh the derived peg height."""
        self.numberOfPucks = numberOfPucks
        self.numberOfSteps = numberOfSteps
        self.puckResolution = puckResolution
        self.configuration = configuration
        self.H = 1.1 * self.numberOfPucks * self.L   # Peg height depends on puck count.
# Globals
# Shared simulation state; MovePuck/Hanoi/verify_parameters mutate it.
gv = GV()
# Module-level render window so MovePuck and hanoi can re-render each frame.
renWin = vtk.vtkRenderWindow()
"""
For pegStack we use a list of lists where the sublists correspond to the
source, target and helper pegs.
Python lists can be used as a stack since they have append() (corresponding
to push()) and pop().
"""
pegStack = [[], [], []]
def hanoi():
    """Build the VTK scene, animate the Towers of Hanoi solution, then start
    the interactor. Uses the module-level `gv`, `renWin` and `pegStack`."""
    colors = vtk.vtkNamedColors()
    # Create the renderer and render window interactor.
    ren = vtk.vtkRenderer()
    renWin.AddRenderer(ren)
    renWin.SetSize(1200, 750)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    ren.SetBackground(colors.GetColor3d("PapayaWhip"))
    camera = vtk.vtkCamera()
    camera.SetPosition(41.0433, 27.9637, 30.442)
    camera.SetFocalPoint(11.5603, -1.51931, 0.95899)
    camera.SetClippingRange(18.9599, 91.6042)
    camera.SetViewUp(0, 1, 0)
    ren.SetActiveCamera(camera)
    # Create geometry: table, pegs, and pucks.
    pegGeometry = vtk.vtkCylinderSource()
    pegGeometry.SetResolution(8)
    pegMapper = vtk.vtkPolyDataMapper()
    pegMapper.SetInputConnection(pegGeometry.GetOutputPort())
    puckGeometry = vtk.vtkCylinderSource()
    puckGeometry.SetResolution(gv.puckResolution)
    puckMapper = vtk.vtkPolyDataMapper()
    puckMapper.SetInputConnection(puckGeometry.GetOutputPort())
    tableGeometry = vtk.vtkPlaneSource()
    tableGeometry.SetResolution(10, 10)
    tableMapper = vtk.vtkPolyDataMapper()
    tableMapper.SetInputConnection(tableGeometry.GetOutputPort())
    # Create the actors: table top, pegs, and pucks
    # The table
    table = vtk.vtkActor()
    ren.AddActor(table)
    table.SetMapper(tableMapper)
    # table.GetProperty().SetColor(0.9569, 0.6431, 0.3765)
    table.GetProperty().SetColor(colors.GetColor3d("SaddleBrown"))
    table.AddPosition(gv.D, 0, 0)
    table.SetScale(4 * gv.D, 2 * gv.D, 3 * gv.D)
    table.RotateX(90)
    # The pegs (using cylinder geometry). Note that the pegs have to translated
    # in the y-direction because the cylinder is centered about the origin.
    gv.H = 1.1 * gv.numberOfPucks * gv.L
    peg = list()
    for i in range(0, 3):
        peg.append(vtk.vtkActor())
        ren.AddActor(peg[i])
        peg[i].SetMapper(pegMapper)
        # peg[i].GetProperty().SetColor(1, 1, 1)
        peg[i].GetProperty().SetColor(colors.GetColor3d("Lavender"))
        peg[i].AddPosition(i * gv.D, gv.H / 2, 0)
        peg[i].SetScale(1, gv.H, 1)
    # The pucks (using cylinder geometry). Always loaded on peg# 0.
    puck = list()
    randomSequence = vtk.vtkMinimalStandardRandomSequence()
    randomSequence.SetSeed(1)
    for i in range(0, gv.numberOfPucks):
        puck.append(vtk.vtkActor())
        puck[i].SetMapper(puckMapper)
        # Random but reproducible puck colour (fixed seed above).
        color = [0, 0, 0]
        for j in range(0, 3):
            color[j] = randomSequence.GetValue()
            randomSequence.Next()
        puck[i].GetProperty().SetColor(*color)
        puck[i].AddPosition(0, i * gv.L + gv.L / 2, 0)
        # Radii shrink linearly from rMax (bottom puck) to rMin (top puck).
        scale = gv.rMax - i * (gv.rMax - gv.rMin) / (gv.numberOfPucks - 1)
        puck[i].SetScale(scale, 1, scale)
        ren.AddActor(puck[i])
        pegStack[0].append(puck[i])
    # Reset the camera to view all actors.
    renWin.Render()
    renWin.SetWindowName("Towers of Hanoi")
    if gv.configuration == 3:
        WriteImage("hanoi0.png", renWin, rgba=False)
    if gv.configuration != 1:
        # Begin recursion.
        Hanoi(gv.numberOfPucks - 1, 0, 2, 1)
        Hanoi(1, 0, 1, 2)
        if not gv.gotFigure2:
            Hanoi(gv.numberOfPucks - 1, 2, 1, 0)
        renWin.Render()
        if gv.configuration == 3:
            WriteImage("hanoi2.png", renWin, rgba=False)
    # Report output.
    s = 'Number of moves: {:d}\nPolygons rendered each frame: {:d}\nTotal number of frames: {:d}'
    print(s.format(gv.numberOfMoves, 3 * 8 + 1 + gv.numberOfPucks * (2 + gv.puckResolution),
                   gv.numberOfMoves * 3 * gv.numberOfSteps))
    iren.AddObserver('EndInteractionEvent', OrientationObserver(ren.GetActiveCamera()))
    # Render the image.
    iren.Initialize()
    iren.Start()
def main():
    """Entry point: validate the command-line parameters, then animate."""
    max_pucks = 20
    if verify_parameters(max_pucks):
        hanoi()
def get_program_parameters():
    """Parse the command line; return (pucks, steps, resolution, configuration)."""
    import argparse
    description = 'Towers of Hanoi. .'
    epilogue = '''
   Where: -p specifies the number of pucks.
          -s specifies the number of steps.
          -r specifies the puck resolution.
          -c specifies configuration.
           0 final configuration.
           1 initial configuration.
           2 intermediate configuration.
           3 final configuration and save images
   Defaults: -p 5 -s 5 -r 48 -c 0
'''
    parser = argparse.ArgumentParser(description=description, epilog=epilogue,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--numberOfPucks', '-p', default=5, type=int, nargs='?', help='The number of pucks.')
    parser.add_argument('--numberOfSteps', '-s', default=5, type=int, nargs='?', help='The number of steps.')
    parser.add_argument('--puckResolution', '-r', default=48, type=int, nargs='?', help='The puck resolution.')
    parser.add_argument('--configuration', '-c', default=0, type=int, nargs='?', help='The configuration.')
    args = parser.parse_args()
    return args.numberOfPucks, args.numberOfSteps, args.puckResolution, args.configuration
def verify_parameters(maxPucks):
    """
    Read the command-line parameters, validate them and, if they are all
    acceptable, commit them to the shared global state ``gv``.

    :param maxPucks: Maximum allowed number of pucks.
    :return: True if every parameter is acceptable, False otherwise.
    """
    numberOfPucks, numberOfSteps, puckResolution, configuration = get_program_parameters()
    # Negative inputs are treated as their magnitude.
    numberOfPucks = abs(numberOfPucks)
    numberOfSteps = abs(numberOfSteps)
    puckResolution = abs(puckResolution)
    configuration = abs(configuration)
    check = True
    if numberOfPucks < 2:
        print('Please use more pucks!')
        check = False
    if numberOfPucks > maxPucks:
        print('Too many pucks specified! Maximum is', maxPucks)
        check = False
    if numberOfSteps < 3:
        print('Please use more steps!')
        check = False
    if configuration > 3:
        # Fixed message: the original printed the nonsensical
        # '0 >= configuration <= 3'.
        print('0 <= configuration <= 3')
        check = False
    if check:
        # Only commit validated parameters to the global state.
        gv.update(numberOfPucks, numberOfSteps, puckResolution, configuration)
    return check
def MovePuck(peg1, peg2):
    """
    This routine is responsible for moving pucks from peg1 to peg2.
    The move is animated in three phases (lift, slide across while flipping,
    lower), re-rendering the window after every step.
    :param peg1: Initial peg.
    :param peg2: Final peg.
    :return:
    """
    gv.numberOfMoves += 1
    # Get the actor to move
    movingActor = pegStack[peg1].pop()
    # Get the distance to move up.
    # NOTE(review): len() is evaluated *after* the pop, so the lift height is
    # measured relative to the reduced stack -- confirm against the original
    # C++ Hanoi.cxx if this ever looks off.
    distance = (gv.H - (gv.L * (len(pegStack[peg1]) - 1)) + gv.rMax) / gv.numberOfSteps
    for i in range(0, gv.numberOfSteps):
        movingActor.AddPosition(0, distance, 0)
        renWin.Render()
    # Get the distance to move across
    distance = (peg2 - peg1) * gv.D / gv.numberOfSteps
    flipAngle = 180.0 / gv.numberOfSteps
    for i in range(0, gv.numberOfSteps):
        movingActor.AddPosition(distance, 0, 0)
        movingActor.RotateX(flipAngle)
        renWin.Render()
        if gv.numberOfMoves == 13 and i == 3:  # for making book image
            if gv.configuration == 3 or gv.configuration == 2:
                # Temporarily switch to a dedicated camera for the figure.
                cam = renWin.GetRenderers().GetFirstRenderer().GetActiveCamera()
                camera1 = vtk.vtkCamera()
                camera1.SetPosition(54.7263, 41.6467, 44.125)
                camera1.SetFocalPoint(11.5603, -1.51931, 0.95899)
                camera1.SetClippingRange(42.4226, 115.659)
                camera1.SetViewUp(0, 1, 0)
                renWin.GetRenderers().GetFirstRenderer().SetActiveCamera(camera1)
                renWin.Render()
                if gv.configuration == 3:
                    WriteImage("hanoi1.png", renWin, rgba=False)
                if gv.configuration == 2:
                    # Intermediate figure captured: stop animating this move.
                    gv.gotFigure2 = True
                    break
                # Configuration 3 continues: restore the original camera.
                renWin.GetRenderers().GetFirstRenderer().SetActiveCamera(cam)
                renWin.Render()
    if gv.gotFigure2:
        # Bailing out early; the puck still joins the destination stack so
        # the bookkeeping stays consistent.
        pegStack[peg2].append(movingActor)
        return
    # Get the distance to move down.
    distance = ((gv.L * (len(pegStack[peg2]) - 1)) - gv.H - gv.rMax) / gv.numberOfSteps
    for i in range(0, gv.numberOfSteps):
        movingActor.AddPosition(0, distance, 0)
        renWin.Render()
    pegStack[peg2].append(movingActor)
def Hanoi(n, peg1, peg2, peg3):
    """
    Tower of Hanoi: recursively move ``n`` pucks from peg1 to peg2, using
    peg3 as the spare peg.
    :param n: Number of disks.
    :param peg1: Source
    :param peg2: Target
    :param peg3: Helper
    :return:
    """
    # Once the intermediate figure has been captured, unwind without moving.
    if gv.gotFigure2:
        return
    if n == 1:
        # Base case: physically animate the single-puck move.
        MovePuck(peg1, peg2)
        return
    Hanoi(n - 1, peg1, peg3, peg2)
    if gv.gotFigure2:
        return
    Hanoi(1, peg1, peg2, peg3)
    Hanoi(n - 1, peg3, peg2, peg1)
class OrientationObserver(object):
    """Callable VTK observer that reports the camera orientation on events."""

    def __init__(self, cam):
        # Camera whose parameters are printed each time the observer fires.
        self.cam = cam

    def __call__(self, caller, ev):
        # Show who invoked the callback and which event triggered it ...
        print(caller.GetClassName(), "Event Id:", ev)
        # ... then dump the camera's current orientation.
        CameraOrientation(self.cam)
def CameraOrientation(cam):
    """Print the camera's position, orientation and distance to stdout."""
    label = "{:>15s}"
    number = "{:9.6g}"

    def fmt_triple(values):
        # Comma-separated rendering of a 3-component camera vector.
        return ', '.join(number.format(v) for v in values)

    print(label.format("Position:"), fmt_triple(cam.GetPosition()))
    print(label.format("Focal point:"), fmt_triple(cam.GetFocalPoint()))
    print(label.format("Clipping range:"), fmt_triple(cam.GetClippingRange()))
    print(label.format("View up:"), fmt_triple(cam.GetViewUp()))
    print(label.format("Distance:"), number.format(cam.GetDistance()))
def WriteImage(fileName, renWin1, rgba=True):
    """
    Write the render window view to an image file.
    Image types supported are:
     BMP, JPEG, PNM, PNG, PostScript, TIFF.
    The default parameters are used for all writers, change as needed.
    :param fileName: The file name, if no extension then PNG is assumed.
    :param renWin1: The render window.
    :param rgba: Used to set the buffer type.
    :return:
    """
    import os
    if not fileName:
        raise RuntimeError('Need a filename.')
    # Select the writer to use from the (lower-cased) file extension.
    path, ext = os.path.splitext(fileName)
    ext = ext.lower()
    if not ext:
        ext = '.png'
        fileName = fileName + ext
    if ext == '.bmp':
        writer = vtk.vtkBMPWriter()
    elif ext in ('.jpg', '.jpeg'):
        # Fixed: '.jpeg' previously fell through to the PNG writer, producing
        # a PNG file with a misleading '.jpeg' extension.
        writer = vtk.vtkJPEGWriter()
    elif ext == '.pnm':
        writer = vtk.vtkPNMWriter()
    elif ext == '.ps':
        # PostScript cannot store an alpha channel.
        if rgba:
            rgba = False
        writer = vtk.vtkPostScriptWriter()
    elif ext in ('.tiff', '.tif'):
        # Fixed: '.tif' previously fell through to the PNG writer as well.
        writer = vtk.vtkTIFFWriter()
    else:
        # Unknown extension: default to PNG.
        writer = vtk.vtkPNGWriter()
    windowto_image_filter = vtk.vtkWindowToImageFilter()
    windowto_image_filter.SetInput(renWin1)
    windowto_image_filter.SetScale(1)  # image quality
    if rgba:
        windowto_image_filter.SetInputBufferTypeToRGBA()
    else:
        windowto_image_filter.SetInputBufferTypeToRGB()
    # Read from the front buffer.
    windowto_image_filter.ReadFrontBufferOff()
    windowto_image_filter.Update()
    writer.SetFileName(fileName)
    writer.SetInputConnection(windowto_image_filter.GetOutputPort())
    writer.Write()
# Run the demo only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/Visualization/Hanoi.py | Python | apache-2.0 | 12,641 | [
"VTK"
] | dd47338e91fbe40981ef0c44db4ad027e5c44bf8bf515fda4259e609355d5d20 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sip
# Force the PyQt4 v2 APIs so Qt value types map onto native Python types
# (e.g. str instead of QString). Must run before the first PyQt4 import.
sip.setapi('QDate', 2)
sip.setapi('QDateTime', 2)
sip.setapi('QString', 2)
sip.setapi('QTextStream', 2)
sip.setapi('QTime', 2)
sip.setapi('QUrl', 2)
sip.setapi('QVariant', 2)
from PyQt4 import QtCore, QtGui
import sys
import os
import api.character
import api.character.spells
# Locate the application directory. __file__ is undefined when running as
# the main py2exe-frozen script, hence the NameError fallback to argv[0].
here = ''
try:
    here = os.path.abspath(os.path.dirname(__file__))
except NameError:  # We are the main py2exe script, not a module
    here = os.path.dirname(os.path.abspath(sys.argv[0]))
parent = os.path.abspath(os.path.dirname(here))
# Make sibling modules (widgets, dialogs, sinks, ...) importable.
sys.path.append(here)
import mimetypes
import widgets
import dialogs
import sinks
import api.data.clans
import api.data.families
import api.data.schools
import api.character
import api.character.spells
import api.character.skills
import api.rules
from l5rcmcore import *
from util import log
def new_small_le(parent=None, ro=True):
    """Create a compact fixed-size QLineEdit (read-only by default)."""
    line_edit = QtGui.QLineEdit(parent)
    line_edit.setSizePolicy(QtGui.QSizePolicy.Maximum,
                            QtGui.QSizePolicy.Maximum)
    line_edit.setMaximumSize(QtCore.QSize(32, 24))
    line_edit.setReadOnly(ro)
    return line_edit
def new_horiz_line(parent=None):
    """Create a sunken horizontal separator line that expands horizontally."""
    separator = QtGui.QFrame(parent)
    separator.setObjectName("hline")
    separator.setGeometry(QtCore.QRect(3, 3, 3, 3))
    separator.setFrameShape(QtGui.QFrame.HLine)
    separator.setFrameShadow(QtGui.QFrame.Sunken)
    separator.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
    return separator
def new_vert_line(parent=None):
    """Create a sunken vertical separator line."""
    separator = QtGui.QFrame(parent)
    separator.setObjectName("vline")
    separator.setGeometry(QtCore.QRect(320, 150, 118, 3))
    separator.setFrameShape(QtGui.QFrame.VLine)
    separator.setFrameShadow(QtGui.QFrame.Sunken)
    return separator
def new_item_groupbox(name, widget):
    """Wrap ``widget`` in a titled QGroupBox that shares the widget's parent."""
    box = QtGui.QGroupBox(name, widget.parent())
    layout = QtGui.QVBoxLayout(box)
    layout.addWidget(widget)
    return box
def new_small_plus_bt(parent=None):
    """Create a small (16x16) flat '+' tool button for add actions."""
    button = QtGui.QToolButton(parent)
    button.setAutoRaise(True)
    button.setText('+')
    # Prefer the desktop theme's add icon, falling back to the bundled one.
    fallback = QtGui.QIcon(get_icon_path('add', (16, 16)))
    button.setIcon(QtGui.QIcon.fromTheme('gtk-add', fallback))
    button.setMaximumSize(16, 16)
    button.setMinimumSize(16, 16)
    button.setToolButtonStyle(QtCore.Qt.ToolButtonFollowStyle)
    return button
class ZoomableView(QtGui.QGraphicsView):
    """A QGraphicsView that zoom on CTRL+MouseWheel.

    Also supports Ctrl+0 / Ctrl+- / Ctrl++ keyboard zoom and an optional
    wallpaper image painted behind the scene.
    """
    def __init__(self, parent=None):
        super(ZoomableView, self).__init__(parent)
        # Wallpaper image drawn behind the scene; None means no wallpaper.
        self.wp = None
    def wheelEvent(self, ev):
        if ev.modifiers() & QtCore.Qt.ControlModifier:
            # Exponential zoom: one standard wheel notch (delta 120) scales
            # by 1.16 ** 0.5, i.e. roughly 8% per notch.
            factor = pow(1.16, ev.delta() / 240.0)
            self.scale(factor, factor)
        else:
            super(ZoomableView, self).wheelEvent(ev)
    def keyPressEvent(self, ev):
        super(ZoomableView, self).keyPressEvent(ev)
        # Keyboard zoom: Ctrl+0 resets, Ctrl+- zooms out, Ctrl++ zooms in.
        if ev.modifiers() & QtCore.Qt.ControlModifier:
            if ev.key() == QtCore.Qt.Key_0:
                self.resetTransform()
            elif ev.key() == QtCore.Qt.Key_Minus:
                self.scale(0.80, 0.80)
            elif ev.key() == QtCore.Qt.Key_Plus:
                self.scale(1.20, 1.20)
    def set_wallpaper(self, image):
        """Set the wallpaper image (a QImage or None) and schedule a repaint."""
        self.wp = image
        self.viewport().update()
    def drawBackground(self, painter, rect):
        super(ZoomableView, self).drawBackground(painter, rect)
        def zoom_image():
            # Compute a source rectangle and target point that center the
            # wallpaper within `rect`: dimensions larger than the viewport
            # are cropped symmetrically, smaller ones are offset to center.
            sx, sy = 0, 0
            tx, ty = rect.x(), rect.y()
            sh, sw = self.wp.height(), self.wp.width()
            if self.wp.width() > rect.width():
                sx = (self.wp.width() - rect.width()) / 2
                sw -= sx * 2
            else:
                tx += (rect.width() - self.wp.width()) / 2
            if self.wp.height() > rect.height():
                sy = (self.wp.height() - rect.height()) / 2
                sh -= sy * 2
            else:
                ty += (rect.height() - self.wp.height()) / 2
            return QtCore.QRectF(sx, sy, sw, sh), QtCore.QPointF(tx, ty)
        if self.wp:
            source_rect, target_point = zoom_image()
            painter.drawImage(target_point, self.wp, source_rect)
class L5RMain(L5RCMCore):
    # Default main-window size; matches the QRect used in reset_geometry.
    default_size = QtCore.QSize(820, 720)
    # Default font point size for the UI.
    default_point_size = 8.25
    # Number of character-sheet tabs; the About tab is appended after these.
    num_tabs = 10
    def __init__(self, locale=None, parent=None):
        """Build the main window: signal sinks, menus, all UI pages and tabs."""
        super(L5RMain, self).__init__(locale, parent)
        log.ui.debug(u"Initialize L5RMain window")
        # character file save path
        self.save_path = ''
        # slot sinks
        self.sink1 = sinks.Sink1(self)  # Menu Sink
        self.sink2 = sinks.Sink2(self)  # MeritFlaw Sink
        self.sink3 = sinks.Sink3(self)  # Weapons Sink
        self.sink4 = sinks.Sink4(self)  # Weapons Sink
        # NOTE(review): sink3 and sink4 carry the same "Weapons Sink" label --
        # verify against sinks.py whether sink4 serves a different purpose.
        # Build interface and menus
        self.build_ui()
        self.build_menu()
        # Build page 1
        self.build_ui_page_1()
        self.build_ui_page_2()
        self.build_ui_page_3()
        self.build_ui_page_4()
        self.build_ui_page_5()
        self.build_ui_page_6()
        self.build_ui_page_7()
        self.build_ui_page_8()
        self.build_ui_page_9()
        self.build_ui_page_10()
        self.build_ui_page_about()
        # Replace the tab captions with icons only.
        self.tabs.setIconSize(QtCore.QSize(24, 24))
        tabs_icons = ['samurai', 'music', 'burn', 'powers', 'userinfo', 'book',
                      'katana', 'disk', 'text', 'bag']
        for i in range(0, self.num_tabs):
            self.tabs.setTabIcon(i, QtGui.QIcon(get_tab_icon(tabs_icons[i])))
            self.tabs.setTabText(i, '')
        # about = app_icon
        self.tabs.setTabIcon(self.num_tabs, QtGui.QIcon(get_app_icon_path()))
        self.tabs.setTabText(self.num_tabs, '')
        # donate button
        self.setup_donate_button()
        self.connect_signals()
    def build_ui(self):
        """Create the central zoomable view, scene, tabs and load settings."""
        log.ui.debug(u"Build L5RMain UI")
        # Main interface widgets
        self.view = ZoomableView(self)
        settings = QtCore.QSettings()
        # Set Background Color
        lBackgroundColor = settings.value('backgroundcolor')
        color = QtGui.QColor()
        if(lBackgroundColor is not None):
            color = QtGui.QColor(lBackgroundColor)
        if(not color.isValid()):
            # Missing or invalid stored colour: fall back to black.
            color = QtGui.QColor('#000000')
        self.view.setStyleSheet("background-color:%s;" % color.name())
        log.ui.debug(u"background color: %s", color.name())
        self.widgets = QtGui.QFrame()
        self.widgets.setFrameShape(QtGui.QFrame.StyledPanel)
        self.widgets.setLineWidth(1)
        self.tabs = QtGui.QTabWidget(self)
        self.scene = QtGui.QGraphicsScene(self)
        # The widget tree is embedded in a graphics scene so the view can
        # zoom it and paint a wallpaper behind it.
        proxy_widget = self.scene.addWidget(self.widgets, QtCore.Qt.Widget)
        #proxy_widget.setOpacity(float(settings.value('opacity', 0.96)))
        self.view.setScene(self.scene)
        self.view.setInteractive(True)
        self.setCentralWidget(self.view)
        self.nicebar = None
        mvbox = QtGui.QVBoxLayout(self.widgets)
        logo = QtGui.QLabel(self)
        # Set Banner
        lIsBannerEnabled = settings.value('isbannerenabled')
        if lIsBannerEnabled is None:
            # First run: default to showing the banner and persist the choice.
            lIsBannerEnabled = 1
            settings.setValue('isbannerenabled', lIsBannerEnabled)
        logo.setScaledContents(True)
        logo.setPixmap(QtGui.QPixmap(get_app_file('banner_s.png')))
        logo.setObjectName('BANNER')
        if lIsBannerEnabled == 0:
            logo.hide()
        mvbox.addWidget(logo)
        mvbox.addWidget(self.tabs)
        log.ui.debug(u"show banner: %s", u"yes" if lIsBannerEnabled else u"no" )
        self.mvbox = mvbox
        # LOAD SETTINGS
        geo = settings.value('geometry')
        if geo is not None:
            self.restoreGeometry(geo)
            log.ui.info(u"restore geometry from settings")
        else:
            log.ui.info(u"using default geometry")
            self.reset_geometry()
        # The insight-calculation method is stored 1-based in the settings.
        self.ic_idx = int(settings.value('insight_calculation', 1)) - 1
        ic_calcs = [api.rules.insight_calculation_1,
                    api.rules.insight_calculation_2,
                    api.rules.insight_calculation_3]
        if self.ic_idx not in range(0, len(ic_calcs)):
            # Out-of-range setting: fall back to the first method.
            self.ic_idx = 0
        log.rules.info(u"insight calculator settings: %d/%d", self.ic_idx+1, len(ic_calcs))
        self.ic_calc_method = ic_calcs[self.ic_idx]
        self.update_background_image()
def update_background_image(self):
settings = QtCore.QSettings()
wallpaper_ = settings.value('background_image', '')
if len(wallpaper_) == 0:
return
if os.path.exists(wallpaper_):
self.view.set_wallpaper(QtGui.QImage(wallpaper_))
log.ui.info(u"set background image: %s", wallpaper_)
else:
log.ui.warning(u"image not found: %s", wallpaper_)
def reset_geometry(self):
self.setGeometry(QtCore.QRect(100, 100, 820, 720))
def reset_layout_geometry(self):
self.mvbox.setGeometry(QtCore.QRect(1, 1, 727, 573))
def build_ui_page_1(self):
mfr = QtGui.QFrame(self)
self.tabs.addTab(mfr, self.tr("Character"))
mvbox = QtGui.QVBoxLayout(mfr)
mvbox.setContentsMargins(0, 0, 0, 0)
def add_pc_info(row, col):
fr_pc_info = QtGui.QFrame(self)
fr_pc_info.setSizePolicy(QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Maximum)
grid = QtGui.QGridLayout(fr_pc_info)
self.tx_pc_name = QtGui.QLineEdit(self)
self.tx_pc_rank = QtGui.QLineEdit(self)
self.lb_pc_clan = QtGui.QLabel(self)
self.lb_pc_family = QtGui.QLabel(self)
self.lb_pc_school = QtGui.QLabel(self)
self.tx_pc_exp = QtGui.QLineEdit(self)
self.tx_pc_ins = QtGui.QLineEdit(self)
# School
fr_school = QtGui.QFrame(self)
hb_school = QtGui.QHBoxLayout(fr_school)
hb_school.setContentsMargins(0, 0, 0, 0)
bt_edit_school = QtGui.QToolButton(self)
bt_edit_school.setToolTip(self.tr("Edit character first school"))
bt_edit_school.setAutoRaise(True)
bt_edit_school.setIcon(QtGui.QIcon(get_icon_path('edit', (16, 16))))
hb_school.addWidget(QtGui.QLabel(self.tr("School"), self))
hb_school.addWidget(bt_edit_school)
# Family
bt_edit_family = QtGui.QToolButton(self)
bt_edit_family.setToolTip(self.tr("Edit character family and clan"))
bt_edit_family.setAutoRaise(True)
bt_edit_family.setIcon(QtGui.QIcon(get_icon_path('edit', (16, 16))))
fr_family = QtGui.QFrame(self)
hb_family = QtGui.QHBoxLayout(fr_family)
hb_family.setContentsMargins(0, 0, 0, 0)
hb_family.addWidget(QtGui.QLabel(self.tr("Family")))
hb_family.addWidget(bt_edit_family)
# Place "generate random name" near the Name label
lb_name = QtGui.QLabel(self.tr("Name"), self)
bt_generate_male = QtGui.QToolButton(self)
bt_generate_male.setIcon(
QtGui.QIcon(get_icon_path('male', (16, 16))))
bt_generate_female = QtGui.QToolButton(self)
bt_generate_female.setIcon(
QtGui.QIcon(get_icon_path('female', (16, 16))))
bt_generate_male .setAutoRaise(True)
bt_generate_male .setToolTip(self.tr("Random male name"))
bt_generate_female.setAutoRaise(True)
bt_generate_female.setToolTip(self.tr("Random female name"))
hb_name = QtGui.QHBoxLayout()
hb_name.addWidget(lb_name)
hb_name.addWidget(bt_generate_male)
hb_name.addWidget(bt_generate_female)
# gender tag, connect signals
bt_generate_male .setProperty('gender', 'male')
bt_generate_female.setProperty('gender', 'female')
bt_generate_male .clicked.connect(self.sink1.generate_name)
bt_generate_female.clicked.connect(self.sink1.generate_name)
grid.addLayout(hb_name, 0, 0)
grid.addWidget(QtGui.QLabel(self.tr("Clan"), self), 1, 0)
grid.addWidget(fr_family, 2, 0)
grid.addWidget(fr_school, 3, 0)
self.bt_edit_family = bt_edit_family
self.bt_edit_school = bt_edit_school
# 3rd column
fr_exp = QtGui.QFrame(self)
hb_exp = QtGui.QHBoxLayout(fr_exp)
hb_exp.setContentsMargins(0, 0, 0, 0)
lb_exp = QtGui.QLabel(self.tr("Exp. Points"), self)
bt_exp = QtGui.QToolButton(self)
bt_exp.setToolTip(self.tr("Edit experience points"))
bt_exp.setAutoRaise(True)
bt_exp.setIcon(QtGui.QIcon(get_icon_path('edit', (16, 16))))
hb_exp.addWidget(lb_exp)
hb_exp.addWidget(bt_exp)
grid.addWidget(QtGui.QLabel(self.tr("Rank"), self), 0, 3)
grid.addWidget(fr_exp, 1, 3)
grid.addWidget(QtGui.QLabel(self.tr("Insight"), self), 2, 3)
self.bt_set_exp_points = bt_exp
# 2nd column
grid.addWidget(self.tx_pc_name, 0, 1, 1, 2)
grid.addWidget(self.lb_pc_clan, 1, 1, 1, 2)
grid.addWidget(self.lb_pc_family, 2, 1, 1, 2)
grid.addWidget(self.lb_pc_school, 3, 1, 1, 2)
# 4th column
grid.addWidget(self.tx_pc_rank, 0, 4, 1, 2)
grid.addWidget(self.tx_pc_exp, 1, 4, 1, 2)
grid.addWidget(self.tx_pc_ins, 2, 4, 1, 2)
self.tx_pc_rank.setReadOnly(True)
self.tx_pc_exp.setReadOnly(True)
self.tx_pc_ins.setReadOnly(True)
fr_pc_info.setLayout(grid)
mvbox.addWidget(fr_pc_info)
def build_trait_frame():
fr = QtGui.QFrame(self)
fr.setSizePolicy(QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Maximum)
hbox = QtGui.QHBoxLayout(fr)
grp = QtGui.QGroupBox(self.tr("Rings and Attributes"), self)
grid = QtGui.QGridLayout(grp)
grid.setSpacing(1)
# rings
rings = [(self.tr("Earth"), new_small_le(self)), (self.tr("Air"), new_small_le(self)),
(self.tr("Water"), new_small_le(self)), (self.tr("Fire"), new_small_le(self)),
(self.tr("Void"), new_small_le(self))]
# keep reference to the rings
self.rings = rings
for i in xrange(0, 4):
grid.addWidget(QtGui.QLabel(rings[i][0]), i, 0)
grid.addWidget(rings[i][1], i, 1)
# void ring with plus button
void_fr = QtGui.QFrame(self)
void_hbox = QtGui.QHBoxLayout(void_fr)
void_hbox.setContentsMargins(0, 0, 0, 0)
void_bt = new_small_plus_bt(self)
void_hbox.addWidget(rings[4][1])
void_hbox.addWidget(void_bt)
void_bt.clicked.connect(self.on_void_increase)
grid.addWidget(QtGui.QLabel(rings[4][0]), 4, 0)
grid.addWidget(void_fr, 4, 1)
attribs = []
# Earth ring
attribs.append((self.tr("Stamina"), new_small_le(self)))
attribs.append((self.tr("Willpower"), new_small_le(self)))
attribs[0][1].setProperty('attrib_id', models.ATTRIBS.STAMINA)
attribs[1][1].setProperty('attrib_id', models.ATTRIBS.WILLPOWER)
# Air ring
attribs.append((self.tr("Reflexes"), new_small_le(self)))
attribs.append((self.tr("Awareness"), new_small_le(self)))
attribs[2][1].setProperty('attrib_id', models.ATTRIBS.REFLEXES)
attribs[3][1].setProperty('attrib_id', models.ATTRIBS.AWARENESS)
# Water ring
attribs.append((self.tr("Strength"), new_small_le(self)))
attribs.append((self.tr("Perception"), new_small_le(self)))
attribs[4][1].setProperty('attrib_id', models.ATTRIBS.STRENGTH)
attribs[5][1].setProperty('attrib_id', models.ATTRIBS.PERCEPTION)
# Fire ring
attribs.append((self.tr("Agility"), new_small_le(self)))
attribs.append((self.tr("Intelligence"), new_small_le(self)))
attribs[6][1].setProperty('attrib_id', models.ATTRIBS.AGILITY)
attribs[7][1].setProperty('attrib_id', models.ATTRIBS.INTELLIGENCE)
self.attribs = attribs
# map increase trait signals
self.trait_sig_mapper = QtCore.QSignalMapper(self)
def _attrib_frame(i):
fr = QtGui.QFrame(self)
hbox = QtGui.QHBoxLayout(fr)
hbox.setContentsMargins(3, 0, 9, 0)
# small plus button
tag = str(attribs[i][1].property('attrib_id'))
bt = new_small_plus_bt(self)
hbox.addWidget(attribs[i][1])
hbox.addWidget(bt)
self.trait_sig_mapper.setMapping(bt, tag)
QtCore.QObject.connect(bt, QtCore.SIGNAL('clicked()'), self.trait_sig_mapper, QtCore.SLOT('map()'))
return fr
for i in xrange(0, 8, 2):
grid.addWidget(QtGui.QLabel(attribs[i][0]),
(i // 2), 2, 1, 1, QtCore.Qt.AlignLeft)
grid.addWidget(_attrib_frame(i), (i // 2), 3, 1, 1,
QtCore.Qt.AlignLeft)
grid.addWidget(QtGui.QLabel(attribs[i + 1][0]),
(i // 2), 4, 1, 1, QtCore.Qt.AlignLeft)
grid.addWidget(_attrib_frame(i + 1), (i // 2), 5, 1, 1,
QtCore.Qt.AlignLeft)
grid.addWidget(QtGui.QLabel(self.tr("<b>Void Points</b>")),
4, 2, 1, 3,
QtCore.Qt.AlignHCenter)
self.void_points = widgets.CkNumWidget(count=10, parent=self)
grid.addWidget(self.void_points, 5, 2, 1, 3,
QtCore.Qt.AlignHCenter)
hbox.addWidget(grp)
return fr
def build_flags_frame():
tx_flags = [self.tr("Honor"), self.tr("Glory"),
self.tr("Status"), self.tr("Shadowland Taint"),
self.tr("Infamy")]
ob_flags_p = []
ob_flags_r = []
fr = QtGui.QFrame(self)
# fr.setFrameShape(QtGui.QFrame.StyledPanel)
vbox = QtGui.QVBoxLayout(fr)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
row = 1
for f in tx_flags:
fr_ = QtGui.QFrame(self)
lay = QtGui.QGridLayout(fr_)
lay.setContentsMargins(0, 0, 0, 0)
lay.setSpacing(0)
lay.addWidget(QtGui.QLabel('<b>%s</b>' % f), row, 0)
l = new_small_le(self, False)
lay.addWidget(l, row, 1)
w = widgets.CkNumWidget(count=9, parent=self)
lay.addWidget(w, row + 1, 0, 1, 2, QtCore.Qt.AlignHCenter)
ob_flags_p.append(w)
ob_flags_r.append(l)
vbox.addWidget(fr_)
self.pc_flags_points = ob_flags_p
self.pc_flags_rank = ob_flags_r
return fr
def add_traits_and_flags():
trait_frame = build_trait_frame()
flags_frame = build_flags_frame()
fr = QtGui.QFrame(self)
hbox = QtGui.QHBoxLayout(fr)
fr.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Maximum)
hbox.addWidget(trait_frame)
hbox.addWidget(flags_frame)
mvbox.addWidget(fr)
def add_pc_quantities(row, col):
fr = QtGui.QFrame(self)
fr.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Maximum)
hbox = QtGui.QHBoxLayout(fr)
monos_ = QtGui.QFont('Monospace')
monos_.setStyleHint(QtGui.QFont.Courier)
# fr.setFont(monos_)
# initiative
grp = QtGui.QGroupBox(self.tr("Initiative"), self)
grd = QtGui.QFormLayout(grp)
self.tx_base_init = QtGui.QLineEdit(self)
self.tx_mod_init = QtGui.QLineEdit(self)
self.tx_cur_init = QtGui.QLineEdit(self)
self.tx_base_init.setReadOnly(True)
self.tx_mod_init .setReadOnly(True)
self.tx_cur_init .setReadOnly(True)
grd.addRow(self.tr("Base"), self.tx_base_init)
grd.addRow(self.tr("Modifier"), self.tx_mod_init)
grd.addRow(self.tr("Current"), self.tx_cur_init)
hbox.addWidget(grp, 1)
# Armor TN
grp = QtGui.QGroupBox(self.tr("Armor TN"), self)
grd = QtGui.QFormLayout(grp)
self.tx_armor_nm = QtGui.QLineEdit(self)
self.tx_base_tn = QtGui.QLineEdit(self)
self.tx_armor_tn = QtGui.QLineEdit(self)
self.tx_armor_rd = QtGui.QLineEdit(self)
self.tx_cur_tn = QtGui.QLineEdit(self)
self.tx_armor_nm.setReadOnly(True)
self.tx_base_tn .setReadOnly(True)
self.tx_armor_tn.setReadOnly(True)
self.tx_armor_rd.setReadOnly(True)
self.tx_cur_tn .setReadOnly(True)
grd.addRow(self.tr("Name"), self.tx_armor_nm)
grd.addRow(self.tr("Base"), self.tx_base_tn)
grd.addRow(self.tr("Armor"), self.tx_armor_tn)
grd.addRow(self.tr("Reduction"), self.tx_armor_rd)
grd.addRow(self.tr("Current"), self.tx_cur_tn)
hbox.addWidget(grp, 1)
# Wounds
grp = QtGui.QGroupBox(self.tr("Wounds"), self)
grd = QtGui.QGridLayout(grp)
wnd = [(QtGui.QLabel(self), new_small_le(self), new_small_le(self)),
(QtGui.QLabel(self), new_small_le(self), new_small_le(self)),
(QtGui.QLabel(self), new_small_le(self), new_small_le(self)),
(QtGui.QLabel(self), new_small_le(self), new_small_le(self)),
(QtGui.QLabel(self), new_small_le(self), new_small_le(self)),
(QtGui.QLabel(self), new_small_le(self), new_small_le(self)),
(QtGui.QLabel(self), new_small_le(self), new_small_le(self)),
(QtGui.QLabel(self.tr("Out"), self),
new_small_le(self),
new_small_le(self))]
self.wounds = wnd
self.wnd_lb = grp
row_ = 0
col_ = 0
for i in xrange(0, len(wnd)):
if i == 4:
col_ = 3
row_ = 0
grd.addWidget(wnd[i][0], row_, col_)
grd.addWidget(wnd[i][0], row_, col_)
grd.addWidget(wnd[i][1], row_, col_ + 1)
grd.addWidget(wnd[i][2], row_, col_ + 2)
row_ += 1
hbox.addWidget(grp, 2)
mvbox.addWidget(fr)
add_pc_info(0, 0)
mvbox.addWidget(new_horiz_line(self))
add_traits_and_flags()
mvbox.addWidget(new_horiz_line(self))
add_pc_quantities(4, 0)
def _build_generic_page(self, models_):
mfr = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(mfr)
views_ = []
for k, t, m, d, tb, on_double_click in models_:
grp = QtGui.QGroupBox(k, self)
hbox = QtGui.QHBoxLayout(grp)
view = None
if t == 'table':
view = QtGui.QTableView(self)
view.setSortingEnabled(True)
view.horizontalHeader().setResizeMode(
QtGui.QHeaderView.Interactive)
view.horizontalHeader().setStretchLastSection(True)
view.horizontalHeader().setCascadingSectionResizes(True)
if d is not None and len(d) == 2:
col_ = d[0]
obj_ = d[1]
view.setItemDelegateForColumn(col_, obj_)
elif t == 'list':
view = QtGui.QListView(self)
if on_double_click:
view.doubleClicked.connect(on_double_click)
view.setModel(m)
if d is not None:
view.setItemDelegate(d)
if tb is not None:
hbox.addWidget(tb)
hbox.addWidget(view)
vbox.addWidget(grp)
views_.append(view)
return mfr, views_
    def _build_spell_frame(self, model, layout):
        """Add the "Spells" group box to *layout*.

        Contains a buy/remove/memorize toolbar, the affinity/deficiency
        summary labels and a sortable spell table over *model*.  Stores the
        table in self.spell_table_view and returns it.
        """
        grp = QtGui.QGroupBox(self.tr("Spells"), self)
        hbox = QtGui.QHBoxLayout(grp)
        fr_ = QtGui.QFrame(self)
        vbox = QtGui.QVBoxLayout(fr_)
        vbox.setContentsMargins(3, 3, 3, 3)
        # spell vertical toolbar (buy / remove / memorize-forget)
        def _make_vertical_tb():
            vtb = widgets.VerticalToolBar(self)
            vtb.addStretch()
            cb_buy = self.act_buy_spell
            cb_remove = self.act_del_spell
            cb_memo = self.act_memo_spell
            self.add_spell_bt = vtb.addButton(
                QtGui.QIcon(get_icon_path('buy', (16, 16))),
                self.tr("Add new spell"), cb_buy)
            self.del_spell_bt = vtb.addButton(
                QtGui.QIcon(get_icon_path('minus', (16, 16))),
                self.tr("Remove spell"), cb_remove)
            self.memo_spell_bt = vtb.addButton(
                QtGui.QIcon(get_icon_path('book', (16, 16))),
                self.tr("Memorize/Forget spell"), cb_memo)
            # removal stays disabled until a spell row is selected
            self.del_spell_bt.setEnabled(False)
            vtb.addStretch()
            return vtb
        # View
        view = QtGui.QTableView(fr_)
        view.setSizePolicy(QtGui.QSizePolicy.Expanding,
                           QtGui.QSizePolicy.Expanding)
        view.setSortingEnabled(True)
        view.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive)
        view.horizontalHeader().setStretchLastSection(True)
        view.horizontalHeader().setCascadingSectionResizes(True)
        view.setModel(model)
        # the selection model only exists after setModel(); row selection
        # drives the enabled state handled in on_spell_selected
        sm = view.selectionModel()
        sm.currentRowChanged.connect(self.on_spell_selected)
        self.spell_table_view = view
        # Affinity/Deficiency
        self.lb_affin = QtGui.QLabel(self.tr("None"), self)
        self.lb_defic = QtGui.QLabel(self.tr("None"), self)
        aff_fr = QtGui.QFrame(self)
        aff_fr.setSizePolicy(QtGui.QSizePolicy.Preferred,
                             QtGui.QSizePolicy.Maximum)
        fl = QtGui.QFormLayout(aff_fr)
        fl.addRow(self.tr("<b><i>Affinity</i></b>"), self.lb_affin)
        fl.addRow(self.tr("<b><i>Deficiency</i></b>"), self.lb_defic)
        fl.setHorizontalSpacing(60)
        fl.setVerticalSpacing(5)
        fl.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(aff_fr)
        vbox.addWidget(view)
        hbox.addWidget(_make_vertical_tb())
        hbox.addWidget(fr_)
        layout.addWidget(grp)
        view.doubleClicked.connect(self.sink4.on_spell_item_activate)
        return view
def _build_tech_frame(self, model, layout):
grp = QtGui.QGroupBox(self.tr("Techs"), self)
vbox = QtGui.QVBoxLayout(grp)
# View
view = QtGui.QListView(self)
view.setModel(model)
view.setItemDelegate(models.TechItemDelegate(self))
vbox.addWidget(view)
layout.addWidget(grp)
view.doubleClicked.connect(self.sink4.on_tech_item_activate)
return view
def _build_kata_frame(self, model, layout):
grp = QtGui.QGroupBox(self.tr("Kata"), self)
hbox = QtGui.QHBoxLayout(grp)
fr_ = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(fr_)
vbox.setContentsMargins(3, 3, 3, 3)
# advantages/disadvantage vertical toolbar
def _make_vertical_tb():
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
cb_buy = self.sink2.act_buy_kata
cb_remove = self.sink2.act_del_kata
self.add_kata_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('buy', (16, 16))),
self.tr("Add new Kata"), cb_buy)
self.del_kata_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('minus', (16, 16))),
self.tr("Remove Kata"), cb_remove)
self.add_kata_bt.setEnabled(True)
self.del_kata_bt.setEnabled(True)
vtb.addStretch()
return vtb
# View
view = QtGui.QTableView(self)
view.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
view.setSortingEnabled(True)
view.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive)
view.horizontalHeader().setStretchLastSection(True)
view.horizontalHeader().setCascadingSectionResizes(True)
view.setModel(model)
view.doubleClicked.connect(self.sink4.on_kata_item_activate)
self.ka_table_view = view
vbox.addWidget(view)
hbox.addWidget(_make_vertical_tb())
hbox.addWidget(fr_)
layout.addWidget(grp)
return view
def _build_kiho_frame(self, model, layout):
grp = QtGui.QGroupBox(self.tr("Kiho"), self)
hbox = QtGui.QHBoxLayout(grp)
fr_ = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(fr_)
vbox.setContentsMargins(3, 3, 3, 3)
# advantages/disadvantage vertical toolbar
def _make_vertical_tb():
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
cb_buy = self.sink2.act_buy_kiho
cb_remove = self.sink2.act_del_kiho
cb_buy_tattoo = self.sink2.act_buy_tattoo
self.add_kiho_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('buy', (16, 16))),
self.tr("Add new Kiho"), cb_buy)
self.add_tattoo_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('buy', (16, 16))),
self.tr("Add new Tattoo"), cb_buy_tattoo)
self.del_kiho_bt = vtb.addButton(
QtGui.QIcon(get_icon_path('minus', (16, 16))),
self.tr("Remove Kiho"), cb_remove)
self.add_kiho_bt.setEnabled(True)
self.del_kiho_bt.setEnabled(True)
vtb.addStretch()
return vtb
# View
view = QtGui.QTableView(self)
view.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
view.setSortingEnabled(True)
view.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive)
view.horizontalHeader().setStretchLastSection(True)
view.horizontalHeader().setCascadingSectionResizes(True)
view.setModel(model)
view.doubleClicked.connect(self.sink4.on_kiho_item_activate)
self.ki_table_view = view
vbox.addWidget(view)
hbox.addWidget(_make_vertical_tb())
hbox.addWidget(fr_)
layout.addWidget(grp)
return view
def build_ui_page_2(self):
self.sk_view_model = models.SkillTableViewModel(self)
self.ma_view_model = models.MaViewModel(self)
# enable sorting through a proxy model
self.sk_sort_model = models.ColorFriendlySortProxyModel(self)
self.sk_sort_model.setDynamicSortFilter(True)
self.sk_sort_model.setSourceModel(self.sk_view_model)
# skills vertical toolbar
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
vtb.addButton(QtGui.QIcon(get_icon_path('add', (16, 16))),
self.tr("Add skill rank"), self.on_buy_skill_rank)
vtb.addButton(QtGui.QIcon(get_icon_path('buy', (16, 16))),
self.tr("Buy skill emphasys"), self.show_buy_emph_dlg)
vtb.addButton(QtGui.QIcon(get_icon_path('buy', (16, 16))),
self.tr("Buy another skill"), self.show_buy_skill_dlg)
vtb.addStretch()
models_ = [
(
"Skills",
'table',
self.sk_sort_model,
None,
vtb,
self.sink4.on_skill_item_activate
),
(
self.tr("Mastery Abilities"),
'list',
self.ma_view_model,
models.MaItemDelegate(self),
None,
None
)
]
frame_, views_ = self._build_generic_page(models_)
if len(views_) > 0:
self.skill_table_view = views_[0]
self.tabs.addTab(frame_, self.tr("Skills"))
def build_ui_page_3(self):
self.sp_view_model = models.SpellTableViewModel(self)
self.th_view_model = models.TechViewModel(self)
# enable sorting through a proxy model
self.sp_sort_model = models.ColorFriendlySortProxyModel(self)
self.sp_sort_model.setDynamicSortFilter(True)
self.sp_sort_model.setSourceModel(self.sp_view_model)
frame_ = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(frame_)
self._build_spell_frame(self.sp_sort_model, vbox)
self._build_tech_frame(self.th_view_model, vbox)
self.tabs.addTab(frame_, self.tr("Techniques"))
def build_ui_page_4(self):
self.ka_view_model = models.KataTableViewModel(self)
self.ki_view_model = models.KihoTableViewModel(self)
# enable sorting through a proxy model
self.ka_sort_model = models.ColorFriendlySortProxyModel(self)
self.ka_sort_model.setDynamicSortFilter(True)
self.ka_sort_model.setSourceModel(self.ka_view_model)
self.ki_sort_model = models.ColorFriendlySortProxyModel(self)
self.ki_sort_model.setDynamicSortFilter(True)
self.ki_sort_model.setSourceModel(self.ki_view_model)
frame_ = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(frame_)
self.kata_view = self._build_kata_frame(self.ka_sort_model, vbox)
self.kiho_view = self._build_kiho_frame(self.ki_sort_model, vbox)
self.tabs.addTab(frame_, self.tr("Powers"))
    def build_ui_page_5(self):
        """Build the "Perks" tab: advantages (merits) and disadvantages
        (flaws), each as a list with a buy/edit/remove toolbar."""
        mfr = QtGui.QFrame(self)
        vbox = QtGui.QVBoxLayout(mfr)
        # advantages/disadvantage vertical toolbar
        def _make_vertical_tb(tag, has_edit, has_remove):
            # `tag` ('merit' or anything else, i.e. flaw) selects which
            # sink2 callbacks the buttons trigger
            vtb = widgets.VerticalToolBar(self)
            vtb.addStretch()
            cb_buy = (self.sink2.act_buy_merit if tag == 'merit'
                      else self.sink2.act_buy_flaw)
            cb_edit = (self.sink2.act_edit_merit if tag == 'merit'
                       else self.sink2.act_edit_flaw)
            cb_remove = (self.sink2.act_del_merit if tag == 'merit'
                         else self.sink2.act_del_flaw)
            vtb.addButton(QtGui.QIcon(get_icon_path('buy', (16, 16))),
                          self.tr("Add Perk"), cb_buy)
            if has_edit:
                vtb.addButton(QtGui.QIcon(get_icon_path('edit', (16, 16))),
                              self.tr("Edit Perk"), cb_edit)
            if has_remove:
                vtb.addButton(QtGui.QIcon(get_icon_path('minus', (16, 16))),
                              self.tr("Remove Perk"), cb_remove)
            vtb.addStretch()
            return vtb
        # NOTE(review): the model tags differ in number ('merit' vs 'flaws')
        # — presumably matching PerkViewModel's expectations; verify there.
        self.merits_view_model = models.PerkViewModel('merit')
        self.flaws_view_model = models.PerkViewModel('flaws')
        # Advantages (merits) list
        merit_view = QtGui.QListView(self)
        merit_view.setModel(self.merits_view_model)
        merit_view.setItemDelegate(models.PerkItemDelegate(self))
        merit_vtb = _make_vertical_tb('merit', True, True)
        fr_ = QtGui.QFrame(self)
        hb_ = QtGui.QHBoxLayout(fr_)
        hb_.setContentsMargins(3, 3, 3, 3)
        hb_.addWidget(merit_vtb)
        hb_.addWidget(merit_view)
        vbox.addWidget(new_item_groupbox(self.tr("Advantages"), fr_))
        # Disadvantages (flaws) list
        flaw_view = QtGui.QListView(self)
        flaw_view.setModel(self.flaws_view_model)
        flaw_view.setItemDelegate(models.PerkItemDelegate(self))
        flaw_vtb = _make_vertical_tb('flaw', True, True)
        fr_ = QtGui.QFrame(self)
        hb_ = QtGui.QHBoxLayout(fr_)
        hb_.setContentsMargins(3, 3, 3, 3)
        hb_.addWidget(flaw_vtb)
        hb_.addWidget(flaw_view)
        vbox.addWidget(new_item_groupbox(self.tr("Disadvantages"), fr_))
        self.merit_view = merit_view
        self.flaw_view = flaw_view
        self.tabs.addTab(mfr, self.tr("Perks"))
def build_ui_page_6(self):
mfr = QtGui.QFrame(self)
vbox = QtGui.QVBoxLayout(mfr)
fr_ = QtGui.QFrame(self)
fr_h = QtGui.QHBoxLayout(fr_)
fr_h.setContentsMargins(0, 0, 0, 0)
fr_h.addWidget(
QtGui.QLabel(self.tr("""<p><i>Select the advancement to refund and hit the button</i></p>"""), self))
bt_refund_adv = QtGui.QPushButton(self.tr("Refund"), self)
bt_refund_adv.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt_refund_adv.clicked.connect(self.sink1.refund_advancement)
fr_h.addWidget(bt_refund_adv)
vbox.addWidget(fr_)
self.adv_view_model = models.AdvancementViewModel(self)
lview = QtGui.QListView(self)
lview.setModel(self.adv_view_model)
lview.setItemDelegate(models.AdvancementItemDelegate(self))
vbox.addWidget(lview)
self.adv_view = lview
self.tabs.addTab(mfr, self.tr("Advancements"))
def build_ui_page_7(self):
self.melee_view_model = models.WeaponTableViewModel('melee', self)
self.ranged_view_model = models.WeaponTableViewModel('ranged', self)
self.arrow_view_model = models.WeaponTableViewModel('arrow', self)
def _make_sortable(model):
# enable sorting through a proxy model
sort_model_ = models.ColorFriendlySortProxyModel(self)
sort_model_.setDynamicSortFilter(True)
sort_model_.setSourceModel(model)
return sort_model_
# weapon vertical toolbar
def _make_vertical_tb(has_custom, has_edit, has_qty, filt):
vtb = widgets.VerticalToolBar(self)
vtb.setProperty('filter', filt)
vtb.addStretch()
vtb.addButton(QtGui.QIcon(get_icon_path('buy', (16, 16))),
self.tr("Add weapon"), self.sink3.show_add_weapon)
if has_custom:
vtb.addButton(QtGui.QIcon(get_icon_path('custom', (16, 16))),
self.tr("Add custom weapon"), self.sink3.show_add_cust_weapon)
if has_edit:
vtb.addButton(QtGui.QIcon(get_icon_path('edit', (16, 16))),
self.tr("Edit weapon"), self.sink3.edit_selected_weapon)
vtb.addButton(QtGui.QIcon(get_icon_path('minus', (16, 16))),
self.tr("Remove weapon"), self.sink3.remove_selected_weapon)
if has_qty:
vtb.addButton(QtGui.QIcon(get_icon_path('add', (16, 16))),
self.tr("Increase Quantity"), self.sink3.on_increase_item_qty)
vtb.addButton(QtGui.QIcon(get_icon_path('minus', (16, 16))),
self.tr("Decrease Quantity"), self.sink3.on_decrease_item_qty)
vtb.addStretch()
return vtb
melee_vtb = _make_vertical_tb(True, True, False, 'melee')
ranged_vtb = _make_vertical_tb(True, True, False, 'ranged')
arrow_vtb = _make_vertical_tb(False, False, True, 'arrow')
models_ = [
(self.tr("Melee Weapons"), 'table', _make_sortable(self.melee_view_model), None, melee_vtb, None),
(self.tr("Ranged Weapons"), 'table', _make_sortable(self.ranged_view_model), None, ranged_vtb, None),
(self.tr("Arrows"), 'table', _make_sortable(self.arrow_view_model), None, arrow_vtb, None)
]
frame_, views_ = self._build_generic_page(models_)
melee_vtb .setProperty('source', views_[0])
ranged_vtb.setProperty('source', views_[1])
arrow_vtb .setProperty('source', views_[2])
self.tabs.addTab(frame_, self.tr("Weapons"))
def build_ui_page_8(self):
# modifiers
self.mods_view_model = models.ModifiersTableViewModel(self)
self.mods_view_model.user_change.connect(self.update_from_model)
def _make_sortable(model):
# enable sorting through a proxy model
sort_model_ = models.ColorFriendlySortProxyModel(self)
sort_model_.setDynamicSortFilter(True)
sort_model_.setSourceModel(model)
return sort_model_
# weapon vertical toolbar
def _make_vertical_tb():
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
vtb.addButton(QtGui.QIcon(get_icon_path('buy', (16, 16))),
self.tr("Add modifier"), self.sink4.add_new_modifier)
vtb.addButton(QtGui.QIcon(get_icon_path('edit', (16, 16))),
self.tr("Edit modifier"), self.sink4.edit_selected_modifier)
vtb.addButton(QtGui.QIcon(get_icon_path('minus', (16, 16))),
self.tr("Remove modifier"), self.sink4.remove_selected_modifier)
vtb.addStretch()
return vtb
vtb = _make_vertical_tb()
models_ = [
(self.tr("Modifiers"), 'table', _make_sortable(self.mods_view_model), None, vtb, None)
]
frame_, views_ = self._build_generic_page(models_)
self.mod_view = views_[0]
vtb .setProperty('source', self.mod_view)
self.tabs.addTab(frame_, self.tr("Modifiers"))
    def build_ui_page_9(self):
        """Build the "Notes" tab: a rich-text notes editor with the
        personal-information form underneath."""
        mfr = QtGui.QFrame(self)
        vbox = QtGui.QVBoxLayout(mfr)
        self.tx_pc_notes = widgets.SimpleRichEditor(self)
        vbox.addWidget(self.tx_pc_notes)
        def build_pers_info():
            # Group box with two side-by-side form columns: physical
            # description (left) and family/marital info (right).
            grp = QtGui.QGroupBox(self.tr("Personal Informations"), self)
            grp.setSizePolicy(QtGui.QSizePolicy.Expanding,
                              QtGui.QSizePolicy.Preferred)
            hgrp = QtGui.QHBoxLayout(grp)
            # anagraphic
            afr = QtGui.QFrame(self)
            afl = QtGui.QFormLayout(afr)
            self.tx_pc_sex = QtGui.QLineEdit(self)
            self.tx_pc_age = QtGui.QLineEdit(self)
            self.tx_pc_height = QtGui.QLineEdit(self)
            self.tx_pc_weight = QtGui.QLineEdit(self)
            self.tx_pc_hair = QtGui.QLineEdit(self)
            self.tx_pc_eyes = QtGui.QLineEdit(self)
            afl.addRow(self.tr("Sex"), self.tx_pc_sex)
            afl.addRow(self.tr("Age"), self.tx_pc_age)
            afl.addRow(self.tr("Height"), self.tx_pc_height)
            afl.addRow(self.tr("Weight"), self.tx_pc_weight)
            afl.addRow(self.tr("Hair"), self.tx_pc_hair)
            afl.addRow(self.tr("Eyes"), self.tx_pc_eyes)
            hgrp.addWidget(afr)
            # separator
            hgrp.addWidget(new_vert_line())
            # parents
            bfr = QtGui.QFrame(self)
            bfl = QtGui.QFormLayout(bfr)
            self.tx_pc_father = QtGui.QLineEdit(self)
            self.tx_pc_mother = QtGui.QLineEdit(self)
            self.tx_pc_bro = QtGui.QLineEdit(self)
            self.tx_pc_sis = QtGui.QLineEdit(self)
            self.tx_pc_marsta = QtGui.QLineEdit(self)
            self.tx_pc_spouse = QtGui.QLineEdit(self)
            self.tx_pc_childr = QtGui.QLineEdit(self)
            bfl.addRow(self.tr("Father"), self.tx_pc_father)
            bfl.addRow(self.tr("Mother"), self.tx_pc_mother)
            bfl.addRow(self.tr("Brothers"), self.tx_pc_bro)
            bfl.addRow(self.tr("Sisters"), self.tx_pc_sis)
            bfl.addRow(self.tr("Marital Status"), self.tx_pc_marsta)
            bfl.addRow(self.tr("Spouse"), self.tx_pc_spouse)
            bfl.addRow(self.tr("Children"), self.tx_pc_childr)
            hgrp.addWidget(bfr)
            # flat collection of all the info widgets (presumably iterated
            # by load/save code elsewhere — verify against callers);
            # NOTE(review): bro/marsta/sis ordering differs from the form
            # order above — confirm whether order matters to consumers
            self.pers_info_widgets = [
                self.tx_pc_sex, self.tx_pc_age,
                self.tx_pc_height, self.tx_pc_weight,
                self.tx_pc_hair, self.tx_pc_eyes,
                self.tx_pc_father, self.tx_pc_mother,
                self.tx_pc_bro, self.tx_pc_marsta,
                self.tx_pc_sis, self.tx_pc_spouse, self.tx_pc_childr]
            # link personal information widgets
            # each .link tags the widget with a key consumed elsewhere
            self.tx_pc_sex.link = 'sex'
            self.tx_pc_age.link = 'age'
            self.tx_pc_height.link = 'height'
            self.tx_pc_weight.link = 'weight'
            self.tx_pc_hair.link = 'hair'
            self.tx_pc_eyes.link = 'eyes'
            self.tx_pc_father.link = 'father'
            self.tx_pc_mother.link = 'mother'
            self.tx_pc_bro.link = 'brothers'
            self.tx_pc_sis.link = 'sisters'
            self.tx_pc_marsta.link = 'marsta'
            self.tx_pc_spouse.link = 'spouse'
            self.tx_pc_childr.link = 'childr'
            return grp
        vbox.addWidget(build_pers_info())
        self.tabs.addTab(mfr, self.tr("Notes"))
def build_ui_page_10(self):
self.equip_view_model = models.EquipmentListModel(self)
# self.equip_view_model.user_change.connect(self.update_from_model)
def _make_sortable(model):
# enable sorting through a proxy model
sort_model_ = models.ColorFriendlySortProxyModel(self)
sort_model_.setDynamicSortFilter(True)
sort_model_.setSourceModel(model)
return sort_model_
# weapon vertical toolbar
def _make_vertical_tb():
vtb = widgets.VerticalToolBar(self)
vtb.addStretch()
vtb.addButton(QtGui.QIcon(get_icon_path('buy', (16, 16))),
self.tr("Add equipment"), self.sink4.add_equipment)
vtb.addButton(QtGui.QIcon(get_icon_path('minus', (16, 16))),
self.tr("Remove equipment"), self.sink4.remove_selected_equipment)
vtb.addStretch()
return vtb
vtb = _make_vertical_tb()
models_ = [
(self.tr("Equipment"), 'list', _make_sortable(self.equip_view_model), None, vtb, None)
]
frame_, views_ = self._build_generic_page(models_)
self.equip_view = views_[0]
font = self.equip_view.font()
font.setPointSize(11.5)
self.equip_view.setFont(font)
self.money_widget = widgets.MoneyWidget(self)
frame_.layout().setSpacing(12)
frame_.layout().addWidget(new_horiz_line(self))
frame_.layout().addWidget(self.money_widget)
self.money_widget.valueChanged.connect(
self.sink4.on_money_value_changed)
vtb .setProperty('source', self.equip_view)
self.tabs.addTab(frame_, self.tr("Equipment"))
def build_ui_page_about(self):
mfr = QtGui.QFrame(self)
hbox = QtGui.QHBoxLayout()
hbox.setAlignment(QtCore.Qt.AlignCenter)
hbox.setSpacing(30)
logo = QtGui.QLabel(self)
logo.setPixmap(QtGui.QPixmap(get_app_icon_path((64, 64))))
hbox.addWidget(logo, 0, QtCore.Qt.AlignTop)
vbox = QtGui.QVBoxLayout(mfr)
vbox.setAlignment(QtCore.Qt.AlignCenter)
vbox.setSpacing(30)
info = """<html><style>a { color: palette(text); }</style><body><h1>%s</h1>
<p>Version %s</p>
<p><a href="%s">%s</a></p>
<p>Report bugs and send in your ideas <a href="%s">here</a></p>
<p>To know about Legend of the Five rings please visit
<a href="%s">L5R RPG Home Page</a>
</p>
<p>
All right on Legend of The Five Rings RPG are possession of
<p>
<a href="%s">Alderac Entertainment Group (AEG)</a>
</p>
</p>
<p style='color:palette(mid)'>© 2015 %s</p>
<p>Special Thanks:</p>
<p style="margin-left: 10;">
Paul Tar, Jr aka Geiko (Lots of cool stuff)</p>
<p style="margin-left: 10;">Derrick D. Cochran (OS X Distro)
</p>
</body></html>""" % (APP_DESC,
QtGui.QApplication.applicationVersion(
),
PROJECT_PAGE_LINK, PROJECT_PAGE_NAME,
BUGTRAQ_LINK, L5R_RPG_HOME_PAGE,
ALDERAC_HOME_PAGE, AUTHOR_NAME)
lb_info = QtGui.QLabel(info, self)
lb_info.setOpenExternalLinks(True)
lb_info.setWordWrap(True)
hbox.addWidget(lb_info)
def on_contact_link_activate():
url = QtCore.QUrl(L5RCM_GPLUS_PAGE)
QtGui.QDesktopServices.openUrl(url)
def on_community_link_activate():
url = QtCore.QUrl(L5RCM_GPLUS_COMM)
QtGui.QDesktopServices.openUrl(url)
bt_contact_gplus = QtGui.QCommandLinkButton(
"Contact me", "but bring good news", self)
bt_contact_gplus.setIcon(
QtGui.QIcon(get_icon_path('new-g-plus-icon', (16, 16))))
# bt_contact_gplus.setFlat(True)
bt_contact_gplus.clicked.connect(on_contact_link_activate)
bt_community_gplus = QtGui.QCommandLinkButton(
"Join the G+ Community", "for answers and support", self)
bt_community_gplus.setIcon(
QtGui.QIcon(get_icon_path('new-g-plus-icon', (16, 16))))
# bt_community_gplus.setFlat(True)
bt_community_gplus.clicked.connect(on_community_link_activate)
gplus_form = QtGui.QVBoxLayout()
gplus_form.addWidget(bt_contact_gplus)
gplus_form.addWidget(bt_community_gplus)
gplus_form.setSpacing(6)
gplus_hbox = QtGui.QHBoxLayout()
gplus_hbox.setContentsMargins(0, 0, 50, 0)
gplus_hbox.addStretch()
gplus_hbox.addLayout(gplus_form)
vbox.addLayout(hbox)
vbox.addLayout(gplus_hbox)
self.tabs.addTab(mfr, self.tr("About"))
def build_menu(self):
settings = QtCore.QSettings()
self.app_menu_tb = QtGui.QToolButton(self.widgets)
self.app_menu = QtGui.QMenu("AppMenu", self.app_menu_tb)
# File Menu
# actions: new, open, save
new_act = QtGui.QAction(self.tr("&New Character"), self)
open_act = QtGui.QAction(self.tr("&Open Character..."), self)
save_act = QtGui.QAction(self.tr("&Save Character..."), self)
export_pdf_act = QtGui.QAction(self.tr("Ex&port as PDF..."), self)
export_npc_act = QtGui.QAction(self.tr("Export NPC sheet..."), self)
exit_act = QtGui.QAction(self.tr("E&xit"), self)
new_act .setShortcut(QtGui.QKeySequence.New)
open_act.setShortcut(QtGui.QKeySequence.Open)
save_act.setShortcut(QtGui.QKeySequence.Save)
exit_act.setShortcut(QtGui.QKeySequence.Quit)
new_act .triggered.connect(self.sink1.new_character)
open_act.triggered.connect(self.sink1.load_character)
save_act.triggered.connect(self.sink1.save_character)
exit_act.triggered.connect(self.close)
export_pdf_act .triggered.connect(self.sink1.export_character_as_pdf)
export_npc_act .triggered.connect(self.sink4.show_npc_export_dialog)
# Advancement menu
# actions buy advancement, view advancements
resetadv_act = QtGui.QAction(self.tr("&Reset advancements"), self)
refund_act = QtGui.QAction(self.tr("Refund last advancement"), self)
refund_act .setShortcut(QtGui.QKeySequence.Undo)
resetadv_act.triggered.connect(self.sink1.reset_adv)
refund_act .triggered.connect(self.sink1.refund_last_adv)
# Outfit menu
# actions, select armor, add weapon, add misc item
sel_armor_act = QtGui.QAction(self.tr("Wear Armor..."), self)
sel_cust_armor_act = QtGui.QAction(
self.tr("Wear Custom Armor..."), self)
add_weap_act = QtGui.QAction(self.tr("Add Weapon..."), self)
add_cust_weap_act = QtGui.QAction(
self.tr("Add Custom Weapon..."), self)
sel_armor_act .triggered.connect(self.sink1.show_wear_armor)
sel_cust_armor_act.triggered.connect(self.sink1.show_wear_cust_armor)
add_weap_act .triggered.connect(self.sink3.show_add_weapon)
add_cust_weap_act .triggered.connect(self.sink3.show_add_cust_weapon)
# Rules menu
set_wound_mult_act = QtGui.QAction(
self.tr("Set Health Multiplier..."), self)
damage_act = QtGui.QAction(
self.tr("Cure/Inflict Damage..."), self)
# insight calculation submenu
m_insight_calc = self.app_menu.addMenu(
self.tr("Insight Calculation"))
self.ic_act_grp = QtGui.QActionGroup(self)
ic_default_act = QtGui.QAction(
self.tr("Default"), self)
ic_no_rank1_1 = QtGui.QAction(
self.tr("Ignore Rank 1 Skills"), self)
ic_no_rank1_2 = QtGui.QAction(
self.tr("Account Rank 1 School Skills"), self)
ic_default_act.setProperty('method', 1)
ic_no_rank1_1 .setProperty('method', 2)
ic_no_rank1_2 .setProperty('method', 3)
ic_list = [ic_default_act, ic_no_rank1_1, ic_no_rank1_2]
for act in ic_list:
self.ic_act_grp.addAction(act)
act.setCheckable(True)
m_insight_calc.addAction(act)
ic_list[self.ic_idx].setChecked(True)
# health calculation submenu
m_health_calc = self.app_menu.addMenu(self.tr("Health Display"))
self.hm_act_grp = QtGui.QActionGroup(self)
hm_default_act = QtGui.QAction(self.tr("Default"), self)
hm_cumulative_act = QtGui.QAction(self.tr("Health left"), self)
hm_totwounds_act = QtGui.QAction(self.tr("Total wounds"), self)
hm_default_act .setProperty('method', 'default')
hm_cumulative_act.setProperty('method', 'stacked')
hm_totwounds_act .setProperty('method', 'wounds')
hm_list = [hm_default_act, hm_cumulative_act, hm_totwounds_act]
hm_mode = settings.value('health_method', 'wounds')
for act in hm_list:
self.hm_act_grp.addAction(act)
act.setCheckable(True)
m_health_calc.addAction(act)
if act.property('method') == hm_mode:
act.setChecked(True)
set_wound_mult_act.triggered.connect(self.sink1.on_set_wnd_mult)
damage_act .triggered.connect(self.sink1.on_damage_act)
# Data menu
import_data_act = QtGui.QAction(self.tr("Import Data pack..."), self)
manage_data_act = QtGui.QAction(
self.tr("Manage Data packs..."), self)
reload_data_act = QtGui.QAction(self.tr("Reload data"), self)
# Options
m_options = self.app_menu.addMenu(
self.tr("Options"))
self.options_act_grp = QtGui.QActionGroup(self)
self.options_act_grp.setExclusive(False)
options_set_background_act = QtGui.QAction(
self.tr("Set background image..."), self)
options_rem_background_act = QtGui.QAction(
self.tr("Remove background image"), self)
options_set_background_color_act = QtGui.QAction(
self.tr("Set background color..."), self)
options_banner_act = QtGui.QAction(
self.tr("Toggle banner display"), self)
options_buy_for_free_act = QtGui.QAction(
self.tr("Free Shopping"), self)
options_open_data_dir_act = QtGui.QAction(
self.tr("Open Data Directory"), self)
options_dice_roll_act = QtGui.QAction(
self.tr("Dice &Roller..."), self)
options_list = [
options_set_background_act, options_rem_background_act, options_set_background_color_act, options_banner_act,
options_buy_for_free_act, options_open_data_dir_act, options_dice_roll_act] # , options_reset_geometry_act
for i, act in enumerate(options_list):
self.options_act_grp.addAction(act)
m_options.addAction(act)
if i % 2 == 0:
m_options.addSeparator()
options_buy_for_free_act.setCheckable(True)
options_buy_for_free_act.setChecked(False)
settings = QtCore.QSettings()
options_banner_act.setCheckable(True)
options_banner_act.setChecked(settings.value('isbannerenabled') == 1)
options_set_background_act.triggered.connect(
self.sink1.on_set_background)
options_rem_background_act.triggered.connect(
self.sink1.on_rem_background)
options_set_background_color_act.triggered.connect(
self.sink1.on_set_background_color)
options_banner_act.triggered.connect(
self.sink1.on_toggle_display_banner)
options_buy_for_free_act.toggled.connect(
self.sink1.on_toggle_buy_for_free)
options_open_data_dir_act.triggered.connect(
self.sink1.open_data_dir_act)
options_dice_roll_act.triggered.connect(self.sink1.show_dice_roller)
# options_reset_geometry_act.triggered.connect(self.sink1.on_reset_geometry)
# GENERAL MENU
self.app_menu_tb.setAutoRaise(True)
self.app_menu_tb.setToolButtonStyle(QtCore.Qt.ToolButtonFollowStyle)
self.app_menu_tb.setPopupMode(QtGui.QToolButton.InstantPopup)
self.app_menu_tb.setIconSize(QtCore.QSize(32, 32))
self.app_menu_tb.setIcon(QtGui.QIcon.fromTheme(
"application-menu", QtGui.QIcon(get_icon_path('gear', (32, 32)))))
self.app_menu_tb.setArrowType(QtCore.Qt.NoArrow)
# FILE MENU
self.app_menu.addAction(new_act)
self.app_menu.addAction(open_act)
self.app_menu.addAction(save_act)
self.app_menu.addAction(export_pdf_act)
self.app_menu.addAction(export_npc_act)
self.app_menu.addSeparator()
# OPTIONS
self.app_menu.addMenu(m_options)
self.app_menu.addSeparator()
# ADV
self.app_menu.addAction(resetadv_act)
self.app_menu.addAction(refund_act)
self.app_menu.addSeparator()
# OUTFIT
self.app_menu.addAction(sel_armor_act)
self.app_menu.addAction(sel_cust_armor_act)
self.app_menu.addAction(add_weap_act)
self.app_menu.addAction(add_cust_weap_act)
self.app_menu.addSeparator()
# RULES
self.app_menu.addAction(set_wound_mult_act)
self.app_menu.addSeparator()
# INSIGHT
self.app_menu.addMenu(m_insight_calc)
# HEALTH
self.app_menu.addMenu(m_health_calc)
self.app_menu.addAction(damage_act)
self.app_menu.addSeparator()
# DATA
self.app_menu.addAction(import_data_act)
self.app_menu.addAction(manage_data_act)
self.app_menu.addAction(reload_data_act)
self.app_menu.addSeparator()
# EXIT
self.app_menu.addAction(exit_act)
self.app_menu_tb.setMenu(self.app_menu)
self.tabs.setCornerWidget(self.app_menu_tb, QtCore.Qt.TopLeftCorner)
import_data_act .triggered.connect(self.sink4.import_data_act)
manage_data_act .triggered.connect(self.sink4.manage_data_act)
reload_data_act .triggered.connect(self.sink4.reload_data_act)
def init(self):
""" second step initialization """
pass
def setup_donate_button(self):
self.statusBar().showMessage(
self.tr("You can donate to the project by clicking on the button")
)
self.paypal_bt = QtGui.QPushButton(self)
self.paypal_bt.setIcon(
QtGui.QIcon(get_icon_path('btn_donate_SM', None)))
self.paypal_bt.setIconSize(QtCore.QSize(74, 21))
self.paypal_bt.setFlat(True)
self.paypal_bt.clicked.connect(self.please_donate)
self.statusBar().addPermanentWidget(self.paypal_bt)
    def connect_signals(self):
        """Wire widget and action signals to their handlers.

        Called once during setup; handlers live partly on this class and
        partly on the sink1/sink4 delegate objects.
        """
        # notify only user edit
        self.tx_mod_init.editingFinished.connect(self.update_from_model)
        # update model name
        self.tx_pc_name.editingFinished.connect(self.on_pc_name_change)
        # personal information
        for widget in self.pers_info_widgets:
            widget.editingFinished.connect(self.on_pers_info_change)
        # flag (honor/glory/status/taint/infamy) points and rank widgets
        for widget in self.pc_flags_points:
            widget.valueChanged.connect(self.on_flag_points_change)
        for tx in self.pc_flags_rank:
            tx.editingFinished.connect(self.on_flag_rank_change)
        self.void_points.valueChanged.connect(self.on_void_points_change)
        #self.trait_sig_mapper.mapped.connect(self.on_trait_increase)
        # legacy string-based connect — presumably needed to select the
        # mapped(const QString&) overload of the signal mapper; TODO confirm
        QtCore.QObject.connect(self.trait_sig_mapper,
                               QtCore.SIGNAL('mapped(const QString &)'),
                               self.on_trait_increase)
        #self.trait_sig_mapper.connect(QtCore.SIGNAL("mapped(const QString &)"),
        #                              self,
        #                              QtCore.SLOT("on_trait_increase(const QString &)"))
        self.ic_act_grp.triggered.connect(self.on_change_insight_calculation)
        self.hm_act_grp.triggered.connect(self.on_change_health_visualization)
        self.bt_edit_family.clicked.connect(self.sink4.on_edit_family)
        self.bt_edit_school.clicked.connect(self.sink4.on_edit_first_school)
        self.bt_set_exp_points.clicked.connect(self.sink1.on_set_exp_limit)
def show_nicebar(self, wdgs):
self.nicebar = QtGui.QFrame(self)
self.nicebar.setStyleSheet('''
QWidget { background: beige;}
QPushButton {
color: #333;
border: 2px solid rgb(200,200,200);
border-radius: 7px;
padding: 5px;
background: qradialgradient(cx: 0.3, cy: -0.4,
fx: 0.3, fy: -0.4, radius: 1.35, stop: 0 #fff,
stop: 1 rgb(255,170,0));
min-width: 80px;
}
QPushButton:hover {
background: qradialgradient(cx: 0.3, cy: -0.4,
fx: 0.3, fy: -0.4, radius: 1.35, stop: 0 #fff,
stop: 1 rgb(255,100,30));
}
QPushButton:pressed {
background: qradialgradient(cx: 0.4, cy: -0.1,
fx: 0.4, fy: -0.1, radius: 1.35, stop: 0 #fff,
stop: 1 rgb(255,200,50));
}
''')
self.nicebar.setMinimumSize(0, 32)
# nicebar layout
hbox = QtGui.QHBoxLayout(self.nicebar)
hbox.setContentsMargins(9, 1, 9, 1)
for w in wdgs:
hbox.addWidget(w)
self.mvbox.insertWidget(1, self.nicebar)
self.nicebar.setVisible(True)
def hide_nicebar(self):
if not self.nicebar:
return
self.nicebar.setVisible(False)
del self.nicebar
self.nicebar = None
def on_trait_increase(self, tag):
"""raised when user click on the small '+' button near traits"""
trait_ = api.data.get_trait_by_index(int(tag))
if not trait_:
log.ui.error(u"trait not found by index: %s", tag)
return
if self.increase_trait(int(tag)) == CMErrors.NOT_ENOUGH_XP:
self.not_enough_xp_advise(self)
def on_void_increase(self):
"""raised when user click on the small '+' button near void ring"""
if self.increase_void() == CMErrors.NOT_ENOUGH_XP:
self.not_enough_xp_advise(self)
def do_buy_kata(self, kata):
"""attempt to buy a new kata"""
if self.buy_kata(kata) == CMErrors.NOT_ENOUGH_XP:
self.not_enough_xp_advise(self)
def do_buy_kiho(self, kiho):
"""attempt to buy a new kiho"""
if self.buy_kiho(kiho) == CMErrors.NOT_ENOUGH_XP:
self.not_enough_xp_advise(self)
def on_pc_name_change(self):
self.pc.name = self.tx_pc_name.text()
def on_pers_info_change(self):
w = self.sender()
if hasattr(w, 'link'):
self.pc.set_property(w.link, w.text())
def on_flag_points_change(self):
fl = self.sender()
pt = fl.value
if fl == self.pc_flags_points[0]:
val = int(self.pc_flags_rank[0].text())
api.character.set_honor(float(val + float(pt) / 10))
elif fl == self.pc_flags_points[1]:
val = int(self.pc_flags_rank[1].text())
api.character.set_glory(float(val + float(pt) / 10))
elif fl == self.pc_flags_points[2]:
val = int(self.pc_flags_rank[2].text())
api.character.set_status(float(val + float(pt) / 10))
elif fl == self.pc_flags_points[3]:
val = int(self.pc_flags_rank[3].text())
api.character.set_taint(float(val + float(pt) / 10))
else:
val = int(self.pc_flags_rank[4].text())
api.character.set_infamy(float(val + float(pt) / 10))
def on_flag_rank_change(self):
fl = self.sender()
val = int(fl.text())
if fl == self.pc_flags_rank[0]:
pt = self.pc_flags_points[0].value
api.character.set_honor(float(val + float(pt) / 10))
elif fl == self.pc_flags_rank[1]:
pt = self.pc_flags_points[1].value
api.character.set_glory(float(val + float(pt) / 10))
elif fl == self.pc_flags_rank[2]:
pt = self.pc_flags_points[2].value
api.character.set_status(float(val + float(pt) / 10))
elif fl == self.pc_flags_rank[3]:
pt = self.pc_flags_points[3].value
api.character.set_taint(float(val + float(pt) / 10))
else:
pt = self.pc_flags_points[4].value
api.character.set_infamy(float(val + float(pt) / 10))
def on_void_points_change(self):
val = self.void_points.value
self.pc.set_void_points(val)
def on_buy_skill_rank(self):
# get selected skill
sm_ = self.skill_table_view.selectionModel()
if sm_.hasSelection():
model_ = self.skill_table_view.model()
skill_id = model_.data(sm_.currentIndex(), QtCore.Qt.UserRole)
err_ = self.buy_next_skill_rank(skill_id)
if err_ != CMErrors.NO_ERROR:
if err_ == CMErrors.NOT_ENOUGH_XP:
self.not_enough_xp_advise(self)
return
idx = None
for i in range(0, self.skill_table_view.model().rowCount()):
idx = self.skill_table_view.model().index(i, 0)
if model_.data(idx, QtCore.Qt.UserRole) == skill_id:
break
if idx.isValid():
sm_.setCurrentIndex(idx, (QtGui.QItemSelectionModel.Select |
QtGui.QItemSelectionModel.Rows))
def act_choose_skills(self):
dlg = dialogs.SelWcSkills(self.pc, self)
if dlg.exec_() == QtGui.QDialog.Accepted:
api.character.rankadv.clear_skills_to_choose()
self.update_from_model()
def act_memo_spell(self):
# get selected spell
sm_ = self.spell_table_view.selectionModel()
if sm_.hasSelection():
model_ = self.spell_table_view.model()
spell_itm = model_.data(sm_.currentIndex(), QtCore.Qt.UserRole)
err_ = CMErrors.NO_ERROR
if spell_itm.memo:
self.remove_advancement_item(spell_itm.adv)
else:
err_ = self.memo_spell(spell_itm.spell_id)
if err_ != CMErrors.NO_ERROR:
if err_ == CMErrors.NOT_ENOUGH_XP:
self.not_enough_xp_advise(self)
return
idx = None
for i in xrange(0, self.spell_table_view.model().rowCount()):
idx = self.spell_table_view.model().index(i, 0)
if model_.data(idx, QtCore.Qt.UserRole).spell_id == spell_itm.spell_id:
break
if idx.isValid():
sm_.setCurrentIndex(idx, (QtGui.QItemSelectionModel.Select |
QtGui.QItemSelectionModel.Rows))
def act_buy_spell(self):
dlg = dialogs.SpellAdvDialog(self.pc, 'freeform', self)
dlg.setWindowTitle(self.tr('Add New Spell'))
dlg.set_header_text(
self.tr("<center><h2>Select the spell to learn</h2></center>"))
if dlg.exec_() == QtGui.QDialog.Accepted:
self.update_from_model()
def act_del_spell(self):
# get selected spell
sm_ = self.spell_table_view.selectionModel()
if sm_.hasSelection():
model_ = self.spell_table_view.model()
spell_itm = model_.data(sm_.currentIndex(), QtCore.Qt.UserRole)
if spell_itm.memo:
return
self.remove_spell(spell_itm.spell_id)
def on_spell_selected(self, current, previous):
# get selected spell
model_ = self.spell_table_view.model()
spell_itm = model_.data(current, QtCore.Qt.UserRole)
# toggle remove
self.del_spell_bt.setEnabled(not spell_itm.memo)
def check_rank_advancement(self):
if self.nicebar:
return
potential_insight_rank_ = api.character.insight_rank()
actual_insight_rank_ = api.character.insight_rank(strict=True)
log.rules.debug(u"check rank advancement. potential rank: %d, actual rank: %d",
potential_insight_rank_, actual_insight_rank_)
if potential_insight_rank_ > actual_insight_rank_:
# HEY, NEW RANK DUDE!
lb = QtGui.QLabel(self.tr("You reached the next rank, you have an opportunity"
" to decide your destiny."), self)
bt = QtGui.QPushButton(self.tr("Advance rank"), self)
bt.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect(self.show_advance_rank_dlg)
self.show_nicebar([lb, bt])
def check_school_new_spells(self):
if self.nicebar:
return
# Show nicebar if can get other spells
if api.character.rankadv.has_granted_free_spells():
lb = QtGui.QLabel(
self.tr("You now fit the requirements to learn other Spells"), self)
bt = QtGui.QPushButton(self.tr("Learn Spells"), self)
bt.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect(self.learn_next_school_spells)
self.show_nicebar([lb, bt])
def check_free_kihos(self):
if self.nicebar:
return
# Show nicebar if can get free kihos
if api.character.rankadv.get_gained_kiho_count() > 0:
lb = QtGui.QLabel(
self.tr("You can learn {0} kihos for free").format(api.character.rankadv.get_gained_kiho_count()), self)
bt = QtGui.QPushButton(self.tr("Learn Kihos"), self)
bt.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect(self.learn_next_free_kiho)
self.show_nicebar([lb, bt])
def check_new_skills(self):
if self.nicebar:
return
# Show nicebar if pending wildcard skills
if api.character.rankadv.has_granted_skills_to_choose():
lb = QtGui.QLabel(
self.tr("Your school gives you the choice of certain skills"), self)
bt = QtGui.QPushButton(self.tr("Choose Skills"), self)
bt.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect(self.act_choose_skills)
self.show_nicebar([lb, bt])
def check_affinity_wc(self):
if self.nicebar:
return
rank_ = api.character.rankadv.get_last()
if not rank_:
return
log.app.info(u"check if the player can choose his affinity/deficiency: [%s] / [%s] ",
u", ".join(rank_.affinities_to_choose),
u", ".join(rank_.deficiencies_to_choose))
if api.character.rankadv.has_granted_affinities_to_choose():
lb = QtGui.QLabel(
self.tr("You school grant you to choose an elemental affinity."), self)
bt = QtGui.QPushButton(self.tr("Choose Affinity"), self)
bt.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect(self.show_select_affinity)
self.show_nicebar([lb, bt])
elif api.character.rankadv.has_granted_deficiencies_to_choose():
lb = QtGui.QLabel(
self.tr("You school grant you to choose an elemental deficiency."), self)
bt = QtGui.QPushButton(self.tr("Choose Deficiency"), self)
bt.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Preferred)
bt.clicked.connect(self.show_select_deficiency)
self.show_nicebar([lb, bt])
def learn_next_school_spells(self):
dlg = dialogs.SpellAdvDialog(self.pc, 'bounded', self)
dlg.setWindowTitle(self.tr('Choose School Spells'))
dlg.set_header_text(self.tr("<center><h2>Your school has granted you \
the right to choose some spells.</h2> \
<h3><i>Choose with care.</i></h3></center>"))
if dlg.exec_() == QtGui.QDialog.Accepted:
api.character.rankadv.clear_spells_to_choose()
self.update_from_model()
def learn_next_free_kiho(self):
dlg = dialogs.KihoDialog(self.pc, self)
if dlg.exec_() == QtGui.QDialog.Accepted:
self.update_from_model()
def show_advance_rank_dlg(self):
dlg = dialogs.NextRankDlg(self.pc, self)
if dlg.exec_() == QtGui.QDialog.Accepted:
self.update_from_model()
def show_buy_skill_dlg(self):
dlg = dialogs.BuyAdvDialog(self.pc, 'skill', self)
dlg.exec_()
self.update_from_model()
def show_buy_emph_dlg(self):
# get selected skill
sm_ = self.skill_table_view.selectionModel()
if sm_.hasSelection():
model_ = self.skill_table_view.model()
skill_id = model_.data(sm_.currentIndex(), QtCore.Qt.UserRole)
dlg = dialogs.BuyAdvDialog(self.pc, 'emph', self)
dlg.fix_skill_id(skill_id)
dlg.exec_()
self.update_from_model()
def show_select_affinity(self):
rank_ = api.character.rankadv.get_last()
if not rank_:
return
to_choose = rank_.affinities_to_choose.pop()
chooses = None
if 'nonvoid' in to_choose:
chooses = [api.data.get_ring(x).text for x in api.data.rings() if x != 'void']
else:
chooses = [api.data.get_ring(x).text for x in api.data.rings()]
affinity, is_ok = QtGui.QInputDialog.getItem(self,
"L5R: CM",
self.tr(
"Select your elemental affinity"),
chooses, 0, False)
if is_ok:
ring_ = [x for x in api.data.rings() if api.data.get_ring(x).text == affinity]
if len(ring_):
rank_.affinities.append(ring_[0])
else:
rank_.affinities_to_choose.append(to_choose)
self.update_from_model()
def show_select_deficiency(self):
rank_ = api.character.rankadv.get_last()
if not rank_:
return
to_choose = rank_.deficiencies_to_choose.pop()
chooses = None
if 'nonvoid' in to_choose:
chooses = [api.data.rings.get(x).text for x in api.data.rings() if x != 'void']
else:
chooses = [api.data.rings.get(x).text for x in api.data.rings()]
deficiency, is_ok = QtGui.QInputDialog.getItem(self,
"L5R: CM",
self.tr(
"Select your elemental deficiency"),
chooses, 0, False)
if is_ok:
ring_ = [x for x in api.data.rings() if api.data.get_ring(x).text == deficiency]
if len(ring_):
rank_.deficiencies.append(ring_[0])
else:
rank_.deficiencies.append(to_choose)
self.update_from_model()
    def load_character_from(self, path):
        """Load a character file from *path* into the model and the UI.

        Creates a character model first when none exists. When the file
        loads but depends on data packs that are not installed, the user
        is warned, a brand-new character replaces the loaded one, and
        False is returned.

        NOTE(review): only the missing-books path returns an explicit
        value; all other paths implicitly return None.
        """
        with QtSignalLock(self.pers_info_widgets + [self.tx_pc_name]):
            if not self.pc:
                self.create_new_character()
            if self.pc.load_from(path):
                # remember where to save subsequent changes
                self.save_path = path
                if not api.character.books.fulfills_dependencies():
                    # warn about missing dependencies
                    self.warn_about_missing_books()
                    # immediately create a new character
                    self.create_new_character()
                    return False
                print('successfully loaded character from {0}'.format(self.save_path))
                self.tx_pc_notes.set_content(self.pc.extra_notes)
                self.update_from_model()
            else:
                print('character load failure')
def set_clan(self, clan_id):
"""Set UI clan"""
clan_ = api.data.clans.get(clan_id)
if clan_:
self.lb_pc_clan.setText(clan_.name)
else:
self.lb_pc_clan.setText(self.tr("No Clan"))
def set_family(self, family_id):
"""Set UI family"""
family_ = api.data.families.get(family_id)
if family_:
self.lb_pc_family.setText(family_.name)
else:
self.lb_pc_family.setText(self.tr("No Family"))
def set_school(self, school_id):
"""Set UI school"""
school_ = api.data.schools.get(school_id)
if school_:
self.lb_pc_school.setText(school_.name)
else:
self.lb_pc_school.setText(self.tr("No School"))
def set_void_points(self, value):
if self.void_points.value == value:
return
self.void_points.set_value(value)
def set_flag(self, flag, value):
rank, points = api.rules.split_decimal(value)
# set rank
self.pc_flags_rank[flag].setText(str(rank))
# set points
self.pc_flags_points[flag].set_value(int(points * 10))
def set_honor(self, value):
self.set_flag(0, value)
def set_glory(self, value):
self.set_flag(1, value)
def set_status(self, value):
self.set_flag(2, value)
def set_taint(self, value):
self.set_flag(3, value)
def set_infamy(self, value):
self.set_flag(4, value)
def update_from_model(self):
with QtSignalLock(self.pers_info_widgets+[self.tx_pc_name]):
self.tx_pc_name.setText(self.pc.name)
self.set_clan(self.pc.clan)
self.set_family(api.character.get_family())
self.set_school(api.character.schools.get_current())
for w in self.pers_info_widgets:
if hasattr(w, 'link'):
w.setText(self.pc.get_property(w.link))
pc_xp = api.character.xp()
self.tx_pc_exp.setText('{0} / {1}'.format(pc_xp, self.pc.exp_limit))
# rings
for i, r in enumerate(api.data.rings()):
self.rings[i][1].setText(str(api.character.ring_rank(r)))
# traits
for i, t in enumerate(api.data.traits()):
self.attribs[i][1].setText(str(api.character.trait_rank(t)))
# pc rank
self.tx_pc_rank.setText(str(api.character.insight_rank()))
self.tx_pc_ins .setText(str(api.character.insight()))
# pc flags
with QtSignalLock(self.pc_flags_points+self.pc_flags_rank+[self.void_points]):
self.set_honor(api.character.honor())
self.set_glory(api.character.glory())
self.set_infamy(api.character.infamy())
self.set_status(api.character.status())
self.set_taint(api.character.taint())
self.set_void_points(self.pc.void_points)
# armor
self.tx_armor_nm .setText(str(api.character.get_armor_name()))
self.tx_base_tn .setText(str(api.character.get_base_tn()))
self.tx_armor_tn .setText(str(api.character.get_armor_tn()))
self.tx_armor_rd .setText(str(api.character.get_full_rd()))
self.tx_cur_tn .setText(str(api.character.get_full_tn()))
# armor description
self.tx_armor_nm.setToolTip(str(api.character.get_armor_desc()))
self.display_health()
self.update_wound_penalties()
self.wnd_lb.setTitle(
self.tr("Health / Wounds (x%d)") % self.pc.health_multiplier)
# initiative
self.tx_base_init.setText(
api.rules.format_rtk_t(api.rules.get_base_initiative()))
self.tx_mod_init.setText(
api.rules.format_rtk_t(api.rules.get_init_modifiers()))
self.tx_cur_init.setText(
api.rules.format_rtk_t(api.rules.get_tot_initiative()))
# affinity / deficiency
affinities_ = []
for a in api.character.spells.affinities():
ring_ = api.data.get_ring(a)
if not ring_:
affinities_.append(a)
else:
affinities_.append(ring_.text)
deficiencies_ = []
for a in api.character.spells.deficiencies():
ring_ = api.data.get_ring(a)
if not ring_:
deficiencies_.append(a)
else:
deficiencies_.append(ring_.text)
self.lb_affin.setText(u', '.join(affinities_))
self.lb_defic.setText(u', '.join(deficiencies_))
# money
with QtSignalLock([self.money_widget]):
self.money_widget.set_value(api.character.get_money())
self.hide_nicebar()
self.check_new_skills()
self.check_affinity_wc()
self.check_rank_advancement()
self.check_school_new_spells()
self.check_free_kihos()
# disable step 0-1-2 if any xp are spent
has_adv = len(self.pc.advans) > 0
self.bt_edit_family.setEnabled(not has_adv)
self.bt_edit_school.setEnabled(not has_adv)
# Update view-models
self.sk_view_model .update_from_model(self.pc)
self.ma_view_model .update_from_model(self.pc)
self.adv_view_model .update_from_model(self.pc)
self.th_view_model .update_from_model(self.pc)
self.merits_view_model.update_from_model(self.pc)
self.flaws_view_model .update_from_model(self.pc)
self.sp_view_model .update_from_model(self.pc)
self.melee_view_model .update_from_model(self.pc)
self.ranged_view_model.update_from_model(self.pc)
self.arrow_view_model .update_from_model(self.pc)
self.mods_view_model .update_from_model(self.pc)
self.ka_view_model .update_from_model(self.pc)
self.ki_view_model .update_from_model(self.pc)
self.equip_view_model .update_from_model(self.pc)
def update_wound_penalties(self):
WOUND_PENALTIES_NAMES = [
self.tr("Healthy"),
self.tr("Nicked"),
self.tr("Grazed"),
self.tr("Hurt"),
self.tr("Injured"),
self.tr("Crippled"),
self.tr("Down"),
]
for i in reversed(range(0, 7)):
if i < 7:
penalty = api.rules.get_wound_penalties(i)
text = u"{0} (+{1})".format(WOUND_PENALTIES_NAMES[i], penalty)
else:
text = WOUND_PENALTIES_NAMES[i]
self.wounds[i][0].setText(text)
# TODO Create a generate mechanism for data-pack defined bonus to penalties.
# TODO toku bushi school removes some penalties
def display_health(self):
settings = QtCore.QSettings()
method = settings.value('health_method', 'wounds')
if method == 'default':
self.display_health_default()
elif method == 'wounds':
self.display_total_wounds()
else:
self.display_health_stacked()
def display_health_default(self):
wounds_table = api.rules.get_wounds_table()
for i, (i_inc, i_total, i_stacked, i_inc_wounds, i_total_wounds, i_stacked_wounds) in enumerate(wounds_table):
self.wounds[i][1].setText(str(i_inc))
self.wounds[i][2].setText(str(i_inc_wounds) if i_inc_wounds else '')
def display_health_stacked(self):
wounds_table = api.rules.get_wounds_table()
for i, (i_inc, i_total, i_stacked, i_inc_wounds, i_total_wounds, i_stacked_wounds) in enumerate(wounds_table):
self.wounds[i][1].setText(str(i_total))
self.wounds[i][2].setText(str(i_total_wounds) if i_total_wounds else '')
def display_total_wounds(self):
wounds_table = api.rules.get_wounds_table()
for i, (i_inc, i_total, i_stacked, i_inc_wounds, i_total_wounds, i_stacked_wounds) in enumerate(wounds_table):
self.wounds[i][1].setText(str(i_stacked))
self.wounds[i][2].setText(str(i_stacked_wounds) if i_stacked_wounds else '')
def advise_conversion(self, *args):
settings = QtCore.QSettings()
if settings.value('advise_conversion', 'true') == 'false':
return
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(self.tr("The character has been updated."))
msgBox.setInformativeText(self.tr("This character was created with an older version of the program.\n"
"I've done my best to convert and update your character, hope you don't mind :).\n"
"I also created a backup of your character file in\n\n%s.") % args)
do_not_prompt_again = QtGui.QCheckBox(
self.tr("Do not prompt again"), msgBox)
# PREVENT MSGBOX TO CLOSE ON CLICK
do_not_prompt_again.blockSignals(True)
msgBox.addButton(QtGui.QMessageBox.Ok)
msgBox.addButton(do_not_prompt_again, QtGui.QMessageBox.ActionRole)
msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
msgBox.exec_()
if do_not_prompt_again.checkState() == QtCore.Qt.Checked:
settings.setValue('advise_conversion', 'false')
def advise_successfull_import(self, count):
settings = QtCore.QSettings()
if settings.value('advise_successfull_import', 'true') == 'false':
return
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(
self.tr("{0} data pack(s) imported succesfully.").format(count))
do_not_prompt_again = QtGui.QCheckBox(
self.tr("Do not prompt again"), msgBox)
# PREVENT MSGBOX TO CLOSE ON CLICK
do_not_prompt_again.blockSignals(True)
msgBox.addButton(QtGui.QMessageBox.Ok)
msgBox.addButton(do_not_prompt_again, QtGui.QMessageBox.ActionRole)
msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
msgBox.setIcon(QtGui.QMessageBox.Information)
msgBox.exec_()
if do_not_prompt_again.checkState() == QtCore.Qt.Checked:
settings.setValue('advise_successfull_import', 'false')
def advise_error(self, message, dtl=None):
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('L5R: CM')
msgBox.setTextFormat(QtCore.Qt.RichText)
msgBox.setText(message)
if dtl:
msgBox.setInformativeText(dtl)
msgBox.setIcon(QtGui.QMessageBox.Critical)
msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
msgBox.exec_()
def advise_warning(self, message, dtl=None):
msgBox = QtGui.QMessageBox(self)
msgBox.setTextFormat(QtCore.Qt.RichText)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(message)
if dtl:
msgBox.setInformativeText(dtl)
msgBox.setIcon(QtGui.QMessageBox.Warning)
msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
msgBox.exec_()
def ask_warning(self, message, dtl=None):
msgBox = QtGui.QMessageBox(self)
msgBox.setTextFormat(QtCore.Qt.RichText)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(message)
if dtl:
msgBox.setInformativeText(dtl)
msgBox.setIcon(QtGui.QMessageBox.Warning)
msgBox.addButton(QtGui.QMessageBox.Ok)
msgBox.addButton(QtGui.QMessageBox.Cancel)
msgBox.setDefaultButton(QtGui.QMessageBox.Cancel)
return msgBox.exec_() == QtGui.QMessageBox.Ok
def ask_to_save(self):
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(self.tr("The character has been modified."))
msgBox.setInformativeText(self.tr("Do you want to save your changes?"))
msgBox.addButton(QtGui.QMessageBox.Save)
msgBox.addButton(QtGui.QMessageBox.Discard)
msgBox.addButton(QtGui.QMessageBox.Cancel)
msgBox.setDefaultButton(QtGui.QMessageBox.Save)
return msgBox.exec_()
def ask_to_upgrade(self, target_version):
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('L5R: CM')
msgBox.setText(
self.tr("L5R: CM v%s is available for download.") % target_version)
msgBox.setInformativeText(
self.tr("Do you want to open the download page?"))
msgBox.addButton(QtGui.QMessageBox.Yes)
msgBox.addButton(QtGui.QMessageBox.No)
msgBox.setDefaultButton(QtGui.QMessageBox.No)
return msgBox.exec_()
def not_enough_xp_advise(self, parent=None):
if parent is None:
parent = self
QtGui.QMessageBox.warning(parent, self.tr("Not enough XP"),
self.tr("Cannot purchase.\nYou've reached the XP Limit."))
return
    def closeEvent(self, ev):
        """Qt close handler: persist settings and offer to save a dirty
        character. Choosing Cancel aborts the close via ev.ignore().
        """
        # update interface last time, to set unsaved states
        self.update_from_model()
        # SAVE GEOMETRY
        settings = QtCore.QSettings()
        settings.setValue('geometry', self.saveGeometry())
        # persist which insight-calculation method is active, as an index
        if self.pc.insight_calculation == api.rules.insight_calculation_2:
            settings.setValue('insight_calculation', 2)
        elif self.pc.insight_calculation == api.rules.insight_calculation_3:
            settings.setValue('insight_calculation', 3)
        else:
            settings.setValue('insight_calculation', 1)
        if self.pc.is_dirty():
            resp = self.ask_to_save()
            if resp == QtGui.QMessageBox.Save:
                # NOTE(review): after saving, super().closeEvent is not
                # called; the event's default accepted state is relied on
                self.sink1.save_character()
            elif resp == QtGui.QMessageBox.Cancel:
                ev.ignore()
            else:
                super(L5RMain, self).closeEvent(ev)
        else:
            super(L5RMain, self).closeEvent(ev)
def select_save_path(self):
    """Show a save dialog and return the chosen '.l5r' file path.

    Returns None when the user cancels. The chosen directory is
    remembered in QSettings under 'last_open_dir'.
    """
    settings = QtCore.QSettings()
    last_dir = settings.value('last_open_dir', QtCore.QDir.homePath())
    char_name = self.get_character_full_name()
    proposed = os.path.join(last_dir, char_name)
    # NOTE(review): assumes the PyQt API where getSaveFileName returns a
    # plain string (not a (path, filter) tuple) -- confirm sip API version.
    fileName = QtGui.QFileDialog.getSaveFileName(
        self,
        self.tr("Save Character"),
        proposed,
        self.tr("L5R Character files (*.l5r)"))
    # user pressed cancel or didn't enter a name
    if fileName == u'':
        return None
    last_dir = os.path.dirname(fileName)
    if last_dir != '':
        # print 'save last_dir: %s' % last_dir
        settings.setValue('last_open_dir', last_dir)
    # Guarantee the returned path carries the .l5r extension.
    if fileName.endswith('.l5r'):
        return fileName
    return fileName + '.l5r'
def select_load_path(self):
    """Show an open dialog and return the selected character file path.

    NOTE(review): on cancel this returns the empty string rather than
    None (unlike select_save_path) -- callers must handle both; confirm
    whether this asymmetry is intended.
    """
    settings = QtCore.QSettings()
    last_dir = settings.value('last_open_dir', QtCore.QDir.homePath())
    fileName = QtGui.QFileDialog.getOpenFileName(
        self,
        self.tr("Load Character"),
        last_dir,
        self.tr("L5R Character files (*.l5r)"))
    # Remember the directory for the next open/save dialog.
    last_dir = os.path.dirname(fileName)
    if last_dir != '':
        settings.setValue('last_open_dir', last_dir)
    return fileName
def select_export_file(self, file_ext='.txt'):
    """Ask the user for a path to export the character to.

    :param file_ext: fallback extension appended when the chosen name
                     lacks it ('.txt' by default; the dialog itself only
                     offers PDF).
    :return: the chosen path always ending with file_ext, or None when
             the user cancels.
    """
    supported_filters = [self.tr("PDF Files(*.pdf)")]
    settings = QtCore.QSettings()
    last_dir = settings.value('last_open_dir', QtCore.QDir.homePath())
    char_name = self.get_character_full_name()
    proposed = os.path.join(last_dir, char_name)
    fileName = QtGui.QFileDialog.getSaveFileName(
        self,
        self.tr("Export Character"),
        proposed,
        ";;".join(supported_filters))
    # user pressed cancel or didn't enter a name
    if fileName == u'':
        return None
    # BUGFIX: was os.path.dirname(fileName[0]), which took the dirname of
    # the first *character* of the path (always ''); use the whole path,
    # matching select_save_path/select_load_path.
    last_dir = os.path.dirname(fileName)
    if last_dir != '':
        settings.setValue('last_open_dir', last_dir)
    if fileName.endswith(file_ext):
        return fileName
    return fileName + file_ext
def select_import_data_pack(self):
    """Ask for one or more data-pack archives to import.

    :return: list of selected file paths, or None when nothing was chosen.
    """
    # supported_ext is informational; the dialog filters drive the UI.
    supported_ext = ['.zip', '.l5rcmpack']
    supported_filters = [self.tr("L5R:CM Data Pack(*.l5rcmpack *.zip)"),
                         self.tr("Zip Archive(*.zip)")]
    settings = QtCore.QSettings()
    last_data_dir = settings.value(
        'last_open_data_dir', QtCore.QDir.homePath())
    ret = QtGui.QFileDialog.getOpenFileNames(
        self,
        self.tr("Load data pack"),
        last_data_dir,
        ";;".join(supported_filters))
    files = ret
    if not len(files):
        return None
    # Remember the directory of the first selection for next time.
    last_data_dir = os.path.dirname(files[0])
    if last_data_dir != '':
        # print 'save last_dir: %s' % last_dir
        settings.setValue('last_open_data_dir', last_data_dir)
    return files
def on_change_insight_calculation(self):
    """Menu handler: apply the insight-calculation method stored on the
    sender's checked action, then refresh the UI from the model."""
    method = self.sender().checkedAction().property('method')
    api.character.set_insight_calculation_method(method)
    self.update_from_model()
def on_change_health_visualization(self):
    """Menu handler: persist the selected health display method in
    QSettings ('health_method'), then refresh the UI."""
    method = self.sender().checkedAction().property('method')
    settings = QtCore.QSettings()
    settings.setValue('health_method', method)
    self.update_from_model()
def create_new_character(self):
    """Reset the model to a brand-new character and mark it as saved."""
    self.sink1.new_character()
    self.pc.unsaved = False
def get_health_rank(self, idx):
    """Return the displayed text of the health-rank widget at row *idx*."""
    return self.wounds[idx][1].text()
def warn_about_missing_books(self):
    """Show an error dialog listing the data books this character needs
    but that are missing (or older than required)."""
    summary = self.tr("<h3>Missing books</h3>")
    summary += self.tr("<p>To load this character you need this additional books:</p>")
    items = [
        "<li>{book_nm} >= {book_ver}</li>".format(
            book_nm=b.name, book_ver=b.version)
        for b in api.character.books.get_missing_dependencies()
    ]
    details = u"<ul>" + u"".join(items) + u"</ul>"
    self.advise_error(summary, details)
# MAIN ###
#def dump_slots(obj, out_file):
#    with open(out_file, 'wt') as fobj:
#        mobj = obj.metaObject()
#        for i in xrange(mobj.methodOffset(), mobj.methodCount()):
#            if mobj.method(i).methodType() == QtCore.QMetaMethod.Slot:
#                fobj.write(
#                    mobj.method(i).signature() + ' ' + mobj.method(i).tag() + '\n')

# Command-line switches recognized by main(), and the MIME types
# registered for L5R character files and data packs.
OPEN_CMD_SWITCH = '--open'
IMPORT_CMD_SWITCH = '--import'
MIME_L5R_CHAR = "applications/x-l5r-character"
MIME_L5R_PACK = "applications/x-l5r-pack"
def main():
    """Application entry point.

    Sets up the Qt application, MIME types and translations, builds the
    main window, handles the --open/--import command-line switches, and
    runs the Qt event loop. Returns the event-loop exit code (or the
    import result when run in headless import mode).
    """
    try:
        app = QtGui.QApplication(sys.argv)
        log.app.info(u"START")
        # setup mimetypes
        mimetypes.add_type(MIME_L5R_CHAR, ".l5r")
        mimetypes.add_type(MIME_L5R_PACK, ".l5rcmpack")
        QtCore.QCoreApplication.setApplicationName(APP_NAME)
        QtCore.QCoreApplication.setApplicationVersion(APP_VERSION)
        QtCore.QCoreApplication.setOrganizationName(APP_ORG)
        log.app.info(u"%s %s %s by %s", APP_NAME, APP_VERSION, APP_DESC, APP_ORG)
        app.setWindowIcon(QtGui.QIcon(get_app_icon_path()))
        # Setup translation
        settings = QtCore.QSettings()
        use_machine_locale = settings.value('use_machine_locale', 1)
        app_translator = QtCore.QTranslator(app)
        qt_translator = QtCore.QTranslator(app)
        log.app.debug(u"use machine locale: %s, machine locale: %s",
                      "yes" if use_machine_locale else "no", QtCore.QLocale.system().name())
        if use_machine_locale == 1:
            use_locale = QtCore.QLocale.system().name()
        else:
            use_locale = settings.value('use_locale')
        qt_loc = 'qt_{0}'.format(use_locale[:2])
        app_loc = get_app_file('i18n/{0}'.format(use_locale))
        log.app.debug(u"current locale: %s, qt locale: %s, app locale file: %s", use_locale, qt_loc, app_loc)
        log.app.debug(u"qt translation path: %s", QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath))
        qt_translator .load(
            qt_loc, QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath))
        app.installTranslator(qt_translator)
        app_translator.load(app_loc)
        app.installTranslator(app_translator)
        # start main form
        l5rcm = L5RMain(use_locale)
        l5rcm.setWindowTitle(APP_DESC + ' v' + APP_VERSION)
        l5rcm.init()
        # initialize new character
        l5rcm.create_new_character()
        if len(sys.argv) > 1:
            if OPEN_CMD_SWITCH in sys.argv:
                log.app.debug(u"open character from command line")
                of = sys.argv.index(OPEN_CMD_SWITCH)
                l5rcm.load_character_from(sys.argv[of + 1])
            elif IMPORT_CMD_SWITCH in sys.argv:
                imf = sys.argv.index(IMPORT_CMD_SWITCH)
                pack_path = sys.argv[imf + 1]
                log.app.debug(u"import datapack from command line: %s", pack_path)
                # Headless import: quit the app and return the import result.
                app.quit()
                return l5rcm.import_data_pack(pack_path)
            else:
                # check mimetype
                log.app.debug(u"import file from command line ( should guess mimetype )")
                file_path = sys.argv[1]
                mime = mimetypes.guess_type(file_path)
                log.app.info(u"open file: %s, mime type: %s", file_path, mime)
                if mime[0] == MIME_L5R_CHAR:
                    l5rcm.load_character_from(file_path)
                elif mime[0] == MIME_L5R_PACK:
                    app.quit()
                    return l5rcm.import_data_pack(file_path)
        l5rcm.show()
        # alert if not datapacks are installed
        l5rcm.check_datapacks()
        # REMOVE CHECK FOR UPDATES UNTIL BETTER IMPLEMENTED
        # l5rcm.check_updates()
        return app.exec_()
    except Exception as e:
        # NOTE(review): after logging, execution falls through and main()
        # returns None, so sys.exit(main()) exits with status 0 even on a
        # crash -- confirm this is intended.
        log.app.exception(e)
    finally:
        log.app.info("KTHXBYE")
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| Kaniabi/l5r-character-manager-3 | l5r/main.py | Python | gpl-3.0 | 100,415 | [
"VisIt"
] | f132151bfc50ae2409b8829bf17141604d96ade3d2b7bbeb8fad9989aee4093f |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# MTurk HIT configuration for the model-evaluator dialog-rating task;
# consumed by ParlAI's MTurk task runner.
task_config = {}


"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
task_config['hit_title'] = 'Give a rating to a dialog between two people'


"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
task_config['hit_description'] = 'Give a rating to a dialog between two people.'


"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'chat,dialog,rating'


"""A short name indicating the turker's role in the conversation.
"""
task_config['worker_agent_id'] = 'Teacher'


"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
# NOTE(review): the escaped quotes put literal ''' markers around the HTML
# body in the resulting string -- confirm the frontend expects them.
task_config['task_description'] = \
    '''\'\'\'
In this task, you are going to read a dialog between two people, and you will need to give a rating on how good the response is.<br><br>
Example:<br><br>
------------------- Task Begin ------------------- <br><br>
<b>Model Evaluator</b>:<br>
This is the author of the article . These were my picks and it 's an opinion . I did say Quantum was mediocre to bad and it 's because the trailer is so incredible and Casino Royale was so great that it was a let down . Also are you really gon na say Phantom Menace wasnt a terrible movie that had a great trailer .<br><br>
How would you rate the following response (from 0 to 10):<br><br>
True its an opinion as is my comment . I 'd say quantum of solace was meh , bland . But it had one of the best bond villains around . As for phantom menace , I 'd say it gets far more hate than it deserves . Did I personally enjoy it ? Yes . Was it a good movie ? Not especially . Did it live up to the hype ? God no ? Was it terrible ? Not even close . Attack of the clones on the other hand , that was dreck .<br><br>
<b>Worker</b>:<br>
8<br><br>
------------------- Task Done ------------------- <br><br>
If you are ready, please click "Accept HIT" to start this task.
\'\'\''''
| calee88/ParlAI | parlai/mturk/tasks/model_evaluator/task_config.py | Python | bsd-3-clause | 2,630 | [
"CASINO"
] | 8c63c44dc7146dcc97d866aaf11d63708724a9261ca67330de82b53c79ad131e |
import unittest
from octopus.server.shell_mananger import ShellManager
class TestShellManager(unittest.TestCase):
    """Tests ShellManager behavior against an unreachable OrientDB server."""

    def testUnreachableServer(self):
        """Listing shells on a server that is not running must raise
        ConnectionRefusedError when the result is consumed."""
        self.hostname = 'localhost'
        self.port = '1337'
        shell_manager = ShellManager(self.hostname, self.port)
        shells = shell_manager.list()
        # list() appears to be lazy: the connection error only surfaces when
        # the returned iterable is materialized -- hence list(shells) here.
        self.assertRaises(ConnectionRefusedError, list, shells)
| octopus-platform/octopus-tools | tests/orientdb_shell_manager.py | Python | lgpl-3.0 | 383 | [
"Octopus"
] | 687dfe2c320848738afbb77dffcb18bf30831a0e7241bf158b6dcb1acbe5ae6c |
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawLoops(myscreen,loops,loopColor):
    """Render each closed waterline loop as line segments on the VTK screen.

    Every consecutive pair of points is joined; the final point is also
    joined back to the first point to close the loop.
    """
    # draw the loops
    nloop = 0
    for lop in loops:
        n = 0
        N = len(lop)
        first_point=ocl.Point(-1,-1,5)
        previous=ocl.Point(-1,-1,5)
        for p in lop:
            if n==0: # don't draw anything on the first iteration
                previous=p
                first_point = p
            elif n== (N-1): # the last point
                myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopColor) ) # the normal line
                # and a line from p to the first point
                myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=loopColor) )
            else:
                myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopColor) )
                previous=p
            n=n+1
        print "rendered loop ",nloop, " with ", len(lop), " points"
        nloop = nloop+1
def getLoops(wl,zh,diam):
t_before = time.time()
wl.reset()
wl.setZ(zh)
wl.run()
t_after = time.time()
calctime = t_after-t_before
print " Waterline done in ", calctime," s"
return wl.getLoops()
if __name__ == "__main__":
    # Demo: compute waterline toolpaths on the tux STL model and render them.
    print ocl.version()
    myscreen = camvtk.VTKScreen()
    #stl = camvtk.STLSurf("../../stl/demo.stl")
    stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
    myscreen.addActor(stl)
    #stl.SetWireframe() # render tux as wireframe
    stl.SetSurface() # render tux as surface
    stl.SetColor(camvtk.cyan)
    polydata = stl.src.GetOutput()
    s = ocl.STLSurf()
    camvtk.vtkPolyData2OCLSTL(polydata, s)
    print "STL surface read,", s.size(), "triangles"
    #zh = 1.0
    t_before = time.time()
    diam = 0.5
    # NOTE: the full z-height list is immediately overwritten below,
    # so only z=1.0 is actually computed.
    zheights=[0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6]
    zheights=[float(1.0)]
    wl = ocl.Waterline()
    #wl = ocl.AdaptiveWaterline()
    wl.setSTL(s)
    length= 10
    cutter = ocl.BallCutter( diam , length )
    wl.setCutter(cutter)
    wl.setSampling(0.0314)
    for zh in zheights:
        print "calculating Waterline at z= ", zh
        cutter_loops = getLoops(wl,zh,diam)
        drawLoops(myscreen,cutter_loops,camvtk.red)
    t_after = time.time()
    calctime = t_after-t_before
    print " TOTAL Waterline time is: ", calctime," s"
    print "done."
    # Camera setup, decorations, and the interactive render loop.
    myscreen.camera.SetPosition(15, 13, 7)
    myscreen.camera.SetFocalPoint(5, 5, 0)
    camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
    camvtk.drawOCLtext(myscreen)
    myscreen.render()
    myscreen.iren.Start()
    #raw_input("Press Enter to terminate")
| JohnyEngine/CNC | opencamlib/scripts/waterline/waterline_8_tux_adaptive.py | Python | apache-2.0 | 2,739 | [
"VTK"
] | 9b96e53963ff4f3943b74bee2e5b93ec8e6acca807fde324eba1f0781b714e96 |
"""
===========================================
===========================================
Author: Shujia Huang & Siyang Liu
Date : 2014-05-20 08:50:06
"""
import sys
import numpy as np
from sklearn import mixture
from sklearn.utils.extmath import logsumexp
# My own class
import VariantDatum as vd
import VariantRecalibratorArgumentCollection as VRAC
class VariantRecalibratorEngine:
    """Trains a Gaussian mixture model (GMM) over variant annotation vectors
    and scores variants with log-odds (LOD) values, VQSR-style."""

    def __init__(self, vrac=None):
        # Use the supplied argument collection; fall back to defaults.
        self.VRAC = VRAC.VariantRecalibratorArgumentCollection()
        if vrac: self.VRAC = vrac
        # EM convergence threshold passed to the GMM fit.
        self.MIN_PROB_CONVERGENCE = 2e-3
        # Floor used when a LOD evaluates to infinity.
        self.MIN_ACCEPTABLE_LOD_SCORE = -20000.0

    def ClassifyData(self, dataSize):
        # Classify the data into TrainingSet, Cross-ValidationSet and TestSet. Reture the data indexes
        # Call in GenerateModel
        trainSetSize = int (np.round(self.VRAC.TRAIN_SIZE_RATE * dataSize))
        cvSetSize = int (np.round(self.VRAC.CV_SIZE_RATE * dataSize) )
        testSetSize = int (np.round(self.VRAC.TEST_SIZE_RATE * dataSize) )
        trainSetIdx = range(trainSetSize) # The index array of training data
        cvSetIdx = range(trainSetSize, cvSetSize + trainSetSize) # The index array of cross-validation data
        testSetIdx = range(cvSetSize + trainSetSize, dataSize ) # The index array of Test data
        return trainSetIdx, cvSetIdx, testSetIdx

    def GenerateModel(self, data, maxGaussians):
        """Fit GMMs with 1..maxGaussians components to the annotation
        vectors and return the converged model with the lowest BIC."""
        if len(data) == 0: raise ValueError ('[ERROR] No data found. The size is %d' %len(data))
        if not isinstance(data[0], vd.VariantDatum):
            raise ValueError ('[ERROR] The data type should be "VariantDatum" in GenerateModel() of class VariantRecalibratorEngine(), but found %s'% str(type(data[0])))
        if maxGaussians <= 0: raise ValueError ('[ERROR] maxGaussians must be a positive integer but found: %d' % maxGaussians)
        # NOTE(review): 'thresh' is the pre-0.16 scikit-learn GMM API --
        # confirm the pinned sklearn version before upgrading.
        gmms = [ mixture.GMM(n_components = n + 1, covariance_type = 'full', thresh = self.MIN_PROB_CONVERGENCE,
                             n_iter = self.VRAC.NITER , n_init = self.VRAC.NINIT, params = 'wmc',
                             init_params = 'wmc') for n in range(maxGaussians) ]
        trainingData = np.array([d.annotations for d in data]);
        #np.random.shuffle(trainingData) # Random shuffling
        #trainSetIdx, cvSetIdx, testSetIdx = self.ClassifyData(len(trainingData))
        # Model selection by Bayesian Information Criterion (lower is better).
        minBIC, bics = np.inf, []
        for g in gmms:
            print >> sys.stderr, '[INFO] Trying %d gaussian in GMM process training ...' % g.n_components
            g.fit(trainingData); bic = g.bic(trainingData)
            bics.append(bic)
            if bic == float('inf') or (bic < minBIC and g.converged_):
                bestgmm, minBIC = g, bic
            print >> sys.stderr, ' -- Converge infomation of training process:', g.converged_
        print >> sys.stderr, '[INFO] All the BIC:', bics
        print >> sys.stderr, '[INFO] Model Training Done. And take the model with %d gaussiones which with BIC %f.' % (len(bestgmm.means_), minBIC)
        return bestgmm

    def EvaluateData(self, data, gmm, evaluateContrastively = False):
        """Assign a log10-likelihood LOD to each datum; in contrastive mode
        combine it with the previously assigned (positive-model) LOD."""
        if not isinstance(data[0], vd.VariantDatum):
            raise ValueError ('[ERROR] The data type should be "VariantDatum" in EvaluateData() of class VariantRecalibratorEngine(), but found %s'% str(type(data[0])))
        print >> sys.stderr, '[INFO] Evaluating full set of', len(data), 'variants ...'
        for i,_ in enumerate(data):
            thisLod = gmm.score(data[i].annotations[np.newaxis,:]) / np.log(10) # log likelihood and the base is 10
            thisLod = thisLod[0]
            if np.math.isnan(thisLod):
                # A NaN score means the model is unusable; flag and bail out.
                gmm.converged_ = False
                return
            if evaluateContrastively:
                # data[i].lod must has been assigned by good model or something like that
                # contrastive evaluation: (prior + positive model - negative model)
                data[i].lod = data[i].prior + data[i].lod - thisLod
                if thisLod == float('inf'): data[i].lod = self.MIN_ACCEPTABLE_LOD_SCORE * (1.0 + np.random.rand(1)[0])
            else:
                data[i].lod = thisLod # positive model only so set the lod and return
        return self

    def CalculateWorstPerformingAnnotation(self, data, goodModel, badModel):
        """For each datum record the annotation dimension that most favors
        the bad model over the good model."""
        for i, d in enumerate(data):
            probDiff = [self.EvaluateDatumInOneDimension(goodModel, d, k) - self.EvaluateDatumInOneDimension(badModel, d, k) for k in range(len(d.annotations))]
            data[i].worstAnnotation = np.argsort(probDiff)[0] # Get the index of the worst annotations
        return self

    def EvaluateDatumInOneDimension(self, gmm, datum, iii):
        """Marginal log10-likelihood of annotation dimension *iii* under the
        mixture (each component treated as a 1-D normal)."""
        pVarInGaussianLogE = [np.log(w) + NormalDistributionLoge(gmm.means_[k][iii], gmm.covars_[k][iii][iii], datum.annotations[iii]) for k,w in enumerate(gmm.weights_)]
        return logsumexp(np.array(pVarInGaussianLogE)) / np.log(10) # np.log10(Sum(pi_k * p(v|n,k)))
def NormalDistributionLoge(mu, sigma, x):
    """Return the natural-log density of a Normal(mu, sigma) at x.

    :param mu: mean of the distribution
    :param sigma: standard deviation; must be > 0
    :param x: point at which the density is evaluated
    :raises ValueError: if sigma <= 0, or any parameter is infinite or NaN
    """
    if sigma <= 0: raise ValueError ('[ERROR] sd: Standard deviation of normal must be > 0 but found: %f' % sigma)
    # BUGFIX: the original only compared against float('inf')/float('-inf'),
    # so NaN inputs slipped through despite the "non-NAN" error message.
    if any(np.isinf(v) or np.isnan(v) for v in (mu, sigma, x)):
        raise ValueError ('[ERROR] mean, sd, or, x: Normal parameters must be well formatted (non-INF, non-NAN)')
    # log N(x; mu, sigma) = -(log(sigma) + 0.5*log(2*pi)) - 0.5*((x-mu)/sigma)^2
    a = -1.0 * (np.log(sigma) + 0.5 * np.log(2 * np.pi))
    b = -0.5 * ((x - mu) / sigma) ** 2
    return a + b # The Natural log
| ShujiaHuang/AsmVar | src/AsmvarVarScore/modul/VariantRecalibratorEngine.py | Python | mit | 5,602 | [
"Gaussian"
] | 17dc68fbd6dd03692b4343f6efe316064e919cc015fd2fc1aa7d5640e3927138 |
# Copyright (c) 2015-2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import click
from molecule import util
from molecule.command import base
from molecule.dependency import ansible_galaxy
from molecule.dependency import shell
class Dependency(base.Base):
    """'molecule dependency' command: resolves role dependencies before a
    converge, via ansible-galaxy or a configured shell command."""

    def execute(self, exit=True):
        """
        Execute the actions that should run prior to a converge and return a
        tuple.

        :param exit: (Unused) Provided to complete method signature.
        :return: Return a tuple provided by :meth:`.AnsiblePlaybook.execute`.
        """
        debug = self.args.get('debug')
        # Dependencies are resolved at most once per state lifecycle.
        if self.molecule.state.installed_deps:
            return (None, None)
        dependency_name = self.molecule.dependency
        if dependency_name == 'galaxy':
            dd = self.molecule.config.config.get('dependency')
            # Only run when a requirements file is configured.
            if dd.get('requirements_file'):
                msg = "Downloading dependencies with '{}'...".format(
                    dependency_name)
                util.print_info(msg)
                g = ansible_galaxy.AnsibleGalaxy(
                    self.molecule.config.config, debug=debug)
                g.execute()
                self.molecule.state.change_state('installed_deps', True)
        elif dependency_name == 'shell':
            dd = self.molecule.config.config.get('dependency')
            # Only run when a shell command is configured.
            if dd.get('command'):
                msg = "Downloading dependencies with '{}'...".format(
                    dependency_name)
                util.print_info(msg)
                s = shell.Shell(self.molecule.config.config, debug=debug)
                s.execute()
                self.molecule.state.change_state('installed_deps', True)
        return (None, None)
@click.command()
@click.pass_context
def dependency(ctx):  # pragma: no cover
    """ Perform dependent actions on the current role. """
    d = Dependency(ctx.obj.get('args'), {})
    # BUGFIX: removed a stray no-op statement (`d.execute` without
    # parentheses) that merely accessed the bound method and discarded it.
    util.sysexit(d.execute()[0])
| rgreinho/molecule | molecule/command/dependency.py | Python | mit | 2,997 | [
"Galaxy"
] | 1cc2395234bdf8f7ae9ad7d1dea43892325e25b2625544c8c8eb6282b6ebf987 |
########################################################################
# $HeadURL $
# File: RegisterOperation.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/19 13:55:14
########################################################################
""" :mod: RegisterFile
==================
.. module: RegisterFile
:synopsis: register operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
RegisterFile operation handler
"""
__RCSID__ = "$Id $"
# #
# @file RegisterOperation.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/19 13:55:24
# @brief Definition of RegisterOperation class.
# # imports
from DIRAC import S_OK, S_ERROR
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
########################################################################
class RegisterFile( OperationHandlerBase ):
  """
  .. class:: RegisterOperation

  RegisterFile operation handler: registers each waiting file of the
  operation in the configured catalog, marking per-file success/failure.
  """

  def __init__( self, operation = None, csPath = None ):
    """c'tor

    :param self: self reference
    :param Operation operation: Operation instance
    :param str csPath: CS path for this handler
    """
    OperationHandlerBase.__init__( self, operation, csPath )
    # # RegisterFile specific monitor info
    gMonitor.registerActivity( "RegisterAtt", "Attempted file registrations",
                               "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "RegisterOK", "Successful file registrations",
                               "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "RegisterFail", "Failed file registrations",
                               "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )

  def __call__( self ):
    """Register every waiting file in the operation's catalog.

    :return: S_OK when all registrations succeed, S_ERROR otherwise
    """
    # # counter for failed files
    failedFiles = 0
    # # catalog to use
    catalog = self.operation.Catalog
    dm = DataManager( catalogs = catalog )
    # # get waiting files
    waitingFiles = self.getWaitingFilesList()
    # # loop over files
    for opFile in waitingFiles:
      gMonitor.addMark( "RegisterAtt", 1 )
      # # get LFN
      lfn = opFile.LFN
      # # and others
      fileTuple = ( lfn , opFile.PFN, opFile.Size, self.operation.targetSEList[0], opFile.GUID, opFile.Checksum )
      # # call DataManager
      registerFile = dm.registerFile( fileTuple )
      # # check results
      if not registerFile["OK"] or lfn in registerFile["Value"]["Failed"]:
        gMonitor.addMark( "RegisterFail", 1 )
        self.dataLoggingClient().addFileRecord( lfn, "RegisterFail", catalog, "", "RegisterFile" )
        # Prefer the top-level Message; fall back to the per-LFN failure reason.
        reason = registerFile.get( "Message", registerFile.get( "Value", {} ).get( "Failed", {} ).get( lfn, 'Unknown' ) )
        errorStr = "failed to register LFN %s: %s" % ( lfn, reason )
        opFile.Error = errorStr
        self.log.warn( errorStr )
        failedFiles += 1
      else:
        gMonitor.addMark( "RegisterOK", 1 )
        self.dataLoggingClient().addFileRecord( lfn, "Register", catalog, "", "RegisterFile" )
        self.log.info( "file %s has been registered at %s" % ( lfn, catalog ) )
        opFile.Status = "Done"
    # # final check
    if failedFiles:
      self.log.info( "all files processed, %s files failed to register" % failedFiles )
      self.operation.Error = "some files failed to register"
      return S_ERROR( self.operation.Error )
    return S_OK()
| sposs/DIRAC | DataManagementSystem/Agent/RequestOperations/RegisterFile.py | Python | gpl-3.0 | 3,599 | [
"DIRAC"
] | 4168366998290cfcf420db21cc137683b399503791ecef8522ec5b70e61a5328 |
# Starts Gevent which runs Flask
from gevent.pywsgi import WSGIServer
from neuron.app import app
def start_gevent(app_port):
    """Serve the Flask application on all interfaces at *app_port* using
    gevent's WSGI server. Blocks until the process is interrupted."""
    server = WSGIServer(('', app_port), app)
    server.serve_forever()
if __name__ == "__main__":
    # Default development port.
    start_gevent(5000)
| Andrew-Shay/Neuron | neuron/start_gevent.py | Python | mit | 262 | [
"NEURON"
] | e38e6d3a9680f9e6d0ad8de4c5ddda5f2b1e54be67fb6bad4ac2dfa16445b2a4 |
from ....interfaces import utility as util # utility
from ....pipeline import engine as pe # pypeline engine
from ....interfaces import camino as camino
from ....interfaces import fsl as fsl
from ....interfaces import camino2trackvis as cam2trk
from ....algorithms import misc as misc
from ...misc.utils import get_affine, get_data_dims, get_vox_dims
def create_camino_dti_pipeline(name="dtiproc"):
    """Creates a pipeline that does the same diffusion processing as in the
    :doc:`../../users/examples/dmri_camino_dti` example script. Given a diffusion-weighted image,
    b-values, and b-vectors, the workflow will return the tractography
    computed from diffusion tensors and from PICo probabilistic tractography.

    Example
    -------

    >>> import os
    >>> nipype_camino_dti = create_camino_dti_pipeline("nipype_camino_dti")
    >>> nipype_camino_dti.inputs.inputnode.dwi = os.path.abspath('dwi.nii')
    >>> nipype_camino_dti.inputs.inputnode.bvecs = os.path.abspath('bvecs')
    >>> nipype_camino_dti.inputs.inputnode.bvals = os.path.abspath('bvals')
    >>> nipype_camino_dti.run()                  # doctest: +SKIP

    Inputs::

        inputnode.dwi
        inputnode.bvecs
        inputnode.bvals

    Outputs::

        outputnode.fa
        outputnode.trace
        outputnode.tracts_pico
        outputnode.tracts_dt
        outputnode.tensors

    """

    # Internal input node; wired to the public 'inputnode' at the bottom.
    inputnode1 = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode1")

    """
    Setup for Diffusion Tensor Computation
    --------------------------------------
    In this section we create the nodes necessary for diffusion analysis.
    First, the diffusion image is converted to voxel order.
    """

    image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")

    fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme")
    fsl2scheme.inputs.usegradmod = True

    """
    Second, diffusion tensors are fit to the voxel-order data.
    """

    dtifit = pe.Node(interface=camino.DTIFit(), name='dtifit')

    """
    Next, a lookup table is generated from the schemefile and the
    signal-to-noise ratio (SNR) of the unweighted (q=0) data.
    """

    dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
    dtlutgen.inputs.snr = 16.0
    dtlutgen.inputs.inversion = 1

    """
    In this tutorial we implement probabilistic tractography using the PICo algorithm.
    PICo tractography requires an estimate of the fibre direction and a model of its
    uncertainty in each voxel; this is produced using the following node.
    """

    picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
    picopdfs.inputs.inputmodel = 'dt'

    """
    An FSL BET node creates a brain mask is generated from the diffusion image for seeding the PICo tractography.
    """

    bet = pe.Node(interface=fsl.BET(), name="bet")
    bet.inputs.mask = True

    """
    Finally, tractography is performed.
    First DT streamline tractography.
    """

    trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt")

    """
    Now camino's Probablistic Index of connectivity algorithm.
    In this tutorial, we will use only 1 iteration for time-saving purposes.
    """

    trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico")
    trackpico.inputs.iterations = 1

    """
    Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse.
    """

    cam2trk_dt = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt")
    cam2trk_dt.inputs.min_length = 30
    cam2trk_dt.inputs.voxel_order = 'LAS'

    cam2trk_pico = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico")
    cam2trk_pico.inputs.min_length = 30
    cam2trk_pico.inputs.voxel_order = 'LAS'

    """
    Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, using the following two nodes.
    """

    # vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines")
    # procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines")
    # procstreamlines.inputs.outputtracts = 'oogl'

    """
    We can also produce a variety of scalar values from our fitted tensors. The following nodes generate the fractional anisotropy and diffusivity trace maps and their associated headers.
    """

    fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(), name='fa')
    # md = pe.Node(interface=camino.MD(),name='md')
    trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace')
    dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig')

    analyzeheader_fa = pe.Node(interface=camino.AnalyzeHeader(), name="analyzeheader_fa")
    analyzeheader_fa.inputs.datatype = "double"
    analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace')

    # analyzeheader_md = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_md")
    # analyzeheader_md.inputs.datatype = "double"
    # analyzeheader_trace = analyzeheader_md.clone('analyzeheader_trace')

    fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii')
    trace2nii = fa2nii.clone("trace2nii")

    """
    Since we have now created all our nodes, we can now define our workflow and start making connections.
    """

    tractography = pe.Workflow(name='tractography')

    tractography.connect([(inputnode1, bet, [("dwi", "in_file")])])

    """
    File format conversion
    """

    tractography.connect([(inputnode1, image2voxel, [("dwi", "in_file")]),
                          (inputnode1, fsl2scheme, [("bvecs", "bvec_file"),
                                                    ("bvals", "bval_file")])
                          ])

    """
    Tensor fitting
    """

    tractography.connect([(image2voxel, dtifit, [['voxel_order', 'in_file']]),
                          (fsl2scheme, dtifit, [['scheme', 'scheme_file']])
                          ])

    """
    Workflow for applying DT streamline tractogpahy
    """

    tractography.connect([(bet, trackdt, [("mask_file", "seed_file")])])
    tractography.connect([(dtifit, trackdt, [("tensor_fitted", "in_file")])])

    """
    Workflow for applying PICo
    """

    tractography.connect([(bet, trackpico, [("mask_file", "seed_file")])])
    tractography.connect([(fsl2scheme, dtlutgen, [("scheme", "scheme_file")])])
    tractography.connect([(dtlutgen, picopdfs, [("dtLUT", "luts")])])
    tractography.connect([(dtifit, picopdfs, [("tensor_fitted", "in_file")])])
    tractography.connect([(picopdfs, trackpico, [("pdfs", "in_file")])])

    # Mean diffusivity still appears broken
    # tractography.connect([(dtifit, md,[("tensor_fitted","in_file")])])
    # tractography.connect([(md, analyzeheader_md,[("md","in_file")])])
    # tractography.connect([(inputnode, analyzeheader_md,[(('dwi', get_vox_dims), 'voxel_dims'),
    # (('dwi', get_data_dims), 'data_dims')])])

    # This line is commented out because the ProcStreamlines node keeps throwing memory errors
    # tractography.connect([(track, procstreamlines,[("tracked","in_file")])])

    """
    Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the
    tensor fitting.

    This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with the original DWI image from the input node, to the header-generating nodes. This ensures that the files will be correct and readable.
    """

    tractography.connect([(dtifit, fa, [("tensor_fitted", "in_file")])])
    tractography.connect([(fa, analyzeheader_fa, [("fa", "in_file")])])
    tractography.connect([(inputnode1, analyzeheader_fa, [(('dwi', get_vox_dims), 'voxel_dims'),
                                                          (('dwi', get_data_dims), 'data_dims')])])
    tractography.connect([(fa, fa2nii, [('fa', 'data_file')])])
    tractography.connect([(inputnode1, fa2nii, [(('dwi', get_affine), 'affine')])])
    tractography.connect([(analyzeheader_fa, fa2nii, [('header', 'header_file')])])

    tractography.connect([(dtifit, trace, [("tensor_fitted", "in_file")])])
    tractography.connect([(trace, analyzeheader_trace, [("trace", "in_file")])])
    tractography.connect([(inputnode1, analyzeheader_trace, [(('dwi', get_vox_dims), 'voxel_dims'),
                                                             (('dwi', get_data_dims), 'data_dims')])])
    tractography.connect([(trace, trace2nii, [('trace', 'data_file')])])
    tractography.connect([(inputnode1, trace2nii, [(('dwi', get_affine), 'affine')])])
    tractography.connect([(analyzeheader_trace, trace2nii, [('header', 'header_file')])])

    tractography.connect([(dtifit, dteig, [("tensor_fitted", "in_file")])])

    tractography.connect([(trackpico, cam2trk_pico, [('tracked', 'in_file')])])
    tractography.connect([(trackdt, cam2trk_dt, [('tracked', 'in_file')])])
    tractography.connect([(inputnode1, cam2trk_pico, [(('dwi', get_vox_dims), 'voxel_dims'),
                                                      (('dwi', get_data_dims), 'data_dims')])])

    tractography.connect([(inputnode1, cam2trk_dt, [(('dwi', get_vox_dims), 'voxel_dims'),
                                                    (('dwi', get_data_dims), 'data_dims')])])

    # Public input/output nodes: the outer workflow wraps 'tractography'
    # and exposes only these fields to callers.
    inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode")

    outputnode = pe.Node(interface=util.IdentityInterface(fields=["fa",
                                                                  "trace",
                                                                  "tracts_pico",
                                                                  "tracts_dt",
                                                                  "tensors"]),
                         name="outputnode")

    workflow = pe.Workflow(name=name)
    workflow.base_output_dir = name

    workflow.connect([(inputnode, tractography, [("dwi", "inputnode1.dwi"),
                                                 ("bvals", "inputnode1.bvals"),
                                                 ("bvecs", "inputnode1.bvecs")])])
    workflow.connect([(tractography, outputnode, [("cam2trk_dt.trackvis", "tracts_dt"),
                                                  ("cam2trk_pico.trackvis", "tracts_pico"),
                                                  ("fa2nii.nifti_file", "fa"),
                                                  ("trace2nii.nifti_file", "trace"),
                                                  ("dtifit.tensor_fitted", "tensors")])
                      ])

    return workflow
| sgiavasis/nipype | nipype/workflows/dmri/camino/diffusion.py | Python | bsd-3-clause | 10,837 | [
"ParaView",
"VTK"
] | 5ca9d2fb2e23bbb0d0d4de54e66503e0171b1fd211cf3b8114f7a980140158ce |
#!/usr/bin/env python
"""Modules for calculating thermochemical information from computational
outputs."""
import os
import sys
import numpy as np
from ase import units
def rotationalinertia(atoms):
    """Calculates the three principal moments of inertia for an ASE atoms
    object. This uses the atomic masses from ASE, which (if not explicitly
    specified by the user) gives an inexact approximation of an isotopically
    averaged result. Units are in amu*angstroms**2.

    Parameters
    ----------
    atoms :
        ASE-atoms-like object providing get_center_of_mass(), get_masses()
        and per-atom get_x()/get_y()/get_z() accessors.

    Returns
    -------
    numpy.ndarray
        The three principal moments of inertia (eigenvalues of the inertia
        tensor) in ascending order. Callers in this module only take the
        product or the maximum, so the ordering is not significant.
    """
    # Work in coordinates relative to the center of mass.
    xcm, ycm, zcm = atoms.get_center_of_mass()
    masses = atoms.get_masses()
    # Accumulate the moments and products of inertia in the current frame.
    Ixx = Iyy = Izz = Ixy = Ixz = Iyz = 0.
    for index, atom in enumerate(atoms):
        m = masses[index]
        x = atom.get_x() - xcm
        y = atom.get_y() - ycm
        z = atom.get_z() - zcm
        Ixx += m * (y**2. + z**2.)
        Iyy += m * (x**2. + z**2.)
        Izz += m * (x**2. + y**2.)
        Ixy += m * x * y
        Ixz += m * x * z
        Iyz += m * y * z
    # Inertia tensor in the current frame of reference; plain ndarray
    # instead of the deprecated np.matrix.
    tensor = np.array([[ Ixx, -Ixy, -Ixz],
                       [-Ixy,  Iyy, -Iyz],
                       [-Ixz, -Iyz,  Izz]])
    # The tensor is real and symmetric: eigvalsh exploits that and
    # guarantees a real result, whereas the general eigvals can return a
    # complex-dtype array with ~0j imaginary parts.
    return np.linalg.eigvalsh(tensor)
class ThermoChem:
    """Base class containing common methods used in thermochemistry
    calculations."""

    def get_ZPE_correction(self):
        """Returns the zero-point vibrational energy correction in eV."""
        # ZPE is half the sum of the harmonic mode energies; the explicit
        # float start value keeps the empty-list result a float.
        return sum((0.5 * energy for energy in self.vib_energies), 0.)

    def _vibrational_energy_contribution(self, temperature):
        """Calculates the change in internal energy due to vibrations from
        0K to the specified temperature for a set of vibrations given in
        inverse centimeters and a temperature given in Kelvin. Returns the
        energy change in eV."""
        kT = units.kB * temperature
        # Each mode contributes e / (exp(e/kT) - 1) (Planck distribution).
        return sum((energy / (np.exp(energy / kT) - 1.)
                    for energy in self.vib_energies), 0.)

    def _vibrational_entropy_contribution(self, temperature):
        """Calculates the entropy due to vibrations for a set of vibrations
        given in inverse centimeters and a temperature given in Kelvin.
        Returns the entropy in eV/K."""
        kT = units.kB * temperature
        total = 0.
        for energy in self.vib_energies:
            scaled = energy / kT
            total += scaled / (np.exp(scaled) - 1.) - np.log(1 - np.exp(-scaled))
        # Convert the dimensionless sum to eV/K.
        return units.kB * total

    def _vprint(self, text):
        """Print output if verbose flag True."""
        if self.verbose:
            sys.stdout.write(text + os.linesep)
class HarmonicThermo(ThermoChem):
    """Class for calculating thermodynamic properties in the approximation
    that all degrees of freedom are treated harmonically. Often used for
    adsorbates.

    Inputs:

    vib_energies : list
        a list of the harmonic energies of the adsorbate (e.g., from
        ase.vibrations.Vibrations.get_energies). The number of
        energies should match the number of degrees of freedom of the
        adsorbate; i.e., 3*n, where n is the number of atoms. Note that
        this class does not check that the user has supplied the correct
        number of energies. Units of energies are eV.
    electronicenergy : float
        the electronic energy in eV
        (If the electronicenergy is unspecified, then the methods of this
        class can be interpreted as the energy corrections.)
    """
    def __init__(self, vib_energies, electronicenergy=None):
        self.vib_energies = vib_energies
        # Check for imaginary frequencies.
        if sum(np.iscomplex(self.vib_energies)):
            raise ValueError('Imaginary vibrational energies are present.')
        else:
            self.vib_energies = np.real(self.vib_energies)  # clear +0.j
        # Identity check rather than truth test, so an explicit 0.0 is
        # treated as supplied (same stored value either way).
        if electronicenergy is not None:
            self.electronicenergy = electronicenergy
        else:
            self.electronicenergy = 0.

    def get_internal_energy(self, temperature, verbose=True):
        """Returns the internal energy, in eV, in the harmonic approximation
        at a specified temperature (K). If verbose, a component breakdown
        is printed."""
        self.verbose = verbose
        write = self._vprint
        fmt = '%-15s%13.3f eV'
        write('Internal energy components at T = %.2f K:' % temperature)
        write('='*31)

        U = 0.

        # Electronic energy (0 if none was supplied).
        write(fmt % ('E_elec', self.electronicenergy))
        U += self.electronicenergy

        # Zero-point energy of the harmonic modes.
        zpe = self.get_ZPE_correction()
        write(fmt % ('E_ZPE', zpe))
        U += zpe

        # Finite-temperature vibrational contribution.
        dU_v = self._vibrational_energy_contribution(temperature)
        write(fmt % ('Cv_harm (0->T)', dU_v))
        U += dU_v

        write('-'*31)
        write(fmt % ('U', U))
        write('='*31)
        return U

    def get_entropy(self, temperature, verbose=True):
        """Returns the entropy, in eV/K, in the harmonic approximation
        at a specified temperature (K). If verbose, a component breakdown
        is printed."""
        self.verbose = verbose
        write = self._vprint
        fmt = '%-15s%13.7f eV/K%13.3f eV'
        write('Entropy components at T = %.2f K:' % temperature)
        write('='*49)
        write('%15s%13s %13s'%('', 'S', 'T*S'))

        S = 0.

        # In this approximation all entropy is vibrational.
        S_v = self._vibrational_entropy_contribution(temperature)
        write(fmt%('S_harm', S_v, S_v*temperature))
        S += S_v

        write('-'*49)
        write(fmt%('S', S, S*temperature))
        write('='*49)
        return S

    def get_free_energy(self, temperature, verbose=True):
        """Returns the free energy (U - T*S), in eV, in the harmonic
        approximation at a specified temperature (K)."""
        # Bug fix: this previously assigned self.verbose = True, ignoring
        # the argument. It was latent (the nested calls below reset the
        # flag before any output), but verbose is now honored explicitly.
        self.verbose = verbose
        write = self._vprint

        U = self.get_internal_energy(temperature, verbose=verbose)
        write('')
        S = self.get_entropy(temperature, verbose=verbose)
        G = U - temperature * S

        write('')
        write('Free energy components at T = %.2f K:' % temperature)
        write('='*23)
        fmt = '%5s%15.3f eV'
        write(fmt % ('U', U))
        write(fmt % ('-T*S', -temperature * S))
        write('-'*23)
        write(fmt % ('G', G))
        write('='*23)
        return G
class IdealGasThermo(ThermoChem):
    """Class for calculating thermodynamic properties of a molecule
    based on statistical mechanical treatments in the ideal gas
    approximation.

    Inputs for enthalpy calculations:

    vib_energies : list
        a list of the vibrational energies of the molecule (e.g., from
        ase.vibrations.Vibrations.get_energies). The number of vibrations
        used is automatically calculated by the geometry and the number of
        atoms. If more are specified than are needed, then the lowest
        numbered vibrations are neglected. If either atoms or natoms is
        unspecified, then uses the entire list. Units are eV.
    geometry : 'monatomic', 'linear', or 'nonlinear'
        geometry of the molecule
    electronicenergy : float
        the electronic energy in eV
        (If electronicenergy is unspecified, then the methods of this
        class can be interpreted as the enthalpy and free energy
        corrections.)
    natoms : integer
        the number of atoms, used along with 'geometry' to determine how
        many vibrations to use. (Not needed if an atoms object is supplied
        in 'atoms' or if the user desires the entire list of vibrations
        to be used.)

    Extra inputs needed for for entropy / free energy calculations:

    atoms : an ASE atoms object
        used to calculate rotational moments of inertia and molecular mass
    symmetrynumber : integer
        symmetry number of the molecule. See, for example, Table 10.1 and
        Appendix B of C. Cramer "Essentials of Computational Chemistry",
        2nd Ed.
    spin : float
        the total electronic spin. (0 for molecules in which all electrons
        are paired, 0.5 for a free radical with a single unpaired electron,
        1.0 for a triplet with two unpaired electrons, such as O_2.)
    """
    def __init__(self, vib_energies, geometry, electronicenergy=None,
                 atoms=None, symmetrynumber=None, spin=None, natoms=None):
        # 'is None' rather than '==' throughout so valid falsy inputs
        # (e.g. 0, 0.0) are never mistaken for "unspecified".
        if electronicenergy is None:
            self.electronicenergy = 0.
        else:
            self.electronicenergy = electronicenergy
        self.geometry = geometry
        self.atoms = atoms
        self.sigma = symmetrynumber
        self.spin = spin
        if natoms is None:
            if atoms:
                natoms = len(atoms)
        # Cut the vibrations to those needed from the geometry: keep the
        # highest 3N-6 (nonlinear) or 3N-5 (linear) entries, discarding
        # the lowest-numbered modes.
        if natoms:
            if geometry == 'nonlinear':
                self.vib_energies = vib_energies[-(3*natoms-6):]
            elif geometry == 'linear':
                self.vib_energies = vib_energies[-(3*natoms-5):]
            elif geometry == 'monatomic':
                self.vib_energies = []
        else:
            self.vib_energies = vib_energies
        # Make sure no imaginary frequencies remain.
        if sum(np.iscomplex(self.vib_energies)):
            raise ValueError('Imaginary frequencies are present.')
        else:
            self.vib_energies = np.real(self.vib_energies)  # clear +0.j
        self.referencepressure = 101325.  # Pa (1 atm standard state)

    def get_enthalpy(self, temperature, verbose=True):
        """Returns the enthalpy, in eV, in the ideal gas approximation
        at a specified temperature (K). If verbose, a component breakdown
        is printed."""
        self.verbose = verbose
        write = self._vprint
        fmt = '%-15s%13.3f eV'
        write('Enthalpy components at T = %.2f K:' % temperature)
        write('='*31)

        H = 0.

        # Electronic energy (0 if none was supplied).
        write(fmt % ('E_elec', self.electronicenergy))
        H += self.electronicenergy

        # Zero-point vibrational energy.
        zpe = self.get_ZPE_correction()
        write(fmt % ('E_ZPE', zpe))
        H += zpe

        Cv_t = 3./2. * units.kB  # translational heat capacity (3-d gas)
        write(fmt % ('Cv_trans (0->T)', Cv_t * temperature))
        H += Cv_t * temperature

        if self.geometry == 'nonlinear':  # rotational heat capacity
            Cv_r = 3./2. * units.kB
        elif self.geometry == 'linear':
            Cv_r = units.kB
        elif self.geometry == 'monatomic':
            Cv_r = 0.
        # (An unrecognized geometry string would raise NameError below.)
        write(fmt % ('Cv_rot (0->T)', Cv_r * temperature))
        H += Cv_r * temperature

        # Finite-temperature vibrational contribution.
        dH_v = self._vibrational_energy_contribution(temperature)
        write(fmt % ('Cv_vib (0->T)', dH_v))
        H += dH_v

        # Ideal-gas C_v -> C_p conversion: + kB*T.
        Cp_corr = units.kB * temperature
        write(fmt % ('(C_v -> C_p)', Cp_corr))
        H += Cp_corr

        write('-'*31)
        write(fmt % ('H', H))
        write('='*31)
        return H

    def get_entropy(self, temperature, pressure, verbose=True):
        """Returns the entropy, in eV/K, in the ideal gas approximation
        at a specified temperature (K) and pressure (Pa). Requires atoms,
        symmetrynumber and spin to have been supplied at construction."""
        if self.atoms is None or self.sigma is None or self.spin is None:
            raise RuntimeError('atoms, symmetrynumber, and spin must be '
                               'specified for entropy and free energy '
                               'calculations.')
        self.verbose = verbose
        write = self._vprint
        fmt = '%-15s%13.7f eV/K%13.3f eV'
        write('Entropy components at T = %.2f K and P = %.1f Pa:' %
              (temperature, pressure))
        write('='*49)
        write('%15s%13s %13s' % ('', 'S', 'T*S'))

        S = 0.

        # Translational entropy (term inside the log is in SI units).
        mass = sum(self.atoms.get_masses()) * units._amu  # kg/molecule
        S_t = (2*np.pi*mass*units._k*temperature/units._hplanck**2)**(3./2)
        S_t *= units._k * temperature / self.referencepressure
        S_t = units.kB * (np.log(S_t) + 5./2.)
        write(fmt % ('S_trans (1 atm)', S_t, S_t * temperature))
        S += S_t

        # Rotational entropy (term inside the log is in SI units).
        if self.geometry == 'monatomic':
            S_r = 0.
        elif self.geometry == 'nonlinear':
            inertias = (rotationalinertia(self.atoms) * units._amu /
                        (10.**10)**2)  # kg m^2
            # np.prod (np.product was removed in NumPy 2.0).
            S_r = np.sqrt(np.pi * np.prod(inertias)) / self.sigma
            S_r *= (8. * np.pi**2 * units._k * temperature /
                    units._hplanck**2)**(3./2.)
            S_r = units.kB * (np.log(S_r) + 3./2.)
        elif self.geometry == 'linear':
            inertias = (rotationalinertia(self.atoms) * units._amu /
                        (10.**10)**2)  # kg m^2
            inertia = max(inertias)  # should be two identical and one zero
            S_r = (8 * np.pi**2 * inertia * units._k * temperature /
                   self.sigma / units._hplanck**2)
            S_r = units.kB * (np.log(S_r) + 1.)
        write(fmt % ('S_rot', S_r, S_r*temperature))
        S += S_r

        # Electronic entropy: spin multiplicity 2S+1.
        S_e = units.kB * np.log(2*self.spin + 1)
        write(fmt % ('S_elec', S_e, S_e * temperature))
        S += S_e

        # Vibrational entropy.
        S_v = self._vibrational_entropy_contribution(temperature)
        write(fmt % ('S_vib', S_v, S_v * temperature))
        S += S_v

        # Pressure correction to translational entropy.
        S_p = - units.kB * np.log(pressure/self.referencepressure)
        write(fmt % ('S (1 atm -> P)', S_p, S_p * temperature))
        S += S_p

        write('-'*49)
        write(fmt % ('S', S, S * temperature))
        write('='*49)
        return S

    def get_free_energy(self, temperature, pressure, verbose=True):
        """Returns the free energy (H - T*S), in eV, in the ideal gas
        approximation at a specified temperature (K) and pressure (Pa)."""
        self.verbose = verbose
        write = self._vprint

        H = self.get_enthalpy(temperature, verbose=verbose)
        write('')
        S = self.get_entropy(temperature, pressure, verbose=verbose)
        G = H - temperature * S

        write('')
        write('Free energy components at T = %.2f K and P = %.1f Pa:' %
              (temperature, pressure))
        write('='*23)
        fmt = '%5s%15.3f eV'
        write(fmt % ('H', H))
        write(fmt % ('-T*S', -temperature * S))
        write('-'*23)
        write(fmt % ('G', G))
        write('='*23)
        return G
| slabanja/ase | ase/thermochemistry.py | Python | gpl-2.0 | 14,410 | [
"ASE"
] | 7fa5edc9d586f0c1f8f35402150fbec51406d6714b027a76b5c1a95e956a8b44 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, getopt
import re
import os
import csv
import shlex, subprocess
import pickle
import html2text
import time, datetime
import math
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.2f')
from operator import itemgetter, attrgetter
reload(sys)
sys.setdefaultencoding('utf8')
import urllib2
from bs4 import BeautifulSoup
# Print script usage information and exit
# Print script usage information and exit
def help():
    """Print the command line usage string and exit with status 2.

    NOTE: intentionally shadows the builtin help() within this script;
    the name is kept for backward compatibility.
    """
    # Parenthesized print: identical output under Python 2 for a single
    # argument, and valid under Python 3.
    print('downloadAllTeamsInfos.py -y <year> -o <outputFile>')
    sys.exit(2)
#Clean description
def cleanDescription(description):
    """Strip iGEM wiki boilerplate from a scraped team description.

    *description* is plain text produced by html2text. Every verbatim
    chunk of default-template output listed below (navigation menus, the
    unedited 2016 wiki template, upload prompts, judging notices) is
    removed wholesale, leaving only team-specific prose.
    """
    # Each entry is an exact chunk of html2text output generated by the
    # default iGEM wiki templates. Do NOT reformat these literals:
    # removal relies on byte-exact substring matches.
    strings=["""MENU ▤
* HOME
* TEAM
* Team
* ★ Collaborations
* PROJECT
* ★ Description
* ★ Design
* Experiments
* ★ Proof of Concept
* ★ Demonstrate
* Results
* Notebook
* PARTS
* Parts
* ★ Basic Parts
* ★ Composite Parts
* ★ Part Collection
* SAFETY
* ★ ATTRIBUTIONS
* HUMAN PRACTICES
* Human Practices
* ★ Silver
* ★ Gold
* ★ Integrated Practices
* ★ Engagement
* AWARDS
* ★ Entrepreneurship
* ★ Hardware
* ★ Software
* ★ Measurement
* ★ Model
""","""## Welcome to iGEM 2016!
Your team has been approved and you are ready to start the iGEM season!
##### Before you start:
Please read the following pages:
* Requirements page
* Wiki Requirements page
* Template Documentation
##### Styling your wiki
You may style this page as you like or you can simply leave the style as it
is. You can easily keep the styling and edit the content of these default wiki
pages with your project information and completely fulfill the requirement to
document your project.
While you may not win Best Wiki with this styling, your team is still eligible
for all other awards. This default wiki meets the requirements, it improves
navigability and ease of use for visitors, and you should not feel it is
necessary to style beyond what has been provided.
##### Wiki template information
We have created these wiki template pages to help you get started and to help
you think about how your team will be evaluated. You can find a list of all
the pages tied to awards here at the Pages for awards link. You must edit
these pages to be evaluated for medals and awards, but ultimately the design,
layout, style and all other elements of your team wiki is up to you!
##### Editing your wiki
On this page you can document your project, introduce your team members,
document your progress and share your iGEM experience with the rest of the
world!
Use WikiTools - Edit in the black menu bar to edit this page
##### Tips
This wiki will be your team’s first interaction with the rest of the world, so
here are a few tips to help you get started:
* State your accomplishments! Tell people what you have achieved from the start.
* Be clear about what you are doing and how you plan to do this.
* You have a global audience! Consider the different backgrounds that your users come from.
* Make sure information is easy to find; nothing should be more than 3 clicks away.
* Avoid using very small fonts and low contrast colors; information should be easy to read.
* Start documenting your project as early as possible; don’t leave anything to the last minute before the Wiki Freeze. For a complete list of deadlines visit the iGEM 2016 calendar
* Have lots of fun!
##### Inspiration
You can also view other team wikis for inspiration! Here are some examples:
* 2014 SDU Denmark
* 2014 Aalto-Helsinki
* 2014 LMU-Munich
* 2014 Michigan
* 2014 ITESM-Guadalajara
* 2014 SCU-China
##### Uploading pictures and files
You can upload your pictures and files to the iGEM 2016 server. Remember to
keep all your pictures and files within your team's namespace or at least
include your team's name in the file name.
When you upload, set the "Destination Filename" to
`T--YourOfficialTeamName--NameOfFile.jpg`. (If you don't do this, someone else
might upload a different file with the same "Destination Filename", and your
file would be erased!)
UPLOAD FILES
""","""### Loading ...""","""### ★ ALERT!
""","""
This page is used by the judges to evaluate your team for the improve a
previous part or project gold medal criterion.""","""
Delete this box in order to be evaluated for this medal. See more information
at Instructions for Pages for awards.
Tell us about your project, describe what moves you and why this is something
important for your team.""","""
##### What should this page contain?
* A clear and concise description of your project.
* A detailed explanation of why your team chose to work on this particular project.
* References and sources to document your research.
* Use illustrations and other visual resources to explain your project.
##### Advice on writing your Project Description""","""
We encourage you to put up a lot of information and content on your wiki, but
we also encourage you to include summaries as much as possible. If you think
of the sections in your project description as the sections in a publication,
you should try to be consist, accurate and unambiguous in your achievements.
Judges like to read your wiki and know exactly what you have achieved. This is
how you should think about these sections; from the point of view of the judge
evaluating you at the end of the year.
""","""##### References
iGEM teams are encouraged to record references you use during the course of
your research. They should be posted somewhere on your wiki so that judges and
other visitors can see how you thought about your project and what works
inspired you.
##### Inspiration
See how other teams have described and presented their projects:
* Imperial
* UC Davis
* SYSU Software
""","""Loading menubar.....""","""
#
####
×
""","""Toggle navigation
* Project
* Description
* Design
* Experiment
* Proof Of Concept
* Demonstration
* Results
* Notebook
* Gallery
* Team
* Team Members
* Advisors
* Collaborations
* Parts
* Parts
* Basic Parts
* Composite Parts
* Part Collection
* Awards
* Entrepreneurship
* Hardware
* Software
* Measurement
* Model
* Medals
* Bronze
* Silver
* Gold
* Human Practices
* Human Practices
* Silver
* Gold
* Integrated Practices
* Engagement
* Safety
* Attributions""",""" * Team
* Us
* Collaborations
* Project
* Overview
* Results
* Project Build
* Application
* Documentation
* * Attributions
* Notebook
* Timeline
* Experiments
* Safety
* Human Practices
* Overview
* Silver
* Gold
* Integrated Practices
* Engagement
* Awards
* Hardware
* Software
* Entrepreneurship
* Measurement
* Model
""","""##### Before you start:
Please read the following pages:
* Requirements page
* Wiki Requirements page
* Template Documentation
##### Styling your wiki
You may style this page as you like or you can simply leave the style as it
is. You can easily keep the styling and edit the content of these default wiki
pages with your project information and completely fulfill the requirement to
document your project.
While you may not win Best Wiki with this styling, your team is still eligible
for all other awards. This default wiki meets the requirements, it improves
navigability and ease of use for visitors, and you should not feel it is
necessary to style beyond what has been provided.
##### Wiki template information
We have created these wiki template pages to help you get started and to help
you think about how your team will be evaluated. You can find a list of all
the pages tied to awards here at the Pages for awards link. You must edit
these pages to be evaluated for medals and awards, but ultimately the design,
layout, style and all other elements of your team wiki is up to you!
##### Editing your wiki
On this page you can document your project, introduce your team members,
document your progress and share your iGEM experience with the rest of the
world!
Use WikiTools - Edit in the black menu bar to edit this page
##### Tips
This wiki will be your team’s first interaction with the rest of the world, so
here are a few tips to help you get started:
* State your accomplishments! Tell people what you have achieved from the start.
* Be clear about what you are doing and how you plan to do this.
* You have a global audience! Consider the different backgrounds that your users come from.
* Make sure information is easy to find; nothing should be more than 3 clicks away.
* Avoid using very small fonts and low contrast colors; information should be easy to read.
* Start documenting your project as early as possible; don’t leave anything to the last minute before the Wiki Freeze. For a complete list of deadlines visit the iGEM 2016 calendar
* Have lots of fun!
##### Inspiration
You can also view other team wikis for inspiration! Here are some examples:
* 2014 SDU Denmark
* 2014 Aalto-Helsinki
* 2014 LMU-Munich
* 2014 Michigan
* 2014 ITESM-Guadalajara
* 2014 SCU-China
##### Uploading pictures and files
You can upload your pictures and files to the iGEM 2016 server. Remember to
keep all your pictures and files within your team's namespace or at least
include your team's name in the file name.
When you upload, set the "Destination Filename" to
`T--YourOfficialTeamName--NameOfFile.jpg`. (If you don't do this, someone else
might upload a different file with the same "Destination Filename", and your
file would be erased!)""","""UPLOAD FILES""","""
×
""","""UPLOAD FILES"""]
    # Delete every boilerplate chunk; str.replace is a no-op when the
    # chunk does not occur in this description.
    for string in strings:
        description=description.replace(string,"")
    return description
# Return the team information for a year in a dictionary format
# This function fetches the abstract and the description page
# of the team
def getTeamsInfo(year):
    """Download the iGEM team list for *year* and return a list of dicts.

    Each dict carries: name, url, id, title, abstract, description.
    Side effects: shells out to wget (leaving team_list_<year>.csv in the
    working directory) and prints teams that supplied real content
    (Python 2 print statements).
    """
    # CSV export endpoint of the official iGEM team list.
    url="http://igem.org/Team_List.cgi?year=%s&division=igem&team_list_download=1" % year
    tempFile="team_list_%s.csv" % year
    command_line="wget %s -O %s" % (url, tempFile)
    devnull = open('/dev/null', 'w')
    # Fetch via wget, discarding its console output; block until done.
    process = subprocess.Popen(shlex.split(command_line), stdout=devnull, stderr=devnull)
    retcode = process.wait()
    csvfile=open(tempFile)
    reader = csv.DictReader(csvfile)
    result=[]
    for row in reader:
        # NOTE(review): the column names carry padding spaces (' Team ',
        # 'Team ID ') exactly as in the downloaded CSV header -- confirm
        # the export format before changing these keys.
        teamName=row[' Team ']
        teamId=row['Team ID ']
        teamUrl='http://igem.org/Team.cgi?team_id=%s' % teamId
        teamInfo={'name':teamName,'url':teamUrl,'id':teamId}
        teamPage=getTeamPage(teamId)
        # Earlier normalization of the abstract placeholder, kept disabled:
        # if teamPage["abstract"]==u'-- No abstract provided yet --\n':
        #     teamPage["abstract"]=''
        # Blank out the placeholder title so consumers see an empty string.
        if teamPage["title"]==u'-- Not provided yet --':
            teamPage["title"]=''
        teamInfo.update(teamPage)
        teamDescription=getTeamDescription(year,teamName)
        teamDescription=cleanDescription(teamDescription)
        teamInfo.update({"description":teamDescription})
        # Debug output: only print teams with some real content (a
        # non-trivial description or a provided abstract).
        if not (len(teamDescription)<50 and teamPage["abstract"]==u'-- No abstract provided yet --\n'):
            print teamPage
            print teamDescription
        result.append(teamInfo)
    teamsInfo=result
    return teamsInfo
# Get title and abstract of a team from the iGEM page
def getTeamPage(teamId):
    """Fetch a team's iGEM profile page and scrape its title and abstract.

    Returns {"title": ..., "abstract": ...}. Downloads with wget to a
    temporary HTML file which is removed again after parsing.
    """
    # NOTE(review): this endpoint uses id=, while getTeamsInfo builds the
    # displayed team URL with team_id= -- confirm both parameters are
    # accepted by Team.cgi.
    url="http://igem.org/Team.cgi?id=%s" % teamId
    tempFile="abstract_%s.html" % teamId
    command_line="wget %s -O %s" % (url, tempFile)
    devnull = open('/dev/null', 'w')
    process = subprocess.Popen(shlex.split(command_line), stdout=devnull, stderr=devnull)
    retcode = process.wait()
    f = open(tempFile, 'r')
    html_doc=f.read()
    # Delete the temporary file once its contents are in memory.
    command_line="rm %s" % tempFile
    process = subprocess.Popen(shlex.split(command_line), stdout=devnull, stderr=devnull)
    retcode = process.wait()
    soup = BeautifulSoup(html_doc, 'html.parser')
    # The abstract table cell holds the title on its first line and the
    # abstract in the remainder.
    title_and_abstract=soup.find(id="table_abstract").tr.td.get_text()
    separatorIndex=title_and_abstract.find("\n")
    title=title_and_abstract[0:separatorIndex]
    abstract=title_and_abstract[separatorIndex+1:]
    result={"title":title,"abstract":abstract}
    return result
# Get team description from team website
def getTeamDescription (year,teamName):
    """Scrape a team's wiki (main page plus /Description) and return the
    concatenated plain text, with links and images stripped.

    Pages still showing the default "What should this page contain?"
    template contribute nothing, as do pages that return an HTTP error.
    """
    urlMain="http://%s.igem.org/Team:%s" % (year, teamName)
    urlDescription="http://%s.igem.org/Team:%s/Description" % (year, teamName)
    result=""
    for url in [urlMain,urlDescription]:
        try:
            html=urllib2.urlopen(url).read()
            h=html2text.HTML2Text()
            h.ignore_links=True
            h.ignore_images=True
            text=h.handle(html)
            # Skip pages that are still the unedited default template.
            if text.find("What should this page contain?")>=0:
                result+=""
            else:
                result+=text
        except urllib2.HTTPError:
            # Missing/forbidden page: treat as empty rather than aborting.
            result+=""
    return result
# urlDescription="http://%s.igem.org/Team:%s/Description" % (year, teamName)
# urlMain="http://%s.igem.org/Team:%s" % (year, teamName)
# finalText=""
# for url in [urlDescription, urlMain]:
# try:
# html=urllib2.urlopen(url).read()
# h=html2text.HTML2Text()
# h.ignore_links=True
# h.ignore_images=True
# text=h.handle(html)
# if text.find("What should this page contain?")>=0:
# text=""
# except urllib2.HTTPError:
# text=""
# finalText=finalText+text
# return finalText
# Write a dump of the teams info in a Pickle file
# Write a dump of the teams info in a Pickle file
def writeTeamsInfo(teamsInfo,outputFile):
    """Pickle *teamsInfo* (list of team dicts) to *outputFile*.

    Returns teamsInfo unchanged so the call can be chained. The previous
    version computed an unused timestamp and opened the file in text mode;
    pickle streams are binary, so 'wb' is required (text mode corrupts
    the stream on Windows and fails outright on Python 3).
    """
    with open(outputFile, 'wb') as f:
        pickle.dump(teamsInfo, f)
    return teamsInfo
# Main program
# Main program
def main(argv):
    """Parse the command line (-y year, -o output file, -h usage), scrape
    all team data for the year and pickle it to the output file.

    *argv* is sys.argv[1:]. Returns 1 on completion; usage errors exit
    via help().
    """
    year = ''
    outputfile = ''
    try:
        opts, args = getopt.getopt(argv,"hy:o:")
    except getopt.GetoptError:
        help()
    # Calling with no options at all is also a usage error.
    if len(opts) == 0:
        help()
    for opt, arg in opts:
        if opt == '-h':
            help()
        # NOTE(review): ("-y") is not a tuple, so this is a substring test
        # against the string "-y"; it happens to work for getopt's option
        # strings, but ("-y",) was probably intended.
        elif opt in ("-y"):
            year = arg
        elif opt in ("-o"):
            outputfile = arg
    # For debugging purposes
    print 'Year is', year
    print 'Output file is', outputfile
    teamsInfo=getTeamsInfo(year)
    writeTeamsInfo(teamsInfo,outputfile)
    return 1
# Script entry point: pass along everything after the program name.
if __name__ == "__main__":
    main(sys.argv[1:])
"VisIt"
] | 5484bdbe8c90d2492465375282574bf99d0949aba2be0c3ab28fab93c079fb53 |
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import gw as gw_c
class KnowValues(unittest.TestCase):

  def test_rf0_ref(self):
    """ This is GW: checks that the explicitly constructed non-interacting
    response matrix rf0 agrees with its matrix-free (matrix-vector)
    application, frequency by frequency. """
    # Small H2-like system so the SCF reference converges quickly.
    mol = gto.M( verbose = 1, atom = '''H 0 0 0; H 0.17 0.7 0.587''', basis = 'cc-pvdz',)
    gto_mf = scf.RHF(mol)
    gto_mf.kernel()
    gw = gw_c(mf=gto_mf, gto=mol)
    # A few complex frequencies on and off the imaginary axis.
    ww = [0.0+1j*4.0, 1.0+1j*0.1, -2.0-1j*0.1]
    rf0_fm = gw.rf0_cmplx_vertex_ac(ww)
    rf0_mv = np.zeros_like(rf0_fm)
    vec = np.zeros((gw.nprod), dtype=gw.dtypeComplex)
    # Rebuild rf0 row by row by applying the operator to unit vectors.
    for iw,w in enumerate(ww):
      for mu in range(gw.nprod):
        vec[:] = 0.0; vec[mu] = 1.0
        rf0_mv[iw, mu,:] = gw.apply_rf0(vec, w)
    #print(rf0_fm.shape, rf0_mv.shape)
    #print('abs(rf0_fm-rf0_mv)', abs(rf0_fm-rf0_mv).sum()/rf0_fm.size)
    #print(abs(rf0_fm[0,:,:]-rf0_mv[0,:,:]).sum())
    #print(rf0_fm[0,:,:])
    # Mean absolute deviation must vanish to machine precision.
    self.assertTrue(abs(rf0_fm-rf0_mv).sum()/rf0_fm.size<1e-15)
# Allow running this test file directly, outside a test runner.
if __name__ == "__main__": unittest.main()
| gkc1000/pyscf | pyscf/nao/test/test_0052_gw_rf0_ref.py | Python | apache-2.0 | 1,018 | [
"PySCF"
] | 5bc3705a7e74a61f1ecf238aa3d4caa8e6db2ebec7cc4aa14f9ada98e919297c |
"""
Test functions for models.GLM
"""
import os
import numpy as np
from numpy.testing import *
import statsmodels.api as sm
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.tools.tools import add_constant
from statsmodels.tools.sm_exceptions import PerfectSeparationError
from nose import SkipTest
# Test Precisions
# Decimal-place tolerances passed to numpy.testing.assert_almost_equal via
# the decimal_* class attributes below; subclasses override with the level
# a given family/link/reference combination supports.
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
class CheckModelResults(object):
    '''
    res2 should be either the results from RModelWrap
    or the results as defined in model_results_data

    Mixin of GLM result checks. Subclasses set self.res1 (a fitted
    statsmodels GLM results instance) and self.res2 (reference results).
    Each decimal_* class attribute sets the precision for the test that
    follows it and may be overridden per subclass.
    '''
    decimal_params = DECIMAL_4
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params,
                self.decimal_params)

    decimal_bse = DECIMAL_4
    def test_standard_errors(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, self.decimal_bse)

    decimal_resids = DECIMAL_4
    def test_residuals(self):
        # Compare all five residual flavors side by side as one matrix.
        resids = np.column_stack((self.res1.resid_pearson,
                self.res1.resid_deviance, self.res1.resid_working,
                self.res1.resid_anscombe, self.res1.resid_response))
        assert_almost_equal(resids, self.res2.resids, self.decimal_resids)

    decimal_aic_R = DECIMAL_4
    def test_aic_R(self):
        # R includes the estimation of the scale as a lost dof
        # Doesn't with Gamma though
        if self.res1.scale != 1:
            dof = 2
        else:
            dof = 0
        assert_almost_equal(self.res1.aic+dof, self.res2.aic_R,
                self.decimal_aic_R)

    decimal_aic_Stata = DECIMAL_4
    def test_aic_Stata(self):
        # Stata uses the below llf for aic definition for these families
        if isinstance(self.res1.model.family, (sm.families.Gamma,
            sm.families.InverseGaussian)):
            llf = self.res1.model.family.loglike(self.res1.model.endog,
                    self.res1.mu, scale=1)
            aic = (-2*llf+2*(self.res1.df_model+1))/self.res1.nobs
        else:
            aic = self.res1.aic/self.res1.nobs
        assert_almost_equal(aic, self.res2.aic_Stata, self.decimal_aic_Stata)

    decimal_deviance = DECIMAL_4
    def test_deviance(self):
        assert_almost_equal(self.res1.deviance, self.res2.deviance,
                self.decimal_deviance)

    decimal_scale = DECIMAL_4
    def test_scale(self):
        assert_almost_equal(self.res1.scale, self.res2.scale,
                self.decimal_scale)

    decimal_loglike = DECIMAL_4
    def test_loglike(self):
        # Stata uses the below llf for these families
        # We differ with R for them
        if isinstance(self.res1.model.family, (sm.families.Gamma,
            sm.families.InverseGaussian)):
            llf = self.res1.model.family.loglike(self.res1.model.endog,
                    self.res1.mu, scale=1)
        else:
            llf = self.res1.llf
        assert_almost_equal(llf, self.res2.llf, self.decimal_loglike)

    decimal_null_deviance = DECIMAL_4
    def test_null_deviance(self):
        assert_almost_equal(self.res1.null_deviance, self.res2.null_deviance,
                    self.decimal_null_deviance)

    decimal_bic = DECIMAL_4
    def test_bic(self):
        # NOTE(review): compared against the Stata BIC definition only.
        assert_almost_equal(self.res1.bic, self.res2.bic_Stata,
                self.decimal_bic)

    def test_degrees(self):
        assert_equal(self.res1.model.df_resid,self.res2.df_resid)

    decimal_fittedvalues = DECIMAL_4
    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
                self.decimal_fittedvalues)
class TestGlmGaussian(CheckModelResults):
    def __init__(self):
        '''
        Test Gaussian family with canonical identity link

        Fits the Longley dataset and compares against stored reference
        results (results.results_glm.Longley).
        '''
        # Test Precisions
        self.decimal_resids = DECIMAL_3
        self.decimal_params = DECIMAL_2
        self.decimal_bic = DECIMAL_0
        self.decimal_bse = DECIMAL_3

        from statsmodels.datasets.longley import load
        self.data = load()
        self.data.exog = add_constant(self.data.exog)
        self.res1 = GLM(self.data.endog, self.data.exog,
                        family=sm.families.Gaussian()).fit()
        from results.results_glm import Longley
        self.res2 = Longley()

    # Previous Rpy-based reference setup, kept for provenance:
    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     Gauss = r.gaussian
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm, family=Gauss)
    #     self.res2.resids = np.array(self.res2.resid)[:,None]*np.ones((1,5))
    #     self.res2.null_deviance = 185008826 # taken from R. Rpy bug?
class TestGaussianLog(CheckModelResults):
    def __init__(self):
        '''
        Test Gaussian family with the non-canonical log link on seeded
        synthetic data, against stored reference results.
        '''
        # Test Precision
        self.decimal_aic_R = DECIMAL_0
        self.decimal_aic_Stata = DECIMAL_2
        self.decimal_loglike = DECIMAL_0
        self.decimal_null_deviance = DECIMAL_1

        nobs = 100
        x = np.arange(nobs)
        # Fixed seed so results match the stored GaussianLog references.
        np.random.seed(54321)
        # y = 1.0 - .02*x - .001*x**2 + 0.001 * np.random.randn(nobs)
        self.X = np.c_[np.ones((nobs,1)),x,x**2]
        self.lny = np.exp(-(-1.0 + 0.02*x + 0.0001*x**2)) +\
                        0.001 * np.random.randn(nobs)

        GaussLog_Model = GLM(self.lny, self.X, \
                family=sm.families.Gaussian(sm.families.links.log))
        self.res1 = GaussLog_Model.fit()
        from results.results_glm import GaussianLog
        self.res2 = GaussianLog()

    # Previous Rpy-based reference setup, kept for provenance:
    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed"
    #     GaussLogLink = r.gaussian(link = "log")
    #     GaussLog_Res_R = RModel(self.lny, self.X, r.glm, family=GaussLogLink)
    #     self.res2 = GaussLog_Res_R
class TestGaussianInverse(CheckModelResults):
    def __init__(self):
        '''
        Test Gaussian family with the non-canonical inverse (power -1)
        link on seeded synthetic data, against stored reference results.
        '''
        # Test Precisions
        self.decimal_bic = DECIMAL_1
        self.decimal_aic_R = DECIMAL_1
        self.decimal_aic_Stata = DECIMAL_3
        self.decimal_loglike = DECIMAL_1
        self.decimal_resids = DECIMAL_3

        nobs = 100
        x = np.arange(nobs)
        np.random.seed(54321)
        # NOTE(review): y is never used below, but this randn(nobs) call
        # advances the seeded RNG stream that y_inv depends on; removing
        # it would change the data and break the stored references.
        y = 1.0 + 2.0 * x + x**2 + 0.1 * np.random.randn(nobs)
        self.X = np.c_[np.ones((nobs,1)),x,x**2]
        self.y_inv = (1. + .02*x + .001*x**2)**-1 + .001 * np.random.randn(nobs)
        InverseLink_Model = GLM(self.y_inv, self.X,
                family=sm.families.Gaussian(sm.families.links.inverse_power))
        InverseLink_Res = InverseLink_Model.fit()
        self.res1 = InverseLink_Res
        from results.results_glm import GaussianInverse
        self.res2 = GaussianInverse()

    # Previous Rpy-based reference setup, kept for provenance:
    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     InverseLink = r.gaussian(link = "inverse")
    #     InverseLink_Res_R = RModel(self.y_inv, self.X, r.glm, family=InverseLink)
    #     self.res2 = InverseLink_Res_R
class TestGlmBinomial(CheckModelResults):
    def __init__(self):
        '''
        Test Binomial family with canonical logit link using star98 dataset.

        NOTE(review): per the RModel comment below, endog appears to be
        two-column (successes/trials) count data -- confirm against the
        star98 dataset docs.
        '''
        # Test Precisions
        self.decimal_resids = DECIMAL_1
        self.decimal_bic = DECIMAL_2

        from statsmodels.datasets.star98 import load
        from results.results_glm import Star98
        data = load()
        data.exog = add_constant(data.exog)
        self.res1 = GLM(data.endog, data.exog, \
                family=sm.families.Binomial()).fit()
        #NOTE: if you want to replicate with RModel
        #res2 = RModel(data.endog[:,0]/trials, data.exog, r.glm,
        #        family=r.binomial, weights=trials)
        self.res2 = Star98()
#TODO:
#Non-Canonical Links for the Binomial family require the algorithm to be
#slightly changed
#class TestGlmBinomialLog(CheckModelResults):
# pass
#class TestGlmBinomialLogit(CheckModelResults):
# pass
#class TestGlmBinomialProbit(CheckModelResults):
# pass
#class TestGlmBinomialCloglog(CheckModelResults):
# pass
#class TestGlmBinomialPower(CheckModelResults):
# pass
#class TestGlmBinomialLoglog(CheckModelResults):
# pass
#class TestGlmBinomialLogc(CheckModelResults):
#TODO: need include logc link
# pass
class TestGlmBernoulli(CheckModelResults):
def __init__(self):
from results.results_glm import Lbw
self.res2 = Lbw()
self.res1 = GLM(self.res2.endog, self.res2.exog,
family=sm.families.Binomial()).fit()
#class TestGlmBernoulliIdentity(CheckModelResults):
# pass
#class TestGlmBernoulliLog(CheckModelResults):
# pass
#class TestGlmBernoulliProbit(CheckModelResults):
# pass
#class TestGlmBernoulliCloglog(CheckModelResults):
# pass
#class TestGlmBernoulliPower(CheckModelResults):
# pass
#class TestGlmBernoulliLoglog(CheckModelResults):
# pass
#class test_glm_bernoulli_logc(CheckModelResults):
# pass
class TestGlmGamma(CheckModelResults):
    def __init__(self):
        '''
        Tests Gamma family with canonical inverse link (power -1)
        '''
        # Test Precisions
        self.decimal_aic_R = -1 #TODO: off by about 1, we are right with Stata
        self.decimal_resids = DECIMAL_2

        from statsmodels.datasets.scotland import load
        from results.results_glm import Scotvote
        data = load()
        data.exog = add_constant(data.exog)
        res1 = GLM(data.endog, data.exog, \
            family=sm.families.Gamma()).fit()
        self.res1 = res1
#        res2 = RModel(data.endog, data.exog, r.glm, family=r.Gamma)
        res2 = Scotvote()
        res2.aic_R += 2 # R doesn't count degree of freedom for scale with gamma
        self.res2 = res2

class TestGlmGammaLog(CheckModelResults):
    """Gamma family with a log link; data/results come from CancerLog."""
    def __init__(self):
        # Test Precisions
        self.decimal_resids = DECIMAL_3
        self.decimal_aic_R = DECIMAL_0
        self.decimal_fittedvalues = DECIMAL_3

        from results.results_glm import CancerLog
        res2 = CancerLog()
        self.res1 = GLM(res2.endog, res2.exog,
            family=sm.families.Gamma(link=sm.families.links.log)).fit()
        self.res2 = res2

#    def setup(self):
#        if skipR:
#            raise SkipTest, "Rpy not installed."
#        self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
#            family=r.Gamma(link="log"))
#        self.res2.null_deviance = 27.92207137420696 # From R (bug in rpy)
#        self.res2.bic = -154.1582089453923 # from Stata

class TestGlmGammaIdentity(CheckModelResults):
    """Gamma family with an identity link; data/results from CancerIdentity."""
    def __init__(self):
        # Test Precisions
        self.decimal_resids = -100 #TODO Very off from Stata?
        self.decimal_params = DECIMAL_2
        self.decimal_aic_R = DECIMAL_0
        self.decimal_loglike = DECIMAL_1

        from results.results_glm import CancerIdentity
        res2 = CancerIdentity()
        self.res1 = GLM(res2.endog, res2.exog,
            family=sm.families.Gamma(link=sm.families.links.identity)).fit()
        self.res2 = res2

#    def setup(self):
#        if skipR:
#            raise SkipTest, "Rpy not installed."
#        self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
#            family=r.Gamma(link="identity"))
#        self.res2.null_deviance = 27.92207137420696 # from R, Rpy bug
class TestGlmPoisson(CheckModelResults):
    def __init__(self):
        '''
        Tests Poisson family with canonical log link.

        Test results were obtained by R.
        '''
        from results.results_glm import Cpunish
        from statsmodels.datasets.cpunish import load
        self.data = load()
        # log-transform the income variable before adding the constant
        self.data.exog[:,3] = np.log(self.data.exog[:,3])
        self.data.exog = add_constant(self.data.exog)
        self.res1 = GLM(self.data.endog, self.data.exog,
            family=sm.families.Poisson()).fit()
        self.res2 = Cpunish()

#class TestGlmPoissonIdentity(CheckModelResults):
#    pass
#class TestGlmPoissonPower(CheckModelResults):
#    pass

class TestGlmInvgauss(CheckModelResults):
    def __init__(self):
        '''
        Tests the Inverse Gaussian family in GLM.

        Notes
        -----
        Used the rndivgx.ado file provided by Hardin and Hilbe to
        generate the data.  Results are read from model_results, which
        were obtained by running R_ig.s
        '''
        # Test Precisions
        self.decimal_aic_R = DECIMAL_0
        self.decimal_loglike = DECIMAL_0

        from results.results_glm import InvGauss
        res2 = InvGauss()
        res1 = GLM(res2.endog, res2.exog, \
            family=sm.families.InverseGaussian()).fit()
        self.res1 = res1
        self.res2 = res2

class TestGlmInvgaussLog(CheckModelResults):
    """Inverse Gaussian family with a log link; reference in InvGaussLog."""
    def __init__(self):
        # Test Precisions
        self.decimal_aic_R = -10 # Big difference vs R.
        self.decimal_resids = DECIMAL_3

        from results.results_glm import InvGaussLog
        res2 = InvGaussLog()
        self.res1 = GLM(res2.endog, res2.exog,
            family=sm.families.InverseGaussian(link=\
            sm.families.links.log)).fit()
        self.res2 = res2

#    def setup(self):
#        if skipR:
#            raise SkipTest, "Rpy not installed."
#        self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
#            family=r.inverse_gaussian(link="log"))
#        self.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
#        self.res2.llf = -12162.72308 # from Stata, R's has big rounding diff

class TestGlmInvgaussIdentity(CheckModelResults):
    """Inverse Gaussian family with an identity link; data from Medpar1."""
    def __init__(self):
        # Test Precisions
        self.decimal_aic_R = -10 #TODO: Big difference vs R
        self.decimal_fittedvalues = DECIMAL_3
        self.decimal_params = DECIMAL_3

        from results.results_glm import Medpar1
        data = Medpar1()
        self.res1 = GLM(data.endog, data.exog,
            family=sm.families.InverseGaussian(link=\
            sm.families.links.identity)).fit()
        from results.results_glm import InvGaussIdentity
        self.res2 = InvGaussIdentity()

#    def setup(self):
#        if skipR:
#            raise SkipTest, "Rpy not installed."
#        self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
#            family=r.inverse_gaussian(link="identity"))
#        self.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
#        self.res2.llf = -12163.25545 # from Stata, big diff with R
class TestGlmNegbinomial(CheckModelResults):
    def __init__(self):
        '''
        Test Negative Binomial family with canonical log link
        '''
        # Test Precision
        self.decimal_resid = DECIMAL_1
        self.decimal_params = DECIMAL_3
        self.decimal_resids = -1 # 1 % mismatch at 0
        self.decimal_fittedvalues = DECIMAL_1

        from statsmodels.datasets.committee import load
        self.data = load()
        # log-transform column 2 and add its interaction with column 1
        self.data.exog[:,2] = np.log(self.data.exog[:,2])
        interaction = self.data.exog[:,2]*self.data.exog[:,1]
        self.data.exog = np.column_stack((self.data.exog,interaction))
        self.data.exog = add_constant(self.data.exog)
        self.res1 = GLM(self.data.endog, self.data.exog,
            family=sm.families.NegativeBinomial()).fit()
        from results.results_glm import Committee
        res2 = Committee()
        res2.aic_R += 2 # They don't count a degree of freedom for the scale
        self.res2 = res2

#    def setup(self):
#        if skipR:
#            raise SkipTest, "Rpy not installed"
#        r.library('MASS')  # this doesn't work when done in rmodelwrap?
#        self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
#            family=r.negative_binomial(1))
#        self.res2.null_deviance = 27.8110469364343

#class TestGlmNegbinomial_log(CheckModelResults):
#    pass
#class TestGlmNegbinomial_power(CheckModelResults):
#    pass
#class TestGlmNegbinomial_nbinom(CheckModelResults):
#    pass

#NOTE: hacked together version to test poisson offset
class TestGlmPoissonOffset(CheckModelResults):
    """Poisson GLM with a constant exposure; must match the no-offset fit."""
    @classmethod
    def setupClass(cls):
        from results.results_glm import Cpunish
        from statsmodels.datasets.cpunish import load
        data = load()
        data.exog[:,3] = np.log(data.exog[:,3])
        data.exog = add_constant(data.exog)
        # a constant exposure only shifts the intercept by log(exposure)
        exposure = [100] * len(data.endog)
        cls.res1 = GLM(data.endog, data.exog, family=sm.families.Poisson(),
            exposure=exposure).fit()
        cls.res1.params[-1] += np.log(100) # add exposure back in to param
                                           # to make the results the same
        cls.res2 = Cpunish()
def test_prefect_pred():
    """GLM.fit must raise PerfectSeparationError on perfectly separable data."""
    # NOTE(review): 'prefect' in the name is a typo for 'perfect'; kept so
    # external references to the test keep working.
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    iris = np.genfromtxt(os.path.join(cur_dir, 'results', 'iris.csv'),
        delimiter=",", skip_header=1)
    y = iris[:,-1]
    X = iris[:,:-1]
    # keep only two classes, which are linearly separable in iris
    X = X[y != 2]
    y = y[y != 2]
    X = add_constant(X, prepend=True)
    glm = GLM(y, X, family=sm.families.Binomial())
    assert_raises(PerfectSeparationError, glm.fit)

def test_attribute_writable_resettable():
    """
    Regression test for mutables and class constructors.
    """
    data = sm.datasets.longley.load()
    endog, exog = data.endog, data.exog
    glm_model = sm.GLM(endog, exog)
    assert_equal(glm_model.family.link.power, 1.0)
    glm_model.family.link.power = 2.
    assert_equal(glm_model.family.link.power, 2.0)
    # a second instance must start from the pristine default, i.e. the
    # mutation above must not leak through shared class-level state
    glm_model2 = sm.GLM(endog, exog)
    assert_equal(glm_model2.family.link.power, 1.0)
if __name__=="__main__":
#run_module_suite()
#taken from Fernando Perez:
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],
exit=False)
| pprett/statsmodels | statsmodels/genmod/tests/test_glm.py | Python | bsd-3-clause | 17,451 | [
"Gaussian"
] | ffe620636f45a8211d20bf2e2aa52724e9bea56fbe0df4ff908518ce4d5f14a7 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.lb
import espressomd.shapes
import espressomd.lbboundaries
import itertools
class LBBoundariesBase:
    """Shared LB boundary tests, mixed into the CPU and GPU test cases.

    Subclasses provide ``self.lbf`` (the lattice-Boltzmann fluid actor).
    """
    system = espressomd.System(box_l=[10.0, 10.0, 10.0])
    system.cell_system.skin = 0.1

    # two walls perpendicular to x, leaving fluid in 2.5 < x < 7.5
    wall_shape1 = espressomd.shapes.Wall(normal=[1., 0., 0.], dist=2.5)
    wall_shape2 = espressomd.shapes.Wall(normal=[-1., 0., 0.], dist=-7.5)

    def test_add(self):
        # an added boundary must be retrievable by index
        boundary = espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1)

        self.system.lbboundaries.add(boundary)
        self.assertEqual(boundary, self.system.lbboundaries[0])

    def test_remove(self):
        lbb = self.system.lbboundaries

        b1 = lbb.add(
            espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1))
        b2 = lbb.add(
            espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1))

        # removing one boundary must leave the other in place
        lbb.remove(b1)
        self.assertNotIn(b1, lbb)
        self.assertIn(b2, lbb)

    def test_size(self):
        lbb = self.system.lbboundaries
        self.assertEqual(lbb.size(), 0)

        lbb.add(espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1))
        self.assertEqual(lbb.size(), 1)

        lbb.add(espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1))
        self.assertEqual(lbb.size(), 2)

    def test_empty(self):
        lbb = self.system.lbboundaries
        self.assertTrue(lbb.empty())

        lbb.add(espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1))
        self.assertFalse(lbb.empty())

    def test_clear(self):
        lbb = self.system.lbboundaries

        lbb.add(espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1))
        lbb.add(espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1))

        lbb.clear()
        self.assertTrue(lbb.empty())

    def check_boundary_flags(self, boundarynumbers):
        """Verify per-node boundary indices in the three x-slabs of the grid.

        boundarynumbers lists the expected flag for x-index ranges
        [0, 5), [5, 15) and [15, 20) respectively (agrid=0.5, box 10 -> 20
        nodes per direction).  Afterwards all boundaries are cleared and
        every node must report flag 0.
        """
        rng = range(20)
        for i in itertools.product(range(0, 5), rng, rng):
            self.assertEqual(self.lbf[i].boundary, boundarynumbers[0])
        for i in itertools.product(range(5, 15), rng, rng):
            self.assertEqual(self.lbf[i].boundary, boundarynumbers[1])
        for i in itertools.product(range(15, 20), rng, rng):
            self.assertEqual(self.lbf[i].boundary, boundarynumbers[2])
        self.system.lbboundaries.clear()
        for i in itertools.product(rng, rng, rng):
            self.assertEqual(self.lbf[i].boundary, 0)

    def test_boundary_flags(self):
        lbb = self.system.lbboundaries
        lbb.add(espressomd.lbboundaries.LBBoundary(shape=self.wall_shape1))
        lbb.add(espressomd.lbboundaries.LBBoundary(shape=self.wall_shape2))
        # two separate boundaries -> flags 1 and 2 in the wall slabs
        self.check_boundary_flags([1, 0, 2])

    def test_union(self):
        union = espressomd.shapes.Union()
        union.add([self.wall_shape1, self.wall_shape2])
        self.system.lbboundaries.add(
            espressomd.lbboundaries.LBBoundary(shape=union))
        # a single union boundary -> the same flag (1) in both wall slabs
        self.check_boundary_flags([1, 0, 1])
@utx.skipIfMissingFeatures(["LB_BOUNDARIES"])
class LBBoundariesCPU(ut.TestCase, LBBoundariesBase):
    """Run the shared boundary tests against the CPU LB fluid."""
    lbf = None

    def setUp(self):
        # create the fluid lazily once, then re-attach it for every test
        if not self.lbf:
            self.lbf = espressomd.lb.LBFluid(
                visc=1.0,
                dens=1.0,
                agrid=0.5,
                tau=1.0)

        self.system.actors.add(self.lbf)

    def tearDown(self):
        self.system.lbboundaries.clear()
        self.system.actors.remove(self.lbf)
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(["LB_BOUNDARIES_GPU"])
class LBBoundariesGPU(ut.TestCase, LBBoundariesBase):
    """Run the shared boundary tests against the GPU LB fluid."""
    lbf = None

    def setUp(self):
        # create the fluid lazily once, then re-attach it for every test
        if not self.lbf:
            self.lbf = espressomd.lb.LBFluidGPU(
                visc=1.0,
                dens=1.0,
                agrid=0.5,
                tau=1.0)

        self.system.actors.add(self.lbf)

    def tearDown(self):
        self.system.lbboundaries.clear()
        self.system.actors.remove(self.lbf)
if __name__ == "__main__":
ut.main()
| fweik/espresso | testsuite/python/lb_boundary.py | Python | gpl-3.0 | 4,708 | [
"ESPResSo"
] | e09a595e41d9367dab5037369a227e2bf43879f02ad741ed0b803790c6dbd319 |
#!/bin/env python
import time
import simtk.openmm as mm
from simtk.openmm import app
from simtk.openmm import Platform
from simtk.unit import *
import numpy as np
from mdtraj.reporters import NetCDFReporter
from smarty import *
import sys
import numpy as np
# Command line: molecule name, then start/stop/step for the scanned parameter,
# a fixed value for the second parameter, the SMIRKS pattern and the two
# parameter names to modify.
molname = [sys.argv[1]]
mol_filename = ['Mol2_files/'+m+'.mol2' for m in molname]

# Simulation settings
time_step = 0.8 #Femtoseconds
temperature = 300 #kelvin
friction = 1 # per picosecond
num_steps = 7500000
trj_freq = 1000 #steps
data_freq = 1000 #steps

# Load OEMol
for ind,j in enumerate(mol_filename):
    mol = oechem.OEGraphMol()
    ifs = oechem.oemolistream(j)
    flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield
    ifs.SetFlavor( oechem.OEFormat_MOL2, flavor)
    oechem.OEReadMolecule(ifs, mol )
    oechem.OETriposAtomNames(mol)

    # Get positions (angstroms) into an (natoms, 3) array
    coordinates = mol.GetCoords()
    natoms = len(coordinates)
    positions = np.zeros([natoms,3], np.float64)
    for index in range(natoms):
        (x,y,z) = coordinates[index]
        positions[index,0] = x
        positions[index,1] = y
        positions[index,2] = z
    positions = Quantity(positions, unit.angstroms)

    # Load forcefield
    forcefield = ForceField(get_data_filename('forcefield/smirff99Frosst.ffxml'))

    # Define system; hydroxyl hydrogens get small but nonzero LJ parameters
    topology = generateTopologyFromOEMol(mol)
    params = forcefield.getParameter(smirks='[#1:1]-[#8]')
    params['rmin_half']='0.01'
    params['epsilon']='0.01'
    forcefield.setParameter(params, smirks='[#1:1]-[#8]')
    system = forcefield.createSystem(topology, [mol])

    paramlist1 = np.arange(float(sys.argv[2]),float(sys.argv[3]),float(sys.argv[4]))
    # NOTE(review): this rebinds the loop variable j (the mol2 filename) to
    # the fixed second-parameter value from the command line
    j = sys.argv[5]
    smirkseries = sys.argv[6]#'[#6X4:1]-[#1:2]'
    paramtype1 = sys.argv[7]#'length'
    paramtype2 = sys.argv[8]

    param = forcefield.getParameter(smirks=smirkseries)
    for i in paramlist1:
        # scan paramtype1 over paramlist1 while holding paramtype2 fixed at j
        param[paramtype1] = str(i)
        param[paramtype2] = str(j)
        forcefield.setParameter(param, smirks=smirkseries)
        system = forcefield.createSystem(topology, [mol])

        #Do simulation
        integrator = mm.LangevinIntegrator(temperature*kelvin, friction/picoseconds, time_step*femtoseconds)
        # NOTE(review): platform is created but not passed to Simulation
        platform = mm.Platform.getPlatformByName('Reference')
        simulation = app.Simulation(topology, system, integrator)
        simulation.context.setPositions(positions)
        simulation.context.setVelocitiesToTemperature(temperature*kelvin)
        netcdf_reporter = NetCDFReporter('traj4ns_c1143/'+molname[ind]+'_'+smirkseries+'_'+paramtype1+str(i)+'_'+paramtype2+str(j)+'.nc', trj_freq)
        simulation.reporters.append(netcdf_reporter)
        simulation.reporters.append(app.StateDataReporter('StateData4ns_c1143/data_'+molname[ind]+'_'+smirkseries+'_'+paramtype1+str(i)+'_'+paramtype2+str(j)+'.csv', data_freq, step=True, potentialEnergy=True, temperature=True, density=True))

        print("Starting simulation")
        start = time.clock()
        simulation.step(num_steps)
        end = time.clock()
        print("Elapsed time %.2f seconds" % (end-start))
        netcdf_reporter.close()
        print("Done!")
#Do simulation
#integrator = mm.LangevinIntegrator(temperature*kelvin, friction/picoseconds, time_step*femtoseconds)
#platform = mm.Platform.getPlatformByName('Reference')
#simulation = app.Simulation(topology, system, integrator)
#simulation.context.setPositions(positions)
#simulation.context.setVelocitiesToTemperature(temperature*kelvin)
#netcdf_reporter = NetCDFReporter('traj/'+molname+'.nc', trj_freq)
#simulation.reporters.append(netcdf_reporter)
#simulation.reporters.append(app.StateDataReporter('StateData/data_'+molname+'.csv', data_freq, step=True, potentialEnergy=True, temperature=True, density=True))
#print("Starting simulation")
#start = time.clock()
#simulation.step(num_steps)
#end = time.clock()
#print("Elapsed time %.2f seconds" % (end-start))
#netcdf_reporter.close()
#print("Done!")
| bmanubay/open-forcefield-tools | single-molecule-property-generation/run_molecule_v3.py | Python | mit | 3,988 | [
"MDTraj",
"OpenMM"
] | 3516becb5d13e76198d514e551c9980aca77d08faa760f6ecd69421f2598b94a |
# Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# get set abstraction for graph representation
from functools import reduce
# TODO - Subclass graph?
class MultiGraph(object):
    """A directed multigraph abstraction with labeled edges."""

    def __init__(self, nodes=()):
        """Initializes a new MultiGraph object.

        nodes is an optional iterable of initial node identifiers.
        (An immutable default is used to avoid the shared mutable
        default-argument pitfall; passing a list still works.)
        """
        self._adjacency_list = {}  # maps parent -> set of (child, label) pairs
        for n in nodes:
            self._adjacency_list[n] = set()
        self._label_map = {}  # maps label -> set of (parent, child) pairs

    def __eq__(self, g):
        """Returns true if g is equal to this graph."""
        return isinstance(g, MultiGraph) and \
               (self._adjacency_list == g._adjacency_list) and \
               (self._label_map == g._label_map)

    def __ne__(self, g):
        """Returns true if g is not equal to this graph."""
        return not self.__eq__(g)

    def __repr__(self):
        """Returns a unique string representation of this graph."""
        s = "<MultiGraph: "
        for key in sorted(self._adjacency_list):
            values = sorted(self._adjacency_list[key])
            s += "(%r: %s)" % (key, ",".join(repr(v) for v in values))
        return s + ">"

    def __str__(self):
        """Returns a concise string description of this graph."""
        nodenum = len(self._adjacency_list)
        # sum() also copes with an empty graph, where the previous
        # reduce(add, []) raised TypeError
        edgenum = sum(len(v) for v in self._adjacency_list.values())
        labelnum = len(self._label_map)
        return "<MultiGraph: " + \
               str(nodenum) + " node(s), " + \
               str(edgenum) + " edge(s), " + \
               str(labelnum) + " unique label(s)>"

    def add_node(self, node):
        """Adds a node to this graph."""
        if node not in self._adjacency_list:
            self._adjacency_list[node] = set()

    def add_edge(self, source, to, label=None):
        """Adds an edge from source to to, tagged with label.

        Both end points must already be nodes of the graph.
        """
        if source not in self._adjacency_list:
            raise ValueError("Unknown <from> node: " + str(source))
        if to not in self._adjacency_list:
            raise ValueError("Unknown <to> node: " + str(to))
        edge = (to, label)
        self._adjacency_list[source].add(edge)
        if label not in self._label_map:
            self._label_map[label] = set()
        self._label_map[label].add((source, to))

    def child_edges(self, parent):
        """Returns a sorted list of (child, label) pairs for parent."""
        if parent not in self._adjacency_list:
            raise ValueError("Unknown <parent> node: " + str(parent))
        return sorted(self._adjacency_list[parent])

    def children(self, parent):
        """Returns a sorted list of unique children for parent."""
        return sorted(set(x[0] for x in self.child_edges(parent)))

    def edges(self, label):
        """Returns a sorted list of all the (parent, child) edges with this label."""
        if label not in self._label_map:
            raise ValueError("Unknown label: " + str(label))
        return sorted(self._label_map[label])

    def labels(self):
        """Returns a list of all the edge labels in this graph."""
        return list(self._label_map.keys())

    def nodes(self):
        """Returns a list of the nodes in this graph."""
        return list(self._adjacency_list.keys())

    def parent_edges(self, child):
        """Returns a sorted list of (parent, label) pairs for child."""
        if child not in self._adjacency_list:
            raise ValueError("Unknown <child> node: " + str(child))
        parents = []
        for parent, children in self._adjacency_list.items():
            for x in children:
                if x[0] == child:
                    parents.append((parent, x[1]))
        return sorted(parents)

    def parents(self, child):
        """Returns a sorted list of unique parents for child."""
        return sorted(set(x[0] for x in self.parent_edges(child)))

    def remove_node(self, node):
        """Removes node and all edges connected to it."""
        if node not in self._adjacency_list:
            raise ValueError("Unknown node: " + str(node))
        # remove node (and all out-edges) from adjacency list
        del self._adjacency_list[node]
        # remove all in-edges from adjacency list
        for n in self._adjacency_list:
            self._adjacency_list[n] = set(x for x in self._adjacency_list[n]
                                          if x[0] != node)
        # remove all refering pairs in label map
        for label in list(self._label_map.keys()):  # we're editing this!
            lm = set(x for x in self._label_map[label]
                     if (x[0] != node) and (x[1] != node))
            # remove the entry completely if the label is now unused
            if lm:
                self._label_map[label] = lm
            else:
                del self._label_map[label]

    def remove_edge(self, parent, child, label):
        """Removes edge. -- NOT IMPLEMENTED"""
        # hm , this is a multigraph - how should this be implemented?
        raise NotImplementedError("remove_edge is not yet implemented")
# auxilliary graph functions
def df_search(graph, root=None):
    """Depth first search of g.

    Returns a list of all nodes that can be reached from the root node
    in depth-first order.

    If root is not given, the search will be rooted at an arbitrary node.
    """
    if not graph.nodes():
        return []
    if root is None:
        root = graph.nodes()[0]
    visited = {root: 1}
    order = [root]
    # explicit stack: children are pushed reversed so the leftmost child
    # is expanded first, matching a left-to-right preorder traversal
    stack = list(reversed(graph.children(root)))
    while stack:
        node = stack.pop()
        if node not in visited:
            visited[node] = 1
            order.append(node)
            stack.extend(reversed(graph.children(node)))
    return order
def bf_search(graph, root=None):
    """Breadth first search of g.

    Returns a list of all nodes that can be reached from the root node
    in breadth-first order.

    If root is not given, the search will be rooted at an arbitrary node.
    """
    if not graph.nodes():
        return []
    if root is None:
        root = graph.nodes()[0]
    visited = set([root])
    order = [root]
    # FIFO queue walked with a cursor instead of repeated pop-from-front
    queue = list(graph.children(root))
    cursor = 0
    while cursor < len(queue):
        node = queue[cursor]
        cursor += 1
        if node not in visited:
            visited.add(node)
            order.append(node)
            queue.extend(graph.children(node))
    return order
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/Pathway/Rep/MultiGraph.py | Python | gpl-2.0 | 6,743 | [
"Biopython"
] | 4885b9325aff6f2bf470883981dc74d88f209a0d6ad5043fd98a7abe483893a6 |
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Helper functions for the view subpackage"""
from HTMLParser import HTMLParser
from PyQt4 import QtCore, QtGui
from datetime import datetime, time, date
import re
import logging
import operator
from camelot.core.sql import like_op
from sqlalchemy.sql.operators import between_op
from camelot.core.utils import ugettext
from camelot.core.utils import ugettext_lazy as _
logger = logging.getLogger('camelot.view.utils')
#
# Cached date and time formats, for internal use only
#
_local_date_format = None
_local_datetime_format = None
_local_time_format = None
def local_date_format():
    """Get the local date format and cache it for reuse"""
    global _local_date_format
    if not _local_date_format:
        locale = QtCore.QLocale()
        format_sequence = re.split('y*', unicode(locale.dateFormat(locale.ShortFormat)))
        # make sure a year always has 4 numbers
        format_sequence.insert(-1, 'yyyy')
        _local_date_format = unicode(u''.join(format_sequence))
    # NOTE(review): cached for the process lifetime; a locale change at
    # runtime will not be picked up
    return _local_date_format

def local_datetime_format():
    """Get the local datetime format and cache it for reuse"""
    global _local_datetime_format
    if not _local_datetime_format:
        locale = QtCore.QLocale()
        format_sequence = re.split('y*', unicode(locale.dateTimeFormat(locale.ShortFormat)))
        # make sure a year always has 4 numbers
        format_sequence.insert(-1, 'yyyy')
        _local_datetime_format = unicode(u''.join(format_sequence))
    return _local_datetime_format

def local_time_format():
    """Get the local time format and cache it for reuse"""
    global _local_time_format
    if not _local_time_format:
        locale = QtCore.QLocale()
        _local_time_format = unicode(locale.timeFormat(locale.ShortFormat) )
    return _local_time_format

def default_language(*args):
    """takes arguments, to be able to use this function as a
    default field attribute

    :return: the name of the current locale, e.g. 'en_US'
    """
    locale = QtCore.QLocale()
    return unicode(locale.name())
class ParsingError(Exception): pass
def string_from_string(s):
if not s:
return None
return unicode(s)
def bool_from_string(s):
if s is None: raise ParsingError()
if s.lower() not in ['false', 'true']: raise ParsingError()
return eval(s.lower().capitalize())
def _insert_string(original, new, pos):
'''Inserts new inside original at pos.'''
return original[:pos] + new + original[pos:]
def date_from_string(s):
    """Parse a user-entered date string using the local short date format.

    Falls back through progressively looser interpretations: prepending a
    missing leading zero, normalizing separators, dropping separators
    entirely, and finally parsing without a year (current year assumed).

    :param s: string as typed by the user; blank input yields None
    :return: datetime.date or None
    :raises ParsingError: when no interpretation matches
    """
    s = s.strip()
    if not s:
        return None
    from PyQt4.QtCore import QDate
    import string
    f = local_date_format()
    dt = QDate.fromString(s, f)
    if not dt.isValid():
        #
        # if there is a mismatch of 1 in length between format and
        # string, prepend a 0, to handle the case of 1/11/2011
        #
        if len(f) == len(s) + 1:
            s = '0' + s
        dt = QDate.fromString(s, f)
        if not dt.isValid():
            #
            # try alternative separators
            #
            separators = u''.join([c for c in f if c not in string.ascii_letters])
            if separators:
                alternative_string = u''.join([(c if c in string.digits else separators[0]) for c in s])
                dt = QDate.fromString(alternative_string, f)
            if not dt.isValid():
                # try parsing without separators
                # attention : using non ascii letters will fail on windows
                # string.letters then contains non ascii letters of which we don't know the
                # encoding, so we cannot convert them to unicode to compare them
                only_letters_format = u''.join([c for c in f if c in string.ascii_letters])
                only_letters_string = u''.join([c for c in s if c in (string.ascii_letters+string.digits)])
                dt = QDate.fromString(only_letters_string, only_letters_format)
                if not dt.isValid():
                    # try parsing without the year, and take the current year by default
                    only_letters_format = u''.join([c for c in only_letters_format if c not in ['y']])
                    dt = QDate.fromString(only_letters_string, only_letters_format)
                    if not dt.isValid():
                        raise ParsingError()
#                    # try parsing without year and month, and take the current year and month by default
#                    only_letters_format = u''.join([c for c in only_letters_format if c not in ['M']])
#                    dt = QDate.fromString(only_letters_string, only_letters_format)
#                    if not dt.isValid():
#                        raise ParsingError()
#                    else:
#                        today = date.today()
#                        return date(today.year, today.month, dt.day())
                    else:
                        return date(date.today().year, dt.month(), dt.day())
    return date(dt.year(), dt.month(), dt.day())
def time_from_string(s):
    """Parse a time string using the local short time format.

    :param s: string; blank input yields None
    :return: datetime.time or None
    :raises ParsingError: when the string does not match the local format
    """
    s = s.strip()
    if not s:
        return None
    from PyQt4.QtCore import QTime
    f = local_time_format()
    tm = QTime.fromString(s, f)
    if not tm.isValid():
        raise ParsingError()
    return time( tm.hour(), tm.minute(), tm.second() )

def datetime_from_string(s):
    """Parse a datetime string using the local short datetime format.

    :param s: string; blank input yields None
    :return: datetime.datetime or None
    :raises ParsingError: when the string does not match the local format
    """
    s = s.strip()
    if not s:
        return None
    from PyQt4.QtCore import QDateTime
    f = local_datetime_format()
    dt = QDateTime.fromString(s, f)
    if not dt.isValid():
        raise ParsingError()
    return datetime(dt.date().year(), dt.date().month(), dt.date().day(),
                    dt.time().hour(), dt.time().minute(), dt.time().second())
def code_from_string(s, separator):
    """Split a code string into its parts on *separator*."""
    return s.split(separator)

def int_from_string(s):
    """Parse *s* into an int; blank or empty input yields 0.

    A decimal string such as '1.0' is accepted and truncated to an int.

    :raises ParsingError: when *s* is None or not numeric
    """
    if s is None:
        raise ParsingError()
    if s.isspace():
        return int()
    stripped = s.strip()
    if not stripped:
        return int()
    try:
        # go through float first, so strings like '1.0' convert to 1
        return int(float(stripped))
    except ValueError:
        raise ParsingError()
def float_from_string(s):
    """Parse a float using the current locale (locale-aware decimal separator).

    :param s: string; empty/falsy input yields None
    :return: float or None
    :raises ParsingError: when the locale cannot convert the string
    """
    if not s:
        return None
    locale = QtCore.QLocale()
    # floats in python are implemented as double in C
    f, ok = locale.toDouble(s)
    if not ok:
        raise ParsingError()
    return f
def pyvalue_from_string(pytype, s):
    """Parse string *s* into a value of the requested python type.

    :param pytype: one of str, unicode, bool, date, time, datetime, float, int
    :param s: the string to parse
    :return: the parsed value, or None (implicitly) for an unknown type
    :raises ParsingError: propagated from the type-specific parsers
    """
    if pytype is str:
        return str(s)
    elif pytype is unicode:
        return unicode(s)
    elif pytype is bool:
        return bool_from_string(s)
    elif pytype is date:
        return date_from_string(s)
    elif pytype is time:
        # bugfix: previously delegated to date_from_string, which returns
        # a date object instead of a time
        return time_from_string(s)
    elif pytype is datetime:
        return datetime_from_string(s)
    elif pytype is float:
        return float_from_string(s)
    elif pytype is int:
        return int_from_string(s)
def to_string( value ):
    """Unicode representation of *value*; None maps to the empty string."""
    # identity test instead of '== None', per PEP 8; also avoids invoking
    # a custom __eq__ on the value
    if value is None:
        return u''
    return unicode( value )

def enumeration_to_string(value):
    """Translated, human-readable form of an enumeration value:
    underscores become spaces and the first letter is capitalized."""
    return ugettext(unicode(value or u'').replace('_', ' ').capitalize())
# Human-readable (translatable) symbols for the comparison operators used in
# filter expressions; keys are the actual comparison callables.
operator_names = {
    operator.eq : _( u'=' ),
    operator.ne : _( u'!=' ),
    operator.lt : _( u'<' ),
    operator.le : _( u'<=' ),
    operator.gt : _( u'>' ),
    operator.ge : _( u'>=' ),
    like_op : _( u'like' ),
    between_op: _( u'between' ),
}
def text_from_richtext( unstripped_text ):
    """function that returns a list of lines with escaped data, to be used in
    templates for example

    :arg unstripped_text: string
    :return: list of strings
    """
    strings = ['']
    if not unstripped_text:
        return strings

    class HtmlToTextParser(HTMLParser):
        def handle_endtag(self, tag):
            # a <br> tag starts a new output line
            if tag == 'br':
                strings.append('')

        def handle_data(self, data):
            from xml.sax.saxutils import escape
            # re-escape the visible text so it can be embedded in markup again
            data = data.strip()
            if data:
                strings.append(escape(data))

    parser = HtmlToTextParser()
    parser.feed(unstripped_text.strip())
    return strings
def resize_widget_to_screen( widget, fraction = 0.75 ):
    """Resize a widget to fill a certain fraction of the screen

    :param widget: the widget to resize
    :param fraction: the fraction of the available screen to fill
    """
    desktop = QtGui.QApplication.desktop()
    available_geometry = desktop.availableGeometry( widget )
    # bugfix: use the requested fraction instead of the previously
    # hard-coded 0.75, so the parameter actually has an effect
    widget.resize( available_geometry.width() * fraction,
                   available_geometry.height() * fraction )
"VisIt"
] | a2b9d760e7c4d0db16b320e85413ddf56e6001420f1b9cc30df89f1b824a0295 |
# $HeadURL$
__RCSID__ = "$Id$"
import types
import threading
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Core.Utilities.ThreadSafe import Synchronizer
from DIRAC.FrameworkSystem.Client.Logger import gLogger
gEventSync = Synchronizer()
class EventDispatcher:
  """Registry of named events and their listener callbacks.

  Mutating operations are serialized through the module-level gEventSync
  synchronizer; listeners must return an S_OK/S_ERROR structure.
  """

  def __init__( self ):
    self.__events = {}            # eventName -> list of listener callables
    self.__processingEvents = set()  # events currently being dispatched

  @gEventSync
  def registerEvent( self, eventName ):
    """Create the event if it does not exist yet."""
    if eventName in self.__events:
      return
    self.__events[ eventName ] = []

  @gEventSync
  def addListener( self, eventName, functor ):
    """Subscribe functor to eventName; duplicates are silently ignored."""
    if eventName not in self.__events:
      return S_ERROR( "Event %s is not registered" % eventName )
    if functor in self.__events[ eventName ]:
      return S_OK()
    self.__events[ eventName ].append( functor )
    return S_OK()

  @gEventSync
  def removeListener( self, eventName, functor ):
    """Unsubscribe functor from eventName; missing listeners are ignored."""
    if eventName not in self.__events:
      return S_ERROR( "Event %s is not registered" % eventName )
    if functor not in self.__events[ eventName ]:
      return S_OK()
    # bugfix: lists have no find() method, so the previous index lookup
    # raised AttributeError; remove() drops the first occurrence directly
    self.__events[ eventName ].remove( functor )
    return S_OK()

  def isEventBeingProcessed( self, eventName ):
    """True while a trigger for eventName is in flight."""
    return eventName in self.__processingEvents

  def getRegisteredEvents( self ):
    """Sorted list of all registered event names."""
    return sorted( self.__events )

  def triggerEvent( self, eventName, params = False, threaded = False ):
    """Fire eventName, passing params to every listener.

    With threaded=True the dispatch runs in a daemon thread and S_OK(0)
    is returned immediately; otherwise S_OK(<listeners notified>) or the
    first failing listener's S_ERROR.
    """
    if threaded:
      th = threading.Thread( target = self.__realTrigger, args = ( eventName, params ) )
      th.setDaemon( 1 )
      th.start()
      return S_OK( 0 )
    else:
      return self.__realTrigger( eventName, params )

  def __realTrigger( self, eventName, params ):
    gEventSync.lock()
    try:
      if eventName not in self.__events:
        return S_ERROR( "Event %s is not registered" % eventName )
      if eventName in self.__processingEvents:
        # re-entrant trigger of an event already dispatching: no-op
        return S_OK( 0 )
      eventFunctors = list( self.__events[ eventName ] )
      self.__processingEvents.add( eventName )
    finally:
      gEventSync.unlock()
    finalResult = S_OK()
    for functor in eventFunctors:
      try:
        result = functor( eventName, params )
      except Exception:
        gLogger.exception( "Listener %s for event %s raised an exception" % ( functor.__name__, eventName ) )
        continue
      if type( result ) != types.DictType or 'OK' not in result:
        gLogger.error( "Listener %s for event %s did not return a S_OK/S_ERROR structure" % ( functor.__name__, eventName ) )
        continue
      if not result[ 'OK' ]:
        # first failing listener aborts the dispatch
        finalResult = result
        break
    gEventSync.lock()
    try:
      self.__processingEvents.discard( eventName )
    finally:
      try:
        gEventSync.unlock()
      except:
        pass
    if not finalResult[ 'OK' ]:
      return finalResult
    return S_OK( len( eventFunctors ) )
gEventDispatcher = EventDispatcher()
| sposs/DIRAC | Core/Utilities/EventDispatcher.py | Python | gpl-3.0 | 2,888 | [
"DIRAC"
] | 5446f8ab0da22ff68d1e43c0f4f0c839afa6e93333d0255393b9cb4b99b7c1e4 |
#!/usr/bin/env python
import os
import numpy as np
if __name__ == '__main__':
from pyscf.pbc import gto
from pyscf.pbc.scf import RHF
from qharv.cross import pqscf
import sys
sys.path.insert(0,'../basis')
from basis import bfd_basis
mygs = 16 # grid density
# grid density for visualization
myvgs = 6
vgs = np.array([myvgs]*3)
grid_shape = 2*vgs + 1
# define isosurface levels
default_up = 0.75
default_dn = 0.25
up_dn_map = {
0:{'up':0.9,'dn':-.1},
1:{'up':0.6,'dn':0.3},
3:{'up':0.65,'dn':0.3},
}
chkfile_fname = 'bfd.h5'
moR_fname = 'moR.dat'
rho_fname = 'rho.dat'
alat0 = 3.6
axes = (np.ones((3,3))-np.eye(3))*alat0/2.0
elem = ['C','C']
pos = np.array([[0,0,0],[0.5,0.5,0.5]])*alat0
atoms = pqscf.atom_text(elem,pos)
gs = np.array([mygs]*3)
basis = bfd_basis()
cell = gto.M(a=axes,atom=atoms,verbose=3
,gs=gs,pseudo={'C':'bfd'},basis=basis)
mf = RHF(cell)
mf.chkfile = chkfile_fname
mf.conv_tol = 1e-6
# run or load RHF
if os.path.isfile(chkfile_fname):
from pyscf import lib
mf.__dict__.update(lib.chkfile.load(chkfile_fname,'scf'))
else:
mf.kernel()
# end if
# grid density for molecular orbital
mydgs = 16
dgs = np.array([mydgs]*3)
moR_fname = 'gs%d_'%mydgs+moR_fname
# run or load moR
if os.path.isfile(moR_fname):
moR = np.loadtxt(moR_fname)
else:
from pyscf.pbc.gto.cell import gen_uniform_grids
from pyscf.pbc.dft.numint import eval_ao
coords = gen_uniform_grids(cell,gs=dgs)
aoR = eval_ao(cell,coords)
moR = np.dot(aoR,mf.mo_coeff)
np.savetxt(moR_fname,moR)
# end if
mo_to_plot = [0,1,3,4]
from qharv.inspect import volumetric
from skimage import measure
for iorb in mo_to_plot:
val = moR[:,iorb].reshape(2*dgs+1)
fval= volumetric.spline_volumetric(val)
grid= volumetric.axes_func_on_grid3d(axes,fval,grid_shape)
myup = default_up
mydn = default_dn
if iorb in up_dn_map.keys():
myup = up_dn_map[iorb]['up']
mydn = up_dn_map[iorb]['dn']
# end if
lmin,lmax = grid.min(),grid.max()
levelup = lmin+myup*(lmax-lmin)
leveldn = lmin+mydn*(lmax-lmin)
if levelup > 0:
verts,faces,normals,values = measure.marching_cubes_lewiner(grid,levelup)
text = volumetric.wavefront_obj(verts,faces.astype(int),normals)
with open('orb%d_up.obj'%iorb,'w') as f:
f.write(text)
if leveldn > 0:
verts,faces,normals,values = measure.marching_cubes_lewiner(grid,leveldn)
text = volumetric.wavefront_obj(verts,faces,normals)
with open('orb%d_dn.obj'%iorb,'w') as f:
f.write(text)
# end for iorb
# end __main__
| Paul-St-Young/share | algorithms/iso3d/hf/obj_from_chf.py | Python | mit | 2,691 | [
"PySCF"
] | 91365799fe3cc974a8d3fc06b882c0d561614388aa109db5f6a81ff3cc79724d |
import os, sys
import logging
from optparse import OptionParser
import matplotlib
matplotlib.use('PDF')
from hiclib import mapping
from mirnylib import h5dict, genome, plotting
from hiclib import fragmentHiC, binnedData
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
from mirnylib.plotting import mat_img, removeAxes, removeBorder, niceShow
from hiclib.binnedData import binnedDataAnalysis, experimentalBinnedData
# manage option and arguments processing
def main():
global options
global args
usage = '''usage: %prog [options] reads.[fastq|sra|bam]+
takes fastq or sra files and runs the hiclib pipeline on it
Note, read pairs in fastq format (possible gzipped) or bam need to be stated next to each other, i.e. fastq_r1 fastq_r2
'''
parser = OptionParser(usage)
parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
help="print status messages to stdout")
parser.add_option("-e", "--restrictionEnzyme", type="string", dest="enzyme", default="",
help="Name of the restriction enzyme, e.g. BglII")
parser.add_option("-n", "--experimentName", type="string", dest="experiment", default="",
help="Name of the experiment")
parser.add_option("-b", "--bowtie", type="string", dest="bowtie", default="",
help="location of bowtie [default: %default]")
parser.add_option("-r", "--referenceGenome", type="string", dest="genome", default="",
help="genome in fasta format [default: %default]")
parser.add_option("-g", "--gapFile", type="string", dest="gapFile", default="",
help="location of the gapfile [default: %default]")
parser.add_option("-i", "--index", type="string", dest="index", default="",
help="location of genome index including the basename")
parser.add_option("-l", "--readLength", type="int", dest="readLength", default=100,
help="length of the reads [default: %default]")
parser.add_option("-m", "--minSeqLength", type="int", dest="minSeqLength", default=20,
help="minimum length of the reads when doing iterative mapping [default: %default]")
parser.add_option("-x", "--stepSize", type="int", dest="stepSize", default=15,
help="stepsize for iterative mapping [default: %default]")
parser.add_option("-c", "--chromosome", type="string", dest="chromosome", default="",
help="focus on specific chromosome, e.g. 16, X or Y [default: %default]")
parser.add_option("-F", "--inputFormat", type="string", dest="inputFormat", default="fastq",
help="format of the input file, either fastq, sra or bam [default: %default]")
parser.add_option("-O", "--outputDir", type="string", dest="outputDir", default="",
help="output directory [default: %default]")
parser.add_option("-C", "--cpus", type="int", dest="cpus", default=1,
help="number of cpus to use [default: %default]")
parser.add_option("-T", "--tmpDir", type="string", dest="tmpDir", default="/tmp",
help="directory for temp files [default: %default]")
parser.add_option("-S", "--sra-reader", type="string", dest="sra", default="fastq-dump",
help="location of sra reader fastq-dump in case input is SRA [default: %default]")
(options, args) = parser.parse_args()
if (len(args) < 1):
parser.print_help()
parser.error("[ERROR] Incorrect number of arguments, need at least one read file")
if (options.inputFormat != 'fastq' and options.inputFormat != 'sra' and options.inputFormat != 'bam'):
print >> sys.stderr, "[ERROR] Input format not supported: %s" % (options.inputFormat)
sys.exit(1)
if ((options.inputFormat == 'fastq' or options.inputFormat == 'bam') and len(args) % 2 != 0):
print >> sys.stderr, "[ERROR] Both reads are required for files in fastq"
sys.exit(1)
if (options.genome == ""):
print >> sys.stderr, "[ERROR] Please specify the location of the reference genome in fasta format"
sys.exit(1)
if (options.inputFormat != 'bam' and options.index == ""):
print >> sys.stderr, "[ERROR] Please specify the location of the bowtie2 index for the reference genome"
sys.exit(1)
if (options.enzyme == ""):
print >> sys.stderr, "[ERROR] Please specify the restriction enzyme (supported enzymes: http://www.biopython.org/DIST/docs/api/Bio.Restriction-module.html)"
sys.exit(1)
if (options.experiment == ""):
print >> sys.stderr, "[ERROR] Please provide a name for the experiment, e.g. [Cellline]_[Enzymename]_[Replica]"
sys.exit(1)
if (options.outputDir != ""):
options.outputDir += os.sep
if (options.verbose):
print >> sys.stdout, "restrictionEnzyme: %s" % (options.enzyme)
print >> sys.stdout, "experimentName: %s" % (options.experiment)
print >> sys.stdout, "bowtie: %s" % (options.bowtie)
print >> sys.stdout, "referenceGenome: %s" % (options.genome)
print >> sys.stdout, "index: %s" % (options.index)
print >> sys.stdout, "readLength: %d" % (options.readLength)
print >> sys.stdout, "outputDir: %s" % (options.outputDir)
print >> sys.stdout, "tmpDir: %s" % (options.tmpDir)
print >> sys.stdout, "cpus: %s" % (options.cpus)
print >> sys.stdout, "inputFormat: %s" % (options.inputFormat)
print >> sys.stdout, "sra-reader: %s" % (options.sra)
process()
def correctedScalingPlot(resolution, filename, experiment, genome, mouse=False, **kwargs):
"Paper figure to compare scaling before/after correction"
global pp
if (options.verbose):
print >> sys.stdout, "correctedScalingPlot: res: %d file1: %s exp1:%s gen:%s" % (resolution, filename, experiment, genome)
plt.figure()
Tanay = binnedDataAnalysis(resolution, genome)
Tanay.simpleLoad(filename, experiment)
Tanay.removePoorRegions()
Tanay.removeDiagonal()
Tanay.plotScaling(experiment, label="Raw data", color="#A7A241")
Tanay.iterativeCorrectWithSS()
Tanay.plotScaling(experiment, label="Corrected", color="#344370")
ax = plt.gca()
plotting.removeAxes()
fs = 6
plt.xlabel("Genomic distance (MB)", fontsize=6)
plt.ylabel("Contact probability", fontsize=6)
for xlabel_i in ax.get_xticklabels():
xlabel_i.set_fontsize(fs)
for xlabel_i in ax.get_yticklabels():
xlabel_i.set_fontsize(fs)
legend = plt.legend(loc=0, prop={"size": 6})
legend.draw_frame(False)
plt.xscale("log")
plt.yscale("log")
plt.show()
pp.savefig()
def doArmPlot(resolution, filename, experiment, genome, mouse=False, **kwargs):
"Plot an single interarm map - paper figure"
global pp
plt.figure()
if (options.verbose):
print >> sys.stdout, "doArmPlot: res: %d file: %s exp:%s gen:%s" % (resolution, filename, experiment, genome)
Tanay = binnedDataAnalysis(resolution, genome)
Tanay.simpleLoad(filename, experiment)
if mouse == True:
Tanay.fakeTranslocations([(0, 0, None, 12, 52000000, None),
(4, 45000000, None, 12, 0, 30000000),
(9, 0, 50000000, 12, 0, 35000000)])
Tanay.removeChromosome(19)
else:
Tanay.removeChromosome(22)
Tanay.removeDiagonal(1)
Tanay.removePoorRegions()
Tanay.truncTrans()
Tanay.fakeCis()
#mat_img(Tanay.dataDict["GM-all"])
#plt.figure(figsize = (3.6,3.6))
Tanay.averageTransMap(experiment, **kwargs)
#plotting.removeBorder()
cb = plt.colorbar(orientation="vertical")
#cb.set_ticks([-0.05,0.05,0.15])
for xlabel_i in cb.ax.get_xticklabels():
xlabel_i.set_fontsize(6)
def mapFile(fastq, read):
global options
global args
fileName, fileExtension = os.path.splitext(fastq)
bamOutput = options.outputDir+fileName.split(os.sep)[-1]+'.bam'
if (fileExtension == '.sra'):
if (options.verbose):
print >> sys.stdout, "Map short read archive %s utilizing %s" % (fastq, options.sra)
mapping.iterative_mapping(
bowtie_path=options.bowtie,
bowtie_index_path=options.index,
fastq_path=fastq,
out_sam_path=bamOutput,
min_seq_len=options.minSeqLength,
len_step=options.stepSize,
seq_start=options.readLength*(read-1),
seq_end=options.readLength*(read),
nthreads=options.cpus,
temp_dir=options.tmpDir,
bowtie_flags='--very-sensitive',
bash_reader=options.sra+' -Z')
else:
if (options.verbose):
print >> sys.stdout, "Map fastq %s" % (fastq)
mapping.iterative_mapping(
bowtie_path=options.bowtie,
bowtie_index_path=options.index,
fastq_path=fastq,
out_sam_path=bamOutput,
min_seq_len=options.minSeqLength,
len_step=options.stepSize,
nthreads=options.cpus,
temp_dir=options.tmpDir,
bowtie_flags='--very-sensitive')
return bamOutput
def mapFiles():
    """Iteratively map every input file and return the produced bam paths.

    For fastq/bam input, ``args`` holds read-1/read-2 files next to each
    other, so the list is consumed in pairs.  For sra input each archive
    contains both mates, so the same file is mapped twice, once with read
    index 1 and once with read index 2.

    :return: list of bam file paths, ordered read1, read2, read1, read2, ...
    """
    bams = []
    if (options.inputFormat == 'fastq'):
        if (options.verbose):
            print >> sys.stdout, "** Process fastq files"
        # consume the argument list pairwise: read-1 file, then read-2 file
        for i in range(0, len(args),2):
            if (options.verbose):
                print >> sys.stdout, "** Map first input file"
            bams+=[mapFile(args[i], 1)]
            if (options.verbose):
                print >> sys.stdout, "** Map second input file"
            bams+=[mapFile(args[i+1], 2)]
    else:
        if (options.verbose):
            print >> sys.stdout, "** Process sra files"
        # one sra archive carries both mates: map it once per read index
        for i in range(0, len(args)):
            if (options.verbose):
                print >> sys.stdout, "** Map first input file"
            bams+=[mapFile(args[i], 1)]
            if (options.verbose):
                print >> sys.stdout, "** Map second input file"
            bams+=[mapFile(args[i], 2)]
    return bams
def collectMappedReads(bam_read1, bam_read2, mapped_reads, genome_db):
    """Parse the two mapped-read bam files into a single h5dict dataset.

    Delegates to hiclib's mapping.parse_sam, pairing the two sides of each
    read and annotating them with restriction-fragment information for the
    enzyme chosen on the command line.

    :param bam_read1: bam basename with the mapped first mates
    :param bam_read2: bam basename with the mapped second mates
    :param mapped_reads: h5dict that receives the parsed read pairs
    :param genome_db: mirnylib genome object (enzyme set by the caller)
    """
    global options
    global args
    mapping.parse_sam(
        sam_basename1=bam_read1,
        sam_basename2=bam_read2,
        out_dict=mapped_reads,
        genome_db=genome_db,
        enzyme_name=options.enzyme)
def filterFragments(genome_db):
'''
Filter the data at the level of individual restriction fragments
The following reads are remove from the dataset:
- the reads that start within the 5 bp range from the restriction site
- the identical read pairs, with both ends starting at exactly the same positions
- the reads coming from extremely large and extremely small restriction fragments (length > 10^5 bp or length < 100 bp)
- the reads coming from the top 1% most frequently detected restriction fragments
The rationale behind each of the filters is discussed in the hiclib publication. The API documentation contains the description of the filters.
'''
fragments = fragmentHiC.HiCdataset(
filename=options.outputDir+options.experiment+'-fragment_dataset.hdf5',
genome=genome_db,
maximumMoleculeLength=500,
mode='w')
# Load the parsed reads into the HiCdataset. The dangling-end filter is applied
# at this stage, with maximumMoleculeLength specified at the initiation of the
# object.
fragments.parseInputData(dictLike=options.outputDir+options.experiment+'-mapped_reads.hdf5')
# save unfiltered data
fragments.save(options.outputDir+options.experiment+'-fragment_unfiltered.hdf5')
# Removes reads that start within 5 bp (default) near rsite
fragments.filterRsiteStart()
# Removes duplicate molecules in DS reads
fragments.filterDuplicates()
# Removes very large and small fragments
fragments.filterLarge()
# Removes fragments with most and/or least # counts
fragments.filterExtreme(cutH=0.01, cutL=0)
# Get Fragment weights
fragments.calculateFragmentWeights()
# save filtered data
fragments.save(options.outputDir+options.experiment+'-fragment_filtered.hdf5')
# save heatmap
fragments.saveHeatmap(options.outputDir+options.experiment+'-1M.hdf5')
# save heatmap chr by chr (high resolution scale)
# fragments.saveByChromosomeHeatmap(options.outputDir+options.experiment+'-chrbychr.hdf5')
def iterativeFiltering(genome_db, filesuffix):
'''
Filter the data at the binned level and perform the iterative correction.
'''
# Read resolution from the dataset.
raw_heatmap = h5dict.h5dict(options.outputDir+options.experiment+filesuffix, mode='r')
resolution = int(raw_heatmap['resolution'])
# Create a binnedData object, load the data.
BD = binnedData.binnedData(resolution, genome_db)
BD.simpleLoad(options.outputDir+options.experiment+filesuffix, options.experiment)
# Remove the contacts between loci located within the same bin.
BD.removeDiagonal()
# Remove bins with less than half of a bin sequenced.
BD.removeBySequencedCount(0.5)
# Remove 1% of regions with low coverage.
BD.removePoorRegions(cutoff=1)
# Truncate top 0.05% of interchromosomal counts (possibly, PCR blowouts).
BD.truncTrans(high=0.0005)
# Remove empty bins
BD.removeZeros()
# Perform iterative correction.
BD.iterativeCorrectWithoutSS()
# Save the iteratively corrected heatmap.
BD.export(options.experiment, options.outputDir+options.experiment+'-IC'+filesuffix)
plt.figure()
plotting.plot_matrix(np.log(BD.dataDict[options.experiment]))
pp.savefig()
def process():
global options
global args
global pp
if (options.verbose):
print >> sys.stdout, "*** START processing"
fig = plt.figure()
pp = PdfPages(options.outputDir+options.experiment+'.pdf')
logging.basicConfig(level=logging.DEBUG)
if (options.verbose):
print >> sys.stdout, "** Create directories"
if not os.path.exists(options.tmpDir):
os.mkdir(options.tmpDir)
if not os.path.exists(options.outputDir):
os.mkdir(options.outputDir)
if (options.verbose):
print >> sys.stdout, "** Create data objects"
mapped_reads = h5dict.h5dict(options.outputDir+options.experiment+'-mapped_reads.hdf5')
if options.chromosome != "":
genome_db = genome.Genome(options.genome, gapFile=options.gapFile, readChrms=[options.chromosome])
else:
genome_db = genome.Genome(options.genome, gapFile=options.gapFile, readChrms=['#', 'X', 'Y'])
genome_db.setEnzyme(options.enzyme)
bams = []
if (options.inputFormat != 'bam'):
bams = mapFiles()
else:
bams = args[0:]
if (options.verbose):
print >> sys.stdout, "** Collect mapped reads"
collectMappedReads(bams[0], bams[1], mapped_reads, genome_db)
if (options.verbose):
print >> sys.stdout, "** Filter fragments"
filterFragments(genome_db)
if (options.verbose):
print >> sys.stdout, "** Iterative filtering of fragments"
iterativeFiltering(genome_db, '-1M.hdf5')
# plotting
correctedScalingPlot(1000000, options.outputDir+options.experiment+'-1M.hdf5', options.experiment, genome_db)
doArmPlot(1000000, options.outputDir+options.experiment+'-1M.hdf5', options.experiment, genome_db)
if (options.verbose):
print >> sys.stdout, "*** FINISHED processing"
pp.close()
######################################
# main
######################################
if __name__ == "__main__":
main()
| aehrc/ngsane | tools/hiclibMapping.py | Python | bsd-3-clause | 14,871 | [
"Biopython",
"Bowtie"
] | da6aedcf37787bf1b520f89f6b8c34144768bea5a0c086c2a56b3588dcaac726 |
# -*- coding: utf-8 -*-
#
# test_growth_curves.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy.integrate import quad
import math
import numpy
from numpy import testing
import unittest
import nest
import time
import sys
HAVE_OPENMP = nest.ll_api.sli_func("is_threaded")
class SynapticElementIntegrator(object):
    """
    Reference integrator for the calcium trace driving synaptic elements.

    Tracks an exponentially decaying calcium concentration that jumps by
    ``beta_ca`` at every spike.  Subclasses override :meth:`get_se` to turn
    the calcium trace into a number of synaptic elements.
    """

    def __init__(self, tau_ca=10000.0, beta_ca=0.001):
        """
        Constructor

        :param tau_ca (float): time constant of the exponential Ca decay
        :param beta_ca (float): Ca increment contributed by each spike
        """
        self.tau_ca = tau_ca
        self.beta_ca = beta_ca
        self.reset()

    def reset(self):
        """Forget all previous spikes and restart from an empty state."""
        self.t_minus = 0
        self.ca_minus = 0
        self.se_minus = 0

    def handle_spike(self, t):
        """
        Process a spike arriving at time ``t``.

        First freezes the synaptic-element count at ``t``, then bumps the
        decayed Ca value by ``beta_ca``.  ``t`` must not precede the last
        processed event.

        :param t (float): spike time
        """
        assert t >= self.t_minus
        self.se_minus = self.get_se(t)
        self.ca_minus = self.get_ca(t) + self.beta_ca
        self.t_minus = t

    def get_ca(self, t):
        """
        :param t (float): current time
        :return: Ca value decayed from the last event, clipped at zero
        """
        assert t >= self.t_minus
        decayed = self.ca_minus * math.exp((self.t_minus - t) / self.tau_ca)
        return decayed if decayed > 0 else 0

    def get_se(self, t):
        """
        :param t (float): current time
        :return: number of synaptic elements (constant 0 in the base class)

        Should be overwritten by subclasses.
        """
        return 0.0
class LinearExactSEI(SynapticElementIntegrator):
    """
    Closed-form integrator for the linear growth curve

        dse/dt = nu * (1 - Ca(t)/eps)

    where ``nu`` is the growth rate and ``eps`` the fix point.  Between two
    events Ca decays exponentially, so the integral has an exact
    antiderivative that is evaluated here instead of a numeric quadrature.
    """

    def __init__(self, eps=0.7, growth_rate=1.0, *args, **kwargs):
        """
        Constructor

        :param eps: fix point
        :param growth_rate: scaling of the growth curve

        .. seealso:: SynapticElementIntegrator()
        """
        super(LinearExactSEI, self).__init__(*args, **kwargs)
        self.eps = eps
        self.growth_rate = growth_rate

    def get_se(self, t):
        """
        :param t (float): current time
        :return: number of synaptic elements, clipped at zero
        """
        assert t >= self.t_minus
        # Exact integral of nu * (1 - Ca/eps) over [t_minus, t]:
        # the Ca term integrates to -tau_ca * (Ca(t) - Ca(t_minus)).
        delta_ca = self.get_ca(t) - self.ca_minus
        delta_t = t - self.t_minus
        growth = 1 / self.eps * (
            self.growth_rate * self.tau_ca * delta_ca
            + self.growth_rate * self.eps * delta_t
        )
        total = growth + self.se_minus
        return total if total > 0 else 0
class LinearNumericSEI(SynapticElementIntegrator):
    """
    Numeric integrator for the linear growth curve

        dse/dt = nu * (1 - Ca(t)/eps)

    Same model as :class:`LinearExactSEI`, but the growth curve is
    integrated with ``scipy.integrate.quad`` instead of the exact formula.
    """

    def __init__(self, eps=0.7, growth_rate=1.0, *args, **kwargs):
        """
        Constructor

        :param eps: fix point
        :param growth_rate: scaling of the growth curve

        .. seealso:: SynapticElementIntegrator()
        """
        super(LinearNumericSEI, self).__init__(*args, **kwargs)
        self.eps = eps
        self.growth_rate = growth_rate

    def get_se(self, t):
        """
        :param t (float): current time
        :return: number of synaptic elements, clipped at zero
        """
        assert t >= self.t_minus
        accumulated, _ = quad(self.growth_curve, self.t_minus, t)
        total = self.se_minus + accumulated
        return total if total > 0 else 0

    def growth_curve(self, t):
        """Instantaneous growth rate nu * (1 - Ca(t)/eps)."""
        return self.growth_rate * (1.0 - (self.get_ca(t) / self.eps))
class GaussianNumericSEI(SynapticElementIntegrator):
    """
    Numeric integrator for the Gaussian growth curve

        dse/dt = nu * (2 * exp(-((Ca - xi)/zeta)^2) - 1)

    with:
        xi   = (eta + eps) / 2
        zeta = (eta - eps) / (2 * sqrt(ln(2)))

    so the curve crosses zero at the two fix points eta and eps.
    Integrated numerically with ``scipy.integrate.quad``.
    """

    def __init__(self, eta=0.1, eps=0.7, growth_rate=1.0, *args, **kwargs):
        """
        Constructor

        :param eps: low fix point
        :param eta: high fix point
        :param growth_rate: scaling of the growth curve

        .. seealso:: SynapticElementIntegrator()
        """
        super(GaussianNumericSEI, self).__init__(*args, **kwargs)
        self.zeta = (eta - eps) / (2.0 * math.sqrt(math.log(2.0)))
        self.xi = (eta + eps) / 2.0
        self.growth_rate = growth_rate

    def get_se(self, t):
        """
        :param t (float): current time
        :return: number of synaptic elements, clipped at zero
        """
        assert t >= self.t_minus
        accumulated, _ = quad(self.growth_curve, self.t_minus, t)
        total = self.se_minus + accumulated
        return total if total > 0 else 0

    def growth_curve(self, t):
        """Gaussian-shaped growth rate as a function of the Ca trace."""
        z = (self.get_ca(t) - self.xi) / self.zeta
        return self.growth_rate * (2 * math.exp(-(z * z)) - 1)
class SigmoidNumericSEI(SynapticElementIntegrator):
    """
    Numeric integrator for the sigmoid growth curve

        dse/dt = nu * (2 / (1 + exp((Ca - eps)/psi)) - 1)

    Integrated numerically with ``scipy.integrate.quad``.
    """

    def __init__(self, eps=0.7, growth_rate=1.0, psi=0.1, *args, **kwargs):
        """
        Constructor

        :param eps: set point
        :param psi: controls width of growth curve
        :param growth_rate: scaling of the growth curve

        .. seealso:: SynapticElementIntegrator()
        """
        super(SigmoidNumericSEI, self).__init__(*args, **kwargs)
        self.eps = eps
        self.psi = psi
        self.growth_rate = growth_rate

    def get_se(self, t):
        """
        :param t (float): current time
        :return: number of synaptic elements, clipped at zero
        """
        assert t >= self.t_minus
        accumulated, _ = quad(self.growth_curve, self.t_minus, t)
        total = self.se_minus + accumulated
        return total if total > 0 else 0

    def growth_curve(self, t):
        """Sigmoidal growth rate as a function of the Ca trace."""
        exponent = (self.get_ca(t) - self.eps) / self.psi
        return self.growth_rate * ((2.0 / (1.0 + math.exp(exponent))) - 1.0)
@unittest.skipIf(not HAVE_OPENMP, 'NEST was compiled without multi-threading')
class TestGrowthCurve(unittest.TestCase):
"""
Unittest class to test the GrowthCurve used with nest
"""
def setUp(self):
nest.ResetKernel()
nest.SetKernelStatus({"total_num_virtual_procs": 4})
nest.set_verbosity('M_DEBUG')
self.sim_time = 10000.0
self.sim_step = 100
nest.SetKernelStatus(
{'structural_plasticity_update_interval': self.sim_time + 1})
self.se_integrator = []
self.sim_steps = None
self.ca_nest = None
self.ca_python = None
self.se_nest = None
self.se_python = None
# build
self.pop = nest.Create('iaf_psc_alpha', 10)
self.spike_detector = nest.Create('spike_detector')
nest.Connect(self.pop, self.spike_detector, 'all_to_all')
noise = nest.Create('poisson_generator')
nest.SetStatus(noise, {"rate": 800000.0})
nest.Connect(noise, self.pop, 'all_to_all')
def simulate(self):
self.sim_steps = numpy.arange(0, self.sim_time, self.sim_step)
self.ca_nest = numpy.zeros(
(len(self.pop), len(self.sim_steps)))
self.ca_python = numpy.zeros(
(len(self.se_integrator), len(self.sim_steps)))
self.se_nest = numpy.zeros(
(len(self.pop), len(self.sim_steps)))
self.se_python = numpy.zeros(
(len(self.se_integrator), len(self.sim_steps)))
for t_i, t in enumerate(self.sim_steps):
for n_i in range(len(self.pop)):
self.ca_nest[n_i][t_i], synaptic_elements = nest.GetStatus(
self.pop[n_i], ('Ca', 'synaptic_elements'))[0]
self.se_nest[n_i][t_i] = synaptic_elements['se']['z']
nest.Simulate(self.sim_step)
tmp = nest.GetStatus(self.spike_detector, 'events')[0]
spikes_all = tmp['times']
senders_all = tmp['senders']
for n_i, n in enumerate(self.pop):
spikes = spikes_all[senders_all == n.get('global_id')]
[sei.reset() for sei in self.se_integrator]
spike_i = 0
for t_i, t in enumerate(self.sim_steps):
while spike_i < len(spikes) and spikes[spike_i] <= t:
[sei.handle_spike(spikes[spike_i])
for sei in self.se_integrator]
spike_i += 1
for sei_i, sei in enumerate(self.se_integrator):
self.ca_python[sei_i, t_i] = sei.get_ca(t)
self.se_python[sei_i, t_i] = sei.get_se(t)
for sei_i, sei in enumerate(self.se_integrator):
testing.assert_almost_equal(
self.ca_nest[n_i], self.ca_python[sei_i], decimal=5)
testing.assert_almost_equal(
self.se_nest[n_i], self.se_python[sei_i], decimal=5)
def test_linear_growth_curve(self):
beta_ca = 0.0001
tau_ca = 10000.0
growth_rate = 0.0001
eps = 0.10
nest.SetStatus(
self.pop,
{
'beta_Ca': beta_ca,
'tau_Ca': tau_ca,
'synaptic_elements': {
'se': {
'growth_curve': 'linear',
'growth_rate': growth_rate,
'eps': eps,
'z': 0.0
}
}
}
)
self.se_integrator.append(LinearExactSEI(
tau_ca=tau_ca, beta_ca=beta_ca, eps=eps, growth_rate=growth_rate))
self.se_integrator.append(LinearNumericSEI(
tau_ca=tau_ca, beta_ca=beta_ca, eps=eps, growth_rate=growth_rate))
self.simulate()
# check that we got the same values from one run to another
# expected = self.se_nest[:, 10]
# print(self.se_nest[:, 10].__repr__())
expected = numpy.array([
0.08376263, 0.08374046, 0.08376031, 0.08376756, 0.08375428,
0.08378699, 0.08376784, 0.08369779, 0.08374215, 0.08370484
])
pop_as_list = list(self.pop)
for n in self.pop:
testing.assert_almost_equal(
self.se_nest[pop_as_list.index(n), 10], expected[
pop_as_list.index(n)],
decimal=8)
def test_gaussian_growth_curve(self):
beta_ca = 0.0001
tau_ca = 10000.0
growth_rate = 0.0001
eta = 0.05
eps = 0.10
nest.SetStatus(
self.pop,
{
'beta_Ca': beta_ca,
'tau_Ca': tau_ca,
'synaptic_elements': {
'se': {
'growth_curve': 'gaussian',
'growth_rate': growth_rate,
'eta': eta, 'eps': eps, 'z': 0.0
}
}
}
)
print("hjelp")
self.se_integrator.append(
GaussianNumericSEI(tau_ca=tau_ca, beta_ca=beta_ca,
eta=eta, eps=eps, growth_rate=growth_rate))
self.simulate()
# check that we got the same values from one run to another
# expected = self.se_nest[:, 30]
# print(self.se_nest[:, 30].__repr__())
expected = numpy.array([
0.10044035, 0.10062526, 0.1003149, 0.10046311, 0.1005713,
0.10031755, 0.10032216, 0.10040191, 0.10058179, 0.10068598
])
pop_as_list = list(self.pop)
for n in self.pop:
testing.assert_almost_equal(
self.se_nest[pop_as_list.index(n), 30], expected[
pop_as_list.index(n)],
decimal=5)
def test_sigmoid_growth_curve(self):
beta_ca = 0.0001
tau_ca = 10000.0
growth_rate = 0.0001
eps = 0.10
psi = 0.10
local_nodes = nest.GetLocalNodeCollection(self.pop)
local_nodes.set(
{
'beta_Ca': beta_ca,
'tau_Ca': tau_ca,
'synaptic_elements': {
'se': {
'growth_curve': 'sigmoid',
'growth_rate': growth_rate,
'eps': eps, 'psi': 0.1, 'z': 0.0
}
}
})
self.se_integrator.append(
SigmoidNumericSEI(tau_ca=tau_ca, beta_ca=beta_ca,
eps=eps, psi=psi, growth_rate=growth_rate))
self.simulate()
# check that we got the same values from one run to another
# expected = self.se_nest[:, 30]
# print(self.se_nest[:, 30].__repr__())
expected = numpy.array([
0.07801164, 0.07796841, 0.07807825, 0.07797382, 0.07802574,
0.07805961, 0.07808139, 0.07794451, 0.07799474, 0.07794458
])
local_pop_as_list = list(local_nodes)
for count, n in enumerate(self.pop):
loc = self.se_nest[local_pop_as_list.index(n), 30]
ex = expected[count]
testing.assert_almost_equal(loc, ex, decimal=5)
def suite():
    """Collect every ``test*`` method of TestGrowthCurve into a test suite."""
    return unittest.makeSuite(TestGrowthCurve, 'test')
if __name__ == '__main__':
unittest.main()
| weidel-p/nest-simulator | pynest/nest/tests/test_sp/test_growth_curves.py | Python | gpl-2.0 | 14,529 | [
"Gaussian"
] | 2a196bb3a1b336d226cc81d0278e632f12ba921f4055e172f80fddee190f5d28 |
# Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for Python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
sem
zmap
zscore
gstd
iqr
median_absolute_deviation
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
multiscale_graphcorr
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
epps_singleton_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
brunnermunzel
combine_pvalues
Statistical Distances
---------------------
.. autosummary::
:toctree: generated/
wasserstein_distance
energy_distance
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
Support Functions
-----------------
.. autosummary::
:toctree: generated/
rankdata
rvs_ratio_uniforms
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import sys
import math
if sys.version_info >= (3, 5):
from math import gcd
else:
from fractions import gcd
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma
from scipy._lib.six import callable, string_types
from scipy.spatial.distance import cdist
from scipy.ndimage import measurements
from scipy._lib._version import NumpyVersion
from scipy._lib._util import _lazywhere, check_random_state, MapWrapper
import scipy.special as special
from scipy import linalg
from . import distributions
from . import mstats_basic
from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
siegelslopes)
from ._stats import (_kendall_dis, _toint64, _weightedrankedtau,
_local_correlations)
from ._rvs_sampling import rvs_ratio_uniforms
from ._hypotests import epps_singleton_2samp
# Public API of this module, as exposed by ``from scipy.stats import *``
# and mirrored in the autosummary tables of the module docstring.
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
           'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
           'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
           'normaltest', 'jarque_bera', 'itemfreq',
           'scoreatpercentile', 'percentileofscore',
           'cumfreq', 'relfreq', 'obrientransform',
           'sem', 'zmap', 'zscore', 'iqr', 'gstd', 'median_absolute_deviation',
           'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
           'PearsonRConstantInputWarning', 'PearsonRNearConstantInputWarning',
           'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
           'kendalltau', 'weightedtau',
           'multiscale_graphcorr',
           'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
           'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
           'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
           'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
           'rankdata', 'rvs_ratio_uniforms',
           'combine_pvalues', 'wasserstein_distance', 'energy_distance',
           'brunnermunzel', 'epps_singleton_2samp']
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# This can happen when attempting to sum things which are not
# numbers (e.g. as in the function `mode`). Try an alternative method:
try:
contains_nan = np.nan in set(a.ravel())
except TypeError:
# Don't know what to do. Fall back to omitting nan values and
# issue a warning.
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
def gmean(a, axis=0, dtype=None):
    """Compute the geometric mean along the specified axis.

    The geometric mean is the n-th root of ``x1 * x2 * ... * xn``,
    evaluated here as ``exp(mean(log(a)))``.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the geometric mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Type used for the intermediate log values and the accumulator.
        If not specified, the dtype of `a` is used.

    Returns
    -------
    gmean : ndarray
        The geometric mean along `axis`.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    hmean : Harmonic mean

    Notes
    -----
    Non-finite values are not handled specially; use masked arrays to
    ignore NaN or infinity in the input or in the intermediate logs.

    Examples
    --------
    >>> from scipy.stats import gmean
    >>> gmean([1, 4])
    2.0
    """
    if not isinstance(a, np.ndarray):
        # Not yet an array: the conversion applies the requested dtype.
        logs = np.log(np.array(a, dtype=dtype))
    elif dtype:
        # Preserve masked-ness while forcing the accumulator dtype.
        as_array = np.ma.asarray if isinstance(a, np.ma.MaskedArray) else np.asarray
        logs = np.log(as_array(a, dtype=dtype))
    else:
        logs = np.log(a)
    return np.exp(logs.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
    """Calculate the harmonic mean along the specified axis.

    The harmonic mean is ``n / (1/x1 + 1/x2 + ... + 1/xn)`` and is only
    defined for nonnegative inputs.

    Parameters
    ----------
    a : array_like
        Input array, masked array or object convertible to an array.
    axis : int or None, optional
        Axis along which the harmonic mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Accumulator/return dtype. If not specified, the dtype of `a`
        is used.

    Returns
    -------
    hmean : ndarray
        The harmonic mean along `axis`.

    Raises
    ------
    ValueError
        If any element of `a` is negative.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    gmean : Geometric mean

    Examples
    --------
    >>> from scipy.stats import hmean
    >>> hmean([1, 4])
    1.6000000000000001
    """
    if not isinstance(a, np.ndarray):
        a = np.array(a, dtype=dtype)
    if not np.all(a >= 0):
        raise ValueError("Harmonic mean only defined if all elements greater "
                         "than or equal to zero")
    # Element count along the reduction axis; masked arrays only count
    # their unmasked entries.
    if isinstance(a, np.ma.MaskedArray):
        size = a.count(axis)
    elif axis is None:
        a = a.ravel()
        size = a.shape[0]
    else:
        size = a.shape[axis]
    # Zeros in `a` produce an infinite reciprocal sum and hence a
    # harmonic mean of 0; suppress the divide-by-zero warning.
    with np.errstate(divide='ignore'):
        return size / np.sum(1.0 / a, axis=axis, dtype=dtype)
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
    """
    Return an array of the modal (most common) value in the passed array.

    If there is more than one such value, only the smallest is returned.
    The bin-count for the modal bins is also returned.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):
          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    mode : ndarray
        Array of modal values.
    count : ndarray
        Array of counts for each mode.

    Examples
    --------
    >>> a = np.array([[6, 8, 3, 0],
    ...               [3, 2, 1, 7],
    ...               [8, 1, 8, 4],
    ...               [5, 3, 0, 5],
    ...               [4, 7, 5, 9]])
    >>> from scipy import stats
    >>> stats.mode(a)
    (array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))

    To get mode of whole array, specify ``axis=None``:

    >>> stats.mode(a, axis=None)
    (array([3]), array([3]))
    """
    a, axis = _chk_asarray(a, axis)
    if a.size == 0:
        return ModeResult(np.array([]), np.array([]))
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.mode(a, axis)
    if a.dtype == object and np.nan in set(a.ravel()):
        # Fall back to a slower method since np.unique does not work with NaN
        scores = set(np.ravel(a))  # get ALL unique values
        testshape = list(a.shape)
        testshape[axis] = 1
        oldmostfreq = np.zeros(testshape, dtype=a.dtype)
        oldcounts = np.zeros(testshape, dtype=int)
        for score in scores:
            template = (a == score)
            counts = np.expand_dims(np.sum(template, axis), axis)
            mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
            oldcounts = np.maximum(counts, oldcounts)
            oldmostfreq = mostfrequent
        return ModeResult(mostfrequent, oldcounts)
    def _mode1D(a):
        # Mode of a 1-D slice; np.unique returns sorted values, so
        # argmax of the counts picks the smallest of tied modes.
        vals, cnts = np.unique(a, return_counts=True)
        return vals[cnts.argmax()], cnts.max()
    # np.apply_along_axis will convert the _mode1D tuples to a numpy array,
    # casting types in the process.  This recreates the results without
    # that issue.
    # View of a, rotated so the requested axis is last.
    in_dims = list(range(a.ndim))
    a_view = np.transpose(a, in_dims[:axis] + in_dims[axis+1:] + [axis])
    inds = np.ndindex(a_view.shape[:-1])
    modes = np.empty(a_view.shape[:-1], dtype=a.dtype)
    # Use the builtin `int` here: `np.int` was only ever an alias for it,
    # is deprecated since NumPy 1.20 and removed in 1.24, so `dtype=np.int`
    # raises AttributeError on modern NumPy.
    counts = np.zeros(a_view.shape[:-1], dtype=int)
    for ind in inds:
        modes[ind], counts[ind] = _mode1D(a_view[ind])
    newshape = list(a.shape)
    newshape[axis] = 1
    return ModeResult(modes.reshape(newshape), counts.reshape(newshape))
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
    """Compute the trimmed mean.

    This function finds the arithmetic mean of given values, ignoring
    values outside the given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside these bounds are ignored; None (default) keeps
        everything, and either bound may itself be None (half-open
        interval).
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit count.
        Default is (True, True).
    axis : int or None, optional
        Axis along which to compute the mean. Default is None.

    Returns
    -------
    tmean : float
        Trimmed mean.

    See Also
    --------
    trim_mean : Returns mean after trimming a proportion from both tails.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmean(x)
    9.5
    >>> stats.tmean(x, (3,17))
    10.0
    """
    arr = np.asarray(a)
    if limits is None:
        # No trimming requested: a plain mean over the flattened array.
        return np.mean(arr, None)
    trimmed = _mask_to_limits(arr.ravel(), limits, inclusive)
    return trimmed.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """Compute the trimmed variance.

    This function computes the sample variance of an array of values,
    while ignoring values which are outside of given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside these bounds are ignored; None (default) keeps
        everything, and either bound may itself be None (half-open
        interval).
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit count.
        Default is (True, True).
    axis : int or None, optional
        Axis along which to operate (default 0; None means the whole
        array).
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tvar : float
        Trimmed variance.

    Notes
    -----
    `tvar` computes the unbiased sample variance, i.e. it uses a
    correction factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tvar(x)
    35.0
    >>> stats.tvar(x, (3,17))
    20.0
    """
    arr = np.asarray(a).astype(float)
    if limits is None:
        return arr.var(ddof=ddof, axis=axis)
    masked = _mask_to_limits(arr, limits, inclusive)
    # Filling masked entries with NaN lets nanvar do the reduction
    # along any axis while skipping the trimmed values.
    return np.nanvar(masked.filled(fill_value=np.nan), ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
    """Compute the trimmed minimum.

    This function finds the minimum value of an array `a` along the
    specified axis, but only considering values greater than a specified
    lower limit.

    Parameters
    ----------
    a : array_like
        Array of values.
    lowerlimit : None or float, optional
        Values below this limit are ignored. None (default) keeps
        everything.
    axis : int or None, optional
        Axis along which to operate (default 0; None means the whole
        array).
    inclusive : {True, False}, optional
        Whether values exactly equal to the lower limit count.
        Default is True.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to handle nan values in the input (default 'propagate').

    Returns
    -------
    tmin : float, int or ndarray
        Trimmed minimum.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmin(x)
    0
    >>> stats.tmin(x, 13)
    13
    >>> stats.tmin(x, 13, inclusive=False)
    14
    """
    arr, axis = _chk_asarray(a, axis)
    masked = _mask_to_limits(arr, (lowerlimit, None), (inclusive, False))
    contains_nan, nan_policy = _contains_nan(masked, nan_policy)
    if contains_nan and nan_policy == 'omit':
        masked = ma.masked_invalid(masked)
    result = ma.minimum.reduce(masked, axis).data
    # Collapse 0-d results to a plain scalar.
    return result[()] if result.ndim == 0 else result
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
    """Compute the trimmed maximum.

    This function computes the maximum value of an array along a given
    axis, while ignoring values larger than a specified upper limit.

    Parameters
    ----------
    a : array_like
        Array of values.
    upperlimit : None or float, optional
        Values above this limit are ignored. None (default) keeps
        everything.
    axis : int or None, optional
        Axis along which to operate (default 0; None means the whole
        array).
    inclusive : {True, False}, optional
        Whether values exactly equal to the upper limit count.
        Default is True.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to handle nan values in the input (default 'propagate').

    Returns
    -------
    tmax : float, int or ndarray
        Trimmed maximum.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmax(x)
    19
    >>> stats.tmax(x, 13)
    13
    >>> stats.tmax(x, 13, inclusive=False)
    12
    """
    arr, axis = _chk_asarray(a, axis)
    masked = _mask_to_limits(arr, (None, upperlimit), (False, inclusive))
    contains_nan, nan_policy = _contains_nan(masked, nan_policy)
    if contains_nan and nan_policy == 'omit':
        masked = ma.masked_invalid(masked)
    result = ma.maximum.reduce(masked, axis).data
    # Collapse 0-d results to a plain scalar.
    return result[()] if result.ndim == 0 else result
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """Compute the trimmed sample standard deviation.

    This is the square root of the trimmed variance `tvar`, so with the
    default ``ddof=1`` it applies the unbiased ``n / (n - 1)`` correction.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside these bounds are ignored; None (default) keeps
        everything, and either bound may itself be None (half-open
        interval).
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit count.
        Default is (True, True).
    axis : int or None, optional
        Axis along which to operate (default 0; None means the whole
        array).
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tstd : float
        Trimmed sample standard deviation.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tstd(x)
    5.9160797830996161
    >>> stats.tstd(x, (3,17))
    4.4721359549995796
    """
    trimmed_variance = tvar(a, limits, inclusive, axis, ddof)
    return np.sqrt(trimmed_variance)
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """Compute the trimmed standard error of the mean.

    This function finds the standard error of the mean for given
    values, ignoring values outside the given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside these bounds are ignored; None (default) keeps
        everything, and either bound may itself be None (half-open
        interval).
    inclusive : (bool, bool), optional
        Whether values exactly equal to the lower/upper limit count.
        Default is (True, True).
    axis : int or None, optional
        Axis along which to operate (default 0; None means the whole
        array).
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tsem : float
        Trimmed standard error of the mean.

    Notes
    -----
    `tsem` uses the unbiased sample standard deviation, i.e. a
    correction factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tsem(x)
    1.3228756555322954
    >>> stats.tsem(x, (3,17))
    1.1547005383792515
    """
    flat = np.asarray(a).ravel()
    if limits is None:
        return flat.std(ddof=ddof) / np.sqrt(flat.size)
    trimmed = _mask_to_limits(flat, limits, inclusive)
    trimmed_std = np.sqrt(np.ma.var(trimmed, ddof=ddof, axis=axis))
    return trimmed_std / np.sqrt(trimmed.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
    r"""Calculate the nth moment about the mean for a sample.

    The k-th central moment of a data sample is

    .. math::

        m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k

    with denominator n (no degrees-of-freedom correction).

    Parameters
    ----------
    a : array_like
        Input array.
    moment : int or array_like of ints, optional
        Order of central moment that is returned. Default is 1.
    axis : int or None, optional
        Axis along which the central moment is computed. Default is 0.
        If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to handle nan values in the input (default 'propagate').

    Returns
    -------
    n-th central moment : ndarray or float
        The appropriate moment along the given axis or over all values
        if axis is None.

    See Also
    --------
    kurtosis, skew, describe

    Examples
    --------
    >>> from scipy.stats import moment
    >>> moment([1, 2, 3, 4, 5], moment=1)
    0.0
    >>> moment([1, 2, 3, 4, 5], moment=2)
    2.0
    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.moment(a, moment, axis)
    if a.size == 0:
        # Empty input: NaN result(s), shaped like `moment` when it is a
        # sequence of orders.
        if np.isscalar(moment):
            return np.nan
        return np.full(np.asarray(moment).shape, np.nan, dtype=np.float64)
    if np.isscalar(moment):
        return _moment(a, moment, axis)
    # A sequence of orders yields one value per requested order.
    return np.array([_moment(a, order, axis) for order in moment])
def _moment(a, moment, axis):
    """Compute the `moment`-th central moment of `a` along `axis`.

    Core worker for `moment`; here `moment` must be a scalar integer.
    Orders 0 and 1 are returned directly by definition; higher orders
    use exponentiation by squares to build the required power of the
    zero-mean data with a minimal number of multiplications.
    """
    if np.abs(moment - np.round(moment)) > 0:
        raise ValueError("All moment parameters must be integers")
    if moment == 0:
        # When moment equals 0, the result is 1, by definition.
        shape = list(a.shape)
        del shape[axis]
        if shape:
            # return an actual array of the appropriate shape
            return np.ones(shape, dtype=float)
        else:
            # the input was 1D, so return a scalar instead of a rank-0 array
            return 1.0
    elif moment == 1:
        # By definition the first moment about the mean is 0.
        shape = list(a.shape)
        del shape[axis]
        if shape:
            # return an actual array of the appropriate shape
            return np.zeros(shape, dtype=float)
        else:
            # the input was 1D, so return a scalar instead of a rank-0 array
            return np.float64(0.0)
    else:
        # Exponentiation by squares: form exponent sequence
        # (e.g. 11 -> [11, 5, 2]); each step halves the exponent, after
        # subtracting 1 when it is odd, until it reaches 2 or less.
        n_list = [moment]
        current_n = moment
        while current_n > 2:
            if current_n % 2:
                current_n = (current_n - 1) / 2
            else:
                current_n /= 2
            n_list.append(current_n)
        # Starting point for exponentiation by squares
        a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
        if n_list[-1] == 1:
            s = a_zero_mean.copy()
        else:
            s = a_zero_mean**2
        # Perform multiplications
        # Walk the exponent sequence backwards: square at each step, and
        # multiply in one extra factor where the next exponent is odd.
        for n in n_list[-2::-1]:
            s = s**2
            if n % 2:
                s *= a_zero_mean
        return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
    """Compute the coefficient of variation.

    The coefficient of variation is the ratio of the biased standard
    deviation to the mean.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate the coefficient of variation.
        Default is 0. If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to handle nan values in the input (default 'propagate').

    Returns
    -------
    variation : ndarray
        The calculated variation along the requested axis.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall:
       New York. 2000.

    Examples
    --------
    >>> from scipy.stats import variation
    >>> variation([1, 2, 3, 4, 5])
    0.47140452079103173
    """
    arr, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(arr, nan_policy)
    if contains_nan and nan_policy == 'omit':
        masked = ma.masked_invalid(arr)
        return mstats_basic.variation(masked, axis)
    return arr.std(axis) / arr.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
    r"""Compute the sample skewness of a data set.

    For normally distributed data the skewness should be about zero; a
    value greater than zero means more weight in the right tail.  The
    statistic is the Fisher-Pearson coefficient

    .. math::

        g_1 = \frac{m_3}{m_2^{3/2}}

    built from the biased sample central moments; with ``bias=False``
    the adjusted Fisher-Pearson standardized moment coefficient
    :math:`G_1` is returned instead.  Use `skewtest` to judge whether
    the value is statistically close to zero.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int or None, optional
        Axis along which skewness is calculated. Default is 0.
        If None, compute over the whole array `a`.
    bias : bool, optional
        If False, then the calculations are corrected for statistical
        bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to handle nan values in the input (default 'propagate').

    Returns
    -------
    skewness : ndarray
        The skewness of values along an axis, returning 0 where all
        values are equal.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall:
       New York. 2000. Section 2.2.24.1

    Examples
    --------
    >>> from scipy.stats import skew
    >>> skew([1, 2, 3, 4, 5])
    0.0
    >>> skew([2, 8, 0, 4, 1, 9, 9, 0])
    0.2650554122698573
    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.skew(a, axis, bias)
    m2 = moment(a, 2, axis)
    m3 = moment(a, 3, axis)
    # Constant data has zero variance; define the skewness there as 0
    # rather than dividing by zero.
    zero_variance = (m2 == 0)
    vals = _lazywhere(~zero_variance, (m2, m3),
                      lambda m2, m3: m3 / m2**1.5,
                      0.)
    if not bias:
        # The bias correction needs n > 2 and a strictly positive variance.
        can_correct = (n > 2) & (m2 > 0)
        if can_correct.any():
            m2_ok = np.extract(can_correct, m2)
            m3_ok = np.extract(can_correct, m3)
            corrected = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3_ok / m2_ok**1.5
            np.place(vals, can_correct, corrected)
    return vals.item() if vals.ndim == 0 else vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
    """Compute the kurtosis (Fisher or Pearson) of a dataset.

    Kurtosis is the fourth central moment divided by the square of the
    variance.  With ``fisher=True`` 3.0 is subtracted from the result so
    a normal distribution scores 0.0; with ``bias=False`` k statistics
    are used to remove the bias of the moment estimators.  Use
    `kurtosistest` to see if the result is close enough to normal.

    Parameters
    ----------
    a : array
        Data for which the kurtosis is calculated.
    axis : int or None, optional
        Axis along which the kurtosis is calculated. Default is 0.
        If None, compute over the whole array `a`.
    fisher : bool, optional
        If True, Fisher's definition is used (normal ==> 0.0). If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool, optional
        If False, then the calculations are corrected for statistical
        bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to handle nan values in the input (default 'propagate').

    Returns
    -------
    kurtosis : array
        The kurtosis of values along an axis. If all values are equal,
        return -3 for Fisher's definition and 0 for Pearson's
        definition.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall:
       New York. 2000.

    Examples
    --------
    >>> from scipy.stats import norm, kurtosis
    >>> data = norm.rvs(size=1000, random_state=3)
    >>> kurtosis(data)
    -0.06928694200380558
    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.kurtosis(a, axis, fisher, bias)
    n = a.shape[axis]
    m2 = moment(a, 2, axis)
    m4 = moment(a, 4, axis)
    zero_variance = (m2 == 0)
    # Constant data gets a (Pearson) kurtosis of 0; silence the 0/0
    # warnings while computing the general ratio.
    olderr = np.seterr(all='ignore')
    try:
        vals = np.where(zero_variance, 0, m4 / m2**2.0)
    finally:
        np.seterr(**olderr)
    if not bias:
        # The bias correction needs n > 3 and a strictly positive variance.
        can_correct = (n > 3) & (m2 > 0)
        if can_correct.any():
            m2_ok = np.extract(can_correct, m2)
            m4_ok = np.extract(can_correct, m4)
            nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4_ok/m2_ok**2.0 - 3*(n-1)**2.0)
            np.place(vals, can_correct, nval + 3.0)
    if vals.ndim == 0:
        vals = vals.item()  # array scalar
    return vals - 3 if fisher else vals
DescribeResult = namedtuple('DescribeResult',
                            ('nobs', 'minmax', 'mean', 'variance', 'skewness',
                             'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
    """Compute several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom (only for variance). Default is 1.
    bias : bool, optional
        If False, then the skewness and kurtosis calculations are
        corrected for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to handle nan values in the input (default 'propagate').

    Returns
    -------
    DescribeResult
        Named tuple with fields ``nobs`` (observation count along
        `axis`), ``minmax`` (min, max), ``mean``, ``variance``
        (unbiased, per `ddof`), ``skewness`` and ``kurtosis`` (Fisher).

    Raises
    ------
    ValueError
        If the input is empty.

    See Also
    --------
    skew, kurtosis

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(10)
    >>> stats.describe(a)
    DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.166666666666666,
                   skewness=0.0, kurtosis=-1.2242424242424244)
    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.describe(a, axis, ddof, bias)
    if a.size == 0:
        raise ValueError("The input must not be empty.")
    return DescribeResult(
        nobs=a.shape[axis],
        minmax=(np.min(a, axis=axis), np.max(a, axis=axis)),
        mean=np.mean(a, axis=axis),
        variance=np.var(a, axis=axis, ddof=ddof),
        skewness=skew(a, axis, bias=bias),
        kurtosis=kurtosis(a, axis, bias=bias))
#####################################
# NORMALITY TESTS #
#####################################
# Result container for `skewtest`: the z-score statistic and its
# two-sided p-value.
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Test whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
Two-sided p-value for the hypothesis test.
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
Examples
--------
>>> from scipy.stats import skewtest
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)
>>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)
>>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = a.shape[axis]
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
    """
    Test whether a dataset has normal kurtosis.
    This function tests the null hypothesis that the kurtosis
    of the population from which the sample was drawn is that
    of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
    Parameters
    ----------
    a : array
        Array of the sample data.
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):
        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values
    Returns
    -------
    statistic : float
        The computed z-score for this test.
    pvalue : float
        The two-sided p-value for the hypothesis test.
    Notes
    -----
    Valid only for n>20. This function uses the method described in [1]_.
    References
    ----------
    .. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
       statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
    Examples
    --------
    >>> from scipy.stats import kurtosistest
    >>> kurtosistest(list(range(20)))
    KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
    >>> np.random.seed(28041990)
    >>> s = np.random.normal(0, 1, 1000)
    >>> kurtosistest(s)
    KurtosistestResult(statistic=1.2317590987707365, pvalue=0.21803908613450895)
    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # When omitting NaNs, delegate to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.kurtosistest(a, axis)
    n = a.shape[axis]
    if n < 5:
        raise ValueError(
            "kurtosistest requires at least 5 observations; %i observations"
            " were given." % int(n))
    if n < 20:
        # The Anscombe-Glynn approximation is only accurate for n >= 20;
        # warn but continue for 5 <= n < 20.
        warnings.warn("kurtosistest only valid for n>=20 ... continuing "
                      "anyway, n=%i" % int(n))
    # Sample kurtosis using the Pearson definition (normal ==> 3.0).
    b2 = kurtosis(a, axis, fisher=False)
    E = 3.0*(n-1) / (n+1)
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))  # [1]_ Eq. 1
    x = (b2-E) / np.sqrt(varb2)  # [1]_ Eq. 4
    # [1]_ Eq. 2:
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
                                                        (n*(n-2)*(n-3)))
    # [1]_ Eq. 3:
    A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 - 2/(9.0*A)
    denom = 1 + x*np.sqrt(2/(A-4.0))
    # Where `denom` is zero the statistic is undefined; produce NaN there
    # instead of dividing by zero, and warn about it below.
    term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
                                      np.power((1-2.0/A)/np.abs(denom), 1/3.0))
    if np.any(denom == 0):
        msg = "Test statistic not defined in some cases due to division by " \
              "zero. Return nan in that case..."
        warnings.warn(msg, RuntimeWarning)
    Z = (term1 - term2) / np.sqrt(2/(9.0*A))  # [1]_ Eq. 5
    if Z.ndim == 0:
        # Unwrap 0-d arrays to plain scalars for a cleaner return value.
        Z = Z[()]
    # zprob uses upper tail, so Z needs to be positive
    return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
    """
    Test whether a sample differs from a normal distribution.

    D'Agostino and Pearson's omnibus test of normality: the squared
    z-scores of `skewtest` and `kurtosistest` are summed and compared
    against a chi-squared distribution with 2 degrees of freedom.

    Parameters
    ----------
    a : array_like
        The array containing the sample to be tested.
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan:
        'propagate' returns nan, 'raise' throws an error and
        'omit' performs the calculations ignoring nan values.

    Returns
    -------
    statistic : float or array
        ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
        ``k`` is the z-score returned by `kurtosistest`.
    pvalue : float or array
        A 2-sided chi squared probability for the hypothesis test.

    References
    ----------
    .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
           moderate and large sample size", Biometrika, 58, 341-348
    .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
           normality", Biometrika, 60, 613-622
    """
    a, axis = _chk_asarray(a, axis)
    has_nan, nan_policy = _contains_nan(a, nan_policy)
    if has_nan and nan_policy == 'omit':
        # NaN-omitting computation is delegated to the masked-array version.
        return mstats_basic.normaltest(ma.masked_invalid(a), axis)
    z_skew, _ = skewtest(a, axis)
    z_kurt, _ = kurtosistest(a, axis)
    statistic = z_skew*z_skew + z_kurt*z_kurt
    return NormaltestResult(statistic, distributions.chi2.sf(statistic, 2))
def jarque_bera(x):
    """
    Perform the Jarque-Bera goodness of fit test on sample data.

    The test checks whether the sample skewness and kurtosis match those
    of a normal distribution.  The statistic is asymptotically
    chi-squared with 2 degrees of freedom, so a large sample
    (> 2000 observations) is needed for the p-value to be reliable.

    Parameters
    ----------
    x : array_like
        Observations of a random variable.

    Returns
    -------
    jb_value : float
        The test statistic.
    p : float
        The p-value for the hypothesis test.

    Raises
    ------
    ValueError
        If `x` is empty.

    References
    ----------
    .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
           homoscedasticity and serial independence of regression residuals",
           6 Econometric Letters 255-259.
    """
    data = np.asarray(x)
    n = data.size
    if n == 0:
        raise ValueError('At least one observation is required.')
    centered = data - data.mean()
    # Biased (1/n) central moments; m2 is reused by both ratios.
    m2 = 1 / n * np.sum(centered**2)
    skewness = (1 / n * np.sum(centered**3)) / m2**(3 / 2.)
    kurtosis = (1 / n * np.sum(centered**4)) / m2**2
    jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
    p = 1 - distributions.chi2.cdf(jb_value, 2)
    return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
@np.deprecate(message="`itemfreq` is deprecated and will be removed in a "
              "future version. Use instead `np.unique(..., return_counts=True)`")
def itemfreq(a):
    """
    Return a 2-D array of item frequencies.

    Parameters
    ----------
    a : (N,) array_like
        Input array.

    Returns
    -------
    itemfreq : (K, 2) ndarray
        A 2-D frequency table. Column 1 contains sorted, unique values from
        `a`, column 2 contains their respective counts.
    """
    # np.unique already sorts the values and can count them directly.
    values, counts = np.unique(a, return_counts=True)
    return np.array([values, counts]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
                      axis=None):
    """
    Calculate the score at a given percentile of the input sequence.

    The score at ``per=50`` is the median.  When the desired quantile
    falls between two data points it is resolved according to
    `interpolation_method`.

    Parameters
    ----------
    a : array_like
        A 1-D array of values from which to extract score.
    per : array_like
        Percentile(s) at which to extract score. Values should be in range
        [0,100].
    limit : tuple, optional
        Tuple of two scalars, the lower and upper limits within which to
        compute the percentile. Values of `a` outside this (closed)
        interval will be ignored.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        How to interpolate between surrounding data points `i` and `j`:
        'fraction' gives ``i + (j - i) * fraction``, 'lower' gives ``i``
        and 'higher' gives ``j``.  Default is 'fraction'.
    axis : int, optional
        Axis along which the percentiles are computed. Default is None. If
        None, compute over the whole array `a`.

    Returns
    -------
    score : float or ndarray
        Score at percentile(s).

    Notes
    -----
    `numpy.percentile` provides the same functionality and is faster;
    prefer it on NumPy >= 1.9.
    """
    a = np.asarray(a)
    if a.size == 0:
        # Empty input: return NaN(s) mirroring the shape of `per`.
        if np.isscalar(per):
            return np.nan
        return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)
    if limit:
        lo, hi = limit
        a = a[(lo <= a) & (a <= hi)]
    sorted_a = np.sort(a, axis=axis)
    if axis is None:
        axis = 0
    return _compute_qth_percentile(sorted_a, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted_, i,
interpolation_method, axis)
for i in per]
return np.array(score)
if not (0 <= per <= 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted_.ndim
idx = per / 100. * (sorted_.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted_.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
    """
    Compute the percentile rank of a score relative to a list of scores.

    A `percentileofscore` of, for example, 80% means that 80% of the
    scores in `a` are below the given score.  Ties and gaps are handled
    according to `kind`.

    Parameters
    ----------
    a : array_like
        Array of scores to which `score` is compared.
    score : int or float
        Score that is compared to the elements in `a`.
    kind : {'rank', 'weak', 'strict', 'mean'}, optional
        How ties are interpreted (default is 'rank'):
        * 'rank': average percentage ranking of the score; multiple
          matches average the rankings of all matching scores.
        * 'weak': fraction of values less than or equal to `score`
          (a cumulative distribution function).
        * 'strict': fraction of values strictly less than `score`.
        * 'mean': the average of the 'weak' and 'strict' scores.

    Returns
    -------
    pcos : float
        Percentile-position of score (0-100) relative to `a`.
        NaN if `score` is NaN; 100.0 if `a` is empty.

    See Also
    --------
    numpy.percentile
    """
    if np.isnan(score):
        return np.nan
    a = np.asarray(a)
    n = len(a)
    if n == 0:
        # By convention an empty comparison set yields 100%.
        return 100.0
    n_below = np.count_nonzero(a < score)
    n_below_or_eq = np.count_nonzero(a <= score)
    if kind == 'rank':
        tie_bump = 1 if n_below_or_eq > n_below else 0
        return (n_below_or_eq + n_below + tie_bump) * 50.0 / n
    if kind == 'strict':
        return n_below / n * 100
    if kind == 'weak':
        return n_below_or_eq / n * 100
    if kind == 'mean':
        return (n_below + n_below_or_eq) / n * 50
    raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Create a histogram.
Separate the range into several bins and return the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if default limits is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
                           ('cumcount', 'lowerlimit', 'binsize',
                            'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Return a cumulative frequency histogram, using the histogram function.

    A cumulative histogram counts the cumulative number of observations
    in all of the bins up to the specified bin.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If not given, ``(a.min() - s, a.max() + s)`` is used, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0.

    Returns
    -------
    cumcount : ndarray
        Binned values of cumulative frequency.
    lowerlimit : float
        Lower real limit.
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> from scipy import stats
    >>> res = stats.cumfreq([1, 4, 2, 1, 3, 1], numbins=4,
    ...                     defaultreallimits=(1.5, 5))
    >>> res.cumcount
    array([ 1.,  2.,  3.,  3.])
    >>> res.extrapoints
    3
    """
    counts, lowerlimit, binsize, extrapoints = _histogram(
        a, numbins, defaultreallimits, weights=weights)
    # Running total over the bins gives the cumulative counts.
    cumcounts = np.cumsum(counts, axis=0)
    return CumfreqResult(cumcounts, lowerlimit, binsize, extrapoints)
RelfreqResult = namedtuple('RelfreqResult',
                           ('frequency', 'lowerlimit', 'binsize',
                            'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Return a relative frequency histogram, using the histogram function.

    A relative frequency histogram maps the number of observations in
    each bin relative to the total number of observations.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If not given, ``(a.min() - s, a.max() + s)`` is used, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0.

    Returns
    -------
    frequency : ndarray
        Binned values of relative frequency.
    lowerlimit : float
        Lower real limit.
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> from scipy import stats
    >>> res = stats.relfreq([2, 4, 1, 2, 3, 2], numbins=4)
    >>> res.frequency
    array([ 0.16666667,  0.5       ,  0.16666667,  0.16666667])
    """
    a = np.asanyarray(a)
    counts, lowerlimit, binsize, extrapoints = _histogram(
        a, numbins, defaultreallimits, weights=weights)
    # Normalize by the number of observations along the first axis.
    frequency = counts / a.shape[0]
    return RelfreqResult(frequency, lowerlimit, binsize, extrapoints)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
    """
    Compute the O'Brien transform on input data (any number of arrays).

    Used to test for homogeneity of variance prior to running one-way
    stats; each array in ``*args`` is one level of a factor.  If
    `f_oneway` on the transformed data is significant, the variances are
    unequal (Maxwell & Delaney [1]_, p.112).

    Parameters
    ----------
    args : tuple of array_like
        Any number of arrays.

    Returns
    -------
    obrientransform : ndarray
        Transformed data for use in an ANOVA. The first dimension
        of the result corresponds to the sequence of transformed
        arrays. If the arrays given are all 1-D of the same length,
        the return value is a 2-D array; otherwise it is a 1-D array
        of type object, with each element being an ndarray.

    Raises
    ------
    ValueError
        If the mean of the transformed data does not reproduce the
        sample variance of the original data.

    References
    ----------
    .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
           Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
    """
    TINY = np.sqrt(np.finfo(float).eps)
    transformed = []
    for arg in args:
        a = np.asarray(arg)
        n = len(a)
        sq = (a - np.mean(a))**2
        sumsq = sq.sum()
        # The O'Brien transform.
        t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
        # Sanity check: the mean of the transformed data must equal the
        # sample variance of the original data.
        if abs(sumsq / (n - 1) - np.mean(t)) > TINY:
            raise ValueError('Lack of convergence in obrientransform.')
        transformed.append(t)
    return np.array(transformed)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
    """
    Compute standard error of the mean.

    The standard error of the mean is the standard deviation of the
    sample divided by the square root of the sample size.

    Parameters
    ----------
    a : array_like
        An array containing the values for which the standard error is
        returned.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees-of-freedom. How many degrees of freedom to adjust
        for bias in limited samples relative to the population estimate
        of variance. Defaults to 1.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan:
        'propagate' returns nan, 'raise' throws an error and
        'omit' performs the calculations ignoring nan values.

    Returns
    -------
    s : ndarray or float
        The standard error of the mean in the sample(s), along the input axis.

    Notes
    -----
    The default value for `ddof` (1) differs from the default (0) used
    by other ddof-containing routines such as np.std and np.nanstd.
    """
    a, axis = _chk_asarray(a, axis)
    has_nan, nan_policy = _contains_nan(a, nan_policy)
    if has_nan and nan_policy == 'omit':
        # NaN-omitting computation is delegated to the masked-array version.
        return mstats_basic.sem(ma.masked_invalid(a), axis, ddof)
    n = a.shape[axis]
    return np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
    """
    Compute the z score.

    Standardize each value in the sample relative to the sample mean and
    standard deviation.

    Parameters
    ----------
    a : array_like
        An array like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    zscore : array_like
        The z-scores, standardized by mean and standard deviation of
        input array `a`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of
    `asarray` for parameters).
    """
    a = np.asanyarray(a)
    has_nan, nan_policy = _contains_nan(a, nan_policy)
    if has_nan and nan_policy == 'omit':
        # NaN-aware statistics; keepdims so broadcasting works below.
        center = np.nanmean(a=a, axis=axis, keepdims=True)
        spread = np.nanstd(a=a, axis=axis, ddof=ddof, keepdims=True)
    else:
        center = a.mean(axis=axis, keepdims=True)
        spread = a.std(axis=axis, ddof=ddof, keepdims=True)
    return (a - center) / spread
def zmap(scores, compare, axis=0, ddof=0):
    """
    Calculate the relative z-scores.

    Standardize `scores` to zero mean and unit variance, where mean and
    variance are calculated from the `compare` array.

    Parameters
    ----------
    scores : array_like
        The input for which z-scores are calculated.
    compare : array_like
        The input from which the mean and standard deviation of the
        normalization are taken; assumed to have the same dimension as
        `scores`.
    axis : int or None, optional
        Axis over which mean and variance of `compare` are calculated.
        Default is 0. If None, compute over the whole array `scores`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.

    Returns
    -------
    zscore : array_like
        Z-scores, in the same shape as `scores`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of
    `asarray` for parameters).
    """
    scores = np.asanyarray(scores)
    compare = np.asanyarray(compare)
    # keepdims so the reductions broadcast back against `scores`.
    center = compare.mean(axis=axis, keepdims=True)
    spread = compare.std(axis=axis, ddof=ddof, keepdims=True)
    return (scores - center) / spread
def gstd(a, axis=0, ddof=1):
    """
    Calculate the geometric standard deviation of an array.

    The geometric standard deviation describes the spread of a set of numbers
    where the geometric mean is preferred. It is a multiplicative factor, and
    so a dimensionless quantity.

    It is defined as the exponent of the standard deviation of ``log(a)``.
    Mathematically the population geometric standard deviation can be
    evaluated as::

        gstd = exp(std(log(a)))

    .. versionadded:: 1.3.0

    Parameters
    ----------
    a : array_like
        An array like object containing the sample data.
    axis : int, tuple or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degree of freedom correction in the calculation of the
        geometric standard deviation. Default is 1.

    Returns
    -------
    ndarray or float
        An array of the geometric standard deviation. If `axis` is None or `a`
        is a 1d array a float is returned.

    Notes
    -----
    As the calculation requires the use of logarithms the geometric standard
    deviation only supports strictly positive values. Any non-positive or
    infinite values will raise a `ValueError`.

    The geometric standard deviation is sometimes confused with the exponent of
    the standard deviation, ``exp(std(a))``. Instead the geometric standard
    deviation is ``exp(std(log(a)))``.

    The default value for `ddof` is different to the default value (0) used
    by other ddof containing functions, such as ``np.std`` and ``np.nanstd``.

    Examples
    --------
    Find the geometric standard deviation of a log-normally distributed sample.
    Note that the standard deviation of the distribution is one, on a
    log scale this evaluates to approximately ``exp(1)``.

    >>> from scipy.stats import gstd
    >>> np.random.seed(123)
    >>> sample = np.random.lognormal(mean=0, sigma=1, size=1000)
    >>> gstd(sample)
    2.7217860664589946

    Compute the geometric standard deviation of a multidimensional array and
    of a given axis.

    >>> a = np.arange(1, 25).reshape(2, 3, 4)
    >>> gstd(a, axis=None)
    2.2944076136018947
    >>> gstd(a, axis=2)
    array([[1.82424757, 1.22436866, 1.13183117],
           [1.09348306, 1.07244798, 1.05914985]])
    >>> gstd(a, axis=(1,2))
    array([2.12939215, 1.22120169])

    The geometric standard deviation further handles masked arrays.

    >>> a = np.arange(1, 25).reshape(2, 3, 4)
    >>> ma = np.ma.masked_where(a > 16, a)
    >>> ma
    masked_array(
      data=[[[1, 2, 3, 4],
             [5, 6, 7, 8],
             [9, 10, 11, 12]],
            [[13, 14, 15, 16],
             [--, --, --, --],
             [--, --, --, --]]],
      mask=[[[False, False, False, False],
             [False, False, False, False],
             [False, False, False, False]],
            [[False, False, False, False],
             [ True,  True,  True,  True],
             [ True,  True,  True,  True]]],
      fill_value=999999)
    >>> gstd(ma, axis=2)
    masked_array(
      data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],
            [1.0934830582350938, --, --]],
      mask=[[False, False, False],
            [False,  True,  True]],
      fill_value=999999)
    """
    a = np.asanyarray(a)
    # Use the masked log for masked arrays so masked entries stay masked
    # instead of being turned into nan.
    log = ma.log if isinstance(a, ma.MaskedArray) else np.log

    try:
        with warnings.catch_warnings():
            # Promote RuntimeWarnings (invalid value in log, degrees of
            # freedom <= 0, ...) to exceptions so invalid input can be
            # diagnosed in the handler below.
            warnings.simplefilter("error", RuntimeWarning)
            return np.exp(np.std(log(a), axis=axis, ddof=ddof))
    except RuntimeWarning as w:
        # Infinite values are rejected outright.
        if np.isinf(a).any():
            raise ValueError(
                'Infinite value encountered. The geometric standard deviation '
                'is defined for strictly positive values only.')
        a_nan = np.isnan(a)
        a_nan_any = a_nan.any()
        # exclude NaN's from negativity check, but
        # avoid expensive masking for arrays with no NaN
        if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or
                (not a_nan_any and np.less_equal(a, 0).any())):
            raise ValueError(
                'Non positive value encountered. The geometric standard '
                'deviation is defined for strictly positive values only.')
        elif 'Degrees of freedom <= 0 for slice' == str(w):
            # Re-raise the ddof problem as a ValueError, consistent with
            # the explicit checks above.
            raise ValueError(w)
        else:
            # Remaining warnings don't need to be exceptions.
            return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof))
    except TypeError:
        raise ValueError(
            'Invalid array input. The inputs could not be '
            'safely coerced to any supported types')
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
# 'normal' is 2*sqrt(2)*erfinv(1/2) ~= 1.349; dividing an IQR by it makes
# the result consistent with the standard deviation of normally
# distributed data (see the `scale` parameter of `iqr`).
_scale_conversions = {'raw': 1.0,
                      'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
        interpolation='linear', keepdims=False):
    r"""
    Compute the interquartile range of the data along the specified axis.

    The interquartile range (IQR) is the difference between the 75th and
    25th percentile of the data. It is a measure of the dispersion
    similar to standard deviation or variance, but is much more robust
    against outliers [2]_.

    The ``rng`` parameter allows this function to compute other
    percentile ranges than the actual IQR. For example, setting
    ``rng=(0, 100)`` is equivalent to `numpy.ptp`.

    The IQR of an empty array is `np.nan`.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    x : array_like
        Input array or object that can be converted to an array.
    axis : int or sequence of int, optional
        Axis along which the range is computed. The default is to
        compute the IQR for the entire array.
    rng : Two-element sequence containing floats in range of [0,100] optional
        Percentiles over which to compute the range. Each must be
        between 0 and 100, inclusive. The default is the true IQR:
        `(25, 75)`. The order of the elements is not important.
    scale : scalar or str, optional
        The numerical value of scale will be divided out of the final
        result. The string values 'raw' (no scaling) and 'normal'
        (scale by :math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`)
        are recognized. The default is 'raw'. Array-like scale is also
        allowed, as long as it broadcasts correctly to the output such
        that ``out / scale`` is a valid operation.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan: return nan
        ('propagate', the default), throw an error ('raise'), or ignore
        the nan values ('omit').
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
        Interpolation method to use when a percentile boundary lies
        between two data points `i` and `j`. Default is 'linear'.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts correctly against `x`.

    Returns
    -------
    iqr : scalar or ndarray
        If ``axis=None``, a scalar is returned. If the input contains
        integers or floats of smaller precision than ``np.float64``, then the
        output data-type is ``np.float64``. Otherwise, the output data-type is
        the same as that of the input.

    See Also
    --------
    numpy.std, numpy.var

    Notes
    -----
    The exact behavior depends on the installed `numpy` version; versions
    older than 1.11.0b3 have degraded `nan_policy`, `keepdims` and
    `interpolation` support (the private percentile wrappers emit
    warnings in those cases).

    References
    ----------
    .. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
    .. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
    .. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile

    Examples
    --------
    >>> from scipy.stats import iqr
    >>> x = np.array([[10, 7, 4], [3, 2, 1]])
    >>> iqr(x)
    4.0
    >>> iqr(x, axis=0)
    array([ 3.5,  2.5,  1.5])
    >>> iqr(x, axis=1)
    array([ 3.,  1.])
    >>> iqr(x, axis=1, keepdims=True)
    array([[ 3.],
           [ 1.]])
    """
    x = asarray(x)

    # Empty input yields nan, mirroring `np.var` and `np.std`, and keeps
    # `np.percentile` from raising later.
    if not x.size:
        return np.nan

    # Resolve a string scale up front so an invalid value fails fast,
    # before any expensive percentile work.
    if isinstance(scale, string_types):
        scale_key = scale.lower()
        if scale_key not in _scale_conversions:
            raise ValueError("{0} not a valid scale for `iqr`".format(scale))
        scale = _scale_conversions[scale_key]

    # Choose the percentile implementation according to NaN handling.
    contains_nan, nan_policy = _contains_nan(x, nan_policy)
    if contains_nan and nan_policy == 'omit':
        percentile_func = _iqr_nanpercentile
    else:
        percentile_func = _iqr_percentile

    if len(rng) != 2:
        raise TypeError("quantile range must be two element sequence")
    if np.isnan(rng).any():
        raise ValueError("range must not contain NaNs")

    lo, hi = sorted(rng)
    pct = percentile_func(x, (lo, hi), axis=axis, interpolation=interpolation,
                          keepdims=keepdims, contains_nan=contains_nan)
    out = np.subtract(pct[1], pct[0])

    if scale != 1.0:
        out /= scale
    return out
def median_absolute_deviation(x, axis=0, center=np.median, scale=1.4826,
                              nan_policy='propagate'):
    """
    Compute the median absolute deviation of the data along the given axis.

    The median absolute deviation (MAD, [1]_) is the median over the
    absolute deviations from the median. It is a measure of dispersion
    similar to the standard deviation but more robust to outliers [2]_.

    The MAD of an empty array is ``np.nan``.

    .. versionadded:: 1.3.0

    Parameters
    ----------
    x : array_like
        Input array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the range is computed. Default is 0. If None,
        compute the MAD over the entire array.
    center : callable, optional
        A function that will return the central value. The default is to
        use np.median. Any user defined function used will need to have
        the function signature ``func(arr, axis)``.
    scale : int, optional
        The scaling factor applied to the MAD. The default scale (1.4826)
        ensures consistency with the standard deviation for normally
        distributed data.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan: return nan
        ('propagate', the default), throw an error ('raise'), or ignore
        the nan values ('omit').

    Returns
    -------
    mad : scalar or ndarray
        If ``axis=None``, a scalar is returned. If the input contains
        integers or floats of smaller precision than ``np.float64``, then the
        output data-type is ``np.float64``. Otherwise, the output data-type is
        the same as that of the input.

    See Also
    --------
    numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
    scipy.stats.tstd, scipy.stats.tvar

    Notes
    -----
    The `center` argument only affects the calculation of the central value
    around which the MAD is calculated. That is, passing in ``center=np.mean``
    will calculate the MAD around the mean - it will not calculate the *mean*
    absolute deviation.

    References
    ----------
    .. [1] "Median absolute deviation" https://en.wikipedia.org/wiki/Median_absolute_deviation
    .. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.array([[10, 7, 4], [3, 2, 1]])
    >>> stats.median_absolute_deviation(x)
    array([5.1891, 3.7065, 2.2239])
    >>> stats.median_absolute_deviation(x, axis=None)
    2.9652
    """
    x = asarray(x)

    # Empty input: consistent with `np.var` and `np.std`.
    if not x.size:
        return np.nan

    contains_nan, nan_policy = _contains_nan(x, nan_policy)
    if contains_nan and nan_policy == 'propagate':
        return np.nan

    if contains_nan and nan_policy == 'omit':
        # Compress the NaNs away once up front; way faster than carrying
        # the masks through the median computations.
        arr = ma.masked_invalid(x).compressed()
    else:
        arr = x

    if axis is None:
        med = center(arr)
        deviations = np.abs(arr - med)
        raw_mad = np.median(deviations)
    else:
        med = np.apply_over_axes(center, arr, axis)
        deviations = np.abs(arr - med)
        raw_mad = np.median(deviations, axis=axis)

    return scale * raw_mad
def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
    """
    Private wrapper that works around older versions of `numpy`.

    Computes ``np.percentile(x, q, ...)`` while degrading gracefully on
    numpy versions that lack some keywords or multi-axis support.

    While this function is pretty much necessary for the moment, it
    should be removed as soon as the minimum supported numpy version
    allows.
    """
    if contains_nan and NumpyVersion(np.__version__) < '1.10.0a':
        # I see no way to avoid the version check to ensure that the corrected
        # NaN behavior has been implemented except to call `percentile` on a
        # small array.
        msg = "Keyword nan_policy='propagate' not correctly supported for " \
              "numpy versions < 1.10.x. The default behavior of " \
              "`numpy.percentile` will be used."
        warnings.warn(msg, RuntimeWarning)

    try:
        # For older versions of numpy, there are two things that can cause a
        # problem here: missing keywords and non-scalar axis. The former can be
        # partially handled with a warning, the latter can be handled fully by
        # hacking in an implementation similar to numpy's function for
        # providing multi-axis functionality
        # (`numpy.lib.function_base._ureduce` for the curious).
        result = np.percentile(x, q, axis=axis, keepdims=keepdims,
                               interpolation=interpolation)
    except TypeError:
        if interpolation != 'linear' or keepdims:
            # At time of writing, this means np.__version__ < 1.9.0
            warnings.warn("Keywords interpolation and keepdims not supported "
                          "for your version of numpy", RuntimeWarning)

        try:
            # Special processing if axis is an iterable
            original_size = len(axis)
        except TypeError:
            # Axis is a scalar at this point
            pass
        else:
            # Fold the requested axes into a single trailing axis so the
            # older, scalar-axis-only `np.percentile` can be used.
            axis = np.unique(np.asarray(axis) % x.ndim)
            if original_size > axis.size:
                # mimic numpy if axes are duplicated
                raise ValueError("duplicate value in axis")
            if axis.size == x.ndim:
                # axis includes all axes: revert to None
                axis = None
            elif axis.size == 1:
                # no rolling necessary
                axis = axis[0]
            else:
                # roll multiple axes to the end and flatten that part out
                for ax in axis[::-1]:
                    x = np.rollaxis(x, ax, x.ndim)
                x = x.reshape(x.shape[:-axis.size] +
                              (np.prod(x.shape[-axis.size:]),))
                axis = -1
        result = np.percentile(x, q, axis=axis)

    return result
def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False,
                       contains_nan=False):
    """
    Private wrapper that works around the following:

      1. A bug in `np.nanpercentile` that was around until numpy version
         1.11.0.
      2. A bug in `np.percentile` NaN handling that was fixed in numpy
         version 1.10.0.
      3. The non-existence of `np.nanpercentile` before numpy version
         1.9.0.

    While this function is pretty much necessary for the moment, it
    should be removed as soon as the minimum supported numpy version
    allows.
    """
    if hasattr(np, 'nanpercentile'):
        # `np.nanpercentile` is available, i.e. np.__version__ >= 1.9.0
        # at time of writing. (The original comment here claimed
        # "< 1.9.0", which described the *else* branch below.)
        result = np.nanpercentile(x, q, axis=axis,
                                  interpolation=interpolation,
                                  keepdims=keepdims)

        # If non-scalar result and nanpercentile does not do proper axis roll.
        # I see no way of avoiding the version test since dimensions may just
        # happen to match in the data.
        if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
            axis = np.asarray(axis)
            if axis.size == 1:
                # If only one axis specified, reduction happens along that dimension
                if axis.ndim == 0:
                    axis = axis[None]
                result = np.rollaxis(result, axis[0])
            else:
                # If multiple axes, the reduced dimension is last
                result = np.rollaxis(result, -1)
    else:
        msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
              "versions < 1.9.x. The default behavior of numpy.percentile " \
              "will be used."
        warnings.warn(msg, RuntimeWarning)
        result = _iqr_percentile(x, q, axis=axis)

    return result
#####################################
# TRIMMING FUNCTIONS #
#####################################
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))


def sigmaclip(a, low=4., high=4.):
    """
    Perform iterative sigma-clipping of array elements.

    Starting from the full (raveled) sample, all elements outside the
    critical range are removed, i.e. every element ``c`` of the input
    satisfying either of::

        c < mean(c) - std(c)*low
        c > mean(c) + std(c)*high

    The mean and standard deviation are then recomputed from the
    surviving elements and the clipping repeats until no element falls
    outside the (updated) range.

    Parameters
    ----------
    a : array_like
        Data array, will be raveled if not 1-D.
    low : float, optional
        Lower bound factor of sigma clipping. Default is 4.
    high : float, optional
        Upper bound factor of sigma clipping. Default is 4.

    Returns
    -------
    clipped : ndarray
        Input array with clipped elements removed.
    lower : float
        Lower threshold value used for clipping.
    upper : float
        Upper threshold value used for clipping.

    Examples
    --------
    >>> from scipy.stats import sigmaclip
    >>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
    ...                     np.linspace(0, 20, 5)))
    >>> fact = 1.5
    >>> c, low, upp = sigmaclip(a, fact, fact)
    >>> c
    array([  9.96666667,  10.        ,  10.03333333,  10.        ])
    >>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
    ...                     np.linspace(-100, -50, 3)))
    >>> c, low, upp = sigmaclip(a, 1.8, 1.8)
    >>> (c == np.linspace(9.5, 10.5, 11)).all()
    True
    """
    c = np.asarray(a).ravel()
    clipped_any = True
    while clipped_any:
        n_before = c.size
        c_mean = c.mean()
        c_std = c.std()
        critlower = c_mean - c_std * low
        critupper = c_mean + c_std * high
        # Keep only the elements inside the current critical range; the
        # boolean mask preserves the original ordering.
        c = c[(c >= critlower) & (c <= critupper)]
        clipped_any = c.size != n_before
    return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
    """
    Slice off a proportion of items from both ends of an array.

    With `proportiontocut` = 0.1, slices off the leftmost 10% **and**
    rightmost 10% of scores; the trimmed values are the lowest and
    highest ones. If the proportion results in a non-integer slice index
    the function conservatively slices off fewer items.

    Parameters
    ----------
    a : array_like
        Data to trim.
    proportiontocut : float
        Proportion (in range 0-1) of total data set to trim of each end.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    out : ndarray
        Trimmed version of array `a`. The order of the trimmed content
        is undefined.

    See Also
    --------
    trim_mean

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20)
    >>> b = stats.trimboth(a, 0.1)
    >>> b.shape
    (16,)
    """
    a = np.asarray(a)
    if a.size == 0:
        return a

    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    if lowercut >= uppercut:
        raise ValueError("Proportion too big.")

    # np.partition gathers the `lowercut` smallest and the trailing
    # largest items at the ends, so a plain slice drops them without
    # needing a full sort.
    partitioned = np.partition(a, (lowercut, uppercut - 1), axis)
    keep = [slice(None)] * partitioned.ndim
    keep[axis] = slice(lowercut, uppercut)
    return partitioned[tuple(keep)]
def trim1(a, proportiontocut, tail='right', axis=0):
    """
    Slice off a proportion from ONE end of the passed array distribution.

    If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
    10% of scores. The lowest or highest values are trimmed (depending on
    the tail).
    Slice off less if proportion results in a non-integer slice index
    (i.e. conservatively slices off `proportiontocut` ).

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction to cut off of 'left' or 'right' of distribution.
    tail : {'left', 'right'}, optional
        Defaults to 'right'.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    trim1 : ndarray
        Trimmed version of array `a`. The order of the trimmed content is
        undefined.

    Raises
    ------
    ValueError
        If `tail` is neither 'left' nor 'right'.
    """
    a = np.asarray(a)

    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]

    # avoid possible corner case
    if proportiontocut >= 1:
        return []

    if tail.lower() == 'right':
        lowercut = 0
        uppercut = nobs - int(proportiontocut * nobs)
    elif tail.lower() == 'left':
        lowercut = int(proportiontocut * nobs)
        uppercut = nobs
    else:
        # Previously an unrecognized tail fell through and crashed with
        # an UnboundLocalError; fail with a clear message instead.
        raise ValueError("tail must be 'left' or 'right'")

    atmp = np.partition(a, (lowercut, uppercut - 1), axis)

    # Bug fix: slice along `axis` (as `trimboth` does). The previous
    # `atmp[lowercut:uppercut]` always sliced axis 0, returning wrong
    # results whenever axis != 0.
    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return atmp[tuple(sl)]
def trim_mean(a, proportiontocut, axis=0):
    """
    Return mean of array after trimming distribution from both tails.

    If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10%
    of scores before averaging. Slices off less if the proportion results
    in a non-integer slice index (i.e., conservatively slices off
    `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction to cut off of both tails of the distribution.
    axis : int or None, optional
        Axis along which the trimmed means are computed. Default is 0.
        If None, compute over the whole array `a`.

    Returns
    -------
    trim_mean : ndarray
        Mean of trimmed array.

    See Also
    --------
    trimboth
    tmean : Compute the trimmed mean ignoring values outside given `limits`.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.trim_mean(x, 0.1)
    9.5
    >>> x2 = x.reshape(5, 4)
    >>> stats.trim_mean(x2, 0.25)
    array([  8.,   9.,  10.,  11.])
    >>> stats.trim_mean(x2, 0.25, axis=1)
    array([  1.5,   5.5,   9.5,  13.5,  17.5])
    """
    a = np.asarray(a)
    if a.size == 0:
        return np.nan

    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    # Note: strictly greater here, unlike `trimboth`'s `>=`, so a cut
    # that leaves nothing (e.g. 0.5 on an even-length axis) is allowed.
    if lowercut > uppercut:
        raise ValueError("Proportion too big.")

    # Partition instead of a full sort: only the cut points need to be
    # in their sorted positions for the mean of the kept slice.
    partitioned = np.partition(a, (lowercut, uppercut - 1), axis)
    keep = [slice(None)] * partitioned.ndim
    keep[axis] = slice(lowercut, uppercut)
    return np.mean(partitioned[tuple(keep)], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))


def f_oneway(*args):
    """
    Perform one-way ANOVA.

    The one-way ANOVA tests the null hypothesis that two or more groups have
    the same population mean. The test is applied to samples from two or
    more groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group.

    Returns
    -------
    statistic : float
        The computed F-value of the test.
    pvalue : float
        The associated p-value from the F-distribution.

    Notes
    -----
    The ANOVA test has important assumptions that must be satisfied in order
    for the associated p-value to be valid:

    1. The samples are independent.
    2. Each sample is from a normally distributed population.
    3. The population standard deviations of the groups are all equal
       (homoscedasticity).

    If these assumptions are not true for a given set of data, it may still be
    possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
    with some loss of power.

    The algorithm is from Heiman [2]_, pp.394-7.

    References
    ----------
    .. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
           Chapter 14, 2014, http://vassarstats.net/textbook/
    .. [2] G.W. Heiman, "Understanding research methods and statistics: An
           integrated introduction for psychology", Houghton, Mifflin and
           Company, 2001.
    .. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
           http://www.biostathandbook.com/onewayanova.html

    Examples
    --------
    >>> import scipy.stats as stats

    [3]_ Here are some data on a shell measurement (the length of the anterior
    adductor muscle scar, standardized by dividing by length) in the mussel
    Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
    Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
    much larger data set used in McDonald et al. (1991).

    >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
    ...              0.0659, 0.0923, 0.0836]
    >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
    ...            0.0725]
    >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
    >>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
    ...            0.0689]
    >>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
    >>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
    (7.1210194716424473, 0.00028122423145345439)
    """
    samples = [np.asarray(sample, dtype=float) for sample in args]

    # ANOVA on N groups, each in its own array
    num_groups = len(samples)
    pooled = np.concatenate(samples)
    n_total = len(pooled)

    # Center the pooled data around zero before the sum-of-squares work:
    # variance is invariant to a shift in location, and centering vastly
    # improves numerical stability.
    offset = pooled.mean()
    pooled -= offset

    sstot = _sum_of_squares(pooled) - (_square_of_sums(pooled) / n_total)

    # Naming: bn/b are "between treatments", wn/w are "within treatments".
    ssbn = sum(_square_of_sums(sample - offset) / len(sample)
               for sample in samples)
    ssbn -= _square_of_sums(pooled) / n_total
    sswn = sstot - ssbn

    dfbn = num_groups - 1
    dfwn = n_total - num_groups
    msb = ssbn / dfbn
    msw = sswn / dfwn
    f = msb / msw

    prob = special.fdtrc(dfbn, dfwn, f)  # equivalent to stats.f.sf
    return F_onewayResult(f, prob)
class PearsonRConstantInputWarning(RuntimeWarning):
    """Warning generated by `pearsonr` when an input is constant."""

    def __init__(self, msg=None):
        # Default message; fixes the "coefficent" typo in the original.
        if msg is None:
            msg = ("An input array is constant; the correlation "
                   "coefficient is not defined.")
        self.args = (msg,)
class PearsonRNearConstantInputWarning(RuntimeWarning):
    """Warning generated by `pearsonr` when an input is nearly constant."""

    def __init__(self, msg=None):
        # Default message; fixes the "coefficent" typo in the original.
        if msg is None:
            msg = ("An input array is nearly constant; the computed "
                   "correlation coefficient may be inaccurate.")
        self.args = (msg,)
def pearsonr(x, y):
    """
    Pearson correlation coefficient and p-value for testing non-correlation.

    The Pearson correlation coefficient [1]_ measures the linear
    relationship between two datasets. Like other correlation
    coefficients, it varies between -1 and +1 with 0 implying no
    correlation; -1 or +1 imply an exact linear relationship. The
    p-value roughly indicates the probability of an uncorrelated system
    producing datasets that have a Pearson correlation at least as
    extreme as the one computed from these datasets; its calculation
    relies on the assumption that each dataset is normally distributed.

    Parameters
    ----------
    x : (N,) array_like
        Input array.
    y : (N,) array_like
        Input array.

    Returns
    -------
    r : float
        Pearson's correlation coefficient.
    p-value : float
        Two-tailed p-value.

    Warns
    -----
    PearsonRConstantInputWarning
        Raised if an input is a constant array. The correlation
        coefficient is not defined in this case, so ``np.nan`` is
        returned.
    PearsonRNearConstantInputWarning
        Raised if an input is "nearly" constant. The array ``x`` is
        considered nearly constant if ``norm(x - mean(x)) < 1e-13 *
        abs(mean(x))``. Numerical errors in ``x - mean(x)`` might then
        result in an inaccurate calculation of r.

    See Also
    --------
    spearmanr : Spearman rank-order correlation coefficient.
    kendalltau : Kendall's tau, a correlation measure for ordinal data.

    Notes
    -----
    Under the assumption that x and y are drawn from independent normal
    distributions (population correlation zero), the sample correlation
    coefficient r follows a beta distribution on [-1, 1] with equal
    shape parameters ``a = b = n/2 - 1`` ([1]_, [2]_). The returned
    two-sided p-value is ``2*dist.cdf(-abs(r))`` for that distribution.
    When n is 2, the only possible values of r for distinct points are
    +1 and -1, so the p-value is always 1.

    References
    ----------
    .. [1] "Pearson correlation coefficient", Wikipedia,
           https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
    .. [2] Student, "Probable error of a correlation coefficient",
           Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
    .. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
           of the Sample Product-Moment Correlation Coefficient"
           Journal of the Royal Statistical Society. Series C (Applied
           Statistics), Vol. 21, No. 1 (1972), pp. 1-12.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 0, 0, 1, 1, 1, 1])
    >>> b = np.arange(7)
    >>> stats.pearsonr(a, b)
    (0.8660254037844386, 0.011724811003954649)

    >>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    (-0.7426106572325057, 0.1505558088534455)
    """
    n = len(x)
    if n != len(y):
        raise ValueError('x and y must have the same length.')
    if n < 2:
        raise ValueError('x and y must have length at least 2.')

    x = np.asarray(x)
    y = np.asarray(y)

    # The correlation coefficient is undefined for a constant input.
    if (x == x[0]).all() or (y == y[0]).all():
        warnings.warn(PearsonRConstantInputWarning())
        return np.nan, np.nan

    # Carry out the calculation in at least 64 bit floating point; if an
    # input carries more precision (e.g. np.longdouble), keep it.
    out_dtype = type(1.0 + x[0] + y[0])

    # With exactly two distinct points r can only be +1 or -1, and since
    # every length-2 sample gives |r| = 1, the two-sided p-value is 1.
    if n == 2:
        return out_dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0])), 1.0

    xmean = x.mean(dtype=out_dtype)
    ymean = y.mean(dtype=out_dtype)

    # `astype` keeps the intermediate arithmetic at >= 64 bit precision.
    dx = x.astype(out_dtype) - xmean
    dy = y.astype(out_dtype) - ymean

    # scipy.linalg.norm does not overflow where sqrt((dx*dx).sum())
    # would for entries like [-5e210, 5e210, 3e200, -3e200].
    dx_norm = linalg.norm(dx)
    dy_norm = linalg.norm(dy)

    threshold = 1e-13
    if dx_norm < threshold*abs(xmean) or dy_norm < threshold*abs(ymean):
        # Nearly constant input: subtracting the mean may have lost
        # enough precision to make r inaccurate.
        warnings.warn(PearsonRNearConstantInputWarning())

    r = np.dot(dx/dx_norm, dy/dy_norm)

    # Any |r| > 1 can only be floating point noise; clamp into [-1, 1].
    if r > 1.0:
        r = 1.0
    elif r < -1.0:
        r = -1.0

    # Two-sided p-value: 2*cdf(-|r|) of the beta distribution on [-1, 1]
    # with shape a = b = n/2 - 1, evaluated through the [0, 1] beta CDF
    # `special.btdtr` at x = (1 - |r|)/2. r is cast to float64 because
    # btdtr raises a TypeError for higher precision input.
    ab = n/2 - 1
    prob = 2*special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
    return r, prob
def fisher_exact(table, alternative='two-sided'):
    """
    Perform a Fisher exact test on a 2x2 contingency table.
    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements should be non-negative integers.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):
        * 'two-sided'
        * 'less': one-sided
        * 'greater': one-sided
    Returns
    -------
    oddsratio : float
        This is prior odds ratio and not a posterior estimate.
    p_value : float
        P-value, the probability of obtaining a distribution at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.
    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.
    Notes
    -----
    The calculated odds ratio is different from the one R uses. This scipy
    implementation returns the (more common) "unconditional Maximum
    Likelihood Estimate", while R uses the "conditional Maximum Likelihood
    Estimate".
    For tables with large numbers, the (inexact) chi-square test implemented
    in the function `chi2_contingency` can also be used.
    Examples
    --------
    Say we spend a few days counting whales and sharks in the Atlantic and
    Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
    Indian ocean 2 whales and 5 sharks. Then our contingency table is::
                Atlantic  Indian
        whales     8        2
        sharks     1        5
    We use this table to find the p-value:
    >>> import scipy.stats as stats
    >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
    >>> pvalue
    0.0349...
    The probability that we would observe this or an even more imbalanced ratio
    by chance is about 3.5%. A commonly used significance level is 5%--if we
    adopt that, we can therefore conclude that our observed imbalance is
    statistically significant; whales prefer the Atlantic while sharks prefer
    the Indian ocean.
    """
    hypergeom = distributions.hypergeom
    c = np.asarray(table, dtype=np.int64)  # int32 is not enough for the algorithm
    if not c.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")
    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")
    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # If both values in a row or column are zero, the p-value is 1 and
        # the odds ratio is NaN.
        return np.nan, 1.0
    # Unconditional MLE of the odds ratio; infinite when either off-diagonal
    # count in the denominator is zero.
    if c[1, 0] > 0 and c[0, 1] > 0:
        oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
    else:
        oddsratio = np.inf
    # Fixed margins of the table: row totals (n1, n2) and first-column total (n).
    n1 = c[0, 0] + c[0, 1]
    n2 = c[1, 0] + c[1, 1]
    n = c[0, 0] + c[1, 0]
    def binary_search(n, n1, n2, side):
        """Binary search for where to begin halves in two-sided test."""
        # NOTE: closes over `mode`, `pexact` and `epsilon` from the enclosing
        # scope; it is only called on the 'two-sided' path, where those names
        # are defined.
        if side == "upper":
            minval = mode
            maxval = n
        else:
            minval = 0
            maxval = mode
        guess = -1
        while maxval - minval > 1:
            if maxval == minval + 1 and guess == minval:
                guess = maxval
            else:
                guess = (maxval + minval) // 2
            pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
            if side == "upper":
                ng = guess - 1
            else:
                ng = guess + 1
            # Stop when the boundary is bracketed: this guess is "at least as
            # extreme" as the observed table but its neighbor is not.
            if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
                break
            elif pguess < pexact:
                maxval = guess
            else:
                minval = guess
        if guess == -1:
            guess = minval
        # Nudge the guess until its pmf is within a factor `epsilon` of the
        # observed table's pmf (pexact), guarding against numerical noise.
        if side == "upper":
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess -= 1
            while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess += 1
        else:
            while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess += 1
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess -= 1
        return guess
    if alternative == 'less':
        pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
    elif alternative == 'greater':
        # Same formula as the 'less' case, but with the second column.
        pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
    elif alternative == 'two-sided':
        # Mode of the hypergeometric distribution of c[0, 0] given the margins.
        mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
        pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
        pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
        # Relative tolerance for comparing pmf values ("as extreme" test).
        epsilon = 1 - 1e-4
        if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
            # The observed table is (numerically) the mode, so every table is
            # at least as extreme.
            return oddsratio, 1.
        elif c[0, 0] < mode:
            # Observed count in the lower tail: add the matching upper tail.
            plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
            if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, plower
            guess = binary_search(n, n1, n2, "upper")
            pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
        else:
            # Observed count in the upper tail: add the matching lower tail.
            pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
            if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, pupper
            guess = binary_search(n, n1, n2, "lower")
            pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
    else:
        msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
        raise ValueError(msg)
    # Guard against rounding pushing the sum of the two tails above 1.
    pvalue = min(pvalue, 1.0)
    return oddsratio, pvalue
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))


def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
    """
    Calculate a Spearman correlation coefficient with associated p-value.
    The Spearman rank-order correlation coefficient is a nonparametric measure
    of the monotonicity of the relationship between two datasets. Unlike the
    Pearson correlation, the Spearman correlation does not assume that both
    datasets are normally distributed. Like other correlation coefficients,
    this one varies between -1 and +1 with 0 implying no correlation.
    Correlations of -1 or +1 imply an exact monotonic relationship. Positive
    correlations imply that as x increases, so does y. Negative correlations
    imply that as x increases, y decreases.
    The p-value roughly indicates the probability of an uncorrelated system
    producing datasets that have a Spearman correlation at least as extreme
    as the one computed from these datasets. The p-values are not entirely
    reliable but are probably reasonable for datasets larger than 500 or so.
    Parameters
    ----------
    a, b : 1D or 2D array_like, b is optional
        One or two 1-D or 2-D arrays containing multiple variables and
        observations. When these are 1-D, each represents a vector of
        observations of a single variable. For the behavior in the 2-D case,
        see under ``axis``, below.
        Both arrays need to have the same length in the ``axis`` dimension.
    axis : int or None, optional
        If axis=0 (default), then each column represents a variable, with
        observations in the rows. If axis=1, the relationship is transposed:
        each row represents a variable, while the columns contain observations.
        If axis=None, then both arrays will be raveled.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):
        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values
    Returns
    -------
    correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). Correlation matrix is square with
        length equal to total number of variables (columns or rows) in ``a``
        and ``b`` combined.
    pvalue : float
        The two-sided p-value for a hypothesis test whose null hypothesis is
        that two sets of data are uncorrelated, has same dimension as rho.
    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
       Section  14.7
    Examples
    --------
    >>> from scipy import stats
    >>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
    (0.82078268166812329, 0.088587005313543798)
    >>> np.random.seed(1234321)
    >>> x2n = np.random.randn(100, 2)
    >>> y2n = np.random.randn(100, 2)
    >>> stats.spearmanr(x2n)
    (0.059969996999699973, 0.55338590803773591)
    >>> stats.spearmanr(x2n[:,0], x2n[:,1])
    (0.059969996999699973, 0.55338590803773591)
    >>> rho, pval = stats.spearmanr(x2n, y2n)
    >>> rho
    array([[ 1.        ,  0.05997   ,  0.18569457,  0.06258626],
           [ 0.05997   ,  1.        ,  0.110003  ,  0.02534653],
           [ 0.18569457,  0.110003  ,  1.        ,  0.03488749],
           [ 0.06258626,  0.02534653,  0.03488749,  1.        ]])
    >>> pval
    array([[ 0.        ,  0.55338591,  0.06435364,  0.53617935],
           [ 0.55338591,  0.        ,  0.27592895,  0.80234077],
           [ 0.06435364,  0.27592895,  0.        ,  0.73039992],
           [ 0.53617935,  0.80234077,  0.73039992,  0.        ]])
    >>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
    >>> rho
    array([[ 1.        ,  0.05997   ,  0.18569457,  0.06258626],
           [ 0.05997   ,  1.        ,  0.110003  ,  0.02534653],
           [ 0.18569457,  0.110003  ,  1.        ,  0.03488749],
           [ 0.06258626,  0.02534653,  0.03488749,  1.        ]])
    >>> stats.spearmanr(x2n, y2n, axis=None)
    (0.10816770419260482, 0.1273562188027364)
    >>> stats.spearmanr(x2n.ravel(), y2n.ravel())
    (0.10816770419260482, 0.1273562188027364)
    >>> xint = np.random.randint(10, size=(100, 2))
    >>> stats.spearmanr(xint)
    (0.052760927029710199, 0.60213045837062351)
    """
    a, axisout = _chk_asarray(a, axis)
    if a.ndim > 2:
        raise ValueError("spearmanr only handles 1-D or 2-D arrays")
    if b is None:
        if a.ndim < 2:
            raise ValueError("`spearmanr` needs at least 2 variables to compare")
    else:
        # Concatenate a and b, so that we now only have to handle the case
        # of a 2-D `a`.
        b, _ = _chk_asarray(b, axis)
        if axisout == 0:
            a = np.column_stack((a, b))
        else:
            a = np.row_stack((a, b))
    n_vars = a.shape[1 - axisout]
    n_obs = a.shape[axisout]
    if n_obs <= 1:
        # Handle empty arrays or single observations.
        return SpearmanrResult(np.nan, np.nan)
    a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
    variable_has_nan = np.zeros(n_vars, dtype=bool)
    if a_contains_nan:
        if nan_policy == 'omit':
            return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy)
        elif nan_policy == 'propagate':
            if a.ndim == 1 or n_vars <= 2:
                return SpearmanrResult(np.nan, np.nan)
            else:
                # Keep track of variables with NaNs, set the outputs to NaN
                # only for those variables.
                # BUG FIX: `any` (not `sum`) is required here.  The result is
                # used as a boolean mask below; an integer count array would
                # be interpreted as fancy (integer) indexing and mark the
                # wrong rows/columns of the output as NaN.
                variable_has_nan = np.isnan(a).any(axis=axisout)
    # Rank each variable independently, then Spearman's rho is Pearson's
    # correlation of the ranks.
    a_ranked = np.apply_along_axis(rankdata, axisout, a)
    rs = np.corrcoef(a_ranked, rowvar=axisout)
    dof = n_obs - 2  # degrees of freedom
    # rs can have elements equal to 1, so avoid zero division warnings
    olderr = np.seterr(divide='ignore')
    try:
        # clip the small negative values possibly caused by rounding
        # errors before taking the square root
        t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
    finally:
        np.seterr(**olderr)
    # Two-sided p-value from the t distribution with n-2 dof.
    prob = 2 * distributions.t.sf(np.abs(t), dof)
    # For backwards compatibility, return scalars when comparing 2 columns
    if rs.shape == (2, 2):
        return SpearmanrResult(rs[1, 0], prob[1, 0])
    else:
        rs[variable_has_nan, :] = np.nan
        rs[:, variable_has_nan] = np.nan
        return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
                                  ('correlation', 'pvalue'))


def pointbiserialr(x, y):
    r"""Calculate a point biserial correlation coefficient and its p-value.

    The point biserial correlation measures the association between a
    binary variable `x` and a continuous variable `y`.  Like other
    correlation coefficients it ranges from -1 to +1, with 0 implying no
    association and -1 or +1 a determinative relationship.

    Although a dedicated shortcut formula exists,

    .. math::
        r_{pb} = \frac{\overline{Y_{1}} -
                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{1} N_{2}}{N (N - 1))}}

    (with :math:`Y_{0}`, :math:`Y_{1}` the means of the observations coded
    0 and 1, :math:`N_{0}`, :math:`N_{1}` their counts, :math:`N` the total
    count and :math:`s_{y}` the standard deviation of all observations),
    the coefficient and its two-sided p-value are mathematically identical
    to Pearson's r, so this implementation simply delegates to `pearsonr`.
    The significance test is a t-test; testing :math:`r_{pb} \neq 0` is
    equivalent to an independent-groups t-test via

    .. math::
        t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}

    Parameters
    ----------
    x : array_like of bools
        Input array.
    y : array_like
        Input array.

    Returns
    -------
    correlation : float
        R value.
    pvalue : float
        Two-sided p-value.

    References
    ----------
    .. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
           Statist., Vol. 20, no.1, pp. 125-126, 1949.
    .. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
           Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
           np. 3, pp. 603-607, 1954.
    .. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
           Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
           https://doi.org/10.1002/9781118445112.stat06227

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 0, 0, 1, 1, 1, 1])
    >>> b = np.arange(7)
    >>> stats.pointbiserialr(a, b)
    (0.8660254037844386, 0.011724811003954652)
    >>> stats.pearsonr(a, b)
    (0.86602540378443871, 0.011724811003954626)
    >>> np.corrcoef(a, b)
    array([[ 1.       ,  0.8660254],
           [ 0.8660254,  1.       ]])
    """
    # Pearson's r on a {0, 1} variable is exactly the point-biserial
    # coefficient, so no separate computation is needed.
    return PointbiserialrResult(*pearsonr(x, y))
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate', method='auto'):
    """
    Calculate Kendall's tau, a correlation measure for ordinal data.
    Kendall's tau is a measure of the correspondence between two rankings.
    Values close to 1 indicate strong agreement, values close to -1 indicate
    strong disagreement. This is the 1945 "tau-b" version of Kendall's
    tau [2]_, which can account for ties and which reduces to the 1938 "tau-a"
    version [1]_ in absence of ties.
    Parameters
    ----------
    x, y : array_like
        Arrays of rankings, of the same shape. If arrays are not 1-D, they will
        be flattened to 1-D.
    initial_lexsort : bool, optional
        Unused (deprecated).
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):
        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values
    method : {'auto', 'asymptotic', 'exact'}, optional
        Defines which method is used to calculate the p-value [5]_.
        The following options are available (default is 'auto'):
        * 'auto': selects the appropriate method based on a trade-off between
          speed and accuracy
        * 'asymptotic': uses a normal approximation valid for large samples
        * 'exact': computes the exact p-value, but can only be used if no ties
          are present
    Returns
    -------
    correlation : float
        The tau statistic.
    pvalue : float
        The two-sided p-value for a hypothesis test whose null hypothesis is
        an absence of association, tau = 0.
    See Also
    --------
    spearmanr : Calculates a Spearman rank-order correlation coefficient.
    theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
    weightedtau : Computes a weighted version of Kendall's tau.
    Notes
    -----
    The definition of Kendall's tau that is used is [2]_::
      tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
    where P is the number of concordant pairs, Q the number of discordant
    pairs, T the number of ties only in `x`, and U the number of ties only in
    `y`.  If a tie occurs for the same pair in both `x` and `y`, it is not
    added to either T or U.
    References
    ----------
    .. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
           Vol. 30, No. 1/2, pp. 81-93, 1938.
    .. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
           Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
    .. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
           Wiley & Sons, 1967.
    .. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
           tables", Software: Practice and Experience, Vol. 24, No. 3,
           pp. 327-336, 1994.
    .. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
           Charles Griffin & Co., 1970.
    Examples
    --------
    >>> from scipy import stats
    >>> x1 = [12, 2, 1, 12, 2]
    >>> x2 = [1, 4, 7, 1, 0]
    >>> tau, p_value = stats.kendalltau(x1, x2)
    >>> tau
    -0.47140452079103173
    >>> p_value
    0.2827454599327748
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    if x.size != y.size:
        raise ValueError("All inputs to `kendalltau` must be of the same size, "
                         "found x-size %s and y-size %s" % (x.size, y.size))
    elif not x.size or not y.size:
        return KendalltauResult(np.nan, np.nan)  # Return NaN if arrays are empty
    # check both x and y
    cnx, npx = _contains_nan(x, nan_policy)
    cny, npy = _contains_nan(y, nan_policy)
    contains_nan = cnx or cny
    if npx == 'omit' or npy == 'omit':
        nan_policy = 'omit'
    if contains_nan and nan_policy == 'propagate':
        return KendalltauResult(np.nan, np.nan)
    elif contains_nan and nan_policy == 'omit':
        # Delegate to the masked-array implementation, which knows how to
        # skip invalid entries.
        x = ma.masked_invalid(x)
        y = ma.masked_invalid(y)
        return mstats_basic.kendalltau(x, y, method=method)
    if initial_lexsort is not None:  # deprecate to drop!
        warnings.warn('"initial_lexsort" is gone!')
    def count_rank_tie(ranks):
        # For each group of tied ranks of size t, accumulate:
        #   sum t*(t-1)/2       - the number of tied pairs,
        #   sum t*(t-1)*(t-2)   and
        #   sum t*(t-1)*(2t+5)  - correction terms of the asymptotic
        #                         variance of con_minus_dis [3]_.
        cnt = np.bincount(ranks).astype('int64', copy=False)
        cnt = cnt[cnt > 1]
        return ((cnt * (cnt - 1) // 2).sum(),
                (cnt * (cnt - 1.) * (cnt - 2)).sum(),
                (cnt * (cnt - 1.) * (2*cnt + 5)).sum())
    size = x.size
    perm = np.argsort(y)  # sort on y and convert y to dense ranks
    x, y = x[perm], y[perm]
    y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
    # stable sort on x and convert x to dense ranks
    perm = np.argsort(x, kind='mergesort')
    x, y = x[perm], y[perm]
    x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
    dis = _kendall_dis(x, y)  # discordant pairs
    # `obs` marks the boundaries of groups of joint (x, y) ties; `cnt` is
    # the size of each group.
    obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
    cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
    ntie = (cnt * (cnt - 1) // 2).sum()  # joint ties
    xtie, x0, x1 = count_rank_tie(x)     # ties in x, stats
    ytie, y0, y1 = count_rank_tie(y)     # ties in y, stats
    tot = (size * (size - 1)) // 2
    if xtie == tot or ytie == tot:
        # All values tied within one array: tau is undefined.
        return KendalltauResult(np.nan, np.nan)
    # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
    #               = con + dis + xtie + ytie - ntie
    con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
    tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
    # Limit range to fix computational errors
    tau = min(1., max(-1., tau))
    if method == 'exact' and (xtie != 0 or ytie != 0):
        raise ValueError("Ties found, exact method cannot be used.")
    if method == 'auto':
        # The exact method is affordable for small samples, or when the
        # statistic is nearly extreme (few discordant or concordant pairs).
        if (xtie == 0 and ytie == 0) and (size <= 33 or min(dis, tot-dis) <= 1):
            method = 'exact'
        else:
            method = 'asymptotic'
    if xtie == 0 and ytie == 0 and method == 'exact':
        # Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), Charles Griffin & Co., 1970.
        c = min(dis, tot-dis)
        if size <= 0:
            raise ValueError
        elif c < 0 or 2*c > size*(size-1):
            raise ValueError
        elif size == 1:
            pvalue = 1.0
        elif size == 2:
            pvalue = 1.0
        elif c == 0:
            # Guard: math.factorial(size) exceeds the float range for
            # size >= 171, where the p-value underflows to 0.
            pvalue = 2.0/math.factorial(size) if size < 171 else 0.0
        elif c == 1:
            pvalue = 2.0/math.factorial(size-1) if (size-1) < 171 else 0.0
        else:
            # Dynamic programming: after processing j elements, new[k] is the
            # number of permutations of j elements with exactly k discordant
            # pairs (updated in place, with `old` holding the previous row).
            new = [0.0]*(c+1)
            new[0] = 1.0
            new[1] = 1.0
            for j in range(3,size+1):
                old = new[:]
                for k in range(1,min(j,c+1)):
                    new[k] += new[k-1]
                for k in range(j,c+1):
                    new[k] += new[k-1] - old[k-j]
            # Two-sided p-value: twice the one-tail permutation count.
            pvalue = 2.0*sum(new)/math.factorial(size) if size < 171 else 0.0
    elif method == 'asymptotic':
        # con_minus_dis is approx normally distributed with this variance [3]_
        var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + (
            2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. *
            size * (size - 1) * (size - 2))
        pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2))
    else:
        raise ValueError("Unknown method "+str(method)+" specified, please use auto, exact or asymptotic.")
    return KendalltauResult(tau, pvalue)
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))


def weightedtau(x, y, rank=True, weigher=None, additive=True):
    r"""
    Compute a weighted version of Kendall's :math:`\tau`.
    The weighted :math:`\tau` is a weighted version of Kendall's
    :math:`\tau` in which exchanges of high weight are more influential than
    exchanges of low weight. The default parameters compute the additive
    hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
    been shown to provide the best balance between important and
    unimportant elements [1]_.
    The weighting is defined by means of a rank array, which assigns a
    nonnegative rank to each element, and a weigher function, which
    assigns a weight based from the rank to each element. The weight of an
    exchange is then the sum or the product of the weights of the ranks of
    the exchanged elements. The default parameters compute
    :math:`\tau_\mathrm h`: an exchange between elements with rank
    :math:`r` and :math:`s` (starting from zero) has weight
    :math:`1/(r+1) + 1/(s+1)`.
    Specifying a rank array is meaningful only if you have in mind an
    external criterion of importance. If, as it usually happens, you do
    not have in mind a specific rank, the weighted :math:`\tau` is
    defined by averaging the values obtained using the decreasing
    lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
    behavior with default parameters.
    Note that if you are computing the weighted :math:`\tau` on arrays of
    ranks, rather than of scores (i.e., a larger value implies a lower
    rank) you must negate the ranks, so that elements of higher rank are
    associated with a larger value.
    Parameters
    ----------
    x, y : array_like
        Arrays of scores, of the same shape. If arrays are not 1-D, they will
        be flattened to 1-D.
    rank : array_like of ints or bool, optional
        A nonnegative rank assigned to each element. If it is None, the
        decreasing lexicographical rank by (`x`, `y`) will be used: elements of
        higher rank will be those with larger `x`-values, using `y`-values to
        break ties (in particular, swapping `x` and `y` will give a different
        result). If it is False, the element indices will be used
        directly as ranks. The default is True, in which case this
        function returns the average of the values obtained using the
        decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
    weigher : callable, optional
        The weigher function. Must map nonnegative integers (zero
        representing the most important element) to a nonnegative weight.
        The default, None, provides hyperbolic weighing, that is,
        rank :math:`r` is mapped to weight :math:`1/(r+1)`.
    additive : bool, optional
        If True, the weight of an exchange is computed by adding the
        weights of the ranks of the exchanged elements; otherwise, the weights
        are multiplied. The default is True.
    Returns
    -------
    correlation : float
        The weighted :math:`\tau` correlation index.
    pvalue : float
        Presently ``np.nan``, as the null statistics is unknown (even in the
        additive hyperbolic case).
    See Also
    --------
    kendalltau : Calculates Kendall's tau.
    spearmanr : Calculates a Spearman rank-order correlation coefficient.
    theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
    Notes
    -----
    This function uses an :math:`O(n \log n)`, mergesort-based algorithm
    [1]_ that is a weighted extension of Knight's algorithm for Kendall's
    :math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
    between rankings without ties (i.e., permutations) by setting
    `additive` and `rank` to False, as the definition given in [1]_ is a
    generalization of Shieh's.
    NaNs are considered the smallest possible score.
    .. versionadded:: 0.19.0
    References
    ----------
    .. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
           ties", Proceedings of the 24th international conference on World
           Wide Web, pp. 1166-1176, ACM, 2015.
    .. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
           Ungrouped Data", Journal of the American Statistical Association,
           Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
    .. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
           Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
    Examples
    --------
    >>> from scipy import stats
    >>> x = [12, 2, 1, 12, 2]
    >>> y = [1, 4, 7, 1, 0]
    >>> tau, p_value = stats.weightedtau(x, y)
    >>> tau
    -0.56694968153682723
    >>> p_value
    nan
    >>> tau, p_value = stats.weightedtau(x, y, additive=False)
    >>> tau
    -0.62205716951801038
    NaNs are considered the smallest possible score:
    >>> x = [12, 2, 1, 12, 2]
    >>> y = [1, 4, 7, 1, np.nan]
    >>> tau, _ = stats.weightedtau(x, y)
    >>> tau
    -0.56694968153682723
    This is exactly Kendall's tau:
    >>> x = [12, 2, 1, 12, 2]
    >>> y = [1, 4, 7, 1, 0]
    >>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
    >>> tau
    -0.47140452079103173
    >>> x = [12, 2, 1, 12, 2]
    >>> y = [1, 4, 7, 1, 0]
    >>> stats.weightedtau(x, y, rank=None)
    WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
    >>> stats.weightedtau(y, x, rank=None)
    WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan)
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    if x.size != y.size:
        raise ValueError("All inputs to `weightedtau` must be of the same size, "
                         "found x-size %s and y-size %s" % (x.size, y.size))
    if not x.size:
        return WeightedTauResult(np.nan, np.nan)  # Return NaN if arrays are empty
    # If there are NaNs we apply _toint64()
    if np.isnan(np.sum(x)):
        x = _toint64(x)
    # BUG FIX: this check must inspect `y`, not `x` a second time; otherwise
    # an array with NaNs only in `y` was never converted.
    if np.isnan(np.sum(y)):
        y = _toint64(y)
    # Reduce unsupported dtypes to integer ranks so the low-level routine
    # can operate on them.
    if x.dtype != y.dtype:
        if x.dtype != np.int64:
            x = _toint64(x)
        if y.dtype != np.int64:
            y = _toint64(y)
    else:
        if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
            x = _toint64(x)
            y = _toint64(y)
    if rank is True:
        # Default behavior: average the results for the decreasing
        # lexicographical rankings by (x, y) and by (y, x).
        return WeightedTauResult((
            _weightedrankedtau(x, y, None, weigher, additive) +
            _weightedrankedtau(y, x, None, weigher, additive)
            ) / 2, np.nan)
    if rank is False:
        # Use the element indices directly as ranks.
        rank = np.arange(x.size, dtype=np.intp)
    elif rank is not None:
        rank = np.asarray(rank).ravel()
        if rank.size != x.size:
            raise ValueError("All inputs to `weightedtau` must be of the same size, "
                             "found x-size %s and rank-size %s" % (x.size, rank.size))
    # The null distribution is unknown, so the p-value is always NaN.
    return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive), np.nan)
# FROM MGCPY: https://github.com/neurodata/mgcpy
class _ParallelP(object):
    """
    Picklable callable computing one permuted MGC statistic per call.

    All state a worker needs is captured as attributes so instances can be
    shipped to a (possibly multiprocess) map.
    """
    def __init__(self, x, y, compute_distance, random_states):
        self.x = x
        self.y = y
        self.compute_distance = compute_distance
        self.random_states = random_states
    def __call__(self, index):
        # Each replication uses its own pre-seeded RandomState, so results
        # are reproducible regardless of worker scheduling.
        rng = self.random_states[index]
        x_perm = rng.permutation(self.x)
        y_perm = rng.permutation(self.y)
        # Keep only the statistic (first element) for the null distribution.
        result = _mgc_stat(x_perm, y_perm, self.compute_distance)
        return result[0]
def _perm_test(x, y, stat, compute_distance, reps=1000, workers=-1,
               random_state=None):
    r"""
    Helper function that calculates the p-value. See below for uses.
    Parameters
    ----------
    x, y : ndarray
        `x` and `y` have shapes `(n, p)` and `(n, q)`.
    stat : float
        The sample test statistic.
    compute_distance : callable
        A function that computes the distance or similarity among the samples
        within each data matrix. Set to `None` if `x` and `y` are already
        distance.
    reps : int, optional
        The number of replications used to estimate the null when using the
        permutation test. The default is 1000 replications.
    workers : int or map-like callable, optional
        If `workers` is an int the population is subdivided into `workers`
        sections and evaluated in parallel (uses
        `multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores
        available to the Process. Alternatively supply a map-like callable,
        such as `multiprocessing.Pool.map` for evaluating the population in
        parallel. This evaluation is carried out as `workers(func, iterable)`.
        Requires that `func` be pickleable.
    random_state : int or np.random.RandomState instance, optional
        If already a RandomState instance, use it.
        If seed is an int, return a new RandomState instance seeded with seed.
        If None, use np.random.RandomState. Default is None.
    Returns
    -------
    pvalue : float
        The sample test p-value.
    null_dist : list
        The approximated null distribution.
    """
    # generate seeds for each rep (change to new parallel random number
    # capabilities in numpy >= 1.17+)
    random_state = check_random_state(random_state)
    # Each replication gets an independently seeded RandomState so the
    # permutations are reproducible regardless of worker scheduling.
    random_states = [np.random.RandomState(random_state.randint(1 << 32,
                     size=4, dtype=np.uint32)) for _ in range(reps)]
    # parallelizes with specified workers over number of reps and set seeds
    mapwrapper = MapWrapper(workers)
    parallelp = _ParallelP(x=x, y=y, compute_distance=compute_distance,
                           random_states=random_states)
    null_dist = np.array(list(mapwrapper(parallelp, range(reps))))
    # calculate p-value and significant permutation map through list
    # (one-sided: fraction of permuted statistics at least as large as `stat`)
    pvalue = (null_dist >= stat).sum() / reps
    # correct for a p-value of 0. This is because, with bootstrapping
    # permutations, a p-value of 0 is incorrect
    if pvalue == 0:
        pvalue = 1 / reps
    return pvalue, null_dist
def _euclidean_dist(x):
    # Pairwise Euclidean distance matrix between the rows of `x`;
    # the default `compute_distance` for `multiscale_graphcorr`.
    return cdist(x, x)
# Result of `multiscale_graphcorr`: the MGC test statistic, its permutation
# p-value, and a dict of additional outputs (contents defined by that function).
MGCResult = namedtuple('MGCResult', ('stat', 'pvalue', 'mgc_dict'))
def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
                         workers=1, is_twosamp=False, random_state=None):
    r"""
    Computes the Multiscale Graph Correlation (MGC) test statistic.

    Specifically, for each point, MGC finds the :math:`k`-nearest neighbors
    for one property (e.g. cloud density), and the :math:`l`-nearest
    neighbors for the other property (e.g. grass wetness) [1]_. This pair
    :math:`(k, l)` is called the "scale". A priori, however, it is not known
    which scales will be most informative. So, MGC computes all distance
    pairs, and then efficiently computes the distance correlations for all
    scales. The local correlations illustrate which scales are relatively
    informative about the relationship. This is especially important in
    high-dimensional data, where simple visualizations do not reveal
    relationships to the unaided human eye. Characterizations of this
    implementation in particular have been derived from and benchmarked
    within [2]_.

    Parameters
    ----------
    x, y : ndarray
        If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n`
        is the number of samples and `p` and `q` are the number of
        dimensions, then the MGC independence test will be run.
        Alternatively, ``x`` and ``y`` can have shapes ``(n, n)`` if they
        are distance or similarity matrices, and ``compute_distance`` must
        be set to ``None``. If ``x`` and ``y`` have shapes ``(n, p)`` and
        ``(m, p)``, an unpaired two-sample MGC test will be run.
    compute_distance : callable, optional
        A function that computes the distance or similarity among the
        samples within each data matrix. Set to ``None`` if ``x`` and ``y``
        are already distance matrices. The default uses the euclidean norm
        metric. If you are calling a custom function, either create the
        distance matrix before-hand or create a function of the form
        ``compute_distance(x)`` where `x` is the data matrix for which
        pairwise distances are calculated.
    reps : int, optional
        The number of replications used to estimate the null when using the
        permutation test. The default is ``1000``.
    workers : int or map-like callable, optional
        If ``workers`` is an int the population is subdivided into
        ``workers`` sections and evaluated in parallel (uses
        ``multiprocessing.Pool <multiprocessing>``). Supply ``-1`` to use
        all cores available to the Process. Alternatively supply a map-like
        callable, such as ``multiprocessing.Pool.map`` for evaluating the
        p-value in parallel. This evaluation is carried out as
        ``workers(func, iterable)``. Requires that `func` be pickleable.
        The default is ``1``.
    is_twosamp : bool, optional
        If `True`, a two sample test will be run. If ``x`` and ``y`` have
        shapes ``(n, p)`` and ``(m, p)``, this optional will be overriden
        and set to ``True``. Set to ``True`` if ``x`` and ``y`` both have
        shapes ``(n, p)`` and a two sample test is desired. The default is
        ``False``.
    random_state : int or np.random.RandomState instance, optional
        If already a RandomState instance, use it.
        If seed is an int, return a new RandomState instance seeded with
        seed. If None, use np.random.RandomState. Default is None.

    Returns
    -------
    stat : float
        The sample MGC test statistic within `[-1, 1]`.
    pvalue : float
        The p-value obtained via permutation.
    mgc_dict : dict
        Contains additional useful returns containing the following keys:

            - mgc_map : ndarray
                A 2D representation of the latent geometry of the
                relationship.
            - opt_scale : (int, int)
                The estimated optimal scale as a `(x, y)` pair.
            - null_dist : list
                The null distribution derived from the permuted matrices.

    See Also
    --------
    pearsonr : Pearson correlation coefficient and p-value for testing
               non-correlation.
    kendalltau : Calculates Kendall's tau.
    spearmanr : Calculates a Spearman rank-order correlation coefficient.

    Notes
    -----
    A description of the process of MGC and applications on neuroscience
    data can be found in [1]_. The test statistic is the smoothed optimal
    local correlation over all scales and lies within :math:`(-1, 1)`; the
    p-value is obtained by permuting :math:`y` to estimate the null
    distribution. MGC requires at least 5 samples to run with reliable
    results and can also handle high-dimensional data sets. In addition,
    by manipulating the input data matrices, the two-sample testing
    problem can be reduced to the independence testing problem [4]_.

    .. versionadded:: 1.4.0

    References
    ----------
    .. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
           Maggioni, M., & Shen, C. (2019). Discovering and deciphering
           relationships across disparate data modalities. ELife.
    .. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
           Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T.
           (2019). mgcpy: A Comprehensive High Dimensional Independence
           Testing Python Package. ArXiv:1907.02088 [Cs, Stat].
    .. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From
           distance correlation to multiscale graph correlation. Journal
           of the American Statistical Association.
    .. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
           Distance and Kernel Methods for Hypothesis Testing.
           ArXiv:1806.05514 [Cs, Stat].

    Examples
    --------
    >>> from scipy.stats import multiscale_graphcorr
    >>> x = np.arange(100)
    >>> y = x
    >>> stat, pvalue, _ = multiscale_graphcorr(x, y, workers=-1)
    >>> '%.1f, %.3f' % (stat, pvalue)
    '1.0, 0.001'
    """
    if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
        raise ValueError("x and y must be ndarrays")

    # convert arrays of type (n,) to (n, 1)
    if x.ndim == 1:
        x = x[:, np.newaxis]
    elif x.ndim != 2:
        raise ValueError("Expected a 2-D array `x`, found shape "
                         "{}".format(x.shape))
    if y.ndim == 1:
        y = y[:, np.newaxis]
    elif y.ndim != 2:
        raise ValueError("Expected a 2-D array `y`, found shape "
                         "{}".format(y.shape))

    nx, px = x.shape
    ny, py = y.shape

    # check for NaNs (raise immediately; no nan_policy option here)
    _contains_nan(x, nan_policy='raise')
    _contains_nan(y, nan_policy='raise')

    # check for positive or negative infinity and raise error
    if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
        raise ValueError("Inputs contain infinities")

    if nx != ny:
        if px == py:
            # unequal sample sizes with matching dimension: force the
            # unpaired two-sample transformation below
            is_twosamp = True
        else:
            raise ValueError("Shape mismatch, x and y must have shape [n, p] "
                             "and [n, q] or have shape [n, p] and [m, p].")

    if nx < 5 or ny < 5:
        raise ValueError("MGC requires at least 5 samples to give reasonable "
                         "results.")

    # convert x and y to float
    x = x.astype(np.float64)
    y = y.astype(np.float64)

    # check if compute_distance is a callable (or explicitly disabled)
    if not callable(compute_distance) and compute_distance is not None:
        raise ValueError("Compute_distance must be a function.")

    # check that reps is a positive integer (if under 1000, raise a warning)
    # BUG FIX: the original condition was ``reps < 0``, which let
    # ``reps == 0`` through, contradicting the error message and later
    # dividing by zero when the permutation p-value is computed.
    if not isinstance(reps, int) or reps < 1:
        raise ValueError("Number of reps must be an integer greater than 0.")
    elif reps < 1000:
        msg = ("The number of replications is low (under 1000), and p-value "
               "calculations may be unreliable. Use the p-value result, with "
               "caution!")
        warnings.warn(msg, RuntimeWarning)

    if is_twosamp:
        # stack the samples and test independence against a 0/1 label vector
        x, y = _two_sample_transform(x, y)

    # calculate MGC stat
    stat, stat_dict = _mgc_stat(x, y, compute_distance)
    stat_mgc_map = stat_dict["stat_mgc_map"]
    opt_scale = stat_dict["opt_scale"]

    # calculate permutation MGC p-value
    pvalue, null_dist = _perm_test(x, y, stat, compute_distance, reps=reps,
                                   workers=workers, random_state=random_state)

    # save all stats (other than stat/p-value) in dictionary
    mgc_dict = {"mgc_map": stat_mgc_map,
                "opt_scale": opt_scale,
                "null_dist": null_dist}

    return MGCResult(stat, pvalue, mgc_dict)
def _mgc_stat(x, y, compute_distance):
    r"""
    Helper function that calculates the MGC stat. See above for use.

    Parameters
    ----------
    x, y : ndarray
        `x` and `y` have shapes `(n, p)` and `(n, q)` or `(n, n)` and
        `(n, n)` if distance matrices.
    compute_distance : callable
        A function that computes the distance or similarity among the
        samples within each data matrix. Set to `None` if `x` and `y` are
        already distance matrices.

    Returns
    -------
    stat : float
        The sample MGC test statistic within `[-1, 1]`.
    stat_dict : dict
        Contains additional useful returns containing the following keys:

            - stat_mgc_map : ndarray
                MGC-map of the statistics.
            - opt_scale : (float, float)
                The estimated optimal scale as a `(x, y)` pair.
    """
    # set distx and disty to x and y when compute_distance = None
    distx = x
    disty = y

    if compute_distance is not None:
        # compute distance matrices for x and y
        distx = compute_distance(x)
        disty = compute_distance(y)

    # calculate MGC map and optimal scale
    stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')

    n, m = stat_mgc_map.shape
    if m == 1 or n == 1:
        # There is not enough local scale to search over, so default to the
        # global scale, i.e. the statistic at maximal nearest neighbors.
        # BUG FIX: the original indexed ``stat_mgc_map[m - 1][n - 1]`` —
        # swapped relative to the (n, m) shape, which IndexErrors on
        # non-square maps — and returned the scalar ``m * n`` for
        # opt_scale, while the contract documents a pair.
        stat = stat_mgc_map[n - 1][m - 1]
        opt_scale = [n, m]
    else:
        samp_size = len(distx) - 1
        # threshold to find connected region of significant local correlations
        sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
        # maximum within the significant region
        stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)

    stat_dict = {"stat_mgc_map": stat_mgc_map,
                 "opt_scale": opt_scale}

    return stat, stat_dict
def _threshold_mgc_map(stat_mgc_map, samp_size):
r"""
Finds a connected region of significance in the MGC-map by thresholding.
Parameters
----------
stat_mgc_map : ndarray
All local correlations within `[-1,1]`.
samp_size : int
The sample size of original data.
Returns
-------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
"""
m, n = stat_mgc_map.shape
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance. Threshold is based on a beta
# approximation.
per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
# the global scale at is the statistic calculated at maximial nearest
# neighbors. Threshold is the maximium on the global and local scales
threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
# find the largest connected component of significant correlations
sig_connect = stat_mgc_map > threshold
if np.sum(sig_connect) > 0:
sig_connect, _ = measurements.label(sig_connect)
_, label_counts = np.unique(sig_connect, return_counts=True)
# skip the first element in label_counts, as it is count(zeros)
max_label = np.argmax(label_counts[1:]) + 1
sig_connect = sig_connect == max_label
else:
sig_connect = np.array([[False]])
return sig_connect
def _smooth_mgc_map(sig_connect, stat_mgc_map):
"""
Finds the smoothed maximal within the significant region R.
If area of R is too small it returns the last local correlation. Otherwise,
returns the maximum within significant_connected_region.
Parameters
----------
sig_connect: ndarray
A binary matrix with 1's indicating the significant region.
stat_mgc_map: ndarray
All local correlations within `[-1, 1]`.
Returns
-------
stat : float
The sample MGC statistic within `[-1, 1]`.
opt_scale: (float, float)
The estimated optimal scale as an `(x, y)` pair.
"""
m, n = stat_mgc_map.shape
# the global scale at is the statistic calculated at maximial nearest
# neighbors. By default, statistic and optimal scale are global.
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = [m, n]
if np.linalg.norm(sig_connect) != 0:
# proceed only when the connected region's area is sufficiently large
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance
if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
max_corr = max(stat_mgc_map[sig_connect])
# find all scales within significant_connected_region that maximize
# the local correlation
max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
if max_corr >= stat:
stat = max_corr
k, l = max_corr_index
one_d_indices = k * n + l # 2D to 1D indexing
k = np.max(one_d_indices) // n
l = np.max(one_d_indices) % n
opt_scale = [k+1, l+1] # adding 1s to match R indexing
return stat, opt_scale
def _two_sample_transform(u, v):
"""
Helper function that concatenates x and y for two sample MGC stat. See
above for use.
Parameters
----------
u, v : ndarray
`u` and `v` have shapes `(n, p)` and `(m, p)`,
Returns
-------
x : ndarray
Concatenate `u` and `v` along the `axis = 0`. `x` thus has shape
`(2n, p)`.
y : ndarray
Label matrix for `x` where 0 refers to samples that comes from `u` and
1 refers to samples that come from `v`. `y` thus has shape `(2n, 1)`.
"""
nx = u.shape[0]
ny = v.shape[0]
x = np.concatenate([u, v], axis=0)
y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
return x, y
#####################################
# INFERENTIAL STATISTICS #
#####################################
# Result container for `ttest_1samp`: the t-statistic and two-sided p-value.
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
    """
    Calculate the T-test for the mean of ONE group of scores.

    This is a two-sided test for the null hypothesis that the expected
    value (mean) of a sample of independent observations `a` is equal to
    the given population mean, `popmean`.

    Parameters
    ----------
    a : array_like
        Sample observation.
    popmean : float or array_like
        Expected value in null hypothesis. If array_like, then it must
        have the same shape as `a` excluding the axis dimension.
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. The following
        options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float or array
        t-statistic.
    pvalue : float or array
        Two-sided p-value.
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate NaN-omitting behaviour to the masked-array version.
        a = ma.masked_invalid(a)
        return mstats_basic.ttest_1samp(a, popmean, axis)

    n = a.shape[axis]
    df = n - 1

    mean_diff = np.mean(a, axis) - popmean
    sample_var = np.var(a, axis, ddof=1)
    denom = np.sqrt(sample_var / n)

    # A zero-variance sample produces x/0 or 0/0; suppress the warnings
    # and let the division yield inf/nan as appropriate.
    with np.errstate(divide='ignore', invalid='ignore'):
        t = np.divide(mean_diff, denom)
    t, prob = _ttest_finish(df, t)

    return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
    """Shared core of the independent t-tests: forms t = (mean1 - mean2) /
    denom and converts it to a two-sided p-value."""
    diff = mean1 - mean2
    # Suppress divide-by-zero / 0-over-0 warnings from a zero denominator.
    with np.errstate(divide='ignore', invalid='ignore'):
        t = np.divide(diff, denom)
    t, prob = _ttest_finish(df, t)

    return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
# Result container for the independent-samples t-tests (`ttest_ind` and
# `ttest_ind_from_stats`): the t-statistic and two-sided p-value.
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
                         equal_var=True):
    r"""
    T-test for means of two independent samples from descriptive statistics.

    This is a two-sided test for the null hypothesis that two independent
    samples have identical average (expected) values.

    Parameters
    ----------
    mean1 : array_like
        The mean(s) of sample 1.
    std1 : array_like
        The standard deviation(s) of sample 1.
    nobs1 : array_like
        The number(s) of observations of sample 1.
    mean2 : array_like
        The mean(s) of sample 2.
    std2 : array_like
        The standard deviations(s) of sample 2.
    nobs2 : array_like
        The number(s) of observations of sample 2.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_. If False, perform
        Welch's t-test, which does not assume equal population
        variance [2]_.

    Returns
    -------
    statistic : float or array
        The calculated t-statistics.
    pvalue : float or array
        The two-tailed p-value.

    See Also
    --------
    scipy.stats.ttest_ind

    Notes
    -----
    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test

    Examples
    --------
    >>> from scipy.stats import ttest_ind_from_stats
    >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
    ...                      mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
    Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
    """
    # Both denominator helpers work on variances; square the std-devs once.
    var1 = std1**2
    var2 = std2**2

    if equal_var:
        # standard pooled-variance two-sample test
        df, denom = _equal_var_ttest_denom(var1, nobs1, var2, nobs2)
    else:
        # Welch's test: no equal-variance assumption
        df, denom = _unequal_var_ttest_denom(var1, nobs1, var2, nobs2)

    t, prob = _ttest_ind_from_stats(mean1, mean2, denom, df)
    return Ttest_indResult(t, prob)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
    """
    Calculate the T-test for the means of *two independent* samples of scores.

    This is a two-sided test for the null hypothesis that 2 independent
    samples have identical average (expected) values. This test assumes
    that the populations have identical variances by default.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_. If False, perform
        Welch's t-test, which does not assume equal population
        variance [2]_.

        .. versionadded:: 0.11.0
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. The following
        options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float or array
        The calculated t-statistic.
    pvalue : float or array
        The two-tailed p-value.

    Notes
    -----
    We can use this test, if we observe two independent samples from the
    same or different population, e.g. exam scores of boys and girls or of
    two ethnic groups. The test measures whether the average (expected)
    value differs significantly across samples. If we observe a large
    p-value, for example larger than 0.05 or 0.1, then we cannot reject
    the null hypothesis of identical average scores. If the p-value is
    smaller than the threshold, e.g. 1%, 5% or 10%, then we reject the
    null hypothesis of equal averages.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
    """
    a, b, axis = _chk2_asarray(a, b, axis)

    # NaN handling: if either input requests 'omit', apply it to both.
    contains_nan_a, policy_a = _contains_nan(a, nan_policy)
    contains_nan_b, policy_b = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b
    if policy_a == 'omit' or policy_b == 'omit':
        nan_policy = 'omit'

    if contains_nan and nan_policy == 'omit':
        # Delegate NaN-omitting behaviour to the masked-array version.
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        return mstats_basic.ttest_ind(a, b, axis, equal_var)

    if a.size == 0 or b.size == 0:
        return Ttest_indResult(np.nan, np.nan)

    v1 = np.var(a, axis, ddof=1)
    v2 = np.var(b, axis, ddof=1)
    n1 = a.shape[axis]
    n2 = b.shape[axis]

    if equal_var:
        df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
    else:
        df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)

    t, prob = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis),
                                    denom, df)
    return Ttest_indResult(t, prob)
# Result container for `ttest_rel`: the t-statistic and two-sided p-value.
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
    """
    Calculate the t-test on TWO RELATED samples of scores, a and b.

    This is a two-sided test for the null hypothesis that 2 related or
    repeated samples have identical average (expected) values.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape.
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. The following
        options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float or array
        t-statistic.
    pvalue : float or array
        Two-sided p-value.

    Notes
    -----
    Examples for use are scores of the same set of student in different
    exams, or repeated sampling from the same units. The test measures
    whether the average score differs significantly across samples (e.g.
    exams). Small p-values are associated with large t-statistics.

    References
    ----------
    https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
    """
    a, b, axis = _chk2_asarray(a, b, axis)

    # NaN handling: if either input requests 'omit', apply it to both.
    contains_nan_a, policy_a = _contains_nan(a, nan_policy)
    contains_nan_b, policy_b = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b
    if policy_a == 'omit' or policy_b == 'omit':
        nan_policy = 'omit'

    if contains_nan and nan_policy == 'omit':
        # Mask invalid entries in lockstep so pairing is preserved, then
        # delegate to the masked-array implementation.
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        m = ma.mask_or(ma.getmask(a), ma.getmask(b))
        aa = ma.array(a, mask=m, copy=True)
        bb = ma.array(b, mask=m, copy=True)
        return mstats_basic.ttest_rel(aa, bb, axis)

    if a.shape[axis] != b.shape[axis]:
        raise ValueError('unequal length arrays')

    if a.size == 0 or b.size == 0:
        # NOTE(review): returns a bare tuple here rather than a
        # Ttest_relResult; kept as-is for backward compatibility.
        return np.nan, np.nan

    n = a.shape[axis]
    df = n - 1

    diffs = (a - b).astype(np.float64)
    var_d = np.var(diffs, axis, ddof=1)
    mean_d = np.mean(diffs, axis)
    denom = np.sqrt(var_d / n)

    # Zero variance of the differences gives x/0 or 0/0; suppress warnings.
    with np.errstate(divide='ignore', invalid='ignore'):
        t = np.divide(mean_d, denom)
    t, prob = _ttest_finish(df, t)

    return Ttest_relResult(t, prob)
# Result container for `kstest`: the KS statistic and its p-value.
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
    """
    Perform the Kolmogorov-Smirnov test for goodness of fit.

    This performs a test of the distribution F(x) of an observed random
    variable against a given distribution G(x). Under the null hypothesis,
    the two distributions are identical, F(x)=G(x). The alternative
    hypothesis can be either 'two-sided' (default), 'less' or 'greater'.
    The KS test is only valid for continuous distributions.

    Parameters
    ----------
    rvs : str, array_like, or callable
        If a string, it should be the name of a distribution in
        `scipy.stats`. If an array, it should be a 1-D array of
        observations of random variables. If a callable, it should be a
        function to generate random variables; it is required to have a
        keyword argument `size`.
    cdf : str or callable
        If a string, it should be the name of a distribution in
        `scipy.stats`. If `rvs` is a string then `cdf` can be False or the
        same as `rvs`. If a callable, that callable is used to calculate
        the cdf.
    args : tuple, sequence, optional
        Distribution parameters, used if `rvs` or `cdf` are strings.
    N : int, optional
        Sample size if `rvs` is string or callable. Default is 20.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis (default is 'two-sided'); see
        Notes for the one-sided variants.
    mode : {'approx', 'asymp'}, optional
        Defines the distribution used for calculating the p-value:
        'approx' (default) uses an approximation to the exact distribution
        of the test statistic; 'asymp' uses the asymptotic distribution.

    Returns
    -------
    statistic : float
        KS test statistic, either D, D+ or D-.
    pvalue : float
        One-tailed or two-tailed p-value.

    See Also
    --------
    ks_2samp

    Notes
    -----
    In the one-sided test, the alternative is that the empirical
    cumulative distribution function of the random variable is "less" or
    "greater" than the cumulative distribution function G(x) of the
    hypothesis, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.
    """
    # Resolve string-named distributions into their rvs/cdf callables.
    if isinstance(rvs, string_types):
        if (not cdf) or (cdf == rvs):
            cdf = getattr(distributions, rvs).cdf
            rvs = getattr(distributions, rvs).rvs
        else:
            raise AttributeError("if rvs is string, cdf has to be the "
                                 "same distribution")
    if isinstance(cdf, string_types):
        cdf = getattr(distributions, cdf).cdf

    # Obtain the sorted sample (generated or supplied).
    if callable(rvs):
        kwds = {'size': N}
        vals = np.sort(rvs(*args, **kwds))
    else:
        vals = np.sort(rvs)
        N = len(vals)
    cdfvals = cdf(vals, *args)

    # to not break compatibility with existing code
    if alternative == 'two_sided':
        alternative = 'two-sided'

    # D+: maximal excess of the empirical CDF above the hypothesized CDF.
    if alternative in ['two-sided', 'greater']:
        Dplus = (np.arange(1.0, N + 1) / N - cdfvals).max()
        if alternative == 'greater':
            return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))

    # D-: maximal excess of the hypothesized CDF above the empirical CDF.
    if alternative in ['two-sided', 'less']:
        Dmin = (cdfvals - np.arange(0.0, N) / N).max()
        if alternative == 'less':
            return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))

    if alternative == 'two-sided':
        D = np.max([Dplus, Dmin])
        if mode == 'asymp':
            return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
        if mode == 'approx':
            pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
            # For large N (or when the asymptotic p-value is comfortably
            # large) the asymptotic value is accurate enough; otherwise use
            # twice the one-sided exact distribution.
            if N > 2666 or pval_two > 0.80 - N * 0.3 / 1000:
                return KstestResult(D, pval_two)
            else:
                return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
    "pearson": 1,               # Pearson's chi-squared statistic
    "log-likelihood": 0,        # log-likelihood ratio (the G-test)
    "freeman-tukey": -0.5,      # Freeman-Tukey statistic
    "mod-log-likelihood": -1,   # modified log-likelihood ratio
    "neyman": -2,               # Neyman's statistic
    "cressie-read": 2/3,        # power recommended by Cressie & Read (1984)
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
# Result tuple returned by power_divergence() (and hence by chisquare()).
Power_divergenceResult = namedtuple('Power_divergenceResult',
                                    ('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    """
    Cressie-Read power divergence statistic and goodness of fit test.

    This function tests the null hypothesis that the categorical data
    has the given frequencies, using the Cressie-Read power divergence
    statistic.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category. By default the categories are
        assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value. The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies. The default value of `ddof`
        is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along which to
        apply the test. If axis is None, all values in `f_obs` are treated
        as a single data set. Default is 0.
    lambda_ : float or str, optional
        The power in the Cressie-Read power divergence statistic. The default
        is 1. For convenience, `lambda_` may be assigned one of the following
        strings, in which case the corresponding numerical value is used::

            String              Value   Description
            "pearson"             1     Pearson's chi-squared statistic.
                                        In this case, the function is
                                        equivalent to `stats.chisquare`.
            "log-likelihood"      0     Log-likelihood ratio. Also known as
                                        the G-test [3]_.
            "freeman-tukey"      -1/2   Freeman-Tukey statistic.
            "mod-log-likelihood" -1     Modified log-likelihood ratio.
            "neyman"             -2     Neyman's statistic.
            "cressie-read"        2/3   The power recommended in [5]_.

    Returns
    -------
    statistic : float or ndarray
        The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
    pvalue : float or ndarray
        The p-value of the test. The value is a float if `ddof` and the
        return value `stat` are scalars.

    See Also
    --------
    chisquare

    Notes
    -----
    This test is invalid when the observed or expected frequencies in each
    category are too small. A typical rule is that all of the observed
    and expected frequencies should be at least 5.

    When `lambda_` is less than zero, the formula for the statistic involves
    dividing by `f_obs`, so a warning or error may be generated if any value
    in `f_obs` is 0.

    Similarly, a warning or error may be generated if any value in `f_exp` is
    zero when `lambda_` >= 0.

    The default degrees of freedom, k-1, are for the case when no parameters
    of the distribution are estimated. If p parameters are estimated by
    efficient maximum likelihood then the correct degrees of freedom are
    k-1-p. If the parameters are estimated in a different way, then the
    dof can be between k-1-p and k-1. However, it is also possible that
    the asymptotic distribution is not a chisquare, in which case this
    test is not appropriate.

    This function handles masked arrays. If an element of `f_obs` or `f_exp`
    is masked, then data at that position is ignored, and does not count
    towards the size of the data set.

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Lowry, Richard. "Concepts and Applications of Inferential
           Statistics". Chapter 8.
           https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
    .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
    .. [3] "G-test", https://en.wikipedia.org/wiki/G-test
    .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
           practice of statistics in biological research", New York: Freeman
           (1981)
    .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
           pp. 440-464.

    Examples
    --------
    (See `chisquare` for more examples.)

    When just `f_obs` is given, it is assumed that the expected frequencies
    are uniform and given by the mean of the observed frequencies. Here we
    perform a G-test (i.e. use the log-likelihood ratio statistic):

    >>> from scipy.stats import power_divergence
    >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
    (2.006573162632538, 0.84823476779463769)

    The expected frequencies can be given with the `f_exp` argument:

    >>> power_divergence([16, 18, 16, 14, 12, 12],
    ...                  f_exp=[16, 16, 16, 16, 16, 8],
    ...                  lambda_='log-likelihood')
    (3.3281031458963746, 0.6495419288047497)

    When `f_obs` is 2-D, by default the test is applied to each column.

    >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
    >>> obs.shape
    (6, 2)
    >>> power_divergence(obs, lambda_="log-likelihood")
    (array([ 2.00657316,  6.77634498]), array([ 0.84823477,  0.23781225]))

    By setting ``axis=None``, the test is applied to all data in the array,
    which is equivalent to applying the test to the flattened array.

    >>> power_divergence(obs, axis=None)
    (23.31034482758621, 0.015975692534127565)
    >>> power_divergence(obs.ravel())
    (23.31034482758621, 0.015975692534127565)

    `ddof` is the change to make to the default degrees of freedom.

    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
    (2.0, 0.73575888234288467)

    The calculation of the p-values is done by broadcasting the
    test statistic with `ddof`.

    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
    (2.0, array([ 0.84914504,  0.73575888,  0.5724067 ]))

    `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
    shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
    `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
    statistics, we must use ``axis=1``:

    >>> power_divergence([16, 18, 16, 14, 12, 12],
    ...                  f_exp=[[16, 16, 16, 16, 16, 8],
    ...                         [8, 20, 20, 16, 12, 12]],
    ...                  axis=1)
    (array([ 3.5 ,  9.25]), array([ 0.62338763,  0.09949846]))
    """
    # Convert the input argument `lambda_` to a numerical value.
    if isinstance(lambda_, string_types):
        if lambda_ not in _power_div_lambda_names:
            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
            raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
                             "are {1}".format(lambda_, names))
        lambda_ = _power_div_lambda_names[lambda_]
    elif lambda_ is None:
        # Default statistic is Pearson's chi-squared.
        lambda_ = 1

    f_obs = np.asanyarray(f_obs)

    if f_exp is not None:
        f_exp = np.asanyarray(f_exp)
    else:
        # Ignore 'invalid' errors so the edge case of a data set with length 0
        # is handled without spurious warnings.
        with np.errstate(invalid='ignore'):
            # keepdims=True so the mean broadcasts against f_obs along `axis`.
            f_exp = f_obs.mean(axis=axis, keepdims=True)

    # `terms` is the array of terms that are summed along `axis` to create
    # the test statistic.  We use some specialized code for a few special
    # cases of lambda_.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs - f_exp)**2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (i.e. G-test).  xlogy(x, y) is x*log(y)
        # with the convention xlogy(0, 0) == 0.
        terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio
        terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)

    stat = terms.sum(axis=axis)

    # Degrees of freedom: number of (unmasked) categories minus 1,
    # minus the user-supplied adjustment `ddof`.
    num_obs = _count(terms, axis=axis)
    ddof = asarray(ddof)
    p = distributions.chi2.sf(stat, num_obs - 1 - ddof)

    return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    """
    Calculate a one-way chi-square test.

    The chi-square test tests the null hypothesis that the categorical
    data has the given frequencies.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  By default the categories
        are assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value.  The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies.  The default is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along
        which to apply the test.  If axis is None, all values in `f_obs`
        are treated as a single data set.  Default is 0.

    Returns
    -------
    chisq : float or ndarray
        The chi-squared test statistic.  The value is a float if `axis`
        is None or `f_obs` and `f_exp` are 1-D.
    p : float or ndarray
        The p-value of the test.  The value is a float if `ddof` and the
        return value `chisq` are scalars.

    See Also
    --------
    scipy.stats.power_divergence

    Notes
    -----
    This test is invalid when the observed or expected frequencies in
    each category are too small.  A typical rule is that all of the
    observed and expected frequencies should be at least 5.

    The default degrees of freedom, k-1, are for the case when no
    parameters of the distribution are estimated.  If p parameters are
    estimated by efficient maximum likelihood then the correct degrees
    of freedom are k-1-p.

    References
    ----------
    .. [1] Lowry, Richard. "Concepts and Applications of Inferential
           Statistics". Chapter 8.
           https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
    .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test

    Examples
    --------
    When just `f_obs` is given, it is assumed that the expected
    frequencies are uniform and given by the mean of the observed
    frequencies.

    >>> from scipy.stats import chisquare
    >>> chisquare([16, 18, 16, 14, 12, 12])
    (2.0, 0.84914503608460956)

    With `f_exp` the expected frequencies can be given.

    >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
    (3.5, 0.62338762774958223)

    `ddof` is the change to make to the default degrees of freedom.

    >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
    (2.0, 0.73575888234288467)
    """
    # Pearson's chi-squared statistic is the ``lambda_ = 1`` member of the
    # Cressie-Read power divergence family, so delegate to the general
    # implementation.
    return power_divergence(f_obs, f_exp=f_exp, ddof=ddof,
                            axis=axis, lambda_="pearson")
# Result tuple returned by ks_2samp().
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def _compute_prob_inside_method(m, n, g, h):
"""
Count the proportion of paths that stay strictly inside two diagonal lines.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The proportion of paths that stay inside the two lines.
Count the integer lattice paths from (0, 0) to (m, n) which satisfy
|x/m - y/n| < h / lcm(m, n).
The paths make steps of size +1 in either positive x or positive y directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.
"""
# Probability is symmetrical in m, n. Computation below uses m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Count the integer lattice paths from (0, 0) to (m, n) which satisfy
# |nx/g - my/g| < h.
# Compute matrix A such that:
# A(x, 0) = A(0, y) = 1
# A(x, y) = A(x, y-1) + A(x-1, y), for x,y>=1, except that
# A(x, y) = 0 if |x/m - y/n|>= h
# Probability is A(m, n)/binom(m+n, n)
# Optimizations exist for m==n, m==n*p.
# Only need to preserve a single column of A, and only a sliding window of it.
# minj keeps track of the slide.
minj, maxj = 0, min(int(np.ceil(h / mg)), n + 1)
curlen = maxj - minj
# Make a vector long enough to hold maximum window needed.
lenA = min(2 * maxj + 2, n + 1)
# This is an integer calculation, but the entries are essentially
# binomial coefficients, hence grow quickly.
# Scaling after each column is computed avoids dividing by a
# large binomial coefficent at the end. Instead it is incorporated
# one factor at a time during the computation.
dtype = np.float64
A = np.zeros(lenA, dtype=dtype)
# Initialize the first column
A[minj:maxj] = 1
for i in range(1, m + 1):
# Generate the next column.
# First calculate the sliding window
lastminj, lastmaxj, lastlen = minj, maxj, curlen
minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0)
minj = min(minj, n)
maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1)
if maxj <= minj:
return 0
# Now fill in the values
A[0:maxj - minj] = np.cumsum(A[minj - lastminj:maxj - lastminj])
curlen = maxj - minj
if lastlen > curlen:
# Set some carried-over elements to 0
A[maxj - minj:maxj - minj + (lastlen - curlen)] = 0
# Peel off one term from each of top and bottom of the binomial coefficient.
scaling_factor = i * 1.0 / (n + i)
A *= scaling_factor
return A[maxj - minj - 1]
def _compute_prob_outside_square(n, h):
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
# Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2a) + binom(2n, n-3a) - ... ) / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
def _count_paths_outside_method(m, n, g, h):
"""
Count the number of paths that pass outside the specified diagonal.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The number of paths that go low.
The calculation may overflow - check for a finite answer.
Exceptions
----------
FloatingPointError: Raised if the intermediate computation goes outside
the range of a float.
Notes
-----
Count the integer lattice paths from (0, 0) to (m, n), which at some
point (x, y) along the path, satisfy:
m*y <= n*x - h*g
The paths make steps of size +1 in either positive x or positive y directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.
"""
# Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
# B(x, y) = #{paths from (0,0) to (x,y) without previously crossing the boundary}
# = binom(x, y) - #{paths which already reached the boundary}
# Multiply by the number of path extensions going from (x, y) to (m, n)
# Sum.
# Probability is symmetrical in m, n. Computation below assumes m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# 0 <= x_j <= m is the smallest integer for which n*x_j - m*j < g*h
xj = [int(np.ceil((h + mg * j)/ng)) for j in range(n+1)]
xj = [_ for _ in xj if _ <= m]
lxj = len(xj)
# B is an array just holding a few values of B(x,y), the ones needed.
# B[j] == B(x_j, j)
if lxj == 0:
return np.round(special.binom(m + n, n))
B = np.zeros(lxj)
B[0] = 1
# Compute the B(x, y) terms
# The binomial coefficient is an integer, but special.binom() may return a float.
# Round it to the nearest integer.
for j in range(1, lxj):
Bj = np.round(special.binom(xj[j] + j, j))
if not np.isfinite(Bj):
raise FloatingPointError()
for i in range(j):
bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i))
dec = bin * B[i]
Bj -= dec
B[j] = Bj
if not np.isfinite(Bj):
raise FloatingPointError()
# Compute the number of path extensions...
num_paths = 0
for j in range(lxj):
bin = np.round(special.binom((m-xj[j]) + (n - j), n-j))
term = B[j] * bin
if not np.isfinite(term):
raise FloatingPointError()
num_paths += term
return np.round(num_paths)
def ks_2samp(data1, data2, alternative='two-sided', mode='auto'):
    """
    Compute the Kolmogorov-Smirnov statistic on 2 samples.

    This is a two-sided test for the null hypothesis that 2 independent samples
    are drawn from the same continuous distribution.  The alternative hypothesis
    can be either 'two-sided' (default), 'less' or 'greater'.

    Parameters
    ----------
    data1, data2 : sequence of 1-D ndarrays
        Two arrays of sample observations assumed to be drawn from a continuous
        distribution, sample sizes can be different.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

          * 'two-sided'
          * 'less': one-sided, see explanation in Notes
          * 'greater': one-sided, see explanation in Notes
    mode : {'auto', 'exact', 'asymp'}, optional
        Defines the method used for calculating the p-value.
        The following options are available (default is 'auto'):

          * 'auto' : use 'exact' for small size arrays, 'asymp' for large
          * 'exact' : use approximation to exact distribution of test statistic
          * 'asymp' : use asymptotic distribution of test statistic

    Returns
    -------
    statistic : float
        KS statistic.
    pvalue : float
        Two-tailed p-value.

    See Also
    --------
    kstest

    Notes
    -----
    This tests whether 2 samples are drawn from the same distribution. Note
    that, like in the case of the one-sample KS test, the distribution is
    assumed to be continuous.

    In the one-sided test, the alternative is that the empirical
    cumulative distribution function F(x) of the data1 variable is "less"
    or "greater" than the empirical cumulative distribution function G(x)
    of the data2 variable, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.

    If the KS statistic is small or the p-value is high, then we cannot
    reject the hypothesis that the distributions of the two samples
    are the same.

    If the mode is 'auto', the computation is exact if the sample sizes are
    less than 10000.  For larger sizes, the computation uses the
    Kolmogorov-Smirnov distributions to compute an approximate value.

    We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.

    References
    ----------
    .. [1] Hodges, J.L. Jr.,  "The Significance Probability of the Smirnov
           Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)  #fix random seed to get the same result
    >>> n1 = 200  # size of first sample
    >>> n2 = 300  # size of second sample

    For a different distribution, we can reject the null hypothesis since the
    pvalue is below 1%:

    >>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
    >>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
    >>> stats.ks_2samp(rvs1, rvs2)
    (0.20833333333333334, 5.129279597781977e-05)

    For a slightly different distribution, we cannot reject the null hypothesis
    at a 10% or lower alpha since the p-value at 0.144 is higher than 10%

    >>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
    >>> stats.ks_2samp(rvs1, rvs3)
    (0.10333333333333333, 0.14691437867433876)

    For an identical distribution, we cannot reject the null hypothesis since
    the p-value is high, 41%:

    >>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
    >>> stats.ks_2samp(rvs1, rvs4)
    (0.07999999999999996, 0.41126949729859719)
    """
    LARGE_N = 10000  # 'auto' will attempt to be exact if n1,n2 <= LARGE_N
    data1 = np.sort(data1)
    data2 = np.sort(data2)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    if min(n1, n2) == 0:
        raise ValueError('Data passed to ks_2samp must not be empty')

    data_all = np.concatenate([data1, data2])
    # using searchsorted solves equal data problem
    cdf1 = np.searchsorted(data1, data_all, side='right') / n1
    cdf2 = np.searchsorted(data2, data_all, side='right') / n2
    cddiffs = cdf1 - cdf2
    minS = -np.min(cddiffs)
    maxS = np.max(cddiffs)
    alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)}
    d = alt2Dvalue[alternative]
    g = gcd(n1, n2)
    n1g = n1 // g
    n2g = n2 // g
    prob = -np.inf
    original_mode = mode
    if mode == 'auto':
        if max(n1, n2) <= LARGE_N:
            mode = 'exact'
        else:
            mode = 'asymp'
    elif mode == 'exact':
        # If lcm(n1, n2) is too big, switch from exact to asymp.
        # BUG FIX: this guard previously used `np.iinfo(np.int)`; `np.int`
        # was a deprecated alias of the builtin `int` (NumPy 1.20) and was
        # removed in NumPy 1.24, so it raised AttributeError here.  Use the
        # explicit, platform-independent `np.int32` bound instead.
        if n1g >= np.iinfo(np.int32).max / n2g:
            mode = 'asymp'
            warnings.warn(
                "Exact ks_2samp calculation not possible with samples sizes "
                "%d and %d. Switching to 'asymp' " % (n1, n2), RuntimeWarning)

    saw_fp_error = False
    if mode == 'exact':
        # Express d as an integer multiple of 1/lcm(n1, n2).
        lcm = (n1 // g) * n2
        h = int(np.round(d * lcm))
        d = h * 1.0 / lcm
        if h == 0:
            prob = 1.0
        else:
            try:
                if alternative == 'two-sided':
                    if n1 == n2:
                        prob = _compute_prob_outside_square(n1, h)
                    else:
                        prob = 1 - _compute_prob_inside_method(n1, n2, g, h)
                else:
                    if n1 == n2:
                        # prob = binom(2n, n-h) / binom(2n, n)
                        # Evaluating in that form incurs roundoff errors
                        # from special.binom. Instead calculate directly
                        prob = 1.0
                        for j in range(h):
                            prob = (n1 - j) * prob / (n1 + j + 1)
                    else:
                        num_paths = _count_paths_outside_method(n1, n2, g, h)
                        bin = special.binom(n1 + n2, n1)
                        if not np.isfinite(bin) or not np.isfinite(num_paths) or num_paths > bin:
                            raise FloatingPointError()
                        prob = num_paths / bin

            except FloatingPointError:
                # Switch mode
                mode = 'asymp'
                saw_fp_error = True
                # Can't raise warning here, inside the try
            finally:
                if saw_fp_error:
                    if original_mode == 'exact':
                        warnings.warn(
                            "ks_2samp: Exact calculation overflowed. "
                            "Switching to mode=%s" % mode, RuntimeWarning)
                else:
                    # An exact probability outside [0, 1] signals severe
                    # rounding error; fall back to the asymptotic formula.
                    if prob > 1 or prob < 0:
                        mode = 'asymp'
                        if original_mode == 'exact':
                            warnings.warn(
                                "ks_2samp: Exact calculation incurred large"
                                " rounding error. Switching to mode=%s" % mode,
                                RuntimeWarning)

    if mode == 'asymp':
        # The product n1*n2 is large.  Use Smirnov's asymptoptic formula.
        if alternative == 'two-sided':
            en = np.sqrt(n1 * n2 / (n1 + n2))
            # Switch to using kstwo.sf() when it becomes available.
            # prob = distributions.kstwo.sf(d, int(np.round(en)))
            prob = distributions.kstwobign.sf(en * d)
        else:
            m, n = max(n1, n2), min(n1, n2)
            z = np.sqrt(m*n/(m+n)) * d
            # Use Hodges' suggested approximation Eqn 5.3
            expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
            prob = np.exp(expt)

    # Clip any numerical noise so the returned p-value is a probability.
    prob = (0 if prob < 0 else (1 if prob > 1 else prob))
    return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
    """
    Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.

    Parameters
    ----------
    rankvals : array_like
        A 1-D sequence of ranks.  Typically this will be the array
        returned by `~scipy.stats.rankdata`.

    Returns
    -------
    factor : float
        Correction factor for U or H; 1.0 when there are no ties.

    See Also
    --------
    rankdata : Assign ranks to the data
    mannwhitneyu : Mann-Whitney rank test
    kruskal : Kruskal-Wallis H test

    References
    ----------
    .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
           Sciences.  New York: McGraw-Hill.

    Examples
    --------
    >>> from scipy.stats import tiecorrect, rankdata
    >>> tiecorrect([1, 2.5, 2.5, 4])
    0.9
    >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
    >>> tiecorrect(ranks)
    0.9833333333333333
    """
    ranks = np.asarray(rankvals)
    n = np.float64(ranks.size)
    if n < 2:
        # Fewer than two observations: no correction possible.
        return 1.0
    # Sizes of each group of tied ranks.
    _, tie_sizes = np.unique(ranks, return_counts=True)
    tie_sizes = tie_sizes.astype(np.float64)
    return 1.0 - (tie_sizes**3 - tie_sizes).sum() / (n**3 - n)
# Result tuple returned by mannwhitneyu().
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative=None):
    """
    Compute the Mann-Whitney rank test on samples x and y.

    Parameters
    ----------
    x, y : array_like
        Array of samples, should be one-dimensional.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into
        account.  Default is True.
    alternative : {None, 'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.  The default, None, computes
        a p-value half the size of the 'two-sided' p-value and a
        different U statistic; it is not the same as 'less' or 'greater'
        and exists only for backward compatibility.  Use of None is
        deprecated.

    Returns
    -------
    statistic : float
        The Mann-Whitney U statistic, equal to min(U for x, U for y) if
        `alternative` is None (deprecated), and U for y otherwise.
    pvalue : float
        p-value assuming an asymptotic normal distribution.  One-sided
        or two-sided, depending on the choice of `alternative`.

    Notes
    -----
    Use only when the number of observations in each sample is > 20 and
    you have 2 independent samples of ranks.  Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U.

    This test corrects for ties and by default uses a continuity
    correction.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mann-Whitney_U_test
    .. [2] H.B. Mann and D.R. Whitney, "On a Test of Whether one of Two Random
           Variables is Stochastically Larger than the Other," The Annals of
           Mathematical Statistics, vol. 18, no. 1, pp. 50-60, 1947.
    """
    if alternative is None:
        warnings.warn("Calling `mannwhitneyu` without specifying "
                      "`alternative` is deprecated.", DeprecationWarning)

    x = np.asarray(x)
    y = np.asarray(y)
    nx = len(x)
    ny = len(y)
    all_ranks = rankdata(np.concatenate((x, y)))
    # U statistic for x, from the rank sum of the x observations.
    u_x = nx*ny + (nx*(nx+1))/2.0 - np.sum(all_ranks[0:nx], axis=0)
    # The two U statistics always sum to nx*ny.
    u_y = nx*ny - u_x
    tie_factor = tiecorrect(all_ranks)
    if tie_factor == 0:
        raise ValueError('All numbers are identical in mannwhitneyu')
    sd = np.sqrt(tie_factor * nx * ny * (nx+ny+1) / 12.0)

    # Mean of U under H0, with optional continuity correction.
    mean_u = nx*ny/2.0 + 0.5 * use_continuity
    if alternative is None or alternative == 'two-sided':
        big_u = max(u_x, u_y)
    elif alternative == 'less':
        big_u = u_x
    elif alternative == 'greater':
        big_u = u_y
    else:
        raise ValueError("alternative should be None, 'less', 'greater' "
                         "or 'two-sided'")

    z = (big_u - mean_u) / sd
    if alternative is None:
        # This behavior, equal to half the size of the two-sided
        # p-value, is deprecated.
        p = distributions.norm.sf(abs(z))
    elif alternative == 'two-sided':
        p = 2 * distributions.norm.sf(abs(z))
    else:
        p = distributions.norm.sf(z)

    # Reported statistic is U for y, except in the deprecated None mode.
    stat = min(u_x, u_y) if alternative is None else u_y
    return MannwhitneyuResult(stat, p)
# Result tuple returned by ranksums().
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
    """
    Compute the Wilcoxon rank-sum statistic for two samples.

    The Wilcoxon rank-sum test tests the null hypothesis that two sets
    of measurements are drawn from the same distribution.  The
    alternative hypothesis is that values in one sample are more likely
    to be larger than the values in the other sample.

    This test should be used to compare two samples from continuous
    distributions.  It does not handle ties between measurements in x
    and y.  For tie-handling and an optional continuity correction see
    `scipy.stats.mannwhitneyu`.

    Parameters
    ----------
    x,y : array_like
        The data from the two samples.

    Returns
    -------
    statistic : float
        The test statistic under the large-sample approximation that the
        rank sum statistic is normally distributed.
    pvalue : float
        The two-sided p-value of the test.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
    """
    x, y = map(np.asarray, (x, y))
    nx = len(x)
    ny = len(y)
    # Rank the pooled data; the first nx ranks belong to x.
    pooled_ranks = rankdata(np.concatenate((x, y)))
    rank_sum_x = np.sum(pooled_ranks[:nx], axis=0)
    # Normal approximation: compare the observed rank sum of x with its
    # expectation and standard deviation under the null hypothesis.
    expected = nx * (nx+ny+1) / 2.0
    z = (rank_sum_x - expected) / np.sqrt(nx*ny*(nx+ny+1)/12.0)
    prob = 2 * distributions.norm.sf(abs(z))
    return RanksumsResult(z, prob)
# Result tuple returned by kruskal().
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
    """
    Compute the Kruskal-Wallis H-test for independent samples.

    The Kruskal-Wallis H-test tests the null hypothesis that the population
    median of all of the groups are equal.  It is a non-parametric version of
    ANOVA.  The test works on 2 or more independent samples, which may have
    different sizes.  Note that rejecting the null hypothesis does not
    indicate which of the groups differs.  Post hoc comparisons between
    groups are required to determine which groups are different.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Two or more arrays with the sample measurements can be given as
        arguments.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float
        The Kruskal-Wallis H statistic, corrected for ties.
    pvalue : float
        The p-value for the test using the assumption that H has a chi
        square distribution.

    See Also
    --------
    f_oneway : 1-way ANOVA.
    mannwhitneyu : Mann-Whitney rank test on two samples.
    friedmanchisquare : Friedman test for repeated measurements.

    Notes
    -----
    Due to the assumption that H has a chi square distribution, the number
    of samples in each group must not be too small.  A typical rule is
    that each sample must have at least 5 measurements.

    References
    ----------
    .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
       One-Criterion Variance Analysis", Journal of the American Statistical
       Association, Vol. 47, Issue 260, pp. 583-621, 1952.
    .. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance

    Examples
    --------
    >>> from scipy import stats
    >>> x = [1, 3, 5, 7, 9]
    >>> y = [2, 4, 6, 8, 10]
    >>> stats.kruskal(x, y)
    KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)

    >>> x = [1, 1, 1]
    >>> y = [2, 2, 2]
    >>> z = [2, 2]
    >>> stats.kruskal(x, y, z)
    KruskalResult(statistic=7.0, pvalue=0.0301973834223185)

    """
    args = list(map(np.asarray, args))
    num_groups = len(args)
    if num_groups < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")

    # Any empty group makes the statistic undefined.
    for arg in args:
        if arg.size == 0:
            return KruskalResult(np.nan, np.nan)
    n = np.asarray(list(map(len, args)))

    # Validate the optional nan_policy keyword (default: 'propagate').
    # Fixed: error message previously read "'raise' or'omit'" (missing space).
    if 'nan_policy' in kwargs:
        if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
            raise ValueError("nan_policy must be 'propagate', "
                             "'raise' or 'omit'")
        nan_policy = kwargs['nan_policy']
    else:
        nan_policy = 'propagate'

    contains_nan = False
    for arg in args:
        cn = _contains_nan(arg, nan_policy)
        if cn[0]:
            contains_nan = True
            break

    if contains_nan and nan_policy == 'omit':
        # Mask invalid entries and delegate to the masked-array version.
        # Fixed: the original loop rebound its loop variable
        # (``for a in args: a = ma.masked_invalid(a)``), a no-op that left
        # the unmasked arrays to be passed on; the masked arrays must be
        # what is forwarded.
        args = [ma.masked_invalid(a) for a in args]
        return mstats_basic.kruskal(*args)

    if contains_nan and nan_policy == 'propagate':
        return KruskalResult(np.nan, np.nan)

    alldata = np.concatenate(args)
    ranked = rankdata(alldata)
    ties = tiecorrect(ranked)
    if ties == 0:
        # Every observation identical: ranks carry no information.
        raise ValueError('All numbers are identical in kruskal')

    # Compute sum^2/n for each group and sum.
    j = np.insert(np.cumsum(n), 0, 0)
    ssbn = 0
    for i in range(num_groups):
        ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]

    totaln = np.sum(n, dtype=float)
    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
    df = num_groups - 1
    # Divide by the tie-correction factor.
    h /= ties

    return KruskalResult(h, distributions.chi2.sf(h, df))
# Result container returned by `friedmanchisquare`: (statistic, pvalue).
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
                                     ('statistic', 'pvalue'))
def friedmanchisquare(*args):
    """
    Compute the Friedman test for repeated measurements.

    The Friedman test tests the null hypothesis that repeated measurements of
    the same individuals have the same distribution.  It is often used
    to test for consistency among measurements obtained in different ways.
    For example, if two measurement techniques are used on the same set of
    individuals, the Friedman test can be used to determine if the two
    measurement techniques are consistent.

    Parameters
    ----------
    measurements1, measurements2, measurements3... : array_like
        Arrays of measurements.  All of the arrays must have the same number
        of elements.  At least 3 sets of measurements must be given.

    Returns
    -------
    statistic : float
        The test statistic, correcting for ties.
    pvalue : float
        The associated p-value assuming that the test statistic has a chi
        squared distribution.

    Notes
    -----
    Due to the assumption that the test statistic has a chi squared
    distribution, the p-value is only reliable for n > 10 and more than
    6 repeated measurements.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Friedman_test

    """
    k = len(args)
    if k < 3:
        raise ValueError('Less than 3 levels. Friedman test not appropriate.')

    n = len(args[0])
    for group in args[1:]:
        if len(group) != n:
            raise ValueError('Unequal N in friedmanchisquare. Aborting.')

    # One row per subject, one column per treatment; rank within each row.
    data = np.vstack(args).T
    data = data.astype(float)
    for i, row in enumerate(data):
        data[i] = rankdata(row)

    # Tie correction: accumulate t*(t^2 - 1) over every group of tied
    # ranks within each subject's row.
    ties = 0
    for row in data:
        _, repnum = find_repeats(array(row))
        for t in repnum:
            ties += t * (t*t - 1)
    c = 1 - ties / (k*(k*k - 1)*n)

    # Sum of squared column (treatment) rank totals.
    ssbn = np.sum(data.sum(axis=0)**2)
    chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c

    return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
# Result container returned by `brunnermunzel`: (statistic, pvalue).
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
                                 ('statistic', 'pvalue'))
def brunnermunzel(x, y, alternative="two-sided", distribution="t",
                  nan_policy='propagate'):
    """
    Compute the Brunner-Munzel test on samples x and y.

    The Brunner-Munzel test is a nonparametric test of the null hypothesis that
    when values are taken one by one from each group, the probabilities of
    getting large values in both groups are equal.
    Unlike the Wilcoxon-Mann-Whitney's U test, this does not require the
    assumption of equivariance of two groups. Note that this does not assume
    the distributions are same. This test works on two independent samples,
    which may have different sizes.

    Parameters
    ----------
    x, y : array_like
        Array of samples, should be one-dimensional.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided'
        * 'less': one-sided
        * 'greater': one-sided
    distribution : {'t', 'normal'}, optional
        Defines how to get the p-value.
        The following options are available (default is 't'):

        * 't': get the p-value by t-distribution
        * 'normal': get the p-value by standard normal distribution.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float
        The Brunner-Munzer W statistic.
    pvalue : float
        p-value assuming an t distribution. One-sided or
        two-sided, depending on the choice of `alternative` and `distribution`.

    See Also
    --------
    mannwhitneyu : Mann-Whitney rank test on two samples.

    Notes
    -----
    Brunner and Munzel recommended to estimate the p-value by t-distribution
    when the size of data is 50 or less. If the size is lower than 10, it would
    be better to use permuted Brunner Munzel test (see [2]_).

    References
    ----------
    .. [1] Brunner, E. and Munzel, U. "The nonparametric Benhrens-Fisher
           problem: Asymptotic theory and a small-sample approximation".
           Biometrical Journal. Vol. 42(2000): 17-25.
    .. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
           non-parametric Behrens-Fisher problem". Computational Statistics and
           Data Analysis. Vol. 51(2007): 5192-5204.

    Examples
    --------
    >>> from scipy import stats
    >>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
    >>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
    >>> w, p_value = stats.brunnermunzel(x1, x2)
    >>> w
    3.1374674823029505
    >>> p_value
    0.0057862086661515377

    """
    x = np.asarray(x)
    y = np.asarray(y)

    # check both x and y
    cnx, npx = _contains_nan(x, nan_policy)
    cny, npy = _contains_nan(y, nan_policy)
    contains_nan = cnx or cny
    # If either sample requested 'omit', apply it to the pair as a whole.
    if npx == "omit" or npy == "omit":
        nan_policy = "omit"

    if contains_nan and nan_policy == "propagate":
        return BrunnerMunzelResult(np.nan, np.nan)
    elif contains_nan and nan_policy == "omit":
        # Delegate to the masked-array implementation, which skips NaNs.
        x = ma.masked_invalid(x)
        y = ma.masked_invalid(y)
        return mstats_basic.brunnermunzel(x, y, alternative, distribution)

    nx = len(x)
    ny = len(y)
    if nx == 0 or ny == 0:
        # The statistic is undefined for an empty sample.
        return BrunnerMunzelResult(np.nan, np.nan)
    # Ranks in the combined sample, split back per group...
    rankc = rankdata(np.concatenate((x, y)))
    rankcx = rankc[0:nx]
    rankcy = rankc[nx:nx+ny]
    rankcx_mean = np.mean(rankcx)
    rankcy_mean = np.mean(rankcy)
    # ...and ranks within each group on its own.
    rankx = rankdata(x)
    ranky = rankdata(y)
    rankx_mean = np.mean(rankx)
    ranky_mean = np.mean(ranky)

    # Rank-based variance estimates for each group (Brunner & Munzel [1]).
    Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
    Sx /= nx - 1
    Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
    Sy /= ny - 1

    # The W statistic.
    wbfn = nx * ny * (rankcy_mean - rankcx_mean)
    wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)

    if distribution == "t":
        # Satterthwaite-style approximation of the degrees of freedom.
        df_numer = np.power(nx * Sx + ny * Sy, 2.0)
        df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
        df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
        df = df_numer / df_denom
        p = distributions.t.cdf(wbfn, df)
    elif distribution == "normal":
        p = distributions.norm.cdf(wbfn)
    else:
        raise ValueError(
            "distribution should be 't' or 'normal'")

    # Convert the lower-tail probability to the requested alternative.
    if alternative == "greater":
        pass
    elif alternative == "less":
        p = 1 - p
    elif alternative == "two-sided":
        p = 2 * np.min([p, 1-p])
    else:
        raise ValueError(
            "alternative should be 'less', 'greater' or 'two-sided'")

    return BrunnerMunzelResult(wbfn, p)
def combine_pvalues(pvalues, method='fisher', weights=None):
    """
    Combine p-values from independent tests bearing upon the same hypothesis.

    Parameters
    ----------
    pvalues : array_like, 1-D
        Array of p-values assumed to come from independent tests.
    method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}, optional
        Name of method to use to combine p-values.
        The following methods are available (default is 'fisher'):

        * 'fisher': Fisher's method (Fisher's combined probability test), the
          sum of the logarithm of the p-values
        * 'pearson': Pearson's method (similar to Fisher's but uses sum of the
          complement of the p-values inside the logarithms)
        * 'tippett': Tippett's method (minimum of p-values)
        * 'stouffer': Stouffer's Z-score method
        * 'mudholkar_george': the difference of Fisher's and Pearson's methods
          divided by 2
    weights : array_like, 1-D, optional
        Optional array of weights used only for Stouffer's Z-score method.

    Returns
    -------
    statistic: float
        The statistic calculated by the specified method.
    pval: float
        The combined p-value.

    Notes
    -----
    Fisher's method (also known as Fisher's combined probability test) [1]_ uses
    a chi-squared statistic to compute a combined p-value. The closely related
    Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
    advantage of Stouffer's method is that it is straightforward to introduce
    weights, which can make Stouffer's method more powerful than Fisher's
    method when the p-values are from studies of different size [6]_ [7]_.
    The Pearson's method uses :math:`log(1-p_i)` inside the sum whereas Fisher's
    method uses :math:`log(p_i)` [4]_. For Fisher's and Pearson's method, the
    sum of the logarithms is multiplied by -2 in the implementation. This
    quantity has a chi-square distribution that determines the p-value. The
    `mudholkar_george` method is the difference of the Fisher's and Pearson's
    test statistics, each of which include the -2 factor [4]_. However, the
    `mudholkar_george` method does not include these -2 factors. The test
    statistic of `mudholkar_george` is the sum of logisitic random variables and
    equation 3.6 in [3]_ is used to approximate the p-value based on Student's
    t-distribution.

    Fisher's method may be extended to combine p-values from dependent tests
    [5]_. Extensions such as Brown's method and Kost's method are not currently
    implemented.

    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
    .. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
    .. [3] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
           random variables." Metrika 30.1 (1983): 1-13.
    .. [4] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
           combining p-values." Biometrika 105.1 (2018): 239-246.
    .. [5] Whitlock, M. C. "Combining probability from independent tests: the
           weighted Z-method is superior to Fisher's approach." Journal of
           Evolutionary Biology 18, no. 5 (2005): 1368-1373.
    .. [6] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
           for combining probabilities in meta-analysis." Journal of
           Evolutionary Biology 24, no. 8 (2011): 1836-1841.
    .. [7] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method

    """
    pvalues = np.asarray(pvalues)
    if pvalues.ndim != 1:
        raise ValueError("pvalues is not 1-D")

    if method == 'fisher':
        statistic = -2 * np.sum(np.log(pvalues))
        pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
    elif method == 'pearson':
        statistic = -2 * np.sum(np.log1p(-pvalues))
        pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
    elif method == 'mudholkar_george':
        statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
        nu = 5 * len(pvalues) + 4
        approx_factor = np.sqrt(nu / (nu - 2))
        pval = distributions.t.sf(statistic * approx_factor, nu)
    elif method == 'tippett':
        statistic = np.min(pvalues)
        pval = distributions.beta.sf(statistic, 1, len(pvalues))
    elif method == 'stouffer':
        if weights is None:
            weights = np.ones_like(pvalues)
        elif len(weights) != len(pvalues):
            raise ValueError("pvalues and weights must be of the same size.")

        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("weights is not 1-D")

        Zi = distributions.norm.isf(pvalues)
        statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
        pval = distributions.norm.sf(statistic)
    else:
        # Fixed: the original passed `method` as a second ValueError
        # argument instead of applying the '%s' format, contained a stray
        # quote ("'or 'stouffer'"), and embedded a backslash line
        # continuation inside the string literal.
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher', 'pearson', "
            "'mudholkar_george', 'tippett' or 'stouffer'" % method)

    return (statistic, pval)
#####################################
# STATISTICAL DISTANCES #
#####################################
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
    r"""
    Compute the first Wasserstein distance between two 1D distributions.

    This distance is also known as the earth mover's distance, since it can be
    seen as the minimum amount of "work" required to transform :math:`u` into
    :math:`v`, where "work" is measured as the amount of distribution weight
    that must be moved, multiplied by the distance it has to be moved.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    u_values, v_values : array_like
        Values observed in the (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value. If unspecified, each value is assigned the same
        weight.
        `u_weights` (resp. `v_weights`) must have the same length as
        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
        must still be positive and finite so that the weights can be normalized
        to sum to 1.

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    Notes
    -----
    The first Wasserstein distance between the distributions :math:`u` and
    :math:`v` is:

    .. math::

        l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
        \mathbb{R}} |x-y| \mathrm{d} \pi (x, y)

    where :math:`\Gamma (u, v)` is the set of (probability) distributions on
    :math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
    :math:`v` on the first and second factors respectively.

    If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
    :math:`v`, this distance also equals to:

    .. math::

        l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|

    See [2]_ for a proof of the equivalence of both definitions.

    The input distributions can be empirical, therefore coming from samples
    whose values are effectively inputs of the function, or they can be seen as
    generalized functions, in which case they are weighted sums of Dirac delta
    functions located at the specified values.

    References
    ----------
    .. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
    .. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
           Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.

    Examples
    --------
    >>> from scipy.stats import wasserstein_distance
    >>> wasserstein_distance([0, 1, 3], [5, 6, 8])
    5.0
    >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
    0.25
    >>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
    ...                      [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
    4.0781331438047861

    """
    # The Wasserstein-1 distance is the p = 1 case of the generic
    # CDF-based distance implemented by _cdf_distance.
    return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
    r"""
    Compute the energy distance between two 1D distributions.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    u_values, v_values : array_like
        Values observed in the (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value. If unspecified, each value is assigned the same
        weight.
        `u_weights` (resp. `v_weights`) must have the same length as
        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
        must still be positive and finite so that the weights can be normalized
        to sum to 1.

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    Notes
    -----
    The energy distance between two distributions :math:`u` and :math:`v`, whose
    respective CDFs are :math:`U` and :math:`V`, equals to:

    .. math::

        D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
        \mathbb E|Y - Y'| \right)^{1/2}

    where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
    independent random variables whose probability distribution is :math:`u`
    (resp. :math:`v`).

    As shown in [2]_, for one-dimensional real-valued variables, the energy
    distance is linked to the non-distribution-free version of the Cramer-von
    Mises distance:

    .. math::

        D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
        \right)^{1/2}

    Note that the common Cramer-von Mises criterion uses the distribution-free
    version of the distance. See [2]_ (section 2), for more details about both
    versions of the distance.

    The input distributions can be empirical, therefore coming from samples
    whose values are effectively inputs of the function, or they can be seen as
    generalized functions, in which case they are weighted sums of Dirac delta
    functions located at the specified values.

    References
    ----------
    .. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
    .. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
           Green State University, Department of Mathematics and Statistics,
           Technical Report 02-16 (2002).
    .. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
           Computational Statistics, 8(1):27-38 (2015).
    .. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
           Munos "The Cramer Distance as a Solution to Biased Wasserstein
           Gradients" (2017). :arXiv:`1705.10743`.

    Examples
    --------
    >>> from scipy.stats import energy_distance
    >>> energy_distance([0], [2])
    2.0000000000000004
    >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
    1.0000000000000002
    >>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
    ...                 [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
    0.88003340976158217

    """
    # The energy distance is sqrt(2) times the p = 2 case of the generic
    # CDF-based distance (see the l_2 relation in the Notes above).
    return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
                                      u_weights, v_weights)
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
    r"""
    Compute, between two one-dimensional distributions :math:`u` and
    :math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
    statistical distance that is defined as:

    .. math::

        l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}

    p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2
    gives the energy distance.

    Parameters
    ----------
    u_values, v_values : array_like
        Values observed in the (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value. If unspecified, each value is assigned the same
        weight.
        `u_weights` (resp. `v_weights`) must have the same length as
        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
        must still be positive and finite so that the weights can be normalized
        to sum to 1.

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    Notes
    -----
    The input distributions can be empirical, therefore coming from samples
    whose values are effectively inputs of the function, or they can be seen as
    generalized functions, in which case they are weighted sums of Dirac delta
    functions located at the specified values.

    References
    ----------
    .. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
           Munos "The Cramer Distance as a Solution to Biased Wasserstein
           Gradients" (2017). :arXiv:`1705.10743`.

    """
    u_values, u_weights = _validate_distribution(u_values, u_weights)
    v_values, v_weights = _validate_distribution(v_values, v_weights)

    u_order = np.argsort(u_values)
    v_order = np.argsort(v_values)

    # Union of both samples' values, sorted; the CDFs are step functions
    # that only change at these points.
    combined = np.concatenate((u_values, v_values))
    combined.sort(kind='mergesort')

    # Width of each interval between successive pooled values.
    gaps = np.diff(combined)

    def empirical_cdf(values, order, weights):
        # Position of each pooled point within this sample's sorted values
        # gives the count (or cumulative weight) at or below that point.
        idx = values[order].searchsorted(combined[:-1], 'right')
        if weights is None:
            return idx / values.size
        cum = np.concatenate(([0], np.cumsum(weights[order])))
        return cum[idx] / cum[-1]

    u_cdf = empirical_cdf(u_values, u_order, u_weights)
    v_cdf = empirical_cdf(v_values, v_order, v_weights)

    # Integrate |U - V|^p over the real line.  p = 1 and p = 2 are
    # special-cased to avoid the overhead of np.power (~15%).
    if p == 1:
        return np.sum(np.abs(u_cdf - v_cdf) * gaps)
    if p == 2:
        return np.sqrt(np.sum(np.square(u_cdf - v_cdf) * gaps))
    return np.power(np.sum(np.power(np.abs(u_cdf - v_cdf), p) * gaps), 1/p)
def _validate_distribution(values, weights):
"""
Validate the values and weights from a distribution input of `cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
#####################################
# SUPPORT FUNCTIONS #
#####################################
# Result container returned by `find_repeats`: (values, counts).
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
    """
    Find repeats and repeat counts.

    Parameters
    ----------
    arr : array_like
        Input array. This is cast to float64.

    Returns
    -------
    values : ndarray
        The unique values from the (flattened) input that are repeated.
    counts : ndarray
        Number of times the corresponding 'value' is repeated.

    Notes
    -----
    In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
    difference is that `find_repeats` only returns repeated values.

    Examples
    --------
    >>> from scipy import stats
    >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
    RepeatedResults(values=array([2.]), counts=array([4]))

    >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
    RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))

    """
    # Note: always copies.
    # The heavy lifting happens in the compiled helper `_find_repeats`;
    # np.array(..., dtype=float64) guarantees a fresh float64 copy first.
    return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
def _sum_of_squares(a, axis=0):
    """
    Square each element of the input array, and return the sum(s) of that.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    sum_of_squares : ndarray
        The sum along the given axis for (a**2).

    See Also
    --------
    _square_of_sums : The square(s) of the sum(s) (the opposite of
        `_sum_of_squares`).

    """
    a, axis = _chk_asarray(a, axis)
    return np.sum(np.square(a), axis)
def _square_of_sums(a, axis=0):
    """
    Sum elements of the input array, and return the square(s) of that sum.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    square_of_sums : float or ndarray
        The square of the sum over `axis`.

    See Also
    --------
    _sum_of_squares : The sum of squares (the opposite of `square_of_sums`).

    """
    a, axis = _chk_asarray(a, axis)
    total = np.sum(a, axis)
    # Promote to float before squaring to avoid integer overflow.
    if np.isscalar(total):
        return float(total) * total
    return total.astype(float) * total
def rankdata(a, method='average'):
    """
    Assign ranks to data, dealing with ties appropriately.

    Ranks begin at 1.  The `method` argument controls how ranks are assigned
    to equal values.  See [1]_ for further discussion of ranking methods.

    Parameters
    ----------
    a : array_like
        The array of values to be ranked.  The array is first flattened.
    method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
        The method used to assign ranks to tied elements.
        The following methods are available (default is 'average'):

        * 'average': The average of the ranks that would have been assigned to
          all the tied values is assigned to each value.
        * 'min': The minimum of the ranks that would have been assigned to all
          the tied values is assigned to each value.  (This is also
          referred to as "competition" ranking.)
        * 'max': The maximum of the ranks that would have been assigned to all
          the tied values is assigned to each value.
        * 'dense': Like 'min', but the rank of the next highest element is
          assigned the rank immediately after those assigned to the tied
          elements.
        * 'ordinal': All values are given a distinct rank, corresponding to
          the order that the values occur in `a`.

    Returns
    -------
    ranks : ndarray
        An array of length equal to the size of `a`, containing rank
        scores.

    References
    ----------
    .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking

    Examples
    --------
    >>> from scipy.stats import rankdata
    >>> rankdata([0, 2, 3, 2])
    array([ 1. ,  2.5,  4. ,  2.5])
    >>> rankdata([0, 2, 3, 2], method='min')
    array([ 1,  2,  4,  2])
    >>> rankdata([0, 2, 3, 2], method='max')
    array([ 1,  3,  4,  3])
    >>> rankdata([0, 2, 3, 2], method='dense')
    array([ 1,  2,  3,  2])
    >>> rankdata([0, 2, 3, 2], method='ordinal')
    array([ 1,  2,  4,  3])

    """
    if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
        raise ValueError('unknown method "{0}"'.format(method))

    flat = np.ravel(np.asarray(a))
    # 'ordinal' must break ties by input order, so a stable sort is needed.
    kind = 'mergesort' if method == 'ordinal' else 'quicksort'
    order = np.argsort(flat, kind=kind)

    # inverse[i] = position of flat[i] in the sorted array.
    inverse = np.empty(order.size, dtype=np.intp)
    inverse[order] = np.arange(order.size, dtype=np.intp)

    if method == 'ordinal':
        return inverse + 1

    # Mark the first occurrence of each distinct sorted value, then number
    # the distinct values consecutively to get dense ranks.
    sorted_flat = flat[order]
    is_new = np.r_[True, sorted_flat[1:] != sorted_flat[:-1]]
    dense = is_new.cumsum()[inverse]

    if method == 'dense':
        return dense

    # counts[k] = number of elements <= the k-th distinct value.
    counts = np.r_[np.nonzero(is_new)[0], len(is_new)]

    if method == 'max':
        return counts[dense]

    if method == 'min':
        return counts[dense - 1] + 1

    # average method
    return .5 * (counts[dense] + counts[dense - 1] + 1)
| jamestwebber/scipy | scipy/stats/stats.py | Python | bsd-3-clause | 257,732 | [
"DIRAC"
] | 1ff33fd8e2ed92246bfbd9a74068149b121a6c2f452cc0f98145532394886102 |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import unittest.mock as mock
import os.path
import tempfile
import pandas as pd
import skbio
import qiime2
import numpy as np
import numpy.testing as npt
from q2_demux._demux import (BarcodeSequenceFastqIterator,
BarcodePairedSequenceFastqIterator)
from q2_demux import emp_single, emp_paired, summarize
from q2_types.per_sample_sequences import (
FastqGzFormat, FastqManifestFormat, YamlFormat)
from q2_demux._summarize._visualizer import _PlotQualView
class BarcodeSequenceFastqIteratorTests(unittest.TestCase):
def test_valid(self):
barcodes = [('@s1/2 abc/2', 'AAAA', '+', 'YYYY'),
('@s2/2 abc/2', 'AAAA', '+', 'PPPP'),
('@s3/2 abc/2', 'AACC', '+', 'PPPP'),
('@s4/2 abc/2', 'AACC', '+', 'PPPP')]
sequences = [('@s1/1 abc/1', 'GGG', '+', 'YYY'),
('@s2/1 abc/1', 'CCC', '+', 'PPP'),
('@s3/1 abc/1', 'AAA', '+', 'PPP'),
('@s4/1 abc/1', 'TTT', '+', 'PPP')]
bsi = BarcodeSequenceFastqIterator(barcodes, sequences)
for i, (barcode, sequence) in enumerate(bsi):
self.assertEqual(barcode, barcodes[i])
self.assertEqual(sequence, sequences[i])
def test_too_few_barcodes(self):
barcodes = [('@s1/2 abc/2', 'AAAA', '+', 'YYYY'),
('@s2/2 abc/2', 'AAAA', '+', 'PPPP'),
('@s3/2 abc/2', 'AACC', '+', 'PPPP')]
sequences = [('@s1/1 abc/1', 'GGG', '+', 'YYY'),
('@s2/1 abc/1', 'CCC', '+', 'PPP'),
('@s3/1 abc/1', 'AAA', '+', 'PPP'),
('@s4/1 abc/1', 'TTT', '+', 'PPP')]
bsi = BarcodeSequenceFastqIterator(barcodes, sequences)
with self.assertRaises(ValueError):
list(bsi)
def test_too_few_sequences(self):
barcodes = [('@s1/2 abc/2', 'AAAA', '+', 'YYYY'),
('@s2/2 abc/2', 'AAAA', '+', 'PPPP'),
('@s3/2 abc/2', 'AACC', '+', 'PPPP'),
('@s4/2 abc/2', 'AACC', '+', 'PPPP')]
sequences = [('@s1/1 abc/1', 'GGG', '+', 'YYY')]
bsi = BarcodeSequenceFastqIterator(barcodes, sequences)
with self.assertRaises(ValueError):
list(bsi)
def test_mismatched_id(self):
barcodes = [('@s1/2 abc/2', 'AAAA', '+', 'YYYY'),
('@s2/2 abc/2', 'AAAA', '+', 'PPPP'),
('@s3/2 abc/2', 'AACC', '+', 'PPPP'),
('@s4/2 abc/2', 'AACC', '+', 'PPPP')]
sequences = [('@s1/1 abc/1', 'GGG', '+', 'YYY'),
('@s2/1 abc/1', 'CCC', '+', 'PPP'),
('@s3/1 abc/1', 'AAA', '+', 'PPP'),
('@s5/1 abc/1', 'TTT', '+', 'PPP')]
bsi = BarcodeSequenceFastqIterator(barcodes, sequences)
with self.assertRaises(ValueError):
list(bsi)
def test_mismatched_description(self):
barcodes = [('@s1/2 abc/2', 'AAAA', '+', 'YYYY'),
('@s2/2 abc/2', 'AAAA', '+', 'PPPP'),
('@s3/2 abc/2', 'AACC', '+', 'PPPP'),
('@s4/2 abc/2', 'AACC', '+', 'PPPP')]
sequences = [('@s1/1 abc/1', 'GGG', '+', 'YYY'),
('@s2/1 abc/1', 'CCC', '+', 'PPP'),
('@s3/1 abc/1', 'AAA', '+', 'PPP'),
('@s4/1 abd/1', 'TTT', '+', 'PPP')]
bsi = BarcodeSequenceFastqIterator(barcodes, sequences)
with self.assertRaises(ValueError):
list(bsi)
def test_mismatched_handles_slashes_in_id(self):
# mismatch is detected as being before the last slash, even if there
# is more than one slash
barcodes = [('@s1/2/2 abc/2', 'AAAA', '+', 'YYYY')]
sequences = [('@s1/1/1 abc/1', 'GGG', '+', 'YYY')]
bsi = BarcodeSequenceFastqIterator(barcodes, sequences)
with self.assertRaises(ValueError):
list(bsi)
def test_mismatched_handles_slashes_in_description(self):
# mismatch is detected as being before the last slash, even if there
# is more than one slash
barcodes = [('@s1/2 a/2/2', 'AAAA', '+', 'YYYY')]
sequences = [('@s1/1 a/1/1', 'GGG', '+', 'YYY')]
bsi = BarcodeSequenceFastqIterator(barcodes, sequences)
with self.assertRaises(ValueError):
list(bsi)
def test_no_description(self):
barcodes = [('@s1/2', 'AAAA', '+', 'YYYY'),
('@s2/2', 'AAAA', '+', 'PPPP'),
('@s3/2', 'AACC', '+', 'PPPP'),
('@s4/2', 'AACC', '+', 'PPPP')]
sequences = [('@s1/1', 'GGG', '+', 'YYY'),
('@s2/1', 'CCC', '+', 'PPP'),
('@s3/1', 'AAA', '+', 'PPP'),
('@s4/1', 'TTT', '+', 'PPP')]
bsi = BarcodeSequenceFastqIterator(barcodes, sequences)
for i, (barcode, sequence) in enumerate(bsi):
self.assertEqual(barcode, barcodes[i])
self.assertEqual(sequence, sequences[i])
def test_only_one_description(self):
    """Raise when exactly one record of a pair carries a description."""
    b_data = list(zip(['AAAA', 'AAAA', 'AACC', 'AACC'],
                      ['YYYY', 'PPPP', 'PPPP', 'PPPP']))
    s_data = list(zip(['GGG', 'CCC', 'AAA', 'TTT'],
                      ['YYY', 'PPP', 'PPP', 'PPP']))

    def records(template, data):
        # build fastq records @s1.., @s2.. from (sequence, quality) pairs
        return [(template % i, seq, '+', qual)
                for i, (seq, qual) in enumerate(data, start=1)]

    # description on the barcode records only
    bsi = BarcodeSequenceFastqIterator(records('@s%d/2 abc', b_data),
                                       records('@s%d/1', s_data))
    with self.assertRaises(ValueError):
        list(bsi)

    # description on the sequence records only
    bsi = BarcodeSequenceFastqIterator(records('@s%d/2', b_data),
                                       records('@s%d/1 abc', s_data))
    with self.assertRaises(ValueError):
        list(bsi)
class EmpTestingUtils:
    """Mixin with helpers for validating demultiplexed EMP outputs.

    Intended to be mixed into a unittest.TestCase subclass: all helpers
    except _decode_qual_to_phred call assert* methods on self.
    """

    def _decode_qual_to_phred(self, qual_str):
        """Decode an ASCII quality string into Phred scores (offset 33).

        Adapted from scikit-bio.  Uses np.frombuffer on the encoded bytes
        rather than the deprecated np.fromstring, which does not accept a
        Python 3 str.
        """
        return np.frombuffer(qual_str.encode('ascii'), dtype=np.uint8) - 33

    def _compare_sequence_to_record(self, sequence, fields):
        """Assert a skbio sequence matches a raw 4-field FASTQ record."""
        header_line = ' '.join([sequence.metadata['id'],
                                sequence.metadata['description']])
        # fields[0] includes the leading '@'; strip it before comparing
        self.assertEqual(fields[0][1:], header_line)
        self.assertEqual(fields[1], str(sequence))
        npt.assert_array_equal(self._decode_qual_to_phred(fields[3]),
                               sequence.positional_metadata['quality'])

    def _compare_manifests(self, act_manifest, exp_manifest):
        """Compare manifests, ignoring comment lines in the actual one."""
        # strip comment lines before comparing
        act_manifest = [line for line in act_manifest
                        if not line.startswith('#')]
        self.assertEqual(act_manifest, exp_manifest)

    def _validate_sample_fastq(self, fastq, sequences, indices):
        """Check a per-sample fastq.gz holds exactly sequences[indices]."""
        seqs = skbio.io.read(fastq, format='fastq', phred_offset=33,
                             compression='gzip', constructor=skbio.DNA)
        seqs = list(seqs)
        self.assertEqual(len(seqs), len(indices))
        for idx, i in enumerate(indices):
            self._compare_sequence_to_record(seqs[idx], sequences[i])
class EmpSingleTests(unittest.TestCase, EmpTestingUtils):
    """Tests for emp_single demultiplexing of single-end reads."""

    def setUp(self):
        # Eleven barcode reads covering the five samples in barcode_map below.
        barcodes = [('@s1/2 abc/2', 'AAAA', '+', 'YYYY'),
                    ('@s2/2 abc/2', 'TTAA', '+', 'PPPP'),
                    ('@s3/2 abc/2', 'AACC', '+', 'PPPP'),
                    ('@s4/2 abc/2', 'TTAA', '+', 'PPPP'),
                    ('@s5/2 abc/2', 'AACC', '+', 'PPPP'),
                    ('@s6/2 abc/2', 'AAAA', '+', 'PPPP'),
                    ('@s7/2 abc/2', 'CGGC', '+', 'PPPP'),
                    ('@s8/2 abc/2', 'GGAA', '+', 'PPPP'),
                    ('@s9/2 abc/2', 'CGGC', '+', 'PPPP'),
                    ('@s10/2 abc/2', 'CGGC', '+', 'PPPP'),
                    ('@s11/2 abc/2', 'GGAA', '+', 'PPPP')]
        self.sequences = [('@s1/1 abc/1', 'GGG', '+', 'YYY'),
                          ('@s2/1 abc/1', 'CCC', '+', 'PPP'),
                          ('@s3/1 abc/1', 'AAA', '+', 'PPP'),
                          ('@s4/1 abc/1', 'TTT', '+', 'PPP'),
                          ('@s5/1 abc/1', 'ATA', '+', 'PPP'),
                          ('@s6/1 abc/1', 'TAT', '+', 'PPP'),
                          ('@s7/1 abc/1', 'CGC', '+', 'PPP'),
                          ('@s8/1 abc/1', 'GCG', '+', 'PPP'),
                          ('@s9/1 abc/1', 'ACG', '+', 'PPP'),
                          ('@s10/1 abc/1', 'GCA', '+', 'PPP'),
                          ('@s11/1 abc/1', 'TGA', '+', 'PPP')]
        self.bsi = BarcodeSequenceFastqIterator(barcodes, self.sequences)

        barcode_map = pd.Series(['AAAA', 'AACC', 'TTAA', 'GGAA', 'CGGC'],
                                index=['sample1', 'sample2', 'sample3',
                                       'sample4', 'sample5'])
        self.barcode_map = qiime2.MetadataCategory(barcode_map)

    def _check_demux_results(self, actual):
        """Shared validation of per-sample reads, manifest and metadata.

        Every positive-path test in this class demultiplexes the same
        underlying reads, so the expected outputs are identical.
        """
        output_fastq = list(actual.sequences.iter_views(FastqGzFormat))
        # five per-sample files were written
        self.assertEqual(len(output_fastq), 5)
        # expected indices into self.sequences for sample1..sample5,
        # in the order the per-sample files are written
        expected_indices = [[0, 5], [2, 4], [1, 3], [7, 10], [6, 8, 9]]
        for (_, view), indices in zip(output_fastq, expected_indices):
            self._validate_sample_fastq(view.open(), self.sequences, indices)
        # manifest is correct
        act_manifest = list(actual.manifest.view(FastqManifestFormat).open())
        exp_manifest = ['sample-id,filename,direction\n',
                        'sample1,sample1_1_L001_R1_001.fastq.gz,forward\n',
                        'sample3,sample3_2_L001_R1_001.fastq.gz,forward\n',
                        'sample2,sample2_3_L001_R1_001.fastq.gz,forward\n',
                        'sample5,sample5_4_L001_R1_001.fastq.gz,forward\n',
                        'sample4,sample4_5_L001_R1_001.fastq.gz,forward\n']
        self._compare_manifests(act_manifest, exp_manifest)
        # metadata is correct
        act_metadata = list(actual.metadata.view(YamlFormat).open())
        self.assertEqual(act_metadata, ["{phred-offset: 33}\n"])

    def test_valid(self):
        self._check_demux_results(emp_single(self.bsi, self.barcode_map))

    @mock.patch('q2_demux._demux.OPEN_FH_LIMIT', 3)
    def test_valid_small_open_fh_limit(self):
        # same expectations, but forces recycling of open file handles
        self.test_valid()

    def test_variable_length_barcodes(self):
        barcodes = pd.Series(['AAA', 'AACC'], index=['sample1', 'sample2'])
        barcodes = qiime2.MetadataCategory(barcodes)
        with self.assertRaises(ValueError):
            emp_single(self.bsi, barcodes)

    def test_duplicate_barcodes(self):
        barcodes = pd.Series(['AACC', 'AACC'], index=['sample1', 'sample2'])
        barcodes = qiime2.MetadataCategory(barcodes)
        with self.assertRaises(ValueError):
            emp_single(self.bsi, barcodes)

    def test_no_matched_barcodes(self):
        barcodes = pd.Series(['CCCC', 'GGCC'], index=['sample1', 'sample2'])
        barcodes = qiime2.MetadataCategory(barcodes)
        with self.assertRaises(ValueError):
            emp_single(self.bsi, barcodes)

    def test_rev_comp_mapping_barcodes(self):
        # mapping barcodes are the reverse complements of the read barcodes
        barcodes = pd.Series(['TTTT', 'GGTT', 'TTAA', 'TTCC', 'GCCG'],
                             index=['sample1', 'sample2', 'sample3',
                                    'sample4', 'sample5'])
        barcodes = qiime2.MetadataCategory(barcodes)
        actual = emp_single(self.bsi, barcodes,
                            rev_comp_mapping_barcodes=True)
        self._check_demux_results(actual)

    def test_rev_comp_barcodes(self):
        # read barcodes are the reverse complements of the mapping barcodes
        barcodes = [('@s1/2 abc/2', 'TTTT', '+', 'YYYY'),
                    ('@s2/2 abc/2', 'TTAA', '+', 'PPPP'),
                    ('@s3/2 abc/2', 'GGTT', '+', 'PPPP'),
                    ('@s4/2 abc/2', 'TTAA', '+', 'PPPP'),
                    ('@s5/2 abc/2', 'GGTT', '+', 'PPPP'),
                    ('@s6/2 abc/2', 'TTTT', '+', 'PPPP'),
                    ('@s7/2 abc/2', 'GCCG', '+', 'PPPP'),
                    ('@s8/2 abc/2', 'TTCC', '+', 'PPPP'),
                    ('@s9/2 abc/2', 'GCCG', '+', 'PPPP'),
                    ('@s10/2 abc/2', 'GCCG', '+', 'PPPP'),
                    ('@s11/2 abc/2', 'TTCC', '+', 'PPPP')]
        bsi = BarcodeSequenceFastqIterator(barcodes, self.sequences)
        actual = emp_single(bsi, self.barcode_map, rev_comp_barcodes=True)
        self._check_demux_results(actual)

    def test_barcode_trimming(self):
        # these barcodes are longer than the ones in the mapping file, so
        # only the first barcode_length bases should be read
        barcodes = [('@s1/2 abc/2', 'AAAAG', '+', 'YYYY'),
                    ('@s2/2 abc/2', 'TTAAG', '+', 'PPPP'),
                    ('@s3/2 abc/2', 'AACCG', '+', 'PPPP'),
                    ('@s4/2 abc/2', 'TTAAG', '+', 'PPPP'),
                    ('@s5/2 abc/2', 'AACCG', '+', 'PPPP'),
                    ('@s6/2 abc/2', 'AAAAG', '+', 'PPPP'),
                    ('@s7/2 abc/2', 'CGGCG', '+', 'PPPP'),
                    ('@s8/2 abc/2', 'GGAAG', '+', 'PPPP'),
                    ('@s9/2 abc/2', 'CGGCG', '+', 'PPPP'),
                    ('@s10/2 abc/2', 'CGGCG', '+', 'PPPP'),
                    ('@s11/2 abc/2', 'GGAAG', '+', 'PPPP')]
        bsi = BarcodeSequenceFastqIterator(barcodes, self.sequences)
        self._check_demux_results(emp_single(bsi, self.barcode_map))
class EmpPairedTests(unittest.TestCase, EmpTestingUtils):
    """Tests for emp_paired demultiplexing of paired-end reads."""

    def setUp(self):
        # Eleven barcode reads covering the five samples in barcode_map below.
        self.barcodes = [('@s1/2 abc/2', 'AAAA', '+', 'YYYY'),
                         ('@s2/2 abc/2', 'TTAA', '+', 'PPPP'),
                         ('@s3/2 abc/2', 'AACC', '+', 'PPPP'),
                         ('@s4/2 abc/2', 'TTAA', '+', 'PPPP'),
                         ('@s5/2 abc/2', 'AACC', '+', 'PPPP'),
                         ('@s6/2 abc/2', 'AAAA', '+', 'PPPP'),
                         ('@s7/2 abc/2', 'CGGC', '+', 'PPPP'),
                         ('@s8/2 abc/2', 'GGAA', '+', 'PPPP'),
                         ('@s9/2 abc/2', 'CGGC', '+', 'PPPP'),
                         ('@s10/2 abc/2', 'CGGC', '+', 'PPPP'),
                         ('@s11/2 abc/2', 'GGAA', '+', 'PPPP')]
        self.forward = [('@s1/1 abc/1', 'GGG', '+', 'YYY'),
                        ('@s2/1 abc/1', 'CCC', '+', 'PPP'),
                        ('@s3/1 abc/1', 'AAA', '+', 'PPP'),
                        ('@s4/1 abc/1', 'TTT', '+', 'PPP'),
                        ('@s5/1 abc/1', 'ATA', '+', 'PPP'),
                        ('@s6/1 abc/1', 'TAT', '+', 'PPP'),
                        ('@s7/1 abc/1', 'CGC', '+', 'PPP'),
                        ('@s8/1 abc/1', 'GCG', '+', 'PPP'),
                        ('@s9/1 abc/1', 'ACG', '+', 'PPP'),
                        ('@s10/1 abc/1', 'GCA', '+', 'PPP'),
                        ('@s11/1 abc/1', 'TGA', '+', 'PPP')]
        # reverse reads pair one-to-one with the forward reads above
        self.reverse = [('@s1/1 abc/1', 'CCC', '+', 'YYY'),
                        ('@s2/1 abc/1', 'GGG', '+', 'PPP'),
                        ('@s3/1 abc/1', 'TTT', '+', 'PPP'),
                        ('@s4/1 abc/1', 'AAA', '+', 'PPP'),
                        ('@s5/1 abc/1', 'TAT', '+', 'PPP'),
                        ('@s6/1 abc/1', 'ATA', '+', 'PPP'),
                        ('@s7/1 abc/1', 'GCG', '+', 'PPP'),
                        ('@s8/1 abc/1', 'CGC', '+', 'PPP'),
                        ('@s9/1 abc/1', 'CGT', '+', 'PPP'),
                        ('@s10/1 abc/1', 'TGC', '+', 'PPP'),
                        ('@s11/1 abc/1', 'TCA', '+', 'PPP')]
        self.bpsi = BarcodePairedSequenceFastqIterator(
            self.barcodes, self.forward, self.reverse)

        barcode_map = pd.Series(['AAAA', 'AACC', 'TTAA', 'GGAA', 'CGGC'],
                                index=['sample1', 'sample2', 'sample3',
                                       'sample4', 'sample5'])
        self.barcode_map = qiime2.MetadataCategory(barcode_map)

    def check_valid(self, *args, **kwargs):
        """Run emp_paired with the given arguments and validate all outputs.

        Shared by every positive-path test: they all demultiplex the same
        underlying reads, so the expected per-sample files, manifest and
        metadata are identical.
        """
        actual = emp_paired(*args, **kwargs)

        # five forward sample files
        forward_fastq = [
            view for path, view in actual.sequences.iter_views(FastqGzFormat)
            if 'R1_001.fastq' in path.name]
        self.assertEqual(len(forward_fastq), 5)

        # five reverse sample files
        reverse_fastq = [
            view for path, view in actual.sequences.iter_views(FastqGzFormat)
            if 'R2_001.fastq' in path.name]
        self.assertEqual(len(reverse_fastq), 5)

        # FORWARD:
        # sequences in sample1 are correct
        self._validate_sample_fastq(
            forward_fastq[0].open(), self.forward, [0, 5])
        # sequences in sample2 are correct
        self._validate_sample_fastq(
            forward_fastq[1].open(), self.forward, [2, 4])
        # sequences in sample3 are correct
        self._validate_sample_fastq(
            forward_fastq[2].open(), self.forward, [1, 3])
        # sequences in sample4 are correct
        self._validate_sample_fastq(
            forward_fastq[3].open(), self.forward, [7, 10])
        # sequences in sample5 are correct
        self._validate_sample_fastq(
            forward_fastq[4].open(), self.forward, [6, 8, 9])

        # REVERSE:
        # sequences in sample1 are correct
        self._validate_sample_fastq(
            reverse_fastq[0].open(), self.reverse, [0, 5])
        # sequences in sample2 are correct
        self._validate_sample_fastq(
            reverse_fastq[1].open(), self.reverse, [2, 4])
        # sequences in sample3 are correct
        self._validate_sample_fastq(
            reverse_fastq[2].open(), self.reverse, [1, 3])
        # sequences in sample4 are correct
        self._validate_sample_fastq(
            reverse_fastq[3].open(), self.reverse, [7, 10])
        # sequences in sample5 are correct
        self._validate_sample_fastq(
            reverse_fastq[4].open(), self.reverse, [6, 8, 9])

        # manifest is correct: one forward and one reverse entry per sample
        act_manifest = list(actual.manifest.view(FastqManifestFormat).open())
        exp_manifest = ['sample-id,filename,direction\n',
                        'sample1,sample1_1_L001_R1_001.fastq.gz,forward\n',
                        'sample1,sample1_1_L001_R2_001.fastq.gz,reverse\n',
                        'sample3,sample3_2_L001_R1_001.fastq.gz,forward\n',
                        'sample3,sample3_2_L001_R2_001.fastq.gz,reverse\n',
                        'sample2,sample2_3_L001_R1_001.fastq.gz,forward\n',
                        'sample2,sample2_3_L001_R2_001.fastq.gz,reverse\n',
                        'sample5,sample5_4_L001_R1_001.fastq.gz,forward\n',
                        'sample5,sample5_4_L001_R2_001.fastq.gz,reverse\n',
                        'sample4,sample4_5_L001_R1_001.fastq.gz,forward\n',
                        'sample4,sample4_5_L001_R2_001.fastq.gz,reverse\n']
        self._compare_manifests(act_manifest, exp_manifest)

        # metadata is correct
        act_metadata = list(actual.metadata.view(YamlFormat).open())
        exp_metadata = ["{phred-offset: 33}\n"]
        self.assertEqual(act_metadata, exp_metadata)

    def test_valid(self):
        self.check_valid(self.bpsi, self.barcode_map)

    @mock.patch('q2_demux._demux.OPEN_FH_LIMIT', 6)
    def test_valid_small_open_fh_limit(self):
        # same expectations, but forces recycling of open file handles
        self.test_valid()

    def test_variable_length_barcodes(self):
        barcodes = pd.Series(['AAA', 'AACC'], index=['sample1', 'sample2'])
        barcodes = qiime2.MetadataCategory(barcodes)
        with self.assertRaises(ValueError):
            emp_paired(self.bpsi, barcodes)

    def test_duplicate_barcodes(self):
        barcodes = pd.Series(['AACC', 'AACC'], index=['sample1', 'sample2'])
        barcodes = qiime2.MetadataCategory(barcodes)
        with self.assertRaises(ValueError):
            emp_paired(self.bpsi, barcodes)

    def test_no_matched_barcodes(self):
        barcodes = pd.Series(['CCCC', 'GGCC'], index=['sample1', 'sample2'])
        barcodes = qiime2.MetadataCategory(barcodes)
        with self.assertRaises(ValueError):
            emp_paired(self.bpsi, barcodes)

    def test_rev_comp_mapping_barcodes(self):
        # mapping barcodes are the reverse complements of the read barcodes
        barcodes = pd.Series(['TTTT', 'GGTT', 'TTAA', 'TTCC', 'GCCG'],
                             index=['sample1', 'sample2', 'sample3', 'sample4',
                                    'sample5'])
        barcodes = qiime2.MetadataCategory(barcodes)
        self.check_valid(self.bpsi, barcodes, rev_comp_mapping_barcodes=True)

    def test_rev_comp_barcodes(self):
        # read barcodes are the reverse complements of the mapping barcodes
        barcodes = [('@s1/2 abc/2', 'TTTT', '+', 'YYYY'),
                    ('@s2/2 abc/2', 'TTAA', '+', 'PPPP'),
                    ('@s3/2 abc/2', 'GGTT', '+', 'PPPP'),
                    ('@s4/2 abc/2', 'TTAA', '+', 'PPPP'),
                    ('@s5/2 abc/2', 'GGTT', '+', 'PPPP'),
                    ('@s6/2 abc/2', 'TTTT', '+', 'PPPP'),
                    ('@s7/2 abc/2', 'GCCG', '+', 'PPPP'),
                    ('@s8/2 abc/2', 'TTCC', '+', 'PPPP'),
                    ('@s9/2 abc/2', 'GCCG', '+', 'PPPP'),
                    ('@s10/2 abc/2', 'GCCG', '+', 'PPPP'),
                    ('@s11/2 abc/2', 'TTCC', '+', 'PPPP')]
        bpsi = BarcodePairedSequenceFastqIterator(
            barcodes, self.forward, self.reverse)
        self.check_valid(bpsi, self.barcode_map, rev_comp_barcodes=True)

    def test_barcode_trimming(self):
        # these barcodes are longer then the ones in the mapping file, so
        # only the first barcode_length bases should be read
        barcodes = [('@s1/2 abc/2', 'AAAAG', '+', 'YYYY'),
                    ('@s2/2 abc/2', 'TTAAG', '+', 'PPPP'),
                    ('@s3/2 abc/2', 'AACCG', '+', 'PPPP'),
                    ('@s4/2 abc/2', 'TTAAG', '+', 'PPPP'),
                    ('@s5/2 abc/2', 'AACCG', '+', 'PPPP'),
                    ('@s6/2 abc/2', 'AAAAG', '+', 'PPPP'),
                    ('@s7/2 abc/2', 'CGGCG', '+', 'PPPP'),
                    ('@s8/2 abc/2', 'GGAAG', '+', 'PPPP'),
                    ('@s9/2 abc/2', 'CGGCG', '+', 'PPPP'),
                    ('@s10/2 abc/2', 'CGGCG', '+', 'PPPP'),
                    ('@s11/2 abc/2', 'GGAAG', '+', 'PPPP')]
        bpsi = BarcodePairedSequenceFastqIterator(
            barcodes, self.forward, self.reverse)
        self.check_valid(bpsi, self.barcode_map)
class SummarizeTests(unittest.TestCase):
    """Tests for the summarize visualizer on demultiplexed reads."""

    def setUp(self):
        # Four reads: three carry barcode AAAA, one carries AACC.
        self.barcodes = [('@s1/2 abc/2', 'AAAA', '+', 'YYYY'),
                         ('@s2/2 abc/2', 'AAAA', '+', 'PPPP'),
                         ('@s3/2 abc/2', 'AAAA', '+', 'PPPP'),
                         ('@s4/2 abc/2', 'AACC', '+', 'PPPP')]
        self.sequences = [('@s1/1 abc/1', 'GGG', '+', 'YYY'),
                          ('@s2/1 abc/1', 'CCC', '+', 'PPP'),
                          ('@s3/1 abc/1', 'AAA', '+', 'PPP'),
                          ('@s4/1 abc/1', 'TTT', '+', 'PPP')]

    def test_basic(self):
        bsi = BarcodeSequenceFastqIterator(self.barcodes, self.sequences)

        barcode_map = pd.Series(['AAAA', 'AACC'], index=['sample1', 'sample2'])
        barcode_map = qiime2.MetadataCategory(barcode_map)

        demux_data = emp_single(bsi, barcode_map)
        # test that an index.html file is created and that it has size > 0
        # TODO: Remove _PlotQualView wrapper
        with tempfile.TemporaryDirectory() as output_dir:
            result = summarize(output_dir, _PlotQualView(demux_data,
                                                         paired=False), n=2)
            # summarize is a visualizer: it writes files and returns None
            self.assertTrue(result is None)
            index_fp = os.path.join(output_dir, 'overview.html')
            self.assertTrue(os.path.exists(index_fp))
            self.assertTrue(os.path.getsize(index_fp) > 0)
            csv_fp = os.path.join(output_dir, 'per-sample-fastq-counts.csv')
            self.assertTrue(os.path.exists(csv_fp))
            self.assertTrue(os.path.getsize(csv_fp) > 0)
            pdf_fp = os.path.join(output_dir, 'demultiplex-summary.pdf')
            self.assertTrue(os.path.exists(pdf_fp))
            self.assertTrue(os.path.getsize(pdf_fp) > 0)
            png_fp = os.path.join(output_dir, 'demultiplex-summary.png')
            self.assertTrue(os.path.exists(png_fp))
            self.assertTrue(os.path.getsize(png_fp) > 0)
            with open(index_fp, 'r') as fh:
                html = fh.read()
                # sample1 received 3 reads, sample2 received 1
                self.assertIn('<td>Minimum:</td><td>1</td>', html)
                self.assertIn('<td>Maximum:</td><td>3</td>', html)

    def test_single_sample(self):
        # only the first read/barcode, so everything demuxes to one sample
        bsi = BarcodeSequenceFastqIterator(self.barcodes[:1],
                                           self.sequences[:1])

        barcode_map = pd.Series(['AAAA'], index=['sample1'])
        barcode_map = qiime2.MetadataCategory(barcode_map)

        demux_data = emp_single(bsi, barcode_map)
        # test that an index.html file is created and that it has size > 0
        # TODO: Remove _PlotQualView wrapper
        with tempfile.TemporaryDirectory() as output_dir:
            result = summarize(output_dir, _PlotQualView(demux_data,
                                                         paired=False), n=1)
            self.assertTrue(result is None)
            index_fp = os.path.join(output_dir, 'overview.html')
            self.assertTrue(os.path.exists(index_fp))
            self.assertTrue(os.path.getsize(index_fp) > 0)
            csv_fp = os.path.join(output_dir, 'per-sample-fastq-counts.csv')
            self.assertTrue(os.path.exists(csv_fp))
            self.assertTrue(os.path.getsize(csv_fp) > 0)
            # with a single sample no summary plots are produced
            pdf_fp = os.path.join(output_dir, 'demultiplex-summary.pdf')
            self.assertFalse(os.path.exists(pdf_fp))
            png_fp = os.path.join(output_dir, 'demultiplex-summary.png')
            self.assertFalse(os.path.exists(png_fp))
            with open(index_fp, 'r') as fh:
                html = fh.read()
                self.assertIn('<td>Minimum:</td><td>1</td>', html)
                self.assertIn('<td>Maximum:</td><td>1</td>', html)

    def test_paired_end(self):
        barcodes = self.barcodes[:3]

        forward = self.sequences[:3]

        reverse = [('@s1/1 abc/1', 'CCC', '+', 'YYY'),
                   ('@s2/1 abc/1', 'GGG', '+', 'PPP'),
                   ('@s3/1 abc/1', 'TTT', '+', 'PPP')]

        bpsi = BarcodePairedSequenceFastqIterator(barcodes, forward, reverse)

        barcode_map = pd.Series(['AAAA', 'AACC', 'TTAA'],
                                index=['sample1', 'sample2', 'sample3'])
        barcode_map = qiime2.MetadataCategory(barcode_map)

        demux_data = emp_paired(bpsi, barcode_map)
        with tempfile.TemporaryDirectory() as output_dir:
            result = summarize(output_dir, _PlotQualView(demux_data,
                                                         paired=True), n=2)
            self.assertTrue(result is None)
            plot_fp = os.path.join(output_dir, 'quality-plot.html')
            with open(plot_fp, 'r') as fh:
                html = fh.read()
                # paired data renders both a forward and a reverse panel
                self.assertIn('<h5 class="text-center">Forward Reads</h5>',
                              html)
                self.assertIn('<h5 class="text-center">Reverse Reads</h5>',
                              html)

    def test_subsample_higher_than_seqs_count(self):
        # request a subsample (n=50) larger than the available read count
        barcodes = self.barcodes[:1]

        sequences = self.sequences[:1]
        bsi = BarcodeSequenceFastqIterator(barcodes, sequences)

        barcode_map = pd.Series(['AAAA'], index=['sample1'])
        barcode_map = qiime2.MetadataCategory(barcode_map)

        demux_data = emp_single(bsi, barcode_map)
        with tempfile.TemporaryDirectory() as output_dir:
            result = summarize(output_dir, _PlotQualView(demux_data,
                                                         paired=False), n=50)
            self.assertTrue(result is None)
            plot_fp = os.path.join(output_dir, 'quality-plot.html')
            with open(plot_fp, 'r') as fh:
                html = fh.read()
                # oversubsampling should surface a warning banner
                self.assertIn('<strong>Warning:</strong>', html)

    def test_phred_score_out_of_range(self):
        barcodes = self.barcodes[:3]

        # quality characters 'h'-'j' exceed the expected phred-33 range
        sequences = [('@s1/1 abc/1', 'GGG', '+', 'jjj'),
                     ('@s2/1 abc/1', 'CCC', '+', 'iii'),
                     ('@s3/1 abc/1', 'AAA', '+', 'hhh')]
        bsi = BarcodeSequenceFastqIterator(barcodes, sequences)

        barcode_map = pd.Series(['AAAA', 'AACC', 'TTAA'],
                                index=['sample1', 'sample2', 'sample3'])
        barcode_map = qiime2.MetadataCategory(barcode_map)

        demux_data = emp_single(bsi, barcode_map)
        with tempfile.TemporaryDirectory() as output_dir:
            result = summarize(output_dir, _PlotQualView(demux_data,
                                                         paired=False), n=50)
            self.assertTrue(result is None)
            plot_fp = os.path.join(output_dir, 'quality-plot.html')
            with open(plot_fp, 'r') as fh:
                html = fh.read()
                # out-of-range scores should surface a danger banner
                self.assertIn('<strong>Danger:</strong>', html)
| nervous-laughter/q2-demux | q2_demux/tests/test_demux.py | Python | bsd-3-clause | 35,140 | [
"scikit-bio"
] | bec51178e83bc3f61780cd91d0b468ab065e438c1d1d28146a7dc91e2f8e1f5a |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
from tf_sparsenet import Sparsenet as snet
try:
import matplotlib.pyplot as plt
except ImportError:
print("Can't import matplotlib. No plotting.")
def block_diag(*arrs):
    """Create a block-diagonal matrix from the given arrays.

    Copied from scipy.linalg.block_diag to avoid a scipy dependency, with
    one fix: the output dtype is the common type of all inputs rather than
    the dtype of the first input (which silently truncated, e.g., float
    blocks following an integer block).

    Args:
        *arrs: array-likes accepted by np.atleast_2d (at most 2-D each).

    Returns:
        2-D ndarray with the inputs on the diagonal and zeros elsewhere.

    Raises:
        ValueError: if any input has more than 2 dimensions.
    """
    if arrs == ():
        arrs = ([],)
    arrs = [np.atleast_2d(a) for a in arrs]

    bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2]
    if bad_args:
        raise ValueError("arguments in the following positions have dimension "
                         "greater than 2: %s" % bad_args)

    shapes = np.array([a.shape for a in arrs])
    # common dtype so no block is silently truncated on assignment
    out_dtype = np.result_type(*(a.dtype for a in arrs))
    out = np.zeros(np.sum(shapes, axis=0), dtype=out_dtype)

    r, c = 0, 0
    for block, (rr, cc) in zip(arrs, shapes):
        out[r:r + rr, c:c + cc] = block
        r += rr
        c += cc
    return out
class TopoSparsenet(snet):
    """Topographic Sparsenet with TensorFlow backend
    and a few methods for defining topologies."""

    def __init__(self, data, datatype="image", pca=None,
                 dict_shape=(30, 30), topo=None, lam_g=0.1,
                 **kwargs):
        """
        Topographic Sparsenet inherits from Sparsenet. Its unique
        attributes give the dictionary a shape and define the relative
        weight of the topographic term in the cost function.
        The topology matrix g is defined by the topology object topo.

        Args:
            lam_g : float, defines weight of topography term
            dict_shape : tuple (len, wid) of ints specifying shape of dictionary
        """
        self.lam_g = lam_g
        self.epsilon = 0.0001  # regularizes derivative of the square root at 0
        self.dict_shape = dict_shape
        nunits = int(np.prod(self.dict_shape))
        # default topology spans the full dictionary with one component
        self.topo = topo or topology((nunits, nunits))
        nunits = self.topo.ncomponents * nunits
        # Unless the caller supplies lam, default the L1 weight to 0 here
        # (the topographic term replaces it); this replaces the original
        # fragile try/except KeyError around the parent constructor.
        kwargs.setdefault('lam', 0)
        snet.__init__(self, data, nunits=nunits, datatype=datatype,
                      pca=pca, **kwargs)

    def build_graph(self):
        """Build inference, learning and gain-normalization ops.

        Returns:
            The default TensorFlow graph the ops were added to.
        """
        graph = tf.get_default_graph()

        # fixed topology matrix pooling layer-1 activities into layer 2
        self.g = tf.constant(self.topo.get_matrix(), dtype=tf.float32)

        self._infrate = tf.Variable(self.infrate, trainable=False)
        self._learnrate = tf.Variable(self.learnrate, trainable=False)

        self.phi = tf.Variable(self.Q)
        self.acts = tf.Variable(tf.zeros([self.nunits, self.batch_size]))
        self.reset_acts = self.acts.assign(
            tf.zeros([self.nunits, self.batch_size]))

        self.x = tf.Variable(tf.zeros([self.batch_size, self.stims.datasize]),
                             trainable=False)
        self.xhat = tf.matmul(tf.transpose(self.acts), self.phi, name='xhat')
        self.resid = self.x - self.xhat
        self.mse = tf.reduce_sum(
            tf.square(self.resid))/self.batch_size/self.stims.datasize
        self.meanL1 = tf.reduce_sum(tf.abs(self.acts))/self.batch_size
        # topographic penalty: sqrt of pooled squared activities; epsilon
        # keeps the gradient finite when the pooled activity is zero
        self.layer2 = tf.reduce_sum(
            tf.sqrt(tf.matmul(self.g, tf.square(self.acts),
                              name='g_times_acts')
                    + self.epsilon))/self.batch_size
        self.loss = 0.5*self.mse + (self.lam*self.meanL1
                                    + self.lam_g*self.layer2)/self.stims.datasize

        inffactor = self.batch_size*self.stims.datasize
        inferer = tf.train.GradientDescentOptimizer(self._infrate*inffactor)
        self.inf_op = inferer.minimize(self.loss, var_list=[self.acts])

        # Fix: use the _learnrate *variable* (not the plain Python attribute)
        # so runtime updates to the learning rate take effect, mirroring the
        # _infrate usage above; previously the variable was created but unused.
        learner = tf.train.GradientDescentOptimizer(self._learnrate)
        learn_step = tf.Variable(0, name='learn_step', trainable=False)
        self.learn_op = learner.minimize(self.loss, global_step=learn_step,
                                         var_list=[self.phi])

        self._ma_variances = tf.Variable(self.ma_variances, trainable=False)
        self._gains = tf.Variable(self.gains, trainable=False)
        _, self.variances = tf.nn.moments(self.acts, axes=[1])
        vareta = self.var_avg_rate
        # exponential moving average of per-unit activity variances
        newvar = (1.-vareta)*self._ma_variances + vareta*self.variances
        self.update_variance = self._ma_variances.assign(newvar)
        # homeostatic gain adjustment toward the target variance
        newgain = self.gains*tf.pow(self.var_goal/self._ma_variances,
                                    self.gain_rate)
        self.update_gains = self._gains.assign(newgain)
        normphi = (tf.expand_dims(self._gains,
                                  dim=1)*tf.nn.l2_normalize(self.phi, dim=1))
        self.renorm_phi = self.phi.assign(normphi)

        self._init_op = tf.global_variables_initializer()
        return graph

    def show_dict(self, cmap='RdBu_r', layout=None, savestr=None):
        """Plot dictionary elements, one band of panels per topology component.

        Args:
            cmap : matplotlib colormap name
            layout : (rows, cols) panel layout per component; defaults to
                self.dict_shape
            savestr : if given, a path to save the figure to

        Returns:
            The concatenated image array that was displayed.
        """
        Qs = self.Q
        layout = layout or self.dict_shape
        per_comp = np.prod(layout)
        bands = [self.stims.stimarray(Qs[nn*per_comp:(nn + 1)*per_comp],
                                      layout=layout)
                 for nn in range(self.topo.ncomponents)]
        display = np.concatenate(bands, axis=0)
        plt.figure()
        plt.imshow(display, interpolation='nearest', cmap=cmap,
                   aspect='auto', origin='lower')
        plt.axis('off')
        plt.colorbar()
        if savestr is not None:
            plt.savefig(savestr, bbox_inches='tight')
        return display

    def sort(self, *args, **kwargs):
        """Deliberately disabled: sorting would destroy the topographic order."""
        print("The topographic order is meaningful, don't sort it away!")

    def get_param_list(self):
        """Return the parent's parameter dict extended with lam_g."""
        params = snet.get_param_list(self)
        params['lam_g'] = self.lam_g
        return params
class topology():
    """Defines the layer-2 pooling topology (the matrix g) for TopoSparsenet."""

    def __init__(self, shape, discs=True, torus=True, binary=True, sigma=1.0, ncomponents=1):
        """
        shape: (tuple) (nlayer2comp, nlayer1) shape of each component
        sigma : (float) defines stdev of default gaussian neighborhoods

        discs : if True, get_matrix builds Gaussian disc neighborhoods
        torus : if True, distances wrap around the dictionary edges
        binary : if True, get_matrix thresholds g to a 0/1 matrix
        ncomponents : number of independent block-diagonal copies of g
        """
        self.shape = shape
        # the layer-1 units are arranged on a square grid
        dict_side = int(np.sqrt(self.shape[1]))
        assert dict_side**2 == self.shape[1], 'Only square dictionaries supported.'
        self.dict_shape = (dict_side, dict_side)
        self.discs = discs
        self.torus = torus
        self.binary = binary
        self.sigma = sigma
        self.ncomponents = ncomponents

    def get_matrix(self):
        """Build and return the topology matrix according to the options."""
        g = np.zeros(self.shape)
        if self.discs:
            g = self.make_discs(g, *self.shape)
        if self.ncomponents > 1:
            # replicate g as independent diagonal blocks, one per component
            blocks = [g.copy() for ii in range(self.ncomponents)]
            g = block_diag(*blocks)
        if self.binary:
            g = self.binarize(g)
        return g

    def make_discs(self, g, nlayer2, nlayer1):
        """Fill g with Gaussian neighborhood weights based on grid distance."""
        sigsquared = self.sigma**2
        for i in range(nlayer2):
            for j in range(nlayer1):
                g[i, j] = np.exp(-self.distance(i, j)/(2 * sigsquared))
        return g

    def distance(self, i, j):
        """ This function measures the squared distance between element i and j. The distance
        here is the distance between element i and j once the row vector has been
        reshaped into a square matrix, treating the dictionary as a torus globally
        if torus is True."""
        rows, cols = self.dict_shape
        rowi = i // cols
        coli = i % cols
        rowj = j // cols
        colj = j % cols
        if self.torus:
            # global topology is a torus: consider the wrapped copies of j
            # and take the closest one
            rowj = [rowj - rows, rowj, rowj + rows]
            colj = [colj - cols, colj, colj + cols]
            dist = []
            for r in rowj:
                for c in colj:
                    dist.append((rowi - r)**2 + (coli - c)**2)
            return np.min(dist)
        else:
            return (rowi - rowj)**2 + (coli - colj)**2

    def block_membership(self, i, j, width=5):
        """This returns 1 if j is in the ith block, otherwise 0. Currently only
        works for square dictionaries."""
        # FIXME: I think there's a bug here that makes the boundary conditions
        # and the sizes wrong
        size = self.dict_shape[0]
        if size != self.dict_shape[1]:
            raise NotImplementedError
        i = [i // size, i % size]
        j = [j // size, j % size]
        if (abs((i[0]%size)-(j[0]%size)) % (size-1) < width) and (abs((i[1]%size)-(j[1]%size)) % (size-1) < width):
            return 1
        else:
            return 0

    def set_blocks(self, width=5):
        """Change the topography by making each second layer unit respond to
        a square block of layer one with given width. g becomes binary."""
        # FIXME: doesn't work because block_membership doesn't work
        # NOTE(review): self.g is never assigned in __init__ (get_matrix uses
        # a local g), so np.zeros_like(self.g) raises AttributeError unless a
        # caller sets self.g first — confirm intended usage before relying on
        # this method.
        self.g = np.zeros_like(self.g)
        nunits = np.prod(self.dict_shape)
        for i in range(nunits):
            for j in range(nunits):
                self.g[i, j] = self.block_membership(i, j, width)

    def binarize(self, g, thresh=1/2, width=None):
        """Threshold g to 0/1; if width is given, derive the threshold from
        the Gaussian value at that distance."""
        if width is not None:
            thresh = np.exp(-width**2/(2*self.sigma**2))
        return np.array(g >= thresh, dtype=int)
"Gaussian"
] | dae03b509a2e669647bcd097e017232275bba8903d72471c868016d71896105c |
"""
This module is used to generate the CAs and CRLs (revoked certificates)
Example::
from DIRAC.Core.Security import Utilities
retVal = Utilities.generateRevokedCertsFile()
if retVal['OK']:
cl = Elasticsearch( self.__url,
timeout = self.__timeout,
use_ssl = True,
verify_certs = True,
ca_certs = retVal['Value'] )
or::
retVal = Utilities.generateCAFile('/WebApp/HTTPS/Cert')
if retVal['OK']:
sslops = dict( certfile = CertificateMgmt.getCert(/WebApp/HTTPS/Cert),
keyfile = CertificateMgmt.getCert(/WebApp/HTTPS/Key),
cert_reqs = ssl.CERT_OPTIONAL,
ca_certs = retVal['Value'],
ssl_version = ssl.PROTOCOL_TLSv1 )
srv = tornado.httpserver.HTTPServer( self.__app, ssl_options = sslops, xheaders = True )
Note: If you want to make sure that the CA is up to date, it is better to use the BundleDeliveryClient.
"""
import os
import tempfile
from DIRAC.Core.Security import X509Chain, X509CRL
from DIRAC.Core.Security import Locations
from DIRAC import gLogger, S_OK, S_ERROR
def generateCAFile(location=None):
  """
  Generate a single CA file with all the PEMs

  :param str location: we can specify a specific location in CS
  :return: S_OK(path to cas.pem) on success, S_ERROR otherwise
  """
  caDir = Locations.getCAsLocation()
  # Candidate output locations, tried in order: next to the CA directory,
  # next to the host certificate, and finally a temporary file.
  for fn in (os.path.join(os.path.dirname(caDir), "cas.pem"),
             os.path.join(os.path.dirname(Locations.getHostCertificateAndKeyLocation(location)[0]), "cas.pem"),
             False):
    if not fn:
      # mkstemp returns an open OS-level descriptor; close it to avoid a leak
      fdTmp, fn = tempfile.mkstemp(prefix="cas.", suffix=".pem")
      os.close(fdTmp)
    try:
      with open(fn, "w") as fd:
        for caFile in os.listdir(caDir):
          caFile = os.path.join(caDir, caFile)
          chain = X509Chain.X509Chain()
          result = chain.loadChainFromFile(caFile)
          if not result['OK']:
            continue
          # skip certificates that have expired or cannot be checked
          expired = chain.hasExpired()
          if not expired['OK'] or expired['Value']:
            continue
          fd.write(chain.dumpAllToString()['Value'])
      gLogger.info("CAs used from: %s" % str(fn))
      return S_OK(fn)
    except IOError as err:
      # Fix: try the next candidate location instead of failing outright
      # (mirrors generateRevokedCertsFile); previously the first unwritable
      # path aborted the whole function.
      gLogger.warn(err)
      continue
  return S_ERROR(caDir)
def generateRevokedCertsFile(location=None):
  """
  Concatenate all CRLs (revoked certificates) from the local CA directory
  into a single "crls.pem" bundle.

  Candidate output locations are tried in order: next to the CA directory,
  next to the host certificate, and finally a temporary file.  Files that do
  not parse as CRLs are skipped.

  :param str location: optional CS path used to locate the host certificate
  :return: S_OK(path to crls.pem), or S_ERROR(CA directory) if no location is writable
  """
  caDir = Locations.getCAsLocation()
  for fn in (os.path.join(os.path.dirname(caDir), "crls.pem"),
             os.path.join(os.path.dirname(Locations.getHostCertificateAndKeyLocation(location)[0]), "crls.pem"),
             False):
    if not fn:
      # Close the descriptor returned by mkstemp so it does not leak;
      # the file is reopened by name below.
      fd, fn = tempfile.mkstemp(prefix="crls", suffix=".pem")
      os.close(fd)
    try:
      with open(fn, "w") as fd:
        for caFile in os.listdir(caDir):
          caFile = os.path.join(caDir, caFile)
          result = X509CRL.X509CRL.instanceFromFile(caFile)
          if not result['OK']:
            continue
          chain = result['Value']
          fd.write(chain.dumpAllToString()['Value'])
      return S_OK(fn)
    except IOError:
      # Location not writable: try the next candidate
      continue
  return S_ERROR(caDir)
| fstagni/DIRAC | Core/Security/Utilities.py | Python | gpl-3.0 | 3,245 | [
"DIRAC"
] | fe6c65a92443538f7a6938dd6c7e2a7fb469eed55f63924b9c406827e53b7532 |
# Copyright 2018 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Iterable, Mapping, Sequence
import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_allclose
from tensorflow import convert_to_tensor as ctt
import gpflow
import gpflow.inducing_variables as iv
import gpflow.kernels as krn
from gpflow import mean_functions as mf
from gpflow.config import default_float
from gpflow.expectations import expectation, quadrature_expectation
from gpflow.probability_distributions import (
DiagonalGaussian,
Gaussian,
MarkovGaussian,
ProbabilityDistribution,
)
rng = np.random.RandomState(1)
RTOL = 1e-6
num_data = 5
num_ind = 4
D_in = 2
D_out = 2
Xmu = ctt(rng.randn(num_data, D_in))
Xmu_markov = ctt(rng.randn(num_data + 1, D_in)) # (N+1)xD
Xcov = rng.randn(num_data, D_in, D_in)
Xcov = ctt(Xcov @ np.transpose(Xcov, (0, 2, 1)))
Z = rng.randn(num_ind, D_in)
def markov_gauss() -> MarkovGaussian:
    """Build a random MarkovGaussian test distribution whose marginal
    covariances are positive semi-definite by construction, with one-step
    cross-covariances between consecutive time points."""
    cov_params = rng.randn(num_data + 1, D_in, 2 * D_in) / 2.0  # (N+1)xDx2D
    Xcov = cov_params @ np.transpose(cov_params, (0, 2, 1))  # (N+1)xDxD
    Xcross = cov_params[:-1] @ np.transpose(cov_params[1:], (0, 2, 1))  # NxDxD
    # pad with a zero block so marginals and cross-covariances stack to one tensor
    Xcross = np.concatenate((Xcross, np.zeros((1, D_in, D_in))), 0)  # (N+1)xDxD
    Xcov = np.stack([Xcov, Xcross])  # 2x(N+1)xDxD
    return MarkovGaussian(Xmu_markov, ctt(Xcov))
_means: Mapping[str, mf.MeanFunction] = {
"lin": mf.Linear(A=rng.randn(D_in, D_out), b=rng.randn(D_out)),
"identity": mf.Identity(input_dim=D_in),
"const": mf.Constant(c=rng.randn(D_out)),
"zero": mf.Zero(output_dim=D_out),
}
_distrs: Mapping[str, ProbabilityDistribution] = {
"gauss": Gaussian(Xmu, Xcov),
"dirac_gauss": Gaussian(Xmu, np.zeros((num_data, D_in, D_in))),
"gauss_diag": DiagonalGaussian(Xmu, rng.rand(num_data, D_in)),
"dirac_diag": DiagonalGaussian(Xmu, np.zeros((num_data, D_in))),
"dirac_markov_gauss": MarkovGaussian(Xmu_markov, np.zeros((2, num_data + 1, D_in, D_in))),
"markov_gauss": markov_gauss(),
}
_kerns: Mapping[str, krn.Kernel] = {
"rbf": krn.SquaredExponential(variance=rng.rand(), lengthscales=rng.rand() + 1.0),
"lin": krn.Linear(variance=rng.rand()),
"matern": krn.Matern32(variance=rng.rand()),
"rbf_act_dim_0": krn.SquaredExponential(
variance=rng.rand(), lengthscales=rng.rand() + 1.0, active_dims=[0]
),
"rbf_act_dim_1": krn.SquaredExponential(
variance=rng.rand(), lengthscales=rng.rand() + 1.0, active_dims=[1]
),
"lin_act_dim_0": krn.Linear(variance=rng.rand(), active_dims=[0]),
"lin_act_dim_1": krn.Linear(variance=rng.rand(), active_dims=[1]),
"rbf_lin_sum": krn.Sum(
[
krn.SquaredExponential(variance=rng.rand(), lengthscales=rng.rand() + 1.0),
krn.Linear(variance=rng.rand()),
]
),
"rbf_lin_sum2": krn.Sum(
[
krn.Linear(variance=rng.rand()),
krn.SquaredExponential(variance=rng.rand(), lengthscales=rng.rand() + 1.0),
krn.Linear(variance=rng.rand()),
krn.SquaredExponential(variance=rng.rand(), lengthscales=rng.rand() + 1.0),
]
),
"rbf_lin_prod": krn.Product(
[
krn.SquaredExponential(
variance=rng.rand(), lengthscales=rng.rand() + 1.0, active_dims=[0]
),
krn.Linear(variance=rng.rand(), active_dims=[1]),
]
),
}
def kerns(*args: str) -> Sequence[krn.Kernel]:
    """Fetch the named test kernels from the module-level registry."""
    return list(map(_kerns.__getitem__, args))
def distrs(*args: str) -> Sequence[ProbabilityDistribution]:
    """Fetch the named test distributions from the module-level registry."""
    return list(map(_distrs.__getitem__, args))
def means(*args: str) -> Sequence[mf.MeanFunction]:
    """Fetch the named test mean functions from the module-level registry."""
    return list(map(_means.__getitem__, args))
@pytest.fixture
def inducing_variable() -> iv.InducingVariables:
    """Fresh set of inducing points at the fixed random locations Z."""
    return iv.InducingPoints(Z)
def _check(params: Iterable[Any]) -> None:
    """Assert that the analytic expectation matches the quadrature estimate."""
    analytic = expectation(*params)
    quad = quadrature_expectation(*params)
    assert_allclose(analytic, quad, rtol=RTOL)
# =================================== TESTS ===================================
distr_args1 = distrs("gauss")
mean_args = means("lin", "identity", "const", "zero")
kern_args1 = kerns("lin", "rbf", "rbf_lin_sum", "rbf_lin_prod")
kern_args2 = kerns("lin", "rbf", "rbf_lin_sum")
@pytest.mark.parametrize("distribution", distr_args1)
@pytest.mark.parametrize("mean1", mean_args)
@pytest.mark.parametrize("mean2", mean_args)
@pytest.mark.parametrize("arg_filter", [lambda p, m1, m2: (p, m1), lambda p, m1, m2: (p, m1, m2)])
def test_mean_function_only_expectations(
distribution: ProbabilityDistribution,
mean1: mf.MeanFunction,
mean2: mf.MeanFunction,
arg_filter: Callable[
[ProbabilityDistribution, mf.MeanFunction, mf.MeanFunction], Iterable[Any]
],
) -> None:
params = arg_filter(distribution, mean1, mean2)
_check(params)
@pytest.mark.parametrize("distribution", distrs("gauss", "gauss_diag"))
@pytest.mark.parametrize("kernel", kern_args1)
@pytest.mark.parametrize(
"arg_filter",
[
lambda p, k, f: (p, k),
lambda p, k, f: (p, (k, f)),
lambda p, k, f: (p, (k, f), (k, f)),
],
)
def test_kernel_only_expectations(
distribution: ProbabilityDistribution,
kernel: krn.Kernel,
inducing_variable: iv.InducingVariables,
arg_filter: Callable[[ProbabilityDistribution, krn.Kernel, mf.MeanFunction], Iterable[Any]],
) -> None:
params = arg_filter(distribution, kernel, inducing_variable)
_check(params)
@pytest.mark.parametrize("distribution", distr_args1)
@pytest.mark.parametrize("kernel", kerns("rbf", "lin", "matern", "rbf_lin_sum"))
@pytest.mark.parametrize("mean", mean_args)
@pytest.mark.parametrize(
"arg_filter", [lambda p, k, f, m: (p, (k, f), m), lambda p, k, f, m: (p, m, (k, f))]
)
def test_kernel_mean_function_expectations(
distribution: ProbabilityDistribution,
kernel: krn.Kernel,
inducing_variable: iv.InducingVariables,
mean: mf.MeanFunction,
arg_filter: Callable[
[ProbabilityDistribution, krn.Kernel, iv.InducingVariables, mf.MeanFunction], Iterable[Any]
],
) -> None:
params = arg_filter(distribution, kernel, inducing_variable, mean)
_check(params)
@pytest.mark.parametrize("kernel", kern_args1)
def test_eKdiag_no_uncertainty(kernel: krn.Kernel) -> None:
eKdiag = expectation(_distrs["dirac_diag"], kernel)
Kdiag = kernel(Xmu, full_cov=False)
assert_allclose(eKdiag, Kdiag, rtol=RTOL)
@pytest.mark.parametrize("kernel", kern_args1)
def test_eKxz_no_uncertainty(kernel: krn.Kernel, inducing_variable: iv.InducingVariables) -> None:
eKxz = expectation(_distrs["dirac_diag"], (kernel, inducing_variable))
Kxz = kernel(Xmu, Z)
assert_allclose(eKxz, Kxz, rtol=RTOL)
@pytest.mark.parametrize("kernel", kern_args2)
@pytest.mark.parametrize("mean", mean_args)
def test_eMxKxz_no_uncertainty(
kernel: krn.Kernel, inducing_variable: iv.InducingVariables, mean: mf.MeanFunction
) -> None:
exKxz = expectation(_distrs["dirac_diag"], mean, (kernel, inducing_variable))
Kxz = kernel(Xmu, Z)
xKxz = expectation(_distrs["dirac_gauss"], mean)[:, :, None] * Kxz[:, None, :]
assert_allclose(exKxz, xKxz, rtol=RTOL)
@pytest.mark.parametrize("kernel", kern_args1)
def test_eKzxKxz_no_uncertainty(
kernel: krn.Kernel, inducing_variable: iv.InducingVariables
) -> None:
eKzxKxz = expectation(
_distrs["dirac_diag"], (kernel, inducing_variable), (kernel, inducing_variable)
)
Kxz = kernel(Xmu, Z)
KzxKxz = Kxz[:, :, None] * Kxz[:, None, :]
assert_allclose(eKzxKxz, KzxKxz, rtol=RTOL)
def test_RBF_eKzxKxz_gradient_notNaN() -> None:
"""
Ensure that <K_{Z, x} K_{x, Z}>_p(x) is not NaN and correct, when
K_{Z, Z} is zero with finite precision. See pull request #595.
"""
kernel = krn.SquaredExponential(1, lengthscales=0.1)
kernel.variance.assign(2.0)
p = gpflow.probability_distributions.Gaussian(
tf.constant([[10]], dtype=default_float()),
tf.constant([[[0.1]]], dtype=default_float()),
)
z = iv.InducingPoints([[-10.0], [10.0]])
with tf.GradientTape() as tape:
ekz = expectation(p, (kernel, z), (kernel, z))
grad = tape.gradient(ekz, kernel.lengthscales.unconstrained_variable)
assert grad is not None and not np.isnan(grad)
@pytest.mark.parametrize("distribution", distrs("gauss_diag"))
@pytest.mark.parametrize("kern1", kerns("rbf_act_dim_0", "lin_act_dim_0"))
@pytest.mark.parametrize("kern2", kerns("rbf_act_dim_1", "lin_act_dim_1"))
def test_eKzxKxz_separate_dims_simplification(
distribution: ProbabilityDistribution,
kern1: krn.Kernel,
kern2: krn.Kernel,
inducing_variable: iv.InducingVariables,
) -> None:
_check((distribution, (kern1, inducing_variable), (kern2, inducing_variable)))
@pytest.mark.parametrize("distribution", distr_args1)
@pytest.mark.parametrize("kern1", kerns("rbf_lin_sum"))
@pytest.mark.parametrize("kern2", kerns("rbf_lin_sum2"))
def test_eKzxKxz_different_sum_kernels(
distribution: ProbabilityDistribution,
kern1: krn.Kernel,
kern2: krn.Kernel,
inducing_variable: iv.InducingVariables,
) -> None:
_check((distribution, (kern1, inducing_variable), (kern2, inducing_variable)))
@pytest.mark.parametrize("distribution", distr_args1)
@pytest.mark.parametrize("kern1", kerns("rbf_lin_sum2"))
@pytest.mark.parametrize("kern2", kerns("rbf_lin_sum2"))
def test_eKzxKxz_same_vs_different_sum_kernels(
distribution: ProbabilityDistribution,
kern1: krn.Kernel,
kern2: krn.Kernel,
inducing_variable: iv.InducingVariables,
) -> None:
# check the result is the same if we pass different objects with the same value
same = expectation(*(distribution, (kern1, inducing_variable), (kern1, inducing_variable)))
different = expectation(*(distribution, (kern1, inducing_variable), (kern2, inducing_variable)))
assert_allclose(same, different, rtol=RTOL)
@pytest.mark.parametrize("distribution", distrs("markov_gauss"))
@pytest.mark.parametrize("kernel", kern_args2)
@pytest.mark.parametrize("mean", means("identity"))
def test_exKxz_markov(
distribution: ProbabilityDistribution,
kernel: krn.Kernel,
mean: mf.MeanFunction,
inducing_variable: iv.InducingVariables,
) -> None:
_check((distribution, (kernel, inducing_variable), mean))
@pytest.mark.parametrize("distribution", distrs("dirac_markov_gauss"))
@pytest.mark.parametrize("kernel", kern_args2)
@pytest.mark.parametrize("mean", means("identity"))
def test_exKxz_markov_no_uncertainty(
distribution: ProbabilityDistribution,
kernel: krn.Kernel,
mean: mf.MeanFunction,
inducing_variable: iv.InducingVariables,
) -> None:
exKxz = expectation(distribution, (kernel, inducing_variable), mean)
Kzx = kernel(Xmu_markov[:-1, :], Z) # NxM
xKxz = Kzx[..., None] * Xmu_markov[1:, None, :] # NxMxD
assert_allclose(exKxz, xKxz, rtol=RTOL)
@pytest.mark.parametrize("kernel", kerns("rbf"))
@pytest.mark.parametrize("distribution", distrs("gauss", "gauss_diag", "markov_gauss"))
def test_cov_shape_inference(
distribution: ProbabilityDistribution,
kernel: krn.Kernel,
inducing_variable: iv.InducingVariables,
) -> None:
assert isinstance(distribution, (Gaussian, DiagonalGaussian, MarkovGaussian))
gauss_tuple = (distribution.mu, distribution.cov)
_check((gauss_tuple, (kernel, inducing_variable)))
if isinstance(distribution, MarkovGaussian):
_check((gauss_tuple, None, (kernel, inducing_variable)))
| GPflow/GPflow | tests/gpflow/expectations/test_expectations.py | Python | apache-2.0 | 12,037 | [
"Gaussian"
] | 36fe23867e7bf64bfe41e88e2ab43c97819434e74cf9ed3cdcde3f964dfbf0ce |
#! /usr/bin/env python
''' Generic utilities.'''
#from numpy import *
#from pylab import *
from datetime import time, datetime
from math import sqrt
__metaclass__ = type
datetimeFormat = "%Y-%m-%d %H:%M:%S"
#########################
# Enumerations
#########################
def inverseEnumeration(l):
    '''Returns a dictionary mapping each element of the input list to its index
    (for duplicate elements, the last index wins).'''
    return {element: index for index, element in enumerate(l)}
#########################
# Simple statistics
#########################
def sampleSize(stdev, tolerance, percentConfidence, printLatex = False):
    '''Returns the sample size (k*stdev/tolerance)**2 needed to estimate a mean
    within +/- tolerance at the given confidence level, where k is the two-sided
    normal quantile rounded to 2 decimals.'''
    from scipy.stats.distributions import norm
    quantile = norm.ppf(0.5+percentConfidence/200., 0, 1)
    k = round(quantile*100)/100.
    if printLatex:
        print('${0}^2\\frac{{{1}^2}}{{{2}^2}}$'.format(k, stdev, tolerance))
    return (k*stdev/tolerance)**2
def confidenceInterval(mean, stdev, nSamples, percentConfidence, trueStd = True, printLatex = False):
    '''Returns the (lower, upper) confidence interval for the mean.
    If trueStd, the normal distribution is used (known standard deviation);
    otherwise Student's t with nSamples-1 degrees of freedom.
    Equivalent to norm.interval / t.interval with loc=mean, scale=stdev/sqrt(nSamples).'''
    from math import sqrt
    from scipy.stats.distributions import norm, t
    p = 0.5+percentConfidence/200.
    if trueStd:
        quantile = norm.ppf(p, 0, 1)
    else:  # small-sample case: Student's t
        quantile = t.ppf(p, nSamples-1)
    k = round(quantile*100)/100.
    e = k*stdev/sqrt(nSamples)
    if printLatex:
        print('${0} \pm {1}\\frac{{{2}}}{{\sqrt{{{3}}}}}$'.format(mean, k, stdev, nSamples))
    return mean-e, mean+e
def computeChi2(expected, observed):
    '''Returns the Chi2 statistic sum((e-o)^2/e) over paired expected/observed values.'''
    return sum(((e-o)**2/e for e, o in zip(expected, observed)), 0.)
class EmpiricalDistribution:
    '''Base class for empirical distributions; subclasses must provide
    self.counts (number of observations per category or interval).'''
    def nSamples(self):
        # total number of observations
        return sum(self.counts)
def cumulativeDensityFunction(sample, normalized = False):
    '''Returns the cumulative density function of the sample of a random variable:
    the sorted sample values and the cumulative count at each value.
    If normalized, counts are divided by the sample size (last value is 1).'''
    from numpy import arange
    xaxis = sorted(sample)
    counts = arange(1, len(sample)+1)
    if normalized:
        # Out-of-place division: in-place `/=` on the integer array raises a
        # casting TypeError with modern numpy.
        counts = counts / float(len(sample))
    return xaxis, counts
class EmpiricalDiscreteDistribution(EmpiricalDistribution):
    '''Class to represent a sample of a distribution for a discrete random variable
    (observed category values and the number of observations per value).
    '''
    # note: the former class-level `from numpy.core.fromnumeric import sum` was
    # dead code (class-namespace imports are not visible inside methods) and
    # relied on a private numpy path; the builtin sum is used instead
    def __init__(self, categories, counts):
        self.categories = categories
        self.counts = counts

    def mean(self):
        '''Returns the weighted sample mean.'''
        total = sum(float(x*n) for x, n in zip(self.categories, self.counts))
        return total/self.nSamples()

    def var(self, mean = None):
        '''Returns the unbiased sample variance; computes the mean if not provided.'''
        # test `is None`, not truthiness: a supplied mean of 0 is legitimate
        if mean is None:
            m = self.mean()
        else:
            m = mean
        squares = sum(float((x-m)*(x-m)*n) for x, n in zip(self.categories, self.counts))
        return squares/(self.nSamples()-1)

    def referenceCounts(self, probability):
        '''probability is a function that returns the probability of the random variable for the category values'''
        refProba = [probability(c) for c in self.categories]
        # force the probabilities to sum to 1
        refProba[-1] = 1-sum(refProba[:-1])
        refCounts = [r*self.nSamples() for r in refProba]
        return refCounts, refProba
class EmpiricalContinuousDistribution(EmpiricalDistribution):
    '''Class to represent a sample of a distribution for a continuous random variable
    with the number of observations for each interval
    intervals (categories variable) are defined by their left limits, the last one being the right limit
    categories contain therefore one more element than the counts'''
    def __init__(self, categories, counts):
        # todo add samples for initialization and everything to None? (or setSamples?)
        self.categories = categories
        self.counts = counts

    def mean(self):
        '''Returns the sample mean, approximating each interval by its midpoint.'''
        result = 0.
        # iterate over ALL intervals: the previous range(len(counts)-1) silently
        # dropped the last interval (categories has one more element than counts)
        for i in range(len(self.counts)):
            result += self.counts[i]*(self.categories[i]+self.categories[i+1])/2
        return result/self.nSamples()

    def var(self, mean = None):
        '''Returns the unbiased sample variance (midpoint approximation);
        computes the mean if not provided.'''
        # test `is None`, not truthiness: a supplied mean of 0 is legitimate
        if mean is None:
            m = self.mean()
        else:
            m = mean
        result = 0.
        for i in range(len(self.counts)):
            mid = (self.categories[i]+self.categories[i+1])/2
            result += self.counts[i]*(mid - m)*(mid - m)
        return result/(self.nSamples()-1)

    def referenceCounts(self, cdf):
        '''cdf is a cumulative distribution function
        returning the probability of the variable being less that x'''
        refCumulativeCounts = [cdf(x) for x in self.categories[1:-1]]
        refProba = [refCumulativeCounts[0]]
        for i in range(1, len(refCumulativeCounts)):
            refProba.append(refCumulativeCounts[i]-refCumulativeCounts[i-1])
        refProba.append(1-refCumulativeCounts[-1])
        refCounts = [p*self.nSamples() for p in refProba]
        return refCounts, refProba

    def printReferenceCounts(self, refCounts=None):
        '''Prints LaTeX table rows of interval bounds, reference probability and count.'''
        if refCounts:
            ref = refCounts
        else:
            # NOTE(review): this default binds the *method object*, which the loop
            # below cannot index -- callers must supply refCounts explicitly
            ref = self.referenceCounts
        for i in range(len(ref[0])):
            print('{0}-{1} & {2:0.3} & {3:0.3} \\\\'.format(self.categories[i],self.categories[i+1],ref[1][i], ref[0][i]))
#########################
# maths section
#########################
# def kernelSmoothing(sampleX, X, Y, weightFunc, halfwidth):
# '''Returns a smoothed weighted version of Y at the predefined values of sampleX
# Sum_x weight(sample_x,x) * y(x)'''
# from numpy import zeros, array
# smoothed = zeros(len(sampleX))
# for i,x in enumerate(sampleX):
# weights = array([weightFunc(x,xx, halfwidth) for xx in X])
# if sum(weights)>0:
# smoothed[i] = sum(weights*Y)/sum(weights)
# else:
# smoothed[i] = 0
# return smoothed
def kernelSmoothing(x, X, Y, weightFunc, halfwidth):
    '''Returns the smoothed estimate of (X,Y) at x:
    the weighted average Sum_i weight(x, X_i) * Y_i / Sum_i weight(x, X_i),
    or 0 when all weights vanish.'''
    from numpy import array
    weights = array([weightFunc(x, observedx, halfwidth) for observedx in X])
    totalWeight = sum(weights)
    if totalWeight > 0:
        return sum(weights*Y)/totalWeight
    else:
        return 0
def uniform(center, x, halfwidth):
    '''Uniform (boxcar) kernel: 1 inside the window of radius halfwidth, 0 outside.'''
    return 1. if abs(center-x) < halfwidth else 0.
def gaussian(center, x, halfwidth):
    '''Gaussian kernel with standard deviation halfwidth (unnormalized: 1 at the center).'''
    from numpy import exp
    u = (center-x)/halfwidth
    return exp(-u*u/2)
def epanechnikov(center, x, halfwidth):
    '''Epanechnikov (parabolic) kernel: 1-(d/halfwidth)^2 inside the window, 0 outside.'''
    diff = abs(center-x)
    if diff < halfwidth:
        ratio = diff/halfwidth
        return 1.-ratio*ratio
    return 0.
def triangular(center, x, halfwidth):
    '''Triangular kernel: 1-d/halfwidth inside the window, 0 outside.'''
    diff = abs(center-x)
    if diff < halfwidth:
        return 1.-diff/halfwidth
    return 0.
def medianSmoothing(x, X, Y, halfwidth):
    '''Returns the median of the Y values whose X lies strictly within halfwidth of x.'''
    from numpy import median
    selected = [y for observedx, y in zip(X, Y) if abs(x-observedx) < halfwidth]
    return median(selected)
def argmaxDict(d):
    'Returns the key whose associated value is largest.'
    return max(d.items(), key=lambda kv: kv[1])[0]
def framesToTime(nFrames, frameRate, initialTime = time()):
    '''returns a datetime.time for the time in hours, minutes and seconds
    corresponding to nFrames at frameRate, offset by initialTime (a datetime.time;
    the default is midnight, evaluated once at definition -- harmless, time is immutable)'''
    from math import floor
    totalSeconds = int(floor(float(nFrames)/float(frameRate))
                       + initialTime.hour*3600 + initialTime.minute*60 + initialTime.second)
    hours, remainder = divmod(totalSeconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return time(hours, minutes, seconds)
def timeToFrames(t, frameRate):
    'Converts a datetime.time to the corresponding frame number at frameRate.'
    totalSeconds = t.hour*3600 + t.minute*60 + t.second
    return frameRate*totalSeconds
def sortXY(X, Y):
    'returns the pairs (x, Y(x)) sorted on X (duplicate x keep the last y)'
    mapping = dict(zip(X, Y))
    orderedKeys = sorted(mapping)
    return orderedKeys, [mapping[k] for k in orderedKeys]
def ceilDecimals(v, nDecimals):
    '''Rounds the number up at the nth decimal
    eg 1.23 at 0 decimal is 2, at 1 decimal is 1.3'''
    from math import ceil
    scale = 10**nDecimals
    return ceil(v*scale)/scale
def inBetween(bound1, bound2, x):
    'Returns True if x lies between bound1 and bound2 (inclusive), in either order.'
    return min(bound1, bound2) <= x <= max(bound1, bound2)
def pointDistanceL2(x1, y1, x2, y2):
    ''' Compute point-to-point distance (L2 norm, ie Euclidean distance)'''
    dx = x2-x1
    dy = y2-y1
    return sqrt(dx*dx + dy*dy)
def crossProduct(l1, l2):
    'Returns the z-component of the 2D cross product of vectors l1 and l2.'
    ax, ay = l1[0], l1[1]
    bx, by = l2[0], l2[1]
    return ax*by - ay*bx
def cat_mvgavg(cat_list, halfWidth):
    ''' Return a list of categories/values smoothed according to a window.
    halfWidth is the search radius on either side'''
    from copy import deepcopy
    smoothed = deepcopy(cat_list)
    for point in range(len(cat_list)):
        lower_bound_check = max(0,point-halfWidth)
        upper_bound_check = min(len(cat_list)-1,point+halfWidth+1)
        # NOTE(review): capping the end at len-1 excludes the final element from
        # every window (slice ends are exclusive) -- looks like an off-by-one, confirm
        window_values = cat_list[lower_bound_check:upper_bound_check]
        # most frequent category in the window; ties broken by set iteration order
        smoothed[point] = max(set(window_values), key=window_values.count)
    return smoothed
def filterMovingWindow(inputSignal, halfWidth):
    '''Returns an array obtained after the smoothing of the input by a moving average
    over a window of 2*halfWidth+1 points.
    The first and last halfWidth points are copied from the original.'''
    from numpy import ones, convolve, array
    # keep the window size an int: numpy.ones rejects a float size with modern numpy
    width = 2*halfWidth + 1
    win = ones(width, 'd')
    result = convolve(win/float(width), array(inputSignal), 'same')
    result[:halfWidth] = inputSignal[:halfWidth]
    result[-halfWidth:] = inputSignal[-halfWidth:]
    return result
def linearRegression(x, y, deg = 1, plotData = False):
    '''returns the least square estimation of the linear regression of y = ax+b
    as well as the plot

    :param deg: degree of the fitted polynomial (1 = straight line)
    :param plotData: if True, also plots the data and fitted curve (requires matplotlib)
    :return: polynomial coefficients, highest degree first (as numpy.polyfit)'''
    from numpy.lib.polynomial import polyfit
    coef = polyfit(x, y, deg)
    if plotData:
        # import matplotlib lazily so the fit itself does not depend on it
        # (previously every call imported matplotlib.pyplot unconditionally)
        from matplotlib.pyplot import plot
        from numpy.core.multiarray import arange
        def poly(x):
            # evaluate the fitted polynomial at x (coefficients highest degree first)
            result = 0
            for i in range(len(coef)):
                result += coef[i]*x**(len(coef)-i-1)
            return result
        plot(x, y, 'x')
        xx = arange(min(x), max(x), (max(x)-min(x))/1000)
        plot(xx, [poly(z) for z in xx])
    return coef
#########################
# iterable section
#########################
def mostCommon(L):
    '''Returns the most frequent element in a iterable;
    ties are broken in favor of the element appearing earliest.'''
    counts = {}
    firstIndex = {}
    for position, item in enumerate(L):
        counts[item] = counts.get(item, 0) + 1
        firstIndex.setdefault(item, position)
    # highest count wins; on equal counts, the smaller first index wins
    return max(counts, key=lambda item: (counts[item], -firstIndex[item]))
#########################
# sequence section
#########################
class LCSS:
    '''Class that keeps the LCSS parameters
    and puts together the various computations

    LCSS = longest common subsequence of two sequences, where two elements
    "match" according to similarityFunc rather than strict equality.'''
    def __init__(self, similarityFunc, delta = float('inf'), aligned = False, lengthFunc = min):
        # similarityFunc(a, b) -> bool: whether two elements are considered similar
        self.similarityFunc = similarityFunc
        # if True, try all alignment shifts and keep the best match
        self.aligned = aligned
        # maximum index offset allowed between matched elements
        self.delta = delta
        # normalization of the LCSS length (min, max, average of the two lengths...)
        self.lengthFunc = lengthFunc
        self.subSequenceIndices = [(0,0)]

    def similarities(self, l1, l2, jshift=0):
        '''Fills the (len(l1)+1) x (len(l2)+1) dynamic-programming table of LCSS
        lengths, restricted to the band |i-j-jshift| <= delta.'''
        # NOTE(review): `numpy.int` was removed in numpy>=1.24 and xrange is
        # Python 2 only -- this class assumes a Python 2 / old numpy environment
        from numpy import zeros, int as npint
        n1 = len(l1)
        n2 = len(l2)
        self.similarityTable = zeros((n1+1,n2+1), dtype = npint)
        for i in xrange(1,n1+1):
            for j in xrange(max(1,i-jshift-self.delta),min(n2,i-jshift+self.delta)+1):
                if self.similarityFunc(l1[i-1], l2[j-1]):
                    self.similarityTable[i,j] = self.similarityTable[i-1,j-1]+1
                else:
                    self.similarityTable[i,j] = max(self.similarityTable[i-1,j], self.similarityTable[i,j-1])

    def subSequence(self, i, j):
        '''Returns the subsequence of two sequences
        http://en.wikipedia.org/wiki/Longest_common_subsequence_problem'''
        # recursive backtracking through the DP table, yielding matched index pairs
        if i == 0 or j == 0:
            return []
        elif self.similarityTable[i][j] == self.similarityTable[i][j-1]:
            return self.subSequence(i, j-1)
        elif self.similarityTable[i][j] == self.similarityTable[i-1][j]:
            return self.subSequence(i-1, j)
        else:
            return self.subSequence(i-1, j-1) + [(i-1,j-1)]

    def _compute(self, _l1, _l2, computeSubSequence = False):
        '''returns the longest common subsequence similarity
        based on the threshold on distance between two elements of lists l1, l2
        similarityFunc returns True or False whether the two points are considered similar
        if aligned, returns the best matching if using a finite delta by shifting the series alignments
        eg distance(p1, p2) < epsilon
        '''
        # make l1 the shorter sequence; remember to swap match indices back at the end
        if len(_l2) < len(_l1): # l1 is the shortest
            l1 = _l2
            l2 = _l1
            revertIndices = True
        else:
            l1 = _l1
            l2 = _l2
            revertIndices = False
        n1 = len(l1)
        n2 = len(l2)
        if self.aligned:
            # try every alignment shift and keep the one with the best LCSS value
            lcssValues = {}
            similarityTables = {}
            for i in xrange(-n2-self.delta+1, n1+self.delta): # interval such that [i-shift-delta, i-shift+delta] is never empty, which happens when i-shift+delta < 1 or when i-shift-delta > n2
                self.similarities(l1, l2, i)
                lcssValues[i] = self.similarityTable.max()
                similarityTables[i] = self.similarityTable
                #print self.similarityTable
            alignmentShift = argmaxDict(lcssValues) # ideally get the medium alignment shift, the one that minimizes distance
            self.similarityTable = similarityTables[alignmentShift]
        else:
            alignmentShift = 0
            self.similarities(l1, l2)
        # threshold values for the useful part of the similarity table are n2-n1-delta and n1-n2-delta
        self.similarityTable = self.similarityTable[:min(n1, n2+alignmentShift+self.delta)+1, :min(n2, n1-alignmentShift+self.delta)+1]
        if computeSubSequence:
            self.subSequenceIndices = self.subSequence(self.similarityTable.shape[0]-1, self.similarityTable.shape[1]-1)
            if revertIndices:
                self.subSequenceIndices = [(j,i) for i,j in self.subSequenceIndices]
        return self.similarityTable[-1,-1]

    def compute(self, l1, l2, computeSubSequence = False):
        '''get methods are to be shadowed in child classes '''
        return self._compute(l1, l2, computeSubSequence)

    def computeAlignment(self):
        '''Returns the mean index offset of the matched pairs (after compute).'''
        from numpy import mean
        return mean([j-i for i,j in self.subSequenceIndices])

    def _computeNormalized(self, l1, l2, computeSubSequence = False):
        ''' compute the normalized LCSS
        ie, the LCSS divided by the min or mean of the indicator lengths (using lengthFunc)
        lengthFunc = lambda x,y:float(x,y)/2'''
        return float(self._compute(l1, l2, computeSubSequence))/self.lengthFunc(len(l1), len(l2))

    def computeNormalized(self, l1, l2, computeSubSequence = False):
        return self._computeNormalized(l1, l2, computeSubSequence)

    def _computeDistance(self, l1, l2, computeSubSequence = False):
        ''' compute the LCSS distance (1 - normalized LCSS)'''
        return 1-self._computeNormalized(l1, l2, computeSubSequence)

    def computeDistance(self, l1, l2, computeSubSequence = False):
        return self._computeDistance(l1, l2, computeSubSequence)
#########################
# plotting section
#########################
def plotPolygon(poly, options = ''):
    'Plots shapely polygon poly'
    from numpy.core.multiarray import array
    from matplotlib.pyplot import plot
    from shapely.geometry import Polygon
    # exterior ring as an Nx2 array of (x, y) coordinates
    tmp = array(poly.exterior)
    plot(tmp[:,0], tmp[:,1], options)
def stepPlot(X, firstX, lastX, initialCount = 0, increment = 1):
    '''for each value in X, increment by increment the initial count
    returns the lists that can be plotted
    to obtain a step plot increasing by one for each value in x, from first to last value
    firstX and lastX should be respectively smaller and larger than all elements in X'''
    doubledX = []
    counts = [initialCount]
    for value in sorted(X):
        doubledX.extend([value, value])
        current = counts[-1]
        counts.extend([current, current+increment])
    counts.append(counts[-1])
    return [firstX]+doubledX+[lastX], counts
class PlottingPropertyValues:
    '''Cyclic sequence of plotting property values: indexing wraps around
    so any integer index returns a valid value.'''
    def __init__(self, values):
        self.values = values

    def __getitem__(self, i):
        n = len(self.values)
        return self.values[i % n]
# Shared cyclic sequences of matplotlib plotting properties
markers = PlottingPropertyValues(['+', '*', ',', '.', 'x', 'D', 's', 'o'])
scatterMarkers = PlottingPropertyValues(['s','o','^','>','v','<','d','p','h','8','+','x'])
linestyles = PlottingPropertyValues(['-', '--', '-.', ':'])
colors = PlottingPropertyValues('brgmyck') # 'w'
def plotIndicatorMap(indicatorMap, squareSize, masked = True, defaultValue=-1):
    '''Plots a color map of indicator values defined on a grid.

    indicatorMap maps (x, y) integer cell coordinates to a value; squareSize is
    the side of a grid cell in plot units; cells without a value receive
    defaultValue and are masked out when masked is True.'''
    from numpy import array, arange, ones, ma
    from matplotlib.pyplot import pcolor
    coords = array(indicatorMap.keys())
    minX = min(coords[:,0])
    minY = min(coords[:,1])
    X = arange(minX, max(coords[:,0])+1.1)*squareSize
    Y = arange(minY, max(coords[:,1])+1.1)*squareSize
    C = defaultValue*ones((len(Y), len(X)))
    # NOTE(review): array(dict.keys()) and iteritems() assume Python 2 -- under
    # Python 3 both fail; confirm the target interpreter
    for k,v in indicatorMap.iteritems():
        C[k[1]-minY,k[0]-minX] = v
    if masked:
        pcolor(X, Y, ma.masked_where(C==defaultValue,C))
    else:
        pcolor(X, Y, C)
#########################
# Data download
#########################
def downloadECWeather(stationID, years, months = [], outputDirectoryname = '.', english = True):
    '''Downloads monthly weather data from Environment Canada
    If month is provided (number 1 to 12), it means hourly data for the whole month
    Otherwise, means the data for each day, for the whole year
    Example: MONTREAL MCTAVISH 10761
    MONTREALPIERRE ELLIOTT TRUDEAU INTL A 5415
    To get daily data for 2010 and 2011, downloadECWeather(10761, [2010,2011], [], '/tmp')
    To get hourly data for 2009 and 2012, January, March and October, downloadECWeather(10761, [2009,2012], [1,3,10], '/tmp')'''
    # NOTE(review): urllib2 is Python 2 only, and the HTTP response is never
    # closed; the mutable default months=[] is not mutated, so it is harmless
    import urllib2
    # language code for the request: 'e' (English) or 'f' (French)
    if english:
        language = 'e'
    else:
        language = 'f'
    # timeframe 2 = daily data for the whole year; 1 = hourly data per month
    if len(months) == 0:
        timeFrame = 2
        months = [1]
    else:
        timeFrame = 1
    for year in years:
        for month in months:
            url = urllib2.urlopen('http://climat.meteo.gc.ca/climateData/bulkdata_{}.html?format=csv&stationID={}&Year={}&Month={}&Day=1&timeframe={}&submit=++T%C3%A9l%C3%A9charger+%0D%0Ades+donn%C3%A9es'.format(language, stationID, year, month, timeFrame))
            data = url.read()
            # output file name encodes station, year and (for hourly data) month
            outFilename = '{}/{}-{}'.format(outputDirectoryname, stationID, year)
            if timeFrame == 1:
                outFilename += '-{}-hourly'.format(month)
            else:
                outFilename += '-daily'
            outFilename += '.csv'
            out = open(outFilename, 'w')
            out.write(data)
            out.close()
#########################
# File I/O
#########################
def removeExtension(filename, delimiter = '.'):
    '''Returns the filename minus the extension (all characters after last .);
    a leading delimiter (hidden files) is left untouched.'''
    position = filename.rfind(delimiter)
    return filename[:position] if position > 0 else filename
def cleanFilename(s):
    'cleans filenames obtained when contatenating figure characteristics'
    for old, new in ((' ', '-'), ('.', ''), ('/', '-')):
        s = s.replace(old, new)
    return s
def listfiles(dirname, extension, remove = False):
    '''Returns the list of files with the extension in the directory dirname
    If remove is True, the filenames are stripped from the extension'''
    from os import listdir
    matching = sorted(f for f in listdir(dirname) if f.endswith(extension))
    if remove:
        return [removeExtension(f, extension) for f in matching]
    return matching
def mkdir(dirname):
    'Creates a directory if it does not exist'
    import os
    # NOTE(review): check-then-create is racy (TOCTOU); os.makedirs(dirname,
    # exist_ok=True) would be atomic but would change the printed feedback
    if not os.path.exists(dirname):
        os.mkdir(dirname)
    else:
        print(dirname+' already exists')
def removeFile(filename):
    '''Deletes the file while avoiding raising an error
    if the file does not exist'''
    import os
    # NOTE(review): check-then-remove is racy (TOCTOU); a try/except around
    # os.remove would be atomic but would change the printed feedback
    if (os.path.exists(filename)):
        os.remove(filename)
    else:
        print(filename+' does not exist')
def line2Floats(l, separator=' '):
    '''Returns the list of floats corresponding to the string'''
    return list(map(float, l.split(separator)))
def line2Ints(l, separator=' '):
    '''Returns the list of ints corresponding to the string'''
    return list(map(int, l.split(separator)))
#########################
# CLI utils
#########################
def parseCLIOptions(helpMessage, options, cliArgs, optionalOptions=[]):
    ''' Simple function to handle similar argument parsing
    Returns the dictionary of options and their values
    * cliArgs are most likely directly sys.argv
    (only the elements after the first one are considered)
    * options should be a list of strings for getopt options,
    eg ['frame=','correspondences=','video=']
    A value must be provided for each option, or the program quits'''
    # NOTE(review): the mutable default optionalOptions=[] is never mutated,
    # so it is harmless; numpy's `all` is imported but builtin all would do
    import sys, getopt
    from numpy.core.fromnumeric import all
    optionValues, args = getopt.getopt(cliArgs[1:], 'h', ['help']+options+optionalOptions)
    optionValues = dict(optionValues)
    # print the help message plus the option lists, then exit
    if '--help' in optionValues.keys() or '-h' in optionValues.keys():
        print(helpMessage+
              '\n - Compulsory options: '+' '.join([opt.replace('=','') for opt in options])+
              '\n - Non-compulsory options: '+' '.join([opt.replace('=','') for opt in optionalOptions]))
        sys.exit()
    # every compulsory option must be present, otherwise quit
    missingArgument = [('--'+opt.replace('=','') in optionValues.keys()) for opt in options]
    if not all(missingArgument):
        print('Missing argument')
        print(optionValues)
        sys.exit()
    return optionValues
#########################
# Profiling
#########################
def analyzeProfile(profileFilename, stripDirs = True):
    '''Analyze the file produced by cProfile
    obtained by for example:
    - call in script (for main() function in script)
    import cProfile, os
    cProfile.run('main()', os.path.join(os.getcwd(),'main.profile'))
    - or on the command line:
    python -m cProfile [-o profile.bin] [-s sort] scriptfile [arg]'''
    import pstats, os
    # NOTE(review): the profile is loaded from the *parent* directory -- confirm intended
    p = pstats.Stats(os.path.join(os.pardir, profileFilename))
    if stripDirs:
        p.strip_dirs()
    p.sort_stats('time')
    # print the top 20% of entries by internal time
    p.print_stats(.2)
    #p.sort_stats('time')
    # p.print_callees(.1, 'int_prediction.py:')
    return p
#########################
# running tests
#########################
if __name__ == "__main__":
    # Run the doctests stored in tests/utils.txt through a unittest runner
    import doctest
    import unittest
    suite = doctest.DocFileSuite('tests/utils.txt')
    #suite = doctest.DocTestSuite()
    unittest.TextTestRunner().run(suite)
    #doctest.testmod()
    #doctest.testfile("example.txt")
| Transience/tracker | utils.py | Python | mit | 23,493 | [
"Gaussian"
] | 4afb44cdb0966ec1c49e791ca8b5376f8b2110ea36903d439b5e42aa1e03588f |
"""Tools for signal processing."""
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
__all__ = ['smooth', 'canoncorr', 'participation_ratio', 'stable_rank']
def smooth(x, sigma=1.0, axis=0):
    """Apply a 1D Gaussian filter along one axis of a signal.

    Args:
        x: array_like, the array to be smoothed
        sigma: float, width of the Gaussian kernel (default: 1.0)
        axis: int, axis along which to filter (default: 0)

    Returns:
        array_like, the smoothed signal
    """
    smoothed = gaussian_filter1d(x, sigma, axis=axis)
    return smoothed
def stable_rank(X):
    """Computes the stable rank of a matrix"""
    assert X.ndim == 2, "X must be a matrix"
    singular_values = np.linalg.svd(X, compute_uv=False, full_matrices=False)
    energies = np.square(singular_values)
    return energies.sum() / energies.max()
def participation_ratio(C):
    """Computes the participation ratio of a square matrix."""
    assert C.ndim == 2, "C must be a matrix"
    assert C.shape[0] == C.shape[1], "C must be a square matrix"
    trace = np.trace(C)
    trace_of_square = np.trace(np.linalg.matrix_power(C, 2))
    return trace ** 2 / trace_of_square
def canoncorr(X, Y):
    """Canonical correlation between two subspaces.

    Args:
        X, Y: The subspaces to compare. They should be of the same size.

    Returns:
        corr: array_like, Cosine of the principal angles.

    Notes:
        The canonical correlation generalizes the angle between vectors to
        linear subspaces: it recursively finds maximally correlated unit
        vectors in each subspace [1]_. Here the principal angles are
        obtained from the QR decompositions of the two bases followed by
        an SVD of their overlap matrix [2]_.

    References:
        .. [1] Angles between flats. (2016, August 4). In Wikipedia, The Free Encyclopedia.
           https://en.wikipedia.org/w/index.php?title=Angles_between_flats
        .. [2] Björck, Ȧke, and Gene H. Golub. "Numerical methods for computing angles
           between linear subspaces." Mathematics of computation 27.123 (1973): 579-594.
    """
    # Orthonormal bases for each subspace
    qx = np.linalg.qr(X)[0]
    qy = np.linalg.qr(Y)[0]
    # Singular values of the overlap give the cosines of the principal angles
    overlap = qx.T.dot(qy)
    return np.linalg.svd(overlap, compute_uv=False, full_matrices=False)
| nirum/jetpack | jetpack/signals.py | Python | mit | 2,118 | [
"Gaussian"
] | 0dab99395949a1876d7c17ee1eb1cb3767b78ac916d17fd9f2511da53155988b |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module contains functions for computing QRNAS.
https://github.com/sunandanmukherjee/QRNAS
Output::
(...)
Missing atom added: ATOM 34 H3T RA3 A 77 39.444 67.315 58.412 1.00 0.00 H
Missing atom added: ATOM 23 OP3 G A 1 46.987 84.037 61.665 1.00 0.00
Number of atoms: 2470
Number of residues: 77
Building intraresidual bonds...
Building intraresidual angles...
Building intraresidual dihedrals...
Building intraresidual impropers...
Building interresidual bonds...
Building interresidual angles...
Building interresidual dihedrals...
Number of bonds built: 2662
Number of angles built: 4764
Number of dihedrals built: 7406
Number of impropers built: 524
-----------------------------------------------------------------------------
End of molecule (chain)
-----------------------------------------------------------------------------
-----------------------------------------------------------------------------
Building interresidual nonbonded pairs & H-bonds (it may take a while)...
Number of electrostatic pairs: 0
Number of van der Waals pairs: 332101
Number of H-bonds built: 64
Number of spring restraints: 0
Number of positional restraints: 0
Number of base pairs built: 20
Number of molecules (chains) read: 1
-----------------------------------------------------------------------------
Performing minimization step: 1. Total energy = 28783.5671 kcal/mol (28753.2335 without restraints)
Writing PDB file: /var/folders/yc/ssr9692s5fzf7k165grnhpk80000gp/T/tmpic_m3bcr/query_out.pdb ...Done.
"""
import os
from rna_tools.tools.mq.lib.wrappers.SubprocessUtils import run_command
from rna_tools.tools.pdb_formatix.PDBFile import PDBFile
from rna_tools.tools.mq.lib.wrappers.base_wrappers import ProgramWrapper
from rna_tools.rna_tools_config import QRNAS_PATH, QRNAS_CONFIG_PATH
import subprocess
import re
from shutil import copyfile
# directory where this script is
# files for some commands are also here
DIRECTORY = os.path.dirname(__file__)
class QRNAS(ProgramWrapper):
    """
    Wrapper class for AMBER force field via QRNA of Julisz Stasiewicz

    Runs the external QRNA executable in a sandbox directory to obtain
    AMBER-style energies for an RNA PDB structure.
    """
    program_name = 'QRNA'
    executable = 'QRNA'
    def __init__(self, job_id=None):
        # 'sequence' / 'seq_name' are the input-type arguments expected by
        # ProgramWrapper -- presumably placeholders here; verify upstream.
        super(QRNAS, self).__init__('sequence', 'seq_name', job_id=job_id)
    def _prepare_files(self):
        # Link the QRNA binary and its forcefield directory into the sandbox
        # so the program can be invoked from there.
        os.symlink(QRNAS_PATH + os.sep + self.executable,
                   self.sandbox_dir + os.sep + 'QRNA')
        os.symlink(QRNAS_PATH + os.sep + 'forcefield',
                   self.sandbox_dir + os.sep + 'forcefield')
    def run(self, filename, numSteps, verbose=False):
        """Run QRNA twice, with and without electrostatics.

        Return:
            list of two energies [with electrostatics, without],
            e.g., ['30307.1088', '28783.5671']
        """
        return [self.run_one(filename, str(numSteps), electrostatics=True, verbose=verbose), self.run_one(filename, str(numSteps), electrostatics=False, verbose=verbose)]
    def run_one(self, filename, numSteps, electrostatics=True, verbose=False):
        """
        Run a single QRNA minimization and return the AMBER energy.

        Arguments:
            filename: path of the input PDB file
            numSteps: number of minimization steps, as a string
            electrostatics: when False, writes 'ELECTR 0' to the config
                so the electrostatic term is disabled
            verbose: print the command line and the program output

        Returns:
            the total energy (string) parsed from the QRNA output, or -1
            when the expected 'Performing minimization step' line is not
            found.
        """
        # copy file to sandbox and apply some fixes
        copyfile(filename, self.sandbox_dir + os.sep + 'query.pdb')
        pdb_file = PDBFile(pdb_path=self.sandbox_dir + os.sep + 'query.pdb')
        #pdb_file.pedantic_pdb()
        #pdb_file.check_and_get_first_model()
        pdb_file.save(self.sandbox_dir + os.sep + 'query.pdb')
        self.pdb_fixes = pdb_file.fixes
        # write config
        f = open(self.sandbox_dir + os.sep + 'conf.cfg', 'w')
        f.write('NSTEPS ' + numSteps + '\nNUMTHREADS 1\n')
        if not electrostatics:
            f.write('ELECTR 0\n')
        f.close()
        #print self.sandbox_dir
        cmd = self.sandbox_dir + os.sep + 'QRNA ' + \
            '-c ' + self.sandbox_dir + os.sep + 'conf.cfg ' + \
            '-i ' + self.sandbox_dir + os.sep + 'query.pdb'
        if verbose:
            print(cmd)
        self.log(cmd, 'debug')
        self.log('Running program')
        out = subprocess.getoutput(cmd)
        self.log('Run finished')
        self.log(out, 'debug')
        if verbose: print(out)
        # get result
        #print out
        #x1 = re.compile('Number of electrostatic pairs:\s+(?P<energy>[0-9]+)').search(out).group('energy')
        #x2 = re.compile('Number of van der Waals pairs:\s+(?P<energy>[0-9]+)').search(out).group('energy')
        #x3 = re.compile('2Number of H-bonds built:\s+(?P<energy>[0-9]+)').search(out).group('energy')
        # NOTE(review): the character class [01-9\.e\+] and the unescaped
        # '.' after numSteps look fragile (no '-' for negative energies);
        # confirm against QRNA's exact log format before changing.
        rx = re.compile('Performing minimization step: ' + numSteps + '. Total energy = (?P<energy>[01-9\.e\+]+) kcal').search(out)
        if rx:
            energy = rx.group('energy')
        else:
            energy = -1
            self.log('# problem --', 'error')
        return energy
def main():
    """Smoke test: score a bundled tRNA structure with QRNAS and print it."""
    qrnas = QRNAS()
    try:
        # Performing minimization step: 1. Total energy = 28783.5671 kcal/mol (28753.2335 without restraints)
        filename = "test" + os.sep + "unmod_Val3_tRNA_model_si.pdb"
        # NOTE(review): the third positional argument lands on run()'s
        # `verbose` parameter, not `electrostatics` -- confirm intended.
        energy = qrnas.run(filename, 1, False)
        print('energy: ', energy)
    except Exception as e:
        print(e)
    finally:
        #qrna.cleanup()
        pass
if __name__ == "__main__":
main()
| mmagnus/rna-pdb-tools | rna_tools/tools/mq/QRNAS/QRNAS.py | Python | gpl-3.0 | 5,902 | [
"Amber"
] | 23656ab174ec375affc46fcd576ce954ccf10c2b27062b4aa9734bec8388cf37 |
# an integer number as compartment identifier
# type of neuronal compartment
# 0 - undefined
# 1 - soma
# 2 - axon
# 3 - basal dendrite
# 4 - apical dendrite
# x coordinate of the compartment
# y coordinate of the compartment
# z coordinate of the compartment
# radius of the compartment
# parent compartment
from itertools import *
import moogli
COMMENT_MARKER = "#"
COLUMN_SEPARATOR = " "
# UNDEFINED_TYPE_ID
# SOMA_TYPE_ID
# AXON_TYPE_ID
# BASAL_DENDRITE_TYPE_ID
# APICAL_DENDRITE_TYPE_ID
def split(predicate, iterable):
    """Partition *iterable* at the first element failing *predicate*.

    Returns a pair of lists ``(left, right)`` where ``left`` is the
    longest prefix whose elements all satisfy the predicate, and
    ``right`` holds the first failing element and everything after it.
    """
    iterator = iter(iterable)
    left, right = [], []
    for item in iterator:
        if predicate(item):
            left.append(item)
        else:
            # First failure: it and all remaining items go to the right
            right.append(item)
            break
    right.extend(iterator)
    return (left, right)
def parse(filename):
    """Parse an SWC morphology file and build a moogli network from it.

    Comment lines (starting with '#') at the top of the file are skipped;
    every remaining line is split into the standard seven SWC columns
    (id, type, x, y, z, radius, parent-id) with the appropriate types.

    Returns the network produced by ``create_network``.
    """
    # Close the file deterministically instead of leaking the handle
    with open(filename) as swc_file:
        raw_lines = swc_file.readlines()
    data_lines = dropwhile(lambda line: line.startswith(COMMENT_MARKER),
                           raw_lines)
    # Materialize as a list: under Python 3, map() would return a lazy
    # iterator, which create_network() cannot take len() of and which
    # would be exhausted after its first traversal.
    swc_data = [(int(cols[0]),
                 int(cols[1]),
                 float(cols[2]),
                 float(cols[3]),
                 float(cols[4]),
                 float(cols[5]),
                 int(cols[6]))
                for cols in (line.strip().split() for line in data_lines)]
    return create_network(swc_data,
                          filename,
                          filename)
def create_network(swc_data, network_name, neuron_name):
    """Build a moogli Network containing one Neuron from parsed SWC rows.

    swc_data: sequence of 7-tuples (id, type, x, y, z, radius, parent_id)
    network_name / neuron_name: labels for the created objects (the
    caller passes the source filename for both).
    """
    print( network_name + " has " + str(len(swc_data)) + " compartments.")
    network = moogli.Network(network_name)
    neuron = moogli.Neuron(neuron_name)
    # First pass: create one electrical compartment per SWC row and set
    # its distal end from the row's coordinates and radius.
    for (cid, ctype, cx, cy, cz, cr, cpid) in swc_data:
        compartment = moogli.ElectricalCompartment(str(cid))
        compartment.distal = (cx, cy, cz, cr)
        neuron.attach(compartment)
    # Second pass: wire compartments to their parents. Rows whose parent
    # id is not a known compartment (e.g. the SWC root, parent -1) hit the
    # KeyError branch: they become parentless and are colored green;
    # connected compartments are colored red.
    for (cid, ctype, cx, cy, cz, cr, cpid) in swc_data:
        try:
            neuron[str(cid)].parent = neuron[str(cpid)]
            neuron[str(cid)].proximal = neuron[str(cpid)].distal
            neuron[str(cid)].add_representation()
            neuron[str(cid)].set_color((1.0, 0.0, 0.0, 1.0))
        except KeyError:
            neuron[str(cid)].parent = None
            neuron[str(cid)].proximal = neuron[str(cid)].distal
            neuron[str(cid)].add_representation()
            neuron[str(cid)].set_color((0.0, 1.0, 0.0, 1.0))
        neuron[str(cid)].show(1)
    neuron.show(0)
    network.attach(neuron)
    return network
# def _network(swc_data):
# network = Network(filename)
# (somas, non_somas) = split( lambda line: line[COMPARTMENT_TYPE_INDEX] == SOMA_TYPE
# , swc_data
# )
# neuron = Neuron(filename)
# if len(somas) == 1:
# soma = Compartment( [somas[0]]
# )
# neuron.add_compartment(soma)
# def compartmentalize(child, parent = None):
# compartment = Compartment(child["id"])
# proximal =
# distal = [child["x"], child["y"], child["z"], child["r"]]
# compartment.add_geometry(
# )
| dilawar/moogli | moogli/parser/swc.py | Python | gpl-2.0 | 3,318 | [
"NEURON"
] | 5921889973d6cb4efffaf31e52b71ec072d3e1e5c3b5e784748d5998715e89c0 |
import Constants
import os
import glob
import sys
import h5py
import StringIO
from PIL import Image
import numpy as np
import binascii
# XRF JOB ARGS KEYS
JOB_IS_LIVE_JOB = 'Is_Live_Job' # INTEGER
JOB_STANDARDS = 'Standards' # TEXT
JOB_DETECTOR_LIST = 'DetectorList' # TEXT
JOB_MAX_FILES_TO_PROC = 'MaxFilesToProc' # INTEGER
JOB_MAX_LINES_TO_PROC = 'MaxLinesToProc' # INTEGER
JOB_QUICK_AND_DIRTY = 'QuickAndDirty' # INTEGER
JOB_XRF_BIN = 'XRF_Bin' # INTEGER
JOB_XANES_SCAN = 'XANES_Scan' # INTEGER
JOB_PROC_MASK = 'ProcMask' # INTEGER
def gen_args_dict():
    """Return the default argument dictionary for an XRF processing job.

    Keys are the JOB_* constants defined at module level; callers
    override individual entries before submitting the job.
    """
    return {
        JOB_IS_LIVE_JOB:0,
        JOB_STANDARDS:'maps_standardinfo.txt',
        JOB_DETECTOR_LIST:'0,1,2,3',
        JOB_MAX_FILES_TO_PROC:1,
        JOB_MAX_LINES_TO_PROC:-1,
        JOB_QUICK_AND_DIRTY:0,
        JOB_XRF_BIN:0,
        JOB_XANES_SCAN:0,
        JOB_PROC_MASK:0
    }
def gen_email_attachments(alias_path, job_dict):
    """Render per-channel JPEG previews of a processed XRF dataset.

    Opens the job's single HDF5 output, min/max-normalizes each channel
    image to 8 bits, JPEG-encodes it, and base64-encodes the bytes.

    Returns:
        dict mapping 'channel_<name>.jpg' -> base64 JPEG data, or None
        when the job covers 'all'/multiple datasets, the expected HDF5
        datasets are missing, or any error occurs (best-effort helper).
    """
    images_dict = None
    try:
        # create image dictionary
        images_dict = {}
        full_file_name = ''
        # check how many datasets are in job
        #file_name = ''
        file_dir = os.path.join(alias_path, Constants.DIR_IMG_DAT)
        job_args = job_dict[Constants.JOB_ARGS]
        proc_mask = job_args[JOB_PROC_MASK]
        # will only check one file for images
        if job_dict[Constants.JOB_DATASET_FILES_TO_PROC] == 'all':
            #self.logger.warning('Warning: Too many datasets to parse images from')
            return None
        else:
            temp_names = job_dict[Constants.JOB_DATASET_FILES_TO_PROC].split(',')
            if len(temp_names) > 1:
                #self.logger.warning('Warning: Can only parse one dataset for images, dataset list is %s', job_dict[Constants.JOB_DATASET_FILES_TO_PROC])
                return None
        # The .mda dataset name maps to the processed .h5 output file
        temp_name = job_dict[Constants.JOB_DATASET_FILES_TO_PROC]
        hdf_file_name = temp_name.replace('.mda', '.h5')
        full_file_name = os.path.join(file_dir, hdf_file_name)
        hdf_file = h5py.File(full_file_name, 'r')
        maps_group = hdf_file[Constants.HDF5_GRP_MAPS]
        # Bit 4 of the proc mask selects fitted XRF maps, bit 1 ROI maps
        if proc_mask & 4 == 4:
            xrf_dataset = np.nan_to_num(maps_group[Constants.HDF5_DSET_XRF_FITS])
        elif proc_mask & 1 == 1:
            xrf_dataset = np.nan_to_num(maps_group[Constants.HDF5_DSET_XRF_ROI])
        else:
            #self.logger.warning('Warning: %s did not process XRF_ROI or XRF_FITS', file_name)
            return None
        channel_names = maps_group[Constants.HDF5_GRP_CHANNEL_NAMES]
        if channel_names.shape[0] != xrf_dataset.shape[0]:
            #self.logger.warning('Warning: file %s : Datasets: %s [%s] and %s [%s] length missmatch', file_name, Constants.HDF5_DSET_XRF_ROI, xrf_dataset.shape[0], Constants.HDF5_GRP_CHANNEL_NAMES, channel_names.shape[0])
            return None
        for i in range(channel_names.size):
            outbuf = StringIO.StringIO()
            # Min/max normalize the channel to 0..255 and encode as JPEG
            # NOTE(review): a constant-valued channel divides by zero here
            # and falls into the bare except below -- confirm acceptable.
            I8 = (((xrf_dataset[i] - np.min(xrf_dataset[i])) / (np.max(xrf_dataset[i]) - np.min(xrf_dataset[i]))) * 255.9).astype(np.uint8)
            img = Image.fromarray(I8, mode='L')
            img.save(outbuf, format='JPEG')
            name = 'channel_' + channel_names[i] + '.jpg'
            images_dict[name] = binascii.b2a_base64(outbuf.getvalue())
    except:
        # NOTE(review): bare except deliberately degrades to "no
        # attachments"; consider at least logging the exception.
        images_dict = None
    return images_dict
def start_job(log_name, alias_path, job_dict, options, exitcode):
    """Run a maps_batch processing job described by ``job_dict``.

    Writes ``maps_settings.txt`` into ``alias_path`` and then invokes
    ``maps_batch.maps_batch`` with flags decoded from the job's proc mask.

    Arguments:
        log_name: file name for the job log (placed under job_logs/)
        alias_path: working directory of the dataset
        job_dict: job description dict (see Constants.JOB_* keys)
        options: dict; options['Path'] is the directory holding maps_batch
        exitcode: unused here -- presumably kept for the caller's signature

    Returns 0 on success; raises SystemError when processing fails.
    """
    #find_maps_batch()
    if not options['Path'] in sys.path:
        sys.path.insert(0,options['Path'])
    import maps_batch
    saved_cwd = os.getcwd()
    job_args = job_dict[Constants.JOB_ARGS]
    # For live jobs, process the most recently created .mda file
    if job_args[JOB_IS_LIVE_JOB] == 1:
        dataset_full_file_path = max(glob.iglob(alias_path + '/mda/*.mda'), key=os.path.getctime)
        job_dict[Constants.JOB_DATASET_FILES_TO_PROC] = os.path.basename(dataset_full_file_path)
    #global _log_name
    #_log_name = log_name
    logger, fHandler = maps_batch.setup_logger('job_logs/' + log_name)
    logger.info('Start Job Process')
    # Decode the detector list into a start index and an element count
    detector_start = '0'
    detector_elements = '4'
    detector_list = str(job_args[JOB_DETECTOR_LIST]).strip()
    if ',' in detector_list:
        dlist = detector_list.split(',')
        detector_start = dlist[0]
        detector_elements = str(len(dlist))
    else:
        detector_start = detector_list
        detector_elements = '1'
    try:
        os.chdir(options['Path'])
        # Write the settings file maps_batch reads from the dataset dir
        maps_set_str = os.path.join(str(alias_path), 'maps_settings.txt')
        f = open(maps_set_str, 'w')
        f.write(' This file will set some MAPS settings mostly to do with fitting' + '\n')
        f.write('VERSION:' + str(job_dict[Constants.JOB_VERSION]).strip() + '\n')
        f.write('DETECTOR_ELEMENTS:' + detector_elements + '\n')
        f.write('MAX_NUMBER_OF_FILES_TO_PROCESS:' + str(job_args[JOB_MAX_FILES_TO_PROC]).strip() + '\n')
        f.write('MAX_NUMBER_OF_LINES_TO_PROCESS:' + str(job_args[JOB_MAX_LINES_TO_PROC]).strip() + '\n')
        f.write('QUICK_DIRTY:' + str(job_args[JOB_QUICK_AND_DIRTY]).strip() + '\n')
        f.write('XRF_BIN:' + str(job_args[JOB_XRF_BIN]).strip() + '\n')
        f.write('NNLS: 0\n')
        f.write('XANES_SCAN:' + str(job_args[JOB_XANES_SCAN]).strip() + '\n')
        f.write('DETECTOR_TO_START_WITH:' + detector_start + '\n')
        f.write('BEAMLINE:' + str(job_dict[Constants.JOB_BEAM_LINE]).strip() + '\n')
        f.write('DatasetFilesToProc:' + str(job_dict[Constants.JOB_DATASET_FILES_TO_PROC]).strip() + '\n')
        # One STANDARD line per semicolon-separated standards file
        standard_filenames = job_args[JOB_STANDARDS].split(';')
        for item in standard_filenames:
            f.write('STANDARD:' + item.strip() + '\n')
        f.close()
        # Decode the processing bitmask into individual maps_batch flags
        proc_mask = int(job_args[JOB_PROC_MASK])
        key_a = 0
        key_b = 0
        key_c = 0
        key_d = 0
        key_e = 0
        key_f = 0 # for netcdf to hdf5 future feature
        key_g = 0
        if proc_mask & 1 == 1:
            key_a = 1
        if proc_mask & 2 == 2:
            key_b = 1
        if proc_mask & 4 == 4:
            key_c = 1
        if proc_mask & 8 == 8:
            key_d = 1
        if proc_mask & 16 == 16:
            key_e = 1
        if proc_mask & 32 == 32:
            key_f = 1
        if proc_mask & 64 == 64:
            key_g = 1
        maps_batch.maps_batch(wdir=alias_path, option_a_roi_plus=key_a, option_b_extract_spectra=key_b, option_c_per_pixel=key_c, option_d_image_extract=key_d, option_e_exchange_format=key_e, option_g_avg_hdf=key_g, logger=logger)
        os.chdir(saved_cwd)
        logger.info('Completed Job')
    except:
        # On any failure: restore the cwd, close the log handlers, and
        # re-raise as a SystemError for the job scheduler.
        logger.exception('job process')
        os.chdir(saved_cwd)
        handlers = logger.handlers[:]
        for handler in handlers:
            handler.close()
            logger.removeHandler(handler)
        raise SystemError("Error Processing Dataset")
    logger.info('Done Job Process')
    # Detach handlers so repeated jobs do not duplicate log output
    handlers = logger.handlers[:]
    for handler in handlers:
        handler.close()
        logger.removeHandler(handler)
    return 0
| aglowacki/Taskington | modules/mapspy.py | Python | mit | 6,145 | [
"NetCDF"
] | 387320ad65debf732784b5766c2073eb476772e479656cbc8cfb96a1da1ecb69 |
"""
Tools for making FSPS templates
"""
import os
from collections import OrderedDict
import numpy as np
import astropy.units as u
from astropy.cosmology import WMAP9
FLAM_CGS = u.erg/u.second/u.cm**2/u.Angstrom
LINE_CGS = 1.e-17*u.erg/u.second/u.cm**2
try:
from dust_attenuation.baseclasses import BaseAttAvModel
except:
BaseAttAvModel = object
from astropy.modeling import Parameter
import astropy.units as u
try:
from fsps import StellarPopulation
except:
# Broken, but imports
StellarPopulation = object
from . import utils
from . import templates
DEFAULT_LABEL = 'fsps_tau{tau:3.1f}_logz{logzsol:4.2f}_tage{tage:4.2f}_av{Av:4.2f}'
WG00_DEFAULTS = dict(geometry='shell', dust_type='mw',
dust_distribution='homogeneous')
__all__ = ["Zafar15", "ExtinctionModel", "SMC", "Reddy15", "KC13",
"ParameterizedWG00", "ExtendedFsps", "fsps_line_info",
"wuyts_line_Av"]
class ArrayExtCurve(BaseAttAvModel):
    """
    Attenuation curve interpolated from tabulated arrays.

    ``xarray`` holds wavelengths in microns and ``yarray`` the matching
    A(lam)/Av values; ``evaluate`` scales the interpolated curve by Av.
    """
    name = 'Array'
    #bump_ampl = 1.
    Rv = 2.21 # err 0.22
    xarray = np.arange(0.09, 2.2, 0.01)
    yarray = xarray*0.+1
    left=None
    right=None

    def Alam(self, mu):
        """
        klam, eq. 1
        """
        # Linear interpolation of the tabulated curve at mu (microns);
        # `left`/`right` control extrapolation beyond the table.
        return np.interp(mu, self.xarray, self.yarray,
                         left=self.left, right=self.right)

    def evaluate(self, x, Av):
        # Unitless inputs are interpreted as microns
        if hasattr(x, 'unit'):
            xin = np.atleast_1d(x)
        else:
            xin = np.atleast_1d(x)*u.micron
        mu = xin.to(u.micron).value
        scaled = self.Alam(mu)*Av
        # Attenuation cannot be negative
        return np.maximum(scaled, 0.)
class Zafar15(BaseAttAvModel):
    """
    Quasar extinction curve from Zafar et al. (2015)

    https://ui.adsabs.harvard.edu/abs/2015A%26A...584A.100Z/abstract
    """
    name = 'Zafar+15'
    #bump_ampl = 1.
    Rv = 2.21 # err 0.22
    @staticmethod
    def Alam(mu, Rv):
        """
        klam, eq. 1

        mu: wavelength(s) in microns. Returns A(lam)/Av.
        """
        x = 1/mu
        # My fit
        coeffs = np.array([0.05694421, 0.57778243, -0.12417444])
        Alam = np.polyval(coeffs, x)*2.21/Rv
        # Only above x > 5.90
        # Far-UV polynomial replaces the fit at wavenumbers > 5.90 1/micron
        fuv = x > 5.90
        if fuv.sum() > 0:
            Afuv = 1/Rv*(-4.678+2.355*x + 0.622*(x-5.90)**2) + 1.
            Alam[fuv] = Afuv[fuv]
        return Alam
    def evaluate(self, x, Av):
        # NOTE(review): unitless input is interpreted as *microns* here,
        # while ExtinctionModel/SMC below assume Angstroms -- confirm.
        if not hasattr(x, 'unit'):
            xin = np.atleast_1d(x)*u.micron
        else:
            xin = np.atleast_1d(x)
        mu = xin.to(u.micron).value
        alam = self.Alam(mu, self.Rv) #*self.Rv
        # Rv = Av/EBV
        # EBV=Av/Rv
        # Ax = Alam/Av
        #
        # klam = Alam/EBV
        # Alam = klam*EBV = klam*Av/Rv
        return np.maximum(alam*Av, 0.)
class ExtinctionModel(BaseAttAvModel):
    """
    Modify `dust_extinction.averages.G03_SMCBar` to work as Att

    Wraps one of the `dust_extinction` curves (selected via
    ``curve_type``: 'SMC', 'LMC', or 'MW'/'F99') behind the
    attenuation-model interface, lazily (re)instantiating the curve
    whenever ``curve_type`` changes.
    """
    #from dust_extinction.averages import G03_SMCBar
    #SMCBar = G03_SMCBar()

    # Which curve to wrap; checked lazily on every evaluate() call
    curve_type = 'smc'
    # curve_type of the currently instantiated curve (None = not built)
    init_curve = None

    #@property
    def _curve_model(self):
        """Instantiate ``self.curve`` for the current ``curve_type`` if needed."""
        if self.init_curve == self.curve_type:
            return 0

        if self.curve_type.upper() == 'SMC':
            from dust_extinction.averages import G03_SMCBar as curve
        elif self.curve_type.upper() == 'LMC':
            from dust_extinction.averages import G03_LMCAvg as curve
        elif self.curve_type.upper() in ['MW','F99']:
            from dust_extinction.parameter_averages import F99 as curve
        else:
            raise ValueError(f'curve_type {self.curve_type} not recognized')

        self.curve = curve()
        self.init_curve = self.curve_type

    def evaluate(self, x, Av):
        """Return A(lam) for wavelengths ``x`` (Angstrom if unitless), scaled by Av."""
        self._curve_model()

        if not hasattr(x, 'unit'):
            xin = np.atleast_1d(x)*u.Angstrom
        else:
            xin = np.atleast_1d(x)

        xinv = 1./xin.to(u.micron)

        curve = self.curve

        # Clip to just inside the curve's valid wavenumber range so edge
        # values do not raise out-of-range errors.
        # (A stray debug print of this range was removed here.)
        xr = [x for x in curve.x_range]
        xr[0] *= 1.001
        xr[1] *= 0.999

        if 'Rv' in curve.param_names:
            klam = curve.evaluate(1/np.clip(xinv,
                                  xr[0]/u.micron, xr[1]/u.micron),
                                  Rv=curve.Rv)
        else:
            klam = curve.evaluate(1/np.clip(xinv,
                                  xr[0]/u.micron, xr[1]/u.micron))

        return klam*Av
class SMC(BaseAttAvModel):
    """
    Modify `dust_extinction.averages.G03_SMCBar` to work as Att

    Adds an optional Drude-profile UV bump on top of the G03 SMC bar
    extinction curve.
    """
    from dust_extinction.averages import G03_SMCBar
    SMCBar = G03_SMCBar()

    # Amplitude of the optional 2175 A bump (0 disables it)
    bump_ampl = Parameter(description="Amplitude of UV bump",
                      default=0., min=0., max=10.)
    # Bump width (FWHM) and central wavelength, microns
    bump_gamma = 0.04
    bump_x0 = 0.2175

    def uv_bump(self, mu, bump_ampl):
        """
        Drude profile for computing the UV bump.

        Parameters
        ----------
        x: np array (float)
            expects wavelengths in [micron]

        x0: float
            Central wavelength of the UV bump (in microns).

        gamma: float
            Width (FWHM) of the UV bump (in microns).

        ampl: float
            Amplitude of the UV bump.

        Returns
        -------
        np array (float)
            lorentzian-like Drude profile

        Raises
        ------
        ValueError
            Input x values outside of defined range
        """
        return bump_ampl * (mu**2 * self.bump_gamma**2 /
                            ((mu**2 - self.bump_x0**2)**2 +
                             mu**2 * self.bump_gamma**2))

    def evaluate(self, x, Av, bump_ampl):
        # Unitless inputs are interpreted as Angstroms
        if not hasattr(x, 'unit'):
            xin = np.atleast_1d(x)*u.Angstrom
        else:
            xin = np.atleast_1d(x)
        xinv = 1./xin.to(u.micron)
        # Clip to the G03 curve's valid wavenumber range (0.3..10 1/micron)
        klam = self.SMCBar.evaluate(1/np.clip(xinv,
                                    0.301/u.micron, 9.99/u.micron))
        if bump_ampl > 0:
            klam += self.uv_bump(xin.to(u.micron).value, bump_ampl)
        return klam*Av
class Reddy15(BaseAttAvModel):
    """
    Attenuation curve from Reddy et al. (2015)

    With optional UV bump

    https://ui.adsabs.harvard.edu/abs/2015ApJ...806..259R/abstract
    """
    name = 'Reddy+15'
    #bump_ampl = 1.
    bump_ampl = Parameter(description="Amplitude of UV bump",
                      default=2., min=0., max=10.)
    # Bump width (FWHM) and central wavelength, microns
    bump_gamma = 0.04
    bump_x0 = 0.2175
    Rv = 2.505

    @staticmethod
    def _left(mu):
        """
        klam, mu < 0.6 micron
        """
        return -5.726 + 4.004/mu - 0.525/mu**2 + 0.029/mu**3 + 2.505

    @staticmethod
    def _right(mu):
        """
        klam, mu > 0.6 micron
        """
        return -2.672 - 0.010/mu + 1.532/mu**2 - 0.412/mu**3 + 2.505

    @property
    def koffset(self):
        """
        Force smooth transition at 0.6 micron
        """
        return self._left(0.6) - self._right(0.6)

    def evaluate(self, x, Av, bump_ampl):
        if not hasattr(x, 'unit'):
            xin = np.atleast_1d(x)*u.Angstrom
        else:
            xin = np.atleast_1d(x)

        mu = xin.to(u.micron).value
        left = mu < 0.6
        klam = mu*0.

        # Reddy Eq. 8: piecewise polynomial in 1/mu split at 0.6 micron,
        # with an offset on the red side forcing continuity at the break.
        # (Dead full-array _left/_right computations whose results were
        # never used have been removed here.)
        klam[left] = self._left(mu[left])
        klam[~left] = self._right(mu[~left]) + self.koffset

        # Rv = Av/EBV
        # EBV=Av/Rv
        # klam = Alam/EBV
        # Alam = klam*EBV = klam*Av/Rv
        return np.maximum((klam + self.uv_bump(mu, bump_ampl))*Av/self.Rv, 0.)

    def uv_bump(self, mu, bump_ampl):
        """
        Drude profile for computing the UV bump.

        Parameters
        ----------
        x: np array (float)
            expects wavelengths in [micron]

        x0: float
            Central wavelength of the UV bump (in microns).

        gamma: float
            Width (FWHM) of the UV bump (in microns).

        ampl: float
            Amplitude of the UV bump.

        Returns
        -------
        np array (float)
            lorentzian-like Drude profile

        Raises
        ------
        ValueError
            Input x values outside of defined range
        """
        return bump_ampl * (mu**2 * self.bump_gamma**2 /
                            ((mu**2 - self.bump_x0**2)**2 +
                             mu**2 * self.bump_gamma**2))
class KC13(BaseAttAvModel):
    """
    Kriek & Conroy (2013) attenuation model, extends Noll 2009 with UV bump
    amplitude correlated with the slope, delta.

    Slightly different from KC13 since the N09 model uses Leitherer (2002)
    below 1500 Angstroms.
    """
    name = 'Kriek+Conroy2013'
    delta = Parameter(description="delta: slope of the power law",
                  default=0., min=-3., max=3.)
    #extra_bump = 1.
    # Ad-hoc scalings: bump amplitude multiplier, red-side power-law slope,
    # and an extra UV power-law tilt applied below 1500 A
    extra_params = {'extra_bump':1., 'beta':-3.2, 'extra_uv':-0.4}
    # Big range for use with FSPS
    x_range = [0.9e-4, 2.e8]
    def _init_N09(self):
        """Build the Noll (2009) shape model with widened wavelength ranges."""
        from dust_attenuation import averages, shapes, radiative_transfer
        # Allow extrapolation
        #shapes.x_range_N09 = [0.9e-4, 2.e8]
        #averages.x_range_C00 = [0.9e-4, 2.e8]
        #averages.x_range_L02 = [0.9e-4, 0.18]
        shapes.C00.x_range = self.x_range
        shapes.N09.x_range = self.x_range
        if self.x_range[0] < 0.18:
            shapes.L02.x_range = [self.x_range[0], 0.18]
        else:
            shapes.L02.x_range = [0.097, 0.18]
        self.N09 = shapes.N09()
    def evaluate(self, x, Av, delta):
        import dust_attenuation
        # Lazily construct the underlying N09 model on first use
        if not hasattr(self, 'N09'):
            self._init_N09()
        #Av = np.polyval(self.coeffs['Av'], tau_V)
        x0 = 0.2175
        gamma = 0.0350
        # KC13: bump amplitude anti-correlates with the slope delta
        ampl = (0.85 - 1.9*delta)*self.extra_params['extra_bump']
        if not hasattr(x, 'unit'):
            xin = np.atleast_1d(x)*u.Angstrom
        else:
            xin = x
        wred = np.array([2.199e4])*u.Angstrom
        # Different dust_attenuation versions order N09 parameters
        # differently; dispatch on the first parameter name.
        if self.N09.param_names[0] == 'x0':
            Alam = self.N09.evaluate(xin, x0, gamma, ampl, delta, Av)
            Ared = self.N09.evaluate(wred, x0, gamma, ampl, delta, Av)[0]
        else:
            Alam = self.N09.evaluate(xin, Av, x0, gamma, ampl, delta)
            Ared = self.N09.evaluate(wred, Av, x0, gamma, ampl, delta)[0]
        # Extrapolate with beta slope
        red = xin > wred[0]
        if red.sum() > 0:
            Alam[red] = Ared*(xin[red]/wred[0])**self.extra_params['beta']
        # Extra power-law tilt applied below 1500 A
        blue = xin < 1500*u.Angstrom
        if blue.sum() > 0:
            plblue = np.ones(len(xin))
            wb = xin[blue].to(u.Angstrom).value/1500
            plblue[blue] = wb**self.extra_params['extra_uv']
            Alam *= plblue
        return Alam
class ParameterizedWG00(BaseAttAvModel):
    """
    Witt & Gordon (2000) attenuation curves approximated by a
    parameterized Noll (2009) model: the polynomial coefficients below
    map WG00 tau_V to N09 parameters (fit up to tau_V = 10).
    """
    # Polynomials in tau_V for each N09 parameter
    coeffs = {'Av': np.array([-0.001, 0.026, 0.643, -0.016]),
              'x0': np.array([ 3.067e-19, -7.401e-18, 6.421e-17, -2.370e-16,
                               3.132e-16, 2.175e-01]),
              'gamma': np.array([ 2.101e-06, -4.135e-05, 2.719e-04,
                                  -7.178e-04, 3.376e-04, 4.270e-02]),
              'ampl': np.array([-1.906e-03, 4.374e-02, -3.501e-01,
                                1.228e+00, -2.151e+00, 8.880e+00]),
              'slope': np.array([-4.084e-05, 9.984e-04, -8.893e-03,
                                 3.670e-02, -7.325e-02, 5.891e-02])}

    # Turn off bump
    # Scaling applied to the fitted bump amplitude (0 disables the bump)
    include_bump = 0.25

    wg00_coeffs = {'geometry': 'shell',
                   'dust_type': 'mw',
                   'dust_distribution': 'homogeneous'}

    name = 'ParameterizedWG00'

    # def __init__(self, Av=1.0, **kwargs):
    #     """
    #     Version of the N09 curves fit to the WG00 curves up to tauV=10
    #     """
    #     from dust_attenuation import averages, shapes, radiative_transfer
    #
    #     # Allow extrapolation
    #     shapes.x_range_N09 = [0.01, 1000]
    #     averages.x_range_C00 = [0.01, 1000]
    #     averages.x_range_L02 = [0.01, 0.18]
    #
    #     self.N09 = shapes.N09()

    def _init_N09(self):
        """Build the Noll (2009) shape model with widened wavelength ranges."""
        from dust_attenuation import averages, shapes, radiative_transfer
        # Allow extrapolation
        shapes.x_range_N09 = [0.009, 2.e8]
        averages.x_range_C00 = [0.009, 2.e8]
        averages.x_range_L02 = [0.009, 0.18]
        self.N09 = shapes.N09()

    def get_tau(self, Av):
        """
        Get the WG00 tau_V for a given Av

        Inverts the Av(tau_V) polynomial by interpolation on a fine
        tau grid (0..10); values outside the grid are clipped.
        """
        tau_grid = np.arange(0, 10, 0.01)
        av_grid = np.polyval(self.coeffs['Av'], tau_grid)
        return np.interp(Av, av_grid, tau_grid, left=0., right=tau_grid[-1])

    def evaluate(self, x, Av):
        import dust_attenuation
        # Lazily construct the underlying N09 model on first use
        if not hasattr(self, 'N09'):
            self._init_N09()
        tau_V = self.get_tau(Av)
        #Av = np.polyval(self.coeffs['Av'], tau_V)
        # Map tau_V to the N09 shape parameters via the fitted polynomials
        x0 = np.polyval(self.coeffs['x0'], tau_V)
        gamma = np.polyval(self.coeffs['gamma'], tau_V)
        if self.include_bump:
            ampl = np.polyval(self.coeffs['ampl'], tau_V)*self.include_bump
        else:
            ampl = 0.
        slope = np.polyval(self.coeffs['slope'], tau_V)
        if not hasattr(x, 'unit'):
            xin = np.atleast_1d(x)*u.Angstrom
        else:
            xin = x
        # Different dust_attenuation versions order N09 parameters
        # differently; dispatch on the first parameter name.
        if self.N09.param_names[0] == 'x0':
            return self.N09.evaluate(xin, x0, gamma, ampl, slope, Av)
        else:
            return self.N09.evaluate(xin, Av, x0, gamma, ampl, slope)
def fsps_line_info(wlimits=None):
    """
    Read the FSPS emission-line list from ``$SPS_HOME/data/emlines_info.dat``.

    Parameters
    ----------
    wlimits : None or (float, float)
        If given, keep only lines with wavelength (Angstrom) strictly
        inside ``(wlimits[0], wlimits[1])``.

    Returns
    -------
    waves : array
        Line wavelengths in Angstroms.
    names : array
        Line names with internal spaces removed.

    Returns two empty lists when ``SPS_HOME`` is unset or the file
    cannot be read.
    """
    try:
        info_file = os.path.join(os.getenv('SPS_HOME'), 'data/emlines_info.dat')
        with open(info_file, 'r') as f:
            lines = f.readlines()
    except (TypeError, OSError):
        # TypeError: SPS_HOME unset (os.getenv -> None feeds path join);
        # OSError: file missing/unreadable. Narrower than the previous
        # bare except, so genuine bugs are no longer swallowed.
        return [], []

    waves = np.array([float(l.split(',')[0]) for l in lines])
    names = np.array([l.strip().split(',')[1].replace(' ', '') for l in lines])

    if wlimits is not None:
        clip = (waves > wlimits[0]) & (waves < wlimits[1])
        waves = waves[clip]
        names = names[clip]

    return waves, names
# Default emission-line wavelengths: all FSPS lines between 1200 A and 1.9 um
DEFAULT_LINES = fsps_line_info(wlimits=[1200, 1.9e4])[0]

# Per-parameter bounds: [minimum, maximum, third value]
# NOTE(review): the third entry looks like a step/tolerance, but its use
# is not visible in this file -- confirm against the fitting code.
BOUNDS = {}
BOUNDS['tage'] = [0.03, 12, 0.05]
BOUNDS['tau'] = [0.03, 2, 0.05]
BOUNDS['zred'] = [0.0, 13, 1.e-4]
BOUNDS['Av'] = [0.0, 15, 0.05]
BOUNDS['gas_logu'] = [-4, 0, 0.05]
BOUNDS['gas_logz'] = [-2, 0.3, 0.05]
BOUNDS['logzsol'] = [-2, 0.3, 0.05]
BOUNDS['sigma_smooth'] = [100, 500, 0.05]
def wuyts_line_Av(Acont):
    """
    Wuyts prescription for extra extinction towards nebular emission

    Maps the continuum attenuation ``Acont`` to the (larger) attenuation
    of the emission lines.
    """
    linear_terms = Acont + 0.9*Acont
    quadratic_term = 0.15*Acont**2
    return linear_terms - quadratic_term
class ExtendedFsps(StellarPopulation):
"""
Extended functionality for the `fsps.StellarPopulation` object
"""
lognorm_center = 0.
lognorm_logwidth = 0.05
is_lognorm_sfh = False
lognorm_fburst = -30
cosmology = WMAP9
scale_lyman_series = 0.1
scale_lines = OrderedDict()
line_av_func = None
#_meta_bands = ['v']
    @property
    def izmet(self):
        """
        Get zmet index for nearest ``self.zlegend`` value to ``loggzsol``.
        """
        # Interpolate logzsol (log10 Z/Zsun, Zsun ~ 0.019) onto the index
        # grid of the metallicity legend, round to the nearest index, and
        # clip to the valid range.
        # NOTE(review): np.cast is deprecated in recent numpy; equivalent
        # to np.round(zi).astype(int).
        NZ = len(self.zlegend)
        logzsol = self.params['logzsol']
        zi = np.interp(logzsol, np.log10(self.zlegend/0.019), np.arange(NZ))
        return np.clip(np.cast[int](np.round(zi)), 0, NZ-1)
    @property
    def fsps_ages(self):
        """
        (linear) ages of the FSPS SSP age grid, Gyr
        """
        # Cached after the first access
        if hasattr(self, '_fsps_ages'):
            return self._fsps_ages
        # get_spectrum() populates self.log_age (log10 age in years);
        # convert to linear Gyr
        _ = self.get_spectrum()
        fsps_ages = 10**(self.log_age-9)
        self._fsps_ages = fsps_ages
        return fsps_ages
    def set_lognormal_sfh(self, min_sigma=3, verbose=False, **kwargs):
        """
        Set lognormal tabular SFH

        Builds a lognormal star-formation history from
        ``self.lognorm_center`` / ``self.lognorm_logwidth`` (overridable
        via kwargs) and installs it with ``set_tabular_sfh``.
        """
        # NOTE(review): `min_sigma` and `interp_func` are set up but never
        # used in this method -- confirm whether they are vestigial.
        try:
            from grizli.utils_c.interp import interp_conserve_c as interp_func
        except:
            interp_func = utils.interp_conserve
        if 'lognorm_center' in kwargs:
            self.lognorm_center = kwargs['lognorm_center']
        if 'lognorm_logwidth' in kwargs:
            self.lognorm_logwidth = kwargs['lognorm_logwidth']
        # sfh=3 selects FSPS's tabular SFH mode
        if self.is_lognorm_sfh:
            self.params['sfh'] = 3
        if verbose:
            msg = 'lognormal SFH ({0}, {1}) [sfh3={2}]'
            print(msg.format(self.lognorm_center, self.lognorm_logwidth,
                             self.is_lognorm_sfh))
        # Log-spaced age grid spanning the full FSPS SSP age range
        xages = np.logspace(np.log10(self.fsps_ages[0]),
                            np.log10(self.fsps_ages[-1]), 2048)
        mu = self.lognorm_center#*np.log(10)
        # sfh = 1./t*exp(-(log(t)-mu)**2/2/sig**2)
        logn_sfh = 10**(-(np.log10(xages)-mu)**2/2/self.lognorm_logwidth**2)
        logn_sfh *= 1./xages
        # Normalize
        logn_sfh *= 1.e-9/(self.lognorm_logwidth*np.sqrt(2*np.pi*np.log(10)))
        self.set_tabular_sfh(xages, logn_sfh)
        # Keep the tabulated SFH around for later inspection
        self._lognorm_sfh = (xages, logn_sfh)
    def lognormal_integral(self, tage=0.1, **kwargs):
        """
        Integral of lognormal SFH up to t=tage

        Evaluates the lognormal CDF analytically via the complementary
        error function, using the same center/width attributes as
        ``set_lognormal_sfh``.
        """
        from scipy.special import erfc
        # Convert the log10-based center/width to natural-log parameters
        mu = self.lognorm_center*np.log(10)
        sig = self.lognorm_logwidth*np.sqrt(np.log(10))
        cdf = 0.5*erfc(-(np.log(tage)-mu)/sig/np.sqrt(2))
        return cdf
    def _set_extend_attrs(self, line_sigma=50, lya_sigma=200, **kwargs):
        """
        Set attributes on `fsps.StellarPopulation` object used by `narrow_lines`.

        sigma : line width (FWHM/2.35), km/s.
        lya_sigma : width for Lyman-alpha

        Sets `emline_dlam`, `emline_sigma` attributes.
        """
        # Line widths, native FSPS and new
        wave, line = self.get_spectrum(tage=1., peraa=True)
        dlam = np.diff(wave)
        # Native wavelength-grid spacing at each emission line, Angstrom
        self.emline_dlam = [np.interp(w, wave[1:], dlam)
                            for w in self.emline_wavelengths] # Angstrom
        self.emline_sigma = [line_sigma for w in self.emline_wavelengths] #kms
        # Separate Ly-alpha
        lya_ix = np.argmin(np.abs(self.emline_wavelengths - 1216.8))
        self.emline_sigma[lya_ix] = lya_sigma
        # Line EWs computed in `narrow_emission_lines`
        self.emline_eqw = [-1e10 for w in self.emline_wavelengths]
        # Emission line names: take them verbatim when the FSPS list
        # matches our wavelengths; otherwise match line-by-line within
        # 0.5 A and fall back to '?'.
        waves, names = fsps_line_info()
        if np.allclose(self.emline_wavelengths, waves, 0.5):
            self.emline_names = names
        else:
            self.emline_names = ['?'] * len(self.emline_wavelengths)
            for w, n in zip(waves, names):
                dl = np.abs(self.emline_wavelengths - w)
                if dl.min() < 0.5:
                    self.emline_names[np.argmin(dl)] = n
        # Default per-line scaling of 1 (no rescaling)
        for l in self.emline_names:
            self.scale_lines[l] = 1.
        # Precomputed arrays for WG00 reddening defined between 0.1..3 um
        self.wg00lim = (self.wavelengths > 1000) & (self.wavelengths < 3.e4)
        self.wg00red = (self.wavelengths > 1000)*1.
        # Cache invalidation state for narrow_emission_lines()
        self.exec_params = None
        self.narrow = None
def narrow_emission_lines(self, tage=0.1, emwave=DEFAULT_LINES, line_sigma=100, oversample=5, clip_sigma=10, verbose=False, get_eqw=True, scale_lyman_series=None, scale_lines={}, force_recompute=False, use_sigma_smooth=True, lorentz=False, **kwargs):
    """
    Replace broad FSPS emission lines with profiles of specified widths.

    Parameters
    ----------
    tage : float
        Age in Gyr of the FSPS model.

    emwave : list
        (Approx) rest wavelengths of the lines to replace.

    line_sigma : float
        Width of the new lines, km/s.  Negative values force
        ``abs(line_sigma)`` regardless of the other width settings.

    oversample : int
        Factor by which to oversample the line profiles relative to the
        native line width.

    clip_sigma : float
        Number of sigmas from line center used for each line profile.

    scale_lyman_series : float
        Scaling to apply to Lyman-series emission lines.

    scale_lines : dict
        Scaling to apply to other emission lines, by name.
        NOTE(review): the default is a mutable ``{}`` (never mutated
        here), so the ``scale_lines is None`` branch below is
        unreachable unless the caller explicitly passes ``None``.

    lorentz : bool
        Use a Lorentzian instead of a Gaussian line profile.

    Returns
    -------
    data : dict
        wave_full, flux_full, line_full = wave and flux with fine lines
        wave, flux_line, flux_clean = original model + removed lines
        ymin, ymax = range of new line useful for plotting
    """
    if not hasattr(self, 'emline_dlam'):
        self._set_extend_attrs(line_sigma=line_sigma, **kwargs)

    self.params['add_neb_emission'] = True

    # Sync line-scaling attributes with the arguments
    if scale_lyman_series is None:
        scale_lyman_series = self.scale_lyman_series
    else:
        self.scale_lyman_series = scale_lyman_series

    if scale_lines is None:
        scale_lines = self.scale_lines
    else:
        for k in scale_lines:
            if k in self.scale_lines:
                self.scale_lines[k] = scale_lines[k]
            else:
                print(f'Line "{k}" not found in `self.scale_lines`')

    # Avoid recomputing if all parameters are the same (i.e., change Av)
    call_params = np.hstack([self.param_floats(params=None), emwave,
                             list(self.scale_lines.values()),
                             [tage, oversample, clip_sigma, scale_lyman_series]])
    try:
        is_close = np.allclose(call_params, self.exec_params)
    except:
        is_close = False

    if is_close & (not force_recompute):
        if verbose:
            print('use stored')
        return self.narrow

    self.exec_params = call_params

    # Native spectrum and indices of the lines to replace
    wave, line = self.get_spectrum(tage=tage, peraa=True)

    line_ix = [np.argmin(np.abs(self.emline_wavelengths - w))
               for w in emwave]

    line_lum = [self.emline_luminosity[i] for i in line_ix]
    line_wave = [self.emline_wavelengths[i] for i in line_ix]

    # Width (Angstrom) of the lines as they appear in the FSPS spectrum
    fsps_sigma = [np.sqrt((2*self.emline_dlam[i])**2 +
                  (self.params['sigma_smooth']/3.e5*self.emline_wavelengths[i])**2)
                  for i in line_ix]

    # Velocity widths (km/s) for the replacement lines
    if line_sigma < 0:
        lines_sigma = [-line_sigma for ix in line_ix]
    elif (self.params['sigma_smooth'] > 0) & (use_sigma_smooth):
        lines_sigma = [self.params['sigma_smooth'] for ix in line_ix]
    else:
        lines_sigma = [self.emline_sigma[ix] for ix in line_ix]

    # Corresponding widths in Angstrom
    line_dlam = [sig/3.e5*lwave
                 for sig, lwave in zip(lines_sigma, line_wave)]

    clean = line*1

    # Fine (log-spaced) wavelength grid spanning the requested lines
    wlimits = [np.min(emwave), np.max(emwave)]
    wlimits = [2./3*wlimits[0], 4.3*wlimits[1]]

    wfine = utils.log_zgrid(wlimits, np.min(lines_sigma)/oversample/3.e5)
    qfine = wfine < 0

    if verbose:
        msg = 'Matched line: {0} [{1}], lum={2}'
        for i, ix in enumerate(line_ix):
            print(msg.format(line_wave[i], ix, line_lum[i]))

    #########
    # Remove lines from FSPS
    # line width seems to be 2*dlam at the line wavelength
    for i, ix in enumerate(line_ix):
        if self.params['nebemlineinspec']:
            gauss = 1/np.sqrt(2*np.pi*fsps_sigma[i]**2)
            gauss *= np.exp(-(wave - line_wave[i])**2/2/fsps_sigma[i]**2)
            clean -= gauss*line_lum[i]

        # indices of fine array where new lines defined
        qfine |= np.abs(wfine - line_wave[i]) < clip_sigma*line_dlam[i]

    # Linear interpolate cleaned spectrum on fine grid
    iclean = np.interp(wfine[qfine], wave, clean)

    # Append original and fine sampled arrays, then de-duplicate
    wfull = np.append(wave, wfine[qfine])
    cfull = np.append(clean, iclean)
    so = np.argsort(wfull)

    wfull, uniq = np.unique(wfull, return_index=True)
    cfull = cfull[uniq]

    # Insert the replacement line profiles on the combined grid
    gfull = cfull*0.
    for i in range(len(line_wave)):

        if lorentz:
            # astropy.modeling.functional_models.Lorentz1D.html
            # gamma is FWHM/2., integral is gamma*pi
            gam = 2.35482*line_dlam[i]/2.
            gline = gam**2/(gam**2 + (wfull-line_wave[i])**2)
            norm = line_lum[i]/(gam*np.pi)
        else:
            # Gaussian
            gline = np.exp(-(wfull - line_wave[i])**2/2/line_dlam[i]**2)
            norm = line_lum[i]/np.sqrt(2*np.pi*line_dlam[i]**2)

        # Per-line scalings: Lyman series and user-specified factors
        if self.emline_names[line_ix[i]].startswith('Ly'):
            norm *= scale_lyman_series

        if self.emline_names[line_ix[i]] in self.scale_lines:
            norm *= self.scale_lines[self.emline_names[line_ix[i]]]

        gfull += gline*norm

        if get_eqw:
            # Equivalent width over the clipped profile region
            clip = np.abs(wfull - line_wave[i]) < clip_sigma*line_dlam[i]
            eqw = np.trapz(gline[clip]*norm/cfull[clip], wfull[clip])
            self.emline_eqw[line_ix[i]] = eqw

    cfull += gfull

    # For plot range
    ymin = iclean.min()
    line_peak = [1/np.sqrt(2*np.pi*dlam**2)*lum for dlam, lum in zip(line_dlam, line_lum)]
    ymax = iclean.max()+np.max(line_peak)

    data = OrderedDict()
    data['wave_full'] = wfull
    data['flux_full'] = cfull
    data['line_full'] = gfull
    data['wave'] = wave
    data['flux_line'] = line
    data['flux_clean'] = clean
    data['ymin'] = ymin
    data['ymax'] = ymax
    self.narrow = data

    return data
def set_fir_template(self, arrays=None, file='templates/magdis/magdis_09.txt', verbose=True, unset=False, scale_pah3=0.5):
    """
    Set the far-IR template used for reprocessed dust emission.

    Source priority: an existing ``file`` path, then the ``arrays``
    tuple ``(wave, flux)``, then the FSPS (DL07) dust emission spectrum.

    Parameters
    ----------
    scale_pah3 : float or None
        Rescale the 3.3 um PAH feature by this factor (None = skip).

    unset : bool
        Remove the FIR-template attributes instead of setting them.

    Sets `fir_template` (normalized to unit integral over
    `self.wavelengths`), `fir_name` and `fir_arrays`.  Returns True.
    """
    if unset:
        if verbose:
            print('Unset FIR template attributes')
        for attr in ['fir_template', 'fir_name', 'fir_arrays']:
            if hasattr(self, attr):
                delattr(self, attr)
        return True

    if os.path.exists(file):
        if verbose:
            print(f'Set FIR dust template from {file}')
        _ = np.loadtxt(file, unpack=True)
        wave, flux = _[0], _[1]
        self.fir_name = file
    elif arrays is not None:
        if verbose:
            print(f'Set FIR dust template from input arrays')
        wave, flux = arrays
        self.fir_name = 'user-supplied'
    else:
        if verbose:
            print(f'Set FIR dust template from FSPS (DL07)')
        # Set with fsps: dust emission is the difference of the spectra
        # computed with and without `add_dust_emission`
        self.params['dust1'] = 0
        self.params['dust2'] = 1.
        self.params['add_dust_emission'] = True
        wave, flux = self.get_spectrum(tage=1., peraa=True)
        self.params['add_dust_emission'] = False
        wave, flux_nodust = self.get_spectrum(tage=1., peraa=True)
        flux -= flux_nodust
        self.fir_name = 'fsps-dl07'

    if scale_pah3 is not None:
        if verbose:
            print(f'Scale 3.3um PAH: {scale_pah3:.2f}')
        # Fit a cubic to the local continuum around 3.3 um (excluding
        # the feature itself) and rescale the residual feature
        ran = np.abs(wave-3.3e4) < 0.5e4
        line = np.abs(wave-3.3e4) < 0.3e4
        px = np.polyfit(wave[ran & ~line], flux[ran & ~line], 3)
        scaled_line = (flux[ran] - np.polyval(px, wave[ran]))*scale_pah3
        flux[ran] = np.polyval(px, wave[ran]) + scaled_line

    # Resample onto the FSPS grid and normalize to unit integral
    fir_flux = np.interp(self.wavelengths, wave, flux, left=0, right=0)
    self.fir_template = fir_flux/np.trapz(fir_flux, self.wavelengths)

    # NOTE(review): stores the input `arrays` argument, which is None
    # when the template came from a file or from FSPS — confirm intended.
    self.fir_arrays = arrays
    return True
def set_dust(self, Av=0., dust_obj_type='WG00x', wg00_kwargs=WG00_DEFAULTS):
    """
    Set the `dust_obj` attribute and the effective `Av`.

    Parameters
    ----------
    Av : float
        V-band attenuation in magnitudes.

    dust_obj_type : str or object
        'WG00' = `dust_attenuation.radiative_transfer.WG00`
        'C00' = `dust_attenuation.averages.C00`
        'WG00x' = `ParameterizedWG00`
        'KC13' = Kriek & Conroy (2013) with dust_index parameter
        'R15' = Reddy et al. (2015) with dust bump parameter
        Any object with an `extinguish` method is used directly.
    """
    from dust_attenuation import averages, radiative_transfer

    # Attenuation is applied outside of FSPS, so disable internal dust
    self.params['dust1'] = 0.
    self.params['dust2'] = 0.

    # Re-initialize the dust object only if missing or the type changed
    needs_init = False
    if not hasattr(self, 'dust_obj'):
        needs_init = True

    if hasattr(self, 'dust_obj_type'):
        if self.dust_obj_type != dust_obj_type:
            needs_init = True

    if needs_init:
        self.dust_obj_type = dust_obj_type
        if dust_obj_type == 'WG00':
            self.dust_obj = radiative_transfer.WG00(tau_V=1.0,
                                                    **WG00_DEFAULTS)
        elif dust_obj_type == 'WG00x':
            self.dust_obj = ParameterizedWG00(Av=Av)
        elif dust_obj_type == 'C00':
            self.dust_obj = averages.C00(Av=Av)
        elif dust_obj_type == 'R15':
            self.dust_obj = Reddy15(Av=Av, bump_ampl=2.)
        elif hasattr(dust_obj_type, 'extinguish'):
            self.dust_obj = dust_obj_type
        else:
            self.dust_obj = KC13(Av=Av)

        print('Init dust_obj: {0} {1}'.format(dust_obj_type, self.dust_obj.param_names))

    self.Av = Av

    if dust_obj_type == 'WG00':
        # WG00 is parameterized by tau_V, not Av: interpolate tau_V from
        # the tabulated Av(tau_V) relation, then store the realized Av
        Avs = np.array([0.151, 0.298, 0.44 , 0.574, 0.825, 1.05 , 1.252, 1.428, 1.584, 1.726, 1.853, 1.961, 2.065, 2.154, 2.318, 2.454, 2.573, 2.686, 3.11 , 3.447, 3.758, 4.049, 4.317, 4.59 , 4.868, 5.148])
        taus = np.array([ 0.25, 0.5 , 0.75, 1. , 1.5 , 2. , 2.5 , 3. , 3.5 , 4. , 4.5 , 5. , 5.5 , 6. , 7. , 8. , 9. , 10. , 15. , 20. , 25. , 30. , 35. , 40. , 45. , 50. ])
        tau_V = np.interp(Av, Avs, taus, left=0.25, right=50)
        self.dust_obj.tau_V = tau_V
        self.Av = self.dust_obj(5500*u.Angstrom)
    elif dust_obj_type == 'KC13':
        self.dust_obj.Av = Av
        self.dust_obj.delta = self.params['dust_index']
    else:
        self.dust_obj.Av = Av
def redden(self, wave):
    """
    Linear attenuation factor at `wave` for the current `dust_obj`.

    Uses the object's `extinguish` method when available; otherwise the
    object is called directly (returning magnitudes of attenuation)
    and converted to a linear factor.
    """
    dust = self.dust_obj
    if hasattr(dust, 'extinguish'):
        return dust.extinguish(wave, Av=self.Av)

    a_mag = dust(wave)  # attenuation in magnitudes
    return 10**(-0.4*a_mag)
def get_full_spectrum(self, tage=1.0, Av=0., get_template=True, set_all_templates=False, z=None, tie_metallicity=True, **kwargs):
    """
    Get the full spectrum with reprocessed emission lines and dust
    emission.

    Parameters
    ----------
    tage : float
        Model age, Gyr.  If None, use ``params['tage']``.

    Av : float
        Attenuation applied through `dust_obj`.  If None, reuse
        `self.Av`.

    get_template : bool
        Return an `eazy.templates.Template`; otherwise return the
        ``(wave, flux)`` arrays.

    set_all_templates : bool
        Also set the `templ_orig`, `templ_cont`, `templ_cont_orig`,
        `templ_unred` and `templ_unred_orig` attributes.

    tie_metallicity : bool
        Set ``gas_logz = logzsol`` unless 'gas_logz' given in kwargs.

    Returns
    -------
    Template object, or ``(wave, flux)`` if not `get_template`.
    """
    # Set the dust model
    if Av is None:
        Av = self.Av

    if 'dust_obj_type' in kwargs:
        self.set_dust(Av=Av, dust_obj_type=kwargs['dust_obj_type'])
    elif hasattr(self, 'dust_obj'):
        self.set_dust(Av=Av, dust_obj_type=self.dust_obj_type)
    else:
        self.set_dust(Av=Av, dust_obj_type='WG00x')

    # Lognormal SFH?
    if ('lognorm_center' in kwargs) | ('lognorm_logwidth' in kwargs):
        self.set_lognormal_sfh(**kwargs)

    if 'lognorm_fburst' in kwargs:
        self.lognorm_fburst = kwargs['lognorm_fburst']

    # Burst fraction for lognormal SFH: add a 100-Myr burst scaled by
    # 10**lognorm_fburst on top of the tabulated lognormal SFH
    if self.is_lognorm_sfh:
        if not hasattr(self, '_lognorm_sfh'):
            self.set_lognormal_sfh()

        t1 = self.lognormal_integral(tage)
        dt = (tage-self._lognorm_sfh[0])
        t100 = (dt <= 0.1) & (dt >= 0)

        sfhy = self._lognorm_sfh[1]*1.
        sfhy += t1*10**self.lognorm_fburst/100e6*t100
        self.set_tabular_sfh(self._lognorm_sfh[0], sfhy)

    # Set FSPS parameters from the keyword arguments
    for k in kwargs:
        if k in self.params.all_params:
            self.params[k] = kwargs[k]

    if 'zmet' not in kwargs:
        self.params['zmet'] = self.izmet

    if ('gas_logz' not in kwargs) & tie_metallicity:
        self.params['gas_logz'] = self.params['logzsol']

    # Run the emission line function
    if tage is None:
        tage = self.params['tage']

    _ = self.narrow_emission_lines(tage=tage, **kwargs)

    wave = _['wave_full']
    flux = _['flux_full']
    lines = _['line_full']
    contin = flux - lines

    #self.sfr100 = self.sfr_avg(dt=np.minimum(tage, 0.1))

    # Apply dust
    if self.dust_obj_type == 'WG00':
        # NOTE(review): `red_lines_full` is only assigned in the `else`
        # branch below but is used for `reddened` — this branch looks
        # like it would raise NameError; confirm upstream behavior.
        # To template
        red = (wave > 1000)*1.
        wlim = (wave > 1000) & (wave < 3.e4)
        red[wlim] = self.redden(wave[wlim]*u.Angstrom)

        # To lines
        red_lines = (self.emline_wavelengths > 1000)*1.
        wlim = (self.emline_wavelengths > 1000)
        wlim &= (self.emline_wavelengths < 3.e4)

        line_wave = self.emline_wavelengths[wlim]*u.Angstrom
        red_lines[wlim] = self.redden(line_wave)
    else:
        red = self.redden(wave*u.Angstrom)

        if self.line_av_func is None:
            self.Av_line = self.Av*1.
            red_lines_full = red
            line_wave = self.emline_wavelengths*u.Angstrom
            red_lines = self.redden(line_wave)
        else:
            # Differential reddening towards nebular lines
            self.Av_line = self.line_av_func(Av)
            self.set_dust(Av=self.Av_line,
                          dust_obj_type=self.dust_obj_type)
            red_lines_full = self.redden(wave*u.Angstrom)
            line_wave = self.emline_wavelengths*u.Angstrom
            red_lines = self.redden(line_wave)

            # Reset for continuum
            self.set_dust(Av=Av, dust_obj_type=self.dust_obj_type)

    # Apply dust to line luminosities
    lred = [llum*lr for llum, lr in
            zip(self.emline_luminosity, red_lines)]
    self.emline_reddened = np.array(lred)

    # Total energy
    e0 = np.trapz(flux, wave)
    # Energy of reddened template
    reddened = contin*red+lines*red_lines_full
    e1 = np.trapz(reddened, wave)

    # Energy budget for the FIR dust emission
    self.energy_absorbed = (e0 - e1)

    # Add dust emission scaled to the absorbed energy
    if hasattr(self, 'fir_template') & self.params['add_dust_emission']:
        dust_em = np.interp(wave, self.wavelengths, self.fir_template)
        dust_em *= self.energy_absorbed
    else:
        dust_em = 0.

    meta0 = self.meta
    self.templ = self.as_template(wave, reddened+dust_em, meta=meta0)

    # Set template attributes
    if set_all_templates:

        # Original wavelength grid
        owave = self.wavelengths
        owave = owave[self.wg00lim]*u.Angstrom
        self.wg00red[self.wg00lim] = self.redden(owave)
        # NOTE(review): `owave` is restricted to `wg00lim` while the
        # flux arrays below span the full grid — confirm shapes match.
        ofir = self.fir_template*self.energy_absorbed
        fl_orig = _['flux_line']*self.wg00red + ofir
        self.templ_orig = self.as_template(owave, fl_orig, meta=meta0)

        # No lines
        meta = meta0.copy()
        meta['add_neb_emission'] = False
        fl_cont = contin*red + dust_em
        ocont = _['flux_clean']*self.wg00red + ofir
        self.templ_cont = self.as_template(wave, fl_cont, meta=meta)
        self.templ_cont_orig = self.as_template(owave, ocont, meta=meta)

        # No dust
        meta = meta0.copy()
        meta['add_neb_emission'] = True
        meta['Av'] = 0
        self.templ_unred = self.as_template(wave, flux, meta=meta)
        self.templ_unred_orig = self.as_template(owave, _['flux_clean'],
                                                 meta=meta)

    if get_template:
        return self.templ
    else:
        return self.templ.wave, self.templ.flux
def as_template(self, wave, flux, label=DEFAULT_LABEL, meta=None):
    """
    Wrap ``(wave, flux)`` arrays in an `eazy.templates.Template`,
    carrying the model metadata and a name formatted from it.
    """
    meta_out = self.meta if meta is None else meta
    return templates.Template(arrays=(wave, flux), meta=meta_out,
                              name=label.format(**meta_out))
def lognorm_avg_sfr(self, tage=None, dt=0.1):
    """
    Analytic mean SFR over the trailing `dt` Gyr of the lognormal SFH,
    including the burst component scaled by ``10**lognorm_fburst``.
    """
    if tage is None:
        tage = self.params['tage']

    cdf_hi = self.lognormal_integral(tage)
    cdf_lo = self.lognormal_integral(np.maximum(tage-dt, 0))

    # Mass formed in the window divided by the window in years
    burst_factor = 1 + 10**self.lognorm_fburst
    return (cdf_hi*burst_factor - cdf_lo)/(dt*1.e9)
@property
def sfr100(self):
    """
    SFR averaged over the last minimum(tage, 100 Myr), from `sfr_avg`
    or by integrating a tabular / lognormal SFH.
    """
    sfh_type = self.params['sfh']

    if sfh_type == 0:
        # SSP: no ongoing star formation
        return 0.

    if sfh_type != 3:
        # Parametric SFH: use the FSPS average
        return self.sfr_avg(dt=np.minimum(self.params['tage'], 0.1))

    # Tabular SFH (sfh == 3)
    if self.is_lognorm_sfh:
        return self.lognorm_avg_sfr(tage=None, dt=0.1)

    if not hasattr(self, '_sfh_tab'):
        return 0.

    try:
        forward = self.params['tabsfh_forward']
    except:
        forward = 1

    if forward == 1:
        # Table runs forward in time: convert to lookback age
        lookback = self.params['tage'] - self._sfh_tab[0]
        order = -1
    else:
        lookback = self._sfh_tab[0]
        order = 1

    window = (lookback <= 0.1) & (lookback >= 0)
    if window.sum() < 2:
        return 0.

    # Integrate SFR over the 100-Myr window and normalize
    return np.trapz(self._sfh_tab[1][window][::order],
                    lookback[window][::order])/0.1
@property
def sfr10(self):
    """
    SFR averaged over the last minimum(tage, 10 Myr) from `sfr_avg`,
    or by integrating a tabular / lognormal SFH.
    """
    if self.params['sfh'] == 0:
        # SSP: no ongoing star formation
        sfr_avg = 0.
    elif self.params['sfh'] == 3:
        # Try to integrate SFH arrays if attribute set
        if self.is_lognorm_sfh:
            sfr_avg = self.lognorm_avg_sfr(tage=None, dt=0.01)
        elif hasattr(self, '_sfh_tab'):
            try:
                fwd = self.params['tabsfh_forward']
            except:
                fwd = 1

            if fwd == 1:
                # Table runs forward in time: convert to lookback age
                age_lb = self.params['tage'] - self._sfh_tab[0]
                step = -1
            else:
                age_lb = self._sfh_tab[0]
                step = 1

            age10 = (age_lb < 0.01) & (age_lb >= 0)
            if age10.sum() < 2:
                sfr_avg = 0.
            else:
                # BUG FIX: normalize by the 10-Myr window (0.01 Gyr);
                # previously divided by 0.1, copied from `sfr100`,
                # which underestimated SFR10 by a factor of 10.
                sfr_avg = np.trapz(self._sfh_tab[1][age10][::step],
                                   age_lb[age10][::step])/0.01
        else:
            sfr_avg = 0.
    else:
        sfr_avg = self.sfr_avg(dt=np.minimum(self.params['tage'], 0.01))

    return sfr_avg
@property
def meta(self):
    """
    Full metadata `dict`: FSPS parameters, derived scalars (masses,
    SFRs, dust properties), per-line luminosities / EWs / widths and
    optional band luminosities and light-weighted ages.
    """
    import fsps
    meta = self.param_dict

    # Metallicity in linear units
    if self._zcontinuous:
        meta['metallicity'] = 10**self.params['logzsol']*0.019
    else:
        meta['metallicity'] = self.zlegend[self.params['zmet']]

    # Scalar attributes, included only when set on the instance
    for k in ['log_age','stellar_mass', 'formed_mass', 'log_lbol',
              'sfr', 'sfr100', 'dust_obj_type','Av','energy_absorbed',
              'fir_name', '_zcontinuous', 'scale_lyman_series',
              'lognorm_center', 'lognorm_logwidth', 'is_lognorm_sfh',
              'lognorm_fburst']:
        if hasattr(self, k):
            meta[k] = self.__getattribute__(k)

    # Per-line properties (luminosity, reddened luminosity, EW, sigma)
    if hasattr(self, 'emline_names'):
        has_red = hasattr(self, 'emline_reddened')

        if self.emline_luminosity.ndim == 1:
            for i in range(len(self.emline_wavelengths)):
                n = self.emline_names[i]
                if n in self.scale_lines:
                    kscl = self.scale_lines[n]
                else:
                    kscl = 1.0

                meta[f'scale {n}'] = kscl
                meta[f'line {n}'] = self.emline_luminosity[i]*kscl
                if has_red:
                    meta[f'rline {n}'] = self.emline_reddened[i]*kscl

                meta[f'eqw {n}'] = self.emline_eqw[i]
                meta[f'sigma {n}'] = self.emline_sigma[i]

    # Band information: light-weighted ages and band luminosities
    if hasattr(self, '_meta_bands'):
        light_ages = self.light_age_band(self._meta_bands, flat=False)
        band_flux = self.get_mags(tage=self.params['tage'], zmet=None,
                                  bands=self._meta_bands, units='flam')
        band_waves = [fsps.get_filter(b).lambda_eff*u.Angstrom
                      for b in self._meta_bands]
        band_lum = [f*w for f, w in zip(band_flux, band_waves)]

        for i, b in enumerate(self._meta_bands):
            meta[f'lwage_{b}'] = light_ages[i]
            meta[f'lum_{b}'] = band_lum[i].value

    # Library names are bytes in some python-fsps versions
    try:
        meta['libraries'] = ';'.join([s.decode() for s in self.libraries])
    except:
        try:
            meta['libraries'] = ';'.join([s for s in self.libraries])
        except:
            meta['libraries'] = '[error]'

    return meta
@property
def param_dict(self):
    """
    Current FSPS parameters as an ordered ``{name: value}`` mapping,
    iterating `self.params.all_params` in order.
    """
    return OrderedDict((name, self.params[name])
                       for name in self.params.all_params)
def light_age_band(self, bands=['v'], flat=True):
    """
    Light-weighted age of the current model in the requested bands.

    Temporarily enables FSPS ``compute_light_ages`` so `get_mags`
    returns ages instead of magnitudes.  With ``flat=True`` and a
    single band, the scalar value is returned instead of an array.
    """
    self.params['compute_light_ages'] = True
    ages = self.get_mags(tage=self.params['tage'], zmet=None,
                         bands=bands)
    self.params['compute_light_ages'] = False

    if flat and ages.shape == (1,):
        return ages[0]

    return ages
def pset(self, params):
    """
    Subset of `self.meta` for the requested keys, in request order.
    Keys missing from the metadata map to NaN.
    """
    subset = OrderedDict()
    for key in params:
        subset[key] = self.meta[key] if key in self.meta else np.nan

    return subset
def param_floats(self, params=None):
    """
    Array of parameter values.  If `params` is None, use the full
    list in `self.params.all_params`.
    """
    if params is None:
        params = self.params.all_params

    # `*1` copies scalar values out of the FSPS parameter store
    return np.array([self.params[name]*1 for name in params])
def parameter_bounds(self, params, limit_age=False):
    """
    Parameter bounds and finite-difference steps for
    `scipy.optimize.least_squares`.

    Parameters listed in the module-level `BOUNDS` table get their
    tabulated (lo, hi, step); unknown parameters get (-inf, inf, 0.05).

    Returns
    -------
    ((lo_list, hi_list), step_list)
    """
    lower, upper, steps = [], [], []
    for name in params:
        if name in BOUNDS:
            entry = BOUNDS[name]
            lower.append(entry[0])
            upper.append(entry[1])
            steps.append(entry[2])
        else:
            lower.append(-np.inf)
            upper.append(np.inf)
            steps.append(0.05)

    return (lower, upper), steps
def fit_spec(self, wave_obs, flux_obs, err_obs, mask=None, plist=['tage', 'Av', 'gas_logu', 'sigma_smooth'], func_kwargs={'lorentz':False}, verbose=True, bspl_kwargs=None, cheb_kwargs=None, lsq_kwargs={'method':'trf', 'max_nfev':200, 'loss':'huber', 'x_scale':1.0, 'verbose':True}, show=False):
    """
    Fit the model to an observed spectrum with
    `scipy.optimize.least_squares`.

    Parameters
    ----------
    wave_obs, flux_obs, err_obs : arrays
        Observed wavelength, flux and uncertainty.  Required.

    mask : array or None
        Boolean mask of valid pixels; derived from finite values if None
        (inside `objfun_fitspec`).

    plist : list
        Names of the model parameters to vary; initial values are taken
        from `self.meta`.

    bspl_kwargs, cheb_kwargs : dict or None
        If given, build a B-spline / Chebyshev basis with
        `grizli.utils` for a multiplicative continuum correction.

    show : bool
        Plot the data and the initial model.

    Returns
    -------
    result : dict
        Best-fit model, normalization, chi2, `least_squares` result,
        bounds, parameter list and spline basis.
    """
    if wave_obs is None:
        # BUG FIX: this branch previously held dead debug code
        # (`_ = None` followed by `_[0]`) that always crashed with an
        # opaque TypeError.  Fail explicitly instead.
        raise ValueError('wave_obs, flux_obs and err_obs must be provided')

    from scipy.optimize import least_squares
    import matplotlib.pyplot as plt
    import grizli.utils

    # Initial parameter vector from the current model metadata
    theta0 = np.array([self.meta[p] for p in plist])

    # Optional continuum-correction basis
    if bspl_kwargs is not None:
        bspl = grizli.utils.bspline_templates(wave_obs, get_matrix=True,
                                              **bspl_kwargs)
    elif cheb_kwargs is not None:
        bspl = grizli.utils.cheb_templates(wave_obs, get_matrix=True,
                                           **cheb_kwargs)
    else:
        bspl = None

    kwargs = func_kwargs.copy()
    for i, p in enumerate(plist):
        kwargs[p] = theta0[i]

    # Model test: evaluate the objective once at the starting point
    margs = (self, plist, wave_obs, flux_obs, err_obs, mask, bspl, kwargs, 'model')
    flux_model, Anorm, chi2_init = self.objfun_fitspec(theta0, *margs)

    if show:
        mask = (mask if mask is not None
                else np.isfinite(flux_obs+err_obs) & (err_obs > 0))
        mask &= np.isfinite(flux_model+flux_obs+err_obs) & (err_obs > 0)
        plt.close('all')
        fig = plt.figure(figsize=(12, 6))
        plt.errorbar(wave_obs[mask], flux_obs[mask], err_obs[mask], color='k', alpha=0.5, linestyle='None', marker='.')
        plt.plot(wave_obs, flux_model, color='pink', linewidth=2, alpha=0.8)

    # Bounds and finite-difference steps for the fit parameters
    bounds, steps = self.parameter_bounds(plist)
    lsq_kwargs['diff_step'] = steps

    # Run the optimization
    lmargs = (self, plist, wave_obs, flux_obs, err_obs, mask, bspl, kwargs, 'least_squares verbose')
    _res = least_squares(self.objfun_fitspec, theta0, bounds=bounds, args=lmargs, **lsq_kwargs)

    # Evaluate the best-fit model
    fit_model, Anorm, chi2_fit = self.objfun_fitspec(_res.x, *margs)

    result = {'fit_model':fit_model, 'Anorm':Anorm, 'chi2_fit':chi2_fit,
              'least_squares':_res, 'bounds':bounds, 'plist':plist,
              'lsq_kwargs':lsq_kwargs, 'bspl':bspl}

    return result
@staticmethod
def objfun_fitspec(theta, self, plist, wave_obs, flux_obs, err_obs, mask, bspl, kwargs, ret_type):
    """
    Objective function for fitting spectra.

    Parameters
    ----------
    theta : array
        Values of the parameters named in `plist`; an 'err_scale'
        entry is extracted separately.

    self : model object
        Passed explicitly because this is a staticmethod used as a
        `least_squares` callable.

    ret_type : str
        'model' -> (flux_model, Anorm, chi2);
        'least_squares' -> residual vector chi;
        'logpdf' -> per-pixel log-likelihood;
        anything else -> scalar chi2.
        If it contains 'verbose', each evaluation is printed.
    """
    import scipy.stats

    try:
        from grizli.utils_c.interp import interp_conserve_c as interp_func
    except:
        interp_func = utils.interp_conserve

    # Update model parameters from theta
    err_scale = 1.
    for i, p in enumerate(plist):
        if p == 'err_scale':
            err_scale = theta[i]
            continue

        kwargs[p] = theta[i]

    templ = self.get_full_spectrum(**kwargs)

    # Resample the model onto the observed wavelength grid
    flux_model = templ.resample(wave_obs, z=self.params['zred'],
                                in_place=False,
                                return_array=True, interp_func=interp_func)
    flux_model = flux_model.flatten()

    if mask is None:
        mask = np.isfinite(flux_model+flux_obs+err_obs) & (err_obs > 0)

    # Normalize the model to the data: multiplicative spline/Chebyshev
    # correction if `bspl` is provided, else a single scale factor
    if bspl is not None:
        _A = (bspl.T*flux_model)
        _yx = (flux_obs / err_obs)[mask]
        _c = np.linalg.lstsq((_A/err_obs).T[mask,:], _yx, rcond=-1)
        Anorm = np.mean(bspl.dot(_c[0]))
        flux_model = _A.T.dot(_c[0])
    else:
        lsq_num = (flux_obs*flux_model/err_obs**2)[mask].sum()
        lsq_den = (flux_model**2/err_obs**2)[mask].sum()
        Anorm = lsq_num/lsq_den
        flux_model *= Anorm

    # NOTE(review): `err_scale` is read from theta but never applied to
    # `err_obs` below — confirm intended.
    chi = ((flux_model - flux_obs)/err_obs)[mask]
    chi2 = (chi**2).sum()

    if 'verbose' in ret_type:
        print('{0} {1:.4f}'.format(theta, chi2))

    if 'model' in ret_type:
        return flux_model, Anorm, chi2
    elif 'least_squares' in ret_type:
        return chi
    elif 'logpdf' in ret_type:
        #return -chi2/2.
        # NOTE(review): scale=err_obs*flux_model looks like a
        # relative-error likelihood — confirm.
        lnp = scipy.stats.norm.logpdf((flux_model-flux_obs)[mask],
                                      loc=0,
                                      scale=(err_obs*flux_model)[mask])
        return lnp
    else:
        return chi2
def line_to_obsframe(self, zred=None, cosmology=None, verbose=False, unit=LINE_CGS, target_stellar_mass=None, target_sfr=None, target_lir=None):
    """
    Scale factor to convert internal line luminosities (L_sun) to the
    observed frame.

    If ``target_stellar_mass``, ``target_sfr``, or ``target_lir`` is
    specified, scale the output to the desired value using the
    intrinsic properties.  Units are linear ``Msun``, ``Msun/yr``,
    and ``Lsun``, respectively.
    """
    from astropy.constants import L_sun

    # Idiom fix: identity comparison with None (was `== None`)
    if zred is None:
        zred = self.params['zred']
        if verbose:
            # Message fix: previously said 'continuum_to_obsframe'
            msg = 'line_to_obsframe: Use params[zred] = {0:.3f}'
            print(msg.format(zred))

    if cosmology is None:
        cosmology = self.cosmology
    else:
        self.cosmology = cosmology

    # Luminosity distance; unity (1 cm) for rest-frame output
    if zred <= 0:
        dL = 1*u.cm
    else:
        dL = cosmology.luminosity_distance(zred).to(u.cm)

    to_cgs = (1*L_sun/(4*np.pi*dL**2)).to(unit)

    # Optional rescaling to a target intrinsic property
    if target_stellar_mass is not None:
        to_cgs *= target_stellar_mass / self.stellar_mass
    elif target_sfr is not None:
        to_cgs *= target_sfr / self.sfr100
    elif target_lir is not None:
        to_cgs *= target_lir / self.energy_absorbed

    return to_cgs.value
def continuum_to_obsframe(self, zred=None, cosmology=None, unit=u.microJansky, verbose=False, target_stellar_mass=None, target_sfr=None, target_lir=None):
    """
    Normalization factor to scale FSPS model flux densities in
    (L_sun / Hz) or (L_sun / Angstrom) to the observed-frame `unit`.

    If ``target_stellar_mass``, ``target_sfr``, or ``target_lir`` is
    specified, scale the output to the desired value using the
    intrinsic properties.  Units are linear ``Msun``, ``Msun/yr``,
    and ``Lsun``, respectively.
    """
    from astropy.constants import L_sun

    # Idiom fix: identity comparison with None (was `== None`)
    if zred is None:
        zred = self.params['zred']
        if verbose:
            msg = 'continuum_to_obsframe: Use params[zred] = {0:.3f}'
            print(msg.format(zred))

    if cosmology is None:
        cosmology = self.cosmology
    else:
        self.cosmology = cosmology

    # Luminosity distance; unity (1 cm) for rest-frame output
    if zred <= 0:
        dL = 1*u.cm
    else:
        dL = cosmology.luminosity_distance(zred).to(u.cm)

    # FSPS L_sun / Hz or L_sun / Angstrom to observed frame: probe
    # whether `unit` converts to f-lambda; fall back to f-nu otherwise
    try:
        _ = (1*unit).to(u.erg/u.second/u.cm**2/u.Angstrom)
        obs_unit = (1*L_sun/u.Angstrom/(4*np.pi*dL**2)).to(unit)/(1+zred)
    except Exception:
        obs_unit = (1*L_sun/u.Hz/(4*np.pi*dL**2)).to(unit)*(1+zred)

    # Optional rescaling to a target intrinsic property
    if target_stellar_mass is not None:
        obs_unit *= target_stellar_mass / self.stellar_mass
    elif target_sfr is not None:
        obs_unit *= target_sfr / self.sfr100
    elif target_lir is not None:
        obs_unit *= target_lir / self.energy_absorbed

    return obs_unit.value
def fit_phot(self, phot_dict, filters=None, flux_unit=u.microJansky, plist=['tage', 'Av', 'gas_logu', 'sigma_smooth'], func_kwargs={'lorentz':False}, verbose=True, lsq_kwargs={'method':'trf', 'max_nfev':200, 'loss':'huber', 'x_scale':1.0, 'verbose':True}, show=False, TEF=None, photoz_obj=None):
    """
    Fit the model to observed photometry with
    `scipy.optimize.least_squares`.

    Parameters
    ----------
    phot_dict : dict
        Requires 'fobs' (fluxes) and 'efobs' (uncertainties); an
        optional 'flux_unit' entry overrides `flux_unit`.

    filters : list
        Filter objects with `pivot` attributes, passed to
        `objfun_fitphot`.

    TEF : callable, optional
        Template error function of redshift, added in quadrature to
        the variances.

    show : bool or axis-like
        Plot data and models.  # NOTE(review): an axis object passed
        here is assumed truthy — confirm for matplotlib Axes.

    Returns
    -------
    _fit : dict
        Best-fit model fluxes, scale, chi2, template, parameters and
        derived log_mass / SFRs / light-weighted ages.
    """
    from scipy.optimize import least_squares
    import matplotlib.pyplot as plt
    import grizli.utils

    sys_err = 0.02  # NOTE(review): defined but not applied below

    flux = phot_dict['fobs']
    err = phot_dict['efobs']
    if 'flux_unit' in phot_dict:
        flux_unit = phot_dict['flux_unit']

    # Initial parameter vector from the current model metadata
    x0 = np.array([self.meta[p] for p in plist])

    # Are input fluxes f-lambda or f-nu?
    try:
        _x = (1*flux_unit).to(u.erg/u.second/u.cm**2/u.Angstrom)
        is_flam = True
    except:
        is_flam = False

    # Initialize keywords
    kwargs = func_kwargs.copy()
    for i_p, p in enumerate(plist):
        kwargs[p] = x0[i_p]

    # Initial model
    margs = (self, plist, flux, err, is_flam, filters, TEF, kwargs, 'model')
    flux_model, Anorm, chi2_init, templ = self.objfun_fitphot(x0, *margs)

    if show:
        # Reuse a provided axis or create a new figure
        if hasattr(show, 'plot'):
            ax = show
        else:
            plt.close('all')
            fig = plt.figure(figsize=(12, 6))
            ax = fig.add_subplot(111)

        mask = err > 0
        pivot = np.array([f.pivot for f in filters])
        so = np.argsort(pivot)
        ax.errorbar(pivot[mask]/1.e4, flux[mask], err[mask],
                    color='k', alpha=0.5, linestyle='None', marker='.')
        ax.scatter(pivot[so]/1.e4, flux_model[so], color='pink',
                   alpha=0.8, zorder=100)

    # Parameter bounds
    bounds, steps = self.parameter_bounds(plist)
    lsq_kwargs['diff_step'] = steps

    # Run the optimization
    lmargs = (self, plist, flux, err, is_flam, filters, TEF, kwargs, 'least_squares verbose')
    _res = least_squares(self.objfun_fitphot, x0, bounds=bounds,
                         args=lmargs, **lsq_kwargs)

    # Evaluate the best-fit model
    _out = self.objfun_fitphot(_res.x, *margs)
    xtempl = _out[3]
    xscale = _out[1]

    _fit = {}
    _fit['fmodel'] = _out[0]
    _fit['scale'] = xscale
    _fit['chi2'] = _out[2]
    _fit['templ'] = xtempl
    _fit['plist'] = plist
    _fit['theta'] = _res.x
    _fit['res'] = _res

    # Stellar mass
    #fit_model, Anorm, chi2_fit, templ = _phot

    # Parameter scaling to observed frame.
    # e.g., stellar mass = self.stellar_mass * scale / to_obsframe
    z = self.params['zred']
    _obsfr = self.continuum_to_obsframe(zred=z, unit=flux_unit)
    _fit['to_obsframe'] = _obsfr

    scl = _fit['scale']/_fit['to_obsframe']
    _fit['log_mass'] = np.log10(self.stellar_mass*scl)
    _fit['sfr'] = self.sfr*scl
    _fit['sfr10'] = self.sfr10*scl
    _fit['sfr100'] = self.sfr100*scl

    # Light-weighted ages in reference bands
    age_bands = ['i1500','v']
    ages = self.light_age_band(bands=age_bands)
    for i, b in enumerate(age_bands):
        _fit['age_'+b] = ages[i]

    if show:
        # Overplot the best-fit photometry and template
        ax.scatter(pivot[so]/1.e4, _fit['fmodel'][so], color='r',
                   alpha=0.8, zorder=101)

        iz = templ.zindex(z)
        igm = templ.igm_absorption(z, scale_tau=1.4)
        if is_flam:
            ax.plot(xtempl.wave*(1+z)/1.e4, xtempl.flux[iz,:]*xscale*igm,
                    color='r', alpha=0.3, zorder=10000)
        else:
            ax.plot(xtempl.wave*(1+z)/1.e4, xtempl.flux_fnu(iz)*xscale*igm,
                    color='r', alpha=0.3, zorder=10000)

    return _fit
@staticmethod
def objfun_fitphot(theta, self, plist, flux_fnu, err_fnu, is_flam, filters, TEF, kwargs, ret_type):
    """
    Objective function for fitting photometry.

    Parameters
    ----------
    theta : array
        Values of the parameters named in `plist`.

    self : model object
        Passed explicitly; staticmethod used as a `least_squares`
        callable.

    ret_type : str
        'model' -> (model_fnu, Anorm, chi2, templ);
        'least_squares' -> residual vector chi;
        'logpdf' -> -chi2/2;
        anything else -> scalar chi2.
        If it contains 'verbose', each evaluation is printed.
    """
    try:
        from grizli.utils_c.interp import interp_conserve_c as interp_func
    except:
        interp_func = utils.interp_conserve

    # Update the model parameters from theta
    for i, p in enumerate(plist):
        kwargs[p] = theta[i]

    templ = self.get_full_spectrum(**kwargs)

    # Synthetic photometry of the model through the filters
    model_fnu = templ.integrate_filter_list(filters,
                                            z=self.params['zred'], flam=is_flam,
                                            include_igm=True)

    mask = (err_fnu > 0)

    # Total variance, optionally including the template error function
    full_var = err_fnu**2
    if TEF is not None:
        tefz = TEF(self.params['zred'])
        full_var += (flux_fnu*tefz)**2

    # Single least-squares normalization of the model to the data
    lsq_num = (flux_fnu*model_fnu/full_var)[mask].sum()
    lsq_den = (model_fnu**2/full_var)[mask].sum()
    Anorm = lsq_num/lsq_den
    model_fnu *= Anorm

    chi = ((model_fnu - flux_fnu)/np.sqrt(full_var))[mask]
    chi2 = (chi**2).sum()

    if 'verbose' in ret_type:
        print('{0} {1:.4f}'.format(theta, (chi**2).sum()))

    if 'model' in ret_type:
        return model_fnu, Anorm, chi2, templ
    elif 'least_squares' in ret_type:
        return chi
    elif 'logpdf' in ret_type:
        return -chi2/2
    else:
        return chi2
| gbrammer/eazy-py | eazy/sps.py | Python | mit | 59,521 | [
"Gaussian"
] | fd4b8a5d31406aa2ab0f970d0dbb0fe50f0cc7c57adf79ce536c8729602436d6 |
from netpyne import specs, sim
import matplotlib.pyplot as plt
import numpy as np
# Network parameters
netParams = specs.NetParams() # object of class NetParams to store the network parameters
## Cell types
# Pyramidal cell: HH soma plus a passive primary dendrite (dend0) that
# branches into three identical passive daughters (dend1-dend3).
PYRcell = {'secs': {}}
PYRcell['secs']['soma'] = {'geom': {}, 'mechs': {}}
PYRcell['secs']['soma']['geom'] = {'diam': 20, 'L': 20, 'Ra': 123.0}
PYRcell['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70}
PYRcell['secs']['dend0'] = {'geom': {}, 'topol': {}, 'mechs': {}}
PYRcell['secs']['dend0']['geom'] = {'diam': 1.0, 'L': 300.0, 'Ra': 150.0, 'nseg': 9, 'cm': 1}
PYRcell['secs']['dend0']['topol'] = {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0}
PYRcell['secs']['dend0']['mechs']['pas'] = {'g': 0.0000357, 'e': -70}
PYRcell['secs']['dend1'] = {'geom': {}, 'topol': {}, 'mechs': {}}
PYRcell['secs']['dend1']['geom'] = {'diam': 1.0, 'L': 100.0, 'Ra': 150.0, 'nseg': 9, 'cm': 1}
PYRcell['secs']['dend1']['topol'] = {'parentSec': 'dend0', 'parentX': 1.0, 'childX': 0}
PYRcell['secs']['dend1']['mechs']['pas'] = {'g': 0.0000357, 'e': -70}
PYRcell['secs']['dend2'] = {'geom': {}, 'topol': {}, 'mechs': {}}
PYRcell['secs']['dend2']['geom'] = {'diam': 1.0, 'L': 100.0, 'Ra': 150.0, 'nseg': 9, 'cm': 1}
PYRcell['secs']['dend2']['topol'] = {'parentSec': 'dend0', 'parentX': 1.0, 'childX': 0}
PYRcell['secs']['dend2']['mechs']['pas'] = {'g': 0.0000357, 'e': -70}
PYRcell['secs']['dend3'] = {'geom': {}, 'topol': {}, 'mechs': {}}
PYRcell['secs']['dend3']['geom'] = {'diam': 1.0, 'L': 100.0, 'Ra': 150.0, 'nseg': 9, 'cm': 1}
PYRcell['secs']['dend3']['topol'] = {'parentSec': 'dend0', 'parentX': 1.0, 'childX': 0}
PYRcell['secs']['dend3']['mechs']['pas'] = {'g': 0.0000357, 'e': -70}
netParams.cellParams['PYR'] = PYRcell
#netParams.defineCellShapes = True # sets 3d geometry aligned along the y-axis
## Population parameters
Ns = 100  # number of source (presynaptic) cells
Nm = 10   # number of target (postsynaptic) cells
netParams.popParams['S'] = {'cellType': 'PYR', 'numCells': Ns}
netParams.popParams['M'] = {'cellType': 'PYR', 'numCells': Nm}
## Synaptic mechanism parameters
netParams.synMechParams['exc'] = {'mod': 'Exp2Syn', 'tau1': 1.0, 'tau2': 5.0, 'e': 0} # excitatory synaptic mechanism
# Stimulation parameters
netParams.stimSourceParams['bkg'] = {'type': 'NetStim', 'rate': 1, 'noise': 0.5}
netParams.stimTargetParams['bkg->PYR'] = {'source': 'bkg', 'conds': {'cellType': 'PYR'}, 'weight': 0.01, 'delay': 5, 'synMech': 'exc'}
## Cell connectivity rules
netParams.connParams['S->M'] = {'preConds': {'pop': 'S'}, 'postConds': {'pop': 'M'}, # S -> M
    'probability': 0.5, # probability of connection
    'weight': 0.0001, # synaptic weight
    'delay': 5, # transmission delay (ms)
    'sec': 'dend0', # section to connect to
    'loc': 0.5, # location of synapse
    'synMech': 'exc'} # target synaptic mechanism
# Simulation options
simConfig = specs.SimConfig() # object of class SimConfig to store simulation configuration
simConfig.duration = 1*1e3 # Duration of the simulation, in ms
simConfig.dt = 0.025 # Internal integration timestep to use
simConfig.verbose = False # Show detailed messages
simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'},'V_dend0':{'sec':'dend0','loc':0.5,'var':'v'}} # Dict with traces to record
simConfig.recordStep = 0.1 # Step size in ms to save data (eg. V traces, LFP, etc)
simConfig.filename = 'subconn_without' # Set file output name
simConfig.savePickle = False # Save params, network and sim output to pickle file
simConfig.saveDat = True
watchneuron = Ns # first neuron in population M (where S impinges on)
simConfig.analysis['plotTraces'] = {'include': [watchneuron], 'saveFig': True} # Plot recorded traces for this list of cells
simConfig.analysis['plotShape'] = {'includePre': ['S'],'includePost': [watchneuron],'cvar':'numSyns','dist':0.7, 'saveFig': True}
# Create network and run simulation (baseline: no subcellular redistribution)
sim.createSimulateAnalyze(netParams = netParams, simConfig = simConfig)
t1 = sim.allSimData['t']
Vsoma1 = sim.allSimData['V_soma']['cell_%d' %watchneuron]
Vdend1 = sim.allSimData['V_dend0']['cell_%d' %watchneuron]
#################################################
## Adds a Subcellular synaptic redistribution
#################################################
netParams.subConnParams['S->M'] = {
    'preConds': {'pop': 'S'}, 'postConds': {'pop': 'M'}, # S -> M
    'sec': ['dend3'], # redistribute the S->M synapses onto this section list
    'density': {'type':'distance','ref_sec':'soma','ref_seg':0.5, 'target_distance': 360}
    }
simConfig.filename = 'subconn_with' # Set file output name
# Create network and run simulation (with subcellular redistribution)
sim.createSimulateAnalyze(netParams = netParams, simConfig = simConfig)
t2 = sim.allSimData['t']
Vsoma2 = sim.allSimData['V_soma']['cell_%d' %watchneuron]
Vdend2 = sim.allSimData['V_dend0']['cell_%d' %watchneuron]
# Overlay the dend0 voltage traces from both runs
fig, ax= plt.subplots()
ax.plot(t1,Vdend1)
ax.plot(t2,Vdend2)
plt.xlabel('Time')
plt.ylabel('Vdend')
fig.savefig('comparison.jpg')
# Plot the point-by-point difference between the two runs
diff = []
for i in range(len(t1)):
    diff.append(Vdend1[i] - Vdend2[i])
fig, ax= plt.subplots()
ax.plot(t1,diff)
plt.xlabel('Time')
plt.ylabel('Difference')
fig.savefig('difference.jpg')
| Neurosim-lab/netpyne | examples/subConn/subConn_DistanceBased.py | Python | mit | 5,269 | [
"NEURON"
] | f5ad11829b202fbdadf36e648938fdd56129f8c585ad332b71e90314888493e4 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""MOL2 file format --- :mod:`MDAnalysis.coordinates.MOL2`
========================================================
Classes to work with Tripos_ molecule structure format (MOL2_) coordinate and
topology files. Used, for instance, by the DOCK_ docking code.
Example for working with mol2 files
-----------------------------------
To open a mol2, remove all hydrogens and save as a new file, use the following::
u = Universe("Molecule.mol2")
gr = u.select_atoms("not name H*")
print(len(u.atoms), len(gr))
gr.write("Molecule_noh.mol2")
.. _MOL2: http://www.tripos.com/data/support/mol2.pdf
.. _Tripos: http://www.tripos.com/
.. _DOCK: http://dock.compbio.ucsf.edu/
See Also
--------
rdkit: rdkit_ is an open source cheminformatics toolkit with more mol2
functionality
.. _rdkit: http://www.rdkit.org/docs/GettingStartedInPython.html
Classes
-------
.. autoclass:: MOL2Reader
:members:
.. autoclass:: MOL2Writer
:members:
Notes
-----
* The MDAnalysis :class:`MOL2Reader` and :class:`MOL2Writer` only handle the
MOLECULE, SUBSTRUCTURE, ATOM, and BOND record types. Other records are not
currently read or preserved on writing.
* As the CRYSIN record type is not parsed / written, MOL2 systems always have
dimensions set to ``None`` and dimensionless MOL2 files are written.
MOL2 format notes
-----------------
* MOL2 Format Specification: (http://www.tripos.com/data/support/mol2.pdf)
* Example file (http://www.tripos.com/mol2/mol2_format3.html)::
# Name: benzene
# Creating user name: tom
# Creation time: Wed Dec 28 00:18:30 1988
# Modifying user name: tom
# Modification time: Wed Dec 28 00:18:30 1988
@<TRIPOS>MOLECULE
benzene
12 12 1 0 0
SMALL
NO_CHARGES
@<TRIPOS>ATOM
1 C1 1.207 2.091 0.000 C.ar 1 BENZENE 0.000
2 C2 2.414 1.394 0.000 C.ar 1 BENZENE 0.000
3 C3 2.414 0.000 0.000 C.ar 1 BENZENE 0.000
4 C4 1.207 -0.697 0.000 C.ar 1 BENZENE 0.000
5 C5 0.000 0.000 0.000 C.ar 1 BENZENE 0.000
6 C6 0.000 1.394 0.000 C.ar 1 BENZENE 0.000
7 H1 1.207 3.175 0.000 H 1 BENZENE 0.000
8 H2 3.353 1.936 0.000 H 1 BENZENE 0.000
9 H3 3.353 -0.542 0.000 H 1 BENZENE 0.000
10 H4 1.207 -1.781 0.000 H 1 BENZENE 0.000
11 H5 -0.939 -0.542 0.000 H 1 BENZENE 0.000
12 H6 -0.939 1.936 0.000 H 1 BENZENE 0.000
@<TRIPOS>BOND
1 1 2 ar
2 1 6 ar
3 2 3 ar
4 3 4 ar
5 4 5 ar
6 5 6 ar
7 1 7 1
8 2 8 1
9 3 9 1
10 4 10 1
11 5 11 1
12 6 12 1
@<TRIPOS>SUBSTRUCTURE
1 BENZENE 1 PERM 0 **** **** 0 ROOT
"""
import numpy as np
from . import base
from ..lib import util
class MOL2Reader(base.ReaderBase):
    """Reader for MOL2 structure format.

    .. versionchanged:: 0.11.0
       Frames now 0-based instead of 1-based.
       MOL2 now reuses the same Timestep object for every frame,
       previously created a new instance of Timestep each frame.
    .. versionchanged:: 0.20.0
       Allows for comments at top of file.
       Ignores status bit strings
    .. versionchanged:: 2.0.0
       Bonds attribute is not added if no bonds are present in MOL2 file
    """
    format = 'MOL2'
    units = {'time': None, 'length': 'Angstrom'}

    def __init__(self, filename, **kwargs):
        """Read coordinates from `filename`.

        Parameters
        ----------
        filename : str or NamedStream
            name of the mol2 file or stream
        """
        super(MOL2Reader, self).__init__(filename, **kwargs)
        self.n_atoms = None
        # Split the file into per-molecule blocks: each @<TRIPOS>MOLECULE
        # header starts a new frame; all subsequent lines belong to it.
        blocks = []
        with util.openany(filename) as f:
            for i, line in enumerate(f):
                # found new molecules
                if "@<TRIPOS>MOLECULE" in line:
                    blocks.append({"start_line": i, "lines": []})
                # lines before the first MOLECULE header (e.g. comments)
                # are silently dropped because no block exists yet
                if len(blocks):
                    blocks[-1]["lines"].append(line)
        self.n_frames = len(blocks)
        self.frames = blocks
        # Parse the first block up front to learn the atom count
        # (parse_block also records it on self.n_atoms as a side effect).
        sections, coords = self.parse_block(blocks[0])
        self.n_atoms = len(coords)
        self.ts = self._Timestep(self.n_atoms, **self._ts_kwargs)
        self.ts = self._read_frame(0)

    def parse_block(self, block):
        """Split one molecule block into its @<TRIPOS> sections and
        return ``(sections, coords)`` where *coords* is an (n_atoms, 3)
        float32 array taken from the ATOM records."""
        sections = {}
        cursor = None
        for line in block["lines"]:
            if "@<TRIPOS>" in line:
                # start collecting lines for a new section
                cursor = line.split("@<TRIPOS>")[1].strip().lower()
                sections[cursor] = []
                continue
            elif line.startswith("#") or line == "\n":
                # skip comments and blank lines
                continue
            sections[cursor].append(line)
        atom_lines = sections["atom"]
        if not len(atom_lines):
            raise Exception("The mol2 (starting at line {0}) block has no atoms"
                            "".format(block["start_line"]))
        elif self.n_atoms is None:
            # First time round, remember the number of atoms
            self.n_atoms = len(atom_lines)
        elif len(atom_lines) != self.n_atoms:
            # all frames must have the same topology
            raise ValueError(
                "MOL2Reader assumes that the number of atoms remains unchanged"
                " between frames; the current "
                "frame has {0}, the next frame has {1} atoms"
                "".format(self.n_atoms, len(atom_lines)))
        coords = np.zeros((self.n_atoms, 3), dtype=np.float32)
        for i, a in enumerate(atom_lines):
            # columns: id, name, x, y, z, type, resid, resname, charge
            # (any trailing status-bit columns are ignored)
            aid, name, x, y, z, atom_type, resid, resname, charge = a.split()[:9]
            #x, y, z = float(x), float(y), float(z)
            coords[i, :] = x, y, z
        return sections, coords

    def _read_next_timestep(self, ts=None):
        """Advance to and return the next frame's Timestep."""
        if ts is None:
            ts = self.ts
        else:
            # TODO: cleanup _read_frame() to use a "free" Timestep
            raise NotImplementedError("MOL2Reader cannot assign to a timestep")
        frame = self.frame + 1
        return self._read_frame(frame)

    def _read_frame(self, frame):
        """Load frame number *frame* (0-based) into the shared Timestep.

        Raises
        ------
        IOError
            If *frame* is out of range.
        """
        try:
            block = self.frames[frame]
        except IndexError:
            errmsg = (f"Invalid frame {frame} for trajectory with length "
                      f"{len(self)}")
            raise IOError(errmsg) from None
        sections, coords = self.parse_block(block)
        # stash raw MOLECULE/SUBSTRUCTURE records so MOL2Writer can emit them
        for sect in ['molecule', 'substructure']:
            try:
                self.ts.data[sect] = sections[sect]
            except KeyError:
                pass
        self.ts.positions = np.array(coords, dtype=np.float32)
        if self.convert_units:
            # in-place !
            self.convert_pos_from_native(self.ts._pos)
        self.ts.frame = frame
        return self.ts

    def _reopen(self):
        # Make frame think it's before start, so calling next
        # reads first frame
        self.ts.frame = -1
class MOL2Writer(base.WriterBase):
    """mol2 format writer

    Write a file in the Tripos_ molecule structure format (MOL2_).

    Note
    ----
    :class:`MOL2Writer` can only be used to write out previously loaded MOL2 files.
    If you're trying to convert, for example, a PDB file to MOL you should
    use other tools, like rdkit_.

    Here is an example how to use rdkit_ to convert a PDB to MOL::

      from rdkit import Chem
      mol = Chem.MolFromPDBFile("molecule.pdb", removeHs=False)
      Chem.MolToMolFile(mol, "molecule.mol" )

    MOL2 writer is currently not available for rdkit master. It requires SYBYL
    atomtype generation.
    See page 7 for list of SYBYL atomtypes
    (http://tripos.com/tripos_resources/fileroot/pdfs/mol2_format2.pdf).

    .. _rdkit: http://www.rdkit.org/docs/GettingStartedInPython.html

    .. versionchanged:: 0.11.0
       Frames now 0-based instead of 1-based
    """
    format = 'MOL2'
    multiframe = True
    units = {'time': None, 'length': 'Angstrom'}

    def __init__(self, filename, n_atoms=None, convert_units=True):
        """Create a new MOL2Writer

        Parameters
        ----------
        filename: str
            name of output file
        convert_units: bool (optional)
            units are converted to the MDAnalysis base format; [``True``]
        """
        self.filename = filename
        self.convert_units = convert_units  # convert length and time to base units
        self.frames_written = 0
        self.file = util.anyopen(self.filename, 'w')  # open file on init

    def close(self):
        """Close the underlying output file."""
        self.file.close()

    def encode_block(self, obj):
        """Render the current timestep of *obj* as one MOL2 text block.

        Parameters
        ----------
        obj : AtomGroup or Universe

        Returns
        -------
        str
            MOLECULE, ATOM, BOND and SUBSTRUCTURE records for the frame.

        Raises
        ------
        TypeError
            If *obj* is neither an AtomGroup nor a Universe.
        NotImplementedError
            If the timestep carries no MOL2 'molecule' record (i.e. the
            data did not originate from a MOL2 file).
        """
        # Issue 2717
        try:
            obj = obj.atoms
        except AttributeError:
            errmsg = "Input obj is neither an AtomGroup or Universe"
            raise TypeError(errmsg) from None

        traj = obj.universe.trajectory
        ts = traj.ts
        try:
            molecule = ts.data['molecule']
        except KeyError:
            errmsg = "MOL2Writer cannot currently write non MOL2 data"
            raise NotImplementedError(errmsg) from None

        # Need to remap atom indices to 1 based in this selection
        mapping = {a: i for i, a in enumerate(obj.atoms, start=1)}

        # only write bonds if the Bonds attribute exists (Issue #3057)
        if hasattr(obj, "bonds"):
            # Grab only bonds between atoms in the obj
            # ie none that extend out of it
            bondgroup = obj.intra_bonds
            bonds = sorted((b[0], b[1], b.order) for b in bondgroup)
            bond_lines = ["@<TRIPOS>BOND"]
            bls = ["{0:>5} {1:>5} {2:>5} {3:>2}".format(bid,
                                                        mapping[atom1],
                                                        mapping[atom2],
                                                        order)
                   for bid, (atom1, atom2, order) in enumerate(bonds, start=1)]
            bond_lines.extend(bls)
            bond_lines.append("\n")
            bond_lines = "\n".join(bond_lines)
        else:
            bondgroup = []
            bond_lines = ""

        atom_lines = ["@<TRIPOS>ATOM"]
        atom_lines.extend("{0:>4} {1:>4} {2:>13.4f} {3:>9.4f} {4:>9.4f}"
                          " {5:>4} {6} {7} {8:>7.4f}"
                          "".format(mapping[a],
                                    a.name,
                                    a.position[0],
                                    a.position[1],
                                    a.position[2],
                                    a.type,
                                    a.resid,
                                    a.resname,
                                    a.charge)
                          for a in obj.atoms)
        atom_lines.append("\n")
        atom_lines = "\n".join(atom_lines)

        try:
            substructure = ["@<TRIPOS>SUBSTRUCTURE\n"] + ts.data['substructure']
        except KeyError:
            substructure = ""

        # Patch the atom/bond counts into a *copy* of the MOLECULE record.
        # The previous implementation edited ts.data['molecule'] in place
        # (assigning molecule[1] and inserting the "@<TRIPOS>MOLECULE" header
        # at index 0) and then restored only indices 0 and 1 afterwards; the
        # insert was never undone, so the stored record grew by one stale
        # line on every call -- exactly the repeated-call behavior change
        # gh-2678 was meant to prevent.  Building a fresh list leaves the
        # timestep data untouched and makes repeated calls idempotent.
        check_sums = molecule[1].split()
        check_sums[0], check_sums[1] = str(len(obj.atoms)), str(len(bondgroup))
        molecule_block = (["@<TRIPOS>MOLECULE\n", molecule[0],
                           "{0}\n".format(" ".join(check_sums))]
                          + list(molecule[2:]))

        return ("".join(molecule_block) + atom_lines +
                bond_lines + "".join(substructure))

    def _write_next_frame(self, obj):
        """Write a new frame to the MOL2 file.

        Parameters
        ----------
        obj : AtomGroup or Universe

        .. versionchanged:: 1.0.0
           Renamed from `write_next_timestep` to `_write_next_frame`.
        """
        block = self.encode_block(obj)
        self.file.writelines(block)
| MDAnalysis/mdanalysis | package/MDAnalysis/coordinates/MOL2.py | Python | gpl-2.0 | 13,100 | [
"MDAnalysis",
"RDKit"
] | 7476f7ef03f3c672f9d74fb47aa89cc18bcebb025ccf16a35d876dd795070f4f |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2007-2009 Stephane Charette
# Copyright (C) 2009 Gary Burton
# Contribution 2009 by Bob Ham <rah@bash.sh>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import os
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------------
#
# GTK+ modules
#
#-------------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------------
from gramps.gen.config import config
from gramps.gen.plug.report import CATEGORY_GRAPHVIZ
from ._reportdialog import ReportDialog
from ._papermenu import PaperFrame
import gramps.gen.plug.docgen.graphdoc as graphdoc
from gramps.gen.plug.menu import Menu
#-------------------------------------------------------------------------------
#
# GraphvizFormatComboBox
#
#-------------------------------------------------------------------------------
class GraphvizFormatComboBox(Gtk.ComboBox):
    """Combo box listing the Graphviz output formats from graphdoc.FORMATS."""

    def set(self, active=None):
        """Populate the model with all format descriptions and select the
        entry whose 'type' matches *active* (first entry otherwise)."""
        self.store = Gtk.ListStore(GObject.TYPE_STRING)
        self.set_model(self.store)
        cell = Gtk.CellRendererText()
        self.pack_start(cell, True)
        self.add_attribute(cell, 'text', 0)
        selected = 0
        for position, entry in enumerate(graphdoc.FORMATS):
            self.store.append(row=[entry["descr"]])
            if entry['type'] == active:
                selected = position
        self.set_active(selected)

    def get_label(self):
        """Return the human-readable description of the selected format."""
        return graphdoc.FORMATS[self.get_active()]["descr"]

    def get_reference(self):
        """Return the docgen class implementing the selected format."""
        return graphdoc.FORMATS[self.get_active()]["class"]

    def get_paper(self):
        """Graphviz output always uses paper settings."""
        return 1

    def get_styles(self):
        """Graphviz output never uses document styles."""
        return 0

    def get_ext(self):
        """Return the file extension (with leading dot) for the format."""
        return '.%s' % graphdoc.FORMATS[self.get_active()]['ext']

    def get_format_str(self):
        """Return the short type string of the selected format."""
        return graphdoc.FORMATS[self.get_active()]["type"]

    def is_file_output(self):
        """All Graphviz formats write to a file."""
        return True

    def get_clname(self):
        """Return the config-label name (same as the type string)."""
        return graphdoc.FORMATS[self.get_active()]["type"]
#-----------------------------------------------------------------------
#
# GraphvizReportDialog
#
#-----------------------------------------------------------------------
class GraphvizReportDialog(ReportDialog):
    """A class of ReportDialog customized for graphviz based reports."""
    def __init__(self, dbstate, uistate, opt, name, translated_name):
        """Initialize a dialog to request that the user select options
        for a graphviz report.  See the ReportDialog class for
        more information."""
        self.category = CATEGORY_GRAPHVIZ
        self.__gvoptions = graphdoc.GVOptions()
        self.dbname = dbstate.db.get_dbname()
        ReportDialog.__init__(self, dbstate, uistate, opt,
                              name, translated_name)
    def init_options(self, option_class):
        """Instantiate the report's option class (it may be passed either
        as a class or as an already-built instance) and merge in the
        common Graphviz menu options."""
        try:
            # option_class is a class: instantiate it.  If it is already an
            # instance, issubclass() raises TypeError and we use it as-is.
            if issubclass(option_class, object):
                self.options = option_class(self.raw_name,
                                            self.dbstate.get_database())
        except TypeError:
            self.options = option_class
        menu = Menu()
        self.__gvoptions.add_menu_options(menu)
        # copy every Graphviz-specific option into the report's option set
        for category in menu.get_categories():
            for name in menu.get_option_names(category):
                option = menu.get_option(category, name)
                self.options.add_menu_option(category, name, option)
        self.options.load_previous_values()
    def init_interface(self):
        """Build the dialog and sync widget sensitivity with the
        initially selected output format."""
        ReportDialog.init_interface(self)
        self.doc_type_changed(self.format_menu)
    def setup_format_frame(self):
        """Set up the format frame of the dialog."""
        self.format_menu = GraphvizFormatComboBox()
        self.format_menu.set(self.options.handler.get_format_name())
        self.format_menu.connect('changed', self.doc_type_changed)
        label = Gtk.Label(label="%s:" % _("Output Format"))
        label.set_alignment(0.0, 0.5)
        self.tbl.attach(label, 1, 2, self.row, self.row+1, Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
        self.tbl.attach(self.format_menu, 2, 4, self.row, self.row+1,
                        yoptions=Gtk.AttachOptions.SHRINK)
        self.row += 1
        self.open_with_app = Gtk.CheckButton(_("Open with default viewer"))
        self.open_with_app.set_active(
            config.get('interface.open-with-default-viewer'))
        self.tbl.attach(self.open_with_app, 2, 4, self.row, self.row+1,
                        yoptions=Gtk.AttachOptions.SHRINK)
        self.row += 1
        # Propose a default output filename: previously used name if any,
        # otherwise <dbname>_<InitialsOfReportName><ext> in the default dir.
        ext = self.format_menu.get_ext()
        if ext is None:
            ext = ""
        else:
            spath = self.get_default_directory()
            default_name = self.dbname + "_" + \
                "".join(x[0].upper() for x in self.raw_name.split("_"))
            if self.options.get_output():
                base = os.path.basename(self.options.get_output())
            else:
                base = "%s%s" % (default_name, ext)
            spath = os.path.normpath(os.path.join(spath, base))
            self.target_fileentry.set_filename(spath)
    def setup_report_options_frame(self):
        """Insert the paper-options tab (metric, size, orientation,
        margins) ahead of the generic report options."""
        self.paper_label = Gtk.Label(label='<b>%s</b>'%_("Paper Options"))
        self.paper_label.set_use_markup(True)
        handler = self.options.handler
        self.paper_frame = PaperFrame(
            handler.get_paper_metric(),
            handler.get_paper_name(),
            handler.get_orientation(),
            handler.get_margins(),
            handler.get_custom_paper_size(),
            )
        self.notebook.insert_page(self.paper_frame, self.paper_label, 0)
        self.paper_frame.show_all()
        ReportDialog.setup_report_options_frame(self)
    def doc_type_changed(self, obj):
        """
        This routine is called when the user selects a new file
        formats for the report. It adjust the various dialog sections
        to reflect the appropriate values for the currently selected
        file format. For example, a HTML document doesn't need any
        paper size/orientation options, but it does need a template
        file. Those changes are made here.
        """
        self.open_with_app.set_sensitive(True)
        # swap the filename extension to match the newly chosen format
        fname = self.target_fileentry.get_full_path(0)
        (spath, ext) = os.path.splitext(fname)
        ext_val = obj.get_ext()
        if ext_val:
            fname = spath + ext_val
        else:
            fname = spath
        self.target_fileentry.set_filename(fname)
        format_str = obj.get_format_str()
        if format_str in ['gvpdf', 'gspdf', 'ps']:
            # Always use 72 DPI for PostScript and PDF files.
            self.__gvoptions.dpi.set_value(72)
            self.__gvoptions.dpi.set_available(False)
        else:
            self.__gvoptions.dpi.set_available(True)
        if format_str in ['gspdf', 'dot']:
            # Multiple pages only valid for dot and PDF via GhostsScript.
            self.__gvoptions.h_pages.set_available(True)
            self.__gvoptions.v_pages.set_available(True)
        else:
            self.__gvoptions.h_pages.set_value(1)
            self.__gvoptions.v_pages.set_value(1)
            self.__gvoptions.h_pages.set_available(False)
            self.__gvoptions.v_pages.set_available(False)
    def make_document(self):
        """Create a document of the type requested by the user.
        """
        pstyle = self.paper_frame.get_paper_style()
        self.doc = self.format(self.options, pstyle)
        self.options.set_document(self.doc)
    def on_ok_clicked(self, obj):
        """The user is satisfied with the dialog choices. Validate
        the output file name before doing anything else. If there is
        a file name, gather the options and create the report."""
        # Is there a filename? This should also test file permissions, etc.
        # NOTE(review): when validation fails the dialog is re-run, but
        # execution then falls through and the report is generated anyway --
        # confirm whether an early return/loop was intended here.
        if not self.parse_target_frame():
            self.window.run()
        # Preparation
        self.parse_format_frame()
        self.parse_user_options()
        # persist the paper settings chosen on the Paper Options tab
        self.options.handler.set_paper_metric(
            self.paper_frame.get_paper_metric())
        self.options.handler.set_paper_name(self.paper_frame.get_paper_name())
        self.options.handler.set_orientation(self.paper_frame.get_orientation())
        self.options.handler.set_margins(self.paper_frame.get_paper_margins())
        self.options.handler.set_custom_paper_size(
            self.paper_frame.get_custom_paper_size())
        # Create the output document.
        self.make_document()
        # Save options
        self.options.handler.save_options()
        config.set('interface.open-with-default-viewer',
                   self.open_with_app.get_active())
    def parse_format_frame(self):
        """Parse the format frame of the dialog. Save the user
        selected output format for later use."""
        self.format = self.format_menu.get_reference()
        format_name = self.format_menu.get_clname()
        self.options.handler.set_format_name(format_name)
    def setup_style_frame(self):
        """Required by ReportDialog"""
        pass
| Forage/Gramps | gramps/gui/plug/report/_graphvizreportdialog.py | Python | gpl-2.0 | 10,828 | [
"Brian"
] | da0c7056934f21b05893d94f76358b874ba4788a0806324ab19316244f150847 |
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import subprocess
import os
from ansible.plugins.callback import CallbackBase
FAILED_VOICE = "Zarvox"
REGULAR_VOICE = "Trinoids"
HAPPY_VOICE = "Cellos"
LASER_VOICE = "Princess"
SAY_CMD = "/usr/bin/say"
class CallbackModule(CallbackBase):
    """Speak playbook progress aloud through the OS X ``say`` command."""

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'osx_say'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        super(CallbackModule, self).__init__()
        # Disable ourselves when the `say` binary is missing; Ansible
        # skips every callback whose `disabled` flag is True.
        if os.path.exists(SAY_CMD):
            return
        self.disabled = True
        self._display.warning("%s does not exist, plugin %s disabled" % (SAY_CMD, os.path.basename(__file__)))

    def say(self, msg, voice):
        """Speak *msg* with the given macOS voice (blocking)."""
        command = [SAY_CMD, msg, "--voice=%s" % (voice)]
        subprocess.call(command)

    def _announce_failure(self, host):
        # shared phrasing for every failure-type event
        self.say("Failure on host %s" % host, FAILED_VOICE)

    def _pew(self):
        # short blip used for minor events
        self.say("pew", LASER_VOICE)

    def runner_on_failed(self, host, res, ignore_errors=False):
        self._announce_failure(host)

    def runner_on_ok(self, host, res):
        self._pew()

    def runner_on_skipped(self, host, item=None):
        self._pew()

    def runner_on_unreachable(self, host, res):
        self._announce_failure(host)

    def runner_on_async_ok(self, host, res, jid):
        self._pew()

    def runner_on_async_failed(self, host, res, jid):
        self._announce_failure(host)

    def playbook_on_start(self):
        self.say("Running Playbook", REGULAR_VOICE)

    def playbook_on_notify(self, host, handler):
        self._pew()

    def playbook_on_task_start(self, name, is_conditional):
        verb = "Notifying" if is_conditional else "Starting"
        self.say("%s task: %s" % (verb, name), REGULAR_VOICE)

    def playbook_on_setup(self):
        self.say("Gathering facts", REGULAR_VOICE)

    def playbook_on_play_start(self, name):
        self.say("Starting play: %s" % name, HAPPY_VOICE)

    def playbook_on_stats(self, stats):
        self.say("Play complete", HAPPY_VOICE)
| qrkourier/ansible | lib/ansible/plugins/callback/osx_say.py | Python | gpl-3.0 | 3,032 | [
"exciting"
] | 48976026b81ad2239392878583ac874c89169e575ca84ed4253432cd166ff316 |
# dmd_image.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import os
import sys
from optparse import OptionParser
import pygame
import pygame.locals
# CONFIGURATION SETTINGS: You can change these if you want
# ------------------------------------------------------------------------------
window_size = (800, 600) # pixel size of the main window
dmd_size = (128, 32) # pixel size of the native DMD
dmd_screen_size = (640, 160) # pixel size of the on screen DMD
pixel_color = [255, 85, 0] # R, G, B colors of the image pixels
dark_color = [0, 0, 0] # R, G, B colors of the 'off' pixels
pixel_spacing = 2 # Pixel spacing between dots
loop_ms = 100 # How many ms it waits per cycle
file_extensions = ['bmp', 'jpg', 'jpeg', 'gif', 'png', 'tif']
shades = 16
alpha_color = None
red = 299 # .299
green = 587 # .587
blue = 114 # .114
# ------------------------------------------------------------------------------
# END OF CONFIGURATION SETTINGS. Don't change anything below here
image_path = None
image_list = list()
image_index = 0
x_offset = 0
y_offset = 0
snapshot_flash_index = 0
dmd_palette = None
source_image_dmd_surface = None
source_image_screen_surface = None
source_image_surface = None
def load_image():
    """Load the image at the current list index into the source surface."""
    global source_image_surface
    filename = image_list[image_index]
    source_image_surface = pygame.image.load(os.path.join(image_path, filename))
def change_image(direction):
    """Step to the previous/next image (wrapping at both ends) and redraw."""
    global image_index
    if direction == 'left':
        step = -1
    elif direction == 'right':
        step = 1
    else:
        step = 0
    # single +/-1 steps wrap exactly like the explicit boundary checks
    image_index = (image_index + step) % len(image_list)
    load_image()
    update_screen()
def change_offset(direction):
    """Nudge the image offset one pixel in the given direction."""
    global x_offset
    global y_offset
    deltas = {'up': (0, -1), 'down': (0, 1),
              'left': (-1, 0), 'right': (1, 0)}
    dx, dy = deltas.get(direction, (0, 0))
    x_offset += dx
    y_offset += dy
def main_loop():
    """Run one UI tick: process pending events, delay, fade the flash."""
    global snapshot_flash_index
    for event in pygame.event.get():
        etype = event.type
        if etype == pygame.QUIT:
            sys.exit()
        if etype == pygame.KEYDOWN:
            process_key(event.key, event.mod)
    pygame.time.delay(loop_ms)
    # While a snapshot flash is in progress, fade it one step per tick.
    if snapshot_flash_index:
        snapshot_flash_index -= 1
        update_screen()
def process_key(key, mods):
    """Dispatch a key press to the matching action, then redraw.

    Escape quits; Ctrl+S/A/B trigger snapshot / antialias / bounding box;
    Shift+arrows move the image offset; plain up/down resizes; plain
    left/right changes image.
    """
    shift = bool(mods & (pygame.locals.KMOD_SHIFT | pygame.locals.KMOD_CAPS))
    ctrl = bool(mods & (pygame.locals.KMOD_META | pygame.locals.KMOD_CTRL))

    if key == pygame.locals.K_ESCAPE:
        sys.exit()

    char = pygame.key.name(key)
    if ctrl and char == 's':
        screen_snap()
    elif ctrl and char == 'a':
        flip_antialias()
    elif ctrl and char == 'b':
        flip_bounding_box()
    elif shift and char in ('up', 'down', 'left', 'right'):
        change_offset(char)
    elif char in ('up', 'down'):
        change_image_size(char)
    elif char in ('left', 'right'):
        change_image(char)
    # any other key is ignored
    update_screen()
def update_screen():
    """Redraw the whole window: the source image rendered on a simulated
    DMD (top), the palette-quantized DMD version (bottom), the image name,
    and the snapshot flash overlay if one is active."""
    global source_image_surface
    global pixel_color
    global dark_color
    global screen
    global dmd_size
    global dmd_screen_size
    global pixel_spacing
    global image_index
    global image_list
    global image_bg_color
    global snapshot_flash_index
    global snapshot_flash_brightness
    global snapshot_flash_steps
    global y_offset
    global x_offset
    global dmd_palette
    # Quantized version: convert the source through the DMD palette at
    # native DMD resolution, honoring the current pan offset.
    new_image_dmd_surface = pygame.Surface(dmd_size, depth=8)
    new_image_dmd_surface.set_palette(dmd_palette)
    new_image_dmd_surface.fill(dark_color)
    new_image_dmd_surface.blit(surface_to_dmd(source_image_surface),
                               (x_offset, y_offset))
    # Unquantized version: the raw source image at native DMD resolution.
    source_image_dmd_surface = pygame.Surface(dmd_size)
    source_image_dmd_surface.fill(dark_color)
    source_image_dmd_surface.blit(source_image_surface, (x_offset, y_offset))
    # Scale both DMD surfaces up to on-screen size with a pixel grid.
    source_image_screen_surface = make_screen_surface(source_image_dmd_surface,
                                        dmd_screen_size, pixel_spacing)
    new_image_screen_surface = make_screen_surface(new_image_dmd_surface,
                                        dmd_screen_size, pixel_spacing)
    # Create the surface for the image name
    info_image = pygame.font.Font(None, 50)
    imagename_surface = info_image.render(image_list[image_index], True,
                                          (255, 255, 255))
    screen.fill((0,0,0))
    # center the DMD screen on the display surface
    x = (screen.get_width() - source_image_screen_surface.get_width()) / 2
    y = (screen.get_height() - source_image_screen_surface.get_height()) / 3
    # draw a box around the source DMD
    pygame.draw.rect(screen, (255, 255, 255),
                     (x-1, 100-1, source_image_screen_surface.get_width() + 2,
                      source_image_screen_surface.get_height() + 2), 1)
    # draw a box around the new DMD
    pygame.draw.rect(screen, (255, 255, 255),
                     (x-1, 400-1, source_image_screen_surface.get_width() + 2,
                      source_image_screen_surface.get_height() + 2), 1)
    screen.blit(source_image_screen_surface, (x, 100))
    screen.blit(new_image_screen_surface, (x, 400))
    screen.blit(imagename_surface, (10,10))
    # Overlay a fading white flash right after a screen snapshot.
    if snapshot_flash_index:
        value = int(snapshot_flash_brightness * snapshot_flash_index /
                    float(snapshot_flash_steps))
        screen.fill((value, value, value))
    pygame.display.update()
def make_screen_surface(surface, dimensions, pixel_spacing=0):
    """Scale a native-resolution DMD surface up to *dimensions*, optionally
    drawing black grid lines so individual DMD dots stay visible."""
    scaled = pygame.transform.scale(surface, dimensions)
    if pixel_spacing:
        ratio = scaled.get_width() / float(surface.get_width())
        right = scaled.get_width() - 1
        bottom = scaled.get_height() - 1
        # horizontal grid lines, one per source row boundary
        for row in range(surface.get_height() + 1):
            y = row * ratio
            pygame.draw.line(scaled, (0, 0, 0), (0, y), (right, y),
                             pixel_spacing)
        # vertical grid lines, one per source column boundary
        for col in range(surface.get_width() + 1):
            x = col * ratio
            pygame.draw.line(scaled, (0, 0, 0), (x, 0), (x, bottom),
                             pixel_spacing)
    return scaled
def surface_to_dmd(surface):
    """Convert an RGB pygame surface into an 8-bit palettized DMD surface.

    Each source pixel is reduced to a single brightness weight using the
    global red/green/blue channel weights, then quantized to one of the
    global `shades` entries of `dmd_palette`.

    Fix: removed a leftover triple-quoted debug block (dead bare-string
    statement containing Python-2 ``print`` statements) that was evaluated
    and discarded on every pixel.
    """
    global dmd_palette
    global shades
    global red
    global green
    global blue
    global alpha_color

    # Normalize the channel weights so they sum to 1.0.
    total_weights = float(red + blue + green)
    red_mult = red / total_weights
    green_mult = green / total_weights
    blue_mult = blue / total_weights

    width, height = surface.get_size()
    pa = pygame.PixelArray(surface)
    new_surface = pygame.Surface((width, height), depth=8)
    # todo add support for alpha channel (per pixel), and specifying the
    # alpha color before the conversion versus after
    new_surface.set_palette(dmd_palette)
    if alpha_color is not None:
        new_surface.set_colorkey((alpha_color, 0, 0))
    new_pa = pygame.PixelArray(new_surface)
    for x in range(width):
        for y in range(height):
            pixel_color = surface.unmap_rgb(pa[x, y])
            # Weighted brightness in [0.0, 1.0], then mapped to a palette
            # index in [0, shades - 1].
            pixel_weight = ((pixel_color[0] * red_mult) +
                            (pixel_color[1] * green_mult) +
                            (pixel_color[2] * blue_mult)) / 255.0
            new_pa[x, y] = int(round(pixel_weight * (shades - 1)))
    return new_pa.surface
def create_palette():
    """Build the global `dmd_palette`: a linear ramp of `shades` colors
    from `dark_color` to `pixel_color`.

    The first and last entries are taken verbatim from the globals so
    rounding never alters the endpoints.

    Fix: the original did ``current_color = dark_color`` and then mutated
    it in place, which corrupted the global `dark_color` list — every
    later call started the ramp from the wrong base color. We now work on
    a copy.
    """
    global shades
    global dark_color
    global pixel_color
    global dmd_palette

    palette = []
    # Per-channel increment between consecutive shades.
    step_size = [(pixel_color[0] - dark_color[0]) / (shades - 1.0),
                 (pixel_color[1] - dark_color[1]) / (shades - 1.0),
                 (pixel_color[2] - dark_color[2]) / (shades - 1.0)]
    # Copy so the accumulation below cannot mutate the global in place.
    current_color = list(dark_color)
    # manually add the first entry to ensure it's exactly as entered
    palette.append((int(current_color[0]),
                    int(current_color[1]),
                    int(current_color[2])))
    # calculate all the middle values (all except the dark and bright)
    for _ in range(shades - 2):
        current_color[0] += step_size[0]
        current_color[1] += step_size[1]
        current_color[2] += step_size[2]
        palette.append((int(current_color[0]),
                        int(current_color[1]),
                        int(current_color[2])))
    # manually add the last entry to ensure it's exactly as entered
    palette.append(pixel_color)
    dmd_palette = palette
def screen_snap():
    """Save the current display surface to an image in snapshot_folder.

    The file name encodes the source image name plus one tag per active
    render option, and saving triggers the white "camera flash"
    animation via snapshot_flash_index.
    """
    global snapshot_folder
    global image_list
    global image_index
    global image_size
    global antialias
    global prefer_uncompressed_snapshots
    global bounding_box
    global snapshot_flash_index
    global snapshot_flash_steps
    global y_offset

    # make sure we have our folder
    if not os.path.isdir(snapshot_folder):
        os.mkdir(snapshot_folder)

    surface = pygame.display.get_surface()

    # Base name plus one tag per active option.
    name_parts = [image_list[image_index].split('.')[0], '-', str(image_size)]
    if antialias:
        name_parts.append('-aa')
    if bounding_box:
        name_parts.append('-bb')
    if y_offset:
        name_parts.append('-y' + str(y_offset))
    # Fall back to uncompressed BMP when requested or when this pygame
    # build lacks extended (PNG) image support.
    if prefer_uncompressed_snapshots or not pygame.image.get_extended():
        name_parts.append('.bmp')
    else:
        name_parts.append('.png')

    filename = os.path.join(snapshot_folder, ''.join(name_parts))
    pygame.image.save(surface, filename)
    # Kick off the flash animation countdown.
    snapshot_flash_index = snapshot_flash_steps
def flip_antialias():
    """Toggle the global antialias option between 0 and 1."""
    global antialias
    antialias = antialias ^ 1
def flip_bounding_box():
    """Toggle the global bounding_box option between 0 and 1."""
    global bounding_box
    bounding_box = bounding_box ^ 1
def get_char(char, shift):
    """Map a pygame key name to the character it would type.

    'space' yields ' '; a single-character name is upper-cased when
    shift is held; any other (named) key yields ''.
    """
    if char == 'space':
        return ' '
    if len(char) != 1:
        # multi-character key names (e.g. 'return', 'escape') type nothing
        return ''
    return char.upper() if (shift and char.isalpha()) else char
def setup_file_list(image_string):
    """Populate the global image list from a file or folder path.

    image_string may be a folder (use every BMP found under it) or a
    single file (use its folder, and start at that file). Exits the
    program if the path is invalid.
    """
    global image_file
    global image_path
    global image_list
    global image_index

    if os.path.isdir(image_string):
        image_path = image_string
        image_file = None
    elif os.path.isfile(image_string):
        image_path, image_file = os.path.split(image_string)
    else:
        print("ERROR: Parameter passed isn't a valid path or file name.")
        sys.exit()

    # Find all the images in this folder and add them to the list
    for _, _, file_names in os.walk(image_path):
        image_list.extend(
            name for name in file_names if name.upper().endswith('.BMP'))

    # figure out which one is ours
    if image_file:
        image_index = image_list.index(image_file)
def main():
    """Entry point: parse the command line, set up pygame, and run the
    event loop forever."""
    global screen
    global dmd_palette
    global pixel_color
    global dark_color
    global shades

    # Get command line input
    parser = OptionParser()
    (options, args) = parser.parse_args()
    options = vars(options)

    if len(args) != 1:
        print("Error. This tool requires a image filename as a command line parameter")
        sys.exit()
    setup_file_list(args[0])

    pygame.init()

    # Set up the window
    flags = pygame.locals.RESIZABLE
    screen = pygame.display.set_mode(window_size, flags)
    pygame.display.set_caption("Mission Pinball Framework Image Tester")

    create_palette()
    load_image()
    update_screen()

    # Event/render loop never returns.
    while True:
        main_loop()
# Run the tool only when executed directly, not when imported.
if __name__ == "__main__":
    main()
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| spierepf/mpf | tools/dmd_image.py | Python | mit | 13,830 | [
"Brian"
] | 820ecda710c74685e89a22980aae7e49e2caa12b85afe0a3cc21f2d15d1e5d5e |
# common.py - shared symbols and globals
import os, errno
# exception class so you know where exception came from
class FsDriftException(Exception):
    """fs-drift specific failure, so callers can distinguish our errors
    from generic OS/library exceptions."""
# Process exit / status codes.
NOTOK = 1
OK = 0
# Binary (power-of-two) unit conversion factors.
BYTES_PER_KiB = 1 << 10
BYTES_PER_MiB = 1 << 20
KiB_PER_GiB = 1 << 20
MiB_PER_GiB = 1 << 10
# Microseconds per second.
USEC_PER_SEC = 1000000
# Sentinel meaning "no file descriptor currently open".
FD_UNDEFINED = -1
# Integer codes for each filesystem operation the workload can issue.
class rq:
    """Enumeration of workload request (operation) types."""
    READ = 0
    RANDOM_READ = 1
    CREATE = 2
    RANDOM_WRITE = 3
    APPEND = 4
    SOFTLINK = 5
    HARDLINK = 6
    DELETE = 7
    RENAME = 8
    TRUNCATE = 9
    REMOUNT = 10
    READDIR = 11
# file size can either be fixed or exponential random distribution
class FileSizeDistr:
    """Selectors for the file-size distribution."""
    fixed = 0
    exponential = 1


def FileSizeDistr2str(v):
    """Return the human-readable name for a FileSizeDistr value."""
    names = {FileSizeDistr.fixed: "fixed",
             FileSizeDistr.exponential: "exponential"}
    name = names.get(v)
    if name is None:
        raise FsDriftException(
            'file size distribution must be one of: fixed, exponential')
    return name
# files are selected from population with random uniform
# or gaussian distribution.
class FileAccessDistr:
    """Selectors for the file-access distribution (values disjoint from
    FileSizeDistr so the two enumerations cannot be confused)."""
    uniform = 2
    gaussian = 3


def FileAccessDistr2str(v):
    """Return the human-readable name for a FileAccessDistr value."""
    names = {FileAccessDistr.uniform: "uniform",
             FileAccessDistr.gaussian: "gaussian"}
    name = names.get(v)
    if name is None:
        raise FsDriftException(
            'file access distribution must be one of: uniform, gaussian')
    return name
# instead of looking up before deletion, do reverse, delete and catch exception
def ensure_deleted(file_path):
    """Delete file_path if it exists; a missing file is not an error."""
    try:
        os.unlink(file_path)
    except OSError as err:
        # EAFP: swallow only "no such file"; anything else re-raises.
        if err.errno != errno.ENOENT:
            raise
# just create an empty file
# leave exception handling to caller
def touch(fn):
    """Create fn as an empty file (truncating any existing content).
    Exception handling is left to the caller."""
    with open(fn, 'w'):
        pass
# create directory if it's not already there
def ensure_dir_exists(dirpath):
    """Recursively create dirpath and any missing parents (mkdir -p).

    Raises FsDriftException if dirpath exists but is not a directory, or
    if a parent for a non-existent path cannot be determined.

    Fix: a single-component relative path (e.g. 'subdir') has parent
    ``''`` from os.path.dirname; the original recursed on '' and raised
    FsDriftException spuriously. An empty parent now means "the current
    directory", which already exists, so we skip the recursion.
    """
    if not os.path.exists(dirpath):
        parent_path = os.path.dirname(dirpath)
        if parent_path == dirpath:
            raise FsDriftException(
                'ensure_dir_exists: cannot obtain parent path of non-existent path: ' +
                dirpath)
        # '' parent => relative single component rooted in the cwd.
        if parent_path:
            ensure_dir_exists(parent_path)
        try:
            os.mkdir(dirpath)
        except OSError as e:
            if e.errno != errno.EEXIST:  # workaround for filesystem bug
                raise e
    else:
        if not os.path.isdir(dirpath):
            raise FsDriftException('%s already exists and is not a directory!'
                                   % dirpath)
# careful with this one
def deltree(topdir):
    """Recursively delete the directory tree rooted at topdir.

    Safety guard: refuses suspiciously short paths so a bug cannot wipe
    '/', '/tmp', etc. Paths that do not exist or are not directories are
    silently ignored.
    """
    if len(topdir) < 6:
        raise FsDriftException('are you sure you want to delete %s ?' % topdir)
    if not os.path.isdir(topdir):
        # covers both "does not exist" and "exists but is a file"
        return
    # Bottom-up walk so children are removed before their parents.
    for (root, subdir_names, file_names) in os.walk(topdir, topdown=False):
        for name in file_names:
            os.unlink(os.path.join(root, name))
        for name in subdir_names:
            os.rmdir(os.path.join(root, name))
    os.rmdir(topdir)
| bengland2/fsstress | common.py | Python | apache-2.0 | 2,836 | [
"Gaussian"
] | 60867200973d808e1c87c6eb79dea0b6cd964497f0b210edd5fc2438d482d002 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.