text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# similarity.py --- Similarity measures between protein ensembles
# Copyright (C) 2014 Wouter Boomsma, Matteo Tiberti
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Ensemble similarity calculations --- :mod:`encore.similarity`
=====================================================================
The module contains implementations of similarity measures between
protein ensembles described in:
Similarity Measures for Protein Ensembles. Lindorff-Larsen, K.;
Ferkinghoff-Borg, J. PLoS ONE 2009, 4, e4203.
"""
import optparse
import numpy
import warnings
from time import sleep
from MDAnalysis import Universe
from Ensemble import Ensemble
from clustering.Cluster import ClustersCollection
from clustering.affinityprop import AffinityPropagation
from dimensionality_reduction.stochasticproxembed import StochasticProximityEmbedding, kNNStochasticProximityEmbedding
from confdistmatrix import MinusRMSDMatrixGenerator, RMSDMatrixGenerator
from covariance import covariance_matrix, EstimatorShrinkage, EstimatorML
from multiprocessing import cpu_count
from utils import *
from scipy.stats import gaussian_kde
from random import randint
# Silence deprecation warnings - scipy problem
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=FutureWarning)

# Low boundary value for log() argument - ensure no nans
EPSILON = 1E-15


def _xlogy_scalar(x, y):
    """Scalar x*log(y), using the convention 0*log(0) == 0."""
    if x <= EPSILON and y <= EPSILON:
        return 0.0
    return x * numpy.log(y)


# Vectorized x*log(y); treating 0*log(0) as 0 keeps sums over sparse
# probability vectors free of NaNs.
xlogy = numpy.vectorize(_xlogy_scalar)
# discrete dKL
def discrete_kullback_leibler_divergence(pA, pB):
    """Discrete Kullback-Leibler divergence between two discrete probability
    distributions. Note that this measure is not symmetric:
    :math:`d_{KL}(p_A,p_B) != d_{KL}(p_B,p_A)`.

    **Arguments:**

    `pA` : iterable of floats
            First discrete probability density function

    `pB` : iterable of floats
            Second discrete probability density function

    **Returns:**

    `dkl` : float
            Discrete Kullback-Liebler divergence
    """
    # xlogy handles the 0*log(0) corner case, so zero-probability bins
    # contribute nothing to the sum.
    ratios = pA / pB
    terms = xlogy(pA, ratios)
    return numpy.sum(terms)
# discrete dJS
def discrete_jensen_shannon_divergence(pA, pB):
    """Jensen-Shannon divergence between discrete probability distributions.
    Computed as the symmetrized average of the two KL divergences against
    the mixture distribution 0.5*(pA+pB).

    **Arguments:**

    `pA` : iterable of floats
            First discrete probability density function

    `pB` : iterable of floats
            Second discrete probability density function

    **Returns:**

    `djs` : float
            Discrete Jensen-Shannon divergence
    """
    mixture = 0.5 * (pA + pB)
    dkl_a = discrete_kullback_leibler_divergence(pA, mixture)
    dkl_b = discrete_kullback_leibler_divergence(pB, mixture)
    return 0.5 * (dkl_a + dkl_b)
# calculate harmonic similarity
def harmonic_ensemble_similarity(ensemble1=None,
ensemble2=None,
sigma1=None,
sigma2=None,
x1=None,
x2=None,
mass_weighted=True,
covariance_estimator = EstimatorShrinkage()):
'''
Calculate the harmonic ensemble similarity measure
as defined in
Similarity Measures for Protein Ensembles. Lindorff-Larsen, K.;
Ferkinghoff-Borg, J. PLoS ONE 2009, 4, e4203.
**Arguments:**
`ensemble1` : encore.Ensemble or None
First ensemble to be compared. If this is None, sigma1 and x1 must be provided.
`ensemble2` : encore.Ensemble or None
Second ensemble to be compared. If this is None, sigma2 and x2 must be provided.
`sigma1` : numpy.array
Covariance matrix for the first ensemble. If this None, calculate it from ensemble1 using covariance_estimator
`sigma2` : numpy.array
Covariance matrix for the second ensemble. If this None, calculate it from ensemble1 using covariance_estimator
`x1`: numpy.array
Mean for the estimated normal multivariate distribution of the first ensemble. If this is None, calculate it from ensemble1
`x2`: numpy.array
Mean for the estimated normal multivariate distribution of the first ensemble.. If this is None, calculate it from ensemble2
`mass_weighted` : bool
Whether to perform mass-weighted covariance matrix estimation
`covariance_estimator` : either EstimatorShrinkage or EstimatorML objects
Which covariance estimator to use
**Returns:**
`dhes` : float
harmonic similarity measure
'''
# If matrices and means are specified, use them
if x1 == None or x2 == None or sigma1 == None or sigma2 == None:
if ensemble1 == None or ensemble2 == None:
raise RuntimeError
# Extract coordinates from ensembles
coordinates_system1 = ensemble1.coordinates
coordinates_system2 = ensemble2.coordinates
# Average coordinates in the two systems
x1 = numpy.average(coordinates_system1, axis=0).flatten()
x2 = numpy.average(coordinates_system2, axis=0).flatten()
# Covariance matrices in the two systems
sigma1 = covariance_matrix(ensemble1,
mass_weighted=mass_weighted,
estimator = covariance_estimator)
sigma2 = covariance_matrix(ensemble2,
mass_weighted=mass_weighted,
estimator = covariance_estimator)
# Inverse covariance matrices
sigma1_inv = numpy.linalg.pinv(sigma1)
sigma2_inv = numpy.linalg.pinv(sigma2)
# Difference between average vectors
d_avg = x1 - x2
# Sigma
sigma = sigma1_inv + sigma2_inv
# Distance measure
trace = numpy.trace(numpy.dot(sigma1, sigma2_inv) +
numpy.dot(sigma2, sigma1_inv)
- 2*numpy.identity(sigma1.shape[0]))
d_hes = 0.25*(numpy.dot(numpy.transpose(d_avg),
numpy.dot(sigma1_inv + sigma2_inv,
d_avg)) + trace)
return d_hes
def clustering_ensemble_similarity(cc, ens1, ens1_id, ens2, ens2_id):
    """Clustering ensemble similarity: build the per-ensemble discrete
    probability densities from cluster populations and compute the discrete
    Jensen-Shannon divergence between them.

    **Arguments:**

    `cc` : encore.ClustersCollection
            Collection of clusters calculated by a clustering algorithm (e.g. Affinity propagation)

    `ens1` : encore.Ensemble
            First ensemble to be used in comparison

    `ens2` : encore.Ensemble
            Second ensemble to be used in comparison

    `ens1_id` : int
            First ensemble id as detailed in the ClustersCollection metadata

    `ens2_id` : int
            Second ensemble id as detailed in the ClustersCollection metadata

    **Returns:**

    `djs` : float
            Jensen-Shannon divergence between the two ensembles, as calculated by the clustering ensemble similarity method
    """
    size1 = float(ens1.coordinates.shape[0])
    size2 = float(ens2.coordinates.shape[0])

    # For every cluster, the fraction of each ensemble's conformations
    # that fell into it.
    fracs1 = []
    fracs2 = []
    for cluster in cc:
        members = cluster.metadata['ensemble']
        fracs1.append(numpy.where(members == ens1_id)[0].shape[0] / size1)
        fracs2.append(numpy.where(members == ens2_id)[0].shape[0] / size2)
    tmpA = numpy.array(fracs1)
    tmpB = numpy.array(fracs2)

    # Exclude clusters which have 0 elements in both ensembles
    populated = tmpA + tmpB > EPSILON
    pA = tmpA[populated]
    pB = tmpB[populated]

    return discrete_jensen_shannon_divergence(pA, pB)
def cumulative_clustering_ensemble_similarity(cc, ens1, ens1_id, ens2, ens2_id, ens1_id_min=1, ens2_id_min=1):
    """ Calculate clustering ensemble similarity between joined ensembles.
    This means that, after clustering has been performed, some ensembles are
    merged and the dJS is calculated between the probability distributions of
    the two cluster groups. In particular, the two ensemble groups are defined
    by their ensemble ids: one of the two joined ensembles will comprise all
    the ensembles with id [ens1_id_min, ens1_id], and the other will comprise
    all the ensembles with id [ens2_id_min, ens2_id].

    **Arguments:**

    `cc` : encore.ClustersCollection
            Collection of clusters calculated by a clustering algorithm (e.g. Affinity propagation)

    `ens1` : encore.Ensemble
            First ensemble to be used in comparison

    `ens2` : encore.Ensemble
            Second ensemble to be used in comparison

    `ens1_id` : int
            First ensemble id as detailed in the ClustersCollection metadata

    `ens2_id` : int
            Second ensemble id as detailed in the ClustersCollection metadata

    `ens1_id_min` : int
            Lowest ensemble id included in the first joined group (default: 1)

    `ens2_id_min` : int
            Lowest ensemble id included in the second joined group (default: 1)

    **Returns:**

    `djs` : float
            Jensen-Shannon divergence between the two ensemble groups, as calculated by the clustering ensemble similarity method
    """
    # Per-cluster element counts for each group of joined ensembles.
    # Bug fix: in the original code the ">= ens_id_min" comparison sat
    # OUTSIDE the numpy.logical_and(...) call, so it was applied to the
    # boolean mask instead of the ensemble ids, selecting wrong elements.
    ensA = [ numpy.where( numpy.logical_and(c.metadata['ensemble'] <= ens1_id, c.metadata['ensemble'] >= ens1_id_min))[0].shape[0] for c in cc ]
    ensB = [ numpy.where( numpy.logical_and(c.metadata['ensemble'] <= ens2_id, c.metadata['ensemble'] >= ens2_id_min))[0].shape[0] for c in cc ]

    # Total number of elements in each joined group
    sizeA = float(numpy.sum(ensA))
    sizeB = float(numpy.sum(ensB))

    # Normalize counts to discrete probability distributions
    tmpA = numpy.array( ensA )/sizeA
    tmpB = numpy.array( ensB )/sizeB

    # Exclude clusters which have 0 elements in both ensemble groups
    pA = tmpA[tmpA+tmpB > EPSILON]
    pB = tmpB[tmpA+tmpB > EPSILON]

    return discrete_jensen_shannon_divergence(pA, pB)
def gen_kde_pdfs(embedded_space, ensemble_assignment, nensembles, nsamples=None, **kwargs):
    """
    Generate Kernel Density Estimates (KDE) from embedded spaces and
    elaborate the coordinates for later use.

    **Arguments:**

    `embedded_space` : numpy.array
            Array containing the coordinates of the embedded space

    `ensemble_assignment` : numpy.array
            Array containing one int per conformation. These allow to distinguish, in the complete embedded space, which conformations belong to each ensemble. For instance if ensemble_assignment is [1,1,1,1,2,2], it means that the first four conformations belong to ensemble 1 and the last two to ensemble 2

    `nensembles` : int
            Number of ensembles

    `nsamples` : int
            Samples to be drawn from the ensembles. Will be required in a later stage in order to calculate dJS.

    **Returns:**

    `kdes` : list of scipy.stats.gaussian_kde
            KDEs calculated from ensembles

    `resamples` : list of numpy.array
            For each KDE, samples drawn according to the probability distribution of the KDE mixture model

    `embedded_ensembles` : list of numpy.array
            List of numpy.array containing, each one, the elements of the embedded space belonging to a certain ensemble
    """
    kdes = []
    embedded_ensembles = []
    for ens_id in range(1, nensembles + 1):
        # Columns of the embedded space that belong to this ensemble
        mask = numpy.where(ensemble_assignment == ens_id)
        ens_points = embedded_space.transpose()[mask].transpose()
        embedded_ensembles.append(ens_points)
        kdes.append(gaussian_kde(ens_points))  # XXX support different bandwidth values
        # Default sample count: ten per conformation of the first ensemble
        if not nsamples:
            nsamples = ens_points.shape[1] * 10

    # Resample according to each KDE's probability distribution
    resamples = [this_kde.resample(nsamples) for this_kde in kdes]

    return (kdes, resamples, embedded_ensembles)
def dimred_ensemble_similarity(kde1, resamples1, kde2, resamples2, ln_P1_exp_P1=None, ln_P2_exp_P2=None, ln_P1P2_exp_P1=None, ln_P1P2_exp_P2=None):
    """ Calculate the Jensen-Shannon divergence according to the
    dimensionality reduction method. In this case, we have continuous
    probability densities, and we have to integrate over the measurable
    space. Our target is the Kullback-Leibler divergence, defined as:

    .. math::
            D_{KL}(P(x) || Q(x)) = \\int_{-\\infty}^{\\infty}P(x_i) ln(P(x_i)/Q(x_i)) = \\langle{}ln(P(x))\\rangle{}_P - \\langle{}ln(Q(x))\\rangle{}_P

    where the :math:`\\langle{}.\\rangle{}_P` denotes an expectation
    calculated under the distribution P. We can, thus, just estimate the
    expectation values of the components to get an estimate of dKL. Since
    the Jensen-Shannon divergence is composed of two such terms, we need
    to estimate four expectation values:

    .. math::
            \\langle{}log(P(x))\\rangle{}_P

            \\langle{}log(Q(x))\\rangle{}_Q

            \\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_P

            \\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_Q

    **Arguments:**

    `kde1` : scipy.stats.gaussian_kde
            Kernel density estimation for ensemble 1

    `resamples1` : numpy.array
            Samples drawn according to kde1. Will be used as samples to calculate the expected values according to 'P' as detailed before.

    `kde2` : scipy.stats.gaussian_kde
            Kernel density estimation for ensemble 2

    `resamples2` : numpy.array
            Samples drawn according to kde2. Will be used as samples to calculate the expected values according to 'Q' as detailed before.

    `ln_P1_exp_P1` : float or None
            Use this value for :math:`\\langle{}log(P(x))\\rangle{}_P`; if None, calculate it instead

    `ln_P2_exp_P2` : float or None
            Use this value for :math:`\\langle{}log(Q(x))\\rangle{}_Q`; if None, calculate it instead

    `ln_P1P2_exp_P1` : float or None
            Use this value for :math:`\\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_P`; if None, calculate it instead

    `ln_P1P2_exp_P2` : float or None
            Use this value for :math:`\\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_Q`; if None, calculate it instead

    **Returns:**

    `djs` : float
            Jensen-Shannon divergence calculated according to the dimensionality reduction method
    """
    # Compute each expectation value only when it was not supplied.
    # Bug fix: the original truthiness test ("not x" for ALL four values)
    # recomputed everything whenever any value was falsy -- so a legitimate
    # pre-computed 0.0 was discarded, and supplying only some values
    # crashed on the remaining Nones. "is None" handles both cases.
    if ln_P1_exp_P1 is None:
        ln_P1_exp_P1 = numpy.average(numpy.log(kde1.evaluate(resamples1)))
    if ln_P2_exp_P2 is None:
        ln_P2_exp_P2 = numpy.average(numpy.log(kde2.evaluate(resamples2)))
    if ln_P1P2_exp_P1 is None:
        ln_P1P2_exp_P1 = numpy.average(numpy.log(0.5*(kde1.evaluate(resamples1)+kde2.evaluate(resamples1))))
    if ln_P1P2_exp_P2 is None:
        ln_P1P2_exp_P2 = numpy.average(numpy.log(0.5*(kde1.evaluate(resamples2)+kde2.evaluate(resamples2))))

    return 0.5 * (ln_P1_exp_P1 - ln_P1P2_exp_P1 +
                  ln_P2_exp_P2 - ln_P1P2_exp_P2)
def cumulative_gen_kde_pdfs(embedded_space, ensemble_assignment, nensembles, nsamples=None, ens_id_min=1, ens_id_max=None):
"""
Generate Kernel Density Estimates (KDE) from embedded spaces and elaborate the coordinates for later use. However, consider more than one ensemble as the space on which the KDE will be generated. In particular, will use ensembles with ID [ens_id_min, ens_id_max].
**Arguments:**
`embedded_space` : numpy.array
Array containing the coordinates of the embedded space
`ensemble_assignment` : numpy.array
array containing one int per ensemble conformation. These allow to distinguish, in the complete embedded space, which conformations belong to each ensemble. For instance if ensemble_assignment is [1,1,1,1,2,2], it means that the first four conformations belong to ensemble 1 and the last two to ensemble 2
`nesensembles` : int
Number of ensembles
`nsamples : int
Samples to be drawn from the ensembles. Will be required in a later stage in order to calculate dJS.`
`ens_id_min` : int
Minimum ID of the ensemble to be considered; see description
`ens_id_max` : int
Maximum ID of the ensemble to be considered; see description
**Returns:**
`kdes` : scipy.stats.gaussian_kde
KDEs calculated from ensembles
`resamples` : list of numpy.array
For each KDE, draw samples according to the probability distribution of the kde mixture model
`embedded_ensembles` : list of numpy.array
List of numpy.array containing, each one, the elements of the embedded space belonging to a certain ensemble
"""
kdes = []
embedded_ensembles = []
resamples = []
if not ens_id_max:
ens_id_max = nensembles+1
for i in range(ens_id_min, ens_id_max+1):
this_embedded = embedded_space.transpose()[numpy.where(numpy.logical_and(ensemble_assignment >= ens_id_min, ensemble_assignment <= i))].transpose()
embedded_ensembles.append(this_embedded)
kdes.append(gaussian_kde(this_embedded)) # XXX support different bandwidth values
# Set number of samples
if not nsamples:
nsamples = this_embedded.shape[1]*10
# Resample according to probability distributions
for this_kde in kdes:
resamples.append(this_kde.resample(nsamples))
return (kdes, resamples, embedded_ensembles)
def write_output(matrix, base_fname=None, header="", suffix="", extension="dat"):
    """
    Write output matrix with a nice format, to stdout and optionally a file.

    **Arguments:**

    `matrix` : encore.utils.TriangularMatrix
            Matrix containing the values to be printed

    `base_fname` : str
            Basic filename for output. If None, no files will be written, and the matrix will be just printed on screen

    `header` : str
            Line to be written just before the matrix

    `suffix` : str
            String to be concatenated to basename, in order to get the final file name

    `extension` : str
            Extension for the output file
    """
    # No base filename means screen-only output (square_print skips the file)
    fname = None
    if base_fname is not None:
        fname = "%s-%s.%s" % (base_fname, suffix, extension)
    matrix.square_print(header=header, fname=fname)
def write_output_line(value, fhandler=None, suffix="", label="win.", number=0, rawline=None):
    """
    Write a line of data with a fixed format to standard output and
    optionally to a file (the line is written to the given file object).
    The format is (in the Python str.format specification language):
    '{:s}{:d}\t{:.3f}', with the first element being the label, the second
    a number that identifies the data point, and the third the value
    itself. For instance:

    win.3	0.278

    **Arguments:**

    `value` : float
            Value to be printed.

    `fhandler` : file object
            File object in which the line will be written. If None, nothing will be written to file, and the value will be just printed on screen

    `label` : str
            Label to be written before the data

    `number` : int
            Number that identifies the data being written in this line.

    `rawline` : str
            If rawline is not None, write rawline to fhandler instead of the formatted number line. rawline can be any arbitrary string.
    """
    # Tee duplicates the stream: always stdout, plus the file when given
    targets = (sys.stdout,) if fhandler is None else (sys.stdout, fhandler)
    fh = Tee(*targets)
    if rawline is not None:
        print >>fh, rawline
    else:
        print >>fh, "{:s}{:d}\t{:.3f}".format(label, number, value)
def bootstrap_coordinates(coords, times):
    """
    Bootstrap conformations in an encore.Ensemble. This means drawing from
    the encore.Ensemble.coordinates numpy array with replacement "times"
    times and returning the outcome.

    **Arguments:**

    `coords` : numpy.array
            3-dimensional coordinates array

    `times` : int
            number of times the coordinates will be bootstrapped

    **Returns:**

    `out` : list
            Bootstrapped coordinates list. len(out) = times.
    """
    nframes = coords.shape[0]
    bootstrapped = []
    for _ in range(times):
        sample = numpy.zeros(coords.shape)
        # Fill every frame slot with a uniformly drawn original frame
        # (sampling with replacement).
        for frame in range(nframes):
            pick = numpy.random.randint(low=0, high=nframes)
            sample[frame, :, :] = coords[pick, :, :]
        bootstrapped.append(sample)
    return bootstrapped
def bootstrap_matrix(matrix, ensemble_assignment=None):
    """
    Bootstrap an input square matrix. The resulting matrix will have the same
    shape as the original one, but the order of its elements will be drawn
    (with repetition). Each ensemble is bootstrapped separately.

    **Arguments:**

    `matrix` : encore.utils.TriangularMatrix
            similarity/dissimilarity matrix

    `ensemble_assignment` : numpy.array
            Array with one ensemble id per frame, used to bootstrap each
            ensemble's frames independently. Required: the original
            implementation read this from an undefined global name, which
            raised NameError at runtime.

    **Returns:**

    `this_m` : encore.utils.TriangularMatrix
            bootstrapped similarity/dissimilarity matrix

    **Raises:**

    ValueError
            If ensemble_assignment is not provided.
    """
    if ensemble_assignment is None:
        raise ValueError("ensemble_assignment must be provided")

    ensemble_identifiers = numpy.unique(ensemble_assignment)
    this_m = TriangularMatrix(size=matrix.size)
    indexes = []
    # Draw, per ensemble, a with-replacement sample of its own frame indexes
    # (frames of each ensemble are assumed contiguous: indexes are drawn in
    # [min(old_indexes), max(old_indexes)] -- TODO confirm with callers).
    for ens in ensemble_identifiers:
        old_indexes = numpy.where(ensemble_assignment == ens)[0]
        indexes.append(numpy.random.randint(low=numpy.min(old_indexes),
                                            high=numpy.max(old_indexes) + 1,
                                            size=old_indexes.shape[0]))
    indexes = numpy.hstack(indexes)
    # Rebuild the lower triangle with the resampled frame order
    for j in range(this_m.size):
        for k in range(j):
            this_m[j, k] = matrix[indexes[j], indexes[k]]
    logging.info("Matrix bootstrapped.")
    return this_m
if __name__ == "__main__":
    import optparse
    import logging

    # Option groups are declared as reusable templates; which groups are
    # actually parsed depends on the selected mode (see the three parsing
    # phases below). OptionGroups comes from utils (star import above).
    group_templates = OptionGroups()

    # Main definitions
    group = group_templates.add_group("Main options")
    group.add_option("--nensembles", dest="nensembles", default=2, type="int",
                     help="Number of ensembles to compare (default: 2)")
    group.add_option("--mode", dest="mode", default="harmonic", type="choice",
                     choices=["hes","harmonic", "ces", "clustering","dres", "dimred"],
                     help="Ensemble similarity method (default: harmonic)")
    group.add_option("--np", dest="coresn", default=cpu_count(), type=int,
                     help="Maximum number of processes to perform calculation (default: as many as the system's cores (%d))"% cpu_count())
    group.add_option("--no-align", dest="align", action="store_false", default=True,
                     help="Whether to align ensembles to the reference conformation before calculating similarity. Reference conformation will be the topology, if not specified otherwise with --reference.")
    group.add_option("--reference", dest="reference", default=None,
                     help="Reference conformation to which conformations will be aligned, if desired.")
    group.add_option("--topology", dest="topology", type="string",
                     help="Topology file for ensemble %(index)s. Supported formats: PDB, PDBQT, PQR, GRO, CRD")
    group.add_option("--details", dest="details", type="string", default=None,
                     help="Store details on the performed calculations in file. If several calculations have been performed with different parameters a bunch of files will be generated, one for each calculation.")
    group.add_option("-v","--verbose", dest="verbose", action="store_true", default=False,
                     help="Toggle verbose mode")
    group.add_option("--evaluate-convergence", dest="evaluate_convergence", action="store_true", default=False,
                     help="Use the ensemble comparison measure to evaluate the convergence of the ensemble 1.")
    group.add_option("--evaluate-convergence-mode", dest="convergence_mode", type="choice", default="increasing-window", choices=["increasing-window"],
                     help="Compare a time-window of increasing size with the rest of the trajectory. (default: increasing-window)")
    group.add_option("--estimate-error", dest="estimate_error", action="store_true", default=False,
                     help="Estimate error")
    group.add_option("--estimate-error-mode", dest="error_mode", type="choice", default="bootstrapping", choices=["bootstrapping"],
                     help="Method with which the error will be estimated (default: bootstrapping)")
    group.add_option("--output-files","-o", dest="outfiles", action="store", type="str", default=None,
                     help="Write single matrices in output files as well. Use this basename for the file names.")

    # Options for evaluate-convergence=half-half
    #group = group_templates.add_group("evaluate-convergence-mode=half-half options")
    #group.add_option("--window-size", dest="window_size", type=int, default=2500,
    #                 help="Size of used windows (number of frames; default 2500)")

    # Error-estimation options (only parsed when --estimate-error is given)
    group = group_templates.add_group("estimate-error-mode=bootstrapping options")
    group.add_option("--bootstrapping-runs", dest="bootstrapping_runs", type=int, default=1000,
                     help="Number of bootstrapping runs (default: 1000)")

    # Convergence-evaluation options (only parsed with --evaluate-convergence)
    group = group_templates.add_group("evaluate-convergence-mode=increasing-window options")
    group.add_option("--window-size", dest="window_size", type=int, default=2500,
                     help="Size of used windows (number of frames; default 2500)")
    #group = group_templates.add_group("evaluate-convergence-mode=increasing-half options")
    #group.add_option("--window-size", dest="window_size", type=int, default=2500,
    #                 help="Size of used windows (number of frames; default 2500)")
# Options for mode=harmonic
group = group_templates.add_group("mode=harmonic options")
group.add_option("--covariance-estimator", type="choice", dest="covariance_estimator", default="shrinkage",
choices=["ml","shrinkage"],
help="Type of covariance matrix estomator (maximum likelihood (ml) or shrinkage (default: shrinkage)")
# Options for mode=cluster
group = group_templates.add_group("mode=clustering options")
group.add_option("--similarity-mode", dest="similarity_mode", default="minusrmsd", type="choice",
choices=["minusrmsd"],
help="Metric for distance matrix calculation (default: minusrmsd)")
group.add_option("--clustering-mode", dest="clustering_mode", default="ap", type="choice",
choices=["ap"],
help="Clustering algorithm to be used, [ap: Affinity Propagation] (default: ap)")
# Options for mode=dimred
group = group_templates.add_group("mode=dimred options")
group.add_option("--similarity-mode", dest="similarity_mode", default="rmsd", type="choice",
choices=["rmsd"],
help="Metric for similarity matrix calculation (defaukt: rmsd)")
group.add_option("--dimred-mode", dest="dimred_mode", default="spe", type="choice", choices=["spe"],
help="Dimensionality reduction method (default: spe)" )
group.add_option("--density-mode", dest="density_mode", default="kde", type="choice",
choices=["kde"],
help="Density estimation method (default: kde)")
group.add_option("--dim", dest="dim", default="2", type="str",
help="Dimensionality of the embedded spaces (one or more, comma-separated: 2,3,4; default: 2)")
#group.add_option("--replicas", dest="replicas", default=1, type="int",
# help="Number of replicas for each number of dimensions")
# Options for dimred-mode = spe
group = group_templates.add_group("dimred-mode=spe options")
group.add_option("--spe-mode", dest="spe_mode", default='vanilla',type='choice',
choices=['vanilla','rn','knn'],
help="Types of spe calculation [plain SPE (vanilla), k-Nearest neighbours SPE (knn), Random neighborhood SPE (rn)] (default: vanilla)0")
group.add_option("--neighborhood-cutoff", dest="neighborhood_cutoff", default=1.5, type="float",
help="Neighborhood cutoff (vanilla; default: 1.5)")
group.add_option("--nneighs", dest="kn", default=15, type="int",
help="number of neighbours to be considered (knn and rn; default: 15)")
group.add_option("--max-lambda", dest="maxlam", default=2.0, type="float",
help="Starting lambda learning rate parameter (default: 2.0)")
group.add_option("--min-lambda", dest="minlam", default=0.1, type="float",
help="Final lambda learning rate (default: 0.1)")
group.add_option("--nsteps", dest="nstep", default=100, type="int",
help="Number of steps per cycle (default: 50)")
group.add_option("--ncycles", dest="ncycle", default=50, type="int",
help="Number of cycles per run. At the end of every cycle, lambda is changed. (default: 50)")
group.add_option("--stress-frequency", dest="stressfreq", default=-1, type="int",
help="Calculate residual stress value every --stress-frequency cycle (default: -1 (never))")
# Options for ensembles
group = group_templates.add_group("Ensemble %(index)s options")
group.add_option("--ensemble%(index)s-trajectory", dest="ensemble%(index)s_trajectory", type="string",
help="Trajectory file for ensemble %(index)s. Supported formats: DCD, XTC, TRR, XYZ, TRJ, MDCRD, PDB")
# group.add_option("--ensemble%(index)s-start", dest="ensemble%(index)s_start", type="int", default=0,
# help="Start index for ensemble %(index)s")
# group.add_option("--ensemble%(index)s-end", dest="ensemble%(index)s_end", type="int", default=None,
# help="End index for ensemble %(index)s")
group.add_option("--ensemble%(index)s-frame-interval", dest="ensemble%(index)s_frame_interval", type="int", default=1,
help="Frame interval ensemble %(index)s (default: 1)")
group.add_option("--ensemble%(index)s-atom-selection", dest="ensemble%(index)s_atom_selection_string", default="(name CA)",
help="CHARMM-style atom selection (default: name CA)")
# Options for similarity-mode=minusrmsd
group = group_templates.add_group("similarity-mode=minusrmsd options")
group.add_option("--superimpose", dest="superimpose", action="store_true", default = False,
help="Whether to superimpose structures before calculating distance")
group.add_option("--superimposition-subset", dest="superimposition_subset", default = None,
help="Group for superimposition (MDAnalysis selection syntax). Otherwise, the whole structure, as defined by --atom-selection, will be used.")
group.add_option("--no-mass-weighted", dest="mass_weighted", action="store_false", default = True,
help="Calculate non-massweighted RMSD (also, superimposition will not be mass-weighted)")
group.add_option("--save-matrix", "--save-similarity-matrix", dest="save_matrix", default = None,
help="Save calculated similarity/dissimilarity matrix as numpy binary file. A filename is required.")
group.add_option("--load-matrix", "--load-similarity-matrix", dest="load_matrix", default = None,
help="Load similarity/dissimilarity matrix from numpy binary file instead of calculating it. A filename is required.")
group.add_option("--change-matrix-sign", dest="change_matrix_sign", default=False, action="store_true", help="Change the sign of the elements of loaded matrix")
group.add_option("--matrix-only", dest="matrix_only", default=False, action="store_true",
help="Calculate (and save) the similarity matrix only. Run together with --save-matrix")
# Options for similarity-mode=rmsd
group = group_templates.add_group("similarity-mode=rmsd options")
group.add_option("--superimpose", dest="superimpose", action="store_true", default = False,
help="Whether to superimpose structures before calculating distance")
group.add_option("--superimposition-subset", dest="superimposition_subset", default = None,
help="Group for superimposition (MDAnalysis selection syntax). Otherwise, the whole structure, as defined by --atom-selection, will be used.")
group.add_option("--no-mass-weighted", dest="mass_weighted", action="store_false", default = True,
help="Calculate non-massweighted RMSD (also, superimposition will not be mass-weighted)")
group.add_option("--save-matrix", dest="save_matrix", default = None,
help="Save calculated matrix as numpy binary file. A filename is required.")
group.add_option("--load-matrix", dest="load_matrix", default = None,
help="Load matrix from numpy binary file instead of calculating it. A filename is required.")
group.add_option("--change-matrix-sign", dest="change_matrix_sign", default=False, action="store_true", help="Invert the sign of the elements of the loaded matrix")
group.add_option("--matrix-only", dest="matrix_only", default=False, action="store_true",
help="Calculate (and save) the similarity matrix only. Run together with --save-matrix")
# Options for similarity-mode=ap
group = group_templates.add_group("clustering-mode=ap options")
group.add_option("--preferences", dest="preferences", default="-5.0", type="str",
help="Preference values, comma-separated (default: -5.0")
group.add_option("--lambda", dest="lam", default=0.8, type="float",
help="Damping factor ([0.0;1.0] (default: 0.5))")
group.add_option("--maxiter", dest="max_iterations", default=1000, type="int",
help="Maximum number of iterations (default: 1000)")
group.add_option("--convergence", dest="convergence", default=50, type="int",
help="Minimum number of unchanging iterations to achieve convergence (default: 50)")
group.add_option("--nonoise", dest="noise", action="store_false", default=True,
help="Do not add noise to data (note: similarities must be not degenerate!)")
# Options for density_mode = kde
group = group_templates.add_group("density-mode=kde options")
# group.add_option("--bw-method", dest="bw_method", default="scott", type="choice",
# choices=['scott','silverman','scalar'], help="number of nearest neighbours to each element")
group.add_option("--use-density", dest='use_density', default='grid', type="choice",
choices=['grid','data','resample'], help="Compute JS divergence by evaluating density on the selected points")
group.add_option("--grid-resolution", dest="kde_resolution", default="0.01", type="float",
help="Grid resolution for Kernel Density Estimation (default: 0.01)"),
#group.add_option("--grid-size", dest="grid_size", default=1.0, type="float",
# help="For each dimension, grid size will be chosen as (max-min)+2*(max-min)*D.")
group.add_option("--samples", dest="samples", default=1000, type="int",
help="Number of points to resample from kde (default: 1000)"),
usage = """%prog [options].
Since many options are useful only when using one of the three
ensemble similarity methods, many of them are hidden by default.
In order to show them, please run similarity.py -h together with:
--mode=hes: options for the harmonic similarity method (default)
--mode=ces: options for the clustering ensemble similarity method
--mode=dres: options for the dimensionality reduction ensemble
similarity method
for instance:
similarity.py --mode=clustering -h
"""
##### Parse command line options
# Three-phase parsing: phase 1 reads only the main options (to learn the
# requested --mode, convergence and error-estimation settings); phase 2 adds
# the option groups those settings imply; phase 3 adds groups that depend on
# phase-2 values and finally rejects unrecognized options.
parser = optparse.OptionParser(usage=usage)
group_main = optparse.OptionGroup(parser, "Main Options")  # NOTE(review): appears unused below
group_cluster = optparse.OptionGroup(parser, "mode=clustering options")  # NOTE(review): appears unused below
# Parsing phase 1: main options only; unknown options are tolerated.
option_groups = [group_templates["Main options"]]
parser_phase1 = ParserPhase(option_groups, allow_unrecognized=True, add_help_option=False)
parser_phase1.parse()
# Parsing phase 2: add the option groups implied by the chosen mode.
if parser_phase1.options.mode == "harmonic" or parser_phase1.options.mode == "hes":
    option_groups += [group_templates["mode=harmonic options"]]
elif parser_phase1.options.mode == "clustering" or parser_phase1.options.mode == "ces":
    option_groups += [group_templates["mode=clustering options"]]
elif parser_phase1.options.mode == "dimred" or parser_phase1.options.mode == "dres":
    option_groups += [group_templates["mode=dimred options"]]
if parser_phase1.options.evaluate_convergence:
    if parser_phase1.options.convergence_mode == "increasing-window":
        option_groups += [group_templates["evaluate-convergence-mode=increasing-window options"]]
if parser_phase1.options.estimate_error:
    if parser_phase1.options.error_mode == "bootstrapping":
        option_groups += [group_templates["estimate-error-mode=bootstrapping options"]]
#if parser_phase1.options.convergence_mode == "half-half":
#    option_groups += [group_templates["evaluate-convergence-mode=half-half options"]]
#elif parser_phase1.options.convergence_mode == "increasing-half":
#    option_groups += [group_templates["evaluate-convergence-mode=increasing-half options"]]
# One "Ensemble N options" group per requested ensemble.
option_groups += [group_templates["Ensemble %(index)s options"].duplicate(i+1) for i in range(parser_phase1.options.nensembles)]
parser_phase2 = ParserPhase(option_groups, allow_unrecognized=True, add_help_option=False)
parser_phase2.parse()
# Parsing phase 3: groups that depend on phase-2 option values.
if parser_phase2.options.mode == "clustering" or parser_phase2.options.mode == "ces":
    if parser_phase2.options.similarity_mode == "minusrmsd":
        option_groups += [group_templates["similarity-mode=minusrmsd options"]]
    if parser_phase2.options.clustering_mode == "ap":
        option_groups += [group_templates["clustering-mode=ap options"]]
elif parser_phase2.options.mode == "dimred" or parser_phase2.options.mode == "dres":
    if parser_phase2.options.similarity_mode == "rmsd":
        option_groups += [group_templates["similarity-mode=rmsd options"]]
    if parser_phase2.options.dimred_mode == "spe":
        option_groups += [group_templates["dimred-mode=spe options"]]
    if parser_phase2.options.density_mode == "kde":
        option_groups += [group_templates["density-mode=kde options"]]
# Final pass: unrecognized options are now errors, and -h is honoured.
parser_phase3 = ParserPhase(option_groups, allow_unrecognized=False, add_help_option=True, usage=usage)
parser_phase3.parse()
# Set logging level and format
#console = logging.StreamHandler()
#logging.addHandler(console)
if parser_phase3.options.verbose:
    #logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%y-%m-%d %H:%M:%S : ',level=logging.INFO)
    #logging.basicConfig(format='%(asctime)s.%(msecs)03d',datefmt='%Y-%m-%d,%H:%M:%S', level=logging.INFO)
    logging.basicConfig(format='%(asctime)s.%(msecs)03d : %(message)s', datefmt='%Y-%m-%d,%H:%M:%S', level=logging.INFO)
else:
    logging.basicConfig(level=logging.WARNING)
    #logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%y-%m-%d %H:%M:%S : ',level=logging.WARNING)
    #logging.Formatter()
    # NOTE(review): this second basicConfig call is a no-op — the root logger
    # was already configured by the WARNING call above (basicConfig only
    # applies on the first call).
    logging.basicConfig(format='%(asctime)s.%(msecs)03d : %(message)s', datefmt='%Y-%m-%d,%H:%M:%S', level=logging.INFO)
logging.info("Loading ensembles . . .")
ensembles = []
ensemble_numbers = []
# Check if topology file has been specified
if not parser_phase3.options.topology:
    parser_phase3.parser.error("Topology file not specified.")
    # NOTE(review): optparse error() already exits; this is a belt-and-braces guard.
    exit(1)
# Check if evaluate convergence. In this case, just 1 ensemble.
if parser_phase3.options.evaluate_convergence:
    if parser_phase3.options.nensembles > 1:
        logging.warning("WARNING: only ensemble 1 will be considered for convergence evaluation.")
        parser_phase3.options.nensembles = 1
# Check if the number of ensemble_trajectorys is consistent with the desired number of ensembles
for i in range(1,parser_phase3.options.nensembles+1):
    if getattr(parser_phase3.options, "ensemble%d_trajectory"%i):
        ensemble_numbers.append(i)
if set(range(1,parser_phase3.options.nensembles+1)) != set(ensemble_numbers):
    parser_phase3.parser.error("ERROR: Wrong number of ensembles or trajectories specified.")
    exit(1)
# Load ensembles: one Ensemble object per --ensembleN-trajectory, all sharing
# the same topology file.
for i in range(1,parser_phase3.options.nensembles+1):
    trajectories = getattr(parser_phase3.options, "ensemble%d_trajectory"%i).split(",")
    atom_selection_string = getattr(parser_phase3.options, "ensemble%d_atom_selection_string"%i)
    frame_interval = getattr(parser_phase3.options, "ensemble%d_frame_interval"%i)
    # Default superimposition_subset to None when the option set lacks it.
    try:
        superimposition_subset_string = parser_phase3.options.superimposition_subset
    except:
        parser_phase3.options.superimposition_subset = None
    ensembles.append( Ensemble(topology = parser_phase3.options.topology,
                               trajectory = trajectories,
                               atom_selection_string = atom_selection_string,
                               superimposition_selection_string = parser_phase3.options.superimposition_subset,
                               frame_interval = frame_interval ) )
    logging.info("""Ensemble %d Loaded.
trajectories: %s
frame interval: %d
number of frames: %d
atoms selection: %s
number of atoms: %d\n""" % (i, "\n".ljust(19).join(trajectories), frame_interval, len(ensembles[-1].coordinates), atom_selection_string, len(ensembles[-1].coordinates[0]) ) )
#Check if the ensembles contain the same number of atoms
coordinatesn = len(ensembles[0].coordinates[0])
for e in ensembles[1:]:
    if len(e.coordinates[0]) != coordinatesn:
        logging.error("ERROR: ensembles must contain the same number of atoms.")
        exit(1)
logging.info("Done! %d ensembles loaded." % len(ensembles))
# If required, align to reference before proceeding.
if parser_phase3.options.align:
    if not parser_phase3.options.reference:
        reference = parser_phase3.options.topology
        logging.info("Performing least-square fit of each frame on the topology conformation.")
    else:
        reference = parser_phase3.options.reference
        logging.info("Performing least-square fit of each frame on the user-specified reference conformation.")
    reference_universe = Universe(parser_phase3.options.topology,
                                  reference)
    for e in ensembles:
        e.align(reference_universe)
else:
    logging.info("Not performing preliminar least-square superimposition.")
# Calculate the number of matrix elements output and create the matrix. The diagonal is not considered.
out_matrix_eln = parser_phase3.options.nensembles
values = TriangularMatrix(size = out_matrix_eln)
# Generate ensemble pair indeces for this calculation
pairs_indeces = [k for k in trm_indeces_nodiag(parser_phase3.options.nensembles)]
logging.info("Similarity metric calculations will now begin. %d values will be computed." % out_matrix_eln)
logging.info("%d core(s) will be used for parallel calculations." % parser_phase3.options.coresn)
# If convergence: splice ensembles
# Convergence evaluation cuts the single input ensemble into consecutive
# windows of --window-size frames; each window becomes a temporary Ensemble
# and the full trajectory is kept as the reference ensemble.
if parser_phase3.options.evaluate_convergence:
    ens_size = ensembles[0].coordinates.shape[0]
    slices_n = [0]  # frame offsets delimiting the windows
    tmp_ensembles = []
    #if parser_phase3.options.convergence_mode == 'half-half': #or parser_phase3.options.convergence_mode == 'sliding-window' or parser_phase3.options.convergence_mode == 'fixed-window':
    #if parser_phase3.options.convergence_mode == 'half-half':
    #first_window_size = ens_size/2
    #if ens_size % first_window_size == 0:
    #parser_phase3.options.window_size = first_window_size
    #else:
    #parser_phase3.options.window_size = first_window_size + 1
    #elif parser_phase3.options.convergence_mode == 'sliding-window':
    #    first_window_size = parser_phase3.options.window_size
    #elif parser_phase3.options.convergence_mode == 'fixed-window':
    #    first_window_size = parser_phase3.options.first_window_size
    #slices_n.append(first_window_size)
    #rest_slices = (ens_size - first_window_size)/parser_phase3.options.window_size
    #print "r_s", rest_slices
    #residuals = (ens_size - first_window_size) % parser_phase3.options.window_size
    #for rs in range(rest_slices):
    #slices_n.append(slices_n[-1] + parser_phase3.options.window_size)
    #if residuals != 0:
    #slices_n.append(slices_n[-1] + residuals)
    #logging.warning("WARNING: the last window will be shorter than the prescribed window size (%s frames)"%residuals)
    #tmp_ensembles = []
    #for s in range(len(slices_n)-1):
    #tmp_ensembles.append( Ensemble(topology = parser_phase3.options.topology,
    #                   trajectory = parser_phase3.options.topology,
    #                   atom_selection_string = atom_selection_string,
    #                   superimposition_selection_string = parser_phase3.options.superimposition_subset,
    #                   frame_interval = frame_interval ) )
    #tmp_ensembles[-1].coordinates = ensembles[0].coordinates[slices_n[s]:slices_n[s+1],:,:]
    if parser_phase3.options.convergence_mode == "increasing-window": #or parser_phase3.options.convergence_mode=="increasing-window":
        window_size = parser_phase3.options.window_size
        #if parser_phase3.options.convergence_mode == "increasing-half":
        #    ref_window_size = ens_size/2
        ref_window_size = 0
        # NOTE(review): with ref_window_size fixed at 0 this rounding branch
        # can never trigger (0 % window_size == 0); leftover from the disabled
        # "increasing-half" mode above.
        if ref_window_size % window_size != 0:
            ref_window_size += ref_window_size % window_size
        rest_slices = (ens_size - ref_window_size) / parser_phase3.options.window_size
        residuals = (ens_size - ref_window_size) % parser_phase3.options.window_size
        for rs in range(rest_slices-1):
            slices_n.append(slices_n[-1] + parser_phase3.options.window_size)
        # Fold leftover frames into the last window rather than dropping them.
        if residuals != 0:
            slices_n.append(slices_n[-1] + residuals + parser_phase3.options.window_size)
            logging.warning("WARNING: the last window will be shorter than the prescribed window size (%s frames)"%residuals)
        else:
            slices_n.append(slices_n[-1] + parser_phase3.options.window_size)
        # One temporary Ensemble per window; coordinates are sliced views of
        # the original ensemble, the trajectory argument is a placeholder.
        for s in range(len(slices_n)-1):
            tmp_ensembles.append( Ensemble(topology = parser_phase3.options.topology,
                                           trajectory = parser_phase3.options.topology,
                                           atom_selection_string = atom_selection_string,
                                           superimposition_selection_string = parser_phase3.options.superimposition_subset,
                                           frame_interval = frame_interval ) )
            #print slices_n
            tmp_ensembles[-1].coordinates = ensembles[0].coordinates[slices_n[s]:slices_n[s+1],:,:]
        if ref_window_size > 0:
            tmp_ensembles.append( Ensemble(topology = parser_phase3.options.topology,
                                           trajectory = parser_phase3.options.topology,
                                           atom_selection_string = atom_selection_string,
                                           superimposition_selection_string = parser_phase3.options.superimposition_subset,
                                           frame_interval = frame_interval ) )
            tmp_ensembles[-1].coordinates = ensembles[0].coordinates[slices_n[-1]:,:,:]
    # Reference ensemble: the full trajectory (increasing-half is disabled).
    if parser_phase3.options.convergence_mode == "increasing-half":
        ref_ensemble = tmp_ensembles[-1]
    else:
        ref_ensemble = ensembles[0]
    ensembles = tmp_ensembles
    parser_phase3.options.nensembles = len(ensembles)
# === Harmonic Ensemble Similarity (hes) ===
# Models each ensemble as a multivariate Gaussian (mean + covariance) and
# compares them analytically (Lindorff-Larsen & Ferkinghoff-Borg, 2009).
if parser_phase3.options.mode == "harmonic" or parser_phase3.options.mode == "hes":
    logging.info("Chosen metric: Harmonic similarity")
    if out_matrix_eln % parser_phase3.options.coresn != 0:
        logging.warning("WARNING: for optimal performance, the number of cores should be a factor of the number of similarity metric values.")
    # Covariance estimator: shrinkage is more robust for few frames / many atoms.
    if parser_phase3.options.covariance_estimator == "shrinkage":
        covariance_estimator = EstimatorShrinkage()
        logging.info(" Covariance matrix estimator: Shrinkage")
    else:
        covariance_estimator = EstimatorML()
        logging.info(" Covariance matrix estimator: Maximum Likelihood")
    xs = []      # per-ensemble flattened mean coordinates
    sigmas = []  # per-ensemble covariance matrices
if parser_phase1.options.estimate_error:
if parser_phase1.options.error_mode == "bootstrapping":
data = []
for t in range(parser_phase3.options.bootstrapping_runs):
logging.info("The coordinates will be bootstrapped.")
xs = []
sigmas = []
values = numpy.zeros((out_matrix_eln,out_matrix_eln))
for e in ensembles:
this_coords = bootstrap_coordinates(e.coordinates, 1)[0]
xs.append(numpy.average(this_coords, axis=0).flatten())
sigmas.append( covariance_matrix(e,
mass_weighted=True,
estimator = covariance_estimator) )
for i,j in pairs_indeces:
value = harmonic_ensemble_similarity(x1 = xs[i],
x2 = xs[j],
sigma1 = sigmas[i],
sigma2 = sigmas[j])
values[i,j] = value
values[j,i] = value
data.append(values)
outs = numpy.array(data)
avgs = np.average(data, axis=0)
stds = np.std(data, axis=0)
print "averages:"
print_square_array(avgs)
print "standard deviations:"
print_square_array(stds)
print "values (ens.A-ens.B value_run_1 value_run_2 ... value_run_N):"
for i,j in pairs_indeces:
print "%d-%d\t" % (i+1,j+1),
print " ".join(["%.3f"%k for k in outs[:,i,j]])
exit(0)
    # Calculate the parameters for the multivariate normal distribution of each ensemble
    for e in ensembles:
        # Extract coordinates from each ensemble
        coordinates_system = e.coordinates
        # Average coordinates in each system
        xs.append(numpy.average(coordinates_system, axis=0).flatten())
        # Covariance matrices in each system
        sigmas.append( covariance_matrix(e,
                           mass_weighted=True,
                           estimator = covariance_estimator) )
    # Convergence evaluation: compare the full (reference) ensemble against
    # each increasing window, writing one value per window.
    if parser_phase3.options.evaluate_convergence:
        fname = str(parser_phase3.options.outfiles)+"_convergence_"
        #if parser_phase3.options.convergence_mode == 'half-half':
        #    fname+="half-half_harmonic.dat"
        #    if parser_phase3.options.outfiles != None:
        #        fhandler = open(fname,'a')
        #    else:
        #        fhandler = None
        #    write_output_line(0, rawline="=== half vs half convergence estimation ===", fhandler=fhandler)
        #    write_output_line(0, rawline="half-half hes: %.3f" %
        #        harmonic_ensemble_similarity(x1 = xs[0],
        #                                     x2 = xs[1],
        #                                     sigma1 = sigmas[0],
        #                                     sigma2 = sigmas[1]),
        #        fhandler=fhandler)
        #elif parser_phase3.options.convergence_mode == 'sliding-window':
        #    print "=== sliding window convergence estimation ==="
        #    for i in range(len(ensembles)-1):
        #        print "%.3f" % harmonic_ensemble_similarity(x1 = xs[i],
        #                                                    x2 = xs[i+1],
        #                                                    sigma1 = sigmas[i],
        #                                                    sigma2 = sigmas[i+1])
        #elif parser_phase3.options.convergence_mode == 'sliding-window':
        #    print "=== sliding window convergence estimation ==="
        #    for i in range(1,len(ensembles)):
        #        print "%.3f" % harmonic_ensemble_similarity(x1 = xs[0],
        #                                                    x2 = xs[i],
        #                                                    sigma1 = sigmas[0],
        #                                                    sigma2 = sigmas[i])
        #if parser_phase3.options.convergence_mode == "increasing-half":
        #    fname += "increasing-half_harmonic.dat"
        #    if parser_phase3.options.outfiles != None:
        #        fhandler = open(fname,'a')
        #    else:
        #        fhandler = None
        #
        #    ref_x = numpy.average(ref_ensemble.coordinates, axis=0).flatten()
        #    ref_sigma = covariance_matrix(ref_ensemble,
        #                                  mass_weighted=True,
        #                                  estimator = covariance_estimator)
        #    write_output_line(rawline="# === first half vs increasing window convergence estimation ===", fhandler=fhandler)
        #    for i in range(0,len(ensembles[:-1])):
        #        write_output_line(harmonic_ensemble_similarity(x1 = ref_x,
        #                                                       x2 = xs[i],
        #                                                       sigma1 = ref_sigma,
        #                                                       sigma2 = sigmas[i]),
        #                          fhandler = fhandler,
        #                          number = i+1)
        if parser_phase3.options.convergence_mode == "increasing-window":
            fname += "increasing-window_harmonic.dat"
            if parser_phase3.options.outfiles != None:
                fhandler = open(fname,'a')
            else:
                fhandler = None
            # Gaussian parameters of the full-trajectory reference ensemble.
            ref_x = numpy.average(ref_ensemble.coordinates, axis=0).flatten()
            ref_sigma = covariance_matrix(ref_ensemble,
                                          mass_weighted=True,
                                          estimator = covariance_estimator)
            # NOTE(review): unlike the per-window lines below, this header is
            # not given fhandler=, so it always goes to stdout — confirm intended.
            write_output_line(0,rawline="# === Increasing window convergence estimation ===")
            for i in range(0,len(ensembles)):
                write_output_line(harmonic_ensemble_similarity(x1 = ref_x,
                                                               x2 = xs[i],
                                                               sigma1 = ref_sigma,
                                                               sigma2 = sigmas[i]),
                                  fhandler = fhandler,
                                  number = i+1)
    # Normal (non-convergence) run: fill the upper triangle of the
    # ensemble-vs-ensemble similarity matrix.
    else:
        for i,j in pairs_indeces:
            values[i,j] = harmonic_ensemble_similarity(x1 = xs[i],
                                                       x2 = xs[j],
                                                       sigma1 = sigmas[i],
                                                       sigma2 = sigmas[j])
    # Save details as required
    if parser_phase3.options.details:
        kwds = {}
        for i in range(len(ensembles)):
            kwds['ensemble%d_mean'%(i+1)] = xs[i]
            kwds['ensemble%d_covariance_matrix'%(i+1)] = sigmas[i]
        numpy.savez(parser_phase3.options.details, **kwds)
    header = "# === Harmonic similarity ==="
    write_output(values, header=header, base_fname=parser_phase3.options.outfiles, suffix="harmonic")
    logging.info("Calculation complete.")
    exit(0)
# === Clustering (ces) and dimensionality-reduction (dres) modes ===
# Both need a conformational distance matrix over the joined ensemble; the
# common setup (joining, matrix build/load, optional bootstrap) is done here.
if parser_phase3.options.mode == "clustering" or parser_phase3.options.mode == "ces":
    logging.info("Chosen metric: Conformational clustering")
if parser_phase3.options.mode == "dimred" or parser_phase3.options.mode == "dres":
    logging.info("Chosen metric: Dimensionality reduction")
if parser_phase3.options.mode == "clustering" or parser_phase3.options.mode == "dimred" or parser_phase3.options.mode == "ces" or parser_phase3.options.mode == "dres": # safeguard
    trajlist = []
    ensemble_assignment = []
    # Define ensemble assignments as required on the joined ensemble
    # (frame -> 1-based index of the ensemble it came from).
    for i in range(1, parser_phase3.options.nensembles+1):
        ensemble_assignment += [i for j in ensembles[i-1].coordinates]
    ensemble_assignment = numpy.array(ensemble_assignment)
    #print ensemble_assignment
    # Joined ensemble
    joined_ensemble = Ensemble(topology=parser_phase3.options.topology,
                               trajectory=[parser_phase3.options.topology],
                               atom_selection_string = parser_phase3.options.ensemble1_atom_selection_string,
                               superimposition_selection_string = parser_phase3.options.superimposition_subset)
    # Joined ensemble coordinates as a concatenation of single ensembles - faster this way
    joined_ensemble.coordinates = numpy.concatenate(tuple([ e.coordinates for e in ensembles ]) )
    joined_ensemble.superimposition_coordinates = numpy.concatenate(tuple([ e.superimposition_coordinates for e in ensembles ]) )
    # Define metadata dictionary
    metadata = {'ensemble': ensemble_assignment}
    # Choose distance metric: -RMSD (similarity, for clustering) or RMSD
    # (distance, for dimensionality reduction).
    if parser_phase3.options.similarity_mode == "minusrmsd":
        logging.info(" Similarity matrix: -RMSD matrix")
        matrix_builder = MinusRMSDMatrixGenerator()
    elif parser_phase3.options.similarity_mode == "rmsd":
        logging.info(" Similarity matrix: RMSD matrix")
        matrix_builder = RMSDMatrixGenerator()
    # Load the matrix if required
    if parser_phase3.options.load_matrix:
        logging.info(" Loading similarity matrix from: %s"%parser_phase3.options.load_matrix)
        confdistmatrix = TriangularMatrix(size=joined_ensemble.coordinates.shape[0], loadfile=parser_phase3.options.load_matrix)
        logging.info(" Done!")
        for key in confdistmatrix.metadata.dtype.names:
            logging.info(" %s : %s" % (key, str(confdistmatrix.metadata[key][0])) )
        # Change matrix sign if required. Useful to switch between similarity/distance matrix.
        if parser_phase3.options.change_matrix_sign:
            logging.info(" The matrix sign will be changed.")
            for k,v in enumerate(confdistmatrix._elements):
                confdistmatrix._elements[k] = -v
        # Check matrix size for consistency
        if not confdistmatrix.size == joined_ensemble.coordinates.shape[0]:
            logging.error("ERROR: The size of the loaded matrix and of the ensemble do not match")
            exit(1)
    # Calculate the matrix
    else:
        logging.info(" Perform pairwise alignment: %s" % str(parser_phase3.options.superimpose))
        logging.info(" Mass-weighted alignment and RMSD: %s" % str(parser_phase3.options.mass_weighted))
        if parser_phase3.options.superimpose:
            logging.info(" Atoms subset for alignment: %s" % parser_phase3.options.superimposition_subset )
        logging.info(" Calculating similarity matrix . . .")
        # Use superimposition subset, if necessary. If the pairwise alignment is not required, it will not be performed anyway.
        if parser_phase3.options.superimposition_subset:
            confdistmatrix = matrix_builder(joined_ensemble,
                                            pairwise_align = parser_phase3.options.superimpose,
                                            align_subset_coordinates = joined_ensemble.superimposition_coordinates,
                                            mass_weighted = parser_phase3.options.mass_weighted,
                                            ncores = parser_phase3.options.coresn)
        else:
            confdistmatrix = matrix_builder(joined_ensemble,
                                            pairwise_align = parser_phase3.options.superimpose,
                                            mass_weighted = parser_phase3.options.mass_weighted,
                                            ncores = parser_phase3.options.coresn)
        logging.info(" Done!")
    if parser_phase3.options.save_matrix:
        logging.info(" Similarity matrix will be saved in %s.%s"%(parser_phase3.options.save_matrix, "" if parser_phase3.options.save_matrix[-3:] == "npz" else "npz"))
        confdistmatrix.savez(parser_phase3.options.save_matrix)
    if parser_phase3.options.matrix_only:
        # NOTE(review): "simialrity" typo in the log message below (runtime string, left as-is).
        logging.info("The calculation of the simialrity matrix only was requested; calculation finished.")
        exit(0)
    # Bootstrap the distance matrix itself (in parallel) when error
    # estimation was requested; the resampled matrices are reused below.
    if parser_phase3.options.estimate_error: # if bootstrap
        logging.info("Error will be estimated instead of normal calculation.")
        if parser_phase3.options.error_mode == "bootstrapping":
            logging.info("Error estimation mode: Bootstrapping")
            logging.info("the similarity matrix will be bootstrapped %d times." % parser_phase3.options.bootstrapping_runs)
            bs_args = [tuple([confdistmatrix]) for i in range(parser_phase3.options.bootstrapping_runs)]
            pc = ParallelCalculation(parser_phase3.options.coresn, bootstrap_matrix, bs_args)
            pc_results = pc.run()
            bootstrap_matrices = zip(*pc_results)[1]
    # Start building Probability density functions (pdf)
    # --- Clustering ensemble similarity (ces) ---
    # Cluster the joined ensemble with Affinity Propagation (one run per
    # preference value), then compare ensembles via the Jensen-Shannon
    # divergence of their cluster-population distributions.
    if parser_phase3.options.mode == "clustering" or parser_phase3.options.mode == "ces":
        # Clustering mode
        if parser_phase3.options.clustering_mode == "ap":
            preferences = map(float,parser_phase3.options.preferences.split(","))
            logging.info(" Clustering algorithm: Affinity Propagation")
            logging.info(" Preference values: %s" % ", ".join(map(lambda x: "%3.2f"%x ,preferences)))
            logging.info(" Maximum iterations: %d" % parser_phase3.options.max_iterations)
            logging.info(" Convergence: %d" % parser_phase3.options.convergence)
            logging.info(" Damping: %1.2f"% parser_phase3.options.lam)
            logging.info(" Apply noise to similarity matrix: %s" % str(parser_phase3.options.noise))
            if len(preferences) % parser_phase3.options.coresn != 0:
                logging.warning("WARNING: for optimal performance, the number of cores should be a factor of the number of preference values.")
            # Choose clustering algorithm
            clustalgo = AffinityPropagation()
            # Prepare input for parallel calculation: one argument tuple per
            # AP run (bootstrap: every matrix x every preference value).
            if parser_phase3.options.estimate_error:
                if parser_phase3.options.error_mode == "bootstrapping":
                    confdistmatrixs = []
                    lams = []
                    max_iterationss = []
                    convergences = []
                    noises = []
                    real_prefs = []
                    for p in preferences:
                        confdistmatrixs.extend(bootstrap_matrices)
                        lams.extend([parser_phase3.options.lam]*len(bootstrap_matrices))
                        max_iterationss.extend([parser_phase3.options.max_iterations]*len(bootstrap_matrices))
                        noises.extend([parser_phase3.options.noise]*len(bootstrap_matrices))
                        convergences.extend([parser_phase3.options.convergence]*len(bootstrap_matrices))
                        real_prefs.extend([p]*len(bootstrap_matrices))
                    old_prefs = preferences
                    preferences = real_prefs
            else:
                confdistmatrixs = [ confdistmatrix for i in preferences ]
                lams = [ parser_phase3.options.lam for i in preferences ]
                max_iterationss = [ parser_phase3.options.max_iterations for i in preferences ]
                convergences = [ parser_phase3.options.convergence for i in preferences ]
                noises = [ int(parser_phase3.options.noise) for i in preferences ]
            args = zip(confdistmatrixs, preferences, lams, max_iterationss, convergences, noises)
            logging.info(" Starting affinity propagation runs . . .")
            # Do it
            pc = ParallelCalculation(parser_phase3.options.coresn, clustalgo, args)
            results = pc.run()
            logging.info("\n Done!")
            # Create clusters collections from clustering results, one for each cluster. None if clustering didn't work.
            ccs = [ ClustersCollection(clusters[1], metadata=metadata) for clusters in results ]
            # Bootstrap branch: one dJS matrix per (preference, bootstrap run);
            # print averages/stddevs per preference value and exit.
            if parser_phase3.options.estimate_error:
                if parser_phase3.options.error_mode == "bootstrapping":
                    preferences = old_prefs
                    k = 0
                    for i,p in enumerate(preferences):
                        failed_runs = 0
                        values = []
                        for j in range(parser_phase3.options.bootstrapping_runs):
                            # Skip runs where AP failed to converge.
                            if ccs[k].clusters == None:
                                failed_runs += 1
                                k += 1
                                continue
                            values.append(numpy.zeros((out_matrix_eln,out_matrix_eln)))
                            for pair in pairs_indeces:
                                # Calculate dJS
                                this_djs = clustering_ensemble_similarity( ccs[k], ensembles[pair[0]], pair[0]+1, ensembles[pair[1]], pair[1]+1 )
                                values[-1][pair[0],pair[1]] = this_djs
                                values[-1][pair[1],pair[0]] = this_djs
                            k += 1
                        outs = numpy.array(values)
                        avgs = numpy.average(outs, axis=0)
                        stds = numpy.std(outs, axis=0)
                        print "== clustering ensemble similarity, preference %.1f" %p
                        print "averages:"
                        print_square_array(avgs)
                        print "standard deviations:"
                        print_square_array(stds)
                        print "values (ens.A-ens.B value_run_1 value_run_2 ... value_run_N):"
                        for i,j in pairs_indeces:
                            print "%d-%d\t" % (i+1,j+1),
                            print " ".join(["%.3f"%k for k in outs[:,i,j]])
                    exit(0)
            # Normal run: one similarity matrix (or convergence series) per
            # preference value.
            for i,p in enumerate(preferences):
                if ccs[i].clusters == None:
                    continue
                if parser_phase3.options.evaluate_convergence:
                    fname = str(parser_phase3.options.outfiles)+"_convergence_clustering_preference%.1f" %p
                    header = "=== convergence clustering, preference %.1f, "%p
                    if parser_phase3.options.convergence_mode=="increasing-window":
                        values = TriangularMatrix(size=out_matrix_eln)
                        fname += "_increasing-window.dat"
                        header += "increasing window ==="
                        if parser_phase3.options.outfiles != None:
                            fhandler = open(fname,'a')
                        else:
                            fhandler = None
                        write_output_line(0,rawline=header, fhandler=fhandler)
                        for j in range(0,len(ensembles)):
                            write_output_line( cumulative_clustering_ensemble_similarity( ccs[i],
                                                   ensembles[-1],
                                                   len(ensembles)+1,
                                                   ensembles[j], j+1),
                                               fhandler=fhandler,
                                               number=j+1)
                # for every preference value
                else:
                    values = TriangularMatrix(size=out_matrix_eln)
                    header = "# ==== Preference value: %1.2f ==="%p
                    for pair in pairs_indeces:
                        # Calculate dJS
                        values[pair[0],pair[1]] = clustering_ensemble_similarity( ccs[i], ensembles[pair[0]], pair[0]+1, ensembles[pair[1]], pair[1]+1)
                    write_output(values, header=header, base_fname=parser_phase3.options.outfiles, suffix="clustering-pref%.1f"%p)
                if parser_phase3.options.details:
                    kwds = {}
                    kwds['centroids'] = numpy.array([c.centroid for c in ccs[i]])
                    kwds['ensemble_sizes'] = numpy.array([e.coordinates.shape[0] for e in ensembles])
                    for cln,cluster in enumerate(ccs[i]):
                        kwds["cluster%d"%(cln+1)] = numpy.array(cluster.elements)
                    numpy.savez("%s_preference_%.2f"%(parser_phase3.options.details,p), **kwds)
        logging.info("Calculation complete.")
        exit(0)
    # --- Dimensionality-reduction ensemble similarity (dres) ---
    # Embed the joined ensemble with Stochastic Proximity Embedding (one run
    # per requested dimension), estimate each ensemble's density in the
    # embedded space with a Gaussian KDE, and compare densities pairwise.
    if parser_phase3.options.mode == "dimred" or parser_phase3.options.mode == "dres":
        dimensions = map(int,parser_phase3.options.dim.split(','))
        # prepare runs. (e.g.: runs = [1,2,3,1,2,3,1,2,3, ...])
        if parser_phase3.options.estimate_error:
            if parser_phase3.options.error_mode == "bootstrapping":
                runs = []
                for d in dimensions:
                    runs.extend([d]*parser_phase3.options.bootstrapping_runs)
                # NOTE(review): this gives len(bootstrap_matrices)*bootstrapping_runs
                # matrices vs len(dimensions)*bootstrapping_runs runs — the two
                # lists only line up when len(dimensions) == bootstrapping_runs;
                # confirm intended.
                matrices = bootstrap_matrices*parser_phase3.options.bootstrapping_runs
        else:
            runs = dimensions
            matrices = [confdistmatrix for i in runs]
        for d in dimensions:
            if d > confdistmatrix.size:
                logging.error("ERROR: The embedded space must have a number of dimensions inferior to the original space.")
                exit(1)
        # Choose algorithm and prepare options: one argument tuple per run.
        embedding_options = []
        if parser_phase3.options.spe_mode == 'vanilla':
            embedder = StochasticProximityEmbedding()
            for r in range(len(runs)):
                embedding_options += [(matrices[r],
                                       parser_phase3.options.neighborhood_cutoff,
                                       runs[r],
                                       parser_phase3.options.maxlam,
                                       parser_phase3.options.minlam,
                                       parser_phase3.options.ncycle,
                                       parser_phase3.options.nstep,
                                       parser_phase3.options.stressfreq)]
        if parser_phase3.options.spe_mode == 'rn':
            # NOTE(review): RandomNeighborhoodStochasticProximityEmbedding is
            # not among this file's visible imports — confirm it is imported
            # elsewhere, otherwise this branch raises NameError.
            embedder = RandomNeighborhoodStochasticProximityEmbedding()
            for r in range(len(runs)):
                embedding_options += [(matrices[r],
                                       parser_phase3.options.neighborhood_cutoff,
                                       parser_phase3.options.kn,
                                       runs[r],
                                       parser_phase3.options.maxlam,
                                       parser_phase3.options.minlam,
                                       parser_phase3.options.ncycle,
                                       parser_phase3.options.stressfreq)]
        if parser_phase3.options.spe_mode == 'knn':
            embedder = kNNStochasticProximityEmbedding()
            for r in range(len(runs)):
                embedding_options += [(matrices[r],
                                       parser_phase3.options.kn,
                                       runs[r],
                                       parser_phase3.options.maxlam,
                                       parser_phase3.options.minlam,
                                       parser_phase3.options.ncycle,
                                       parser_phase3.options.nstep,
                                       parser_phase3.options.stressfreq)]
        pc = ParallelCalculation(parser_phase3.options.coresn, embedder, embedding_options)
        # Run parallel calculation
        results = pc.run()
        sleep(1)
        embedded_spaces_perdim = {}
        stresses_perdim = {}
        # Sort out obtained spaces and their residual stress values
        # Bootstrap branch: one dJS matrix per (dimension, bootstrap run);
        # print averages/stddevs per dimension and exit.
        if parser_phase3.options.estimate_error: # if bootstrap
            if parser_phase3.options.error_mode == "bootstrapping":
                k = 0
                for ndim in dimensions:
                    values = []
                    for i in range(parser_phase3.options.bootstrapping_runs):
                        header = "# ==== Number of dimensions: %d ==="%ndim
                        values.append(numpy.zeros((out_matrix_eln,out_matrix_eln)))
                        embedded_stress = results[k][1][0]
                        embedded_space = results[k][1][1]
                        kdes, resamples, embedded_ensembles = gen_kde_pdfs(embedded_space, ensemble_assignment, parser_phase3.options.nensembles, nsamples = parser_phase3.options.samples)
                        for pair in pairs_indeces:
                            this_value = dimred_ensemble_similarity(kdes[pair[0]], resamples[pair[0]], kdes[pair[1]],resamples[pair[1]])
                            values[-1][pair[0],pair[1]] = this_value
                            values[-1][pair[1],pair[0]] = this_value
                        k += 1
                    outs = numpy.array(values)
                    avgs = numpy.average(outs, axis=0)
                    stds = numpy.std(outs, axis=0)
                    print "== number of dimensions: %d ==" % ndim
                    print "averages:"
                    print_square_array(avgs)
                    print "standard deviations:"
                    print_square_array(stds)
                    print "values (ens.A-ens.B value_run_1 value_run_2 ... value_run_N):"
                    for i,j in pairs_indeces:
                        print "%d-%d\t" % (i+1,j+1),
                        print " ".join(["%.3f"%k for k in outs[:,i,j]])
                exit(0)
        # Group results by dimension.
        # NOTE(review): range(1) means a single embedding run per dimension is
        # collected; looks like a leftover from a multi-run scheme — confirm.
        for i in range(len(dimensions)):
            stresses_perdim[dimensions[i]] = []
            embedded_spaces_perdim[dimensions[i]] = []
            for j in range(1):
                stresses_perdim[dimensions[i]].append(results[j*len(dimensions)+i][1][0])
                embedded_spaces_perdim[dimensions[i]].append(results[j*len(dimensions)+i][1][1])
        for ndim in dimensions:
            # Keep the embedding with the lowest residual stress.
            embedded_spaces = embedded_spaces_perdim[ndim]
            embedded_stresses = stresses_perdim[ndim]
            embedded_stress = embedded_stresses[numpy.argmin(embedded_stresses)]
            embedded_space = embedded_spaces[numpy.argmin(embedded_stresses)]
            kdes, resamples, embedded_ensembles = gen_kde_pdfs(embedded_space, ensemble_assignment, parser_phase3.options.nensembles, nsamples = parser_phase3.options.samples)
            # For every chosen dimension value:
            if parser_phase3.options.evaluate_convergence:
                fname = str(parser_phase3.options.outfiles)+"_convergence_dimred_%ddimensions" %ndim
                header = "=== convergence dimred, dimension %d: " % ndim
                #if parser_phase3.options.convergence_mode == 'half-half':
                #    fname+="_half-half.dat"
                #    header += "half-half ==="
                #    if parser_phase3.options.outfiles != None:
                #        fhandler = open(fname,'a')
                #    else:
                #        fhandler = None
                #    write_output_line(0,rawline=header, handler=fhandler)
                #    write_output_line(0, rawline="half-half convergence: %.3f" %
                #        dimred_ensemble_similarity(kdes[0], resamples[0], kdes[1],resamples[1]), fhandler=fhandler)
                #elif parser_phase3.options.convergence_mode == 'sliding-window':
                #    print "sliding window ==="
                #    for j in range(len(ensembles)-1):
                #        print "%.3f" % dimred_ensemble_similarity(kdes[j], resamples[j], kdes[j+1],resamples[j+1])
                #elif parser_phase3.options.convergence_mode == 'fixed-window':
                #    print "fixed window ==="
                #    for j in range(1,len(ensembles)):
                #        print "%.3f" % dimred_ensemble_similarity(kdes[0], resamples[0], kdes[j],resamples[j])
                #elif parser_phase3.options.convergence_mode == "increasing-half":
                #    fname += "_increasing-half.dat"
                #    header += "increasing half ==="
                #    if parser_phase3.options.outfiles != None:
                #        fhandler = open(fname,'a')
                #    else:
                #        fhandler = None
                #    kdes, resamples, embedded_ensembles = cumulative_gen_kde_pdfs(embedded_space, ensemble_assignment, parser_phase3.options.nensembles, nsamples = parser_phase3.options.samples, ens_id_max=len(ensembles))
                #    ref_kdes, ref_resamples, ref_embedded_ensembles = cumulative_gen_kde_pdfs(embedded_space, ensemble_assignment, parser_phase3.options.nensembles, nsamples = parser_phase3.options.samples, ens_id_max=len(ensembles), ens_id_min=len(ensembles))
                #    write_output_line(0,rawline=header, fhandler=fhandler)
                #    for j in range(0,len(ensembles)-1):
                #        write_output_line(dimred_ensemble_similarity(ref_kdes[0],
                #                                                     ref_resamples[0],
                #                                                     kdes[j],
                #                                                     resamples[j]),
                #                          fhandler = fhandler,
                #                          number = j+1)
                if parser_phase3.options.convergence_mode=="increasing-window":
                    fname += "_increasing-window.dat"
                    header += "increasing window ==="
                    if parser_phase3.options.outfiles != None:
                        fhandler = open(fname,'a')
                    else:
                        fhandler = None
                    # Cumulative KDEs: entry j covers windows 1..j; the last
                    # entry is used as the full-trajectory reference below.
                    kdes, resamples, embedded_ensembles = cumulative_gen_kde_pdfs(embedded_space, ensemble_assignment, parser_phase3.options.nensembles-1, nsamples = parser_phase3.options.samples)
                    write_output_line(0, rawline=header, fhandler=fhandler)
                    for j in range(0,len(ensembles)):
                        write_output_line(dimred_ensemble_similarity(kdes[-1],
                                                                     resamples[-1],
                                                                     kdes[j],
                                                                     resamples[j]),
                                          fhandler = fhandler,
                                          number = j+1)
            # Normal run: pairwise dJS between the per-ensemble KDEs.
            else:
                for pair in pairs_indeces:
                    values[pair[0],pair[1]] = dimred_ensemble_similarity(kdes[pair[0]], resamples[pair[0]], kdes[pair[1]],resamples[pair[1]])
                header = "# ==== Number of dimensions: %d ==="%ndim
                write_output(values, header=header, base_fname=parser_phase3.options.outfiles, suffix="dimred-%ddimensions"%ndim)
            if parser_phase3.options.details:
                kwds = {}
                kwds["stress"] = numpy.array([embedded_stress])
                for en,e in enumerate(embedded_ensembles):
                    kwds[("ensemble%d"%en)] = e
                numpy.savez("%s_%d_dimensions" % (parser_phase3.options.details, ndim), **kwds)
        logging.info("Calculation complete.")
        exit(0)
|
encore-similarity/encore
|
encore/similarity.py
|
Python
|
gpl-3.0
| 81,833
|
[
"CHARMM",
"MDAnalysis"
] |
128ee64d9455b5004bdc0b7b9bd92efc99ad86da3fa95e7aa30566958497bcd7
|
"""
Tests for discussion pages
"""
import datetime
from uuid import uuid4
from nose.plugins.attrib import attr
from nose.tools import nottest
from pytz import UTC
from flaky import flaky
from common.test.acceptance.tests.discussion.helpers import BaseDiscussionTestCase
from common.test.acceptance.tests.helpers import UniqueCourseTest
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionPage,
InlineDiscussionThreadPage,
DiscussionUserProfilePage,
DiscussionTabHomePage,
DiscussionSortPreferencePage,
)
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.fixtures.discussion import (
SingleThreadViewFixture,
UserProfileViewFixture,
SearchResultFixture,
Thread,
Response,
Comment,
SearchResult,
MultipleThreadFixture,
)
from common.test.acceptance.tests.discussion.helpers import BaseDiscussionMixin
from common.test.acceptance.tests.helpers import skip_if_browser
THREAD_CONTENT_WITH_LATEX = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n----------\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. (b).\n\n
**(a)** $H_1(e^{j\\omega}) = \\sum_{n=-\\infty}^{\\infty}h_1[n]e^{-j\\omega n} =
\\sum_{n=-\\infty} ^{\\infty}h[n]e^{-j\\omega n}+\\delta_2e^{-j\\omega n_0}$
$= H(e^{j\\omega})+\\delta_2e^{-j\\omega n_0}=A_e (e^{j\\omega}) e^{-j\\omega n_0}
+\\delta_2e^{-j\\omega n_0}=e^{-j\\omega n_0} (A_e(e^{j\\omega})+\\delta_2)
$H_3(e^{j\\omega})=A_e(e^{j\\omega})+\\delta_2$. Dummy $A_e(e^{j\\omega})$ dummy post $.
$A_e(e^{j\\omega}) \\ge -\\delta_2$, it follows that $H_3(e^{j\\omega})$ is real and
$H_3(e^{j\\omega})\\ge 0$.\n\n**(b)** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.\n\n
**Case 1:** If $re^{j\\theta}$ is a Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n**Case 3:** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem $H_3(e^{j\\omega}) = P(cos\\omega)(cos\\omega - cos\\theta)^k$,
Lorem Lorem Lorem Lorem Lorem Lorem $P(cos\\omega)$ has no
$(cos\\omega - cos\\theta)$ factor.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
$P(cos\\theta) \\neq 0$. Since $P(cos\\omega)$ this is a dummy data post $\\omega$,
dummy $\\delta > 0$ such that for all $\\omega$ dummy $|\\omega - \\theta|
< \\delta$, $P(cos\\omega)$ Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
"""
class DiscussionResponsePaginationTestMixin(BaseDiscussionMixin):
    """
    Mixin providing response-pagination tests shared by the inline
    discussion view and the discussion tab view.
    """

    def assert_response_display_correct(self, response_total, displayed_responses):
        """
        Verify every pagination-related aspect of the response display:

        * the text reporting the total number of responses
        * presence of the "Add a response" button
        * the number of responses actually rendered
        * the "Showing ..." indicator text
        * the text of the button that loads more responses
        """
        page = self.thread_page
        self.assertEqual(
            page.get_response_total_text(),
            "{} responses".format(response_total)
        )
        self.assertEqual(page.has_add_response_button(), response_total != 0)
        self.assertEqual(page.get_num_displayed_responses(), displayed_responses)
        # Expected "Showing ..." indicator text.
        if response_total == 0:
            expected_shown = None
        elif displayed_responses == response_total:
            expected_shown = "Showing all responses"
        else:
            expected_shown = "Showing first {} responses".format(displayed_responses)
        self.assertEqual(page.get_shown_responses_text(), expected_shown)
        # Expected load-more button label; responses load in pages of 100
        # after the initial page.
        remaining = response_total - displayed_responses
        if remaining == 0:
            expected_load = None
        elif remaining < 100:
            expected_load = "Load all responses"
        else:
            expected_load = "Load next 100 responses"
        self.assertEqual(page.get_load_responses_button_text(), expected_load)

    def test_pagination_no_responses(self):
        self.setup_thread(0)
        self.assert_response_display_correct(0, 0)

    def test_pagination_few_responses(self):
        self.setup_thread(5)
        self.assert_response_display_correct(5, 5)

    def test_pagination_two_response_pages(self):
        self.setup_thread(50)
        self.assert_response_display_correct(50, 25)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(50, 50)

    def test_pagination_exactly_two_response_pages(self):
        self.setup_thread(125)
        self.assert_response_display_correct(125, 25)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(125, 125)

    def test_pagination_three_response_pages(self):
        self.setup_thread(150)
        self.assert_response_display_correct(150, 25)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(150, 125)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(150, 150)

    def test_add_response_button(self):
        self.setup_thread(5)
        self.assertTrue(self.thread_page.has_add_response_button())
        self.thread_page.click_add_response_button()

    def test_add_response_button_closed_thread(self):
        self.setup_thread(5, closed=True)
        self.assertFalse(self.thread_page.has_add_response_button())
@attr(shard=2)
class DiscussionHomePageTest(BaseDiscussionTestCase):
    """
    Tests for the discussion home page.
    """
    # NOTE(review): not referenced in the methods visible here; presumably
    # consumed by search-related helpers or fixtures -- confirm before removing.
    SEARCHED_USERNAME = "gizmo"
    def setUp(self):
        """Log in via auto-auth and open the discussion tab home page."""
        super(DiscussionHomePageTest, self).setUp()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.page = DiscussionTabHomePage(self.browser, self.course_id)
        self.page.visit()
    @attr(shard=2)
    def test_new_post_button(self):
        """
        Scenario: I can create new posts from the Discussion home page.
        Given that I am on the Discussion home page
        When I click on the 'New Post' button
        Then I should be shown the new post form
        """
        self.assertIsNotNone(self.page.new_post_button)
        self.page.click_new_post_button()
        self.assertIsNotNone(self.page.new_post_form)
    def test_receive_update_checkbox(self):
        """
        Scenario: I can save the receive update email notification checkbox
        on Discussion home page.
        Given that I am on the Discussion home page
        When I click on the 'Receive update' checkbox
        Then it should always shown selected.
        """
        receive_updates_selector = '.email-setting'
        receive_updates_checkbox = self.page.is_element_visible(receive_updates_selector)
        self.assertTrue(receive_updates_checkbox)
        # Checkbox starts unselected; selecting it must persist across a reload.
        self.assertFalse(self.page.is_checkbox_selected(receive_updates_selector))
        self.page.click_element(receive_updates_selector)
        self.assertTrue(self.page.is_checkbox_selected(receive_updates_selector))
        self.page.refresh_and_wait_for_load()
        self.assertTrue(self.page.is_checkbox_selected(receive_updates_selector))
    @attr('a11y')
    def test_page_accessibility(self):
        """Run an accessibility audit on the discussion home page."""
        self.page.a11y_audit.config.set_rules({
            "ignore": [
                'section',  # TODO: AC-491
                'aria-required-children',  # TODO: AC-534
            ]
        })
        self.page.a11y_audit.check_for_accessibility_errors()
@attr(shard=2)
class DiscussionNavigationTest(BaseDiscussionTestCase):
    """
    Tests for breadcrumbs navigation in the Discussions page nav bar
    """
    def setUp(self):
        """Log in, push a single thread with LaTeX content, and open its page."""
        super(DiscussionNavigationTest, self).setUp()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        thread_id = "test_thread_{}".format(uuid4().hex)
        thread_fixture = SingleThreadViewFixture(
            Thread(
                id=thread_id,
                body=THREAD_CONTENT_WITH_LATEX,
                commentable_id=self.discussion_id
            )
        )
        thread_fixture.push()
        self.thread_page = DiscussionTabSingleThreadPage(
            self.browser,
            self.course_id,
            self.discussion_id,
            thread_id
        )
        self.thread_page.visit()
    def test_breadcrumbs_push_topic(self):
        """Selecting a topic in the nav menu pushes it onto the breadcrumb trail."""
        topic_button = self.thread_page.q(
            css=".forum-nav-browse-menu-item[data-discussion-id='{}']".format(self.discussion_id)
        )
        self.assertTrue(topic_button.visible)
        topic_button.click()
        # Verify the thread's topic has been pushed to breadcrumbs
        breadcrumbs = self.thread_page.q(css=".breadcrumbs .nav-item")
        self.assertEqual(len(breadcrumbs), 2)
        self.assertEqual(breadcrumbs[1].text, "Test Discussion Topic")
    def test_breadcrumbs_back_to_all_topics(self):
        """Clicking the root breadcrumb returns to the all-topics view."""
        topic_button = self.thread_page.q(
            css=".forum-nav-browse-menu-item[data-discussion-id='{}']".format(self.discussion_id)
        )
        self.assertTrue(topic_button.visible)
        topic_button.click()
        # Verify clicking the first breadcrumb takes you back to all topics
        self.thread_page.q(css=".breadcrumbs .nav-item")[0].click()
        self.assertEqual(len(self.thread_page.q(css=".breadcrumbs .nav-item")), 1)
    def test_breadcrumbs_clear_search(self):
        """Clicking the root breadcrumb clears an active search."""
        self.thread_page.q(css=".search-input").fill("search text")
        self.thread_page.q(css=".search-btn").click()
        # Verify that clicking the first breadcrumb clears your search
        self.thread_page.q(css=".breadcrumbs .nav-item")[0].click()
        self.assertEqual(self.thread_page.q(css=".search-input").text[0], "")
@attr(shard=2)
class DiscussionTabSingleThreadTest(BaseDiscussionTestCase, DiscussionResponsePaginationTestMixin):
    """
    Tests for the discussion page displaying a single thread
    """
    def setUp(self):
        """Log in via auto-auth and prepare the course tab navigation page."""
        super(DiscussionTabSingleThreadTest, self).setUp()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.tab_nav = TabNavPage(self.browser)
    def setup_thread_page(self, thread_id):
        """Create and visit the single-thread page for the given thread id."""
        self.thread_page = self.create_single_thread_page(thread_id)  # pylint: disable=attribute-defined-outside-init
        self.thread_page.visit()
    def test_mathjax_rendering(self):
        """A thread body containing LaTeX is rendered through MathJax."""
        thread_id = "test_thread_{}".format(uuid4().hex)
        thread_fixture = SingleThreadViewFixture(
            Thread(
                id=thread_id,
                body=THREAD_CONTENT_WITH_LATEX,
                commentable_id=self.discussion_id,
                thread_type="discussion"
            )
        )
        thread_fixture.push()
        self.setup_thread_page(thread_id)
        self.assertTrue(self.thread_page.is_discussion_body_visible())
        self.thread_page.verify_mathjax_preview_available()
        self.thread_page.verify_mathjax_rendered()
    def test_markdown_reference_link(self):
        """
        Check markdown editor renders reference link correctly
        and colon(:) in reference link is not converted to %3a
        """
        sample_link = "http://example.com/colon:test"
        thread_content = """[enter link description here][1]\n[1]: http://example.com/colon:test"""
        thread_id = "test_thread_{}".format(uuid4().hex)
        thread_fixture = SingleThreadViewFixture(
            Thread(
                id=thread_id,
                body=thread_content,
                commentable_id=self.discussion_id,
                thread_type="discussion"
            )
        )
        thread_fixture.push()
        self.setup_thread_page(thread_id)
        self.assertEqual(self.thread_page.get_link_href(), sample_link)
    def test_marked_answer_comments(self):
        """
        In a question thread, comments under an endorsed response stay hidden
        until 'show comments' is clicked, which also reveals the add-comment form.
        """
        thread_id = "test_thread_{}".format(uuid4().hex)
        response_id = "test_response_{}".format(uuid4().hex)
        comment_id = "test_comment_{}".format(uuid4().hex)
        thread_fixture = SingleThreadViewFixture(
            Thread(id=thread_id, commentable_id=self.discussion_id, thread_type="question")
        )
        thread_fixture.addResponse(
            Response(id=response_id, endorsed=True),
            [Comment(id=comment_id)]
        )
        thread_fixture.push()
        self.setup_thread_page(thread_id)
        self.assertFalse(self.thread_page.is_comment_visible(comment_id))
        self.assertFalse(self.thread_page.is_add_comment_visible(response_id))
        self.assertTrue(self.thread_page.is_show_comments_visible(response_id))
        self.thread_page.show_comments(response_id)
        self.assertTrue(self.thread_page.is_comment_visible(comment_id))
        self.assertTrue(self.thread_page.is_add_comment_visible(response_id))
        self.assertFalse(self.thread_page.is_show_comments_visible(response_id))
    def test_discussion_blackout_period(self):
        """
        Verify that new discussion can not be started during course blackout period.
        Blackout period is the period between which students cannot post new or contribute
        to existing discussions.
        """
        now = datetime.datetime.now(UTC)
        # Update course advance settings with a valid blackout period.
        self.course_fixture.add_advanced_settings(
            {
                u"discussion_blackouts": {
                    "value": [
                        [
                            (now - datetime.timedelta(days=14)).isoformat(),
                            (now + datetime.timedelta(days=2)).isoformat()
                        ]
                    ]
                }
            }
        )
        self.course_fixture._add_advanced_settings()  # pylint: disable=protected-access
        self.browser.refresh()
        thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
        thread_fixture = SingleThreadViewFixture(thread)
        thread_fixture.addResponse(
            Response(id="response1"),
            [Comment(id="comment1")])
        thread_fixture.push()
        self.setup_thread_page(thread.get("id"))  # pylint: disable=no-member
        # Verify that `Add a Post` is not visible on course tab nav.
        self.assertFalse(self.tab_nav.has_new_post_button_visible_on_tab())
        # Verify that `Add a response` button is not visible.
        self.assertFalse(self.thread_page.has_add_response_button())
        # Verify user can not add new responses or modify existing responses.
        self.assertFalse(self.thread_page.has_discussion_reply_editor())
        self.assertFalse(self.thread_page.is_response_editable("response1"))
        self.assertFalse(self.thread_page.is_response_deletable("response1"))
        # Verify that user can not add new comment to a response or modify existing responses.
        self.assertFalse(self.thread_page.is_add_comment_visible("response1"))
        self.assertFalse(self.thread_page.is_comment_editable("comment1"))
        self.assertFalse(self.thread_page.is_comment_deletable("comment1"))
class DiscussionTabMultipleThreadTest(BaseDiscussionTestCase, BaseDiscussionMixin):
    """
    Tests for the discussion page with multiple threads
    """
    def setUp(self):
        """Log in, create two threads, build a page object for each, visit the first."""
        super(DiscussionTabMultipleThreadTest, self).setUp()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.thread_count = 2
        self.thread_ids = []
        # setup_multiple_threads (from BaseDiscussionMixin) populates self.thread_ids.
        self.setup_multiple_threads(thread_count=self.thread_count)
        self.thread_page_1 = DiscussionTabSingleThreadPage(
            self.browser,
            self.course_id,
            self.discussion_id,
            self.thread_ids[0]
        )
        self.thread_page_2 = DiscussionTabSingleThreadPage(
            self.browser,
            self.course_id,
            self.discussion_id,
            self.thread_ids[1]
        )
        self.thread_page_1.visit()

    @attr('a11y')
    def test_page_accessibility(self):
        """Run the accessibility audit on both thread pages."""
        # The identical rule configuration was previously copy-pasted for each
        # page; iterate instead so the ignore list cannot drift between audits.
        for page in (self.thread_page_1, self.thread_page_2):
            page.a11y_audit.config.set_rules({
                "ignore": [
                    'section',  # TODO: AC-491
                    'aria-required-children',  # TODO: AC-534
                ]
            })
            page.a11y_audit.check_for_accessibility_errors()
class DiscussionOpenClosedThreadTest(BaseDiscussionTestCase):
    """
    Tests for checking the display of attributes on open and closed threads
    """
    def setUp(self):
        """Generate a unique thread id for this test run."""
        super(DiscussionOpenClosedThreadTest, self).setUp()
        self.thread_id = "test_thread_{}".format(uuid4().hex)

    def setup_user(self, roles=None):
        """Authenticate as a fresh user carrying the given list of forum roles."""
        # `roles=None` replaces the old mutable `[]` default, which Python
        # shares across calls and which can therefore leak state if mutated.
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()

    def setup_view(self, **thread_kwargs):
        """Push a single-thread fixture (with one response) built from thread_kwargs."""
        thread_kwargs.update({'commentable_id': self.discussion_id})
        view = SingleThreadViewFixture(
            Thread(id=self.thread_id, **thread_kwargs)
        )
        view.addResponse(Response(id="response1"))
        view.push()

    def setup_openclosed_thread_page(self, closed=False):
        """Create the thread (optionally closed), visit as a moderator, and toggle its open/closed state."""
        self.setup_user(roles=['Moderator'])
        if closed:
            self.setup_view(closed=True)
        else:
            self.setup_view()
        page = self.create_single_thread_page(self.thread_id)
        page.visit()
        page.close_open_thread()
        return page

    @attr(shard=2)
    def test_originally_open_thread_vote_display(self):
        """An originally-open thread, once closed, shows vote counts but no vote actions."""
        page = self.setup_openclosed_thread_page()
        self.assertFalse(page.is_element_visible('.thread-main-wrapper .action-vote'))
        self.assertTrue(page.is_element_visible('.thread-main-wrapper .display-vote'))
        self.assertFalse(page.is_element_visible('.response_response1 .action-vote'))
        self.assertTrue(page.is_element_visible('.response_response1 .display-vote'))

    @attr(shard=2)
    def test_originally_closed_thread_vote_display(self):
        """An originally-closed thread, once re-opened, shows vote actions instead of plain counts."""
        page = self.setup_openclosed_thread_page(True)
        self.assertTrue(page.is_element_visible('.thread-main-wrapper .action-vote'))
        self.assertFalse(page.is_element_visible('.thread-main-wrapper .display-vote'))
        self.assertTrue(page.is_element_visible('.response_response1 .action-vote'))
        self.assertFalse(page.is_element_visible('.response_response1 .display-vote'))

    @attr('a11y')
    def test_page_accessibility(self):
        """Audit both the originally-open and originally-closed thread pages."""
        # The same ignore list previously appeared twice; loop over both
        # states so the configurations cannot drift apart.
        for closed in (False, True):
            page = self.setup_openclosed_thread_page(closed)
            page.a11y_audit.config.set_rules({
                'ignore': [
                    'section',  # TODO: AC-491
                    'aria-required-children',  # TODO: AC-534
                    'color-contrast',  # Commented out for now because they reproducibly fail on Jenkins but not locally
                ]
            })
            page.a11y_audit.check_for_accessibility_errors()
@attr(shard=2)
class DiscussionCommentDeletionTest(BaseDiscussionTestCase):
    """
    Tests for deleting comments displayed beneath responses in the single thread view.
    """
    def setup_user(self, roles=None):
        """Authenticate as a fresh user carrying the given list of forum roles."""
        # `roles=None` replaces the old mutable `[]` default, which Python
        # shares across calls and which can therefore leak state if mutated.
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()

    def setup_view(self):
        """Push a thread whose single response has a comment by another user and one by self."""
        view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread", commentable_id=self.discussion_id))
        view.addResponse(
            Response(id="response1"), [
                Comment(id="comment_other_author"),
                Comment(id="comment_self_author", user_id=self.user_id, thread_id="comment_deletion_test_thread")
            ]
        )
        view.push()

    def test_comment_deletion_as_student(self):
        """Students may delete only their own comments."""
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("comment_deletion_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_deletable("comment_self_author"))
        self.assertTrue(page.is_comment_visible("comment_other_author"))
        self.assertFalse(page.is_comment_deletable("comment_other_author"))
        page.delete_comment("comment_self_author")

    def test_comment_deletion_as_moderator(self):
        """Moderators may delete both their own and other users' comments."""
        self.setup_user(roles=['Moderator'])
        self.setup_view()
        page = self.create_single_thread_page("comment_deletion_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_deletable("comment_self_author"))
        self.assertTrue(page.is_comment_deletable("comment_other_author"))
        page.delete_comment("comment_self_author")
        page.delete_comment("comment_other_author")
class DiscussionResponseEditTest(BaseDiscussionTestCase):
    """
    Tests for editing responses displayed beneath thread in the single thread view.
    """
    def setup_user(self, roles=None):
        """Authenticate as a fresh user carrying the given list of forum roles."""
        # `roles=None` replaces the old mutable `[]` default, which Python
        # shares across calls and which can therefore leak state if mutated.
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()

    def setup_view(self):
        """Push a thread with one response by another user and one by the current user."""
        view = SingleThreadViewFixture(Thread(id="response_edit_test_thread", commentable_id=self.discussion_id))
        view.addResponse(
            Response(id="response_other_author", user_id="other", thread_id="response_edit_test_thread"),
        )
        view.addResponse(
            Response(id="response_self_author", user_id=self.user_id, thread_id="response_edit_test_thread"),
        )
        view.push()

    def edit_response(self, page, response_id):
        """Edit the given response through the UI and verify the new body is shown."""
        self.assertTrue(page.is_response_editable(response_id))
        page.start_response_edit(response_id)
        new_response = "edited body"
        page.set_response_editor_value(response_id, new_response)
        page.submit_response_edit(response_id, new_response)

    @attr(shard=2)
    def test_edit_response_add_link(self):
        """
        Scenario: User submits valid input to the 'add link' form
        Given I am editing a response on a discussion page
        When I click the 'add link' icon in the editor toolbar
        And enter a valid url to the URL input field
        And enter a valid string in the Description input field
        And click the 'OK' button
        Then the edited response should contain the new link
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        response_id = "response_self_author"
        url = "http://example.com"
        description = "example"
        page.start_response_edit(response_id)
        page.set_response_editor_value(response_id, "")
        page.add_content_via_editor_button(
            "link", response_id, url, description)
        page.submit_response_edit(response_id, description)
        expected_response_html = (
            '<p><a href="{}">{}</a></p>'.format(url, description)
        )
        actual_response_html = page.q(
            css=".response_{} .response-body".format(response_id)
        ).html[0]
        self.assertEqual(expected_response_html, actual_response_html)

    @attr(shard=2)
    def test_edit_response_add_image(self):
        """
        Scenario: User submits valid input to the 'add image' form
        Given I am editing a response on a discussion page
        When I click the 'add image' icon in the editor toolbar
        And enter a valid url to the URL input field
        And enter a valid string in the Description input field
        And click the 'OK' button
        Then the edited response should contain the new image
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        response_id = "response_self_author"
        url = "http://www.example.com/something.png"
        description = "image from example.com"
        page.start_response_edit(response_id)
        page.set_response_editor_value(response_id, "")
        page.add_content_via_editor_button(
            "image", response_id, url, description)
        page.submit_response_edit(response_id, '')
        expected_response_html = (
            '<p><img src="{}" alt="{}" title=""></p>'.format(url, description)
        )
        actual_response_html = page.q(
            css=".response_{} .response-body".format(response_id)
        ).html[0]
        self.assertEqual(expected_response_html, actual_response_html)

    @attr(shard=2)
    def test_edit_response_add_image_error_msg(self):
        """
        Scenario: User submits invalid input to the 'add image' form
        Given I am editing a response on a discussion page
        When I click the 'add image' icon in the editor toolbar
        And enter an invalid url to the URL input field
        And enter an empty string in the Description input field
        And click the 'OK' button
        Then I should be shown 2 error messages
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        page.start_response_edit("response_self_author")
        page.add_content_via_editor_button(
            "image", "response_self_author", '', '')
        page.verify_link_editor_error_messages_shown()

    @attr(shard=2)
    def test_edit_response_add_decorative_image(self):
        """
        Scenario: User submits invalid input to the 'add image' form
        Given I am editing a response on a discussion page
        When I click the 'add image' icon in the editor toolbar
        And enter a valid url to the URL input field
        And enter an empty string in the Description input field
        And I check the 'image is decorative' checkbox
        And click the 'OK' button
        Then the edited response should contain the new image
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        response_id = "response_self_author"
        url = "http://www.example.com/something.png"
        description = ""
        page.start_response_edit(response_id)
        page.set_response_editor_value(response_id, "Some content")
        page.add_content_via_editor_button(
            "image", response_id, url, description, is_decorative=True)
        page.submit_response_edit(response_id, "Some content")
        expected_response_html = (
            '<p>Some content<img src="{}" alt="{}" title=""></p>'.format(
                url, description)
        )
        actual_response_html = page.q(
            css=".response_{} .response-body".format(response_id)
        ).html[0]
        self.assertEqual(expected_response_html, actual_response_html)

    @attr(shard=2)
    def test_edit_response_add_link_error_msg(self):
        """
        Scenario: User submits invalid input to the 'add link' form
        Given I am editing a response on a discussion page
        When I click the 'add link' icon in the editor toolbar
        And enter an invalid url to the URL input field
        And enter an empty string in the Description input field
        And click the 'OK' button
        Then I should be shown 2 error messages
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        page.start_response_edit("response_self_author")
        page.add_content_via_editor_button(
            "link", "response_self_author", '', '')
        page.verify_link_editor_error_messages_shown()

    @attr(shard=2)
    def test_edit_response_as_student(self):
        """
        Scenario: Students should be able to edit the response they created not responses of other users
        Given that I am on discussion page with student logged in
        When I try to edit the response created by student
        Then the response should be edited and rendered successfully
        And responses from other users should be shown over there
        And the student should be able to edit the response of other people
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_response_visible("response_other_author"))
        self.assertFalse(page.is_response_editable("response_other_author"))
        self.edit_response(page, "response_self_author")

    @attr(shard=2)
    def test_edit_response_as_moderator(self):
        """
        Scenario: Moderator should be able to edit the response they created and responses of other users
        Given that I am on discussion page with moderator logged in
        When I try to edit the response created by moderator
        Then the response should be edited and rendered successfully
        And I try to edit the response created by other users
        Then the response should be edited and rendered successfully
        """
        self.setup_user(roles=["Moderator"])
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        self.edit_response(page, "response_self_author")
        self.edit_response(page, "response_other_author")

    @attr(shard=2)
    @flaky  # TODO fix this, see TNL-5453
    def test_vote_report_endorse_after_edit(self):
        """
        Scenario: Moderator should be able to vote, report or endorse after editing the response.
        Given that I am on discussion page with moderator logged in
        When I try to edit the response created by moderator
        Then the response should be edited and rendered successfully
        And I try to edit the response created by other users
        Then the response should be edited and rendered successfully
        And I try to vote the response created by moderator
        Then the response should not be able to be voted
        And I try to vote the response created by other users
        Then the response should be voted successfully
        And I try to report the response created by moderator
        Then the response should not be able to be reported
        And I try to report the response created by other users
        Then the response should be reported successfully
        And I try to endorse the response created by moderator
        Then the response should be endorsed successfully
        And I try to endorse the response created by other users
        Then the response should be endorsed successfully
        """
        self.setup_user(roles=["Moderator"])
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        self.edit_response(page, "response_self_author")
        self.edit_response(page, "response_other_author")
        page.cannot_vote_response('response_self_author')
        page.vote_response('response_other_author')
        page.cannot_report_response('response_self_author')
        page.report_response('response_other_author')
        page.endorse_response('response_self_author')
        page.endorse_response('response_other_author')

    @attr('a11y')
    def test_page_accessibility(self):
        """Run the accessibility audit on the single-thread page."""
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.a11y_audit.config.set_rules({
            'ignore': [
                'section',  # TODO: AC-491
                'aria-required-children',  # TODO: AC-534
            ]
        })
        page.visit()
        page.a11y_audit.check_for_accessibility_errors()
class DiscussionCommentEditTest(BaseDiscussionTestCase):
"""
Tests for editing comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
    def setup_view(self):
        """Push a thread whose single response has a comment by another user and one by self."""
        view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread", commentable_id=self.discussion_id))
        view.addResponse(
            Response(id="response1"),
            [Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
        view.push()
def edit_comment(self, page, comment_id):
page.start_comment_edit(comment_id)
new_comment = "edited body"
page.set_comment_editor_value(comment_id, new_comment)
page.submit_comment_edit(comment_id, new_comment)
    @attr(shard=2)
    def test_edit_comment_as_student(self):
        """Students may edit their own comments but not other users' comments."""
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        self.assertTrue(page.is_comment_visible("comment_other_author"))
        self.assertFalse(page.is_comment_editable("comment_other_author"))
        self.edit_comment(page, "comment_self_author")
    @attr(shard=2)
    def test_edit_comment_as_moderator(self):
        """Moderators may edit both their own and other users' comments."""
        self.setup_user(roles=["Moderator"])
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        self.assertTrue(page.is_comment_editable("comment_other_author"))
        self.edit_comment(page, "comment_self_author")
        self.edit_comment(page, "comment_other_author")
    @attr(shard=2)
    def test_cancel_comment_edit(self):
        """Cancelling an in-progress edit restores the comment's original body."""
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        original_body = page.get_comment_body("comment_self_author")
        page.start_comment_edit("comment_self_author")
        page.set_comment_editor_value("comment_self_author", "edited body")
        # cancel_comment_edit also asserts the displayed body reverts to original_body.
        page.cancel_comment_edit("comment_self_author", original_body)
@attr(shard=2)
def test_editor_visibility(self):
"""Only one editor should be visible at a time within a single response"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_add_comment_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.set_comment_editor_value("comment_self_author", "edited body")
page.start_comment_edit("comment_other_author")
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
page.start_response_edit("response1")
self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
self.assertTrue(page.is_response_editor_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_response_editor_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.cancel_comment_edit("comment_self_author", original_body)
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
@attr('a11y')
def test_page_accessibility(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'aria-required-children', # TODO: AC-534
]
})
page.a11y_audit.check_for_accessibility_errors()
@attr(shard=2)
class DiscussionEditorPreviewTest(UniqueCourseTest):
    """Tests for the live preview pane shown beside the discussion new-post editor."""

    def setUp(self):
        # Install a course, authenticate, and open the new-post form so each
        # test only needs to type into the editor and inspect the preview.
        super(DiscussionEditorPreviewTest, self).setUp()
        CourseFixture(**self.course_info).install()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.page = DiscussionTabHomePage(self.browser, self.course_id)
        self.page.visit()
        self.page.click_new_post_button()

    def test_text_rendering(self):
        """When I type plain text into the editor, it should be rendered as plain text in the preview box"""
        self.page.set_new_post_editor_value("Some plain text")
        # The preview wraps plain text in a single paragraph element.
        self.assertEqual(self.page.get_new_post_preview_value(), "<p>Some plain text</p>")

    def test_markdown_rendering(self):
        """When I type Markdown into the editor, it should be rendered as formatted Markdown in the preview box"""
        self.page.set_new_post_editor_value(
            "Some markdown\n"
            "\n"
            "- line 1\n"
            "- line 2"
        )
        self.assertEqual(self.page.get_new_post_preview_value(), (
            "<p>Some markdown</p>\n"
            "\n"
            "<ul>\n"
            "<li>line 1</li>\n"
            "<li>line 2</li>\n"
            "</ul>"
        ))

    def test_mathjax_rendering_in_order(self):
        """
        Tests that mathjax is rendered in proper order.
        When user types mathjax expressions into discussion editor, it should render in the proper
        order.
        """
        self.page.set_new_post_editor_value(
            'Text line 1 \n'
            '$$e[n]=d_1$$ \n'
            'Text line 2 \n'
            '$$e[n]=d_2$$'
        )
        # Only the plain text lines appear in the preview text; their relative
        # order must be preserved around the MathJax expressions.
        self.assertEqual(self.page.get_new_post_preview_text(), 'Text line 1\nText line 2')
@attr(shard=2)
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
    """
    Tests for inline discussions
    """

    def setUp(self):
        super(InlineDiscussionTest, self).setUp()
        self.thread_ids = []
        # Two discussion xblocks in the same unit so cross-block interaction
        # (see test_dual_discussion_xblock) can be exercised.
        self.discussion_id = "test_discussion_{}".format(uuid4().hex)
        self.additional_discussion_id = "test_discussion_{}".format(uuid4().hex)
        self.course_fix = CourseFixture(**self.course_info).add_children(
            XBlockFixtureDesc("chapter", "Test Section").add_children(
                XBlockFixtureDesc("sequential", "Test Subsection").add_children(
                    XBlockFixtureDesc("vertical", "Test Unit").add_children(
                        XBlockFixtureDesc(
                            "discussion",
                            "Test Discussion",
                            metadata={"discussion_id": self.discussion_id}
                        ),
                        XBlockFixtureDesc(
                            "discussion",
                            "Test Discussion 1",
                            metadata={"discussion_id": self.additional_discussion_id}
                        )
                    )
                )
            )
        ).install()
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.courseware_page.visit()
        self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
        self.additional_discussion_page = InlineDiscussionPage(self.browser, self.additional_discussion_id)

    def setup_thread_page(self, thread_id):
        """Expand the inline discussion and open the page for ``thread_id``."""
        self.discussion_page.expand_discussion()
        self.discussion_page.show_thread(thread_id)
        self.thread_page = self.discussion_page.thread_page  # pylint: disable=attribute-defined-outside-init

    # This test is too flaky to run at all. TNL-6215
    @attr('a11y')
    @nottest
    def test_inline_a11y(self):
        """
        Tests Inline Discussion for accessibility issues.
        """
        self.setup_multiple_threads(thread_count=3)
        # First test the a11y of the expanded list of threads
        self.discussion_page.expand_discussion()
        self.discussion_page.a11y_audit.config.set_rules({
            'ignore': [
                'section'
            ]
        })
        self.discussion_page.a11y_audit.check_for_accessibility_errors()
        # Now show the first thread and test the a11y again
        self.discussion_page.show_thread(self.thread_ids[0])
        self.discussion_page.a11y_audit.check_for_accessibility_errors()
        # Finally show the new post form and test its a11y
        self.discussion_page.click_new_post_button()
        self.discussion_page.a11y_audit.check_for_accessibility_errors()

    def test_add_a_post_is_present_if_can_create_thread_when_expanded(self):
        self.discussion_page.expand_discussion()
        # Add a Post link is present
        self.assertTrue(self.discussion_page.q(css='.new-post-btn').present)

    def test_initial_render(self):
        # The inline discussion starts out collapsed.
        self.assertFalse(self.discussion_page.is_discussion_expanded())

    def test_expand_discussion_empty(self):
        self.discussion_page.expand_discussion()
        self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)

    def check_anonymous_to_peers(self, is_staff):
        """Push an anonymous-to-peers thread and verify only staff see the author."""
        thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
        thread_fixture = SingleThreadViewFixture(thread)
        thread_fixture.push()
        self.setup_thread_page(thread.get("id"))  # pylint: disable=no-member
        self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)

    def test_anonymous_to_peers_threads_as_staff(self):
        AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
        self.courseware_page.visit()
        self.check_anonymous_to_peers(True)

    def test_anonymous_to_peers_threads_as_peer(self):
        self.check_anonymous_to_peers(False)

    def test_discussion_blackout_period(self):
        """During an active blackout window, responding and thread actions are disabled."""
        now = datetime.datetime.now(UTC)
        # Configure a blackout window that is currently active
        # (started 14 days ago, ends in 2 days).
        self.course_fix.add_advanced_settings(
            {
                u"discussion_blackouts": {
                    "value": [
                        [
                            (now - datetime.timedelta(days=14)).isoformat(),
                            (now + datetime.timedelta(days=2)).isoformat()
                        ]
                    ]
                }
            }
        )
        self.course_fix._add_advanced_settings()
        self.browser.refresh()
        thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
        thread_fixture = SingleThreadViewFixture(thread)
        thread_fixture.addResponse(
            Response(id="response1"),
            [Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)])
        thread_fixture.push()
        self.setup_thread_page(thread.get("id"))  # pylint: disable=no-member
        self.assertFalse(self.thread_page.has_add_response_button())
        self.assertFalse(self.thread_page.is_element_visible("action-more"))

    def test_dual_discussion_xblock(self):
        """
        Scenario: Two discussion xblocks in one unit shouldn't override their actions
        Given that I'm on a courseware page where there are two inline discussions
        When I click on the first discussion block's new post button
        Then I should be shown only the new post form for the first block
        When I click on the second discussion block's new post button
        Then I should be shown both new post forms
        When I cancel the first form
        Then I should be shown only the new post form for the second block
        When I cancel the second form
        And I click on the first discussion block's new post button
        Then I should be shown only the new post form for the first block
        When I cancel the first form
        Then I should be shown none of the forms
        """
        self.discussion_page.wait_for_page()
        self.additional_discussion_page.wait_for_page()
        # Expand the first discussion, click to add a post
        self.discussion_page.expand_discussion()
        self.discussion_page.click_new_post_button()
        # Verify that only the first discussion's form is shown
        self.assertIsNotNone(self.discussion_page.new_post_form)
        self.assertIsNone(self.additional_discussion_page.new_post_form)
        # Expand the second discussion, click to add a post
        self.additional_discussion_page.expand_discussion()
        self.additional_discussion_page.click_new_post_button()
        # Verify that both discussion's forms are shown
        self.assertIsNotNone(self.discussion_page.new_post_form)
        self.assertIsNotNone(self.additional_discussion_page.new_post_form)
        # Cancel the first form
        self.discussion_page.click_cancel_new_post()
        # Verify that only the second discussion's form is shown
        self.assertIsNone(self.discussion_page.new_post_form)
        self.assertIsNotNone(self.additional_discussion_page.new_post_form)
        # Cancel the second form and click to show the first one
        self.additional_discussion_page.click_cancel_new_post()
        self.discussion_page.click_new_post_button()
        # Verify that only the first discussion's form is shown
        self.assertIsNotNone(self.discussion_page.new_post_form)
        self.assertIsNone(self.additional_discussion_page.new_post_form)
        # Cancel the first form
        self.discussion_page.click_cancel_new_post()
        # Verify that neither discussion's forms are shown
        self.assertIsNone(self.discussion_page.new_post_form)
        self.assertIsNone(self.additional_discussion_page.new_post_form)
@attr(shard=2)
class DiscussionUserProfileTest(UniqueCourseTest):
    """
    Tests for user profile page in discussion tab.
    """
    PAGE_SIZE = 20  # discussion.views.THREADS_PER_PAGE
    PROFILED_USERNAME = "profiled-user"

    def setUp(self):
        super(DiscussionUserProfileTest, self).setUp()
        self.setup_course()
        # The following line creates a user enrolled in our course, whose
        # threads will be viewed, but not the one who will view the page.
        # It isn't necessary to log them in, but using the AutoAuthPage
        # saves a lot of code.
        self.profiled_user_id = self.setup_user(username=self.PROFILED_USERNAME)
        # now create a second user who will view the profile.
        self.user_id = self.setup_user()

    def setup_course(self):
        """
        Set up the course for the discussion user-profile tests.
        """
        return CourseFixture(**self.course_info).install()

    def setup_user(self, roles=None, **user_info):
        """
        Helper method to create and authenticate a user.

        ``roles`` is an optional list of role names; ``user_info`` is passed
        through to AutoAuthPage (e.g. ``username=...``).  Returns the user id.
        """
        roles_str = ''
        if roles:
            roles_str = ','.join(roles)
        return AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str, **user_info).visit().get_user_id()

    def test_redirects_to_learner_profile(self):
        """
        Scenario: Verify that learner-profile link is present on forum discussions page and we can navigate to it.
        Given that I am on discussion forum user's profile page.
        And I can see a username on the page
        When I click on my username.
        Then I will be navigated to Learner Profile page.
        And I can see my username on Learner Profile page
        """
        learner_profile_page = LearnerProfilePage(self.browser, self.PROFILED_USERNAME)
        page = DiscussionUserProfilePage(
            self.browser,
            self.course_id,
            self.profiled_user_id,
            self.PROFILED_USERNAME
        )
        page.visit()
        page.click_on_sidebar_username()
        learner_profile_page.wait_for_page()
        self.assertTrue(learner_profile_page.field_is_visible('username'))

    def test_learner_profile_roles(self):
        """
        Test that on the learner profile page user roles are correctly listed according to the course.
        """
        # Setup a learner with roles in a Course-A.
        expected_student_roles = ['Administrator', 'Community TA', 'Moderator', 'Student']
        self.profiled_user_id = self.setup_user(
            roles=expected_student_roles,
            username=self.PROFILED_USERNAME
        )
        # Visit the page and verify the roles are listed correctly.
        page = DiscussionUserProfilePage(
            self.browser,
            self.course_id,
            self.profiled_user_id,
            self.PROFILED_USERNAME
        )
        page.visit()
        student_roles = page.get_user_roles()
        self.assertEqual(student_roles, ', '.join(expected_student_roles))
        # Save the course_id of Course-A before setting up a new course.
        old_course_id = self.course_id
        # Setup Course-B and set user do not have additional roles and test roles are displayed correctly.
        self.course_info['number'] = self.unique_id
        self.setup_course()
        new_course_id = self.course_id
        # Set the user to have no extra role in the Course-B and verify the existing
        # user is updated.
        profiled_student_user_id = self.setup_user(roles=None, username=self.PROFILED_USERNAME)
        self.assertEqual(self.profiled_user_id, profiled_student_user_id)
        self.assertNotEqual(old_course_id, new_course_id)
        # Visit the user profile in course discussion page of Course-B. Make sure the
        # roles are listed correctly.
        page = DiscussionUserProfilePage(
            self.browser,
            self.course_id,
            self.profiled_user_id,
            self.PROFILED_USERNAME
        )
        page.visit()
        self.assertEqual(page.get_user_roles(), u'Student')
class DiscussionSearchAlertTest(UniqueCourseTest):
    """
    Tests for spawning and dismissing alerts related to user search actions and their results.
    """
    SEARCHED_USERNAME = "gizmo"

    def setUp(self):
        super(DiscussionSearchAlertTest, self).setUp()
        CourseFixture(**self.course_info).install()
        # first auto auth call sets up a user that we will search for in some tests
        self.searched_user_id = AutoAuthPage(
            self.browser,
            username=self.SEARCHED_USERNAME,
            course_id=self.course_id
        ).visit().get_user_id()
        # this auto auth call creates the actual session user
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.page = DiscussionTabHomePage(self.browser, self.course_id)
        self.page.visit()

    def setup_corrected_text(self, text):
        """Stub the search service to return ``text`` as its spelling correction."""
        SearchResultFixture(SearchResult(corrected_text=text)).push()

    def check_search_alert_messages(self, expected):
        """
        Assert that the displayed search alerts match ``expected``.

        ``expected`` is a list of substrings, one per expected alert, matched
        case-insensitively against the alerts in display order.

        Fix: the original used ``all(map(lambda msg, sub: ..., actual, expected))``,
        which truncates to the shorter sequence on Python 3 — so an empty
        alert list vacuously matched ANY expectation (and unequal lengths
        crashed on Python 2's None-padding).  The lengths are now compared
        explicitly before matching pairwise.
        """
        actual = self.page.get_search_alert_messages()
        self.assertEqual(len(actual), len(expected))
        for message, substring in zip(actual, expected):
            self.assertIn(substring.lower(), message.lower())

    @attr(shard=2)
    def test_no_rewrite(self):
        """No spelling correction and no results produces a single "no posts" alert."""
        self.setup_corrected_text(None)
        self.page.perform_search()
        self.check_search_alert_messages(["no posts"])

    @attr(shard=2)
    def test_rewrite_dismiss(self):
        """A rewrite alert can be dismissed, leaving no alerts."""
        self.setup_corrected_text("foo")
        self.page.perform_search()
        self.check_search_alert_messages(["foo"])
        self.page.dismiss_alert_message("foo")
        self.check_search_alert_messages([])

    @attr(shard=2)
    def test_new_search(self):
        """Each new search replaces the previous search's alerts."""
        self.setup_corrected_text("foo")
        self.page.perform_search()
        self.check_search_alert_messages(["foo"])
        self.setup_corrected_text("bar")
        self.page.perform_search()
        self.check_search_alert_messages(["bar"])
        self.setup_corrected_text(None)
        self.page.perform_search()
        self.check_search_alert_messages(["no posts"])

    @attr(shard=2)
    def test_rewrite_and_user(self):
        """Searching a username with a rewrite shows both the rewrite and user alerts."""
        self.setup_corrected_text("foo")
        self.page.perform_search(self.SEARCHED_USERNAME)
        self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])

    @attr(shard=2)
    def test_user_only(self):
        """Searching only a username shows "no posts" plus a link to the user."""
        self.setup_corrected_text(None)
        self.page.perform_search(self.SEARCHED_USERNAME)
        self.check_search_alert_messages(["no posts", self.SEARCHED_USERNAME])
        # make sure clicking the link leads to the user profile page
        UserProfileViewFixture([]).push()
        self.page.get_search_alert_links().first.click()
        DiscussionUserProfilePage(
            self.browser,
            self.course_id,
            self.searched_user_id,
            self.SEARCHED_USERNAME
        ).wait_for_page()

    @attr('a11y')
    def test_page_accessibility(self):
        """Audit the discussion home page for accessibility errors."""
        self.page.a11y_audit.config.set_rules({
            'ignore': [
                'section',  # TODO: AC-491
                'aria-required-children',  # TODO: AC-534
            ]
        })
        self.page.a11y_audit.check_for_accessibility_errors()
@attr(shard=2)
class DiscussionSortPreferenceTest(UniqueCourseTest):
    """
    Tests for the discussion thread sort preference: its default value,
    changing it, and persistence across page reloads.
    """

    def setUp(self):
        super(DiscussionSortPreferenceTest, self).setUp()
        # Create a course to register for.
        CourseFixture(**self.course_info).install()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
        self.sort_page.visit()
        self.sort_page.show_all_discussions()

    def test_default_sort_preference(self):
        """
        Test to check the default sorting preference of user.
        (Default = "activity", i.e. most recent activity date.)
        """
        selected_sort = self.sort_page.get_selected_sort_preference()
        self.assertEqual(selected_sort, "activity")

    @skip_if_browser('chrome')  # TODO TE-1542 and TE-1543
    def test_change_sort_preference(self):
        """
        Test that if user sorting preference is changing properly.
        """
        selected_sort = ""
        # Cycle through every sort option and check the selection sticks.
        for sort_type in ["votes", "comments", "activity"]:
            self.assertNotEqual(selected_sort, sort_type)
            self.sort_page.change_sort_preference(sort_type)
            selected_sort = self.sort_page.get_selected_sort_preference()
            self.assertEqual(selected_sort, sort_type)

    @skip_if_browser('chrome')  # TODO TE-1542 and TE-1543
    def test_last_preference_saved(self):
        """
        Test that user last preference is saved.
        """
        selected_sort = ""
        for sort_type in ["votes", "comments", "activity"]:
            self.assertNotEqual(selected_sort, sort_type)
            self.sort_page.change_sort_preference(sort_type)
            selected_sort = self.sort_page.get_selected_sort_preference()
            self.assertEqual(selected_sort, sort_type)
            # Reload the page and verify the choice survived the round trip.
            self.sort_page.refresh_page()
            self.sort_page.show_all_discussions()
            selected_sort = self.sort_page.get_selected_sort_preference()
            self.assertEqual(selected_sort, sort_type)
|
synergeticsedx/deployment-wipro
|
common/test/acceptance/tests/discussion/test_discussion.py
|
Python
|
agpl-3.0
| 62,051
|
[
"VisIt"
] |
3130f4aad59863128b8a44f65ee1678ed5cbe6de0cd3f62e6f42919d07d7ff78
|
# -*- coding: utf-8 -*-
import csv, os, time, pprint
from __main__ import qt, ctk, slicer
from CIP.logic.SlicerUtil import SlicerUtil
class PreProcessingWidget():
    """Reusable UI panel for CT pre-processing: an optional noise-filtering
    stage (NLM / Median / Gaussian with 2D/3D and strength choices) and a
    fast/slow partial lung label-map creation option.  The actual processing
    is delegated to PreProcessingLogic."""

    def __init__(self, moduleName, parentWidget = None):
        """Widget constructor (existing module)"""
        # EventsTrigger.__init__(self)
        if not parentWidget:
            # No parent supplied: create a standalone MRML-aware widget.
            self.parent = slicer.qMRMLWidget()
            self.parent.setLayout(qt.QVBoxLayout())
            self.parent.setMRMLScene(slicer.mrmlScene)
        else:
            self.parent = parentWidget
        self.__moduleName__ = moduleName
        self.layout = self.parent.layout()
        self.logic = PreProcessingLogic(moduleName)

    def setup(self):
        """Build the filtering and label-map-creation GUI sections."""
        # ---- Filtering section: frame, On/Off toggle, filter options ----
        self.FilteringFrame = qt.QFrame()
        self.FilteringFrame.setLayout(qt.QVBoxLayout())
        self.FilteringFrame.enabled = False
        self.FilteringFrame.setObjectName('FilteringFrame')
        self.FilteringFrame.setStyleSheet('#FilteringFrame {border: 1px solid lightGray; color: black; }')
        self.layout.addWidget( self.FilteringFrame )
        filterLabel = qt.QLabel()
        filterLabel.setText('Filtering')
        self.FilteringFrame.layout().addWidget(filterLabel)
        # On/Off radio buttons toggle visibility of the filter options below.
        radioButtonsGroup = qt.QGroupBox()
        radioButtonsGroup.setLayout(qt.QHBoxLayout())
        radioButtonsGroup.setFixedWidth(120)
        radioButtonsGroup.setObjectName('radioButtonsGroup')
        radioButtonsGroup.setStyleSheet('#radioButtonsGroup {border: 1px solid white; color: black; }')
        self.filterOnRadioButton = qt.QRadioButton()
        self.filterOnRadioButton.setText('On')
        self.filterOnRadioButton.setChecked(0)
        radioButtonsGroup.layout().addWidget(self.filterOnRadioButton)
        self.filterOffRadioButton = qt.QRadioButton()
        self.filterOffRadioButton.setText('Off')
        self.filterOffRadioButton.setChecked(1)  # filtering is off by default
        radioButtonsGroup.layout().addWidget(self.filterOffRadioButton)
        self.FilteringFrame.layout().addWidget(radioButtonsGroup)
        # Options frame, hidden until filtering is switched on.
        self.filterOptionsFrame = qt.QFrame()
        self.filterOptionsFrame.setLayout(qt.QVBoxLayout())
        self.filterOptionsFrame.setObjectName('filterOptionsFrame')
        self.filterOptionsFrame.setStyleSheet('#filterOptionsFrame {border: 0.5px solid lightGray; color: black; }')
        self.filterOptionsFrame.hide()
        self.FilteringFrame.layout().addWidget(self.filterOptionsFrame)
        self.filterApplication = qt.QCheckBox()
        self.filterApplication.setText('Filter for Phenotype Analysis')
        self.filterApplication.setChecked(0)
        self.filterOptionsFrame.layout().addWidget(self.filterApplication)
        # Filter method selection (NLM is the default).
        filterOptionsGroup = qt.QGroupBox()
        filterOptionsGroup.setLayout(qt.QHBoxLayout())
        filterOptionsGroup.setFixedWidth(220)
        filterOptionsGroup.setObjectName('filterOptionsGroup')
        filterOptionsGroup.setStyleSheet('#filterOptionsGroup {border: 1px solid white; color: black; }')
        self.NLMFilterRadioButton = qt.QRadioButton()
        self.NLMFilterRadioButton.setText('NLM')
        self.NLMFilterRadioButton.setChecked(1)
        filterOptionsGroup.layout().addWidget(self.NLMFilterRadioButton)
        self.MedianFilterRadioButton = qt.QRadioButton()
        self.MedianFilterRadioButton.setText('Median')
        self.MedianFilterRadioButton.setChecked(0)
        filterOptionsGroup.layout().addWidget(self.MedianFilterRadioButton)
        self.GaussianFilterRadioButton = qt.QRadioButton()
        self.GaussianFilterRadioButton.setText('Gaussian')
        self.GaussianFilterRadioButton.setChecked(0)
        filterOptionsGroup.layout().addWidget(self.GaussianFilterRadioButton)
        self.filterOptionsFrame.layout().addWidget(filterOptionsGroup)
        # Filter Params
        FilterParams = qt.QFrame()
        FilterParams.setLayout(qt.QVBoxLayout())
        self.filterOptionsFrame.layout().addWidget(FilterParams)
        # 2D/3D dimension choice (mutually exclusive push buttons).
        DimGroupBox = qt.QGroupBox()
        DimGroupBox.setLayout(qt.QHBoxLayout())
        DimGroupBox.setFixedWidth(180)
        DimGroupBox.setObjectName('DimGroupBox')
        DimGroupBox.setStyleSheet('#DimGroupBox {border: 1px solid white; color: black; }')
        FilterParams.layout().addWidget(DimGroupBox)
        FilterDimensionLabel = qt.QLabel()
        FilterDimensionLabel.setText('Dimensions: ')
        FilterDimensionLabel.setToolTip('Choose if the filter has to operate in 2D or 3D.')
        DimGroupBox.layout().addWidget(FilterDimensionLabel)
        self.Filt2DOption = qt.QPushButton()
        self.Filt2DOption.setText('2D')
        self.Filt2DOption.setCheckable(1)
        self.Filt2DOption.setChecked(1)  # 2D filtering is the default
        self.Filt2DOption.setAutoExclusive(1)
        self.Filt2DOption.setFixedWidth(45)
        DimGroupBox.layout().addWidget(self.Filt2DOption)
        self.Filt3DOption = qt.QPushButton()
        self.Filt3DOption.setText('3D')
        self.Filt3DOption.setCheckable(1)
        self.Filt3DOption.setChecked(0)
        self.Filt3DOption.setFixedWidth(45)
        self.Filt3DOption.setAutoExclusive(1)
        DimGroupBox.layout().addWidget(self.Filt3DOption)
        # Strength choice: Smooth (default) / Medium / Heavy.
        StrengthGroupBox = qt.QGroupBox()
        StrengthGroupBox.setLayout(qt.QHBoxLayout())
        StrengthGroupBox.setFixedWidth(270)
        StrengthGroupBox.setObjectName('StrengthGroupBox')
        StrengthGroupBox.setStyleSheet('#StrengthGroupBox {border: 1px solid white; color: black; }')
        FilterParams.layout().addWidget(StrengthGroupBox)
        FilterStrengthLabel = qt.QLabel()
        FilterStrengthLabel.setText('Strength: ')
        FilterStrengthLabel.setToolTip('Choose strength of the filtering process.')
        StrengthGroupBox.layout().addWidget(FilterStrengthLabel)
        self.SmoothOption = qt.QPushButton()
        self.SmoothOption.setText('Smooth')
        self.SmoothOption.setCheckable(1)
        self.SmoothOption.setChecked(1)
        self.SmoothOption.setAutoExclusive(1)
        self.SmoothOption.setFixedWidth(60)
        StrengthGroupBox.layout().addWidget(self.SmoothOption)
        self.MediumOption = qt.QPushButton()
        self.MediumOption.setText('Medium')
        self.MediumOption.setCheckable(1)
        self.MediumOption.setChecked(0)
        self.MediumOption.setFixedWidth(60)
        self.MediumOption.setAutoExclusive(1)
        StrengthGroupBox.layout().addWidget(self.MediumOption)
        self.HeavyOption = qt.QPushButton()
        self.HeavyOption.setText('Heavy')
        self.HeavyOption.setCheckable(1)
        self.HeavyOption.setChecked(0)
        self.HeavyOption.setFixedWidth(60)
        self.HeavyOption.setAutoExclusive(1)
        StrengthGroupBox.layout().addWidget(self.HeavyOption)
        # Downsampling option for label map creation
        self.LMCreationFrame = qt.QFrame()
        self.LMCreationFrame.setLayout(qt.QVBoxLayout())
        self.LMCreationFrame.enabled = False
        self.LMCreationFrame.setObjectName('LMCreationFrame')
        self.LMCreationFrame.setStyleSheet('#LMCreationFrame {border: 1px solid lightGray; color: black; }')
        self.parent.layout().addWidget(self.LMCreationFrame)
        LMCreationLabel = qt.QLabel()
        LMCreationLabel.setText('Label Map Creation:')
        self.LMCreationFrame.layout().addWidget(LMCreationLabel)
        # Fast = downsample before segmentation; Slow (default) = full resolution.
        self.DownSamplingGroupBox = qt.QGroupBox()
        self.DownSamplingGroupBox.setLayout(qt.QHBoxLayout())
        self.DownSamplingGroupBox.setFixedWidth(130)
        self.DownSamplingGroupBox.setObjectName('DownSamplingGroupBox')
        self.DownSamplingGroupBox.setStyleSheet('#DownSamplingGroupBox {border: 1px solid white; color: black; }')
        self.DownSamplingGroupBox.setToolTip('Choose between fast and slow label map creation.')
        self.LMCreationFrame.layout().addWidget(self.DownSamplingGroupBox)
        self.FastOption = qt.QRadioButton()
        self.FastOption.setText('Fast')
        self.FastOption.setCheckable(1)
        self.FastOption.setChecked(0)
        self.DownSamplingGroupBox.layout().addWidget(self.FastOption)
        self.SlowOption = qt.QRadioButton()
        self.SlowOption.setText('Slow')
        self.SlowOption.setCheckable(1)
        self.SlowOption.setChecked(1)
        self.DownSamplingGroupBox.layout().addWidget(self.SlowOption)
        # Wire the On/Off toggle to show/hide the filter options.
        self.filterOnRadioButton.connect('toggled(bool)', self.showFilterParams)
        self.filterOffRadioButton.connect('toggled(bool)', self.hideFilterParams)

    def showFilterParams(self):
        """Reveal the filter options (called when the 'On' toggle is selected)."""
        self.hideFilterOptions(False)

    def hideFilterParams(self):
        """Hide the filter options (called when the 'Off' toggle is selected)."""
        self.hideFilterOptions(True)

    def hideFilteringFrame(self, hide):
        """ Show/Hide the filtering frame
        param show: True/False
        """
        self.FilteringFrame.setHidden(hide)

    def enableFilteringFrame(self,enabled):
        """ Enable/Disable the filtering frame
        param enabled: True/False
        """
        self.FilteringFrame.setEnabled(enabled)

    def hideFilterOptions(self, hide):
        """ Show/Hide the filtering options
        param show: True/False
        """
        self.filterOptionsFrame.setHidden(hide)

    def enableFilterOptions(self,enabled):
        """ Enable/Disable the filtering options
        param enabled: True/False
        """
        self.filterOptionsFrame.setEnabled(enabled)

    def showLMFrame(self,show):
        """ Show/Hide the options for labelmap creation
        param enabled: True/False
        """
        # NOTE(review): setShown() looks like a deprecated Qt compatibility
        # API (setVisible is the modern call) — confirm against the Qt
        # version bundled with Slicer before changing.
        self.LMCreationFrame.setShown(show)

    def enableLMFrame(self,enabled):
        """ Enable/Disable the options for labelmap creation
        param enabled: True/False
        """
        self.LMCreationFrame.setEnabled(enabled)

    def filterInputCT(self,inputCT):
        """Apply the filter currently selected in the UI to ``inputCT``.

        The method radio buttons pick the algorithm, the Smooth/Medium/Heavy
        buttons pick per-method strength parameters, and the 2D option limits
        the third axis of the radii/neighborhood to a single slice.  The
        volume is processed by PreProcessingLogic.filterCT.
        """
        if self.NLMFilterRadioButton.checked:
            method = 'NLM'
            sr = [3,3,3]
            cr = [5,5,5]
            if self.Filt2DOption.checked:
                # Restrict search/comparison radii to one slice for 2D filtering.
                sr[2] = 1
                cr[2] = 1
            noise_power = 3.0  # Smooth filtering
            nlm_h = 0.8
            nlm_ps = 2.0
            if self.MediumOption.checked:  # Medium strength
                noise_power = 4.0
                nlm_h = 1.0
            elif self.HeavyOption.checked:  # Heavy strength
                noise_power = 5.0
                nlm_h = 1.2
            self.logic.filterCT(inputCT,method,s_rad=sr,c_rad=cr,noisePower=noise_power,h=nlm_h,ps=nlm_ps)
        elif self.MedianFilterRadioButton.checked:
            method = 'Median'
            neighborhood = [1,1,1]
            if self.MediumOption.checked:  # Medium strength
                neighborhood = [2,2,2]
            elif self.HeavyOption.checked:  # Heavy strength
                neighborhood = [3,3,3]
            if self.Filt2DOption.checked:  # 2D filtering
                neighborhood[2] = 1
            self.logic.filterCT(inputCT,method,n_rad=neighborhood)
        elif self.GaussianFilterRadioButton.checked:
            method = 'Gaussian'
            s = 1.0
            if self.MediumOption.checked:  # Medium strength
                s = 2.0
            elif self.HeavyOption.checked:  # Heavy strength
                s = 3.0
            self.logic.filterCT(inputCT,method,sigma=s)

    def createPartialLM(self,inputCT,labelMap):
        """Create a partial lung label map for ``inputCT`` into ``labelMap``,
        using the Fast/Slow choice from the UI, then show the CT as the
        background volume in all three slice views."""
        speed = 'Slow'
        if self.FastOption.checked:
            speed = 'Fast'
        self.logic.generatePartialLungLabelMap(inputCT,labelMap,speed)
        for color in ['Red', 'Yellow', 'Green']:
            slicer.app.layoutManager().sliceWidget(color).sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(inputCT.GetID())

    def warningMessageForLM(self):
        """Ask the user whether a lung label map should be created; returns
        the QMessageBox answer (Yes/No)."""
        answer = qt.QMessageBox.question(slicer.util.mainWindow(),self.__moduleName__, 'Do you want to create a lung label map?', qt.QMessageBox.Yes | qt.QMessageBox.No)
        return answer
#############################
##
class PreProcessingLogic(object):
    """Implements the CT pre-processing steps (noise filtering and partial
    lung label-map creation) by delegating to the corresponding Slicer CLI
    modules via ``slicer.cli.run``."""

    def __init__(self, moduleName):
        self.__moduleName__ = moduleName

    def filterCT(self, input_ct, method, s_rad=None, c_rad=None, noisePower=3.0, h=0.8, ps=2.0, n_rad=None, sigma=1.0):
        """Denoise ``input_ct`` in place with the selected filter.

        :param input_ct: scalar volume node; the filtered result overwrites it
            (input and output IDs are the same).
        :param method: 'NLM', 'Median' or 'Gaussian'; anything else is a no-op.
        :param s_rad: NLM search radius per axis (defaults to [3, 3, 3]).
        :param c_rad: NLM comparison radius per axis (defaults to [5, 5, 5]).
        :param noisePower: NLM noise power (CLI parameter iSigma).
        :param h: NLM filtering strength.
        :param ps: NLM preselection parameter.
        :param n_rad: median-filter neighborhood per axis (defaults to [1, 1, 1]).
        :param sigma: Gaussian-blur sigma.

        Fix: the original declared the list parameters as mutable default
        arguments (``s_rad=[3,3,3]`` etc.); defaults are evaluated once and
        shared across calls, so ``None`` sentinels are used instead.
        """
        if s_rad is None:
            s_rad = [3, 3, 3]
        if c_rad is None:
            c_rad = [5, 5, 5]
        if n_rad is None:
            n_rad = [1, 1, 1]
        if method == 'NLM':  # Non-local means filter
            parameters = {
                "ctFileName": input_ct.GetID(),
                "outputFileName": input_ct.GetID(),
                "iSigma": noisePower,
                "iRadiusSearch": s_rad,
                "iRadiusComp": c_rad,
                "iH": h,
                "iPs": ps,
            }
            slicer.cli.run(slicer.modules.generatenlmfilteredimage, None, parameters, wait_for_completion=True)
        elif method == 'Median':  # Median filter
            parameters = {
                "inputVolume": input_ct.GetID(),
                "outputVolume": input_ct.GetID(),
                "neighborhood": n_rad,
            }
            slicer.cli.run(slicer.modules.medianimagefilter, None, parameters, wait_for_completion=True)
        elif method == 'Gaussian':  # Gaussian blur
            parameters = {
                "inputVolume": input_ct.GetID(),
                "outputVolume": input_ct.GetID(),
                "sigma": sigma,
            }
            slicer.cli.run(slicer.modules.gaussianblurimagefilter, None, parameters, wait_for_completion=True)

    def generatePartialLungLabelMap(self, input_ct, label_map, speed):
        """Create partial lung label map from input ct image

        :param input_ct: CT scalar volume node.
        :param label_map: label-map node that receives the result.
        :param speed: 'Fast' segments a half-resolution copy of the CT and
            upsamples the resulting label map; anything else works at full
            resolution.
        """
        inputNode = input_ct
        if speed == 'Fast':
            # Segment a downsampled copy; the temporary node is removed below.
            inputNode = self.downsampleCT(input_ct)
        parameters = {
            "ctFileName": inputNode.GetID(),
            "outputLungMaskFileName": label_map.GetID(),
        }
        slicer.cli.run(slicer.modules.generatepartiallunglabelmap, None, parameters, wait_for_completion=True)
        if speed == 'Fast':
            label_map = self.upsampleLabel(label_map)
            slicer.mrmlScene.RemoveNode(inputNode)

    def downsampleCT(self, input_image):
        """Return a NEW volume node holding ``input_image`` resampled to
        double the in-plane pixel spacing (half resolution); slice spacing is
        preserved.  The caller owns (and should eventually remove) the node.
        """
        oldSpacing = input_image.GetSpacing()
        newSpacing = [oldSpacing[0] * 2, oldSpacing[1] * 2, oldSpacing[2]]
        # The original misleadingly named this 'upsampledNode'.
        resampledNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLScalarVolumeNode())
        parameters = {
            "outputPixelSpacing": newSpacing,
            "InputVolume": input_image.GetID(),
            "OutputVolume": resampledNode.GetID(),
        }
        slicer.cli.run(slicer.modules.resamplescalarvolume, None, parameters, wait_for_completion=True)
        return resampledNode

    def upsampleLabel(self, labelMap):
        """Resample ``labelMap`` IN PLACE back to half the current in-plane
        spacing (undoing :meth:`downsampleCT`), using nearest-neighbor
        interpolation so label values are preserved.  Returns the same node.
        """
        oldSpacing = labelMap.GetSpacing()
        newSpacing = [oldSpacing[0] / 2, oldSpacing[1] / 2, oldSpacing[2]]
        parameters = {
            "outputPixelSpacing": newSpacing,
            "InputVolume": labelMap.GetID(),
            "OutputVolume": labelMap.GetID(),
            # Nearest neighbor keeps discrete label values intact.
            "interpolationType": 'nearestNeighbor',
        }
        slicer.cli.run(slicer.modules.resamplescalarvolume, None, parameters, wait_for_completion=True)
        return labelMap
|
acil-bwh/SlicerCIP
|
Scripted/CIP_/CIP/ui/PreProcessingWidget.py
|
Python
|
bsd-3-clause
| 16,608
|
[
"Gaussian"
] |
6ef08df66b3cd2aa26c58cbfddda733012118863df835f7088333fbd27d87243
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2009 Gary Burton
# Contribution 2009 by Reinhard Mueller <reinhard.mueller@bytewise.at>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2013-2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Reports/Text Reports/Kinship Report"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.errors import ReportError
from gramps.gen.relationship import get_relationship_calculator
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC,
PARA_ALIGN_CENTER)
from gramps.gen.plug.menu import NumberOption, BooleanOption, PersonOption
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
#------------------------------------------------------------------------
#
# KinshipReport
#
#------------------------------------------------------------------------
class KinshipReport(Report):
    """Text report listing the kin (and optionally in-laws) of one person."""

    def __init__(self, database, options, user):
        """
        Create the KinshipReport object that produces the report.

        The arguments are:

        database        - the GRAMPS database instance
        options         - instance of the Options class for this report
        user            - a gen.user.User() instance

        This report needs the following parameters (class variables)
        that come in the options class.

        maxdescend    - Maximum generations of descendants to include.
        maxascend     - Maximum generations of ancestors to include.
        incspouses    - Whether to include spouses.
        inccousins    - Whether to include cousins.
        incaunts      - Whether to include aunts/uncles/nephews/nieces.
        pid           - The Gramps ID of the center person for the report.
        name_format   - Preferred format to display names
        incl_private  - Whether to include private data
        """
        Report.__init__(self, database, options, user)
        menu = options.menu

        stdoptions.run_private_data_option(self, menu)
        self.__db = self.database

        self.max_descend = menu.get_option_by_name('maxdescend').get_value()
        self.max_ascend = menu.get_option_by_name('maxascend').get_value()
        self.inc_spouses = menu.get_option_by_name('incspouses').get_value()
        self.inc_cousins = menu.get_option_by_name('inccousins').get_value()
        self.inc_aunts = menu.get_option_by_name('incaunts').get_value()
        pid = menu.get_option_by_name('pid').get_value()
        self.person = self.database.get_person_from_gramps_id(pid)
        # Identity comparison with None ('is None'), not equality ('== None').
        if self.person is None:
            raise ReportError(_("Person %s is not in the Database") % pid)

        rlocale = self.set_locale(menu.get_option_by_name('trans').get_value())

        stdoptions.run_name_format_option(self, menu)

        self.rel_calc = get_relationship_calculator(reinit=True,
                                                    clocale=rlocale)

        # Both maps are {Ga: {Gb: [person_handle, ...]}} keyed by the
        # generation distances defined in traverse_down/traverse_up.
        self.kinship_map = {}
        self.spouse_map = {}

    def write_report(self):
        """
        The routine that actually creates the report. At this point, the
        document is opened and ready for writing.
        """
        pname = self._name_display.display(self.person)

        self.doc.start_paragraph("KIN-Title")
        # feature request 2356: avoid genitive form
        title = self._("Kinship Report for %s") % pname
        mark = IndexMark(title, INDEX_TYPE_TOC, 1)
        self.doc.write_text(title, mark)
        self.doc.end_paragraph()

        if self.inc_spouses:
            spouse_handles = self.get_spouse_handles(self.person.get_handle())
            if spouse_handles:
                self.write_people(self._("Spouses"), spouse_handles)

        # Collect all descendants of the person
        self.traverse_down(self.person.get_handle(), 0, 1)

        # Collect all ancestors/aunts/uncles/nephews/cousins of the person
        self.traverse_up(self.person.get_handle(), 1, 0)

        # Write Kin
        for Ga, Gbs in self.kinship_map.items():
            for Gb in Gbs:
                # To understand these calculations, see:
                # http://en.wikipedia.org/wiki/Cousin#Mathematical_definitions
                x = min(Ga, Gb)
                y = abs(Ga - Gb)
                # Skip unrequested people
                if x == 1 and y > 0 and not self.inc_aunts:
                    continue
                elif x > 1 and not self.inc_cousins:
                    continue
                get_rel_str = self.rel_calc.get_plural_relationship_string
                title = get_rel_str(Ga, Gb, in_law_b=False)
                self.write_people(self._(title), self.kinship_map[Ga][Gb])
                if (self.inc_spouses and
                        Ga in self.spouse_map and
                        Gb in self.spouse_map[Ga]):
                    title = get_rel_str(Ga, Gb, in_law_b=True)
                    self.write_people(self._(title), self.spouse_map[Ga][Gb])

    def traverse_down(self, person_handle, Ga, Gb, skip_handle=None):
        """
        Populate a map of arrays containing person handles for the descendants
        of the passed person. This function calls itself recursively until it
        reaches max_descend.

        Parameters:
        person_handle: the handle of the person to go to next
        Ga: The number of generations from the main person to the common
           ancestor. This should be incremented when going up generations, and
           left alone when going down generations.
        Gb: The number of generations from this person (person_handle) to the
           common ancestor. This should be incremented when going down
           generations and set back to zero when going up generations.
        skip_handle: an optional handle to skip when going down. This is useful
           to skip the descendant that brought you this generation in the first
           place.
        """
        for child_handle in self.get_children_handles(person_handle):
            if child_handle != skip_handle:
                self.add_kin(child_handle, Ga, Gb)

                if self.inc_spouses:
                    for spouse_handle in self.get_spouse_handles(child_handle):
                        self.add_spouse(spouse_handle, Ga, Gb)

                if Gb < self.max_descend:
                    self.traverse_down(child_handle, Ga, Gb + 1)

    def traverse_up(self, person_handle, Ga, Gb):
        """
        Populate a map of arrays containing person handles for the ancestors
        of the passed person. This function calls itself recursively until it
        reaches max_ascend.

        Parameters:
        person_handle: the handle of the person to go to next
        Ga: The number of generations from the main person to the common
           ancestor. This should be incremented when going up generations, and
           left alone when going down generations.
        Gb: The number of generations from this person (person_handle) to the
           common ancestor. This should be incremented when going down
           generations and set back to zero when going up generations.
        """
        parent_handles = self.get_parent_handles(person_handle)
        for parent_handle in parent_handles:
            self.add_kin(parent_handle, Ga, Gb)
            # Descend again from each ancestor to pick up aunts/uncles/cousins,
            # skipping the line we came up through.
            self.traverse_down(parent_handle, Ga, Gb + 1, person_handle)
            if Ga < self.max_ascend:
                self.traverse_up(parent_handle, Ga + 1, 0)

    def add_kin(self, person_handle, Ga, Gb):
        """
        Add a person handle to the kin map.
        """
        if Ga not in self.kinship_map:
            self.kinship_map[Ga] = {}
        if Gb not in self.kinship_map[Ga]:
            self.kinship_map[Ga][Gb] = []
        if person_handle not in self.kinship_map[Ga][Gb]:
            self.kinship_map[Ga][Gb].append(person_handle)

    def add_spouse(self, spouse_handle, Ga, Gb):
        """
        Add a person handle to the spouse map.
        """
        if Ga not in self.spouse_map:
            self.spouse_map[Ga] = {}
        if Gb not in self.spouse_map[Ga]:
            self.spouse_map[Ga][Gb] = []
        if spouse_handle not in self.spouse_map[Ga][Gb]:
            self.spouse_map[Ga][Gb].append(spouse_handle)

    def get_parent_handles(self, person_handle):
        """
        Return an array of handles for all the parents of the
        given person handle.
        """
        parent_handles = []
        person = self.__db.get_person_from_handle(person_handle)
        family_handle = person.get_main_parents_family_handle()
        if family_handle:
            family = self.__db.get_family_from_handle(family_handle)
            father_handle = family.get_father_handle()
            if father_handle:
                parent_handles.append(father_handle)
            mother_handle = family.get_mother_handle()
            if mother_handle:
                parent_handles.append(mother_handle)
        return parent_handles

    def get_spouse_handles(self, person_handle):
        """
        Return an array of handles for all the spouses of the
        given person handle.
        """
        spouses = []
        person = self.__db.get_person_from_handle(person_handle)
        for family_handle in person.get_family_handle_list():
            family = self.__db.get_family_from_handle(family_handle)
            father_handle = family.get_father_handle()
            mother_handle = family.get_mother_handle()
            # The spouse is whichever partner in the family is not this person.
            spouse_handle = None
            if mother_handle and father_handle == person_handle:
                spouse_handle = mother_handle
            elif father_handle and mother_handle == person_handle:
                spouse_handle = father_handle

            if spouse_handle and spouse_handle not in spouses:
                spouses.append(spouse_handle)
        return spouses

    def get_children_handles(self, person_handle):
        """
        Return an array of handles for all the children of the
        given person handle.
        """
        children = []
        person = self.__db.get_person_from_handle(person_handle)
        for family_handle in person.get_family_handle_list():
            family = self.__db.get_family_from_handle(family_handle)
            for child_ref in family.get_child_ref_list():
                children.append(child_ref.get_reference_handle())
        return children

    def write_people(self, title, people_handles):
        """
        Write information about a group of people - including the title.
        """
        cap_title = title[0].upper() + title[1:]
        subtitle = "%s (%d)" % (cap_title, len(people_handles))
        self.doc.start_paragraph("KIN-Subtitle")
        mark = IndexMark(cap_title, INDEX_TYPE_TOC, 2)
        self.doc.write_text(subtitle, mark)
        self.doc.end_paragraph()
        # Plain loop instead of list(map(...)): this is side-effecting output,
        # not a collection build.
        for person_handle in people_handles:
            self.write_person(person_handle)

    def write_person(self, person_handle):
        """
        Write information about the given person.
        """
        person = self.database.get_person_from_handle(person_handle)

        name = self._name_display.display(person)
        mark = ReportUtils.get_person_mark(self.database, person)
        birth_date = ""
        birth = get_birth_or_fallback(self.database, person)
        if birth:
            birth_date = self._get_date(birth.get_date_object())

        death_date = ""
        death = get_death_or_fallback(self.database, person)
        if death:
            death_date = self._get_date(death.get_date_object())
        dates = self._(" (%(birth_date)s - %(death_date)s)") % {
            'birth_date' : birth_date,
            'death_date' : death_date }

        self.doc.start_paragraph('KIN-Normal')
        self.doc.write_text(name, mark)
        self.doc.write_text(dates)
        self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# KinshipOptions
#
#------------------------------------------------------------------------
class KinshipOptions(MenuReportOptions):
    """
    Defines options and provides handling interface.
    """

    def __init__(self, name, dbase):
        MenuReportOptions.__init__(self, name, dbase)

    def add_menu_options(self, menu):
        """
        Add options to the menu for the kinship report.
        """
        category_name = _("Report Options")

        center_person = PersonOption(_("Center Person"))
        center_person.set_help(_("The center person for the report"))
        menu.add_option(category_name, "pid", center_person)

        stdoptions.add_name_format_option(menu, category_name)

        max_desc = NumberOption(_("Max Descendant Generations"), 2, 1, 20)
        max_desc.set_help(_("The maximum number of descendant generations"))
        menu.add_option(category_name, "maxdescend", max_desc)

        max_asc = NumberOption(_("Max Ancestor Generations"), 2, 1, 20)
        max_asc.set_help(_("The maximum number of ancestor generations"))
        menu.add_option(category_name, "maxascend", max_asc)

        spouses_opt = BooleanOption(_("Include spouses"), True)
        spouses_opt.set_help(_("Whether to include spouses"))
        menu.add_option(category_name, "incspouses", spouses_opt)

        cousins_opt = BooleanOption(_("Include cousins"), True)
        cousins_opt.set_help(_("Whether to include cousins"))
        menu.add_option(category_name, "inccousins", cousins_opt)

        aunts_opt = BooleanOption(_("Include aunts/uncles/nephews/nieces"), True)
        aunts_opt.set_help(_("Whether to include aunts/uncles/nephews/nieces"))
        menu.add_option(category_name, "incaunts", aunts_opt)

        stdoptions.add_private_data_option(menu, category_name)

        stdoptions.add_localization_option(menu, category_name)

    def make_default_style(self, default_style):
        """Make the default output style for the Kinship Report."""
        # Page title: large bold sans-serif, centered, with a bottom border.
        title_font = FontStyle()
        title_font.set_size(16)
        title_font.set_type_face(FONT_SANS_SERIF)
        title_font.set_bold(1)
        para = ParagraphStyle()
        para.set_header_level(1)
        para.set_bottom_border(1)
        para.set_bottom_margin(ReportUtils.pt2cm(8))
        para.set_font(title_font)
        para.set_alignment(PARA_ALIGN_CENTER)
        para.set_description(_("The style used for the title of the page."))
        default_style.add_paragraph_style("KIN-Title", para)

        # Group sub-headings: medium bold.
        subtitle_font = FontStyle()
        subtitle_font.set_size(12)
        subtitle_font.set_bold(True)
        para = ParagraphStyle()
        para.set_header_level(3)
        para.set_font(subtitle_font)
        para.set_top_margin(ReportUtils.pt2cm(6))
        para.set_description(_('The basic style used for sub-headings.'))
        default_style.add_paragraph_style("KIN-Subtitle", para)

        # Person entries: small, indented body text.
        body_font = FontStyle()
        body_font.set_size(10)
        para = ParagraphStyle()
        para.set_font(body_font)
        para.set_left_margin(0.5)
        para.set_description(_('The basic style used for the text display.'))
        default_style.add_paragraph_style("KIN-Normal", para)
|
pmghalvorsen/gramps_branch
|
gramps/plugins/textreport/kinshipreport.py
|
Python
|
gpl-2.0
| 16,756
|
[
"Brian"
] |
d448292085858734fc2063d989b6ffb1f823e58a5451d8852745318f1ff9f754
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The reg module provides classes for interfacing with the `niftyreg
<http://niftyreg.sourceforge.net>`_ registration command line tools.
The interfaces were written to work with niftyreg version 1.5.10
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/\
data'))
>>> os.chdir(datadir)
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from builtins import staticmethod
import os
import warnings
from ..base import TraitedSpec, File, traits, isdefined
from .base import get_custom_path, NiftyRegCommand, NiftyRegCommandInputSpec
from ...utils.filemanip import split_filename
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class RegAladinInputSpec(NiftyRegCommandInputSpec):
    """Input Spec for RegAladin.

    Each trait maps one ``reg_aladin`` command-line flag via its ``argstr``;
    optional traits are only emitted when set.

    NOTE(review): the class-level ``desc`` variable is rebound several times
    below and consumed by the immediately following trait -- keep each
    ``desc = ...`` line directly above the trait that uses it.
    """
    # Input reference file
    ref_file = File(exists=True,
                    desc='The input reference/target image',
                    argstr='-ref %s',
                    mandatory=True)
    # Input floating file
    flo_file = File(exists=True,
                    desc='The input floating/source image',
                    argstr='-flo %s',
                    mandatory=True)
    # No symmetric flag
    nosym_flag = traits.Bool(argstr='-noSym',
                             desc='Turn off symmetric registration')
    # Rigid only registration
    rig_only_flag = traits.Bool(argstr='-rigOnly',
                                desc='Do only a rigid registration')
    # Directly optimise affine flag
    desc = 'Directly optimise the affine parameters'
    aff_direct_flag = traits.Bool(argstr='-affDirect',
                                  desc=desc)
    # Input affine
    in_aff_file = File(exists=True,
                       desc='The input affine transformation',
                       argstr='-inaff %s')
    # Input reference mask
    rmask_file = File(exists=True,
                      desc='The input reference mask',
                      argstr='-rmask %s')
    # Input floating mask
    fmask_file = File(exists=True,
                      desc='The input floating mask',
                      argstr='-fmask %s')
    # Maximum number of iterations
    maxit_val = traits.Range(desc='Maximum number of iterations',
                             argstr='-maxit %d', low=0)
    # Multiresolution levels
    ln_val = traits.Range(desc='Number of resolution levels to create',
                          argstr='-ln %d', low=0)
    # Number of resolution levels to process
    lp_val = traits.Range(desc='Number of resolution levels to perform',
                          argstr='-lp %d', low=0)
    # Smoothing to apply on reference image
    desc = 'Amount of smoothing to apply to reference image'
    smoo_r_val = traits.Float(desc=desc,
                              argstr='-smooR %f')
    # Smoothing to apply on floating image
    desc = 'Amount of smoothing to apply to floating image'
    smoo_f_val = traits.Float(desc=desc,
                              argstr='-smooF %f')
    # Use nifti header to initialise transformation
    desc = 'Use nifti header to initialise transformation'
    nac_flag = traits.Bool(desc=desc,
                           argstr='-nac')
    # Use the input masks centre of mass to initialise the transformation
    desc = 'Use the masks centre of mass to initialise the transformation'
    cog_flag = traits.Bool(desc=desc,
                           argstr='-cog')
    # Percent of blocks that are considered active.
    v_val = traits.Range(desc='Percent of blocks that are active',
                         argstr='-pv %d', low=0)
    # Percent of inlier blocks
    i_val = traits.Range(desc='Percent of inlier blocks', argstr='-pi %d',
                         low=0)
    # Lower threshold on reference image
    ref_low_val = traits.Float(desc='Lower threshold value on reference image',
                               argstr='-refLowThr %f')
    # Upper threshold on reference image
    ref_up_val = traits.Float(desc='Upper threshold value on reference image',
                              argstr='-refUpThr %f')
    # Lower threshold on floating image
    flo_low_val = traits.Float(desc='Lower threshold value on floating image',
                               argstr='-floLowThr %f')
    # Upper threshold on floating image
    flo_up_val = traits.Float(desc='Upper threshold value on floating image',
                              argstr='-floUpThr %f')
    # Platform to use
    platform_val = traits.Int(desc='Platform index',
                              argstr='-platf %i')
    # GPU device id to use (only meaningful for a GPU platform)
    gpuid_val = traits.Int(desc='Device to use id',
                           argstr='-gpuid %i')
    # Verbosity off
    verbosity_off_flag = traits.Bool(argstr='-voff',
                                     desc='Turn off verbose output')
    # Affine output transformation matrix file (named after flo_file)
    aff_file = File(name_source=['flo_file'],
                    name_template='%s_aff.txt',
                    desc='The output affine matrix file',
                    argstr='-aff %s')
    # Result warped image file (named after flo_file)
    res_file = File(name_source=['flo_file'],
                    name_template='%s_res.nii.gz',
                    desc='The affine transformed floating image',
                    argstr='-res %s')
class RegAladinOutputSpec(TraitedSpec):
    """Output Spec for RegAladin.

    ``avg_output`` is a space-separated "<affine> <floating>" string built by
    ``RegAladin._list_outputs`` for consumption by reg_average.
    """
    aff_file = File(desc='The output affine file')
    res_file = File(desc='The output transformed image')
    desc = 'Output string in the format for reg_average'
    avg_output = traits.String(desc=desc)
class RegAladin(NiftyRegCommand):
    """Interface for executable reg_aladin from NiftyReg platform.

    Block Matching algorithm for symmetric global registration.
    Based on Modat et al., "Global image registration using
    asymmetric block-matching approach"
    J. Med. Img. 1(2) 024003, 2014, doi: 10.1117/1.JMI.1.2.024003

    `Source code <https://cmiclab.cs.ucl.ac.uk/mmodat/niftyreg>`_

    Examples
    --------
    >>> from nipype.interfaces import niftyreg
    >>> node = niftyreg.RegAladin()
    >>> node.inputs.ref_file = 'im1.nii'
    >>> node.inputs.flo_file = 'im2.nii'
    >>> node.inputs.rmask_file = 'mask.nii'
    >>> node.inputs.omp_core_val = 4
    >>> node.cmdline # doctest: +ALLOW_UNICODE
    'reg_aladin -aff im2_aff.txt -flo im2.nii -omp 4 -ref im1.nii \
-res im2_res.nii.gz -rmask mask.nii'
    """
    _cmd = get_custom_path('reg_aladin')
    input_spec = RegAladinInputSpec
    output_spec = RegAladinOutputSpec

    def _list_outputs(self):
        outputs = super(RegAladin, self)._list_outputs()
        # Expose "<affine> <floating image>" so reg_average can consume
        # the pair directly.
        aff_path = os.path.abspath(outputs['aff_file'])
        flo_path = os.path.abspath(self.inputs.flo_file)
        outputs['avg_output'] = '{0} {1}'.format(aff_path, flo_path)
        return outputs
class RegF3DInputSpec(NiftyRegCommandInputSpec):
    """Input Spec for RegF3D.

    Each trait maps one ``reg_f3d`` command-line flag via its ``argstr``.

    NOTE(review): the class-level ``desc`` variable is rebound many times
    below and consumed by the immediately following trait -- keep each
    ``desc = ...`` line directly above the trait that uses it.
    """
    # Input reference file
    ref_file = File(exists=True,
                    desc='The input reference/target image',
                    argstr='-ref %s',
                    mandatory=True)
    # Input floating file
    flo_file = File(exists=True,
                    desc='The input floating/source image',
                    argstr='-flo %s',
                    mandatory=True)
    # Input Affine file
    aff_file = File(exists=True,
                    desc='The input affine transformation file',
                    argstr='-aff %s')
    # Input cpp file
    incpp_file = File(exists=True,
                      desc='The input cpp transformation file',
                      argstr='-incpp %s')
    # Reference mask
    rmask_file = File(exists=True,
                      desc='Reference image mask',
                      argstr='-rmask %s')
    # Smoothing kernel for reference
    desc = 'Smoothing kernel width for reference image'
    ref_smooth_val = traits.Float(desc=desc, argstr='-smooR %f')
    # Smoothing kernel for floating
    desc = 'Smoothing kernel width for floating image'
    flo_smooth_val = traits.Float(desc=desc, argstr='-smooF %f')
    # Lower threshold for reference image
    rlwth_thr_val = traits.Float(desc='Lower threshold for reference image',
                                 argstr='--rLwTh %f')
    # Upper threshold for reference image
    rupth_thr_val = traits.Float(desc='Upper threshold for reference image',
                                 argstr='--rUpTh %f')
    # Lower threshold for floating image
    flwth_thr_val = traits.Float(desc='Lower threshold for floating image',
                                 argstr='--fLwTh %f')
    # Upper threshold for floating image
    fupth_thr_val = traits.Float(desc='Upper threshold for floating image',
                                 argstr='--fUpTh %f')
    # Lower threshold for reference image at a given time point
    desc = 'Lower threshold for reference image at the specified time point'
    rlwth2_thr_val = traits.Tuple(traits.Range(low=0), traits.Float,
                                  desc=desc, argstr='-rLwTh %d %f')
    # Upper threshold for reference image at a given time point
    desc = 'Upper threshold for reference image at the specified time point'
    rupth2_thr_val = traits.Tuple(traits.Range(low=0), traits.Float,
                                  desc=desc, argstr='-rUpTh %d %f')
    # Lower threshold for floating image at a given time point
    desc = 'Lower threshold for floating image at the specified time point'
    flwth2_thr_val = traits.Tuple(traits.Range(low=0), traits.Float,
                                  desc=desc, argstr='-fLwTh %d %f')
    # Upper threshold for floating image at a given time point
    desc = 'Upper threshold for floating image at the specified time point'
    fupth2_thr_val = traits.Tuple(traits.Range(low=0), traits.Float,
                                  desc=desc, argstr='-fUpTh %d %f')
    # Final grid spacing along the 3 axes
    sx_val = traits.Float(desc='Final grid spacing along the x axes',
                          argstr='-sx %f')
    sy_val = traits.Float(desc='Final grid spacing along the y axes',
                          argstr='-sy %f')
    sz_val = traits.Float(desc='Final grid spacing along the z axes',
                          argstr='-sz %f')
    # Regularisation options
    be_val = traits.Float(desc='Bending energy value', argstr='-be %f')
    le_val = traits.Float(desc='Linear elasticity penalty term',
                          argstr='-le %f')
    jl_val = traits.Float(desc='Log of jacobian of deformation penalty value',
                          argstr='-jl %f')
    desc = 'Do not approximate the log of jacobian penalty at control points \
only'
    no_app_jl_flag = traits.Bool(argstr='-noAppJL', desc=desc)
    # Similarity measure options
    desc = 'use NMI even when other options are specified'
    nmi_flag = traits.Bool(argstr='--nmi', desc=desc)
    desc = 'Number of bins in the histogram for reference image'
    rbn_val = traits.Range(low=0, desc=desc, argstr='--rbn %d')
    # NOTE(review): desc below says "reference" but --fbn sets the bins for
    # the *floating* image histogram; the string is user-facing and left
    # unchanged here -- confirm against reg_f3d docs before fixing.
    desc = 'Number of bins in the histogram for reference image'
    fbn_val = traits.Range(low=0, desc=desc, argstr='--fbn %d')
    desc = 'Number of bins in the histogram for reference image for given \
time point'
    rbn2_val = traits.Tuple(traits.Range(low=0), traits.Range(low=0),
                            desc=desc, argstr='-rbn %d %d')
    # NOTE(review): same "reference"/"floating" mismatch as fbn_val above.
    desc = 'Number of bins in the histogram for reference image for given \
time point'
    fbn2_val = traits.Tuple(traits.Range(low=0), traits.Range(low=0),
                            desc=desc, argstr='-fbn %d %d')
    lncc_val = traits.Float(desc='SD of the Gaussian for computing LNCC',
                            argstr='--lncc %f')
    desc = 'SD of the Gaussian for computing LNCC for a given time point'
    lncc2_val = traits.Tuple(traits.Range(low=0), traits.Float,
                             desc=desc, argstr='-lncc %d %f')
    ssd_flag = traits.Bool(desc='Use SSD as the similarity measure',
                           argstr='--ssd')
    desc = 'Use SSD as the similarity measure for a given time point'
    ssd2_flag = traits.Range(low=0, desc=desc, argstr='-ssd %d')
    kld_flag = traits.Bool(desc='Use KL divergence as the similarity measure',
                           argstr='--kld')
    desc = 'Use KL divergence as the similarity measure for a given time point'
    kld2_flag = traits.Range(low=0, desc=desc, argstr='-kld %d')
    amc_flag = traits.Bool(desc='Use additive NMI', argstr='-amc')
    nox_flag = traits.Bool(desc="Don't optimise in x direction",
                           argstr='-nox')
    noy_flag = traits.Bool(desc="Don't optimise in y direction",
                           argstr='-noy')
    noz_flag = traits.Bool(desc="Don't optimise in z direction",
                           argstr='-noz')
    # Optimization options
    maxit_val = traits.Range(low=0, argstr='-maxit %d',
                             desc='Maximum number of iterations per level')
    ln_val = traits.Range(low=0, argstr='-ln %d',
                          desc='Number of resolution levels to create')
    lp_val = traits.Range(low=0, argstr='-lp %d',
                          desc='Number of resolution levels to perform')
    nopy_flag = traits.Bool(desc='Do not use the multiresolution approach',
                            argstr='-nopy')
    noconj_flag = traits.Bool(desc='Use simple GD optimization',
                              argstr='-noConj')
    desc = 'Add perturbation steps after each optimization step'
    pert_val = traits.Range(low=0, desc=desc, argstr='-pert %d')
    # F3d2 options
    vel_flag = traits.Bool(desc='Use velocity field integration',
                           argstr='-vel')
    fmask_file = File(exists=True,
                      desc='Floating image mask',
                      argstr='-fmask %s')
    # Other options
    desc = 'Kernel width for smoothing the metric gradient'
    smooth_grad_val = traits.Float(desc=desc, argstr='-smoothGrad %f')
    # Padding value
    pad_val = traits.Float(desc='Padding value', argstr='-pad %f')
    # verbosity off
    verbosity_off_flag = traits.Bool(argstr='-voff',
                                     desc='Turn off verbose output')
    # Output CPP image file (named after flo_file)
    cpp_file = File(name_source=['flo_file'],
                    name_template='%s_cpp.nii.gz',
                    desc='The output CPP file',
                    argstr='-cpp %s')
    # Output warped image file (named after flo_file)
    res_file = File(name_source=['flo_file'],
                    name_template='%s_res.nii.gz',
                    desc='The output resampled image',
                    argstr='-res %s')
class RegF3DOutputSpec(TraitedSpec):
    """Output Spec for RegF3D.

    ``invcpp_file``/``invres_file`` are only populated when ``vel_flag`` is
    set (see ``RegF3D._list_outputs``); ``avg_output`` is the space-separated
    string consumed by reg_average.
    """
    cpp_file = File(desc='The output CPP file')
    res_file = File(desc='The output resampled image')
    invcpp_file = File(desc='The output inverse CPP file')
    invres_file = File(desc='The output inverse res file')
    desc = 'Output string in the format for reg_average'
    avg_output = traits.String(desc=desc)
class RegF3D(NiftyRegCommand):
    """Interface for executable reg_f3d from NiftyReg platform.

    Fast Free-Form Deformation (F3D) algorithm for non-rigid registration.
    Initially based on Modat et al., "Fast Free-Form Deformation using
    graphics processing units", CMPB, 2010

    `Source code <https://cmiclab.cs.ucl.ac.uk/mmodat/niftyreg>`_

    Examples
    --------
    >>> from nipype.interfaces import niftyreg
    >>> node = niftyreg.RegF3D()
    >>> node.inputs.ref_file = 'im1.nii'
    >>> node.inputs.flo_file = 'im2.nii'
    >>> node.inputs.rmask_file = 'mask.nii'
    >>> node.inputs.omp_core_val = 4
    >>> node.cmdline # doctest: +ALLOW_UNICODE
    'reg_f3d -cpp im2_cpp.nii.gz -flo im2.nii -omp 4 -ref im1.nii \
-res im2_res.nii.gz -rmask mask.nii'
    """
    _cmd = get_custom_path('reg_f3d')
    input_spec = RegF3DInputSpec
    output_spec = RegF3DOutputSpec

    @staticmethod
    def _remove_extension(in_file):
        # Strip the (possibly double, e.g. .nii.gz) extension from a path.
        dirname, basename, _ = split_filename(in_file)
        return os.path.join(dirname, basename)

    def _list_outputs(self):
        outputs = super(RegF3D, self)._list_outputs()

        # reg_f3d2 (velocity-field mode) also writes the backward transforms.
        if self.inputs.vel_flag is True:
            outputs['invres_file'] = '%s_backward.nii.gz' % \
                self._remove_extension(outputs['res_file'])
            outputs['invcpp_file'] = '%s_backward.nii.gz' % \
                self._remove_extension(outputs['cpp_file'])

        # Build the string reg_average expects; the affine is prepended only
        # in velocity-field mode when one was supplied.
        cpp_path = os.path.abspath(outputs['cpp_file'])
        flo_path = os.path.abspath(self.inputs.flo_file)
        if self.inputs.vel_flag is True and isdefined(self.inputs.aff_file):
            outputs['avg_output'] = '%s %s %s' % (self.inputs.aff_file,
                                                  cpp_path, flo_path)
        else:
            outputs['avg_output'] = '%s %s' % (cpp_path, flo_path)
        return outputs
|
mick-d/nipype
|
nipype/interfaces/niftyreg/reg.py
|
Python
|
bsd-3-clause
| 17,398
|
[
"Gaussian"
] |
37ecaab60ebb36a94dc4f34e5d992196f27e0a8b3d35afbaed966e4a00008bc2
|
"""
Helper functions for the course complete event that was originally included with the Badging MVP.
"""
import hashlib
import logging
from django.urls import reverse
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from badges.models import BadgeAssertion, BadgeClass, CourseCompleteImageConfiguration
from badges.utils import requires_badges_enabled, site_prefix
from xmodule.modulestore.django import modulestore
LOGGER = logging.getLogger(__name__)
# NOTE: As these functions are carry-overs from the initial badging implementation, they are used in
# migrations. Please check the badge migrations when changing any of these functions.
def course_slug(course_key, mode):
    """
    Legacy: Not to be used as a model for constructing badge slugs. Included for compatibility with the original badge
    type, awarded on course completion.

    Slug ought to be deterministic and limited in size so it's not too big for Badgr.

    Badgr's max slug length is 255.
    """
    # Seven digits should be enough to realistically avoid collisions. That's what git services use.
    # Hash bytes, not unicode: the implicit ASCII codec raises on non-ASCII
    # course keys (and py3 hashlib rejects str outright). The utf-8 encode
    # produces identical bytes for all-ASCII input, so existing slugs -- and
    # the badge migrations that depend on them -- are unchanged.
    key_and_mode = u"{}{}".format(unicode(course_key), unicode(mode))
    digest = hashlib.sha256(key_and_mode.encode('utf-8')).hexdigest()[:7]
    base_slug = slugify(unicode(course_key) + u'_{}_'.format(mode))[:248]
    return base_slug + digest
def badge_description(course, mode):
    """
    Returns a description for the earned badge.
    """
    # Courses without an end date get the shorter, dateless description.
    if not course.end:
        return _(u'Completed the course "{course_name}" ({course_mode})').format(
            course_name=course.display_name,
            course_mode=mode,
        )
    return _(u'Completed the course "{course_name}" ({course_mode}, {start_date} - {end_date})').format(
        start_date=course.start.date(),
        end_date=course.end.date(),
        course_name=course.display_name,
        course_mode=mode,
    )
def evidence_url(user_id, course_key):
    """
    Generates a URL to the user's Certificate HTML view, along with a GET variable that will signal the evidence visit
    event.
    """
    certificate_path = reverse(
        'certificates:html_view',
        kwargs={'user_id': user_id, 'course_id': unicode(course_key)})
    return site_prefix() + certificate_path + '?evidence_visit=1'
def criteria(course_key):
    """
    Constructs the 'criteria' URL from the course about page.
    """
    return u'{}{}'.format(
        site_prefix(),
        reverse('about_course', kwargs={'course_id': unicode(course_key)}))
def get_completion_badge(course_id, user):
    """
    Given a course key and a user, find the user's enrollment mode
    and get the Course Completion badge.

    Returns None when the user has no enrollment or the course does not
    issue badges.
    """
    from student.models import CourseEnrollment
    # Active enrollments sort first, so [0] prefers an active enrollment's
    # mode over any inactive ones.
    enrollments = CourseEnrollment.objects.filter(
        user=user, course_id=course_id
    ).order_by('-is_active')
    if not enrollments:
        return None
    mode = enrollments[0].mode
    course = modulestore().get_course(course_id)
    if not course.issue_badges:
        return None
    return BadgeClass.get_badge_class(
        slug=course_slug(course_id, mode),
        issuing_component='',
        criteria=criteria(course_id),
        description=badge_description(course, mode),
        course_id=course_id,
        mode=mode,
        display_name=course.display_name,
        image_file_handle=CourseCompleteImageConfiguration.image_for_mode(mode)
    )
@requires_badges_enabled
def course_badge_check(user, course_key):
    """
    Award the course-completion badge for ``user`` on ``course_key`` if the
    course issues badges, a badge class exists for the user's enrollment
    mode, and the user does not already hold the badge.

    (Previous docstring claimed this takes a GeneratedCertificate instance;
    it takes a user and a course key.)
    """
    if not modulestore().get_course(course_key).issue_badges:
        LOGGER.info("Course is not configured to issue badges.")
        return
    badge_class = get_completion_badge(course_key, user)
    if not badge_class:
        # We're not configured to make a badge for this course mode.
        return
    # .exists() issues a cheap EXISTS query instead of materializing rows
    # just to test membership.
    if BadgeAssertion.objects.filter(user=user, badge_class=badge_class).exists():
        LOGGER.info("Completion badge already exists for this user on this course.")
        # Badge already exists. Skip.
        return
    evidence = evidence_url(user.id, course_key)
    badge_class.award(user, evidence_url=evidence)
|
ahmedaljazzar/edx-platform
|
lms/djangoapps/badges/events/course_complete.py
|
Python
|
agpl-3.0
| 4,239
|
[
"VisIt"
] |
0d512ad36de42be7df6323a6927fb2672a4649b455b562314ef15e1b9b2f12eb
|
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""
stoq/gui/calendar.py:
Calendar application.
"""
import urllib
from dateutil.parser import parse
from dateutil.relativedelta import MO, relativedelta
from dateutil.tz import tzlocal, tzutc
import gtk
from stoqlib.api import api
from stoqlib.domain.person import Client
from stoqlib.gui.editors.callseditor import CallsEditor
from stoqlib.gui.editors.paymenteditor import (InPaymentEditor,
OutPaymentEditor)
from stoqlib.gui.editors.workordereditor import WorkOrderEditor
from stoqlib.gui.stockicons import (STOQ_CALENDAR_TODAY,
STOQ_CALENDAR_WEEK,
STOQ_CALENDAR_MONTH,
STOQ_CALENDAR_LIST)
from stoqlib.gui.utils.keybindings import get_accels
from stoqlib.gui.widgets.webview import WebView
from stoqlib.lib import dateutils
from stoqlib.lib.daemonutils import start_daemon
from stoqlib.lib.defaults import get_weekday_start
from stoqlib.lib.translation import stoqlib_gettext as _
from stoq.gui.shell.shellapp import ShellApp
def parse_javascript_date(jsdate):
    """Convert a date string sent by the JavaScript calendar into a
    UTC datetime.

    The string is parsed fuzzily, interpreted as local time, converted
    to UTC and then shifted one month back — presumably because
    JavaScript Date months are zero-based (TODO: confirm against the
    JS caller).
    """
    parsed = parse(jsdate, fuzzy=True)
    localized = parsed.replace(tzinfo=tzlocal())
    utc_date = localized.astimezone(tzutc())
    return utc_date + relativedelta(months=-1)
class CalendarView(WebView):
    """WebView that embeds the fullcalendar JavaScript widget and
    bridges it to the rest of the application.

    The calendar page itself is served by the Stoq daemon; its base
    URI is provided via set_daemon_uri() and the page is fetched by
    load().
    """
    def __init__(self, app):
        # Javascript may only run after the document has finished
        # loading; _loaded gates _calendar_run() until then.
        self._loaded = False
        WebView.__init__(self)
        self.app = app
        self.get_view().connect(
            'load-finished',
            self._on_view__document_load_finished)
        self._load_user_settings()
    def _load_finished(self):
        # Document is ready: push options to the javascript side and
        # start tracking widget resizes.
        self._startup()
        self._loaded = True
        view = self.get_view()
        view.connect('size-allocate', self._on_view__size_allocate)
        x, y, width, height = view.get_allocation()
        self._update_calendar_size(width, height)
    def _startup(self):
        # Build the option dictionary passed to the javascript
        # startup() call: localized names/formats and which event
        # types to display.
        options = {}
        options['monthNames'] = dateutils.get_month_names()
        options['monthNamesShort'] = dateutils.get_short_month_names()
        options['dayNames'] = dateutils.get_day_names()
        options['dayNamesShort'] = dateutils.get_short_day_names()
        options['buttonText'] = {"today": _('today'),
                                 "month": _('month'),
                                 "week": _('week'),
                                 "day": _('day')}
        options['defaultView'] = api.user_settings.get(
            'calendar-view', 'month')
        # FIXME: This should not be tied to the language, rather be
        # picked up from libc, but it's a bit of work to translate
        # one into another so just take a shortcut
        options['columnFormat'] = {
            # month column format, eg "Mon", see:
            # http://arshaw.com/fullcalendar/docs/text/columnFormat/
            'month': _('ddd'),
            # week column format: eg, "Mon 9/7", see:
            # http://arshaw.com/fullcalendar/docs/text/columnFormat/
            'week': _('ddd M/d'),
            # day column format : eg "Monday 9/7", see:
            # http://arshaw.com/fullcalendar/docs/text/columnFormat/
            'day': _('dddd M/d'),
        }
        options['timeFormat'] = {
            # for agendaWeek and agendaDay, eg "5:00 - 6:30", see:
            # http://arshaw.com/fullcalendar/docs/text/timeFormat/
            'agenda': _('h:mm{ - h:mm}'),
            # for all other views, eg "7p", see:
            # http://arshaw.com/fullcalendar/docs/text/timeFormat/
            '': _('h(:mm)t'),
        }
        options['titleFormat'] = {
            # month title, eg "September 2009", see:
            # http://arshaw.com/fullcalendar/docs/text/titleFormat/
            'month': _('MMMM yyyy'),
            # week title, eg "Sep 7 - 13 2009" see:
            # http://arshaw.com/fullcalendar/docs/text/titleFormat/
            'week': _("MMM d[ yyyy]{ '—'[ MMM] d yyyy}"),
            # day time, eg "Tuesday, Sep 8, 2009" see:
            # http://arshaw.com/fullcalendar/docs/text/titleFormat/
            'day': _('dddd, MMM d, yyyy'),
        }
        if get_weekday_start() == MO:
            firstday = 1
        else:
            firstday = 0
        options['firstDay'] = firstday
        options['isRTL'] = (
            gtk.widget_get_default_direction() == gtk.TEXT_DIR_RTL)
        options['data'] = self._show_events
        options['loading_msg'] = _('Loading calendar content, please wait...')
        self.js_function_call('startup', options)
        self._update_title()
    def _calendar_run(self, name, *args):
        # Proxy a method call to the fullCalendar jquery plugin; a
        # silent no-op until the page has finished loading.
        if not self._loaded:
            return
        self.js_function_call("$('#calendar').fullCalendar", name, *args)
    def _load_daemon_path(self, path):
        uri = '%s/%s' % (self._daemon_uri, path)
        self.load_uri(uri)
    def _load_user_settings(self):
        # Which event types to show, persisted in the user settings
        # (note: settings keys use dashes, runtime keys underscores).
        events = api.user_settings.get('calendar-events', {})
        self._show_events = dict(
            in_payments=events.get('in-payments', True),
            out_payments=events.get('out-payments', True),
            purchase_orders=events.get('purchase-orders', True),
            client_calls=events.get('client-calls', True),
            client_birthdays=events.get('client-birthdays', True),
            work_orders=events.get('work-orders', True),
        )
    def _save_user_settings(self):
        # Persist the current event visibility back into the settings
        # dictionary (mutated in place).
        events = api.user_settings.get('calendar-events', {})
        events['in-payments'] = self._show_events['in_payments']
        events['out-payments'] = self._show_events['out_payments']
        events['purchase-orders'] = self._show_events['purchase_orders']
        events['client-calls'] = self._show_events['client_calls']
        events['client-birthdays'] = self._show_events['client_birthdays']
        events['work-orders'] = self._show_events['work_orders']
    def _update_calendar_size(self, width, height):
        self._calendar_run('option', 'aspectRatio', float(width) / height)
    def _update_title(self):
        # Workaround to get the current calendar date: copy the header
        # text into document.title and read it back as a property.
        view = self.get_view()
        view.execute_script("document.title = $('.fc-header-title').text()")
        title = view.get_property('title')
        self.app.date_label.set_markup(
            '<big><b>%s</b></big>' % api.escape(title))
    #
    # Callbacks
    #
    def _on_view__document_load_finished(self, view, frame):
        self._load_finished()
    def _on_view__size_allocate(self, widget, req):
        x, y, width, height = req
        self._update_calendar_size(width, height)
    #
    # WebView
    #
    def web_open_uri(self, kwargs):
        # Navigation request coming back from the javascript side.
        if kwargs['method'] == 'changeView':
            view = kwargs['view']
            if view == 'basicDay':
                self.app.ViewDay.set_active(True)
            jsdate = urllib.unquote(kwargs['date'])
            date = parse_javascript_date(jsdate)
            self._calendar_run('gotoDate', date.year, date.month, date.day)
    #
    # Public API
    #
    def set_daemon_uri(self, uri):
        # Base URI of the Stoq daemon serving the calendar page.
        self._daemon_uri = uri
    def load(self):
        self._load_daemon_path('web/static/calendar-app.html')
    def go_prev(self):
        self._calendar_run('prev')
        self._update_title()
    def show_today(self):
        self._calendar_run('today')
        self._update_title()
    def go_next(self):
        self._calendar_run('next')
        self._update_title()
    def change_view(self, view_name):
        # Switch fullcalendar view ('month'/'basicWeek'/'basicDay')
        # and remember the choice in the user settings.
        self._calendar_run('removeEvents')
        self._calendar_run('changeView', view_name)
        self._calendar_run('refetchEvents')
        api.user_settings.set('calendar-view', view_name)
        self._update_title()
    def refresh(self):
        self.load()
    def get_events(self):
        # Mapping of event type name -> bool (shown or not).
        return self._show_events
    def update_events(self, **events):
        # Update which event types are shown, tell the javascript side
        # to refetch, and persist the choice.
        self._show_events.update(**events)
        if not self._loaded:
            return
        self.js_function_call("update_options", self._show_events)
        self._calendar_run('refetchEvents')
        self._save_user_settings()
class CalendarApp(ShellApp):
    """Calendar shell application.

    Shows payments, purchases, client calls/birthdays and work orders
    in a CalendarView, with toolbar actions to create new items and to
    switch between month/week/day views.
    """
    app_title = _('Calendar')
    gladefile = 'calendar'
    def __init__(self, window, store=None):
        # Create this here because CalendarView will update it.
        # It will only be shown on create_ui though
        self.date_label = gtk.Label('')
        self._calendar = CalendarView(self)
        ShellApp.__init__(self, window, store=store)
        self._setup_daemon()
    @api.async
    def _setup_daemon(self):
        # Start the daemon, point the calendar at it, then ask the
        # daemon to bring up the webservice before loading the page.
        daemon = yield start_daemon()
        self._calendar.set_daemon_uri(daemon.base_uri)
        proxy = daemon.get_client()
        yield proxy.callRemote('start_webservice')
        self._calendar.load()
    #
    # ShellApp overrides
    #
    def create_actions(self):
        group = get_accels('app.calendar')
        actions = [
            # File
            ('NewClientCall', None, _("Client call"),
             group.get('new_client_call'), _("Add a new client call")),
            ('NewPayable', None, _("Account payable"),
             group.get('new_payable'), _("Add a new account payable")),
            ('NewReceivable', None, _("Account receivable"),
             group.get('new_receivable'), _("Add a new account receivable")),
            ('NewWorkOrder', None, _("Work order"),
             group.get('new_work_order'), _("Add a new work order")),
            # View
            ('Back', gtk.STOCK_GO_BACK, _("Back"),
             group.get('go_back'), _("Go back")),
            ('Forward', gtk.STOCK_GO_FORWARD, _("Forward"),
             group.get('go_forward'), _("Go forward")),
            ('Today', STOQ_CALENDAR_TODAY, _("Show today"),
             group.get('show_today'), _("Show today")),
            ('CalendarEvents', None, _("Calendar events")),
            ('CurrentView', None, _("Display view as")),
        ]
        self.calendar_ui = self.add_ui_actions('', actions,
                                               filename='calendar.xml')
        self.set_help_section(_("Calendar help"), 'app-calendar')
        # Toggle actions control which event types are displayed.
        toggle_actions = [
            ('AccountsPayableEvents', None, _("Accounts payable"),
             None, _("Show accounts payable in the list")),
            ('AccountsReceivableEvents', None, _("Accounts receivable"),
             None, _("Show accounts receivable in the list")),
            ('PurchaseEvents', None, _("Purchases"),
             None, _("Show purchases in the list")),
            ('ClientCallEvents', None, _("Client Calls"),
             None, _("Show client calls in the list")),
            ('ClientBirthdaysEvents', None, _("Client Birthdays"),
             None, _("Show client birthdays in the list")),
            ('WorkOrderEvents', None, _("Work orders"),
             None, _("Show work orders in the list")),
        ]
        self.add_ui_actions('', toggle_actions, 'ToggleActions',
                            'toggle')
        # event type -> (toggle action, "new item" action, application
        # the user must have permission for)
        events_info = dict(
            in_payments=(self.AccountsReceivableEvents, self.NewReceivable,
                         u'receivable'),
            out_payments=(self.AccountsPayableEvents, self.NewPayable,
                          u'payable'),
            purchase_orders=(self.PurchaseEvents, None, u'stock'),
            client_calls=(self.ClientCallEvents, self.NewClientCall, u'sales'),
            client_birthdays=(self.ClientBirthdaysEvents, None, u'sales'),
            work_orders=(self.WorkOrderEvents, self.NewWorkOrder, u'services'),
        )
        user = api.get_current_user(self.store)
        events = self._calendar.get_events()
        for event_name, value in events_info.items():
            view_action, new_action, app = value
            view_action.props.active = events[event_name]
            # Disable feature if user does not have access to required
            # application
            if not user.profile.check_app_permission(app):
                view_action.props.active = False
                view_action.set_sensitive(False)
                if new_action:
                    new_action.set_sensitive(False)
            view_action.connect('notify::active', self._update_events)
        self._update_events()
        radio_actions = [
            ('ViewMonth', STOQ_CALENDAR_MONTH, _("View as month"),
             '', _("Show one month")),
            ('ViewWeek', STOQ_CALENDAR_WEEK, _("View as week"),
             '', _("Show one week")),
            ('ViewDay', STOQ_CALENDAR_LIST, _("View as day"),
             '', _("Show one day")),
        ]
        self.add_ui_actions('', radio_actions, 'RadioActions',
                            'radio')
        self.ViewMonth.set_short_label(_("Month"))
        self.ViewWeek.set_short_label(_("Week"))
        self.ViewDay.set_short_label(_("Day"))
        self.ViewMonth.props.is_important = True
        self.ViewWeek.props.is_important = True
        self.ViewDay.props.is_important = True
        # Restore the last chosen view from the user settings.
        view = api.user_settings.get('calendar-view', 'month')
        if view == 'month':
            self.ViewMonth.props.active = True
        elif view == 'basicWeek':
            self.ViewWeek.props.active = True
        else:
            self.ViewDay.props.active = True
    def create_ui(self):
        self.window.add_new_items([self.NewClientCall,
                                   self.NewPayable,
                                   self.NewReceivable])
        # Reparent the toolbar, to show the date next to it.
        self.hbox = gtk.HBox()
        toolbar = self.uimanager.get_widget('/toolbar')
        toolbar.reparent(self.hbox)
        # A label to show the current calendar date.
        self.date_label.show()
        self.hbox.pack_start(self.date_label, False, False, 6)
        self.hbox.show()
        self.main_vbox.pack_start(self.hbox, False, False)
        self.main_vbox.pack_start(self._calendar)
        self._calendar.show()
        self.window.Print.set_tooltip(_("Print this calendar"))
    def activate(self, refresh=True):
        self.window.SearchToolItem.set_sensitive(False)
        # FIXME: Are we 100% sure we can always print something?
        # self.window.Print.set_sensitive(True)
    def deactivate(self):
        # Put the toolbar back at where it was
        main_vbox = self.window.main_vbox
        toolbar = self.uimanager.get_widget('/toolbar')
        self.hbox.remove(toolbar)
        main_vbox.pack_start(toolbar, False, False)
        main_vbox.reorder_child(toolbar, 1)
        self.uimanager.remove_ui(self.calendar_ui)
        self.window.SearchToolItem.set_sensitive(True)
    # Private
    def _update_events(self, *args):
        # Push the current toggle-action states down to the calendar.
        self._calendar.update_events(
            out_payments=self.AccountsPayableEvents.get_active(),
            in_payments=self.AccountsReceivableEvents.get_active(),
            purchase_orders=self.PurchaseEvents.get_active(),
            client_calls=self.ClientCallEvents.get_active(),
            client_birthdays=self.ClientBirthdaysEvents.get_active(),
            work_orders=self.WorkOrderEvents.get_active(),
        )
    def _new_client_call(self):
        with api.new_store() as store:
            self.run_dialog(CallsEditor, store, None, None, Client)
        if store.committed:
            self._update_events()
    def _new_work_order(self):
        with api.new_store() as store:
            self.run_dialog(WorkOrderEditor, store)
        if store.committed:
            self._update_events()
    def _new_payment(self, editor):
        with api.new_store() as store:
            self.run_dialog(editor, store)
        if store.committed:
            self._update_events()
    #
    # Kiwi callbacks
    #
    # Toolbar
    def new_activate(self):
        if not self.NewClientCall.get_sensitive():
            return
        self._new_client_call()
    def print_activate(self):
        self._calendar.print_()
    def export_spreadsheet_activate(self):
        pass
    def on_NewClientCall__activate(self, action):
        self._new_client_call()
    def on_NewPayable__activate(self, action):
        self._new_payment(OutPaymentEditor)
    def on_NewReceivable__activate(self, action):
        self._new_payment(InPaymentEditor)
    def on_NewWorkOrder__activate(self, action):
        self._new_work_order()
    def on_Back__activate(self, action):
        self._calendar.go_prev()
    def on_Today__activate(self, action):
        self._calendar.show_today()
    def on_Forward__activate(self, action):
        self._calendar.go_next()
    def on_ViewMonth__activate(self, action):
        self._calendar.change_view('month')
    def on_ViewWeek__activate(self, action):
        self._calendar.change_view('basicWeek')
    def on_ViewDay__activate(self, action):
        self._calendar.change_view('basicDay')
|
andrebellafronte/stoq
|
stoq/gui/calendar.py
|
Python
|
gpl-2.0
| 17,713
|
[
"VisIt"
] |
fd38c7d8a68a082fcb03cb783762aa368b4f88a873bdb1ad5d2399bec17eeedd
|
#!/usr/bin/python
#
# Flickr API implementation
#
# Inspired largely by Michele Campeotto's flickrclient and Aaron Swartz'
# xmltramp... but I wanted to get a better idea of how python worked in
# those regards, so I mostly worked those components out for myself.
#
# http://micampe.it/things/flickrclient
# http://www.aaronsw.com/2002/xmltramp/
#
# Release 1: initial release
# Release 2: added upload functionality
# Release 3: code cleanup, convert to doc strings
# Release 4: better permission support
# Release 5: converted into fuller-featured "flickrapi"
# Release 6: fix upload sig bug (thanks Deepak Jois), encode test output
# Release 7: fix path construction, Manish Rai Jain's improvements, exceptions
# Release 8: change API endpoint to "api.flickr.com"
#
# Work by (or inspired by) Manish Rai Jain <manishrjain@gmail.com>:
#
# improved error reporting, proper multipart MIME boundary creation,
# use of urllib2 to allow uploads through a proxy, upload accepts
# raw data as well as a filename
#
# Copyright 2005 Brian "Beej Jorgensen" Hall <beej@beej.us>
#
# This work is licensed under the Creative Commons
# Attribution License. To view a copy of this license,
# visit http://creativecommons.org/licenses/by/2.5/ or send
# a letter to Creative Commons, 543 Howard Street, 5th
# Floor, San Francisco, California, 94105, USA.
#
# This license says that I must be credited for any derivative works.
# You do not need to credit me to simply use the FlickrAPI classes in
# your Python scripts--you only need to credit me if you're taking this
# FlickrAPI class and modifying it or redistributing it.
#
# Previous versions of this API were granted to the public domain.
# You're free to use those as you please.
#
# Beej Jorgensen, Maintainer, November 2005
# beej@beej.us
#
from __future__ import print_function
import sys
import md5
import string
import urllib
import urllib2
import mimetools
import httplib
import os.path
import xml.dom.minidom
########################################################################
# Exceptions
########################################################################
class UploadException(Exception):
    """Raised by FlickrAPI.upload() when neither or both of
    filename/jpegData are supplied."""
    pass
########################################################################
# XML functionality
########################################################################
#-----------------------------------------------------------------------
class XMLNode:
    """XMLNode -- generic class for holding an XML node

    xmlStr = \"\"\"<xml foo="32">
    <name bar="10">Name0</name>
    <name bar="11" baz="12">Name1</name>
    </xml>\"\"\"

    f = XMLNode.parseXML(xmlStr)
    print(f.elementName)           # xml
    print(f['foo'])                # 32
    print(f.name)                  # [<name XMLNode>, <name XMLNode>]
    print(f.name[0].elementName)   # name
    print(f.name[0]["bar"])        # 10
    print(f.name[0].elementText)   # Name0
    print(f.name[1].elementName)   # name
    print(f.name[1]["bar"])        # 11
    print(f.name[1]["baz"])        # 12
    """
    def __init__(self):
        """Construct an empty XML node."""
        self.elementName = ""   # tag name
        self.elementText = ""   # concatenated text content
        self.attrib = {}        # attribute name -> value
        self.xml = ""           # raw XML (root node only, if requested)
    def __setitem__(self, key, item):
        """Store a node's attribute in the attrib hash."""
        self.attrib[key] = item
    def __getitem__(self, key):
        """Retrieve a node's attribute from the attrib hash.

        Returns the string "null" for attributes that are not present;
        callers rely on this, so a KeyError is never raised.
        """
        # Catch only the expected missing-key error; the previous bare
        # "except:" would also have swallowed unrelated exceptions
        # such as KeyboardInterrupt.
        try:
            return self.attrib[key]
        except KeyError:
            return "null"
    #-----------------------------------------------------------------------
    def parseXML(cls, xmlStr, storeXML=False):
        """Convert an XML string into a nice instance tree of XMLNodes.

        xmlStr -- the XML to parse
        storeXML -- if True, stores the XML string in the root XMLNode.xml
        """
        def __parseXMLElement(element, thisNode):
            """Recursive call to process this XMLNode."""
            thisNode.elementName = element.nodeName
            # add element attributes as attributes to this node
            for i in range(element.attributes.length):
                an = element.attributes.item(i)
                thisNode[an.name] = an.nodeValue
            for a in element.childNodes:
                if a.nodeType == xml.dom.Node.ELEMENT_NODE:
                    child = XMLNode()
                    # Children sharing a tag name are collected into a
                    # list attribute named after the tag (local name
                    # avoids shadowing the list builtin).
                    children = getattr(thisNode, a.nodeName, None)
                    if children is None:
                        children = []
                        setattr(thisNode, a.nodeName, children)
                    children.append(child)
                    __parseXMLElement(a, child)
                elif a.nodeType == xml.dom.Node.TEXT_NODE:
                    # Text may be split across several DOM nodes;
                    # accumulate it all.
                    thisNode.elementText += a.nodeValue
            return thisNode
        dom = xml.dom.minidom.parseString(xmlStr)
        # get the root
        rootNode = XMLNode()
        if storeXML:
            rootNode.xml = xmlStr
        return __parseXMLElement(dom.firstChild, rootNode)
    parseXML = classmethod(parseXML)
########################################################################
# Flickr functionality
########################################################################
#-----------------------------------------------------------------------
class FlickrAPI:
    """Encapsulated flickr functionality.

    Example usage:

      flickr = FlickrAPI(flickrAPIKey, flickrSecret)
      rsp = flickr.auth_checkToken(api_key=flickrAPIKey, auth_token=token)

    NOTE(review): this class is Python 2 only (md5, urllib/urllib2,
    mimetools, file(), dict.keys().sort()).
    """
    # Service endpoint locations.
    flickrHost = "api.flickr.com"
    flickrRESTForm = "/services/rest/"
    flickrAuthForm = "/services/auth/"
    flickrUploadForm = "/services/upload/"
    #-------------------------------------------------------------------
    def __init__(self, apiKey, secret):
        """Construct a new FlickrAPI instance for a given API key and secret."""
        self.apiKey = apiKey
        self.secret = secret
        # method name -> generated handler function (see __getattr__)
        self.__handlerCache={}
    #-------------------------------------------------------------------
    def __sign(self, data):
        """Calculate the flickr signature for a set of params.

        data -- a hash of all the params and values to be hashed, e.g.
                {"api_key":"AAAA", "auth_token":"TTTT"}
        """
        # Signature = md5(secret + key1value1key2value2...) with keys
        # concatenated in sorted order.
        dataName = self.secret
        keys = data.keys()
        keys.sort()
        for a in keys: dataName += (a + data[a])
        #print dataName
        hash = md5.new()
        hash.update(dataName)
        return hash.hexdigest()
    #-------------------------------------------------------------------
    def __getattr__(self, method, **arg):
        """Handle all the flickr API calls.

        This is Michele Campeotto's cleverness, wherein he writes a
        general handler for methods not defined, and assumes they are
        flickr methods.  He then converts them to a form to be passed as
        the method= parameter, and goes from there.

        http://micampe.it/things/flickrclient

        My variant is the same basic thing, except it tracks if it has
        already created a handler for a specific call or not.

        example usage:

          flickr.auth_getFrob(api_key="AAAAAA")
          rsp = flickr.favorites_getList(api_key=flickrAPIKey, \\
                  auth_token=token)
        """
        # NOTE(review): the **arg in this signature is never filled by
        # attribute lookup; kwargs go to the generated handler instead.
        if method not in self.__handlerCache:
            # Build (and cache) a handler that POSTs a signed request
            # to the REST endpoint and parses the XML response.
            def handler(_self = self, _method = method, **arg):
                # "auth_getFrob" becomes "flickr.auth.getFrob"
                _method = "flickr." + _method.replace("_", ".")
                url = "http://" + FlickrAPI.flickrHost + \
                    FlickrAPI.flickrRESTForm
                arg["method"] = _method
                postData = urllib.urlencode(arg) + "&api_sig=" + \
                    _self.__sign(arg)
                #print "--url---------------------------------------------"
                #print url
                #print "--postData----------------------------------------"
                #print postData
                f = urllib.urlopen(url, postData)
                data = f.read()
                #print "--response----------------------------------------"
                #print data
                f.close()
                return XMLNode.parseXML(data, True)
            self.__handlerCache[method] = handler;
        return self.__handlerCache[method]
    #-------------------------------------------------------------------
    def __getAuthURL(self, perms, frob):
        """Return the authorization URL to get a token.

        This is the URL the app will launch a browser toward if it
        needs a new token.

        perms -- "read", "write", or "delete"
        frob -- picked up from an earlier call to FlickrAPI.auth_getFrob()
        """
        data = {"api_key": self.apiKey, "frob": frob, "perms": perms}
        data["api_sig"] = self.__sign(data)
        return "http://%s%s?%s" % (FlickrAPI.flickrHost, \
            FlickrAPI.flickrAuthForm, urllib.urlencode(data))
    #-------------------------------------------------------------------
    def upload(self, filename=None, jpegData=None, **arg):
        """Upload a file to flickr.

        Be extra careful you spell the parameters correctly, or you will
        get a rather cryptic "Invalid Signature" error on the upload!

        Supported parameters:

        One of filename or jpegData must be specified by name when
        calling this method:

        filename -- name of a file to upload
        jpegData -- array of jpeg data to upload

        api_key
        auth_token
        title
        description
        tags -- space-delimited list of tags, "tag1 tag2 tag3"
        is_public -- "1" or "0"
        is_friend -- "1" or "0"
        is_family -- "1" or "0"
        """
        # Exactly one of filename/jpegData must be given.
        if filename == None and jpegData == None or \
            filename != None and jpegData != None:
            raise UploadException("filename OR jpegData must be specified")
        # verify key names
        for a in arg.keys():
            if a != "api_key" and a != "auth_token" and a != "title" and \
                a != "description" and a != "tags" and a != "is_public" and \
                a != "is_friend" and a != "is_family":
                sys.stderr.write("FlickrAPI: warning: unknown parameter " \
                    "\"%s\" sent to FlickrAPI.upload\n" % (a))
        arg["api_sig"] = self.__sign(arg)
        url = "http://" + FlickrAPI.flickrHost + FlickrAPI.flickrUploadForm
        # construct POST data as multipart/form-data
        boundary = mimetools.choose_boundary()
        body = ""
        # required params
        for a in ('api_key', 'auth_token', 'api_sig'):
            body += "--%s\r\n" % (boundary)
            body += "Content-Disposition: form-data; name=\""+a+"\"\r\n\r\n"
            body += "%s\r\n" % (arg[a])
        # optional params
        for a in ('title', 'description', 'tags', 'is_public', \
            'is_friend', 'is_family'):
            if a in arg:
                body += "--%s\r\n" % (boundary)
                body += "Content-Disposition: form-data; name=\""+a+"\"\r\n\r\n"
                body += "%s\r\n" % (arg[a])
        # photo part header; the actual bytes are appended below
        body += "--%s\r\n" % (boundary)
        body += "Content-Disposition: form-data; name=\"photo\";"
        body += " filename=\"%s\"\r\n" % filename
        body += "Content-Type: image/jpeg\r\n\r\n"
        #print body
        if filename != None:
            fp = file(filename, "rb")
            data = fp.read()
            fp.close()
        else:
            data = jpegData
        postData = body.encode("utf_8") + data + \
            ("--%s--" % (boundary)).encode("utf_8")
        request = urllib2.Request(url)
        request.add_data(postData)
        request.add_header("Content-Type", \
            "multipart/form-data; boundary=%s" % boundary)
        response = urllib2.urlopen(request)
        rspXML = response.read()
        return XMLNode.parseXML(rspXML)
    #-----------------------------------------------------------------------
    #@classmethod
    def testFailure(cls, rsp, exit=True):
        """Exit app if the rsp XMLNode indicates failure."""
        if rsp['stat'] == "fail":
            sys.stderr.write("%s\n" % (cls.getPrintableError(rsp)))
            if exit: sys.exit(1)
    testFailure = classmethod(testFailure)
    #-----------------------------------------------------------------------
    #@classmethod
    def getPrintableError(cls, rsp):
        """Return a printed error message string."""
        return "%s: error %s: %s" % (rsp.elementName, \
            cls.getRspErrorCode(rsp), cls.getRspErrorMsg(rsp))
    getPrintableError = classmethod(getPrintableError)
    #-----------------------------------------------------------------------
    #@classmethod
    def getRspErrorCode(cls, rsp):
        """Return the error code of a response, or 0 if no error."""
        if rsp['stat'] == "fail":
            return rsp.err[0]['code']
        return 0
    getRspErrorCode = classmethod(getRspErrorCode)
    #-----------------------------------------------------------------------
    #@classmethod
    def getRspErrorMsg(cls, rsp):
        """Return the error message of a response, or "Success" if no error."""
        if rsp['stat'] == "fail":
            return rsp.err[0]['msg']
        return "Success"
    getRspErrorMsg = classmethod(getRspErrorMsg)
    #-----------------------------------------------------------------------
    def __getCachedTokenPath(self):
        """Return the directory holding the app data (~/.flickr/<apiKey>)."""
        return os.path.expanduser(os.path.sep.join(["~", ".flickr", \
            self.apiKey]))
    #-----------------------------------------------------------------------
    def __getCachedTokenFilename(self):
        """Return the full pathname of the cached token file."""
        return os.path.sep.join([self.__getCachedTokenPath(), "auth.xml"])
    #-----------------------------------------------------------------------
    def __getCachedToken(self):
        """Read and return a cached token, or None if not found.

        The token is read from the cached token file, which is basically the
        entire RSP response containing the auth element.
        """
        try:
            f = file(self.__getCachedTokenFilename(), "r")
            data = f.read()
            f.close()
            rsp = XMLNode.parseXML(data)
            return rsp.auth[0].token[0].elementText
        except IOError:
            # No cached token yet; caller will mint a fresh one.
            return None
    #-----------------------------------------------------------------------
    def __setCachedToken(self, xml):
        """Cache a token for later use.

        The cached tag is stored by simply saving the entire RSP response
        containing the auth element.
        """
        path = self.__getCachedTokenPath()
        if not os.path.exists(path):
            os.makedirs(path)
        f = file(self.__getCachedTokenFilename(), "w")
        f.write(xml)
        f.close()
    #-----------------------------------------------------------------------
    def getToken(self, perms="read", browser="lynx"):
        """Get a token either from the cache, or make a new one from the
        frob.

        This first attempts to find a token in the user's token cache on
        disk.

        If that fails (or if the token is no longer valid based on
        flickr.auth.checkToken) a new frob is acquired.  The frob is
        validated by having the user log into flickr (with lynx), and
        subsequently a valid token is retrieved.

        The newly minted token is then cached locally for the next run.

        perms--"read", "write", or "delete"
        browser--whatever browser should be used in the system() call
        """
        # see if we have a saved token
        token = self.__getCachedToken()
        # see if it's valid
        if token != None:
            rsp = self.auth_checkToken(api_key=self.apiKey, auth_token=token)
            if rsp['stat'] != "ok":
                token = None
            else:
                # see if we have enough permissions
                # (read < write < delete)
                tokenPerms = rsp.auth[0].perms[0].elementText
                if tokenPerms == "read" and perms != "read": token = None
                elif tokenPerms == "write" and perms == "delete": token = None
        # get a new token if we need one
        if token == None:
            # get the frob
            rsp = self.auth_getFrob(api_key=self.apiKey)
            self.testFailure(rsp)
            frob = rsp.frob[0].elementText
            # validate online: user logs in via an external browser
            os.system("%s '%s'" % (browser, self.__getAuthURL(perms, frob)))
            # get a token
            rsp = self.auth_getToken(api_key=self.apiKey, frob=frob)
            self.testFailure(rsp)
            token = rsp.auth[0].token[0].elementText
            # store the auth info for next time
            self.__setCachedToken(rsp.xml)
        return token
########################################################################
# App functionality
########################################################################
def main(argv):
    """Demo driver: authenticate against flickr and print the ids and
    titles of the caller's favorite photos."""
    # flickr auth information:
    flickrAPIKey = "fa33550d413b36b3fddc473a931a3b3b"  # API key
    flickrSecret = "7fd481bff0916055"  # shared "secret"
    # make a new FlickrAPI instance
    fapi = FlickrAPI(flickrAPIKey, flickrSecret)
    # do the whole whatever-it-takes to get a valid token:
    token = fapi.getToken(browser="chrome")
    # fetch the favorites, bail out on an error response
    rsp = fapi.favorites_getList(api_key=flickrAPIKey, auth_token=token)
    fapi.testFailure(rsp)
    # and print them
    for fav in rsp.photos[0].photo:
        print("%10s: %s" % (fav['id'], fav['title'].encode("ascii", "replace")))
    # See FlickrAPI.upload() for uploading, e.g.:
    #   fapi.upload(filename="foo.jpg", api_key=flickrAPIKey,
    #               auth_token=token, title="...", description="...",
    #               tags="tag1 tag2 tag3", is_public="1")
    return 0


# run the main if we're not being imported:
if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
tpltnt/SimpleCV
|
SimpleCV/MachineLearning/query_imgs/flickrapi2.py
|
Python
|
bsd-3-clause
| 18,516
|
[
"Brian",
"VisIt"
] |
a87de4e9eae98961f27aae74768166932f94e9ebad74a3c54634bc6ddd3300d2
|
"""
====================================================================
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
Provides routines for k-means clustering, generating code books
from k-means models, and quantizing vectors by comparing them with
centroids in a code book.
.. autosummary::
:toctree: generated/
whiten -- Normalize a group of observations so each feature has unit variance
vq -- Calculate code book membership of a set of observation vectors
kmeans -- Performs k-means on a set of observation vectors forming k clusters
kmeans2 -- A different implementation of k-means with more methods
-- for initializing centroids
Background information
======================
The k-means algorithm takes as input the number of clusters to
generate, k, and a set of observation vectors to cluster. It
returns a set of centroids, one for each of the k clusters. An
observation vector is classified with the cluster number or
centroid index of the centroid closest to it.
A vector v belongs to cluster i if it is closer to centroid i than
any other centroids. If v belongs to i, we say centroid i is the
dominating centroid of v. The k-means algorithm tries to
minimize distortion, which is defined as the sum of the squared distances
between each observation vector and its dominating centroid. Each
step of the k-means algorithm refines the choices of centroids to
reduce distortion. The change in distortion is used as a
stopping criterion: when the change is lower than a threshold, the
k-means algorithm is not making sufficient progress and
terminates. One can also define a maximum number of iterations.
Since vector quantization is a natural application for k-means,
information theory terminology is often used. The centroid index
or cluster index is also referred to as a "code" and the table
mapping codes to centroids and vice versa is often referred to as a
"code book". The result of k-means, a set of centroids, can be
used to quantize vectors. Quantization aims to find an encoding of
vectors that reduces the expected distortion.
All routines expect obs to be a M by N array where the rows are
the observation vectors. The codebook is a k by N array where the
i'th row is the centroid of code word i. The observation vectors
and centroids have the same feature dimension.
As an example, suppose we wish to compress a 24-bit color image
(each pixel is represented by one byte for red, one for blue, and
one for green) before sending it over the web. By using a smaller
8-bit encoding, we can reduce the amount of data by two
thirds. Ideally, the colors for each of the 256 possible 8-bit
encoding values should be chosen to minimize distortion of the
color. Running k-means with k=256 generates a code book of 256
codes, which fills up all possible 8-bit sequences. Instead of
sending a 3-byte value for each pixel, the 8-bit centroid index
(or code word) of the dominating centroid is transmitted. The code
book is also sent over the wire so each 8-bit code can be
translated back to a 24-bit pixel value representation. If the
image of interest was of an ocean, we would expect many 24-bit
blues to be represented by 8-bit codes. If it was an image of a
human face, more flesh tone colors would be represented in the
code book.
"""
from __future__ import division, print_function, absolute_import
__docformat__ = 'restructuredtext'
__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
# TODO:
# - implements high level method for running several times k-means with
# different initialization
# - warning: what happens if different number of clusters ? For now, emit a
# warning, but it is not great, because I am not sure it really make sense to
# succeed in this case (maybe an exception is better ?)
import warnings
import numpy as np
from scipy._lib._util import _asarray_validated
from scipy._lib import _numpy_compat
from . import _vq
class ClusterError(Exception):
    """Raised when k-means produces an empty cluster and missing='raise'."""
    pass
def whiten(obs, check_finite=True):
    """
    Normalize a group of observations on a per feature basis.

    Each column (feature) of `obs` is divided by its standard deviation
    across all rows (observations), giving every feature unit variance.
    This rescaling is recommended before running k-means.

    Parameters
    ----------
    obs : ndarray
        Each row of the array is an observation; the columns are the
        features seen during each observation.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    result : ndarray
        Contains the values in `obs` scaled by the standard deviation
        of each column. Columns with zero standard deviation are left
        unchanged (a RuntimeWarning is emitted).

    Examples
    --------
    >>> from scipy.cluster.vq import whiten
    >>> features = np.array([[1.9, 2.3, 1.7],
    ...                      [1.5, 2.5, 2.2],
    ...                      [0.8, 0.6, 1.7,]])
    >>> whiten(features)
    array([[ 4.17944278,  2.69811351,  7.21248917],
           [ 3.29956009,  2.93273208,  9.33380951],
           [ 1.75976538,  0.7038557 ,  7.21248917]])
    """
    arr = _asarray_validated(obs, check_finite=check_finite)
    scale = np.std(arr, axis=0)
    degenerate = scale == 0
    if np.any(degenerate):
        # avoid division by zero; the affected columns pass through unscaled
        scale[degenerate] = 1.0
        warnings.warn("Some columns have standard deviation zero. "
                      "The values of these columns will not change.",
                      RuntimeWarning)
    return arr / scale
def vq(obs, code_book, check_finite=True):
    """
    Assign codes from a code book to observations.

    Every row of the 'M' by 'N' `obs` array is compared against the
    centroids in `code_book` and receives the index of the closest one.
    Observations should be whitened first (see `whiten`).

    Parameters
    ----------
    obs : ndarray
        Each row of the 'M' x 'N' array is an observation; columns are
        the features. Must be whitened first.
    code_book : ndarray
        Usually produced by k-means. Each row is one code; the columns
        are the code's features.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    code : ndarray
        A length M array holding the code book index for each observation.
    dist : ndarray
        The distortion (distance) between each observation and its nearest
        code.

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq
    >>> code_book = array([[1.,1.,1.],
    ...                    [2.,2.,2.]])
    >>> features  = array([[ 1.9,2.3,1.7],
    ...                    [ 1.5,2.5,2.2],
    ...                    [ 0.8,0.6,1.7]])
    >>> vq(features,code_book)
    (array([1, 1, 0],'i'), array([ 0.43588989,  0.73484692,  0.83066239]))
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    code_book = _asarray_validated(code_book, check_finite=check_finite)
    common = np.common_type(obs, code_book)
    obs_c = obs.astype(common, copy=False)
    book_c = code_book if code_book.dtype == common else code_book.astype(common)
    # the compiled kernel only handles single/double precision; everything
    # else falls back to the pure-Python implementation
    if common in (np.float32, np.float64):
        return _vq.vq(obs_c, book_c)
    return py_vq(obs, code_book)
def py_vq(obs, code_book, check_finite=True):
    """ Python version of vq algorithm.

    Computes the Euclidean distance between each observation and every
    row of `code_book`, returning the index of (and distance to) the
    nearest code for each observation.

    Parameters
    ----------
    obs : ndarray
        Expects a rank 2 array. Each row is one observation.
    code_book : ndarray
        Code book to use. Same format than obs. Should have same number of
        features (eg columns) than obs.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Default: True

    Returns
    -------
    code : ndarray
        code[i] gives the label of the ith observation; its code is
        code_book[code[i]].
    mind_dist : ndarray
        min_dist[i] gives the distance between the ith observation and its
        corresponding code.

    Notes
    -----
    Slower than the C version but works for all input types; used as a
    fallback when the inputs cannot be handled by the compiled kernel.
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    code_book = _asarray_validated(code_book, check_finite=check_finite)

    # rank-1 input is delegated to the (currently disabled) 1-D routine
    if np.ndim(obs) == 1:
        if np.ndim(code_book) != 1:
            raise ValueError(
                "Observation and code_book should have the same rank")
        return _py_vq_1d(obs, code_book)

    (n, d) = np.shape(obs)
    if np.ndim(obs) != np.ndim(code_book):
        raise ValueError("Observation and code_book should have the same rank")
    if code_book.shape[1] != d:
        raise ValueError("Code book(%d) and obs(%d) should have the same "
                         "number of features (eg columns)" %
                         (code_book.shape[1], d))

    codes = np.zeros(n, dtype=int)
    best_sq = np.zeros(n)
    for row in range(n):
        # squared distances from this observation to every code
        sq = np.sum((obs[row] - code_book) ** 2, 1)
        winner = np.argmin(sq)
        codes[row] = winner
        best_sq[row] = sq[winner]
    return codes, np.sqrt(best_sq)
def _py_vq_1d(obs, code_book):
    """ Python version of vq algorithm for rank 1 only.

    Parameters
    ----------
    obs : ndarray
        Expects a rank 1 array. Each item is one observation.
    code_book : ndarray
        Code book to use. Same format than obs. Should rank 1 too.

    Raises
    ------
    RuntimeError
        Always. The implementation is known-buggy and disabled.

    Notes
    -----
    BUGFIX: the original kept an unreachable (and incorrect) body after the
    unconditional ``raise``, including a stray debug ``print``; that dead
    code has been removed. The raise itself is preserved so callers see the
    exact same behavior.
    """
    raise RuntimeError("_py_vq_1d buggy, do not use rank 1 arrays for now")
def py_vq2(obs, code_book, check_finite=True):
    """2nd Python version of vq algorithm.

    Vectorized variant: broadcasts the observation/code difference into a
    single (codes x observations x features) array and reduces it, instead
    of looping over observations.

    Parameters
    ----------
    obs : ndarray
        Expect a rank 2 array. Each row is one observation.
    code_book : ndarray
        Code book to use. Same format than obs. Should have same number of
        features (eg columns) than obs.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Default: True

    Returns
    -------
    code : ndarray
        code[i] gives the label of the ith observation; its code is
        code_book[code[i]].
    mind_dist : ndarray
        min_dist[i] gives the distance between the ith observation and its
        corresponding code.

    Notes
    -----
    Fast when the codebook is small, but requires N x M x O storage
    (N = observations, M = features, O = codes), so it becomes a memory
    hog for large codebooks.
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    code_book = _asarray_validated(code_book, check_finite=check_finite)
    n_feat = np.shape(obs)[1]

    # code books and observations should have same number of features
    if not n_feat == code_book.shape[1]:
        raise ValueError("""
    code book(%d) and obs(%d) should have the same
    number of features (eg columns)""" % (code_book.shape[1], n_feat))

    delta = obs[np.newaxis, :, :] - code_book[:, np.newaxis, :]
    dist = np.sqrt(np.sum(delta * delta, -1))
    code = dist.argmin(axis=0)
    min_dist = dist.min(axis=0)
    return code, min_dist
def _kmeans(obs, guess, thresh=1e-5):
    """ "raw" version of k-means: iterate from a given initial codebook.

    Parameters
    ----------
    obs : ndarray
        M by N array of (whitened) observations, one per row.
    guess : ndarray
        Initial k by N codebook. Copied; not modified in place.
    thresh : float, optional
        Stop when the decrease in mean distortion between two consecutive
        iterations is <= thresh.

    Returns
    -------
    code_book : ndarray
        The lowest distortion codebook found. Clusters that lose all of
        their members are dropped, so fewer than k rows may come back.
    avg_dist : float
        The average distance an observation is from a code in the book.
        Lower means the code_book matches the data better.

    See Also
    --------
    kmeans : wrapper around k-means
    """
    code_book = np.array(guess, copy=True)
    diff = np.inf
    avg_dist = []
    while diff > thresh:
        nc = code_book.shape[0]
        # compute membership and distances between obs and code_book
        obs_code, distort = vq(obs, code_book)
        avg_dist.append(np.mean(distort, axis=-1))
        # recalc code_book as centroids of associated obs, dropping any
        # cluster that has no members left.
        # BUGFIX: removed a redundant `if diff > thresh:` guard here -- it
        # was always true inside the loop (same condition as the `while`).
        code_book, has_members = _vq.update_cluster_means(obs, obs_code, nc)
        code_book = code_book.compress(has_members, axis=0)
        if len(avg_dist) > 1:
            diff = avg_dist[-2] - avg_dist[-1]
    return code_book, avg_dist[-1]
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True):
    """
    Performs k-means on a set of observation vectors forming k clusters.

    The k-means algorithm adjusts the centroids until sufficient
    progress cannot be made, i.e. the change in distortion since
    the last iteration is less than some threshold. Distortion is
    defined as the sum of the squared differences between the
    observations and the corresponding centroid.

    Parameters
    ----------
    obs : ndarray
        Each row of the M by N array is an observation vector. The
        columns are the features seen during each observation.
        The features must be whitened first with the `whiten` function.
    k_or_guess : int or ndarray
        The number of centroids to generate, or a k by N array of
        initial centroids. When an int, the initial centroids are chosen
        by randomly selecting observations and the best of `iter`
        restarts is kept.
    iter : int, optional
        The number of times to run k-means, returning the codebook
        with the lowest distortion. Ignored when an initial-centroid
        array is given. This is NOT the number of k-means iterations.
    thresh : float, optional
        Terminates the k-means algorithm if the change in distortion
        since the last iteration is less than or equal to thresh.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Default: True

    Returns
    -------
    codebook : ndarray
        A k by N array of k centroids (lowest distortion seen, not
        necessarily the global minimum).
    distortion : float
        The distortion between the observations passed and the
        centroids generated.

    Raises
    ------
    ValueError
        If `iter` < 1, if a scalar `k_or_guess` is not a positive
        integer, or if an initial-guess array is empty.

    See Also
    --------
    kmeans2 : a different implementation of k-means clustering
        with more methods for generating initial centroids.
    whiten : must be called prior to passing an observation matrix
        to kmeans.
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    if int(iter) < 1:
        raise ValueError('iter must be at least 1.')

    # Determine whether a count (scalar) or an initial guess (array) was
    # passed; int() raises TypeError for arrays.
    k = None
    guess = None
    try:
        k = int(k_or_guess)
    except TypeError:
        guess = _asarray_validated(k_or_guess, check_finite=check_finite)

    if guess is not None:
        if guess.size < 1:
            raise ValueError("Asked for 0 cluster ? initial book was %s" %
                             guess)
        return _kmeans(obs, guess, thresh=thresh)

    if k != k_or_guess:
        raise ValueError('if k_or_guess is a scalar, it must be an integer')
    # BUGFIX: the original re-assigned `k = k_or_guess` here, undoing the
    # int() conversion (e.g. a float 3.0 would reach np.random.randint).
    if k < 1:
        raise ValueError("Asked for 0 cluster ? ")

    best_dist = np.inf
    No = obs.shape[0]
    for i in range(iter):
        # the initial code book is randomly selected from observations
        k_random_indices = np.random.randint(0, No, k)
        # BUGFIX: was `_numpy_compat.unique` -- an obsolete shim; np.unique
        # supports return_counts since NumPy 1.9.
        if np.any(np.unique(k_random_indices, return_counts=True)[1] > 1):
            # randint can give duplicates, which is incorrect. Only fix
            # the issue if it occurs, to not change results for users who
            # use a random seed and get no duplicates.
            k_random_indices = np.random.permutation(No)[:k]
        guess = np.take(obs, k_random_indices, 0)
        book, dist = _kmeans(obs, guess, thresh=thresh)
        if dist < best_dist:
            best_book = book
            best_dist = dist
    return best_book, best_dist
def _kpoints(data, k):
    """Pick k points at random in data (one row = one observation).

    This is done by taking the k first values of a random permutation of
    1..N where N is the number of observations.

    Parameters
    ----------
    data : ndarray
        Expect a rank 1 or 2 array. Rank 1 is assumed to describe one
        dimensional data, rank 2 multidimensional data, in which case one
        row is one observation.
    k : int
        Number of samples to generate.

    Returns
    -------
    x : ndarray
        A copy of k randomly chosen observations from `data`.
    """
    if data.ndim > 1:
        n = data.shape[0]
    else:
        n = data.size
    p = np.random.permutation(n)
    # BUGFIX: was `data[p[:k], :]`, which raises IndexError for the rank-1
    # input the docstring promises to support; plain row indexing handles
    # both rank 1 and rank 2 identically.
    return data[p[:k]].copy()
def _krandinit(data, k):
    """Returns k samples of a random variable whose parameters depend on data.

    Draws k observations from a Gaussian whose mean and covariance are
    estimated from `data`. Three cases are handled: 1-D data, full-rank
    multidimensional data, and rank-deficient data (fewer observations
    than features), which is sampled in the SVD basis instead.

    Parameters
    ----------
    data : ndarray
        Expect a rank 1 or 2 array. Rank 1 is assumed to describe one
        dimensional data, rank 2 multidimensional data, in which case one
        row is one observation.
    k : int
        Number of samples to generate.
    """
    if np.ndim(data) == 1:
        # 1-D: sample from N(mean, var) estimated from the data
        mu = np.mean(data)
        variance = np.cov(data)
        draw = np.random.randn(k)
        draw *= np.sqrt(variance)
        draw += mu
        return draw

    if data.shape[1] > data.shape[0]:
        # fewer observations than features: the covariance matrix is rank
        # deficient, so sample in the basis given by the data's SVD
        mu = np.mean(data, axis=0)
        _, s, vh = np.linalg.svd(data - mu, full_matrices=False)
        draw = np.random.randn(k, s.size)
        sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
        return np.dot(draw, sVh) + mu

    # general full-rank case: Gaussian(mu, cov) via a Cholesky factor
    mu = np.mean(data, 0)
    cov = np.atleast_2d(np.cov(data, rowvar=0))
    draw = np.random.randn(k, mu.size)
    return np.dot(draw, np.linalg.cholesky(cov).T) + mu
# Registry of initialization strategies accepted by kmeans2's `minit` option.
_valid_init_meth = {'random': _krandinit, 'points': _kpoints}
def _missing_warn():
    """Emit a warning about an empty cluster (missing='warn' policy)."""
    msg = ("One of the clusters is empty. "
           "Re-run kmean with a different initialization.")
    warnings.warn(msg)
def _missing_raise():
    """Abort on an empty cluster by raising (missing='raise' policy)."""
    msg = ("One of the clusters is empty. "
           "Re-run kmean with a different initialization.")
    raise ClusterError(msg)
# Registry of empty-cluster policies accepted by kmeans2's `missing` option.
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
            missing='warn', check_finite=True):
    """
    Classify a set of observations into k clusters using the k-means algorithm.

    The algorithm attempts to minimize the Euclidian distance between
    observations and centroids. Several initialization methods are
    included.

    Parameters
    ----------
    data : ndarray
        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
        'M' array of 'M' one-dimensional observations.
    k : int or ndarray
        The number of clusters to form as well as the number of
        centroids to generate. If `minit` initialization string is
        'matrix', or if a ndarray is given instead, it is
        interpreted as initial cluster to use instead.
    iter : int, optional
        Number of iterations of the k-means algorithm to run. Note
        that this differs in meaning from the iters parameter to
        the kmeans function.
    thresh : float, optional
        (not used yet)
    minit : str, optional
        Method for initialization. Available methods are 'random',
        'points', and 'matrix':
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        'points': choose k observations (rows) at random from data for
        the initial centroids.
        'matrix': interpret the k parameter as a k by M (or length k
        array for one-dimensional data) array of initial centroids.
    missing : str, optional
        Method to deal with empty clusters. Available methods are
        'warn' and 'raise':
        'warn': give a warning and continue.
        'raise': raise an ClusterError and terminate the algorithm.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Default: True

    Returns
    -------
    centroid : ndarray
        A 'k' by 'N' array of centroids found at the last iteration of
        k-means.
    label : ndarray
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    """
    data = _asarray_validated(data, check_finite=check_finite)
    if missing not in _valid_miss_meth:
        # BUGFIX: error message previously read "Unkown"
        raise ValueError("Unknown missing method: %s" % str(missing))
    # If data is rank 1, then we have 1 dimension problem.
    nd = np.ndim(data)
    if nd == 1:
        d = 1
    elif nd == 2:
        d = data.shape[1]
    else:
        raise ValueError("Input of rank > 2 not supported")
    if np.size(data) < 1:
        raise ValueError("Input has 0 items.")

    # If k is not a single value, then it should be compatible with data's
    # shape
    if np.size(k) > 1 or minit == 'matrix':
        if not nd == np.ndim(k):
            raise ValueError("k is not an int and has not same rank than data")
        if d == 1:
            nc = len(k)
        else:
            (nc, dc) = k.shape
            if not dc == d:
                # BUGFIX: this message was built with a fragile backslash
                # line-continuation inside the string literal; normalized.
                raise ValueError("k is not an int and has not same rank than "
                                 "data")
        clusters = k.copy()
    else:
        try:
            nc = int(k)
        except TypeError:
            raise ValueError("k (%s) could not be converted to an integer " % str(k))
        if nc < 1:
            raise ValueError("kmeans2 for 0 clusters ? (k was %s)" % str(k))
        if not nc == k:
            warnings.warn("k was not an integer, was converted.")
        try:
            init = _valid_init_meth[minit]
        except KeyError:
            raise ValueError("unknown init method %s" % str(minit))
        clusters = init(data, k)

    if int(iter) < 1:
        raise ValueError("iter = %s is not valid. iter must be a positive integer." % iter)
    return _kmeans2(data, clusters, iter, nc, _valid_miss_meth[missing])
def _kmeans2(data, code, niter, nc, missing):
    """ "raw" version of kmeans2. Do not use directly.

    Runs exactly `niter` classify/update rounds starting from the given
    codebook. `missing` is a callback invoked whenever a cluster ends up
    empty (it may warn or raise).
    """
    for _ in range(niter):
        # nearest centroid for every observation under the current codebook
        label = vq(data, code)[0]
        # centroids recomputed from the new assignment
        new_code, has_members = _vq.update_cluster_means(data, label, nc)
        if not has_members.all():
            missing()
            # Set the empty clusters to their previous positions
            new_code[~has_members] = code[~has_members]
        code = new_code
    return code, label
|
asnorkin/sentiment_analysis
|
site/lib/python2.7/site-packages/scipy/cluster/vq.py
|
Python
|
mit
| 28,608
|
[
"Gaussian"
] |
3dd9d24dccb185faf952b9d9658fa8bf8ecf9b48cd3998fb6bc1812c1e7c1374
|
import MDAnalysis
# Optional dependencies: the benchmarks are skipped by asv at setup time if
# these are unavailable.
# BUGFIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt; only
# an ImportError should be tolerated here.
try:
    from MDAnalysisTests.datafiles import TPR, XTC
except ImportError:
    pass
try:
    from MDAnalysis.analysis.rdf import InterRDF
except ImportError:
    pass
class SimpleRdfBench(object):
    """Benchmarks for MDAnalysis.analysis.rdf
    """

    # asv parameter grid: each benchmark method runs for every combination
    # of (nbins, range_val, natoms)
    params = ([20, 75, 200],
              [[0, 5], [0, 15], [0, 20]],
              [1, 100, 1000, 10000])

    param_names = ['nbins',
                   'range_val',
                   'natoms']

    def setup(self, nbins, range_val, natoms):
        # selection string for water oxygens; truncated to `natoms` atoms
        self.sel_str = 'name OW'

        self.u = MDAnalysis.Universe(TPR, XTC)

        try:
            self.sel = self.u.select_atoms(self.sel_str)[:natoms]
        except AttributeError:
            # fall back to the pre-0.11 camelCase API
            self.sel = self.u.selectAtoms(self.sel_str)[:natoms]

        # do not include initialization of the
        # InterRDF object in the benchmark itself
        self.rdf = InterRDF(g1=self.sel,
                            g2=self.sel,
                            nbins=nbins,
                            range=range_val)

    def time_interrdf(self, nbins, range_val, natoms):
        """Benchmark a full trajectory parse
        by MDAnalysis.analysis.rdf.InterRDF
        """
        self.rdf.run()
|
MDAnalysis/mdanalysis
|
benchmarks/benchmarks/analysis/rdf.py
|
Python
|
gpl-2.0
| 1,204
|
[
"MDAnalysis"
] |
6d26ad35ca1403d8d3fdecc36aeea5f6a22f27e9b815888f446701e4d77b75d4
|
import argparse
import h5py
from itertools import count
from mayavi import mlab
from tvtk.api import tvtk
from tvtk.common import configure_input, configure_input_data
def main(hdf5_animation_file):
    """Play a mesh animation stored in an hdf5 file in a Mayavi window.

    Parameters
    ----------
    hdf5_animation_file : str
        Path to an hdf5 file containing datasets 'verts' (per-frame vertex
        positions), 'tris' (triangle indices) and, optionally, per-frame
        'weights' which are printed while playing.
    """
    weights = None
    with h5py.File(hdf5_animation_file, 'r') as f:
        # BUGFIX: Dataset.value was deprecated and removed in h5py 3.0;
        # dset[()] is the supported way to read a whole dataset.
        verts = f['verts'][()]
        tris = f['tris'][()]
        if 'weights' in f:
            weights = f['weights'][()]

    # topology is static; only the point coordinates change per frame
    pd = tvtk.PolyData(points=verts[0], polys=tris)
    normals = tvtk.PolyDataNormals(splitting=False)
    configure_input_data(normals, pd)
    actor = tvtk.Actor(mapper=tvtk.PolyDataMapper())
    configure_input(actor.mapper, normals)
    actor.property.set(edge_color=(0.5, 0.5, 0.5), ambient=0.0,
                       specular=0.15, specular_power=128., shading=True, diffuse=0.8)

    fig = mlab.figure(bgcolor=(1, 1, 1))
    fig.scene.add_actor(actor)

    @mlab.animate(delay=40, ui=False)
    def animation():
        for i in count():
            if weights is not None:
                w_str = ",".join(["%0.2f"] * weights.shape[1])
                # BUGFIX: this was a Python 2 print statement; under
                # Python 3 it parsed as print(...) % tuple(...) and raised
                # a TypeError at runtime.
                print(("Frame %d Weights = " + w_str)
                      % tuple([i] + weights[i].tolist()))
            frame = i % len(verts)
            pd.points = verts[frame]
            fig.scene.render()
            yield

    a = animation()
    fig.scene.z_minus_view()
    mlab.show()
if __name__ == '__main__':
    # command-line entry point: one positional argument, the hdf5 file path
    parser = argparse.ArgumentParser(
        description='Animation viewer for hdf5 mesh animationfiles '
        '(use import scripts to convert other formats to hdf5)')
    parser.add_argument('input_filename')
    args = parser.parse_args()
    main(args.input_filename)
|
tneumann/splocs
|
view_animation.py
|
Python
|
mit
| 1,640
|
[
"Mayavi"
] |
abfad568d5f5f662f1b0eef767a1d4416dcb27a707331d620390d942db02141a
|
import datetime
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import DateTime
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.horizontal_shard import ShardedSession
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import operators
from sqlalchemy.sql import visitors
# db1 is used for id generation. The "pool_threadlocal"
# causes the id_generator() to use the same connection as that
# of an ongoing transaction within db1.
# All four shards are in-memory SQLite databases, so each run of this
# example starts from scratch.
echo = True
db1 = create_engine("sqlite://", echo=echo, pool_threadlocal=True)
db2 = create_engine("sqlite://", echo=echo)
db3 = create_engine("sqlite://", echo=echo)
db4 = create_engine("sqlite://", echo=echo)

# create session function. this binds the shard ids
# to databases within a ShardedSession and returns it.
create_session = sessionmaker(class_=ShardedSession)
create_session.configure(
    shards={
        "north_america": db1,
        "asia": db2,
        "europe": db3,
        "south_america": db4,
    }
)

# mappings and tables
Base = declarative_base()

# we need a way to create identifiers which are unique across all databases.
# one easy way would be to just use a composite primary key, where one value
# is the shard id. but here, we'll show something more "generic", an id
# generation function. we'll use a simplistic "id table" stored in database
# #1. Any other method will do just as well; UUID, hilo, application-specific,
# etc.
ids = Table("ids", Base.metadata, Column("nextid", Integer, nullable=False))
def id_generator(ctx):
    """Allocate the next globally-unique primary key value.

    Reads (with FOR UPDATE) and increments the single-row 'ids' counter
    table kept on db1, so ids are unique across every shard.

    Note: in reality, might want to use a separate transaction for this.
    """
    with db1.connect() as conn:
        next_value = conn.scalar(ids.select(for_update=True))
        conn.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1}))
    return next_value
# table setup. we'll store a lead table of continents/cities, and a secondary
# table storing locations. a particular row will be placed in the database
# whose shard id corresponds to the 'continent'. in this setup, secondary rows
# in 'weather_reports' will be placed in the same DB as that of the parent, but
# this can be changed if you're willing to write more complex sharding
# functions.
class WeatherLocation(Base):
    """A city on a continent; the continent determines its shard."""

    __tablename__ = "weather_locations"

    # ids come from id_generator so they are unique across all shards
    id = Column(Integer, primary_key=True, default=id_generator)
    continent = Column(String(30), nullable=False)
    city = Column(String(50), nullable=False)

    # child weather reports; the backref gives Report a 'location' attribute
    reports = relationship("Report", backref="location")

    def __init__(self, continent, city):
        self.continent = continent
        self.city = city
class Report(Base):
    """A temperature reading attached to a WeatherLocation.

    Uses a plain autoincrement primary key, so the same id can occur on
    different shards; the identity token disambiguates (see the asserts
    at the bottom of this example).
    """

    __tablename__ = "weather_reports"

    id = Column(Integer, primary_key=True)
    location_id = Column(
        "location_id", Integer, ForeignKey("weather_locations.id")
    )
    temperature = Column("temperature", Float)
    report_time = Column(
        "report_time", DateTime, default=datetime.datetime.now
    )

    def __init__(self, temperature):
        self.temperature = temperature
# create tables
# drop/create on every run so each of the four shard databases starts
# from a clean schema
for db in (db1, db2, db3, db4):
    Base.metadata.drop_all(db)
    Base.metadata.create_all(db)

# establish initial "id" in db1
db1.execute(ids.insert(), nextid=1)

# step 5. define sharding functions.
# we'll use a straight mapping of a particular set of "country"
# attributes to shard id.
shard_lookup = {
    "North America": "north_america",
    "Asia": "asia",
    "Europe": "europe",
    "South America": "south_america",
}
def shard_chooser(mapper, instance, clause=None):
    """Return the shard id for an instance being persisted.

    WeatherLocation rows map directly from their continent. Report rows
    delegate to their parent WeatherLocation (via the 'location'
    backref), so parent and child always land on the same shard.
    """
    if isinstance(instance, WeatherLocation):
        return shard_lookup[instance.continent]
    return shard_chooser(mapper, instance.location)
def id_chooser(query, ident):
    """Return the list of shards to search for a primary-key lookup.

    A bare primary key carries no continent information, so by default
    every shard is returned (a round-robin strategy could be used here
    instead). During a lazy load the parent object's identity token pins
    the search to that parent's shard.
    """
    parent = query.lazy_loaded_from
    if parent:
        # lazy load: stay on the same shard as the parent object
        return [parent.identity_token]
    return ["north_america", "asia", "europe", "south_america"]
def query_chooser(query):
    """Return the shard ids a query should run against.

    Inspects the query's criterion for comparisons against the
    'continent' column and translates matching continent values into
    shard ids; falls back to all shards when nothing can be inferred.
    """
    shard_ids = []
    for column, op, value in _get_query_comparisons(query):
        # "shares_lineage()" returns True if both columns refer to the same
        # statement column, adjusting for any annotations present.
        # (an annotation is an internal clone of a Column object
        # and occur when using ORM-mapped attributes like
        # "WeatherLocation.continent"). A simpler comparison, though less
        # accurate, would be "column.key == 'continent'".
        if column.shares_lineage(WeatherLocation.__table__.c.continent):
            if op == operators.eq:
                shard_ids.append(shard_lookup[value])
            elif op == operators.in_op:
                shard_ids.extend(shard_lookup[v] for v in value)
    if not shard_ids:
        return ["north_america", "asia", "europe", "south_america"]
    return shard_ids
def _get_query_comparisons(query):
    """Search an orm.Query object for binary expressions.
    Returns expressions which match a Column against one or more
    literal values as a list of tuples of the form
    (column, operator, values). "values" is a single value
    or tuple of values depending on the operator.
    """
    # bind parameter -> resolved literal value
    binds = {}
    # Column objects seen while traversing the criterion
    clauses = set()
    # accumulated (column, operator, value(s)) tuples
    comparisons = []

    def visit_bindparam(bind):
        # visit a bind parameter.
        # check in _params for it first
        if bind.key in query._params:
            value = query._params[bind.key]
        elif bind.callable:
            # some ORM functions (lazy loading)
            # place the bind's value as a
            # callable for deferred evaluation.
            value = bind.callable()
        else:
            # just use .value
            value = bind.value
        binds[bind] = value

    def visit_column(column):
        clauses.add(column)

    def visit_binary(binary):
        # special handling for "col IN (params)"
        if (
            binary.left in clauses
            and binary.operator == operators.in_op
            and hasattr(binary.right, "clauses")
        ):
            comparisons.append(
                (
                    binary.left,
                    binary.operator,
                    tuple(binds[bind] for bind in binary.right.clauses),
                )
            )
        elif binary.left in clauses and binary.right in binds:
            # "col OP literal"
            comparisons.append(
                (binary.left, binary.operator, binds[binary.right])
            )
        elif binary.left in binds and binary.right in clauses:
            # reversed form, "literal OP col"
            comparisons.append(
                (binary.right, binary.operator, binds[binary.left])
            )

    # here we will traverse through the query's criterion, searching
    # for SQL constructs. We will place simple column comparisons
    # into a list.
    if query._criterion is not None:
        visitors.traverse_depthfirst(
            query._criterion,
            {},
            {
                "bindparam": visit_bindparam,
                "binary": visit_binary,
                "column": visit_column,
            },
        )
    return comparisons
# further configure create_session to use these functions
create_session.configure(
shard_chooser=shard_chooser,
id_chooser=id_chooser,
query_chooser=query_chooser,
)
# save and load objects!
tokyo = WeatherLocation("Asia", "Tokyo")
newyork = WeatherLocation("North America", "New York")
toronto = WeatherLocation("North America", "Toronto")
london = WeatherLocation("Europe", "London")
dublin = WeatherLocation("Europe", "Dublin")
brasilia = WeatherLocation("South America", "Brasila")
quito = WeatherLocation("South America", "Quito")
tokyo.reports.append(Report(80.0))
newyork.reports.append(Report(75))
quito.reports.append(Report(85))
sess = create_session()
sess.add_all([tokyo, newyork, toronto, london, dublin, brasilia, quito])
sess.commit()
t = sess.query(WeatherLocation).get(tokyo.id)
assert t.city == tokyo.city
assert t.reports[0].temperature == 80.0
north_american_cities = sess.query(WeatherLocation).filter(
WeatherLocation.continent == "North America"
)
assert {c.city for c in north_american_cities} == {"New York", "Toronto"}
asia_and_europe = sess.query(WeatherLocation).filter(
WeatherLocation.continent.in_(["Europe", "Asia"])
)
assert {c.city for c in asia_and_europe} == {"Tokyo", "London", "Dublin"}
# the Report class uses a simple integer primary key. So across two databases,
# a primary key will be repeated. The "identity_token" tracks in memory
# that these two identical primary keys are local to different databases.
newyork_report = newyork.reports[0]
tokyo_report = tokyo.reports[0]
assert inspect(newyork_report).identity_key == (Report, (1,), "north_america")
assert inspect(tokyo_report).identity_key == (Report, (1,), "asia")
# the token representing the originating shard is also available directly
assert inspect(newyork_report).identity_token == "north_america"
assert inspect(tokyo_report).identity_token == "asia"
|
wujuguang/sqlalchemy
|
examples/sharding/attribute_shard.py
|
Python
|
mit
| 10,199
|
[
"VisIt"
] |
4f1c97f7d6e1d2b659465f97d1016be0717029a1f1b01a0b044ab92ae1eef359
|
import unittest
import itertools
import numpy as np
from pyscf.lib import finger
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import scf as pbcscf
from pyscf.pbc import df as pbc_df
import pyscf.cc
import pyscf.pbc.cc as pbcc
import pyscf.pbc.tools.make_test_cell as make_test_cell
from pyscf.pbc.lib import kpts_helper
import pyscf.pbc.cc.kccsd as kccsd
import pyscf.pbc.cc.eom_kccsd_ghf as kccsd_ghf
# Minimal periodic test system: two He atoms in an fcc-style cell.
# Coordinates and lattice vectors are given in Bohr (cell.unit = 'B').
cell = pbcgto.Cell()
cell.atom = '''
He 0.000000000000 0.000000000000 0.000000000000
He 1.685068664391 1.685068664391 1.685068664391
'''
# Two uncontracted s shells per atom (exponents 1.0 and 0.5).
cell.basis = [[0, (1., 1.)], [0, (.5, 1.)]]
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
#cell.verbose = 7
#cell.output = '/dev/null'
cell.build()
# Monkhorst-Pack mesh and numerical tolerance shared by the tests below.
KGCCSD_TEST_NMP = [1,1,2]
KGCCSD_TEST_THRESHOLD = 1e-8
def tearDownModule():
    """Release the module-level fixtures once all tests in this module ran."""
    global cell, KGCCSD_TEST_NMP, KGCCSD_TEST_THRESHOLD
    del cell, KGCCSD_TEST_NMP, KGCCSD_TEST_THRESHOLD
def get_idx_r2(nkpts, nocc, nvir, ki, kj, i, j, a):
    """Return the flat (row-major) index of the R2 element (ki, kj, i, j, a).

    The R2 amplitude block is laid out as [nkpts, nkpts, nocc, nocc, nvir];
    strides are built from the innermost dimension outward.

    Args:
        nkpts: number of k-points.
        nocc: number of occupied orbitals.
        nvir: number of virtual orbitals.
        ki, kj: k-point indices of the two occupied lines.
        i, j: occupied orbital indices.
        a: virtual orbital index.

    Returns:
        Integer offset into the flattened R2 vector.
    """
    o1 = nvir
    o2 = nocc*o1
    o3 = nocc*o2
    o4 = nkpts*o3
    # BUG FIX: the original returned ki*o4 + ki*o3 + i*o2 + i*o1 + a,
    # using ki/i twice and ignoring kj/j entirely, so distinct (kj, j)
    # values collapsed onto the same flat index.
    return ki*o4 + kj*o3 + i*o2 + j*o1 + a
class KnownValues(unittest.TestCase):
    """Checks that the EOM-KGCCSD IP/EA diagonals agree with the explicit
    Hamiltonian diagonal obtained column-by-column from the matvec."""

    def _diag_error(self, diag, matvec):
        # Recover each diagonal element explicitly: apply the matrix-vector
        # product to a complex unit vector and read back the matching
        # component, then return the norm of the deviation from `diag`.
        dim = diag.shape[0]
        basis = np.identity(dim, dtype=complex)
        explicit = np.array([matvec(basis[:, k])[k] for k in range(dim)],
                            dtype=complex)
        reported = np.array([diag[k] for k in range(dim)], dtype=complex)
        return np.linalg.norm(explicit - reported)

    def _test_ip_diag(self, cc):
        # Compare ipccsd_diag against the explicit IP matvec diagonal.
        eom = kccsd_ghf.EOMIP(cc)
        imds = eom.make_imds()
        nkpts, nocc, nvir = imds.t1.shape
        diag = kccsd_ghf.ipccsd_diag(eom, 0, imds=imds)
        diff = self._diag_error(
            diag, lambda vec: kccsd_ghf.ipccsd_matvec(eom, vec, 0, imds=imds))
        self.assertTrue(abs(diff) < KGCCSD_TEST_THRESHOLD,
                        "Difference in IP diag: {}".format(diff))

    def _test_ea_diag(self, cc):
        # Compare eaccsd_diag against the explicit EA matvec diagonal.
        eom = kccsd_ghf.EOMEA(cc)
        imds = eom.make_imds()
        nkpts, nocc, nvir = imds.t1.shape
        diag = kccsd_ghf.eaccsd_diag(eom, 0, imds=imds)
        diff = self._diag_error(
            diag, lambda vec: kccsd_ghf.eaccsd_matvec(eom, vec, 0, imds=imds))
        self.assertTrue(abs(diff) < KGCCSD_TEST_THRESHOLD,
                        "Difference in EA diag: {}".format(diff))

    def test_he_112_diag(self):
        # 1x1x2 Monkhorst-Pack mesh
        mf = pbcscf.KGHF(cell, cell.make_kpts([1, 1, 2]), exxdiv=None)
        mf.scf()
        mycc = kccsd.KGCCSD(mf)
        mycc.kernel()
        self._test_ip_diag(mycc)
        self._test_ea_diag(mycc)

    def test_he_212_diag_high_cost(self):
        # 2x1x2 Monkhorst-Pack mesh (slower)
        mf = pbcscf.KGHF(cell, cell.make_kpts([2, 1, 2]), exxdiv=None)
        mf.scf()
        mycc = kccsd.KGCCSD(mf)
        mycc.kernel()
        self._test_ip_diag(mycc)
        self._test_ea_diag(mycc)

    def test_he_131_diag(self):
        # 1x3x1 Monkhorst-Pack mesh
        mf = pbcscf.KGHF(cell, cell.make_kpts([1, 3, 1]), exxdiv=None)
        mf.scf()
        mycc = kccsd.KGCCSD(mf)
        mycc.kernel()
        self._test_ip_diag(mycc)
        self._test_ea_diag(mycc)

    def test_supercell_vs_kpt(self):
        # Running HF and CCSD with 1x1x2 Monkhorst-Pack k-point mesh
        mf = pbcscf.KGHF(cell, kpts=cell.make_kpts(KGCCSD_TEST_NMP), exxdiv=None)
        mf.kernel()
        mycc = pbcc.KGCCSD(mf)
        mycc.conv_tol = 1e-12
        mycc.conv_tol_normt = 1e-10
        ecc2 = mycc.kernel()[0]
        ecc_ref = -0.01044680113334205
        self.assertAlmostEqual(abs(ecc_ref - ecc2), 0, 10)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
gkc1000/pyscf
|
pyscf/pbc/cc/test/test_eom_kgccsd_diag.py
|
Python
|
apache-2.0
| 4,159
|
[
"PySCF"
] |
a70614527f1e2d86d4f39569a75335fd5959f05ef29d29a53ad67053b5360dec
|
import os
from setuptools import setup
from neuronjs import __version__
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory.

    Used to feed README.rst into setup()'s long_description so the long
    description lives in one top-level file.
    """
    # BUG FIX: the original leaked the file handle; 'with' guarantees it is
    # closed even if read() raises. No encoding kwarg: this setup.py still
    # declares Python 2.6/2.7 support, where open() does not accept one.
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
# Package metadata for PyPI. long_description is pulled from README.rst via
# read() so the project description is maintained in a single file; version
# comes from neuronjs.__version__ to keep code and packaging in sync.
setup(
    name = 'neuronjs',
    packages = ['neuronjs'],
    version = __version__,
    author = 'Kael Zhang',
    author_email = 'i@kael.me',
    description = ('The python middleware for neuron.js'),
    license = 'MIT',
    keywords = 'neuron.js middleware javascript loader facade',
    url = 'https://github.com/kaelzhang/python-neuronjs',
    long_description=read('README.rst'),
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
    ]
)
|
kaelzhang/python-neuronjs
|
setup.py
|
Python
|
mit
| 1,123
|
[
"NEURON"
] |
69c47c52adc3182647ec3d478e8e4d2573673b57b9f0093b50877235e000e64a
|
words=[];
words.append('correct'); # The first four words on
words.append('horse'); # this list are from the
words.append('battery'); # XKCD comic 936 that uses
words.append('staple'); # them in an example.
words.append('about');
words.append('after');
words.append('again');
words.append('air');
words.append('all');
words.append('along');
words.append('also');
words.append('an');
words.append('and');
words.append('another');
words.append('any');
words.append('are');
words.append('around');
words.append('as');
words.append('at');
words.append('away');
words.append('back');
words.append('be');
words.append('because');
words.append('been');
words.append('before');
words.append('below');
words.append('between');
words.append('both');
words.append('but');
words.append('by');
words.append('came');
words.append('can');
words.append('come');
words.append('could');
words.append('day');
words.append('did');
words.append('different');
words.append('do');
words.append('does');
words.append('don\'t');
words.append('down');
words.append('each');
words.append('end');
words.append('even');
words.append('every');
words.append('few');
words.append('find');
words.append('first');
words.append('for');
words.append('found');
words.append('from');
words.append('get');
words.append('give');
words.append('go');
words.append('good');
words.append('great');
words.append('had');
words.append('has');
words.append('have');
words.append('he');
words.append('help');
words.append('her');
words.append('here');
words.append('him');
words.append('his');
words.append('home');
words.append('house');
words.append('how');
words.append('I');
words.append('if');
words.append('in');
words.append('into');
words.append('is');
words.append('it');
words.append('its');
words.append('just');
words.append('know');
words.append('large');
words.append('last');
words.append('left');
words.append('like');
words.append('line');
words.append('little');
words.append('long');
words.append('look');
words.append('made');
words.append('make');
words.append('man');
words.append('many');
words.append('may');
words.append('me');
words.append('men');
words.append('might');
words.append('more');
words.append('most');
words.append('Mr.');
words.append('must');
words.append('my');
words.append('name');
words.append('never');
words.append('new');
words.append('next');
words.append('no');
words.append('not');
words.append('now');
words.append('number');
words.append('of');
words.append('off');
words.append('old');
words.append('on');
words.append('one');
words.append('only');
words.append('or');
words.append('other');
words.append('our');
words.append('out');
words.append('over');
words.append('own');
words.append('part');
words.append('people');
words.append('place');
words.append('put');
words.append('read');
words.append('right');
words.append('said');
words.append('same');
words.append('saw');
words.append('say');
words.append('see');
words.append('she');
words.append('should');
words.append('show');
words.append('small');
words.append('so');
words.append('some');
words.append('something');
words.append('sound');
words.append('still');
words.append('such');
words.append('take');
words.append('tell');
words.append('than');
words.append('that');
words.append('the');
words.append('them');
words.append('then');
words.append('there');
words.append('these');
words.append('they');
words.append('thing');
words.append('think');
words.append('this');
words.append('those');
words.append('thought');
words.append('three');
words.append('through');
words.append('time');
words.append('to');
words.append('together');
words.append('too');
words.append('two');
words.append('under');
words.append('up');
words.append('us');
words.append('use');
words.append('very');
words.append('want');
words.append('water');
words.append('way');
words.append('we');
words.append('well');
words.append('went');
words.append('were');
words.append('what');
words.append('when');
words.append('where');
words.append('which');
words.append('while');
words.append('who');
words.append('why');
words.append('will');
words.append('with');
words.append('word');
words.append('work');
words.append('world');
words.append('would');
words.append('write');
words.append('year');
words.append('you');
words.append('your');
words.append('was');
words.append('able');
words.append('above');
words.append('across');
words.append('add');
words.append('against');
words.append('ago');
words.append('almost');
words.append('among');
words.append('animal');
words.append('answer');
words.append('became');
words.append('become');
words.append('began');
words.append('behind');
words.append('being');
words.append('better');
words.append('black');
words.append('best');
words.append('body');
words.append('book');
words.append('boy');
words.append('brought');
words.append('call');
words.append('cannot');
words.append('car');
words.append('certain');
words.append('change');
words.append('children');
words.append('city');
words.append('close');
words.append('cold');
words.append('country');
words.append('course');
words.append('cut');
words.append('didn\'t');
words.append('dog');
words.append('done');
words.append('door');
words.append('draw');
words.append('during');
words.append('early');
words.append('earth');
words.append('eat');
words.append('enough');
words.append('ever');
words.append('example');
words.append('eye');
words.append('face');
words.append('family');
words.append('far');
words.append('father');
words.append('feel');
words.append('feet');
words.append('fire');
words.append('fish');
words.append('five');
words.append('food');
words.append('form');
words.append('four');
words.append('front');
words.append('gave');
words.append('given');
words.append('got');
words.append('green');
words.append('ground');
words.append('group');
words.append('grow');
words.append('half');
words.append('hand');
words.append('hard');
words.append('heard');
words.append('high');
words.append('himself');
words.append('however');
words.append('I\'ll');
words.append('I\'m');
words.append('idea');
words.append('important');
words.append('inside');
words.append('John');
words.append('keep');
words.append('kind');
words.append('knew');
words.append('known');
words.append('land');
words.append('later');
words.append('learn');
words.append('let');
words.append('letter');
words.append('life');
words.append('light');
words.append('live');
words.append('living');
words.append('making');
words.append('mean');
words.append('means');
words.append('money');
words.append('morning');
words.append('mother');
words.append('move');
words.append('Mrs.');
words.append('near');
words.append('night');
words.append('nothing');
words.append('once');
words.append('open');
words.append('order');
words.append('page');
words.append('paper');
words.append('parts');
words.append('perhaps');
words.append('picture');
words.append('play');
words.append('point');
words.append('ready');
words.append('red');
words.append('remember');
words.append('rest');
words.append('room');
words.append('run');
words.append('school');
words.append('sea');
words.append('second');
words.append('seen');
words.append('sentence');
words.append('several');
words.append('short');
words.append('shown');
words.append('since');
words.append('six');
words.append('slide');
words.append('sometime');
words.append('soon');
words.append('space');
words.append('States');
words.append('story');
words.append('sun');
words.append('sure');
words.append('table');
words.append('though');
words.append('today');
words.append('told');
words.append('took');
words.append('top');
words.append('toward');
words.append('tree');
words.append('try');
words.append('turn');
words.append('United');
words.append('until');
words.append('upon');
words.append('using');
words.append('usually');
words.append('white');
words.append('whole');
words.append('wind');
words.append('without');
words.append('yes');
words.append('yet');
words.append('young');
words.append('already');
words.append('although');
words.append('am');
words.append('America');
words.append('anything');
words.append('area');
words.append('ball');
words.append('beautiful');
words.append('beginning');
words.append('Bill');
words.append('birds');
words.append('blue');
words.append('boat');
words.append('bottom');
words.append('box');
words.append('bring');
words.append('build');
words.append('building');
words.append('built');
words.append('can\'t');
words.append('care');
words.append('carefully');
words.append('carried');
words.append('carry');
words.append('center');
words.append('check');
words.append('class');
words.append('coming');
words.append('common');
words.append('complete');
words.append('dark');
words.append('deep');
words.append('distance');
words.append('doing');
words.append('dry');
words.append('easy');
words.append('either');
words.append('else');
words.append('everyone');
words.append('everything');
words.append('fact');
words.append('fall');
words.append('fast');
words.append('felt');
words.append('field');
words.append('finally');
words.append('fine');
words.append('floor');
words.append('follow');
words.append('foot');
words.append('friend');
words.append('full');
words.append('game');
words.append('getting');
words.append('girl');
words.append('glass');
words.append('goes');
words.append('gold');
words.append('gone');
words.append('happened');
words.append('having');
words.append('heart');
words.append('heavy');
words.append('held');
words.append('hold');
words.append('horse');
words.append('hot');
words.append('hour');
words.append('hundred');
words.append('ice');
words.append('Indian');
words.append('instead');
words.append('itself');
words.append('job');
words.append('kept');
words.append('language');
words.append('lay');
words.append('least');
words.append('leave');
words.append('let\'s');
words.append('list');
words.append('longer');
words.append('low');
words.append('main');
words.append('map');
words.append('matter');
words.append('mind');
words.append('Miss');
words.append('moon');
words.append('mountain');
words.append('moving');
words.append('music');
words.append('needed');
words.append('notice');
words.append('outside');
words.append('past');
words.append('pattern');
words.append('person');
words.append('piece');
words.append('plant');
words.append('poor');
words.append('possible');
words.append('power');
words.append('probably');
words.append('problem');
words.append('question');
words.append('quickly');
words.append('quite');
words.append('rain');
words.append('ran');
words.append('real');
words.append('river');
words.append('road');
words.append('rock');
words.append('round');
words.append('sat');
words.append('scientist');
words.append('shall');
words.append('ship');
words.append('simple');
words.append('size');
words.append('sky');
words.append('slowly');
words.append('snow');
words.append('someone');
words.append('special');
words.append('stand');
words.append('start');
words.append('state');
words.append('stay');
words.append('stood');
words.append('stop');
words.append('stopped');
words.append('strong');
words.append('suddenly');
words.append('summer');
words.append('surface');
words.append('system');
words.append('taken');
words.append('talk');
words.append('tall');
words.append('ten');
words.append('that\'s');
words.append('themselves');
words.append('third');
words.append('tiny');
words.append('town');
words.append('tried');
words.append('voice');
words.append('walk');
words.append('warm');
words.append('watch');
words.append('weather');
words.append('whether');
words.append('wide');
words.append('wild');
words.append('winter');
words.append('within');
words.append('writing');
words.append('written');
words.append('age');
words.append('ask');
words.append('baby');
words.append('base');
words.append('beside');
words.append('bright');
words.append('business');
words.append('buy');
words.append('case');
words.append('catch');
words.append('caught');
words.append('child');
words.append('choose');
words.append('circle');
words.append('clear');
words.append('color');
words.append('copy');
words.append('correct');
words.append('couldn\'t');
words.append('difference');
words.append('direction');
words.append('dried');
words.append('easily');
words.append('edge');
words.append('egg');
words.append('eight');
words.append('energy');
words.append('England');
words.append('especially');
words.append('Europe');
words.append('exactly');
words.append('except');
words.append('explain');
words.append('famous');
words.append('farm');
words.append('fell');
words.append('figure');
words.append('flat');
words.append('fly');
words.append('forest');
words.append('free');
words.append('French');
words.append('fun');
words.append('George');
words.append('government');
words.append('grass');
words.append('grew');
words.append('hair');
words.append('happy');
words.append('he\'s');
words.append('heat');
words.append('history');
words.append('human');
words.append('I\'ve');
words.append('inch');
words.append('information');
words.append('iron');
words.append('Jim');
words.append('Joe');
words.append('King');
words.append('larger');
words.append('late');
words.append('leg');
words.append('length');
words.append('listen');
words.append('lost');
words.append('lot');
words.append('lower');
words.append('machine');
words.append('mark');
words.append('maybe');
words.append('measure');
words.append('meet');
words.append('middle');
words.append('milk');
words.append('minute');
words.append('modern');
words.append('moment');
words.append('month');
words.append('mouth');
words.append('natural');
words.append('nearly');
words.append('necessary');
words.append('New York');
words.append('north');
words.append('object');
words.append('ocean');
words.append('oil');
words.append('pay');
words.append('per');
words.append('plan');
words.append('plane');
words.append('present');
words.append('product');
words.append('rather');
words.append('reach');
words.append('reason');
words.append('record');
words.append('running');
words.append('seems');
words.append('sent');
words.append('seven');
words.append('shape');
words.append('sides');
words.append('single');
words.append('skin');
words.append('sleep');
words.append('smaller');
words.append('soft');
words.append('soil');
words.append('south');
words.append('speak');
words.append('speed');
words.append('spring');
words.append('square');
words.append('star');
words.append('step');
words.append('store');
words.append('straight');
words.append('strange');
words.append('street');
words.append('subject');
words.append('suppose');
words.append('teacher');
words.append('thousand');
words.append('thus');
words.append('Tom');
words.append('travel');
words.append('trip');
words.append('trouble');
words.append('unit');
words.append('village');
words.append('wall');
words.append('war');
words.append('wasn\'t');
words.append('week');
words.append('whose');
words.append('window');
words.append('wish');
words.append('women');
words.append('won\'t');
words.append('wood');
words.append('wrote');
words.append('yellow');
words.append('you\'re');
words.append('yourself');
words.append('action');
words.append('addition');
words.append('afraid');
words.append('afternoon');
words.append('ahead');
words.append('amount');
words.append('ancient');
words.append('anyone');
words.append('arm');
words.append('bad');
words.append('bear');
words.append('beyond');
words.append('bit');
words.append('blood');
words.append('board');
words.append('Bob');
words.append('born');
words.append('break');
words.append('British');
words.append('broken');
words.append('brother');
words.append('brown');
words.append('busy');
words.append('capital');
words.append('cat');
words.append('cattle');
words.append('cause');
words.append('century');
words.append('chance');
words.append('clean');
words.append('clothes');
words.append('coast');
words.append('control');
words.append('cool');
words.append('corn');
words.append('corner');
words.append('cover');
words.append('cross');
words.append('Dan');
words.append('dead');
words.append('deal');
words.append('death');
words.append('decide');
words.append('difficult');
words.append('doesn\'t');
words.append('drive');
words.append('engine');
words.append('evening');
words.append('farmer');
words.append('faster');
words.append('fight');
words.append('fill');
words.append('finger');
words.append('force');
words.append('forward');
words.append('France');
words.append('fresh');
words.append('garden');
words.append('general');
words.append('glad');
words.append('greater');
words.append('greatest');
words.append('guess');
words.append('happen');
words.append('Henry');
words.append('higher');
words.append('hit');
words.append('hole');
words.append('hope');
words.append('huge');
words.append('interest');
words.append('island');
words.append('isn\'t');
words.append('jack');
words.append('lady');
words.append('largest');
words.append('lead');
words.append('led');
words.append('level');
words.append('love');
words.append('Mary');
words.append('material');
words.append('meant');
words.append('meat');
words.append('method');
words.append('missing');
words.append('needs');
words.append('nor');
words.append('nose');
words.append('note');
words.append('opposite');
words.append('pair');
words.append('party');
words.append('pass');
words.append('period');
words.append('please');
words.append('position');
words.append('pound');
words.append('practice');
words.append('pretty');
words.append('produce');
words.append('pull');
words.append('quiet');
words.append('race');
words.append('radio');
words.append('region');
words.append('result');
words.append('return');
words.append('rich');
words.append('ride');
words.append('ring');
words.append('rule');
words.append('sand');
words.append('science');
words.append('section');
words.append('seed');
words.append('send');
words.append('sense');
words.append('sets');
words.append('sharp');
words.append('sight');
words.append('sign');
words.append('silver');
words.append('similar');
words.append('sit');
words.append('son');
words.append('song');
words.append('spent');
words.append('spread');
words.append('stick');
words.append('stone');
words.append('tail');
words.append('team');
words.append('teeth');
words.append('temperature');
words.append('test');
words.append('there\'s');
words.append('therefore');
words.append('thick');
words.append('thin');
words.append('train');
words.append('various');
words.append('wait');
words.append('Washington');
words.append('wave');
words.append('we\'ll');
words.append('weight');
words.append('west');
words.append('wife');
words.append('wouldn\'t');
words.append('wrong');
words.append('you\'ll');
words.append('according');
words.append('act');
words.append('actually');
words.append('Africa');
words.append('alike');
words.append('apart');
words.append('ate');
words.append('attention');
words.append('bank');
words.append('basic');
words.append('beat');
words.append('blow');
words.append('bone');
words.append('bread');
words.append('careful');
words.append('chair');
words.append('chief');
words.append('Christmas');
words.append('church');
words.append('cloth');
words.append('cloud');
words.append('column');
words.append('compare');
words.append('contain');
words.append('continued');
words.append('cost');
words.append('cotton');
words.append('count');
words.append('dance');
words.append('describe');
words.append('desert');
words.append('dinner');
words.append('doctor');
words.append('dollar');
words.append('drop');
words.append('dropped');
words.append('ear');
words.append('east');
words.append('electric');
words.append('element');
words.append('enjoy');
words.append('equal');
words.append('exercise');
words.append('experiment');
words.append('familiar');
words.append('farther');
words.append('fear');
words.append('forth');
words.append('gas');
words.append('giving');
words.append('gray');
words.append('grown');
words.append('hardly');
words.append('hat');
words.append('hill');
words.append('hurt');
words.append('I\'d');
words.append('imagine');
words.append('include');
words.append('indeed');
words.append('Johnny');
words.append('joined');
words.append('key');
words.append('kitchen');
words.append('knowledge');
words.append('law');
words.append('lie');
words.append('major');
words.append('met');
words.append('metal');
words.append('movement');
words.append('nation');
words.append('nature');
words.append('nine');
words.append('none');
words.append('office');
words.append('older');
words.append('onto');
words.append('original');
words.append('paragraph');
words.append('parent');
words.append('particular');
words.append('path');
words.append('Paul');
words.append('Peter');
words.append('pick');
words.append('president');
words.append('pressure');
words.append('process');
words.append('public');
words.append('quick');
words.append('report');
words.append('rope');
words.append('rose');
words.append('row');
words.append('safe');
words.append('salt');
words.append('Sam');
words.append('scale');
words.append('sell');
words.append('separate');
words.append('sheep');
words.append('shoe');
words.append('shore');
words.append('simply');
words.append('sing');
words.append('sister');
words.append('sitting');
words.append('sold');
words.append('soldier');
words.append('solve');
words.append('speech');
words.append('spend');
words.append('steel');
words.append('string');
words.append('student');
words.append('studied');
words.append('sugar');
words.append('television');
words.append('term');
words.append('throughout');
words.append('tired');
words.append('total');
words.append('touch');
words.append('trade');
words.append('truck');
words.append('twice');
words.append('type');
words.append('uncle');
words.append('unless');
words.append('useful');
words.append('value');
words.append('verb');
words.append('visit');
words.append('wear');
words.append('what\'s');
words.append('wheel');
words.append('William');
words.append('wing');
words.append('wire');
words.append('won');
words.append('wonder');
words.append('worker');
words.append('yard');
words.append('alive');
words.append('angry');
words.append('army');
words.append('average');
words.append('bag');
words.append('band');
words.append('Billy');
words.append('branch');
words.append('breakfast');
words.append('breath');
words.append('broke');
words.append('bus');
words.append('cabin');
words.append('California');
words.append('camp');
words.append('captain');
words.append('cell');
words.append('cent');
words.append('certainly');
words.append('changing');
words.append('closer');
words.append('coal');
words.append('coat');
words.append('community');
words.append('company');
words.append('completely');
words.append('compound');
words.append('condition');
words.append('consider');
words.append('correctly');
words.append('crop');
words.append('crowd');
words.append('current');
words.append('danger');
words.append('dear');
words.append('degree');
words.append('develop');
words.append('die');
words.append('directly');
words.append('discover');
words.append('divide');
words.append('double');
words.append('Dr.');
words.append('dress');
words.append('drink');
words.append('drove');
words.append('dust');
words.append('easier');
words.append('effect');
words.append('electricity');
words.append('empty');
words.append('entire');
words.append('everybody');
words.append('exciting');
words.append('expect');
words.append('experience');
words.append('express');
words.append('fair');
words.append('feed');
words.append('final');
words.append('finish');
words.append('flew');
words.append('fruit');
words.append('further');
words.append('future');
words.append('Greek');
words.append('guide');
words.append('gun');
words.append('herself');
words.append('hungry');
words.append('instrument');
words.append('Jane');
words.append('join');
words.append('jump');
words.append('laid');
words.append('liquid');
words.append('loud');
words.append('market');
words.append('member');
words.append('Mexico');
words.append('Mike');
words.append('mine');
words.append('motion');
words.append('myself');
words.append('neck');
words.append('news');
words.append('nice');
words.append('noise');
words.append('noun');
words.append('oxygen');
words.append('paid');
words.append('phrase');
words.append('plain');
words.append('poem');
words.append('population');
words.append('proper');
words.append('proud');
words.append('provide');
words.append('purpose');
words.append('putting');
words.append('quietly');
words.append('raise');
words.append('range');
words.append('rate');
words.append('regular');
words.append('related');
words.append('replied');
words.append('represent');
words.append('rise');
words.append('scientific');
words.append('season');
words.append('seat');
words.append('share');
words.append('shot');
words.append('shoulder');
words.append('slow');
words.append('smile');
words.append('solid');
words.append('solution');
words.append('sort');
words.append('southern');
words.append('stage');
words.append('statement');
words.append('station');
words.append('steam');
words.append('stream');
words.append('strength');
words.append('supply');
words.append('surprise');
# Bulk-load the remaining entries of the word list. words.extend() adds
# exactly the same strings, in exactly the same order, as the original
# one-per-line words.append() calls did.
words.extend([
    'symbol', 'till', 'tomorrow', 'tube', 'twelve', 'twenty', 'usual',
    'valley', 'variety', 'vowel', "we're", 'wet', 'wooden', 'worth',
    'airplane', 'angle', 'Ann', 'apple', 'art', 'Atlantic', 'atmosphere',
    'bar', 'barn', 'baseball', 'beauty', 'believed', 'bell', 'belong',
    'beneath', 'bigger', 'bottle', 'bowl', 'broad', 'chapter', 'chart',
    'Chinese', 'clearly', 'climate', 'clock', 'closely', 'clothing',
    'coffee', 'cow', 'cry', 'Dad', 'dangerous', 'deer', 'desk', 'detail',
    'development', 'drew', 'driver', 'event', 'everywhere', 'fat',
    'favorite', 'fence', 'fifty', 'flight', 'flow', 'flower', 'forget',
    'fourth', 'friendly', 'generally', 'German', 'Germany', 'giant',
    'golden', 'grain', 'handle', 'height', 'hung', 'hurry', 'immediately',
    'industry', 'instance', 'Italy', 'James', 'knife', 'lake', 'Latin',
    'leader', 'leaving', 'likely', 'lunch', 'mass', 'master', 'mile',
    'mix', 'model', 'mud', 'muscle', 'nearby', 'nearest', 'nest',
    'newspaper', 'nobody', 'observe', 'Pacific', 'peace', 'plate',
    'plenty', 'popular', 'powerful', 'push', 'railroad', 'rapidly',
    'root', 'rubber', 'sad', 'sail', 'save', 'score', 'seeing',
    'serious', 'service', 'sheet', 'shop', 'silent', 'smell', 'smoke',
    'smooth', 'source', 'spell', 'storm', 'structure', 'supper',
    'support', 'sweet', 'swim', 'telephone', 'Texas', 'threw', 'throw',
    'tone', 'tool', 'track', 'trail', 'understanding', 'upper', 'view',
    'wagon', 'western', 'whatever', 'wheat', 'whenever', 'whom', 'win',
    'wonderful', 'wore', 'ability', 'agree', 'ants', 'Asia', 'asleep',
    'attack', 'balance', 'bat', 'battle', 'Ben', 'block', 'bow',
    'brain', 'brave', 'bridge', 'cave', 'charge', 'chemical', 'China',
    'clay', 'climb', 'composition', 'congress', 'copper', 'crew', 'cup',
    'daughter', 'design', 'determine', 'direct', 'discuss', 'division',
    'drawn', 'earlier', 'eaten', 'education', 'enemy', 'enter',
    'equipment', 'escape', 'European', 'excited', 'expression', 'extra',
    'factory', 'feathers', 'fellow', 'fighting', 'fought', 'Frank',
    'freedom', 'funny', 'fur', 'growth', 'hall', "he'd", 'health',
    'highest', 'hunt', 'including', 'increase', 'indicate', 'individual',
    'Japanese', 'kill', 'laugh', 'library', 'lift', 'lion', 'local',
    'lose', 'lovely', 'lying', 'magic', 'Mama', 'manner', 'Mark', 'May',
    'mostly', 'national', 'neighbor', 'ordinary', 'parallel', 'park',
    'particularly', 'pencil', 'perfect', 'planet', 'planned', 'pleasant',
    'pocket', 'police', 'political', 'post', 'potatoes', 'price',
    'printed', 'program', 'property', 'prove', 'remain', 'riding',
    'roll', 'Roman', 'roof', 'rough', 'scene', 'search', 'secret',
    'series', 'serve', 'settlers', 'shinning', 'shut', 'signal', 'Sir',
    'skill', 'smallest', 'social', 'softly', 'St.', 'struck', 'studying',
    'success', 'suit', 'sunlight', 'swimming', 'taste', 'taught',
    'thank', "they're", 'tip', 'title', 'tongue', 'valuable', 'vast',
    'vegetable', 'wash', 'weak', "you've", 'activity', 'Alaska',
    'appearance', "aren't", 'article', 'Aunt', 'automobile', 'avoid',
    'basket', 'birthday', 'cage', 'cake', 'Canada', 'central',
    'character', 'Charles', 'chicken', 'chosen', 'club', 'cook',
    'court', 'cream', 'cutting', 'daily', 'darkness', 'diagram', 'Dick',
    'disappear', 'doubt', 'dozen', 'dream', 'driving', 'effort',
    'establish', 'exact', 'excitement', 'fifteen', 'flag', 'flies',
    'football', 'foreign', 'frame', 'frequently', 'frighten', 'function',
    'gate', 'gently', 'gradually', "hadn't", 'harder', 'hide',
    'hurried', 'identity', 'importance', 'impossible', 'India',
    'invented', 'Italian', 'jar', 'journey', 'joy', 'lesson', 'Lincoln',
    'lips', 'log', 'London', 'loose', 'massage', 'minerals', 'outer',
    'paint', 'Papa', 'Paris', 'particles', 'personal', 'physical',
    'pie', 'pipe', 'pole', 'pond', 'progress', 'quarter', 'rays',
    'recent', 'recognize', 'replace', 'rhythm', 'Richard', 'Robert',
    'rod', 'ruler', 'safety', 'Sally', 'sang', 'setting', 'shells',
    'sick', 'situation', 'slightly', 'Spain', 'spirit', 'steady',
    'stepped', 'strike', 'successful', 'sudden', 'sum', 'terrible',
    'tie', 'traffic', 'unusual', 'volume', 'whale', 'wise', 'yesterday',
    'account', 'allow', 'anywhere', 'attached', 'audience', 'available',
    'balloon', 'bare', 'bark', 'begun', 'bent', 'biggest', 'bill',
    'blank', 'blew', 'breathing', 'butter', 'cap', 'carbon', 'card',
    'chain', 'cheese', 'chest', 'Chicago', 'choice', 'circus',
    'citizen', 'classroom', 'college', 'consist', 'continent',
    'conversation', 'courage', 'cowboy', 'creature', 'date', 'depend',
    'differ', 'discovery', 'disease', 'duck', 'due', 'Dutch',
    'entirely', 'environment', 'exclaimed', 'factor', 'fog', 'forgot',
    'forgotten', 'frozen', 'fuel', 'furniture', 'gather', 'gentle',
    'globe', 'grandfather', 'greatly', "haven't", 'helpful', 'hidden',
    'honor', 'husband', 'involved', 'Japan', 'jet', 'Jimmy', 'layers',
    'leaf', 'leather', 'load', 'lonely', 'Louis', 'March', 'meal',
    'medicine', 'merely', 'mice', 'molecular', 'musical', 'native',
    'Negro', 'noon', 'occur', 'orange', 'ought', 'pack', 'partly',
    'pet', 'pine', 'pink', 'pitch', 'pool', 'prepare', 'press',
    'prevent', 'pure', 'queen', 'rabbit', 'ranch', 'realize',
    'receive', 'recently', 'rice', 'rising', 'rocket', 'Saturday',
    'saved', 'shade', 'shadow', 'shirt', 'shoot', 'shorter', 'silence',
    'slipped', 'Smith', 'snake', 'somewhere', 'spoken', 'standard',
    'straw', 'strip', 'substance', 'suggest', 'Sunday', 'teach',
    'tears', 'thirty', 'Thomas', 'thread', 'throat', 'tight', 'tin',
    'triangle', 'truth', 'union', 'warn', 'whispered', 'wool', "you'd",
    'aid', 'aloud', 'Andy', 'anyway', 'arrow', 'aside', 'atomic',
    'author', 'basis', 'bean', 'becoming', 'Betsy', 'bicycle',
    'blanket', 'brush', 'buffalo', 'burn', 'burst', 'bush', 'Carlos',
    'collect', 'colony', 'combination', 'combine', 'comfortable',
    'complex', 'composed', 'concerned', 'connected', 'construction',
    'couple', 'create', 'curious', 'dig', 'dirt', 'distant', 'dot',
    'Edward', 'elephant', 'etc.', 'evidence', 'examine', 'excellent',
    'failed', 'fallen', 'fastened', 'feature', 'fed', 'gain', 'graph',
    'hearing', 'highway', 'improve', 'influence', 'July', 'June',
    'lack', 'lamp', 'locate', 'luck', 'mail', 'married', 'mighty',
    'mirror', 'Mississippi', 'motor', 'mouse', 'needle', 'nodded',
    'numeral', 'offer', 'oldest', 'operation', 'orbit', 'organized',
    'outline', 'pain', 'pan', 'pen', 'piano', 'pictured', 'pig',
    'pile', 'planning', 'pony', 'principal', 'production', 'refer',
    'religious', 'repeat', 'research', 'respect', 'review', 'route',
    'silk', 'slept', 'spite', 'stretch', 'stronger', 'stuck', 'swing',
    'task', 'tax', 'tea', 'tent', 'thee', 'theory', 'thrown',
    'tonight', 'topic', 'tower', 'transportation', 'trick',
    'underline', 'unknown', 'upward', 'Virginia', 'waste', "we've",
    'wherever', 'willing', 'worry', 'worse', 'youth', 'accept',
    'accident', 'active', 'additional', 'adjective', 'affect', 'Alice',
    'alphabet', 'announced', 'anybody', 'April', 'arrange', 'Australia',
    'aware', 'badly', 'bee', 'belt', 'bite', 'blind', 'bound',
    'castle', 'characteristic', 'Columbus', 'compass', 'consonant',
    'curve', 'definition', 'dish', 'Don', 'driven', 'dug', 'earn',
    'Eddy', 'eventually', 'explore', 'fairly', 'fewer', 'fifth',
    'Florida', 'gasoline', 'gift', 'grade', 'halfway', 'hang',
    "he'll", 'headed', 'herd', 'hollow', 'income', 'industrial',
    'introduced', 'Johnson', 'Jones', 'judge', 'loss', 'lucky',
    'machinery', 'mad', 'magnet', 'Mars', 'military', 'mistake',
    'mood', 'nails', 'naturally', 'negative', 'obtain', 'origin',
    'owner', 'passage', 'percent', 'perfectly', 'pilot', 'pleasure',
    'plural', 'plus', 'poet', 'porch', 'pot', 'powder', 'previous',
    'primitive', 'principle', 'prize', 'purple', 'raw', 'reader',
    'remove', 'salmon', 'screen', 'seldom', 'select', 'society',
    'somebody', 'specific', 'spider', 'sport', 'stairs', 'stared',
    'steep', 'stomach', 'stove', 'stranger', 'struggle', 'surrounded',
    'swam', 'syllable', 'tank', 'tape', 'thou', 'tightly', 'Tim',
    'trace', 'tribe', 'trunk', 'TV', 'universe', 'visitor', 'vote',
    'weigh', 'Wilson', 'younger', 'zero', 'aboard', 'accurate',
    'actual', 'adventure', 'apartment', 'applied', 'appropriate',
    'arrive', 'atom', 'Bay', 'behavior', 'bend', 'bet', 'birth',
    'brass', 'breathe', 'brief', 'buried', 'camera', 'captured',
    'chamber', 'command', 'crack', 'Daniel', 'David', 'dawn',
    'declared', 'diameter', 'difficulty', 'dirty', 'dull', 'duty',
    'eager', 'eleven', 'engineer', 'equally', 'equator', 'fierce',
    'firm', 'fix', 'flame', 'former', 'forty', 'fox', 'Fred', 'frog',
    'fully', 'goose', 'gravity', 'Greece', 'guard', 'gulf',
    'handsome', 'harbor', 'hay', 'hello', 'horn', 'hospital', 'ill',
    'interior', 'Jeff', 'jungle', 'labor', 'limited', 'location',
    'mainly', 'managed', 'Maria', 'mental', 'mixture', 'movie',
    'nearer', 'nervous', 'noted', 'October', 'officer', 'Ohio',
    'opinion', 'opportunity', 'organization', 'package', 'pale',
    'plastic', 'Pole', 'port', 'pour', 'private', 'properly',
    'protection', 'pupil', 'rear', 'refused', 'roar', 'Rome',
    'Russia', 'Russian', 'saddle', 'settle', 'shelf', 'shelter',
    'shine', 'sink', 'slabs', 'slave', 'somehow', 'split', 'stems',
    'stock', 'swept', 'thy', 'tide', 'torn', 'troops', 'tropical',
    'typical', 'unhappy', 'vertical', 'victory', 'voyage', 'welcome',
    "weren't", 'whistle', 'widely', 'worried', 'wrapped', 'writer',
    'acres', 'adult', 'advice', 'arrangement', 'attempt', 'August',
    'Autumn', 'border', 'breeze', 'brick', 'calm', 'canal', 'Casey',
    'cast', 'chose', 'claws', 'coach', 'constantly', 'contrast',
    'cookies', 'customs', 'damage', 'Danny', 'deeply', 'depth',
    'discussion', 'doll', 'donkey', 'Egypt', 'Ellen', 'essential',
    'exchange', 'exist', 'explanation', 'facing', 'film', 'finest',
    'fireplace', 'floating', 'folks', 'fort', 'garage', 'grabbed',
    'grandmother', 'habit', 'happily', 'Harry', 'heading', 'hunter',
    'Illinois', 'image', 'independent', 'instant', 'January', 'kids',
    'label', 'Lee', 'lungs', 'manufacturing', 'Martin', 'mathematics',
    'melted', 'memory', 'mill', 'mission', 'monkey', 'Mount',
    'mysterious', 'neighborhood', 'Norway', 'nuts', 'occasionally',
    'official', 'ourselves', 'palace', 'Pennsylvania', 'Philadelphia',
    'plates', 'poetry', 'policeman', 'positive', 'possibly',
    'practical', 'pride', 'promised', 'recall', 'relationship',
    'remarkable', 'require', 'rhyme', 'rocky', 'rubbed', 'rush',
    'sale', 'satellites', 'satisfied', 'scared', 'selection', 'shake',
    'shaking', 'shallow', 'shout', 'silly', 'simplest', 'slight',
    'slip', 'slope', 'soap', 'solar', 'species', 'spin', 'stiff',
    'swung', 'tales', 'thumb', 'tobacco', 'toy', 'trap', 'treated',
    'tune', 'University', 'vapor', 'vessels', 'wealth', 'wolf', 'zoo',
])
|
Inquisitor-Sasha/xkcd936
|
lib/wordList.py
|
Python
|
gpl-3.0
| 50,310
|
[
"COLUMBUS",
"VisIt",
"exciting"
] |
5a1ec729fa9bf6427faa70f3b90e7d50a6b0a769e310b1aa89ece9d21c8af4be
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License
from __future__ import division, unicode_literals
import re
import numpy as np
from monty.io import zopen
from pymatgen.core.units import bohr_to_angstrom, Ry_to_eV
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Spin
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.num import round_to_sigfigs
"""
Module for implementing a CTRL file object class for the Stuttgart
LMTO-ASA code. It will primarily be used to generate a pymatgen
Structure object in the pymatgen.electronic_structure.cohp.py module.
"""
# Module metadata for the LMTO CTRL/COPL parser.
__author__ = "Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Marco Esters"
__email__ = "esters@uoregon.edu"
__date__ = "Nov 30, 2017"
class LMTOCtrl(object):
    """
    Class for parsing CTRL files from the Stuttgart LMTO-ASA code.
    Currently, only HEADER, VERS and the structure can be used.

    Args/attributes:
        structure: The structure as a pymatgen Structure object.
        header: The header for the CTRL file. Defaults to None.
        version: The LMTO version that is used for the VERS category.
            Defaults to the newest version (4.7).
    """
    def __init__(self, structure, header=None, version="LMASA-47"):
        self.structure = structure
        self.header = header
        self.version = version

    def __eq__(self, other):
        # Two CTRL objects compare equal when their text representations
        # match. NOTE(review): no isinstance check — a non-LMTOCtrl
        # `other` without a get_string() method raises AttributeError
        # instead of returning NotImplemented.
        return self.get_string() == other.get_string()

    def __repr__(self):
        """
        Representation of the CTRL file is as a string.
        """
        return self.get_string()

    def __str__(self):
        """
        String representation of the CTRL file.
        """
        return self.get_string()

    def get_string(self, sigfigs=8):
        """
        Generates the string representation of the CTRL file. This is
        the minimal CTRL file necessary to execute lmhart.run.

        Args:
            sigfigs: number of significant figures used when rounding
                ALAT, lattice vectors and site positions. Defaults to 8.
        """
        ctrl_dict = self.as_dict()
        # "HEADER" is only present in the dict when self.header is not
        # None (see as_dict), so self.header is safe to use here.
        lines = [] if "HEADER" not in ctrl_dict else \
            ["HEADER".ljust(10) + self.header]
        if "VERS" in ctrl_dict:
            lines.append("VERS".ljust(10) + self.version)
        # STRUC category: the ALAT scaling factor followed by the three
        # PLAT lattice vectors, one per line.
        lines.append("STRUC".ljust(10) +
                     "ALAT="+str(round(ctrl_dict["ALAT"], sigfigs)))
        for l, latt in enumerate(ctrl_dict["PLAT"]):
            if l == 0:
                line = "PLAT=".rjust(15)
            else:
                # Continuation lines are blank-padded to align under PLAT=.
                line = " ".ljust(15)
            line += " ".join([str(round(v, sigfigs)) for v in latt])
            lines.append(line)
        # CLASS and SITE categories: one TOKEN=value group per atom; the
        # category keyword appears only on the first line of the category.
        for cat in ["CLASS", "SITE"]:
            for a, atoms in enumerate(ctrl_dict[cat]):
                if a == 0:
                    line = [cat.ljust(9)]
                else:
                    line = [" ".ljust(9)]
                for token, val in sorted(atoms.items()):
                    if token == "POS":
                        line.append("POS=" +
                                    " ".join([str(round(p, sigfigs))
                                              for p in val]))
                    else:
                        line.append(token + "=" + str(val))
                line = " ".join(line)
                lines.append(line)
        return "\n".join(lines)+"\n"

    def as_dict(self):
        """
        Returns the CTRL as a dictionary. "SITE" and "CLASS" are of
        the form {'CATEGORY': {'TOKEN': value}}, the rest is of the
        form 'TOKEN'/'CATEGORY': value. It gets the conventional standard
        structure because primitive cells use the conventional
        a-lattice parameter as the scaling factor and not the a-lattice
        parameter of the primitive cell.
        """
        ctrl_dict = {"@module": self.__class__.__module__,
                     "@class": self.__class__.__name__}
        if self.header is not None:
            ctrl_dict["HEADER"] = self.header
        if self.version is not None:
            ctrl_dict["VERS"] = self.version
        sga = SpacegroupAnalyzer(self.structure)
        # alat (Angstrom here) is the conventional-cell a parameter;
        # lattice vectors and site positions are expressed in alat units.
        alat = sga.get_conventional_standard_structure().lattice.a
        plat = self.structure.lattice.matrix/alat
        """
        The following is to find the classes (atoms that are not symmetry
        equivalent, and create labels. Note that LMTO only attaches
        numbers with the second atom of the same species, e.g. "Bi", "Bi1",
        "Bi2", etc.
        """
        eq_atoms = sga.get_symmetry_dataset()['equivalent_atoms']
        ineq_sites_index = list(set(eq_atoms))
        sites = []
        classes = []
        # num_atoms: running count of classes created per element symbol.
        num_atoms = {}
        for s, site in enumerate(self.structure.sites):
            atom = site.specie
            label_index = ineq_sites_index.index(eq_atoms[s])
            if atom.symbol in num_atoms:
                # New symmetry-inequivalent class of an already-seen
                # element: attach a number ("Bi1", "Bi2", ...).
                if label_index + 1 > sum(num_atoms.values()):
                    num_atoms[atom.symbol] += 1
                    atom_label = atom.symbol + str(num_atoms[atom.symbol] - 1)
                    classes.append({"ATOM": atom_label, "Z": atom.Z})
            else:
                # First class of this element keeps the bare symbol.
                num_atoms[atom.symbol] = 1
                classes.append({"ATOM": atom.symbol, "Z": atom.Z})
            sites.append({"ATOM": classes[label_index]["ATOM"],
                          "POS": site.coords/alat})
        # ALAT is stored in bohr, the unit expected by the LMTO code.
        ctrl_dict.update({"ALAT": alat/bohr_to_angstrom,
                          "PLAT": plat,
                          "CLASS": classes,
                          "SITE": sites})
        return ctrl_dict

    def write_file(self, filename="CTRL", **kwargs):
        """
        Writes a CTRL file with structure, HEADER, and VERS that can be
        used as input for lmhart.run.

        Args:
            filename: name of the file to write. Defaults to "CTRL".
            **kwargs: passed through to get_string (e.g. sigfigs).
        """
        with zopen(filename, "wt") as f:
            f.write(self.get_string(**kwargs))

    @classmethod
    def from_file(cls, filename="CTRL", **kwargs):
        """
        Creates a CTRL file object from an existing file.

        Args:
            filename: The name of the CTRL file. Defaults to 'CTRL'.
        Returns:
            An LMTOCtrl object.
        """
        with zopen(filename, "rt") as f:
            contents = f.read()
        return LMTOCtrl.from_string(contents, **kwargs)

    @classmethod
    def from_string(cls, data, sigfigs=8):
        """
        Creates a CTRL file object from a string. This will mostly be
        used to read an LMTOCtrl object from a CTRL file. Empty spheres
        are ignored.

        Args:
            data: String representation of the CTRL file.
            sigfigs: significant figures used when rounding parsed
                numerical values. Defaults to 8.
        Returns:
            An LMTOCtrl object.
        """
        lines = data.split("\n")[:-1]
        struc_lines = {"HEADER": [], "VERS": [], "SYMGRP": [],
                       "STRUC": [], "CLASS": [], "SITE": []}
        # Group the raw lines by category. A line that starts with
        # whitespace is a continuation of the most recently seen
        # category keyword (`cat` deliberately persists across
        # iterations); categories not in struc_lines are ignored.
        for line in lines:
            if line != "" and not line.isspace():
                if not line[0].isspace():
                    cat = line.split()[0]
                if cat in struc_lines:
                    struc_lines[cat].append(line)
                else:
                    pass
        # Collapse each category to one string and normalize "= " to "="
        # so a split on "=" pairs every token with its value.
        for cat in struc_lines:
            struc_lines[cat] = " ".join(struc_lines[cat]).replace("= ", "=")
        structure_tokens = {"ALAT": None,
                            "PLAT": [],
                            "CLASS": [],
                            "SITE": []}
        for cat in ["STRUC", "CLASS", "SITE"]:
            fields = struc_lines[cat].split("=")
            for f, field in enumerate(fields):
                # After splitting on "=", the last word of each field is
                # a token name and fields[f+1] starts with its value.
                token = field.split()[-1]
                if token == "ALAT":
                    alat = round(float(fields[f+1].split()[0]), sigfigs)
                    structure_tokens["ALAT"] = alat
                elif token == "ATOM":
                    atom = fields[f+1].split()[0]
                    # Labels matching E[0-9]* are empty spheres; skip them.
                    if not bool(re.match("E[0-9]*$", atom)):
                        if cat == "CLASS":
                            structure_tokens["CLASS"].append(atom)
                        else:
                            structure_tokens["SITE"].append({"ATOM": atom})
                    else:
                        pass
                elif token in ["PLAT", "POS"]:
                    try:
                        arr = np.array([round(float(i), sigfigs)
                                        for i in fields[f+1].split()])
                    except ValueError:
                        # Last item was not numeric (e.g. the next token
                        # name in the same field): drop it and retry.
                        arr = np.array([round(float(i), sigfigs)
                                        for i in fields[f+1].split()[:-1]])
                    if token == "PLAT":
                        structure_tokens["PLAT"] = arr.reshape([3, 3])
                    # POS belongs to the most recent ATOM; empty-sphere
                    # positions are discarded like their labels.
                    # NOTE(review): `atom` is unbound here if a POS token
                    # precedes any ATOM token — presumably impossible in
                    # valid CTRL files, but worth confirming.
                    elif not bool(re.match("E[0-9]*$", atom)):
                        structure_tokens["SITE"][-1]["POS"] = arr
                    else:
                        pass
                else:
                    pass
        # SYMGRP is one string at this point; str.index raises ValueError
        # when "SPCGRP" is absent, in which case no space group is stored.
        try:
            spcgrp_index = struc_lines["SYMGRP"].index("SPCGRP")
            spcgrp = struc_lines["SYMGRP"][spcgrp_index:spcgrp_index+12]
            structure_tokens["SPCGRP"] = spcgrp.split("=")[1].split()[0]
        except ValueError:
            pass
        # HEADER and VERS keep everything after the category keyword;
        # IndexError means the category was empty/missing.
        for token in ["HEADER", "VERS"]:
            try:
                value = re.split(token + r"\s*", struc_lines[token])[1]
                structure_tokens[token] = value.strip()
            except IndexError:
                pass
        return LMTOCtrl.from_dict(structure_tokens)

    @classmethod
    def from_dict(cls, d):
        """
        Creates a CTRL file object from a dictionary. The dictionary
        must contain the items "ALAT", PLAT" and "SITE".

        Valid dictionary items are:
            ALAT: the a-lattice parameter
            PLAT: (3x3) array for the lattice vectors
            SITE: list of dictionaries: {'ATOM': class label,
                                         'POS': (3x1) array of fractional
                                                coordinates}
            CLASS (optional): list of unique atom labels as str
            SPCGRP (optional): space group symbol (str) or number (int)
            HEADER (optional): HEADER text as a str
            VERS (optional): LMTO version as a str

        Args:
            d: The CTRL file as a dictionary.
        Returns:
            An LMTOCtrl object.
        """
        for cat in ["HEADER", "VERS"]:
            if cat not in d:
                d[cat] = None
        # Convert ALAT from bohr to Angstrom; lattice vectors and site
        # positions in the dict are in units of ALAT.
        alat = d["ALAT"] * bohr_to_angstrom
        plat = d["PLAT"] * alat
        species = []
        positions = []
        for site in d["SITE"]:
            # Strip the numbering from class labels ("Bi1" -> "Bi") to
            # recover the element symbol.
            species.append(re.split("[0-9*]", site["ATOM"])[0])
            positions.append(site["POS"] * alat)
        # Only check if the structure is to be generated from the space
        # group if the number of sites is the same as the number of classes.
        # If lattice and the spacegroup don't match, assume it's primitive.
        if "CLASS" in d and "SPCGRP" in d \
                and len(d["SITE"]) == len(d["CLASS"]):
            try:
                structure = Structure.from_spacegroup(d["SPCGRP"], plat,
                                                      species, positions,
                                                      coords_are_cartesian=True)
            except ValueError:
                structure = Structure(plat, species, positions,
                                      coords_are_cartesian=True,
                                      to_unit_cell=True)
        else:
            structure = Structure(plat, species, positions,
                                  coords_are_cartesian=True,
                                  to_unit_cell=True)
        return cls(structure, header=d["HEADER"], version=d["VERS"])
class LMTOCopl(object):
    """
    Class for reading COPL files, which contain COHP data.
    Args:
        filename: filename of the COPL file. Defaults to "COPL".
        to_eV: LMTO-ASA gives energies in Ry. To convert energies into
            eV, set to True. Defaults to False for energies in Ry.
    .. attribute: cohp_data
        Dict that contains the COHP data of the form:
            {bond: {"COHP": {Spin.up: cohps, Spin.down:cohps},
                    "ICOHP": {Spin.up: icohps, Spin.down: icohps},
                    "length": bond length}
    .. attribute: efermi
        The Fermi energy in Ry or eV.
    .. attribute: energies
        Sequence of energies in Ry or eV.
    .. attribute: is_spin_polarized
        Boolean to indicate if the calculation is spin polarized.
    """
    def __init__(self, filename="COPL", to_eV=False):
        # COPL files have an extra trailing blank line
        with zopen(filename, "rt") as f:
            contents = f.read().split("\n")[:-1]
        # The parameters line is the second line in a COPL file. It
        # contains all parameters that are needed to map the file.
        parameters = contents[1].split()
        num_bonds = int(parameters[0])
        # Second parameter is the number of spin channels (2 = polarized).
        if int(parameters[1]) == 2:
            spins = [Spin.up, Spin.down]
            self.is_spin_polarized = True
        else:
            spins = [Spin.up]
            self.is_spin_polarized = False
        # The COHP data start in row num_bonds + 3
        # (transpose so that data[column] indexes a whole column).
        data = np.array([np.array(row.split(), dtype=float)
                         for row in contents[num_bonds+2:]]).transpose()
        if to_eV:
            # LMTO energies have 5 sig figs
            self.energies = np.array([round_to_sigfigs(energy, 5)
                                      for energy in data[0] * Ry_to_eV],
                                     dtype=float)
            self.efermi = round_to_sigfigs(float(parameters[-1])*Ry_to_eV, 5)
        else:
            self.energies = data[0]
            self.efermi = float(parameters[-1])
        cohp_data = {}
        for bond in range(num_bonds):
            label, length, sites = self._get_bond_data(contents[2+bond])
            # Per spin block: odd columns hold COHP, even columns ICOHP.
            cohp = {spin: data[2*(bond+s*num_bonds)+1]
                    for s, spin in enumerate(spins)}
            if to_eV:
                icohp = {spin: np.array([round_to_sigfigs(i, 5) for i in
                                         data[2*(bond+s*num_bonds)+2] * Ry_to_eV])
                         for s, spin in enumerate(spins)}
            else:
                icohp = {spin: data[2*(bond+s*num_bonds)+2]
                         for s, spin in enumerate(spins)}
            # This takes care of duplicate labels by appending "-1", "-2", ...
            if label in cohp_data:
                i = 1
                lab = "%s-%d" % (label, i)
                while lab in cohp_data:
                    i += 1
                    lab = "%s-%d" % (label, i)
                label = lab
            cohp_data[label] = {"COHP": cohp, "ICOHP": icohp,
                                "length": length, "sites": sites}
        self.cohp_data = cohp_data
    @staticmethod
    def _get_bond_data(line):
        """
        Subroutine to extract bond label, site indices, and length from
        a COPL header line. The site indices are zero-based, so they
        can be easily used with a Structure object.
        Example header line: Fe-1/Fe-1-tr(-1,-1,-1) : 2.482 Ang.
        Args:
            line: line in the COHPCAR header describing the bond.
        Returns:
            The bond label, the bond length and a tuple of the site
            indices.
        """
        line = line.split()
        length = float(line[2])
        # Replacing "/" with "-" makes splitting easier
        sites = line[0].replace("/", "-").split("-")
        # Split items 1 and 3 are the one-based site indices.
        site_indices = tuple(int(ind) - 1 for ind in sites[1:4:2])
        # Split items 0 and 2 are the species labels, e.g. "Fe1" -> "Fe".
        species = tuple(re.split(r"\d+", spec)[0] for spec in sites[0:3:2])
        label = "%s%d-%s%d" % (species[0], site_indices[0] + 1,
                               species[1], site_indices[1] + 1)
        return label, length, site_indices
|
czhengsci/pymatgen
|
pymatgen/io/lmto.py
|
Python
|
mit
| 15,857
|
[
"pymatgen"
] |
3ef0fbf2856a02138e7b80b68015f2bd7239829f172462a6e43f8b7fd7d4e2ab
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
One repository to update them all
On mbed.org the mbed SDK is split up in multiple repositories, this script takes
care of updating them all.
"""
import sys
from copy import copy
from os import walk, remove, makedirs
from os.path import join, abspath, dirname, relpath, exists, isfile
from shutil import copyfile
from optparse import OptionParser
import re
import string
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from workspace_tools.settings import MBED_ORG_PATH, MBED_ORG_USER, BUILD_DIR
from workspace_tools.paths import LIB_DIR
from workspace_tools.utils import run_cmd
# Host and default user under which the official code is published.
MBED_URL = "mbed.org"
MBED_USER = "mbed_official"
# Names of repositories that received new commits during this run.
changed = []
# Module-level options, overwritten by do_sync() from the command line.
push_remote = True
quiet = False
commit_msg = ''
# Code that does have a mirror in the mbed SDK
# Tuple data: (repo_name, list_of_code_dirs, [team])
# team is optional - if not specified, the code is published under mbed_official
# The second element may be a single directory (str) or a list of directories.
OFFICIAL_CODE = (
    ("mbed-dev" , "mbed"),
    ("mbed-rtos", "rtos"),
    ("mbed-dsp" , "dsp"),
    ("mbed-rpc" , "rpc"),
    ("lwip"    , "net/lwip/lwip"),
    ("lwip-sys", "net/lwip/lwip-sys"),
    ("Socket"  , "net/lwip/Socket"),
    ("lwip-eth" , "net/eth/lwip-eth"),
    ("EthernetInterface", "net/eth/EthernetInterface"),
    ("USBDevice", "USBDevice"),
    ("USBHost" , "USBHost"),
    ("CellularModem", "net/cellular/CellularModem"),
    ("CellularUSBModem", "net/cellular/CellularUSBModem"),
    ("UbloxUSBModem", "net/cellular/UbloxUSBModem"),
    ("UbloxModemHTTPClientTest", ["tests/net/cellular/http/common", "tests/net/cellular/http/ubloxusb"]),
    ("UbloxModemSMSTest", ["tests/net/cellular/sms/common", "tests/net/cellular/sms/ubloxusb"]),
    ("FATFileSystem", "fs/fat", "mbed-official"),
)
# Code that does have dependencies to libraries should point to
# the latest revision. By default, they point to a specific revision.
# These are repository names only; see update_dependencies().
CODE_WITH_DEPENDENCIES = (
    # Libraries
    "EthernetInterface",
    # RTOS Examples
    "rtos_basic",
    "rtos_isr",
    "rtos_mail",
    "rtos_mutex",
    "rtos_queue",
    "rtos_semaphore",
    "rtos_signals",
    "rtos_timer",
    # Net Examples
    "TCPEchoClient",
    "TCPEchoServer",
    "TCPSocket_HelloWorld",
    "UDPSocket_HelloWorld",
    "UDPEchoClient",
    "UDPEchoServer",
    "BroadcastReceive",
    "BroadcastSend",
    # mbed sources
    "mbed-src-program",
)
# A list of regular expressions that will be checked against each directory
# name and skipped if they match.
IGNORE_DIRS = (
)

# Regular expressions matched against each file name; matching files are
# not synchronized.  Raw strings so the regex escapes are not interpreted
# as (invalid) string escape sequences.
IGNORE_FILES = (
    'COPYING',
    r'\.md',
    r'\.lib',
    r'\.bld'
)
def ignore_path(name, reg_exps):
    """Return True if *name* matches any regular expression in *reg_exps*."""
    return any(re.search(pattern, name) for pattern in reg_exps)
class MbedRepository:
    """Wrapper around a single mercurial repository on mbed.org.
    On construction the repository is cloned (first use) or pulled and
    updated (subsequent uses) under MBED_ORG_PATH.
    """
    @staticmethod
    def run_and_print(command, cwd):
        # Run an external command in *cwd* and echo its stdout.
        stdout, _, _ = run_cmd(command, wd=cwd, redirect=True)
        print(stdout)
    def __init__(self, name, team = None):
        self.name = name
        self.path = join(MBED_ORG_PATH, name)
        # Team repositories live under /teams/<team>/; personal ones
        # under /users/<MBED_USER>/.
        if team is None:
            self.url = "http://" + MBED_URL + "/users/" + MBED_USER + "/code/%s/"
        else:
            self.url = "http://" + MBED_URL + "/teams/" + team + "/code/%s/"
        if not exists(self.path):
            # Checkout code
            if not exists(MBED_ORG_PATH):
                makedirs(MBED_ORG_PATH)
            self.run_and_print(['hg', 'clone', self.url % name], cwd=MBED_ORG_PATH)
        else:
            # Update
            self.run_and_print(['hg', 'pull'], cwd=self.path)
            self.run_and_print(['hg', 'update'], cwd=self.path)
    def publish(self):
        # The maintainer has to evaluate the changes first and explicitly accept them
        # Returns False when the working copy is clean; True whenever
        # changes exist -- even if the user declined to commit them.
        self.run_and_print(['hg', 'addremove'], cwd=self.path)
        stdout, _, _ = run_cmd(['hg', 'status'], wd=self.path)
        if stdout == '':
            print "No changes"
            return False
        print stdout
        if quiet:
            commit = 'Y'
        else:
            commit = raw_input(push_remote and "Do you want to commit and push? Y/N: " or "Do you want to commit? Y/N: ")
        if commit == 'Y':
            args = ['hg', 'commit', '-u', MBED_ORG_USER]
            # Optional commit message from the --commit_message option.
            if commit_msg:
                args = args + ['-m', commit_msg]
            self.run_and_print(args, cwd=self.path)
            if push_remote:
                self.run_and_print(['hg', 'push'], cwd=self.path)
        return True
# Check if a file is a text file or a binary file
# Taken from http://code.activestate.com/recipes/173220/
# NOTE: Python 2 idioms -- map() returns a list here, and
# string.maketrans builds an identity translation table.
text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
_null_trans = string.maketrans("", "")
def is_text_file(filename):
    """Return 1 if *filename* looks like a text file, 0 if binary.
    Heuristic over the first 1 kB: a NUL byte or more than 30%%
    non-printable characters classifies the file as binary.
    (Python 2 only: relies on str.translate with a deletion table.)
    """
    block_size = 1024
    def istext(s):
        if "\0" in s:
            return 0
        if not s: # Empty files are considered text
            return 1
        # Get the non-text characters (maps a character to itself then
        # use the 'remove' option to get rid of the text characters.)
        t = s.translate(_null_trans, text_characters)
        # If more than 30% non-text characters, then
        # this is considered a binary file
        if float(len(t))/len(s) > 0.30:
            return 0
        return 1
    with open(filename) as f:
        res = istext(f.read(block_size))
    return res
# Return the line ending type for the given file ('cr' or 'crlf')
def get_line_endings(f):
    """Guess the dominant line-ending style of file *f*.

    Reads roughly the first 1 kB worth of lines and returns 'crlf' when
    more than half of them end in CR-LF, otherwise 'cr' (i.e. plain LF).
    On an I/O error 'cr' is returned as a safe default.
    """
    examine_size = 1024
    try:
        # Binary mode so the platform's newline translation does not
        # hide the real line endings; 'with' guarantees the handle is
        # closed even when readlines() fails (the original leaked it).
        with open(f, "rb") as tf:
            lines = tf.readlines(examine_size)
        # b"\r\n" matches the bytes read in binary mode (the original
        # compared bytes against a str literal, a TypeError on Python 3).
        ncrlf = sum(1 for line in lines if line.endswith(b"\r\n"))
        return 'crlf' if ncrlf > len(lines) >> 1 else 'cr'
    except (IOError, OSError):
        # Narrowed from a bare 'except:' -- only I/O failures should
        # silently fall back to Unix-style endings.
        return 'cr'
# Copy file to destination, but preserve destination line endings if possible
# This prevents very annoying issues with huge diffs that appear because of
# differences in line endings
def copy_with_line_endings(sdk_file, repo_file):
    """Copy *sdk_file* over *repo_file*, converting line endings so that
    the repository copy keeps its current style ('cr' or 'crlf').
    New or binary destination files are copied verbatim.
    """
    if not isfile(repo_file):
        copyfile(sdk_file, repo_file)
        return
    is_text = is_text_file(repo_file)
    if is_text:
        sdk_le = get_line_endings(sdk_file)
        repo_le = get_line_endings(repo_file)
    if not is_text or sdk_le == repo_le:
        copyfile(sdk_file, repo_file)
    else:
        print "Converting line endings in '%s' to '%s'" % (abspath(repo_file), repo_le)
        f = open(sdk_file, "rb")
        data = f.read()
        f.close()
        f = open(repo_file, "wb")
        # Rewrite the source data in the destination's ending style.
        data = data.replace("\r\n", "\n") if repo_le == 'cr' else data.replace('\n','\r\n')
        f.write(data)
        f.close()
def visit_files(path, visit):
    """Walk *path* recursively and call visit(file_path) for every file.
    Hidden directories, directories matching IGNORE_DIRS and files
    matching IGNORE_FILES are skipped.
    """
    for root, dirs, files in walk(path):
        # Ignore hidden directories
        # (iterate over a copy so removals don't disturb the walk)
        for d in copy(dirs):
            full = join(root, d)
            if d.startswith('.'):
                dirs.remove(d)
            # NOTE(review): a directory that is both hidden and matches
            # IGNORE_DIRS gets removed twice -- the second remove would
            # raise ValueError.  Confirm whether that case can occur.
            if ignore_path(full, IGNORE_DIRS):
                print "Skipping '%s'" % full
                dirs.remove(d)
        for file in files:
            if ignore_path(file, IGNORE_FILES):
                continue
            visit(join(root, file))
def update_repo(repo_name, sdk_paths, team_name):
    """Mirror the given SDK directories into one mbed.org repository.
    Copies files from each directory in *sdk_paths* into the repository
    (preserving the repository's existing line endings), removes files
    that no longer exist in any SDK directory, and publishes the result.
    """
    repo = MbedRepository(repo_name, team_name)
    # copy files from mbed SDK to mbed_official repository
    # NOTE: visit_mbed_sdk reads 'sdk_path' from the enclosing for-loop
    # (late binding) -- it is only ever called from inside that loop.
    def visit_mbed_sdk(sdk_file):
        repo_file = join(repo.path, relpath(sdk_file, sdk_path))
        repo_dir = dirname(repo_file)
        if not exists(repo_dir):
            makedirs(repo_dir)
        copy_with_line_endings(sdk_file, repo_file)
    for sdk_path in sdk_paths:
        visit_files(sdk_path, visit_mbed_sdk)
    # remove repository files that do not exist in the mbed SDK
    def visit_repo(repo_file):
        for sdk_path in sdk_paths:
            sdk_file = join(sdk_path, relpath(repo_file, repo.path))
            if exists(sdk_file):
                break
        else:
            # Not found in any SDK directory: drop it from the mirror.
            remove(repo_file)
            print "remove: %s" % repo_file
    visit_files(repo.path, visit_repo)
    if repo.publish():
        changed.append(repo_name)
def update_code(repositories):
    """Synchronize every (repo_name, dir_or_dirs[, team]) entry, e.g.
    the OFFICIAL_CODE table, against the local SDK under LIB_DIR.
    """
    for r in repositories:
        repo_name, sdk_dir = r[0], r[1]
        # The optional third element is the publishing team.
        team_name = r[2] if len(r) == 3 else None
        print '\n=== Updating "%s" ===' % repo_name
        # Normalize the single-directory form to a list.
        sdk_dirs = [sdk_dir] if type(sdk_dir) != type([]) else sdk_dir
        sdk_path = [join(LIB_DIR, d) for d in sdk_dirs]
        update_repo(repo_name, sdk_path, team_name)
def update_single_repo(repo):
    """Synchronize only the OFFICIAL_CODE entry named *repo*."""
    repos = [r for r in OFFICIAL_CODE if r[0] == repo]
    if not repos:
        print "Repository '%s' not found" % repo
    else:
        update_code(repos)
def update_dependencies(repositories):
    """Point each repository's dependency references to the latest
    revision by stripping the revision suffix from the stored URL.
    """
    for repo_name in repositories:
        print '\n=== Updating "%s" ===' % repo_name
        repo = MbedRepository(repo_name)
        # point to the latest libraries
        def visit_repo(repo_file):
            with open(repo_file, "r") as f:
                url = f.read()
            with open(repo_file, "w") as f:
                # Keep everything up to and including the last '/',
                # dropping the pinned revision after it.
                f.write(url[:(url.rindex('/')+1)])
        # NOTE(review): visit_files() is defined with two parameters and
        # MBED_REPO_EXT is not defined anywhere in this file, so this call
        # raises a TypeError/NameError as written -- it looks like it
        # predates a visit_files() signature change.  Confirm against the
        # repository history before fixing.
        visit_files(repo.path, visit_repo, None, MBED_REPO_EXT)
        if repo.publish():
            changed.append(repo_name)
def update_mbed():
    """Publish the built mbed library (from BUILD_DIR) to the 'mbed' repo."""
    update_repo("mbed", [join(BUILD_DIR, "mbed")], None)
def do_sync(options):
global push_remote, quiet, commit_msg, changed
push_remote = not options.nopush
quiet = options.quiet
commit_msg = options.msg
chnaged = []
if options.code:
update_code(OFFICIAL_CODE)
if options.dependencies:
update_dependencies(CODE_WITH_DEPENDENCIES)
if options.mbed:
update_mbed()
if options.repo:
update_single_repo(options.repo)
if changed:
print "Repositories with changes:", changed
return changed
if __name__ == '__main__':
    # Command-line entry point: parse the options and run the sync.
    parser = OptionParser()
    parser.add_option("-c", "--code",
                  action="store_true",  default=False,
                  help="Update the mbed_official code")
    parser.add_option("-d", "--dependencies",
                  action="store_true",  default=False,
                  help="Update the mbed_official code dependencies")
    parser.add_option("-m", "--mbed",
                  action="store_true",  default=False,
                  help="Release a build of the mbed library")
    parser.add_option("-n", "--nopush",
                  action="store_true", default=False,
                  help="Commit the changes locally only, don't push them")
    parser.add_option("", "--commit_message",
                  action="store", type="string", default='', dest='msg',
                  help="Commit message to use for all the commits")
    parser.add_option("-r", "--repository",
                  action="store", type="string", default='', dest='repo',
                  help="Synchronize only the given repository")
    parser.add_option("-q", "--quiet",
                  action="store_true", default=False,
                  help="Don't ask for confirmation before commiting or pushing")
    (options, args) = parser.parse_args()
    do_sync(options)
|
jferreir/mbed
|
workspace_tools/synch.py
|
Python
|
apache-2.0
| 11,521
|
[
"VisIt"
] |
49888820000335bfe1f7530f934a90cda7905ec8885d833381d421ade851d3df
|
#
# complete_text.py: text mode congratulations windows
#
# Copyright 2001-2006 Red Hat, Inc.
#
# This software may be freely redistributed under the terms of the GNU
# library public license.
#
# You should have received a copy of the GNU Library Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
from snack import *
from constants_text import *
from rhpl.translate import _
from constants import *
import rhpl
class FinishedWindow:
    """Text-mode 'congratulations' screen shown when installation ends."""

    def __call__(self, screen, anaconda):
        """Display the completion message and wait for acknowledgement.

        Returns INSTALL_OK once the user presses the button.
        """
        bootstr = ""
        buttonstr = _("Reboot")
        if rhpl.getArch() in ['s390', 's390x']:
            # s390 machines cannot simply reboot from the installer.
            floppystr = _("Press <Enter> to end the installation process.\n\n")
            bottomstr = _("<Enter> to exit")
            if not anaconda.canReIPL:
                buttonstr = _("Shutdown")
            if not anaconda.reIPLMessage is None:
                floppystr = anaconda.reIPLMessage + "\n\n" + floppystr
        else:
            floppystr = _("Remove any media used during the installation "
                          "process and press <Enter> to reboot your system."
                          "\n\n")
            bottomstr = _("<Enter> to reboot")
        # Fixed: this previously called string.center(), but the 'string'
        # module is never imported by this file; the equivalent str method
        # avoids relying on a name leaked through a star import.
        screen.pushHelpLine (bottomstr.center(screen.width))
        txt = _("Congratulations, your %s installation is "
                "complete.\n\n"
                "%s%s") %(productName, floppystr, bootstr)
        # NOTE(review): 'foo' is built but never displayed -- it looks like
        # it was meant to be appended to 'txt'.  Kept as-is to preserve
        # behavior (including the translation lookup).
        foo = _("For information on errata (updates and bug fixes), visit "
                "http://www.redhat.com/errata/.\n\n"
                "Information on using your "
                "system is available in the %s manuals at "
                "http://www.redhat.com/docs/.") %(productName,)
        rc = ButtonChoiceWindow (screen, _("Complete"), txt,
                                 [ buttonstr ], help = "finished", width=60)
        return INSTALL_OK
|
sergey-senozhatsky/anaconda-11-vlan-support
|
textw/complete_text.py
|
Python
|
gpl-2.0
| 1,907
|
[
"VisIt"
] |
9a2f840bd792e29310e825f144939fc4d22857404a054b5648abfa62d3b9eca0
|
########################################################################
# File : Watchdog.py
# Author: Stuart Paterson
########################################################################
""" The Watchdog class is used by the Job Wrapper to resolve and monitor
the system resource consumption. The Watchdog can determine if
a running job is stalled and indicate this to the Job Wrapper.
Furthermore, the Watchdog will identify when the Job CPU limit has been
exceeded and fail jobs meaningfully.
Information is returned to the WMS via the heart-beat mechanism. This
also interprets control signals from the WMS e.g. to kill a running
job.
- Still to implement:
- CPU normalization for correct comparison with job limit
"""
__RCSID__ = "$Id$"
import os
import time
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import Time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemInstance
from DIRAC.Core.Utilities.ProcessMonitor import ProcessMonitor
from DIRAC.Core.Utilities.TimeLeft.TimeLeft import TimeLeft
class Watchdog( object ):
#############################################################################
def __init__( self, pid, exeThread, spObject, jobCPUtime, memoryLimit = 0, processors = 1, systemFlag = 'linux' ):
""" Constructor, takes system flag as argument.
"""
self.log = gLogger.getSubLogger( "Watchdog" )
self.systemFlag = systemFlag
self.exeThread = exeThread
self.wrapperPID = pid
self.appPID = self.exeThread.getCurrentPID()
self.spObject = spObject
self.jobCPUtime = jobCPUtime
self.memoryLimit = memoryLimit
self.calibration = 0
self.initialValues = {}
self.parameters = {}
self.peekFailCount = 0
self.peekRetry = 5
self.processMonitor = ProcessMonitor()
self.checkError = ''
self.currentStats = {}
self.initialized = False
self.count = 0
#defaults
self.testWallClock = 1
self.testDiskSpace = 1
self.testLoadAvg = 1
self.maxWallClockTime = 3 * 24 * 60 * 60
self.testCPUConsumed = 1
self.testCPULimit = 0
self.testMemoryLimit = 0
self.testTimeLeft = 1
self.pollingTime = 10 # 10 seconds
self.checkingTime = 30 * 60 # 30 minute period
self.minCheckingTime = 20 * 60 # 20 mins
self.maxWallClockTime = 3 * 24 * 60 * 60 # e.g. 4 days
self.jobPeekFlag = 1 # on / off
self.minDiskSpace = 10 # MB
self.loadAvgLimit = 1000 # > 1000 and jobs killed
self.sampleCPUTime = 30 * 60 # e.g. up to 20mins sample
self.jobCPUMargin = 20 # %age buffer before killing job
self.minCPUWallClockRatio = 5 # ratio %age
self.nullCPULimit = 5 # After 5 sample times return null CPU consumption kill job
self.checkCount = 0
self.nullCPUCount = 0
self.grossTimeLeftLimit = 10 * self.checkingTime
self.timeLeftUtil = TimeLeft()
self.timeLeft = 0
self.littleTimeLeft = False
self.scaleFactor = 1.0
self.processors = processors
  #############################################################################
  def initialize( self, loops = 0 ):
    """ Watchdog initialization: reads the test control flags and the check
        thresholds from the WorkloadManagement JobWrapper configuration
        section, falling back to the constructor defaults.
        :param loops: maximum number of watchdog cycles (stored in self.maxcount)
    """
    if self.initialized:
      self.log.info( 'Watchdog already initialized' )
      return S_OK()
    else:
      self.initialized = True
    setup = gConfig.getValue( '/DIRAC/Setup', '' )
    if not setup:
      return S_ERROR( 'Can not get the DIRAC Setup value' )
    wms_instance = getSystemInstance( "WorkloadManagement" )
    if not wms_instance:
      return S_ERROR( 'Can not get the WorkloadManagement system instance' )
    self.section = '/Systems/WorkloadManagement/%s/JobWrapper' % wms_instance
    self.maxcount = loops
    self.log.verbose( 'Watchdog initialization' )
    self.log.info( 'Attempting to Initialize Watchdog for: %s' % ( self.systemFlag ) )
    # Test control flags
    self.testWallClock = gConfig.getValue( self.section + '/CheckWallClockFlag', 1 )
    self.testDiskSpace = gConfig.getValue( self.section + '/CheckDiskSpaceFlag', 1 )
    self.testLoadAvg = gConfig.getValue( self.section + '/CheckLoadAvgFlag', 1 )
    self.testCPUConsumed = gConfig.getValue( self.section + '/CheckCPUConsumedFlag', 1 )
    self.testCPULimit = gConfig.getValue( self.section + '/CheckCPULimitFlag', 0 )
    self.testMemoryLimit = gConfig.getValue( self.section + '/CheckMemoryLimitFlag', 0 )
    self.testTimeLeft = gConfig.getValue( self.section + '/CheckTimeLeftFlag', 1 )
    # Other parameters
    self.pollingTime = gConfig.getValue( self.section + '/PollingTime', 10 ) # 10 seconds
    self.checkingTime = gConfig.getValue( self.section + '/CheckingTime', 30 * 60 ) # 30 minute period
    self.minCheckingTime = gConfig.getValue( self.section + '/MinCheckingTime', 20 * 60 ) # 20 mins
    self.maxWallClockTime = gConfig.getValue( self.section + '/MaxWallClockTime', 3 * 24 * 60 * 60 ) # default 3 days
    self.jobPeekFlag = gConfig.getValue( self.section + '/JobPeekFlag', 1 ) # on / off
    self.minDiskSpace = gConfig.getValue( self.section + '/MinDiskSpace', 10 ) # MB
    self.loadAvgLimit = gConfig.getValue( self.section + '/LoadAverageLimit', 1000 ) # > 1000 and jobs killed
    self.sampleCPUTime = gConfig.getValue( self.section + '/CPUSampleTime', 30 * 60 ) # sample window for the CPU checks
    self.jobCPUMargin = gConfig.getValue( self.section + '/JobCPULimitMargin', 20 ) # %age buffer before killing job
    self.minCPUWallClockRatio = gConfig.getValue( self.section + '/MinCPUWallClockRatio', 5 ) # ratio %age
    self.nullCPULimit = gConfig.getValue( self.section + '/NullCPUCountLimit', 5 ) # After 5 sample times return null CPU consumption kill job
    if self.checkingTime < self.minCheckingTime:
      self.log.info( 'Requested CheckingTime of %s setting to %s seconds (minimum)' % ( self.checkingTime, self.minCheckingTime ) )
      self.checkingTime = self.minCheckingTime
    # The time left is returned in seconds @ 250 SI00 = 1 HS06,
    # the self.checkingTime and self.pollingTime are in seconds,
    # thus they need to be multiplied by a large enough factor
    self.fineTimeLeftLimit = gConfig.getValue( self.section + '/TimeLeftLimit', 150 * self.pollingTime )
    self.scaleFactor = gConfig.getValue( '/LocalSite/CPUScalingFactor', 1.0 )
    return S_OK()
  def run( self ):
    """ The main watchdog execution method: initializes the watchdog and
        then loops, calling execute() roughly every pollingTime seconds
        until the monitored thread ends or a check fails.
    """
    result = self.initialize()
    if not result['OK']:
      self.log.always( 'Can not start watchdog for the following reason' )
      self.log.always( result['Message'] )
      return result
    try:
      while True:
        self.log.debug( 'Starting watchdog loop # %d' % self.count )
        start_cycle_time = time.time()
        result = self.execute()
        exec_cycle_time = time.time() - start_cycle_time
        if not result[ 'OK' ]:
          self.log.error( "Watchdog error during execution", result[ 'Message' ] )
          break
        elif result['Value'] == "Ended":
          break
        self.count += 1
        # Sleep only for the remainder of the polling period.
        if exec_cycle_time < self.pollingTime:
          time.sleep( self.pollingTime - exec_cycle_time )
      return S_OK()
    except Exception:
      self.log.exception()
      return S_ERROR( 'Exception' )
  #############################################################################
  def execute( self ):
    """ The main agent execution method of the Watchdog.
        Returns S_OK( "Ended" ) once the monitored thread has finished;
        otherwise runs the periodic checks and returns S_OK().
    """
    if not self.exeThread.isAlive():
      self.__getUsageSummary()
      self.log.info( 'Process to monitor has completed, Watchdog will exit.' )
      return S_OK( "Ended" )
    if self.littleTimeLeft:
      # if we have gone over enough iterations query again
      # NOTE(review): littleTimeLeftCount is expected to be set wherever
      # littleTimeLeft is switched on (outside this view) -- confirm.
      if self.littleTimeLeftCount == 0 and self.__timeLeft() == -1:
        self.checkError = 'Job has reached the CPU limit of the queue'
        self.log.error( self.checkError, self.timeLeft )
        self.__killRunningThread()
        return S_OK()
      else:
        self.littleTimeLeftCount -= 1
    # Note: need to poll regularly to see if the thread is alive
    # but only perform checks with a certain frequency
    if ( time.time() - self.initialValues['StartTime'] ) > self.checkingTime * self.checkCount:
      self.checkCount += 1
      result = self._performChecks()
      if not result['OK']:
        self.log.warn( 'Problem during recent checks' )
        self.log.warn( result['Message'] )
      return S_OK()
    else:
      # self.log.debug('Application thread is alive: checking count is %s' %(self.checkCount))
      return S_OK()
  #############################################################################
  def _performChecks( self ):
    """The Watchdog checks are performed at a different period to the checking of the
       application thread and correspond to the checkingTime.
       Collects load, memory, disk, CPU and wall-clock figures, runs the
       enabled checks and sends a heartbeat (with recent stdout) to the WMS.
    """
    self.log.verbose( '------------------------------------' )
    self.log.verbose( 'Checking loop starts for Watchdog' )
    heartBeatDict = {}
    msg = ''
    # --- machine load average ---
    loadAvg = self.getLoadAverage()
    if not loadAvg['OK']:
      msg += 'LoadAvg: ERROR'
    else:
      loadAvg = loadAvg['Value']
      msg += 'LoadAvg: %d ' % loadAvg
      heartBeatDict['LoadAverage'] = loadAvg
      if not self.parameters.has_key( 'LoadAverage' ):
        self.parameters['LoadAverage'] = []
      self.parameters['LoadAverage'].append( loadAvg )
    # --- machine memory usage ---
    memoryUsed = self.getMemoryUsed()
    if not memoryUsed['OK']:
      msg += 'MemUsed: ERROR '
    else:
      memoryUsed = memoryUsed['Value']
      msg += 'MemUsed: %.1f kb ' % ( memoryUsed )
      heartBeatDict['MemoryUsed'] = memoryUsed
      if not self.parameters.has_key( 'MemoryUsed' ):
        self.parameters['MemoryUsed'] = []
      self.parameters['MemoryUsed'].append( memoryUsed )
    # --- job process memory (Vsize / RSS, reported in kb) ---
    result = self.processMonitor.getMemoryConsumed( self.wrapperPID )
    if result['OK']:
      vsize = result['Value']['Vsize']/1024.
      rss = result['Value']['RSS']/1024.
      heartBeatDict['Vsize'] = vsize
      heartBeatDict['RSS'] = rss
      self.parameters.setdefault( 'Vsize', [] )
      self.parameters['Vsize'].append( vsize )
      self.parameters.setdefault( 'RSS', [] )
      self.parameters['RSS'].append( rss )
      msg += "Job Vsize: %.1f kb " % vsize
      msg += "Job RSS: %.1f kb " % rss
    # --- available disk space ---
    result = self.getDiskSpace()
    if not result['OK']:
      self.log.warn( "Could not establish DiskSpace", result['Message'] )
    else:
      msg += 'DiskSpace: %.1f MB ' % ( result['Value'] )
    if not self.parameters.has_key( 'DiskSpace' ):
      self.parameters['DiskSpace'] = []
    if result['OK']:
      self.parameters['DiskSpace'].append( result['Value'] )
      heartBeatDict['AvailableDiskSpace'] = result['Value']
    # --- CPU consumed (stored as an "HH:MM:SS" string) ---
    cpu = self.__getCPU()
    if not cpu['OK']:
      msg += 'CPU: ERROR '
      hmsCPU = 0
    else:
      cpu = cpu['Value']
      msg += 'CPU: %s (h:m:s) ' % ( cpu )
      if not self.parameters.has_key( 'CPUConsumed' ):
        self.parameters['CPUConsumed'] = []
      self.parameters['CPUConsumed'].append( cpu )
      hmsCPU = cpu
      rawCPU = self.__convertCPUTime( hmsCPU )
      if rawCPU['OK']:
        heartBeatDict['CPUConsumed'] = rawCPU['Value']
    # --- wall clock time ---
    result = self.__getWallClockTime()
    if not result['OK']:
      self.log.warn( "Failed determining wall clock time", result['Message'] )
    else:
      msg += 'WallClock: %.2f s ' % ( result['Value'] )
      self.parameters.setdefault( 'WallClockTime', list() ).append( result['Value'] )
      heartBeatDict['WallClockTime'] = result['Value']
    self.log.info( msg )
    # Run the enabled checks; a failure means the job must be killed.
    result = self._checkProgress()
    if not result['OK']:
      self.checkError = result['Message']
      self.log.warn( self.checkError )
      if self.jobPeekFlag:
        result = self.__peek()
        if result['OK']:
          outputList = result['Value']
          size = len( outputList )
          self.log.info( 'Last %s lines of available application output:' % ( size ) )
          self.log.info( '================START================' )
          for line in outputList:
            self.log.info( line )
          self.log.info( '=================END=================' )
      self.__killRunningThread()
      return S_OK()
    recentStdOut = 'None'
    if self.jobPeekFlag:
      result = self.__peek()
      if result['OK']:
        outputList = result['Value']
        size = len( outputList )
        recentStdOut = 'Last %s lines of application output from Watchdog on %s [UTC]:' % ( size, Time.dateTime() )
        border = '=' * len( recentStdOut )
        cpuTotal = 'Last reported CPU consumed for job is %s (h:m:s)' % ( hmsCPU )
        if self.timeLeft:
          cpuTotal += ', Batch Queue Time Left %s (s @ HS06)' % self.timeLeft
        recentStdOut = '\n%s\n%s\n%s\n%s\n' % ( border, recentStdOut, cpuTotal, border )
        self.log.info( recentStdOut )
        for line in outputList:
          self.log.info( line )
          recentStdOut += line + '\n'
      else:
        recentStdOut = 'Watchdog is initializing and will attempt to obtain standard output from application thread'
        self.log.info( recentStdOut )
        # Peeking repeatedly failing: give up after peekRetry attempts.
        self.peekFailCount += 1
        if self.peekFailCount > self.peekRetry:
          self.jobPeekFlag = 0
          self.log.warn( 'Turning off job peeking for remainder of execution' )
    if not os.environ.has_key( 'JOBID' ):
      self.log.info( 'Running without JOBID so parameters will not be reported' )
      return S_OK()
    jobID = os.environ['JOBID']
    staticParamDict = {'StandardOutput':recentStdOut}
    # Heartbeat to the WMS; the reply may carry control signals (e.g. Kill).
    self.__sendSignOfLife( int( jobID ), heartBeatDict, staticParamDict )
    return S_OK( 'Watchdog checking cycle complete' )
  #############################################################################
  def __getCPU( self ):
    """Get the CPU time consumed by the wrapper process (via ProcessMonitor)
       and return it formatted as HH:MM:SS.
       (The original docstring mentioned os.times(); the code actually
       queries ProcessMonitor for self.wrapperPID.)
    """
    try:
      cpuTime = self.processMonitor.getCPUConsumed( self.wrapperPID )
      if not cpuTime['OK']:
        self.log.warn( 'Problem while checking consumed CPU' )
        return cpuTime
      cpuTime = cpuTime['Value']
      if cpuTime:
        self.log.verbose( "Raw CPU time consumed (s) = %s" % ( cpuTime ) )
        return self.__getCPUHMS( cpuTime )
      else:
        # A zero reading is reported as an error so callers can skip it.
        self.log.error( "CPU time consumed found to be 0" )
        return S_ERROR()
    except Exception as e:
      self.log.warn( 'Could not determine CPU time consumed with exception' )
      self.log.exception( e )
      return S_ERROR( "Could not determine CPU time consumed with exception" )
#############################################################################
def __getCPUHMS( self, cpuTime ):
mins, secs = divmod( cpuTime, 60 )
hours, mins = divmod( mins, 60 )
humanTime = '%02d:%02d:%02d' % ( hours, mins, secs )
self.log.verbose( 'Human readable CPU time is: %s' % humanTime )
return S_OK( humanTime )
#############################################################################
def __interpretControlSignal( self, signalDict ):
"""This method is called whenever a signal is sent via the result of
sending a sign of life.
"""
self.log.info( 'Received control signal' )
if type( signalDict ) == type( {} ):
if signalDict.has_key( 'Kill' ):
self.log.info( 'Received Kill signal, stopping job via control signal' )
self.checkError = 'Received Kill signal'
self.__killRunningThread()
else:
self.log.info( 'The following control signal was sent but not understood by the watchdog:' )
self.log.info( signalDict )
else:
self.log.info( 'Expected dictionary for control signal, received:\n%s' % ( signalDict ) )
return S_OK()
  #############################################################################
  def _checkProgress( self ):
    """This method calls specific tests to determine whether the job execution
       is proceeding normally. CS flags can easily be added to add or remove
       tests via central configuration.
       Returns an error result as soon as any enabled check fails (which
       leads to the job being killed by the caller).
    """
    report = ''
    if self.testWallClock:
      result = self.__checkWallClockTime()
      if not result['OK']:
        self.log.warn( result['Message'] )
        return result
      report += 'WallClock: OK, '
    else:
      report += 'WallClock: NA,'
    if self.testDiskSpace:
      result = self.__checkDiskSpace()
      if not result['OK']:
        self.log.warn( result['Message'] )
        return result
      report += 'DiskSpace: OK, '
    else:
      report += 'DiskSpace: NA,'
    if self.testLoadAvg:
      result = self.__checkLoadAverage()
      if not result['OK']:
        self.log.warn( "Check of load average failed, but won't fail because of that: %s" % result['Message'] )
        report += 'LoadAverage: ERROR, '
        # NOTE: a load-average failure ends this checking round early with
        # success, so the remaining checks below are skipped this cycle.
        return S_OK()
      report += 'LoadAverage: OK, '
    else:
      report += 'LoadAverage: NA,'
    if self.testCPUConsumed:
      result = self.__checkCPUConsumed()
      if not result['OK']:
        return result
      report += 'CPUConsumed: OK, '
    else:
      report += 'CPUConsumed: NA, '
    if self.testCPULimit:
      result = self.__checkCPULimit()
      if not result['OK']:
        self.log.warn( result['Message'] )
        return result
      report += 'CPULimit OK, '
    else:
      report += 'CPULimit: NA, '
    if self.testTimeLeft:
      # Refreshes self.timeLeft as a side effect.
      self.__timeLeft()
      if self.timeLeft:
        report += 'TimeLeft: OK'
    else:
      report += 'TimeLeft: NA'
    if self.testMemoryLimit:
      result = self.__checkMemoryLimit()
      if not result['OK']:
        self.log.warn( result['Message'] )
        return result
      report += 'MemoryLimit OK, '
    else:
      report += 'MemoryLimit: NA, '
    self.log.info( report )
    return S_OK( 'All enabled checks passed' )
  #############################################################################
  def __checkCPUConsumed( self ):
    """ Checks whether the CPU consumed by application process is reasonable. This
        method will report stalled jobs to be killed.
        A job is considered stalled when its CPU/wall-clock ratio over the
        last sample window drops below minCPUWallClockRatio percent.
    """
    self.log.info( "Checking CPU Consumed" )
    if 'WallClockTime' not in self.parameters:
      return S_ERROR( 'Missing WallClockTime info' )
    if 'CPUConsumed' not in self.parameters:
      return S_ERROR( 'Missing CPUConsumed info' )
    wallClockTime = self.parameters['WallClockTime'][-1]
    if wallClockTime < self.sampleCPUTime:
      self.log.info( "Stopping check, wallclock time (%s) is still smaller than sample time (%s)" % ( wallClockTime,
                                                                                                      self.sampleCPUTime ) )
      return S_OK()
    # Number of check snapshots covering one CPU sample window.
    intervals = max( 1, int( self.sampleCPUTime / self.checkingTime ) )
    if len( self.parameters['CPUConsumed'] ) < intervals + 1:
      self.log.info( "Not enough snapshots to calculate, there are %s and we need %s" % ( len( self.parameters['CPUConsumed'] ),
                                                                                          intervals + 1 ) )
      return S_OK()
    # Wall clock elapsed over the last 'intervals' snapshots.
    wallClockTime = self.parameters['WallClockTime'][-1] - self.parameters['WallClockTime'][-1 - intervals ]
    try:
      cpuTime = self.__convertCPUTime( self.parameters['CPUConsumed'][-1] )['Value']
      # For some reason, some times the CPU consumed estimation returns 0
      # if cpuTime == 0:
      #   return S_OK()
      cpuTime -= self.__convertCPUTime( self.parameters['CPUConsumed'][-1 - intervals ] )['Value']
      if cpuTime < 0:
        self.log.warn( 'Consumed CPU time negative, something wrong may have happened, ignore' )
        return S_OK()
      if wallClockTime <= 0:
        self.log.warn( 'Wallclock time should not be negative or zero, Ignore' )
        return S_OK()
      ratio = ( cpuTime / wallClockTime ) * 100.
      self.log.info( "CPU/Wallclock ratio is %.2f%%" % ratio )
      # in case of error cpuTime might be 0, exclude this
      if ratio < self.minCPUWallClockRatio:
        # Payloads can opt out of the stalled-job check via a marker file.
        if os.path.exists( 'DISABLE_WATCHDOG_CPU_WALLCLOCK_CHECK' ):
          self.log.info( 'N.B. job would be declared as stalled but CPU / WallClock check is disabled by payload' )
          return S_OK()
        self.log.info( "Job is stalled!" )
        return S_ERROR( 'Watchdog identified this job as stalled' )
      # NOTE(review): when the ratio check passes, control falls through
      # to the end of the method and returns None rather than S_OK() --
      # confirm whether a trailing return was lost.
    except Exception as e:
      self.log.error( "Cannot convert CPU consumed from string to int", str( e ) )
      return S_OK()
#############################################################################
def __convertCPUTime( self, cputime ):
    """Convert a CPU time string 'hh:mm:ss' (as reported by the Watchdog
    instances) into seconds, returned as S_OK()['Value'].

    Returns S_ERROR if the fields cannot be parsed as numbers.
    Normalization to DIRAC units is not yet implemented, so the raw
    second count is passed through unchanged.
    """
    fields = cputime.split( ':' )
    try:
        seconds = float( fields[0] ) * 60 * 60 + float( fields[1] ) * 60 + float( fields[2] )
    except Exception as exc:
        self.log.warn( str( exc ) )
        return S_ERROR( 'Could not calculate CPU time' )
    result = S_OK()
    result['Value'] = seconds
    self.log.debug( 'CPU value %s converted to %s' % ( cputime, seconds ) )
    return result
#############################################################################
def __checkCPULimit( self ):
    """ Checks that the job has consumed more than the job CPU requirement
        (plus a configurable margin) and kills them as necessary.

        Returns S_ERROR when the limit is exceeded, S_OK otherwise
        (including when the consumption cannot be determined).
    """
    consumedCPU = 0
    if self.parameters.has_key( 'CPUConsumed' ):
        consumedCPU = self.parameters['CPUConsumed'][-1]
    # If conversion fails (e.g. consumedCPU is still the integer 0),
    # we bail out here, so currentCPU is always bound below.
    consumedCPUDict = self.__convertCPUTime( consumedCPU )
    if consumedCPUDict['OK']:
        currentCPU = consumedCPUDict['Value']
    else:
        return S_OK( 'Not possible to determine current CPU consumed' )
    if consumedCPU:
        # NOTE(review): under Python 2, jobCPUMargin / 100 truncates to 0
        # if jobCPUMargin is an int — confirm jobCPUMargin is a float.
        limit = self.jobCPUtime + self.jobCPUtime * ( self.jobCPUMargin / 100 )
        cpuConsumed = float( currentCPU )
        if cpuConsumed > limit:
            self.log.info( 'Job has consumed more than the specified CPU limit with an additional %s%% margin' % ( self.jobCPUMargin ) )
            return S_ERROR( 'Job has exceeded maximum CPU time limit' )
        else:
            return S_OK( 'Job within CPU limit' )
    elif not currentCPU:
        self.log.verbose( 'Both initial and current CPU consumed are null' )
        return S_OK( 'CPU consumed is not measurable yet' )
    else:
        return S_OK( 'Not possible to determine CPU consumed' )
def __checkMemoryLimit( self ):
    """ Checks that the job memory consumption is within a limit.

        Only a warning is emitted for the moment; the method always
        returns S_OK.
    """
    if self.parameters.has_key( 'Vsize' ):
        vsize = self.parameters['Vsize'][-1]
        if vsize and self.memoryLimit:
            if vsize > self.memoryLimit:
                # Just a warning for the moment.
                # Fixed format spec: was '%f.2' which printed the full float
                # followed by a literal '.2' (e.g. '1024.000000.2 KB').
                self.log.warn( "Job has consumed %.2f KB of memory with the limit of %.2f KB" % ( vsize, self.memoryLimit ) )
    return S_OK()
#############################################################################
def __checkDiskSpace( self ):
    """Check whether the locally available disk space is above the
    CS-defined minimum; S_ERROR when below the threshold or unknown."""
    if not self.parameters.has_key( 'DiskSpace' ):
        return S_ERROR( 'Available disk space could not be established' )
    availSpace = self.parameters['DiskSpace'][-1]
    # Negative values mean the measurement itself failed; ignore those.
    if 0 <= availSpace < self.minDiskSpace:
        self.log.info( 'Not enough local disk space for job to continue, defined in CS as %s MB' % ( self.minDiskSpace ) )
        return S_ERROR( 'Job has insufficient disk space to continue' )
    return S_OK( 'Job has enough disk space available' )
#############################################################################
def __checkWallClockTime( self ):
    """Check whether the job has been running longer than the CS-defined
    maximum wall clock time."""
    if not self.initialValues.has_key( 'StartTime' ):
        return S_ERROR( 'Job start time could not be established' )
    elapsed = time.time() - self.initialValues['StartTime']
    if elapsed > self.maxWallClockTime:
        self.log.info( 'Job has exceeded maximum wall clock time of %s seconds' % ( self.maxWallClockTime ) )
        return S_ERROR( 'Job has exceeded maximum wall clock time' )
    return S_OK( 'Job within maximum wall clock time' )
#############################################################################
def __checkLoadAverage( self ):
    """Check whether the CS-defined maximum load average is exceeded."""
    if not self.parameters.has_key( 'LoadAverage' ):
        return S_ERROR( 'Job load average not established' )
    currentLoad = self.parameters['LoadAverage'][-1]
    if currentLoad > float( self.loadAvgLimit ):
        self.log.info( 'Maximum load average exceeded, defined in CS as %s ' % ( self.loadAvgLimit ) )
        return S_ERROR( 'Job exceeded maximum load average' )
    return S_OK( 'Job running with normal load average' )
#############################################################################
def __peek( self ):
    """Return the application's standard output, obtained from the
    running execution thread via its subprocess callback."""
    output = self.exeThread.getOutput()
    if not output['OK']:
        self.log.warn( 'Could not obtain output from running application thread' )
        self.log.warn( output['Message'] )
    return output
#############################################################################
def calibrate( self ):
    """ The calibrate method obtains the initial values for system memory and load
        and calculates the margin for error for the rest of the Watchdog cycle.

        Side effects: populates self.initialValues and creates the empty
        time-series lists in self.parameters that later checks append to.
    """
    # First call records StartTime in self.initialValues and returns 0.0.
    self.__getWallClockTime()
    self.parameters['WallClockTime'] = []
    cpuConsumed = self.__getCPU()
    if not cpuConsumed['OK']:
        self.log.warn( "Could not establish CPU consumed, setting to 0.0" )
        cpuConsumed = 0.0
    else:
        cpuConsumed = cpuConsumed['Value']
    self.initialValues['CPUConsumed'] = cpuConsumed
    self.parameters['CPUConsumed'] = []
    loadAvg = self.getLoadAverage()
    if not loadAvg['OK']:
        self.log.warn( "Could not establish LoadAverage, setting to 0" )
        loadAvg = 0
    else:
        loadAvg = loadAvg['Value']
    self.initialValues['LoadAverage'] = loadAvg
    self.parameters['LoadAverage'] = []
    memUsed = self.getMemoryUsed()
    if not memUsed['OK']:
        self.log.warn( "Could not establish MemoryUsed, setting to 0" )
        memUsed = 0
    else:
        memUsed = memUsed['Value']
    self.initialValues['MemoryUsed'] = memUsed
    self.parameters['MemoryUsed'] = []
    result = self.processMonitor.getMemoryConsumed( self.wrapperPID )
    self.log.verbose( 'Job Memory: %s' % ( result['Value'] ) )
    if not result['OK']:
        self.log.warn( 'Could not get job memory usage' )
    # NOTE(review): result['Value'] is read unconditionally even when the
    # call above failed — confirm getMemoryConsumed always carries 'Value'.
    # Raw values are in bytes; stored here as KB.
    self.initialValues['Vsize'] = result['Value']['Vsize']/1024.
    self.initialValues['RSS'] = result['Value']['RSS']/1024.
    self.parameters['Vsize'] = []
    self.parameters['RSS'] = []
    result = self.getDiskSpace()
    self.log.verbose( 'DiskSpace: %s' % ( result ) )
    if not result['OK']:
        self.log.warn( "Could not establish DiskSpace" )
    self.initialValues['DiskSpace'] = result['Value']
    self.parameters['DiskSpace'] = []
    result = self.getNodeInformation()
    self.log.verbose( 'NodeInfo: %s' % ( result ) )
    if not result['OK']:
        self.log.warn( "Could not establish static system information" )
    # Pick up the batch-system job id from whichever variable this site's
    # batch system exports (LSF, PBS, Grid Engine variants); the last one
    # found wins.
    if os.environ.has_key( 'LSB_JOBID' ):
        result['LocalJobID'] = os.environ['LSB_JOBID']
    if os.environ.has_key( 'PBS_JOBID' ):
        result['LocalJobID'] = os.environ['PBS_JOBID']
    if os.environ.has_key( 'QSUB_REQNAME' ):
        result['LocalJobID'] = os.environ['QSUB_REQNAME']
    if os.environ.has_key( 'JOB_ID' ):
        result['LocalJobID'] = os.environ['JOB_ID']
    self.__reportParameters( result, 'NodeInformation', True )
    self.__reportParameters( self.initialValues, 'InitialValues' )
    return S_OK()
def __timeLeft( self ):
    """
    return Normalized CPU time left in the batch system
    0 if not available
    -1 once we are under the fine-grained limit (signals the caller)
    update self.timeLeft and self.littleTimeLeft
    """
    # Get CPU time left in the batch system
    result = self.timeLeftUtil.getTimeLeft( 0.0 )
    if not result['OK']:
        # Could not get CPU time left, we might need to wait for the first loop
        # or the Utility is not working properly for this batch system
        # or we are in a batch system
        timeLeft = 0
    else:
        timeLeft = result['Value']
    self.timeLeft = timeLeft
    if not self.littleTimeLeft:
        if timeLeft and timeLeft < self.grossTimeLeftLimit:
            # NOTE(review): message has a typo ('bellow') and logs the
            # remaining time, not the limit it dropped below — verify intent.
            self.log.info( 'TimeLeft bellow %s, now checking with higher frequency' % timeLeft )
            self.littleTimeLeft = True
            # TODO: better configurable way of doing this to be coded
            self.littleTimeLeftCount = 15
    else:
        # Already in the high-frequency regime: -1 tells the caller the
        # fine-grained limit has also been crossed.
        if self.timeLeft and self.timeLeft < self.fineTimeLeftLimit:
            timeLeft = -1
    return timeLeft
#############################################################################
def __getUsageSummary( self ):
    """ Returns average load, memory etc. over execution of job thread.

        Builds a summary dict from the collected time series, reports it
        as job parameters, and stores it in self.currentStats.
    """
    summary = {}
    # CPUConsumed: convert the last 'hh:mm:ss' snapshot to seconds.
    if self.parameters.has_key( 'CPUConsumed' ):
        cpuList = self.parameters['CPUConsumed']
        if cpuList:
            hmsCPU = cpuList[-1]
            rawCPU = self.__convertCPUTime( hmsCPU )
            if rawCPU['OK']:
                summary['LastUpdateCPU(s)'] = rawCPU['Value']
        else:
            summary['LastUpdateCPU(s)'] = 'Could not be estimated'
    # DiskSpace: difference between last snapshot and the initial value.
    if self.parameters.has_key( 'DiskSpace' ):
        space = self.parameters['DiskSpace']
        if space:
            value = abs( float( space[-1] ) - float( self.initialValues['DiskSpace'] ) )
            # NOTE(review): abs() is never negative, so this branch is dead.
            if value < 0:
                value = 0
            summary['DiskSpace(MB)'] = value
        else:
            summary['DiskSpace(MB)'] = 'Could not be estimated'
    # MemoryUsed: difference between last snapshot and the initial value.
    if self.parameters.has_key( 'MemoryUsed' ):
        memory = self.parameters['MemoryUsed']
        if memory:
            summary['MemoryUsed(kb)'] = abs( float( memory[-1] ) - float( self.initialValues['MemoryUsed'] ) )
        else:
            summary['MemoryUsed(kb)'] = 'Could not be estimated'
    # LoadAverage: arithmetic mean over all snapshots.
    if self.parameters.has_key( 'LoadAverage' ):
        laList = self.parameters['LoadAverage']
        if laList:
            summary['LoadAverage'] = float( sum( laList ) ) / float( len( laList ) )
        else:
            summary['LoadAverage'] = 'Could not be estimated'
    result = self.__getWallClockTime()
    if not result['OK']:
        self.log.warn( "Failed determining wall clock time", result['Message'] )
        summary['WallClockTime(s)'] = 0
        summary['ScaledCPUTime(s)'] = 0
    else:
        wallClock = result['Value']
        summary['WallClockTime(s)'] = wallClock
        # Scaled time accounts for node speed and the processor count.
        summary['ScaledCPUTime(s)'] = wallClock * self.scaleFactor * self.processors
    self.__reportParameters( summary, 'UsageSummary', True )
    self.currentStats = summary
#############################################################################
def __reportParameters( self, params, title = None, report = False ):
    """Will report parameters for job.

    :param params: dict of parameters (or an S_OK-style dict with 'Value').
    :param title: optional heading used in the log banner.
    :param report: if True, also forward the parameters to the job state
                   update service via __setJobParamList.
    """
    try:
        parameters = []
        self.log.info( '==========================================================' )
        if title:
            self.log.info( 'Watchdog will report %s' % ( title ) )
        else:
            self.log.info( 'Watchdog will report parameters' )
        self.log.info( '==========================================================' )
        vals = params
        # Unwrap S_OK-style results so only the payload is reported.
        if params.has_key( 'Value' ):
            if vals['Value']:
                vals = params['Value']
        # Falsy values (0, '', None) are deliberately skipped.
        for k, v in vals.items():
            if v:
                self.log.info( str( k ) + ' = ' + str( v ) )
                parameters.append( ( k, v ) )
        if report:
            self.__setJobParamList( parameters )
        self.log.info( '==========================================================' )
    except Exception as x:
        # Reporting must never kill the Watchdog cycle.
        self.log.warn( 'Problem while reporting parameters' )
        self.log.warn( str( x ) )
#############################################################################
def __getWallClockTime( self ):
    """Return seconds elapsed since Watchdog initialization as
    S_OK()['Value']; the first call records the start time and
    returns 0.0."""
    result = S_OK()
    startTime = self.initialValues.get( 'StartTime' )
    if startTime is None:
        self.initialValues['StartTime'] = time.time()
        result['Value'] = 0.0
    else:
        result['Value'] = time.time() - startTime
    return result
#############################################################################
def __killRunningThread( self ):
    """Kill the running application process and any child processes,
    marking the application as killed."""
    childPID = self.spObject.getChildPID()
    self.log.info( 'Sending kill signal to application PID %s' % ( childPID ) )
    killResult = self.spObject.killChild()
    self.applicationKilled = True
    self.log.info( 'Subprocess.killChild() returned:%s ' % ( killResult ) )
    return S_OK( 'Thread killed' )
#############################################################################
def __sendSignOfLife( self, jobID, heartBeatDict, staticParamDict ):
    """Send the 'heartbeat' signal to the JobStateUpdate service and
    interpret any control signal carried back in the reply."""
    jobReport = RPCClient( 'WorkloadManagement/JobStateUpdate', timeout = 120 )
    result = jobReport.sendHeartBeat( jobID, heartBeatDict, staticParamDict )
    if not result['OK']:
        self.log.warn( 'Problem sending sign of life' )
        self.log.warn( result )
    elif result['Value']:
        # A non-empty payload is a control signal (e.g. kill request).
        self.__interpretControlSignal( result['Value'] )
    return result
#############################################################################
def __setJobParamList( self, value ):
    """Wraps around setJobParameters of state update client.

    :param value: list of (name, value) pairs to attach to the job.
    :return: the service reply, or S_OK() when no JOBID is defined.
    """
    # job wrapper template sets the jobID variable
    if not os.environ.has_key( 'JOBID' ):
        self.log.info( 'Running without JOBID so parameters will not be reported' )
        return S_OK()
    jobID = os.environ['JOBID']
    jobReport = RPCClient( 'WorkloadManagement/JobStateUpdate', timeout = 120 )
    jobParam = jobReport.setJobParameters( int( jobID ), value )
    self.log.verbose( 'setJobParameters(%s,%s)' % ( jobID, value ) )
    if not jobParam['OK']:
        self.log.warn( jobParam['Message'] )
    return jobParam
#############################################################################
def getNodeInformation( self ):
    """ Attempts to retrieve all static system information, should be overridden in a subclass"""
    methodName = 'getNodeInformation'
    message = 'Watchdog: ' + methodName + ' method should be implemented in a subclass'
    self.log.warn( message )
    return S_ERROR( message )
#############################################################################
def getLoadAverage( self ):
    """ Attempts to get the load average, should be overridden in a subclass"""
    methodName = 'getLoadAverage'
    message = 'Watchdog: ' + methodName + ' method should be implemented in a subclass'
    self.log.warn( message )
    return S_ERROR( message )
#############################################################################
def getMemoryUsed( self ):
    """ Attempts to get the memory used, should be overridden in a subclass"""
    methodName = 'getMemoryUsed'
    message = 'Watchdog: ' + methodName + ' method should be implemented in a subclass'
    self.log.warn( message )
    return S_ERROR( message )
#############################################################################
def getDiskSpace( self ):
    """ Attempts to get the available disk space, should be overridden in a subclass"""
    methodName = 'getDiskSpace'
    message = 'Watchdog: ' + methodName + ' method should be implemented in a subclass'
    self.log.warn( message )
    return S_ERROR( message )
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
Andrew-McNab-UK/DIRAC
|
WorkloadManagementSystem/JobWrapper/Watchdog.py
|
Python
|
gpl-3.0
| 36,586
|
[
"DIRAC"
] |
1e0dfb75141149533ef48ef4b99750bc379dedda904e872260c454460b4ceaee
|
# -*- coding: utf8 -*-
# Digit recognition script — neural network driver part.
# Usage: python main.py <imagePath> [train 0/1] [generate 0/1]
# @author Valentin PORCHET
import sys  # For argv handling and to drive Alex Yang's script
import time  # NOTE(review): imported but never used in this script
# Third positional argument: whether to (re)generate noisy training images.
if len(sys.argv) > 3:
    generate = True if sys.argv[3] == "1" else False
else:
    generate = False
# Second positional argument: whether to train a new neuron.
if len(sys.argv) > 2:
    training = True if sys.argv[2] == "1" else False
else:
    training = False
if len(sys.argv) > 1:
    # Run crop-and-resize on the image given as first argument.
    # Dimensions 5 and 7 match Alex Yang's algorithm.
    sys.argv = ["crop_and_resize.py", sys.argv[1], "resized", "5", "7"]
    execfile("../crop_and_resize.py")
    # The resized image now exists.
    # Optionally regenerate data and/or retrain before recognizing.
    sys.argv = []
    if generate:  # Generate the noisy training images
        execfile("digit_maker.py")
    if training:  # Train a new neuron
        execfile("digit_recog.py")
    sys.argv = ["digit_recog.py", "data/neuron"]
    # Now perform the recognition itself.
    execfile("digit_recog.py")
else:
    print("Utilisation : python main.py [cheminImage]")
|
TeKrop/PyDigR
|
reseau_neurones/main.py
|
Python
|
gpl-2.0
| 1,122
|
[
"NEURON"
] |
8be48dd81c98e05a40de510176c9d5d431368c2481f135eefd2d24ada8571ce8
|
"""Provides some utilities widely used by other modules"""
import bisect
import collections
import collections.abc
import heapq
import operator
import os.path
import random
import math
import functools
import numpy as np
from itertools import chain, combinations
# ______________________________________________________________________________
# Functions on Sequences and Iterables
def sequence(iterable):
    """Coerce *iterable* to a Sequence: return it unchanged if it already
    is one, otherwise wrap it in a one-element tuple."""
    if isinstance(iterable, collections.abc.Sequence):
        return iterable
    return (iterable,)
def removeall(item, seq):
    """Return a copy of seq (or string) with every occurrence of item removed."""
    if isinstance(seq, str):
        return seq.replace(item, '')
    return [elt for elt in seq if elt != item]
def unique(seq):
    """Remove duplicates from seq (elements must be hashable); order is
    not preserved."""
    distinct = set(seq)
    return list(distinct)
def count(seq):
    """Count how many items in seq are truthy."""
    return sum(1 for item in seq if item)
def multimap(items):
    """Given (key, val) pairs, return {key: [val, ...], ...}."""
    grouped = {}
    for key, val in items:
        grouped.setdefault(key, []).append(val)
    return grouped
def multimap_items(mmap):
    """Yield every (key, val) pair stored in the multimap."""
    for key, vals in mmap.items():
        yield from ((key, val) for val in vals)
def product(numbers):
    """Return the product of the numbers, e.g. product([2, 3, 10]) == 60;
    the empty product is 1."""
    return functools.reduce(operator.mul, numbers, 1)
def first(iterable, default=None):
    """Return the first element of an iterable, or default if it is empty."""
    for item in iterable:
        return item
    return default
def is_in(elt, seq):
    """Like (elt in seq), but compares with identity ('is'), not equality."""
    for candidate in seq:
        if candidate is elt:
            return True
    return False
def mode(data):
    """Return the most common data item; ties are broken arbitrarily."""
    top = collections.Counter(data).most_common(1)
    item, _ = top[0]
    return item
def powerset(iterable):
    """powerset([1,2,3]) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
    (all non-empty subsets, as tuples, smallest first)."""
    items = list(iterable)
    subsets = []
    for size in range(len(items) + 1):
        subsets.extend(combinations(items, size))
    return subsets[1:]  # drop the leading empty tuple
# ______________________________________________________________________________
# argmin and argmax
# Identity function, used as the default `key` below.
identity = lambda x: x

# argmin/argmax are just the builtins min/max: with key=f they return the
# element minimizing/maximizing f, i.e. the "arg" of the extremum.
argmin = min
argmax = max
def argmin_random_tie(seq, key=identity):
    """Return a minimum element of seq; break ties at random."""
    # Shuffling first makes min's "first wins" tie-breaking random.
    return argmin(shuffled(seq), key=key)
def argmax_random_tie(seq, key=identity):
    """Return an element with highest key(seq[i]) score; break ties at random."""
    # Shuffling first makes max's "first wins" tie-breaking random.
    return argmax(shuffled(seq), key=key)
def shuffled(iterable):
    """Return a randomly shuffled list copy of iterable (input untouched)."""
    copy = list(iterable)
    random.shuffle(copy)
    return copy
# ______________________________________________________________________________
# Statistical and mathematical functions
def histogram(values, mode=0, bin_function=None):
    """Return a list of (value, count) pairs, summarizing the input values.
    Sorted by increasing value, or if mode=1, by decreasing count.
    If bin_function is given, map it over values first."""
    if bin_function:
        values = map(bin_function, values)
    counts = {}
    for val in values:
        counts[val] = counts.get(val, 0) + 1
    pairs = list(counts.items())
    if mode:
        pairs.sort(key=lambda pair: (pair[1], pair[0]), reverse=True)
    else:
        pairs.sort()
    return pairs
def dotproduct(X, Y):
    """Return the sum of the element-wise product of vectors X and Y."""
    return sum(map(operator.mul, X, Y))
def element_wise_product(X, Y):
    """Return the element-wise (Hadamard) product of equal-length vectors."""
    assert len(X) == len(Y)
    return list(map(operator.mul, X, Y))
def matrix_multiplication(X_M, *Y_M):
    """Return the matrix product of X_M and an arbitrary number of
    further matrices *Y_M, multiplied left to right.

    >>> matrix_multiplication([[1, 2, 3], [2, 3, 4]],
    ...                       [[3, 4], [1, 2], [1, 0]])
    [[8, 8], [13, 14]]
    """
    def _pair_product(A, B):
        # Standard triple loop, expressed as a comprehension over rows/cols.
        assert len(A[0]) == len(B)
        return [[sum(A[i][k] * B[k][j] for k in range(len(B)))
                 for j in range(len(B[0]))]
                for i in range(len(A))]

    result = X_M
    for Y in Y_M:
        result = _pair_product(result, Y)
    return result
def vector_to_diagonal(v):
    """Return a square matrix with the elements of v on the diagonal
    and zeros elsewhere."""
    size = len(v)
    return [[v[row] if row == col else 0 for col in range(size)]
            for row in range(size)]
def vector_add(a, b):
    """Component-wise addition of two vectors, returned as a tuple."""
    return tuple(x + y for x, y in zip(a, b))
def scalar_vector_product(X, Y):
    """Return the vector Y scaled by the scalar X."""
    return list(map(lambda component: X * component, Y))
def scalar_matrix_product(X, Y):
    """Return the matrix Y with every element scaled by the scalar X."""
    return [[X * element for element in row] for row in Y]
def inverse_matrix(X):
    """Invert a 2x2 matrix using the closed-form adjugate/determinant rule.
    Asserts the matrix is 2x2 and non-singular."""
    assert len(X) == 2
    assert len(X[0]) == 2
    det = X[0][0] * X[1][1] - X[0][1] * X[1][0]
    assert det != 0
    inv = 1.0 / det
    return [[inv * X[1][1], inv * -X[0][1]],
            [inv * -X[1][0], inv * X[0][0]]]
def probability(p):
    """Return True with probability p."""
    return random.uniform(0.0, 1.0) < p
def weighted_sample_with_replacement(n, seq, weights):
    """Pick n samples from seq at random, with replacement, with the
    probability of each element in proportion to its corresponding
    weight."""
    # Build the sampler once, then draw n times from it.
    sample = weighted_sampler(seq, weights)
    return [sample() for _ in range(n)]
def weighted_sampler(seq, weights):
    """Return a no-argument function that picks an element of seq at
    random, weighted by the corresponding entry of weights."""
    # Running totals form the cumulative distribution; bisect finds the
    # bucket a uniform draw falls into.
    totals = []
    running = 0
    for w in weights:
        running += w
        totals.append(running)
    return lambda: seq[bisect.bisect(totals, random.uniform(0, totals[-1]))]
def weighted_choice(choices):
    """A weighted version of random.choice over (choice, weight) pairs;
    returns the selected (choice, weight) pair.
    NOTE: could be replaced by random.choices on Python 3.6+."""
    total = sum(weight for _, weight in choices)
    threshold = random.uniform(0, total)
    running = 0
    for choice, weight in choices:
        running += weight
        if running >= threshold:
            return choice, weight
def rounder(numbers, d=4):
    """Round a single number, or recursively every number in a collection,
    to d decimal places; collections keep their original type."""
    if isinstance(numbers, (int, float)):
        return round(numbers, d)
    container_type = type(numbers)  # list, set, tuple, etc.
    return container_type(rounder(n, d) for n in numbers)
def num_or_str(x):  # TODO: rename as `atom`
    """Convert the argument to an int if possible, else a float if
    possible, else return it as a stripped string."""
    for converter in (int, float):
        try:
            return converter(x)
        except ValueError:
            continue
    return str(x).strip()
def normalize(dist):
    """Scale dist so it sums to 1.0. Dicts are normalized in place and
    returned; other sequences produce a new list."""
    if isinstance(dist, dict):
        total = sum(dist.values())
        for key in dist:
            dist[key] /= total
            assert 0 <= dist[key] <= 1, "Probabilities must be between 0 and 1."
        return dist
    total = sum(dist)
    return [n / total for n in dist]
def norm(X, n=2):
    """Return the n-norm of vector X (default: Euclidean)."""
    total = 0
    for x in X:
        total += x ** n
    return total ** (1 / n)
def clip(x, lowest, highest):
    """Return x clipped to the range [lowest..highest]."""
    # Apply the upper bound first, then the lower bound (so lowest wins
    # if the bounds are inverted, matching max(lowest, min(x, highest))).
    capped = x if x < highest else highest
    return capped if capped > lowest else lowest
def sigmoid_derivative(value):
    """Derivative of the sigmoid, expressed in terms of the sigmoid's
    own output `value` (i.e. s * (1 - s))."""
    complement = 1 - value
    return value * complement
def sigmoid(x):
    """Return the logistic sigmoid of x: 1 / (1 + e^-x)."""
    denominator = 1 + math.exp(-x)
    return 1 / denominator
def relu_derivative(value):
    # Derivative of ReLU w.r.t. its input: 1 for positive input, else 0.
    # NOTE(review): this function is redefined identically further down the
    # file; the later definition is the one bound at import time.
    if value > 0:
        return 1
    else:
        return 0
def elu(x, alpha=0.01):
    """Exponential linear unit: x for x > 0, else alpha * (e^x - 1)."""
    return x if x > 0 else alpha * (math.exp(x) - 1)
def elu_derivative(value, alpha = 0.01):
    """Derivative of ELU: 1 for positive input, else alpha * e^value.
    (`value` here is the pre-activation input, unlike sigmoid_derivative.)"""
    return 1 if value > 0 else alpha * math.exp(value)
def tanh(x):
    """Hyperbolic tangent activation (delegates to NumPy, so it also
    works element-wise on arrays)."""
    result = np.tanh(x)
    return result
def tanh_derivative(value):
    """Derivative of tanh, expressed in terms of the tanh output: 1 - t^2."""
    return 1 - (value ** 2)
def leaky_relu(x, alpha = 0.01):
    """Leaky ReLU: x for x > 0, else alpha * x (small negative slope)."""
    return x if x > 0 else alpha * x
def leaky_relu_derivative(value, alpha=0.01):
    """Derivative of leaky ReLU: 1 for positive input, else alpha."""
    return 1 if value > 0 else alpha
def relu(x):
    """Rectified linear unit: x for positive x, else 0."""
    return x if x > 0 else 0
def relu_derivative(value):
    # Derivative of ReLU: 1 for positive input, else 0.
    # NOTE(review): duplicate of the identical relu_derivative defined
    # earlier in the file — one of the two should be removed.
    if value > 0:
        return 1
    else:
        return 0
def step(x):
    """Heaviside step activation: 1 if x >= 0, else 0."""
    return int(x >= 0)
def gaussian(mean, st_dev, x):
    """Probability density of the normal distribution N(mean, st_dev^2) at x."""
    coefficient = 1 / (math.sqrt(2 * math.pi) * st_dev)
    exponent = -0.5 * (float(x - mean) / st_dev) ** 2
    return coefficient * math.e ** exponent
try:  # math.isclose was added in Python 3.5; but we might be in 3.4
    from math import isclose
except ImportError:
    # Pure-Python fallback with the same signature and semantics as
    # math.isclose (PEP 485): relative tolerance against the larger
    # magnitude, with an absolute-tolerance floor.
    def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
        """Return true if numbers a and b are close to each other."""
        return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
# ______________________________________________________________________________
# Grid Functions
# Unit vectors for the four compass directions, counter-clockwise from EAST.
orientations = EAST, NORTH, WEST, SOUTH = [(1, 0), (0, 1), (-1, 0), (0, -1)]
# Index increments into `orientations`: +1 turns left (CCW), -1 turns right.
turns = LEFT, RIGHT = (+1, -1)
def turn_heading(heading, inc, headings=orientations):
    """Return the heading `inc` positions (CCW for +1) away from `heading`
    in the cyclic list `headings`."""
    return headings[(headings.index(heading) + inc) % len(headings)]
def turn_right(heading):
    """Return the heading one step clockwise from `heading`."""
    return turn_heading(heading, RIGHT)
def turn_left(heading):
    """Return the heading one step counter-clockwise from `heading`."""
    return turn_heading(heading, LEFT)
def distance(a, b):
    """The Euclidean distance between two (x, y) points."""
    (ax, ay), (bx, by) = a, b
    return math.hypot(ax - bx, ay - by)
def distance_squared(a, b):
    """The square of the Euclidean distance between two (x, y) points."""
    (ax, ay), (bx, by) = a, b
    return (ax - bx) ** 2 + (ay - by) ** 2
def vector_clip(vector, lowest, highest):
    """Clip each component of vector to [lowest[i], highest[i]],
    returning the same container type as vector."""
    clipped = (max(lo, min(component, hi))
               for component, lo, hi in zip(vector, lowest, highest))
    return type(vector)(clipped)
# ______________________________________________________________________________
# Misc Functions
class injection():
    """Dependency injection of temporary values for global functions/classes/etc.
    E.g., `with injection(DataBase=MockDataBase): ...`

    Works by rebinding names in this module's globals() for the duration
    of the `with` block; the original bindings are restored on exit.
    """
    def __init__(self, **kwds):
        # Names and replacement values to install on entry.
        self.new = kwds

    def __enter__(self):
        # Save current bindings so __exit__ can restore them.
        # Raises KeyError if a name is not already a module global.
        self.old = {v: globals()[v] for v in self.new}
        globals().update(self.new)

    def __exit__(self, type, value, traceback):
        # Restore saved bindings; exceptions are not suppressed.
        globals().update(self.old)
def memoize(fn, slot=None, maxsize=32):
    """Memoize fn: remember the computed value for any argument list.
    If slot is given, cache the result on that attribute of the first
    argument; otherwise use functools.lru_cache keyed on all arguments."""
    if slot:
        def memoized_fn(obj, *args):
            # First call computes and stores; later calls hit the slot.
            if not hasattr(obj, slot):
                setattr(obj, slot, fn(obj, *args))
            return getattr(obj, slot)
    else:
        @functools.lru_cache(maxsize=maxsize)
        def memoized_fn(*args):
            return fn(*args)
    return memoized_fn
def name(obj):
    """Best-effort human-readable name for obj: its 'name' attribute,
    its __name__, its class's __name__, or finally str(obj)."""
    for candidate in (getattr(obj, 'name', 0),
                      getattr(obj, '__name__', 0),
                      getattr(getattr(obj, '__class__', 0), '__name__', 0)):
        if candidate:
            return candidate
    return str(obj)
def isnumber(x):
    """Is x a number?"""
    # Duck-typed check: anything convertible to int (int, float, bool,
    # Decimal, numpy scalars, ...) counts as a number.
    return hasattr(x, '__int__')
def issequence(x):
    """Is x a sequence (list, tuple, str, range, ...)?"""
    return isinstance(x, collections.abc.Sequence)
def print_table(table, header=None, sep='   ', numfmt='{}'):
    """Print a list of lists as a table, so that columns line up nicely.
    header, if specified, will be printed as the first row.
    numfmt is the format for all numbers; you might want e.g. '{:.2f}'.
    (If you want different formats in different columns,
    don't use print_table.) sep is the separator between columns.

    NOTE(review): when header is given, it is insert()ed into the caller's
    `table` list — the argument is mutated.
    """
    # Right-justify numeric columns, left-justify the rest, based on row 0.
    justs = ['rjust' if isnumber(x) else 'ljust' for x in table[0]]
    if header:
        table.insert(0, header)
    # Apply numfmt to every numeric cell.
    table = [[numfmt.format(x) if isnumber(x) else x for x in row]
             for row in table]
    # Column width = widest cell in that column.
    sizes = list(
        map(lambda seq: max(map(len, seq)),
            list(zip(*[map(str, row) for row in table]))))
    for row in table:
        print(sep.join(getattr(
            str(x), j)(size) for (j, size, x) in zip(justs, sizes, row)))
def open_data(name, mode='r'):
    """Open the file `name` from the 'aima-data' directory that sits next
    to this module, in the given mode. Caller is responsible for closing."""
    aima_root = os.path.dirname(__file__)
    aima_file = os.path.join(aima_root, *['aima-data', name])
    return open(aima_file, mode=mode)
def failure_test(algorithm, tests):
    """Grade `algorithm` by the fraction of tests it passes.
    Most algorithms have arbitrary output on correct execution but a
    particular output on failure (e.g. False or None), so each test is
    (values, failure_output) and a run passes when the output differs
    from the failure output."""
    from statistics import mean
    outcomes = [int(algorithm(values) != failure_output)
                for values, failure_output in tests]
    return mean(outcomes)
# ______________________________________________________________________________
# Expressions
# See https://docs.python.org/3/reference/expressions.html#operator-precedence
# See https://docs.python.org/3/reference/datamodel.html#special-method-names
class Expr(object):
    """A mathematical expression with an operator and 0 or more arguments.
    op is a str like '+' or 'sin'; args are Expressions.
    Expr('x') or Symbol('x') creates a symbol (a nullary Expr).
    Expr('-', x) creates a unary; Expr('+', x, 1) creates a binary."""

    def __init__(self, op, *args):
        self.op = str(op)    # operator name, always stored as a string
        self.args = args     # operand Expressions (tuple; empty for a Symbol)

    # Operator overloads: applying a Python operator to an Expr builds a
    # new Expr node rather than computing a value.
    def __neg__(self):
        return Expr('-', self)

    def __pos__(self):
        return Expr('+', self)

    def __invert__(self):
        return Expr('~', self)

    def __add__(self, rhs):
        return Expr('+', self, rhs)

    def __sub__(self, rhs):
        return Expr('-', self, rhs)

    def __mul__(self, rhs):
        return Expr('*', self, rhs)

    def __pow__(self, rhs):
        return Expr('**', self, rhs)

    def __mod__(self, rhs):
        return Expr('%', self, rhs)

    def __and__(self, rhs):
        return Expr('&', self, rhs)

    def __xor__(self, rhs):
        return Expr('^', self, rhs)

    def __rshift__(self, rhs):
        return Expr('>>', self, rhs)

    def __lshift__(self, rhs):
        return Expr('<<', self, rhs)

    def __truediv__(self, rhs):
        return Expr('/', self, rhs)

    def __floordiv__(self, rhs):
        return Expr('//', self, rhs)

    def __matmul__(self, rhs):
        return Expr('@', self, rhs)

    def __or__(self, rhs):
        """Allow both P | Q, and P |'==>'| Q."""
        # A string rhs starts the |'op'| pseudo-infix protocol: it yields
        # a PartialExpr that waits for the right operand.
        if isinstance(rhs, Expression):
            return Expr('|', self, rhs)
        else:
            return PartialExpr(rhs, self)

    # Reverse operator overloads, for e.g. 1 + Symbol('x').
    def __radd__(self, lhs):
        return Expr('+', lhs, self)

    def __rsub__(self, lhs):
        return Expr('-', lhs, self)

    def __rmul__(self, lhs):
        return Expr('*', lhs, self)

    def __rdiv__(self, lhs):
        return Expr('/', lhs, self)

    def __rpow__(self, lhs):
        return Expr('**', lhs, self)

    def __rmod__(self, lhs):
        return Expr('%', lhs, self)

    def __rand__(self, lhs):
        return Expr('&', lhs, self)

    def __rxor__(self, lhs):
        return Expr('^', lhs, self)

    def __ror__(self, lhs):
        return Expr('|', lhs, self)

    def __rrshift__(self, lhs):
        return Expr('>>', lhs, self)

    def __rlshift__(self, lhs):
        return Expr('<<', lhs, self)

    def __rtruediv__(self, lhs):
        return Expr('/', lhs, self)

    def __rfloordiv__(self, lhs):
        return Expr('//', lhs, self)

    def __rmatmul__(self, lhs):
        return Expr('@', lhs, self)

    def __call__(self, *args):
        "Call: if 'f' is a Symbol, then f(0) == Expr('f', 0)."
        if self.args:
            raise ValueError('can only do a call for a Symbol, not an Expr')
        else:
            return Expr(self.op, *args)

    # Equality and repr — __eq__ and __hash__ are defined together so
    # structurally equal Exprs hash alike and can be used as dict keys.
    def __eq__(self, other):
        "'x == y' evaluates to True or False; does not build an Expr."
        return (isinstance(other, Expr)
                and self.op == other.op
                and self.args == other.args)

    def __hash__(self):
        return hash(self.op) ^ hash(self.args)

    def __repr__(self):
        op = self.op
        args = [str(arg) for arg in self.args]
        if op.isidentifier():       # f(x) or f(x, y)
            return '{}({})'.format(op, ', '.join(args)) if args else op
        elif len(args) == 1:        # -x or -(x + 1)
            return op + args[0]
        else:                       # (x - y)
            opp = (' ' + op + ' ')
            return '(' + opp.join(args) + ')'
# An 'Expression' is either an Expr or a Number.
# Symbol is not an explicit type; it is any Expr with 0 args.
# These tuples are meant for use with isinstance().
Number = (int, float, complex)
Expression = (Expr, Number)
def Symbol(name):
    """A Symbol is just an Expr with no args."""
    return Expr(name)
def symbols(names):
    """Return a tuple of Symbols; names is a comma/whitespace delimited str."""
    tokens = names.replace(',', ' ').split()
    return tuple(map(Symbol, tokens))
def subexpressions(x):
    """Yield the subexpressions of an Expression (including x itself),
    in pre-order (node before its arguments)."""
    yield x
    if isinstance(x, Expr):
        for arg in x.args:
            yield from subexpressions(arg)
def arity(expression):
    """The number of sub-expressions in this expression."""
    # Plain numbers are leaves and have no sub-expressions.
    return len(expression.args) if isinstance(expression, Expr) else 0
# For operators that are not defined in Python, we allow new InfixOps:
# For operators that are not defined in Python, we allow new InfixOps:
class PartialExpr:
    """Given 'P |'==>'| Q, first form PartialExpr('==>', P), then combine with Q.

    Produced by Expr.__or__ when the right operand is a string; the
    subsequent | with the real right operand builds the final Expr.
    """
    def __init__(self, op, lhs):
        self.op, self.lhs = op, lhs

    def __or__(self, rhs):
        return Expr(self.op, self.lhs, rhs)

    def __repr__(self):
        return "PartialExpr('{}', {})".format(self.op, self.lhs)
def expr(x):
    """Shortcut to create an Expression. x is a str in which:
    - identifiers are automatically defined as Symbols.
    - ==> is treated as an infix |'==>'|, as are <== and <=>.
    If x is already an Expression, it is returned unchanged. Example:
    >>> expr('P & Q ==> Q')
    ((P & Q) ==> Q)
    """
    if isinstance(x, str):
        # SECURITY NOTE: the string is passed to eval(); only call expr() on
        # trusted input. defaultkeydict(Symbol) serves as the namespace, so
        # every unknown identifier becomes a fresh Symbol on first lookup.
        return eval(expr_handle_infix_ops(x), defaultkeydict(Symbol))
    else:
        return x
infix_ops = '==> <== <=>'.split()
def expr_handle_infix_ops(x):
    """Given a str, return a new str with ==> replaced by |'==>'|, etc.
    >>> expr_handle_infix_ops('P ==> Q')
    "P |'==>'| Q"
    """
    result = x
    for operator in infix_ops:
        # repr(operator) wraps the op in quotes, turning it into the string
        # operand of the two | operators.
        result = result.replace(operator, '|' + repr(operator) + '|')
    return result
class defaultkeydict(collections.defaultdict):
    """Like defaultdict, but the default_factory is a function of the key.
    >>> d = defaultkeydict(len); d['four']
    4
    """
    def __missing__(self, key):
        # Unlike plain defaultdict, the factory receives the missing key.
        value = self.default_factory(key)
        self[key] = value
        return value
class hashabledict(dict):
    """Allows hashing by representing a dictionary as tuple of key:value pairs
    May cause problems as the hash value may change during runtime
    """
    # NOTE(review): the constant hash makes every instance land in the same
    # hash bucket, so lookups in sets/dicts fall back to full equality
    # comparison -- correct but O(n) per probe.
    def __hash__(self):
        return 1
# ______________________________________________________________________________
# Queues: Stack, FIFOQueue, PriorityQueue
# Stack and FIFOQueue are implemented as list and collection.deque
# PriorityQueue is implemented here
class PriorityQueue:
    """A Queue in which the minimum (or maximum) element (as determined by f and
    order) is returned first.
    If order is 'min', the item with minimum f(x) is
    returned first; if order is 'max', then it is the item with maximum f(x).
    Also supports dict-like lookup."""

    def __init__(self, order='min', f=lambda x: x):
        self.heap = []
        if order == 'max':
            self.f = lambda x: -f(x)   # negate the key so the max pops first
        elif order == 'min':
            self.f = f
        else:
            raise ValueError("order must be either 'min' or 'max'.")

    def append(self, item):
        """Insert item at its correct position."""
        entry = (self.f(item), item)
        heapq.heappush(self.heap, entry)

    def extend(self, items):
        """Insert each item in items at its correct position."""
        for each in items:
            self.append(each)

    def pop(self):
        """Pop and return the item (with min or max f(x) value)
        depending on the order."""
        if not self.heap:
            raise Exception('Trying to pop from empty PriorityQueue.')
        _, item = heapq.heappop(self.heap)
        return item

    def __len__(self):
        """Return current capacity of PriorityQueue."""
        return len(self.heap)

    def __contains__(self, item):
        """Return True if item in PriorityQueue."""
        return (self.f(item), item) in self.heap

    def __getitem__(self, key):
        """Return the stored item equal to key, or None if absent."""
        return next((item for _, item in self.heap if item == key), None)

    def __delitem__(self, key):
        """Delete the first occurrence of key."""
        entry = (self.f(key), key)
        self.heap.remove(entry)
        heapq.heapify(self.heap)
# ______________________________________________________________________________
# Useful Shorthands
class Bool(int):
    """Just like `bool`, except values display as 'T' and 'F' instead of 'True' and 'False'"""
    def __repr__(self):
        return 'T' if self else 'F'
    __str__ = __repr__
T = Bool(True)
F = Bool(False)
|
jo-tez/aima-python
|
utils.py
|
Python
|
mit
| 22,293
|
[
"Gaussian"
] |
c39e62abbc63f9e6bc4de3b3d64cf2ba0b2a3cb36bf2c24b7185f1fd50784943
|
# TD quantum dynamics with TD Gauss-Hermite basis
# Two basis only
# Test anharmonic system
import numpy as np
from math import *
import numba
# --- Simulation parameters ---------------------------------------------
Ntraj = 4000                     # number of quantum trajectories
print('Number of trajectories = {} \n'.format(Ntraj))
dt = 0.002                       # propagation time step
am = 1.0                         # particle mass
# initial values have to be stored to compute the autocorrelation function
x0 = -2.5                        # initial centre of the Gaussian wavepacket
p0 = 0.0                         # initial centre momentum
alpha0 = 1.0+0j                  # initial complex width parameter of the GWP
alpha = alpha0
a = alpha.real
b = alpha.imag
g = 0.1 # anharmonic constant in potential
Nt = int(input('Time interval = {} \n How many time steps? '.format(dt)))
p = np.zeros(Ntraj)              # per-trajectory momenta
# phases of quantum trajectories
s = np.zeros(Ntraj)
sampling = 'normal'
if sampling == 'normal':
    # Sample positions from the initial Gaussian density; uniform weights.
    x = np.random.randn(Ntraj)/np.sqrt(2.0*alpha.real) + x0
    w = np.array([1.0/float(Ntraj)]*Ntraj)
elif sampling == 'uniform':
    # use uniform sampling between [xmin, xmax]
    xmin = -8.0
    xmax = -xmin
    x = np.linspace(xmin,xmax,Ntraj)
    dx = x[1]-x[0]
    # Weights carry the Gaussian density times the grid spacing.
    w = np.sqrt(a/np.pi) * np.exp(-a*(x-x0)**2) * dx
else:
    # NOTE(review): x and w stay undefined on this branch, so the script
    # would fail further down instead of exiting cleanly.
    print('There is no {} sampling'.format(sampling))
@numba.autojit
def derivs(x):
    """Potential and its first two derivatives at x (scalar or ndarray).

    Returns (v0, dv, ddv) = (V(x), V'(x), V''(x)) for the potential chosen by
    the hard-coded `pot` flag: 'aho' = quartic anharmonic oscillator (uses the
    module-level constant g), 'dwell' = double well.
    NOTE(review): numba.autojit was removed in modern numba; @numba.jit is
    the current equivalent -- confirm the installed numba version.
    """
    # v = V0 + V1*(x-xAve) + V2 * (x-xAve)**2/2.
    pot = 'dwell'
    if pot == 'aho':
        v0 = x**2/2.0 + g * x**4 / 4.0
        dv = x + g * x**3
        ddv = 1.0 + 3.0 * g * x**2
    elif pot == 'dwell':
        eta = 1.3544          # double-well shape parameter
        v0 = x**4/16.0/eta - x**2/2.0
        dv = x**3/4.0/eta - x
        ddv = 3./4./eta * x**2 - 1.0
    # Any other value of `pot` would leave v0/dv/ddv unbound and raise here.
    return v0, dv, ddv
#@numba.autojit
#def Hessian(x):
# V0 = x**2/2.0 + g*x**4/4.
# V1 = x + g * x**3
# V2 = 1.0 + 3.0 * g * x**2
#
# return V0,V1,V2
#@numba.autojit
#def Potential(x):
#
# return x**2/2.0 + g * x**4 / 4.0
@numba.autojit
def LQF(x,w,xAve,var):
    """Linearized quantum force from a Gaussian fit of the density.

    x, w: trajectory positions and weights (w unused here -- the linear fit
    only needs the ensemble mean and variance). Builds r = -a*(x - xAve)
    with a = 1/(2*var) and returns (u, du): the per-trajectory quantum
    potential and its spatial derivative. Uses the module-level mass am.
    """
    a = 1. / 2. / var
    r = - a * (x-xAve)
    dr = - a
    u = -1.0 / 2.0 / am * ( r**2 + dr )
    du = -1./am * (r*dr)
    return u, du
def qpot(x,w,xAve,xVar):
    """Quantum potential from a two-term fit of r = psi'/psi in a Gaussian basis.

    Fits P/Q with P quadratic and Q linear in (x - xAve), using Gaussian
    moments b0, b1 of the weighted ensemble, then returns
    (uAve, du): the ensemble-averaged quantum potential and the
    per-trajectory quantum force term. Not called from the main loop
    (LQF is used there instead).
    """
    alpha = 1.0 / xVar / 2.0
    z = x - xAve
    g0 = (alpha/2./np.pi)**0.5 * np.exp(-alpha*z**2/2.)
    g1 = z*g0
    s00 = np.sqrt(alpha/np.pi)/2.0
    s11 = 1./4./np.sqrt(alpha*np.pi)
    b0 = np.dot(g0,w)      # NOTE(review): unused while c0 is hard-coded to 1.0
    b1 = np.dot(g1,w)
    # c0 = b0/s00
    c0 = 1.0
    c1 = b1/s11
    P = c0*(-alpha*(x-xAve)) + c1*(-alpha + alpha**2*(x-xAve)**2)
    dP = - alpha*c0 + c1 * alpha**2 * 2.0*(x-xAve)
    ddP = 2.0 * c1 * alpha**2
    Q = c0 + c1*(x-xAve)
    dQ = c1
    # ddQ = 0.0, ignore P*ddQ/Q
    r = 0.5 * P/Q
    dr = 0.5 *(-P*dQ/Q**2 + dP/Q)
    ddr = 0.5 * (-(dP*dQ/Q**2 - P*dQ*dQ/Q**3) + (ddP/Q - dP*dQ/Q**2))
    du = -(2.0*r*dr + ddr)/2.0/am
    uAve = np.dot(r**2,w)/2./am
    return uAve,du
def expand(alpha,V1,V2,x,xAve,pAve,w,c):
    """
    Compute the time derivative dc/dt of the expansion coefficients c.

    The propagation matrix combines the residual potential V (trajectory
    sums via Vmat), the local expansion -V0 - V1*M1 - V2/2*M2 subtracted
    from it, and a diagonal kinetic term matK. Uses module globals Nb, am.
    Reference Hermite combinations (z = x - xAve):
    H0 = 1.
    H1 = z * sqrt(2.)
    H2 = (4. * z**2 - 2.) / 4. / sqrt(2.)
    """
    a = alpha.real
    V = Vmat(a,x,xAve,w)
    #K = Kmat(alpha,x,pAve)
    #D = Dmat(alpha,xAve,pAve)
    V0 = derivs(xAve)[0]
    matV0 = np.identity(Nb)
    matV0 = matV0 * V0
    M1 = M1mat(a)
    M2 = M2mat(a)
    matK = np.identity(Nb)
    for j in range(Nb):
        # Diagonal kinetic energy of basis state j; assumes the harmonic
        # ladder spectrum j*a/am -- TODO confirm against Kmat.
        matK[j,j] = float(j) * a/am
    #f6.write(' {} {} '.format(t,np.vdot(c,(K+V).dot(c)).real))
    #V[0,2] = np.dot(DeltaV * H0 * H2, w)
    #V[1,2] = np.dot(DeltaV * H1 * H2, w)
    #V = symmetrize(V)
    #V[1,1] = np.dot(DeltaV * H1 * H1, w)
    #V[2,2] = np.dot(DeltaV * H2 * H2, w)
    #print('propagation matrix',-matV0 - V1 * M1 - V2/2.0 * M2 + V)
    # Schroedinger equation in the moving basis: dc/dt = -i * H_eff * c
    dc = (-1j* (-matV0 - V1 * M1 - V2/2.0 * M2 + V + matK )).dot(c)
    return dc
def gwp_vp(a,x,xAve,w,c):
    """
    Variational principle used to determine effective potential used to propagate the GWP
    V = V0 + V1*(x-xAve) + V2*(x-xAve)**2/2
    V1 = <V'>, V2 = <V''>

    Returns (V1, V2). The hard-coded `indicator` selects whether the
    averages are taken over the full wavefunction (Hermite-basis matrices
    contracted with c) or over the trajectory ensemble alone.
    """
    indicator = 'average_phi0'
    z = np.sqrt(a)*(x-xAve)    # scaled displacement (only needed on the 'average_psi' path)
    v0,dv,ddv = derivs(x)
    if indicator == 'average_psi':
        mat_dv = np.zeros((Nb,Nb))
        mat_ddv = np.zeros((Nb,Nb))
        H = Hermite(z) # Hermite polynomials
        for i in range(Nb):
            for j in range(i+1):
                # Monte-Carlo matrix elements over the weighted trajectories;
                # only the upper triangle is computed, then mirrored by sym().
                mat_dv[j,i] = np.dot(dv * H[i] * H[j], w)
                mat_ddv[j,i] = np.dot(ddv * H[i] * H[j], w)
        mat_dv = sym(mat_dv)
        mat_ddv = sym(mat_ddv)
        V1 = np.vdot(c,mat_dv.dot(c)).real
        V2 = np.vdot(c,mat_ddv.dot(c)).real
    elif indicator == 'average_phi0': # average computed with ensemble only
        V1 = np.dot(dv, w)
        V2 = np.dot(ddv, w)
    return V1,V2
def Vmat(a,x,xAve,w):
    """Potential matrix <m|V|n> in the normalized Hermite basis.

    a: real width parameter; x, w: trajectory positions and weights;
    xAve: basis centre. With method == 'QT' (the active branch) matrix
    elements are Monte-Carlo sums over the weighted trajectories; the
    'exact' branch is marked incorrect in the original comment and is
    unreachable with the hard-coded flag.
    """
    z = (x - xAve)*np.sqrt(a)
    #H0 = 1.0
    #H1 = z * np.sqrt(2.)
    V = derivs(x)[0]
    #V0 = Potential(xAve)
    #V = V - V0
    #v = V0 * np.identity(Nb)
    Vm = np.zeros((Nb,Nb))
    method = 'QT' # or 'QT'
    if method == 'QT':
        H = Hermite(z) # Hermite polynomials
        for i in range(Nb):
            for j in range(i+1):
                Vm[j,i] = np.dot(V * H[i] * H[j], w)
        #Vm = symmetrize(Vm)
        # Mirror the computed upper triangle onto the lower triangle.
        for i in range(Nb):
            for j in range(i):
                Vm[i,j] = Vm[j,i]
    elif method == 'exact':
        M2 = M2mat(a)
        M4 = M4mat(a)
        Vm = M2/2.0 + g/4.0 * M4 # incorrect
    #Vm[0,0] = np.dot(V,w)
    #Vm[0,1] = np.dot(V * H[] * H1, w)
    #Vm[1,1] = np.dot(V * H1 * H1, w)
    #Vm[1,0] = Vm[0,1]
    return Vm
def Hermite(x):
    """
    Define Hermite polynomials multiplied by the normalization constant.
    Corresponding to the eigenstates of a harmonic oscillator.
    """
    # Normalization 1/sqrt(2^k k!) for each order below Nb.
    norm = np.array([1. / np.sqrt(float(2**k * factorial(k))) for k in range(Nb)])
    # Seed the three-term recurrence with H0 and H1.
    polys = [1.0, x * 2.0]
    if Nb > 2:
        for k in range(2, Nb):
            polys.append(2.0 * x * polys[k-1] - 2.0*(k-1) * polys[k-2])
    for k in range(Nb):
        polys[k] = polys[k] * norm[k]
    return polys
@numba.autojit
def M1mat(a):
    """Matrix <m|(x - xAve)|n> in the Hermite basis with width a.

    Non-zero only on the first off-diagonals; the upper triangle is filled
    and then mirrored by sym(). Uses the module-level basis size Nb.
    """
    M1 = np.zeros((Nb,Nb))
    for m in range(Nb-1):
        M1[m,m+1] = np.sqrt(float(m+1)/2.0/a)
    M1 = sym(M1)
    return M1
@numba.autojit
def M2mat(a):
    """Matrix <m|(x - xAve)**2|n> in the Hermite basis with width a.

    Diagonal plus second off-diagonals; upper triangle mirrored by sym().
    """
    M2 = np.zeros((Nb,Nb))
    for m in range(Nb):
        M2[m,m] = (float(m) + 0.5)/a
    if Nb > 1:
        for m in range(Nb-2):
            M2[m,m+2] = np.sqrt(float((m+1)*(m+2)))/2.0/a
    M2 = sym(M2)
    return M2
def M3mat(a):
    """Matrix <m|(x - xAve)**3|n> in the Hermite basis with width a.

    Non-zero on the first and third off-diagonals; mirrored by sym().
    NOTE(review): unlike M1mat/M2mat this one is not numba-decorated --
    confirm whether that is intentional.
    """
    M3 = np.zeros((Nb,Nb))
    for m in range(Nb-1):
        M3[m,m+1] = 3.0 * (float(m+1)/2./a)**1.5
    if Nb > 2:
        for m in range(Nb-3):
            M3[m,m+3] = np.sqrt(float((m+1)*(m+2)*(m+3))) / (2.0*a)**1.5
    M3 = sym(M3)
    return M3
def M4mat(a):
    """Matrix <m|(x - xAve)**4|n> in the Hermite basis with width a.

    Diagonal (3m^2 + 3(m+1)^2 = 6m^2 + 6m + 3) plus second and fourth
    off-diagonals; mirrored by sym().
    """
    M4 = np.zeros((Nb,Nb))
    for m in range(Nb):
        M4[m,m] = float(3.0 * m**2 + 3.0 * (m+1)**2) / (2.*a)**2
    if Nb > 1:
        for m in range(Nb-2):
            M4[m,m+2] = (4.0*m + 6.0) * np.sqrt(float((m+1)*(m+2))) / (2.*a)**2
    if Nb > 3:
        for m in range(Nb-4):
            M4[m,m+4] = np.sqrt(float((m+1)*(m+2)*(m+3)*(m+4))) / (2.0*a)**2
    M4 = sym(M4)
#    if Nb > 1:
#        if not M4[0,1] == M4[1,0]:
#            print(M4)
#            print('\n ERROR: Not symmetric matrix M4.\n')
#            sys.exit()
    return M4
@numba.autojit
def Kmat(alpha,x,pAve):
    """Kinetic-energy matrix for the moving Gaussian basis, divided by 2*am.

    alpha: complex width parameter; pAve: centre momentum. x is accepted
    but unused. Only the main, first and second off-diagonals are non-zero;
    off-diagonals are filled in Hermitian pairs via np.conj.
    """
    K = np.zeros((Nb,Nb),dtype=complex)
    ar = alpha.real
    for j in range(Nb):
        K[j,j] = np.abs(alpha)**2 / ar * (2. * j + 1.)/2. + pAve**2
    for j in range(1,Nb):
        K[j-1,j] = -1j*np.conj(alpha) * pAve * np.sqrt(2. * j / ar)
        K[j,j-1] = np.conj(K[j-1,j])
    if Nb > 2:
        for j in range(2,Nb):
            K[j-2,j] = - np.sqrt(float((j-1)*j)) * np.conj(alpha)**2 / 2. / ar
            K[j,j-2] = np.conj(K[j-2,j])
    #K[0,0] = np.abs(alpha)**2/alpha.real / 2. + pAve**2
    #K[1,1] = np.abs(alpha)**2/alpha.real * 3.0 / 2. + pAve**2
    #K[0,1] = -1j*np.conj(alpha) * pAve * np.sqrt(2.*j/alpha.real)
    #K[1,0] = np.conj(K[0,1])
    K = K / (2.*am)
    return K
@numba.autojit
def Dmat(alpha,xAve,pAve):
    """Matrix elements <m|d/dt|n> of the moving Gauss-Hermite basis.

    alpha: complex width parameter; xAve, pAve: centre position and
    momentum of the wavepacket. Assembles the (Nb x Nb) complex matrix D
    from the equations of motion of the basis parameters (dq, dp, da).
    Not called from the main loop, but kept callable.
    """
    D = np.zeros((Nb,Nb),dtype=complex)
    # BUG FIX: this originally called Hessian(xAve), but Hessian() is
    # commented out at module level, so Dmat raised NameError when invoked.
    # derivs() returns the identical triple (V, V', V'') evaluated at xAve.
    V0, V1, ak = derivs(xAve)
    # time derivatives of the basis parameters
    dq = pAve / am
    dp = - V1
    da = (-1.0j/am * alpha**2 + 1.j * ak)
    a = alpha.real
    for k in range(Nb):
        D[k,k] = - 1j*da.imag/2./a * (float(k) + 0.5) - 1j * pAve**2/am
    for k in range(1,Nb):
        D[k-1,k] = np.sqrt(float(k)/2./a) * ( - np.conj(alpha) * dq + 1j * dp)
        D[k,k-1] = np.sqrt(float(k)/2./a) * ( alpha * dq + 1j * dp )
    if Nb > 2:
        for k in range(2,Nb):
            D[k-2,k] = np.conj(da)/2./a * np.sqrt(float(k*(k-1)))/2.0
            D[k,k-2] = - da/2./a * np.sqrt(float(k*(k-1)))/2.0
    return D
def sym(V):
    """Mirror the upper triangle of V onto the lower triangle, in place.

    V is a square array; its last-axis length sets the size. The same
    (mutated) array is returned for convenience.
    """
    size = V.shape[-1]
    for row in range(size):
        for col in range(row):
            V[row, col] = V[col, row]
    return V
def SaveWf(alpha, pAve, S,c,xAve,xVar,fname='wft.dat'):
    """
    save wavefunction to file

    Reconstructs psi(x) = sum_n c[n] * H_n(z) * phi0(x) on a fixed grid
    [-6, 6] (200 points) and writes "x Re(psi) Im(psi)" lines to fname,
    overwriting the file on every call. xVar is accepted but unused.
    Uses module globals Nb and Hermite().
    """
    f = open(fname,'w')
    x = np.linspace(-6,6,200)
    a = alpha.real
    z = (x - xAve) * np.sqrt(a)
    #print('GWP width parameter {}, Real alpha {}'.format(a,alpha.real))
    # Carrier Gaussian wavepacket with phase S.
    phi0 = np.exp( - alpha * (x-xAve)**2/2.0 + 1j*pAve*(x-xAve) + 1j * S)
    H = Hermite(z)
    basis = []
    for i in range(Nb):
        basis.append(H[i]*phi0)
    #phi1 = Hermite(z,1) * phi0
    #phi2 = Hermite(z,2) * phi0
    #phi2 = (4. * z*z - 2.) / 4. / np.sqrt(2.) * phi0
    for i in range(len(x)):
        wf = 0.+0.j
        for j in range(Nb):
            wf += c[j]*basis[j][i]
        f.write('{} {} {} \n'.format(x[i], wf.real,wf.imag))
    f.close()
def xObs(a,c,x,xAve):
    """
    Compute <x> = tr[rho * M1] + xAve

    a: basis width, c: expansion coefficients. The expectation value is
    evaluated purely in the basis representation (via M1mat); x is
    accepted but unused.
    """
    X = M1mat(a)
    y = np.vdot(c,X.dot(c))
    return y.real + xAve
def corr(alpha,w,x,xAve,c,pAve,s):
    """
    Compute correlation function
    C(t) = <psi(0) | psi(t)>
    For real initial wavefunction
    C(t) = <psi(-t/2)|psi(t/2)> = psi(t/2)**2 = sum_n c_n**2

    The phase matrix P carries the accumulated trajectory phases s
    (factor exp(2j*s)) as Monte-Carlo sums over the weighted ensemble.
    """
    a = alpha.real
    #phase = np.exp(1j*(- b * (x-xAve)**2 / 2.0 + pAve * (x-xAve) + S.real))
    #phase = phase**2
    z = (x-xAve) * np.sqrt(a)
    H = Hermite(z) # Hermite polynomials multiplied with normalization constant
    P = np.zeros((Nb,Nb), dtype=complex) # phase matrix elements
    for i in range(Nb):
        for j in range(i+1):
            # Upper triangle only; mirrored below by sym().
            P[j,i] = sum(H[i] * H[j] * np.exp(2j*s) * w)
    P = sym(P)
    # Note: np.dot (not vdot) -- no conjugation of c, per the identity above.
    cor = np.dot(c,P.dot(c))
    return cor
def corr_single_basis(alpha,w,x,xAve,c,pAve,S):
    """
    Compute correlation function for case of single basis. <g0|gt>

    Closed-form overlap of the current GWP (alpha, xAve, pAve, S) with the
    initial one; uses the module-level initial parameters alpha0, x0, p0
    and S0. w, x and c are accepted but unused here.
    """
    global alpha0, x0, p0
    a2 = -(np.conj(alpha0) + alpha) / 2.0
    a1 = x0 * np.conj(alpha0) + xAve * alpha + 1j * (pAve - p0)
    a0 = 1j * (S - np.conj(S0)) - (alpha0 * x0**2 + alpha * xAve**2) / 2.0 + 1j * (p0 * x0 - pAve * xAve)
    return gwp_int(a2,a1) * np.exp(a0)
def gwp_int(a2,a1):
    """Closed-form Gaussian integral of exp(a2*x**2 + a1*x) over the real
    line; requires Re(a2) < 0 for convergence.
    """
    exponent = -a1 * a1 / (4.0 * a2)
    prefactor = np.sqrt(np.pi / -a2)
    return prefactor * np.exp(exponent)
@numba.autojit
def overlap(aj,qj,pj,ak,qk,pk):
    """
    Gaussian integration with complex alpha <zj|zk>

    (aj, qj, pj) and (ak, qk, pk) are the width/centre/momentum parameters
    of two Gaussian wavepackets; returns their complex overlap integral.
    Uses math.sqrt/exp from the module's star import.
    """
    dq = qk - qj
    dp = pk - pj
    return (aj.real*ak.real)**0.25 * sqrt(2./(np.conj(aj) + ak)) * exp( \
        -0.5 * np.conj(aj)*ak/(np.conj(aj)+ak) * (dp**2/np.conj(aj)/ak + dq**2 \
        + 2.0*1j* (pj/np.conj(aj) + pk/ak) *dq) )
# --- Output files -------------------------------------------------------
f1 = open('traj.dat', 'w')       # sample trajectories
f2 = open('xAve.dat', 'w')       # position expectation values
f3 = open('energy.dat', 'w')     # kinetic / quantum-potential energies
f4 = open('coeff.dat', 'w')      # basis expansion coefficients
f5 = open('norm.dat', 'w')       # norm of the coefficient vector
# NOTE(review): f6 opens the SAME file as f3 ('energy.dat'); it is never
# written or closed (referenced only from commented-out code in expand())
# and its open() truncates energy.dat a second time.
f6 = open('energy.dat', 'w')
f_cor = open('corr.out', 'w')    # autocorrelation function
t = 0.0
dt2 = dt/2.0                     # half step for the leap-frog updates
S0 = -1j*np.log(a/np.pi)/4.0 # complex phase factor in GWP
S = S0
# Ensemble averages over the weighted trajectories.
xAve = np.dot(x,w)
xSqdAve = np.dot(x*x,w)
xVar = (xSqdAve - xAve**2)
pAve = np.dot(p,w)
print('\n Initial Time \n ')
print('Mean position = {}, Variance = {} '.format(xAve,xVar))
print('Initial momentum {}'.format(pAve))
# initial expansion coeffs
Nb = int(input('Please enter number of basis function \n '))
c = np.zeros(Nb,dtype=complex)
c[0] = 1.0
# ----------
# initialize the force
# classical Force Field
V1,V2 = gwp_vp(a,x,xAve,w,c)
print('V1,V2 = {} {} '.format(V1,V2))
# effective potential and force field
V0 = derivs(xAve)[0]
Veff = V0 + V1 * (x-xAve) + V2/2.0 * (x-xAve)**2
dv_eff = V1 + V2 * (x-xAve)
# quantum force
u, du = LQF(x,w,xAve,xVar)
uAve = sum(u * w)
V = derivs(x)[0]
v = np.dot(V,w)                  # ensemble-averaged classical potential (not used below)
print(' Quantum potential = {} \n '.format(uAve))
#print(' Potential = {} \n '.format(vAve))
#print(' Total energy = {} \n '.format(uAve+vAve))
# ---------------
SaveWf(alpha, pAve, S, c, xAve,xVar,fname='wf0.dat') # save initial wavefunction
# format for output data
fmt = ' {} '*11 + '\n'
fmtC = ' {} '*(Nb+1) + '\n'
# update c for one timestep
cold = c
dc = expand(alpha,V1,V2,x,xAve,pAve,w,c)
# Forward-Euler first step to seed the second-order difference below.
c = c + dc*dt
for k in range(Nt):
    t += dt
    # leap-frog alg for {x,p}: half kick with effective + quantum force,
    # then two half drifts with the trajectory phase accumulated in between.
    p = p - dv_eff*dt2 - du*dt2
    x += p/am * dt2
    # Accumulate the per-trajectory action/phase s (Lagrangian * dt).
    s += (p * p / 2.0 / am - (Veff + u)) * dt
    x += p/am * dt2
    # compute observables
    xAve = np.dot(x,w)
    xSqdAve = np.dot(x*x,w)
    xVar = (xSqdAve - xAve**2)
    # effective force fields
    V1,V2 = gwp_vp(a,x,xAve,w,c)
    #print('V1,V2 = {} {} '.format(V1,V2))
    Veff = V0 + V1 * (x-xAve) + V2/2.0 * (x-xAve)**2
    dv_eff = V1 + V2 * (x-xAve)
    u, du = LQF(x,w,xAve,xVar)
    # Closing half kick of the leap-frog step.
    p += - dv_eff*dt2 - du*dt2
    # average momentum, update phase parameters
    pAve = np.dot(p,w)
    # Riccati-type equation of motion for the complex width parameter.
    alpha += (-1.0j/am * alpha**2 + 1.j * V2) * dt
    #a = 1.0 / 2.0 / xVar
    #b += (- (a**2 - b**2) / am + V2)*dt
    a, b = alpha.real, alpha.imag
    # S is the real part of the complex phase term, imaginary part is absorbed
    # S contains the normalization constant N = exp(-S.imag)
    V0 = derivs(xAve)[0]
    S += ( pAve**2/2./am - V0 - alpha/2./am ) * dt
    # classical action S += (pAve**2/2./am - V0 ) * dt
    # update c, second-order difference
    dc = expand(alpha,V1,V2,x,xAve,pAve,w,c)
    cnew = cold + 2.0*dc*dt
    cold = c
    c = cnew
    # observables
    xt = xObs(a,c,x,xAve)
    # correlation function
    cor = corr(alpha,w,x,xAve,c,pAve,s)
    f_cor.write('{} {} {} \n'.format(2.0 * t,cor.real, cor.imag))
    f5.write( '{} {} \n'.format(t,np.vdot(cold,cold)))
    #vAve = np.dot(V,w)
    kAve = np.dot(p*p/2./am, w)
    # overlap with a GWP ~ (xAve, xVar)
    #uAve = overlap(x,w,xAve,xVar)
    #print('overlap with GWP {} \n'.format(ov))
    # save data
    SaveWf(alpha, pAve,S, cold, xAve,xVar)
    # NOTE(review): uAve is never recomputed inside the loop -- every step
    # re-writes the initial quantum-potential value; confirm intent.
    f3.write('{} {} {} \n'.format(t,kAve,uAve))
    f1.write(fmt.format(t,*x[0:10]))
    f2.write(' {} {} {} {} {} \n'.format(t,xt, xAve,pAve,xVar))
    f4.write(fmtC.format(t,*c[0:Nb]))
# NOTE(review): f3, f4, f5 and f6 are never closed; they are flushed only
# at interpreter exit.
f1.close()
f2.close()
f_cor.close()
|
binghongcha08/pyQMD
|
GHQT/Erhenfest/dwell.py
|
Python
|
gpl-3.0
| 15,336
|
[
"Gaussian"
] |
5ed1416087614b1b49d4662331405a6c0358b938734a7abe780bec3f2fec2ee2
|
#!/usr/bin/env python
"""Copyright 2010 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.8'
__date__ = 'May 17 2010'
#Basic imports
from ctypes import *
import sys
#Phidget specific imports
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import AttachEventArgs, DetachEventArgs, ErrorEventArgs, OutputChangeEventArgs, TagEventArgs
from Phidgets.Devices.RFID import RFID, RFIDTagProtocol
from Phidgets.Phidget import PhidgetLogLevel
#Create an RFID object
try:
    rfid = RFID()
except RuntimeError as e:
    # NOTE(review): a stock RuntimeError has no .details attribute --
    # presumably the Phidgets library raises an enriched RuntimeError;
    # confirm, otherwise this handler itself raises AttributeError.
    print("Runtime Exception: %s" % e.details)
    print("Exiting....")
    exit(1)
#Information Display Function
def displayDeviceInfo():
    """Print an ASCII table with the attached reader's identity plus its
    output count, antenna and LED status (reads the module-level `rfid`)."""
    print("|------------|----------------------------------|--------------|------------|")
    print("|- Attached -|-              Type               -|- Serial No. -|-  Version -|")
    print("|------------|----------------------------------|--------------|------------|")
    print("|- %8s -|- %30s -|- %10d -|- %8d -|" % (rfid.isAttached(), rfid.getDeviceName(), rfid.getSerialNum(), rfid.getDeviceVersion()))
    print("|------------|----------------------------------|--------------|------------|")
    print("Number of outputs: %i -- Antenna Status: %s -- Onboard LED Status: %s" % (rfid.getOutputCount(), rfid.getAntennaOn(), rfid.getLEDOn()))
#Event Handler Callback Functions
def rfidAttached(e):
    """Attach callback: announce the newly attached reader's serial number."""
    serial = e.device.getSerialNum()
    print("RFID %i Attached!" % serial)
def rfidDetached(e):
    """Detach callback: announce which reader was unplugged."""
    serial = e.device.getSerialNum()
    print("RFID %i Detached!" % serial)
def rfidError(e):
    """Error callback: report the device error, or a Phidget failure raised
    while reading the event's fields."""
    try:
        dev = e.device
        print("RFID %i: Phidget Error %i: %s" % (dev.getSerialNum(), e.eCode, e.description))
    except PhidgetException as ex:
        print("Phidget Exception %i: %s" % (ex.code, ex.details))
def rfidOutputChanged(e):
    """Output-change callback: log which digital output toggled and its state."""
    serial = e.device.getSerialNum()
    print("RFID %i: Output %i State: %s" % (serial, e.index, e.state))
def rfidTagGained(e):
    # Tag-read callback: light the onboard LED (via the module-level
    # `rfid` object) and log the tag string.
    source = e.device
    rfid.setLEDOn(1)
    print("RFID %i: Tag Read: %s" % (source.getSerialNum(), e.tag))
def rfidTagLost(e):
    # Tag-lost callback: switch the onboard LED off (via the module-level
    # `rfid` object) and log the tag that left the field.
    source = e.device
    rfid.setLEDOn(0)
    print("RFID %i: Tag Lost: %s" % (source.getSerialNum(), e.tag))
#Main Program Code
# Register all event callbacks, then open the device and wait for it.
try:
    #logging example, uncomment to generate a log file
    #rfid.enableLogging(PhidgetLogLevel.PHIDGET_LOG_VERBOSE, "phidgetlog.log")
    rfid.setOnAttachHandler(rfidAttached)
    rfid.setOnDetachHandler(rfidDetached)
    rfid.setOnErrorhandler(rfidError)
    rfid.setOnOutputChangeHandler(rfidOutputChanged)
    rfid.setOnTagHandler(rfidTagGained)
    rfid.setOnTagLostHandler(rfidTagLost)
except PhidgetException as e:
    print("Phidget Exception %i: %s" % (e.code, e.details))
    print("Exiting....")
    exit(1)
print("Opening phidget object....")
try:
    rfid.openPhidget()
except PhidgetException as e:
    print("Phidget Exception %i: %s" % (e.code, e.details))
    print("Exiting....")
    exit(1)
print("Waiting for attach....")
try:
    rfid.waitForAttach(10000)
except PhidgetException as e:
    # Attach timed out/failed: best-effort close, then exit either way.
    print("Phidget Exception %i: %s" % (e.code, e.details))
    try:
        rfid.closePhidget()
    except PhidgetException as e:
        print("Phidget Exception %i: %s" % (e.code, e.details))
        print("Exiting....")
        exit(1)
    print("Exiting....")
    exit(1)
else:
    displayDeviceInfo()
print("Turning on the RFID antenna....")
rfid.setAntennaOn(True)
print("Press Enter to quit....")
# NOTE(review): `chr` shadows the builtin chr(); harmless here since it is
# never used afterwards, but rename if this script grows.
chr = sys.stdin.read(1)
# Write tag example:
#try:
#    rfid.write("Some Tag", RFIDTagProtocol.PHIDGET_RFID_PROTOCOL_PHIDGETS)
#except PhidgetException as e:
#    print("Phidget Exception %i: %s" % (e.code, e.details))
try:
    lastTag = rfid.getLastTag()
    print("Last Tag: %s" % (lastTag))
except PhidgetException as e:
    print("Phidget Exception %i: %s" % (e.code, e.details))
print("Closing...")
try:
    rfid.closePhidget()
except PhidgetException as e:
    print("Phidget Exception %i: %s" % (e.code, e.details))
    print("Exiting....")
    exit(1)
print("Done.")
exit(0)
|
danielsuo/mobot
|
src/move/Python/RFID-simple.py
|
Python
|
mit
| 4,259
|
[
"VisIt"
] |
4754a800c080611aed40a8e183d3887d54f829b2d3dec6986864ff79e5c6eefb
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from yepes.contrib.registry import Registry
from yepes.contrib.registry.fields import *
# Registry settings for the metrics app: which requests are tracked, what is
# recorded, and which parameters/paths/referrers/user-agents are ignored.
registry = Registry(namespace='metrics')
# Query-string parameters stripped before a page view is stored.
registry.register(
    'EXCLUDED_PARAMETERS',
    CommaSeparatedField(
        initial = (),
        label = _('Excluded Parameters'),
        required = False,
    ))
# Master switches for the three levels of tracking (all off by default).
registry.register(
    'RECORD_PAGE_VIEWS',
    BooleanField(
        initial = False,
        label = _('Record Page Views'),
        required = False,
    ))
registry.register(
    'RECORD_VISITORS',
    BooleanField(
        initial = False,
        label = _('Record Visitors'),
        required = False,
    ))
registry.register(
    'RECORD_VISITS',
    BooleanField(
        initial = False,
        label = _('Record Visits'),
        required = False,
    ))
# Only requests made with these HTTP methods are tracked.
registry.register(
    'TRACKED_REQUEST_METHODS',
    CommaSeparatedField(
        initial = ('GET', 'POST'),
        label = _('Tracked Request Methods'),
        max_length = 63,
        required = True,
    ))
# URL path prefixes excluded from tracking.
registry.register(
    'UNTRACKED_PATHS',
    CommaSeparatedField(
        initial = ('/admin', '/cache', '/media', '/static'),
        label = _('Untracked Paths'),
        required = False,
    ))
registry.register(
    'UNTRACKED_REFERRERS',
    CommaSeparatedField(
        initial = (),
        label = _('Untracked Referrers'),
        required = False,
    ))
# Substrings matched against the User-Agent header to filter out robots,
# HTTP clients and crawlers.
registry.register(
    'UNTRACKED_USER_AGENTS',
    CommaSeparatedField(
        initial = (
            # GENERIC
            'agent', 'archiver', 'audit', 'bot', 'check', 'crawler', 'link',
            'monit', 'proxy', 'search', 'sniff', 'spider', 'test', 'valid',
            # HTTP CLIENTS
            'curl', 'httpclient', 'php', 'urllib', 'wget', 'winhttp',
            # SEARCH ENGINES
            # 'alexa' must not be listed here because 'Alexa Toolbar' would
            # also be matched.
            # 'baidu' must not be listed here because 'baidubrowser' would
            # also be matched. However, Baidu robots always include 'spider'
            # in their user-agent string.
            'ask', 'bing', 'coccoc', 'google', 'nutch', 'topsy', 'yacy',
            'yahoo', 'yandex',
            # SOCIAL NETWORKS
            'facebook', 'pinterest', 'twit',
            # OTHERS
            'craft',      # netcraft.com
            'fetch',      # UnwindFetchor
            'integrity',  # Integrity
            'meta',       # MetaURI
            'nine',       # nineconnections.com
            'ning',       # NING
            'shop',       # ShopMania
            'shot',       # Browsershots
            'solver',     # urlresolver
            'zoom',       # Ezooms (SEOMoz bot)
        ),
        label = _('Untracked User-Agents'),
        required = False,
    ))
# Idle time (seconds) after which a returning visitor starts a new visit.
registry.register(
    'VISIT_TIMEOUT',
    IntegerField(
        initial = 60 * 30,
        label = _('Visit Timeout'),
        required = True,
    ))
|
samuelmaudo/yepes
|
yepes/contrib/metrics/registry.py
|
Python
|
bsd-3-clause
| 3,001
|
[
"VisIt"
] |
e9c992cf017b7c15a838983e38fe3ace108a752a9c357940c434e5f78c55b539
|
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print i
1
2
>>> g = f()
>>> g.next()
1
>>> g.next()
2
"Falling off the end" stops the generator:
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> g.next()
1
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> g.next() # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> g.next()
1
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print list(g2())
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print "creator", r.next()
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print "caller", i
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = me.next()
... yield i
>>> me = g()
>>> me.next()
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print list(f1())
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print list(f2())
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> k.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> k.next() # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print list(f())
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print x,
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print x,
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = range(1, 5)
>>> for k in range(len(seq) + 2):
... print "%d-combs of %s:" % (k, seq)
... for c in gcomb(seq, k):
... print " ", c
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<type 'function'>
>>> i = g()
>>> type(i)
<type 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'next', 'send', 'throw']
>>> from test.test_support import HAVE_DOCSTRINGS
>>> print(i.next.__doc__ if HAVE_DOCSTRINGS else 'x.next() -> the next value, or raise StopIteration')
x.next() -> the next value, or raise StopIteration
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<type 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
TypeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> me.next()
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return self.generator.next()
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.WichmannHill(42)
>>> while 1:
... for s in sets:
... print "%s->%s" % (s, s.find()),
... print
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print "merged", s1, "into", s2
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged D into G
A->A B->B C->C D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged C into F
A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged L into A
A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->A M->M
merged H into E
A->A B->B C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
merged B into E
A->A B->E C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
merged J into G
A->A B->E C->F D->G E->E F->F G->G H->E I->I J->G K->K L->A M->M
merged E into G
A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->M
merged M into G
A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->G
merged I into K
A->A B->G C->F D->G E->G F->F G->G H->G I->K J->G K->K L->A M->G
merged K into A
A->A B->G C->F D->G E->G F->F G->G H->G I->A J->G K->A L->A M->G
merged F into A
A->A B->G C->A D->G E->G F->A G->G H->G I->A J->G K->A L->A M->G
merged A into G
A->G B->G C->G D->G E->G F->G G->G H->G I->G J->G K->G L->G M->G
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [g.next() for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = ints.next()
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = g.next()
... nh = h.next()
... while 1:
... if ng < nh:
... yield ng
... ng = g.next()
... elif ng > nh:
... yield nh
... nh = h.next()
... else:
... yield ng
... ng = g.next()
... nh = h.next()
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print firstn(result, 15)
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.next
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print [m235[j] for j in range(15*i, 15*(i+1))]
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield g.next() + h.next()
...
... def tail(g):
... g.next() # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
suppose the list as already produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print firstn(it, 15)
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the hamming
sequence during hours without increasing memory usage, or very little.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield g.next() + h.next()
...
... def _fib():
... yield 1
... yield 2
... fibTail.next() # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
>>> def f():
... return 22
... yield 1
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[0]>, line 3)
>>> def f():
... yield 1
... return 22
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[1]>, line 3)
"return None" is not the same as "return" in a generator:
>>> def f():
... yield 1
... return None
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[2]>, line 3)
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<type 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<type 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<type 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<type 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... lambda x: x # shouldn't trigger here
... return # or here
... def f(i):
... return 2*i # or here
... if 0:
... return 3 # but *this* sucks (line 8)
... if 0:
... yield 2 # because it's a generator (line 10)
Traceback (most recent call last):
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[24]>, line 10)
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print g.next()
0
>>> print g.next()
1
>>> print g.next()
2
>>> print g.next()
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.func_code
True
>>> g.next()
5
>>> g.next()
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.func_code
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
    """Backtracking cross-product generator, straightforward version.

    gs is a list of no-argument callables, each returning an iterable.
    Yields one shared list per complete assignment of slots (callers who
    want to keep a result must copy it).
    """
    slots = [None] * len(gs)
    depth_limit = len(gs)

    def descend(depth):
        # Past the last slot: a full assignment is ready.
        if depth >= depth_limit:
            yield slots
            return
        # Bind this slot directly from its iterator, then recurse.
        for slots[depth] in gs[depth]():
            for result in descend(depth + 1):
                yield result

    for result in descend(0):
        yield result
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
    """Backtracking cross-product generator, unrolled for speed.

    Same contract as simple_conjoin: gs is a list of no-argument
    callables returning iterables; one shared result list is yielded per
    complete assignment.  Three loop nests are peeled per internal call
    to cut the per-item generator-resumption overhead.
    """
    count = len(gs)
    slots = [None] * count

    # Recurse one level at a time until the remaining depth is a
    # multiple of 3, then hand off to the unrolled helper.
    def walk(i):
        if i >= count:
            yield slots
        elif (count - i) % 3:
            nxt = i + 1
            for slots[i] in gs[i]():
                for out in walk(nxt):
                    yield out
        else:
            for out in _triple(i):
                yield out

    # Unrolled: consume three slots per call.  Internal only -- must be
    # entered with a positive remaining depth divisible by 3.
    def _triple(i):
        assert i < count and (count - i) % 3 == 0
        j, k, stop = i + 1, i + 2, i + 3
        ga, gb, gc = gs[i:stop]
        if stop >= count:
            # Last three slots: yield completed assignments directly.
            for slots[i] in ga():
                for slots[j] in gb():
                    for slots[k] in gc():
                        yield slots
        else:
            # Six or more slots remain; peel three and recurse.
            for slots[i] in ga():
                for slots[j] in gb():
                    for slots[k] in gc():
                        for out in _triple(stop):
                            yield out

    for out in walk(0):
        yield out
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs):  # rename to conjoin to run tests with this instead
    """Recursion-free conjoin: explicit descend/backtrack state machine.

    Same contract as conjoin(), but uses only trivial stack space, so
    huge numbers of backtracking levels (e.g. one per square of a large
    Knight's Tour board) don't blow the interpreter stack.  Slower (up
    to ~2x) than the unrolled recursive conjoin.

    Fix: the original grabbed the Python-2-only ``.next`` bound method
    of each iterator; this version stores the iterators and calls the
    builtin next() (available since 2.6), so it runs under both Python 2
    and Python 3.
    """
    n = len(gs)
    values = [None] * n
    iters = [None] * n
    _StopIteration = StopIteration  # localize because caught a *lot*
    _next = next                    # localize the builtin for the hot loops
    i = 0
    while 1:
        # Descend: pump a fresh iterator for each remaining slot.
        try:
            while i < n:
                it = iters[i] = iter(gs[i]())
                values[i] = _next(it)
                i += 1
        except _StopIteration:
            # A slot came up empty; fall through to backtracking.
            pass
        else:
            assert i == n
            yield values

        # Backtrack until an older iterator can be resumed.
        i -= 1
        while i >= 0:
            try:
                values[i] = _next(iters[i])
                # Success!  Start fresh at next level.
                i += 1
                break
            except _StopIteration:
                # Continue backtracking.
                i -= 1
        else:
            # Even slot 0 is exhausted: all assignments generated.
            assert i < 0
            break
# A conjoin-based N-Queens solver.
class Queens:
    """Conjoin-based N-Queens solver.

    One backtracking generator per row; conjoin() chains them.  A single
    integer bitmask (self.used) tracks the occupied columns and both
    diagonal families.

    Fix: the original used Python-2-only ``1L`` long literals and print
    statements; plain ints (unbounded in Python 3, auto-promoting in
    Python 2) and the single-argument print(...) call form work under
    both lines.
    """

    def __init__(self, n):
        self.n = n
        rangen = range(n)

        # Assign a unique int to each column and diagonal.
        # columns:  n of those, range(n).
        # NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
        # each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
        # based.
        # NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
        # each, smallest i+j is 0, largest is 2n-2.

        # For each square, compute a bit vector of the columns and
        # diagonals it covers, and for each row compute a function that
        # generates the possibilities for the columns in that row.
        self.rowgenerators = []
        for i in rangen:
            rowuses = [(1 << j) |                  # column ordinal
                       (1 << (n + i-j + n-1)) |    # NW-SE ordinal
                       (1 << (n + 2*n-1 + i+j))    # NE-SW ordinal
                       for j in rangen]

            def rowgen(rowuses=rowuses):
                for j in rangen:
                    uses = rowuses[j]
                    if uses & self.used == 0:
                        self.used |= uses
                        yield j
                        # Undo on resumption so backtracking is clean.
                        self.used &= ~uses

            self.rowgenerators.append(rowgen)

    # Generate solutions: each is a list mapping row -> column.
    def solve(self):
        self.used = 0
        for row2col in conjoin(self.rowgenerators):
            yield row2col

    def printsolution(self, row2col):
        """Pretty-print one solution as an ASCII board."""
        n = self.n
        assert n == len(row2col)
        sep = "+" + "-+" * n
        print(sep)
        for i in range(n):
            squares = [" " for j in range(n)]
            squares[row2col[i]] = "Q"
            print("|" + "|".join(squares) + "|")
        print(sep)
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
    """Conjoin-based Knight's Tour solver (closed tours).

    Builds one backtracking generator per move; conjoin() (or
    flat_conjoin()) chains them into a tour.  Passing hard=1 selects a
    stronger but costlier move-ordering heuristic; combined with
    flat_conjoin this has found a 200x200 tour.

    Fix: printsolution() used Python-2-only print statements; the
    single-argument print(...) call form below behaves identically under
    both Python 2 and Python 3.
    """

    def __init__(self, m, n, hard=0):
        self.m, self.n = m, n

        # solve() will set up succs[i] to be a list of square #i's
        # successors.
        succs = self.succs = []

        # Remove i0 from each of its successor's successor lists, i.e.
        # successors can't go back to i0 again.  Return 0 if we can
        # detect this makes a solution impossible, else return 1.
        def remove_from_successors(i0, len=len):
            # If we remove all exits from a free square, we're dead:
            # even if we move to it next, we can't leave it again.
            # If we create a square with one exit, we must visit it next;
            # else somebody else will have to visit it, and since there's
            # only one adjacent, there won't be a way to leave it again.
            # Finally, if we create more than one free square with a
            # single exit, we can only move to one of them next, leaving
            # the other one a dead end.
            ne0 = ne1 = 0
            for i in succs[i0]:
                s = succs[i]
                s.remove(i0)
                e = len(s)
                if e == 0:
                    ne0 += 1
                elif e == 1:
                    ne1 += 1
            return ne0 == 0 and ne1 < 2

        # Put i0 back in each of its successor's successor lists.
        def add_to_successors(i0):
            for i in succs[i0]:
                succs[i].append(i0)

        # Generate the first move.
        def first():
            if m < 1 or n < 1:
                return

            # Since we're looking for a cycle, it doesn't matter where we
            # start.  Starting in a corner makes the 2nd move easy.
            corner = self.coords2index(0, 0)
            remove_from_successors(corner)
            self.lastij = corner
            yield corner
            add_to_successors(corner)

        # Generate the second moves.
        def second():
            corner = self.coords2index(0, 0)
            assert self.lastij == corner  # i.e., we started in the corner
            if m < 3 or n < 3:
                return
            assert len(succs[corner]) == 2
            assert self.coords2index(1, 2) in succs[corner]
            assert self.coords2index(2, 1) in succs[corner]

            # Only two choices.  Whichever we pick, the other must be the
            # square picked on move m*n, as it's the only way to get back
            # to (0, 0).  Save its index in self.final so that moves before
            # the last know it must be kept free.
            for i, j in (1, 2), (2, 1):
                this = self.coords2index(i, j)
                final = self.coords2index(3-i, 3-j)
                self.final = final

                remove_from_successors(this)
                succs[final].append(corner)
                self.lastij = this
                yield this
                succs[final].remove(corner)
                add_to_successors(this)

        # Generate moves 3 thru m*n-1.
        def advance(len=len):
            # If some successor has only one exit, must take it.
            # Else favor successors with fewer exits.
            candidates = []
            for i in succs[self.lastij]:
                e = len(succs[i])
                assert e > 0, "else remove_from_successors() pruning flawed"
                if e == 1:
                    candidates = [(e, i)]
                    break
                candidates.append((e, i))
            else:
                candidates.sort()

            for e, i in candidates:
                if i != self.final:
                    if remove_from_successors(i):
                        self.lastij = i
                        yield i
                        add_to_successors(i)

        # Generate moves 3 thru m*n-1.  Alternative version using a
        # stronger (but more expensive) heuristic to order successors.
        # Since the # of backtracking levels is m*n, a poor move early on
        # can take eons to undo.  Smallest square board for which this
        # matters a lot is 52x52.
        def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
            # If some successor has only one exit, must take it.
            # Else favor successors with fewer exits.
            # Break ties via max distance from board centerpoint (favor
            # corners and edges whenever possible).
            candidates = []
            for i in succs[self.lastij]:
                e = len(succs[i])
                assert e > 0, "else remove_from_successors() pruning flawed"
                if e == 1:
                    candidates = [(e, 0, i)]
                    break
                i1, j1 = self.index2coords(i)
                d = (i1 - vmid)**2 + (j1 - hmid)**2
                candidates.append((e, -d, i))
            else:
                candidates.sort()

            for e, d, i in candidates:
                if i != self.final:
                    if remove_from_successors(i):
                        self.lastij = i
                        yield i
                        add_to_successors(i)

        # Generate the last move.
        def last():
            assert self.final in succs[self.lastij]
            yield self.final

        if m*n < 4:
            self.squaregenerators = [first]
        else:
            self.squaregenerators = [first, second] + \
                [hard and advance_hard or advance] * (m*n - 3) + \
                [last]

    def coords2index(self, i, j):
        """Map 0-based (row, col) to a square index, row-major."""
        assert 0 <= i < self.m
        assert 0 <= j < self.n
        return i * self.n + j

    def index2coords(self, index):
        """Inverse of coords2index: square index -> (row, col)."""
        assert 0 <= index < self.m * self.n
        return divmod(index, self.n)

    def _init_board(self):
        """(Re)build succs so succs[i] lists the legal knight moves from i."""
        succs = self.succs
        del succs[:]
        m, n = self.m, self.n
        c2i = self.coords2index
        offsets = [( 1,  2), ( 2,  1), ( 2, -1), ( 1, -2),
                   (-1, -2), (-2, -1), (-2,  1), (-1,  2)]
        rangen = range(n)
        for i in range(m):
            for j in rangen:
                s = [c2i(i+io, j+jo) for io, jo in offsets
                     if 0 <= i+io < m and
                        0 <= j+jo < n]
                succs.append(s)

    # Generate solutions: each is a list of square indices in visit order.
    def solve(self):
        self._init_board()
        for x in conjoin(self.squaregenerators):
            yield x

    def printsolution(self, x):
        """Print a tour as an m x n table of 1-based move numbers."""
        m, n = self.m, self.n
        assert len(x) == m*n
        w = len(str(m*n))
        format = "%" + str(w) + "d"
        squares = [[None] * n for i in range(m)]
        k = 1
        for i in x:
            i1, j1 = self.index2coords(i)
            squares[i1][j1] = format % k
            k += 1

        sep = "+" + ("-" * w + "+") * n
        print(sep)
        for i in range(m):
            row = squares[i]
            print("|" + "|".join(row) + "|")
        print(sep)
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print c
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print n, len(all), all[0] == [0] * n, all[-1] == [1] * n
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print "Solution", count
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print count, "solutions in all."
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print "Solution", count
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print (yield 1)
... yield 2
>>> g = f()
>>> g.next()
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<type 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> c.next()
>>> print seq
[]
>>> c.send(10)
>>> print seq
[10]
>>> c.send(10)
>>> print seq
[10, 20]
>>> c.send(10)
>>> print seq
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[21]>", line 1
SyntaxError: 'yield' outside function
>>> def f(): return lambda x=(yield): 1
Traceback (most recent call last):
...
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.coroutine[22]>, line 1)
>>> def f(): x = yield = y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[23]>", line 1
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[24]>", line 1
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[25]>", line 1
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print (yield)
... except ValueError,v:
... print "caught ValueError (%s)" % (v),
>>> import sys
>>> g = f()
>>> g.next()
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print g.gi_frame
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
>>> f().throw("abc") # throw on just-opened generator
Traceback (most recent call last):
...
TypeError: exceptions must be classes, or instances, not str
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print "exiting"
>>> g = f()
>>> g.next()
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> g.next()
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print "exiting"
>>> g = f()
>>> g.next()
>>> del g
exiting
>>> class context(object):
... def __enter__(self): pass
... def __exit__(self, *args): print 'exiting'
>>> def f():
... with context():
... yield
>>> g = f()
>>> g.next()
>>> del g
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception: print 'except'
... finally: print 'finally'
>>> g = f()
>>> g.next()
>>> del g
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> g.next()
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, StringIO
>>> old, sys.stderr = sys.stderr, StringIO.StringIO()
>>> g = f()
>>> g.next()
>>> del g
>>> sys.stderr.getvalue().startswith(
... "Exception RuntimeError: 'generator ignored GeneratorExit' in "
... )
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> g.next()
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<type 'generator'>
>>> def f(): x = yield
>>> type(f())
<type 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<type 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<type 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<type 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def next(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = it.next()
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> import sys, StringIO
>>> old = sys.stderr
>>> try:
... sys.stderr = StringIO.StringIO()
... class Leaker:
... def __del__(self):
... raise RuntimeError
...
... l = Leaker()
... del l
... err = sys.stderr.getvalue().strip()
... err.startswith(
... "Exception RuntimeError: RuntimeError() in <"
... )
... err.endswith("> ignored")
... len(err.splitlines())
... finally:
... sys.stderr = old
True
True
1
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
# Doctest suites keyed by short name.  The stdlib doctest machinery
# collects this module-level __test__ mapping automatically; the first
# four entries (tutorial_tests .. fun_tests) are defined earlier in the
# file.
__test__ = {"tut":      tutorial_tests,
            "pep":      pep_tests,
            "email":    email_tests,
            "fun":      fun_tests,
            "syntax":   syntax_tests,
            "conjoin":  conjoin_tests,
            "weakref":  weakref_tests,
            "coroutine": coroutine_tests,
            "refleaks": refleaks_tests,
            }
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    """Entry point regrtest invokes; runs every doctest suite above.

    Importing test_generators lazily, from inside the function, is part
    of the bootstrap workaround described in the comment above.
    """
    from test import test_support
    from test import test_generators
    test_support.run_doctest(test_generators, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
    test_main(1)  # verbose=1: show per-doctest output when run by hand
|
j5shi/Thruster
|
pylibs/test/test_generators.py
|
Python
|
gpl-2.0
| 52,675
|
[
"VisIt"
] |
ac393d31ec58d9862851fd5017756ef8a185763de0b434843118189907bdc32e
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import reduce
from pyscf import scf
from pyscf import gto
from pyscf import cc
from pyscf.cc import ccsd
from pyscf.cc import addons
# Shared fixtures for every test below: a closed-shell water molecule in
# a 6-31G basis, its converged RHF solution, the equivalent GHF object,
# and a converged restricted CCSD calculation.
mol = gto.Mole()
mol.atom = [
    [8 , (0. , 0.     , 0.)],
    [1 , (0. , -0.757 , 0.587)],
    [1 , (0. , 0.757  , 0.587)]]
mol.verbose = 5
mol.output = '/dev/null'  # route SCF/CC convergence chatter away from stdout
mol.basis = '631g'
mol.spin = 0
mol.build()
# Tight SCF convergence so the CC amplitudes are reproducible to the
# 12-decimal tolerance asserted in test_spin2spatial.
mf1 = scf.RHF(mol).run(conv_tol=1e-12)
gmf = scf.addons.convert_to_ghf(mf1)
myrcc = ccsd.CCSD(mf1).run()
def tearDownModule():
    """Release the module-level fixtures after the suite finishes."""
    global mol, mf1, gmf, myrcc
    mol.stdout.close()  # mol.output opened a file handle; close it explicitly
    del mol, mf1, gmf, myrcc
class KnownValues(unittest.TestCase):
    """Regression tests for pyscf.cc.addons amplitude conversions."""

    def test_spin2spatial(self):
        """spatial->spin->spatial round trip reproduces the RCCSD amplitudes."""
        t1g = addons.spatial2spin(myrcc.t1)
        t2g = addons.spatial2spin(myrcc.t2)
        orbspin = gmf.mo_coeff.orbspin
        t1a, t1b = addons.spin2spatial(t1g, orbspin)
        t2aa, t2ab, t2bb = addons.spin2spatial(t2g, orbspin)
        self.assertAlmostEqual(abs(myrcc.t1 - t1a).max(), 0, 12)
        self.assertAlmostEqual(abs(myrcc.t2 - t2ab).max(), 0, 12)
        self.assertAlmostEqual(abs(t1g - addons.spatial2spin((t1a,t1b), orbspin)).max(), 0, 12)
        self.assertAlmostEqual(abs(t2g - addons.spatial2spin((t2aa,t2ab,t2bb), orbspin)).max(), 0, 12)

    def test_convert_to_uccsd(self):
        """RCCSD -> UCCSD conversion; converting an existing UCCSD is a no-op."""
        myucc = addons.convert_to_uccsd(myrcc)
        myucc = addons.convert_to_uccsd(myucc)
        # Fix: the original called assertTrue(shape, expected), which
        # treats the expected tuple as a failure *message* and passes
        # for any non-empty shape.  assertEqual actually compares them.
        self.assertEqual(myucc.t1[0].shape, (5,8))
        self.assertEqual(myucc.t1[1].shape, (5,8))
        self.assertEqual(myucc.t2[0].shape, (5,5,8,8))
        self.assertEqual(myucc.t2[1].shape, (5,5,8,8))
        self.assertEqual(myucc.t2[2].shape, (5,5,8,8))

    def test_convert_to_gccsd(self):
        """RCCSD, UCCSD and GCCSD inputs all convert to spin-orbital GCCSD."""
        # (A dead convert_to_uccsd(myrcc) assignment that was immediately
        # overwritten here has been removed; the UCCSD path is exercised
        # below.)
        mygcc = addons.convert_to_gccsd(myrcc)
        self.assertEqual(mygcc.t1.shape, (10,16))
        self.assertEqual(mygcc.t2.shape, (10,10,16,16))

        myucc = addons.convert_to_uccsd(myrcc)
        mygcc = addons.convert_to_gccsd(myucc)
        self.assertEqual(mygcc.t1.shape, (10,16))
        self.assertEqual(mygcc.t2.shape, (10,10,16,16))

        mygcc = addons.convert_to_gccsd(cc.GCCSD(gmf))
        self.assertTrue(isinstance(mygcc, cc.gccsd.GCCSD))
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    print("Tests for addons")
    unittest.main()
|
sunqm/pyscf
|
pyscf/cc/test/test_addons.py
|
Python
|
apache-2.0
| 2,892
|
[
"PySCF"
] |
3c39b5985d8b0a8a8f2bbd6edeafd737a072f4e157d2dc7a3618908307450ee8
|
"""Module providing data structures for data collection and analysis
The main features provided in this module are the stlabmtx class and the stlabdict class as
well as the framearr_to_mtx function.
"""
from collections import OrderedDict
import numpy as np
from scipy import ndimage
import pickle
import struct
import scipy
from scipy.ndimage.filters import gaussian_filter
import pandas as pd
from scipy.interpolate import interp1d
class stlabdict(OrderedDict):
    """Class to hold a data table with multiple lines and columns

    This class is DEPRECATED in favor of pandas DataFrame. They serve the same
    function as an stlabdict but have much more functionality (and documentation...).

    This class is essentially an ordered dict (is a child of) with a few
    convenience methods included. Each element of the dict has an index that
    labels the column and contains an array of numbers with the column data.
    It is basically a matrix where the column index are string constants instead
    of numbers (to more explicitly keep track of what each column contains).
    Can also be indexed by column number.
    """
    def __init__(self, *args, **kwargs):
        """Init method for stlabdict

        Simply calls the OrderedDict constructor.
        """
        super(stlabdict, self).__init__(*args, **kwargs)
    def addparcolumn(self, colname, colval):
        """Adds a parameter column

        A parameter column is typically a column with a constant value for all
        lines (i.e. power in a vna trace). Simply repeats the same value in an
        array of the same length as the other columns. Does not work if there
        are no existing columns.

        Parameters
        ----------
        colname : str
            Column title for the new parameter column
        colval : float
            Value to fill the parameter column
        """
        # Length is taken from the first existing column.
        first_col = self[list(self.keys())[0]]
        self[colname] = np.full(len(first_col), colval)
        return
    def line(self, nn):
        """Gets a line from the table

        Takes a line from the stlabdict given by index. While getting a column
        can be done by simply taking mystlabdict[myindex], getting a line
        requires iterating over the dict and pulling out the desired line.

        Parameters
        ----------
        nn : int
            Line number to be extracted

        Returns
        -------
        ret : stlabdict
            New stlabdict with only the desired line (each element is labelled
            by the same column name as before but only contains a single value).
        """
        ret = stlabdict()
        for key in self.keys():
            ret[key] = self[key][nn]
        return ret
    def __getitem__(self, key):
        """Overloaded indexing of the dict

        Reimplements the getting of items from the dict to allow for indexing
        by column position as well as by label.

        Parameters
        ----------
        key : str or int
            Desired column index or position. If an int is given, the method
            first checks if it is already an index. If it is not, it returns
            the column at that position.

        Raises
        ------
        KeyError
            If the key is neither an existing label nor a non-negative int.
        """
        if key in self.keys():
            return super(stlabdict, self).__getitem__(key)
        elif isinstance(key, int) and key >= 0:
            return self[list(self.keys())[key]]
        else:
            # Include the offending key in the exception (the original raised
            # a bare KeyError, which made failures hard to diagnose).
            raise KeyError(key)
    def ncol(self):
        """Get the number of columns

        Returns
        -------
        int
            Number of columns in stlabdict
        """
        return len(self.keys())
    def nline(self):
        """Get the number of lines in dict

        Checks that all columns have the same number of lines.

        Returns
        -------
        int
            Number of lines in first column (should be the same for any column)
        """
        a = len(self[list(self.keys())[0]])
        for key in self.keys():
            # Bug fix: the original used "is not", an identity comparison that
            # only works for small ints by CPython caching accident.
            if len(self[key]) != a:
                print('Columns with different length!!?')
        return a
    def matrix(self):
        """Converts entire table into a numpy array.

        Returns
        -------
        numpy.ndarray
            2D array containing the same data as the stlabdict, one column per
            key. Loses column titles.
        """
        mat = [list(self[key]) for key in self.keys()]
        return np.transpose(mat)
import copy
#Auxiliary processing functions for stlabmtx
def checkEqual1(iterator):
    """Check whether all elements of an iterable are equal (or it is empty).

    Returns
    -------
    bool
        True if the iterable is empty or every element equals the first one,
        False otherwise.
    """
    items = iter(iterator)
    # The loop body runs at most once: it fires only if there is a first
    # element, and immediately compares the remainder against it.
    for reference in items:
        return all(reference == other for other in items)
    return True
def dictarr_to_mtx(data, key, rangex=None, rangey=None, xkey=None, ykey=None, xtitle=None, ytitle=None, ztitle = None):
    """Converts an array of dicts (or stlabdicts) to an stlabmtx object

    Takes an array of dict-like (dict, OrderedDict, stlabdict), typically from a measurement file, and selects the appropriate columns for
    conversion into an stlabmtx that allows spyview like operations and processing.
    If neither ranges or titles are given, some defaults are filled in. The chosen data column from each data array element will be placed
    as a line in the final matrix sequentially.

    Parameters
    ----------
    data : array of dict
        Input array of data dicts. The dicts are expected to contain a series of arrays of floats with the same length.
    key : str
        Index of the appropriate column of each dict for the data axis of the final matrix (data values for each pixel)
    xkey, ykey : str or None, optional
        Columns to use to calculate the desired x and y ranges for the final matrix. If these are provided they are also
        used as the x and y titles. x runs across the matrix columns and y along the rows. This means that if x is the "slow" variable
        in the measurement file, the output matrix will be transposed to accomodate this. The ranges are assumed to be the same for all lines.
    rangex, rangey : array of float or None, optional
        If provided, they override the xkey and ykey assignment. They should contain arrays of the correct length for use
        on the axes. These ranges will be saved along with the data (can be unevenly spaced). The ranges are assumed to be the same for all lines.
    xtitle, ytitle, ztitle : str or None, optional
        Titles for the x, y and z axes. If provided, they override the titles provided in xkey, ykey and key.

    Returns
    -------
    stlabmtx
        Resulting stlabmtx.
    """
    # Build initial matrix: each selected data column becomes a line in zz.
    zz = []
    for line in data:
        zz.append(line[key])
    zz = np.asarray(zz)
    if not ztitle:
        ztitle = str(key)
    # All comparisons below use "is None": the original "== None" raised
    # "truth value of an array is ambiguous" whenever a range array was given.
    # The matrix is wrapped in a DataFrame because the pandas-based stlabmtx
    # reads its axis ranges from the frame index/columns (the original passed
    # raw matrices and extra positional range arguments, which crashed).
    # No keys or ranges given:
    if rangex is None and rangey is None and xkey is None and ykey is None:
        if xtitle is None:
            xtitle = 'xtitle'  # Default title
        if ytitle is None:
            ytitle = 'ytitle'  # Default title
        return stlabmtx(pd.DataFrame(zz), xtitle=xtitle, ytitle=ytitle, ztitle=ztitle)
    # If ranges but no keys are given
    elif (xkey is None and ykey is None) and (rangex is not None and rangey is not None):
        if xtitle is None:
            xtitle = 'xtitle'  # Default title
        if ytitle is None:
            ytitle = 'ytitle'  # Default title
        zz = pd.DataFrame(zz, index=rangey, columns=rangex)
        return stlabmtx(zz, xtitle=xtitle, ytitle=ytitle, ztitle=ztitle)
    # If keys but no ranges given
    elif (xkey is not None and ykey is not None) and (rangex is None and rangey is None):
        # Take first dataset and extract the two relevant columns
        first = data[0]
        xx = first[xkey]
        yy = first[ykey]
        # Check which is slow (the one with all equal values is slow)
        xslow, yslow = (checkEqual1(xx), checkEqual1(yy))
        # Both can not be fast or slow
        if xslow == yslow:
            print('dictarr_to_mtx: Warning, invalid xkey and ykey. Using defaults')
            if xtitle is None:
                xtitle = 'xtitle'  # Default title
            if ytitle is None:
                ytitle = 'ytitle'  # Default title
            return stlabmtx(pd.DataFrame(zz), xtitle=xtitle, ytitle=ytitle, ztitle=ztitle)
        # If x is slow, the matrix needs to be transposed and the x range is
        # read from the first element of each dataset.
        if xslow:
            zz = zz.T
            xx = [line[xkey][0] for line in data]
        # If y is slow, the matrix is already correct.
        if yslow:
            yy = [line[ykey][0] for line in data]
        xx = np.asarray(xx)
        yy = np.asarray(yy)
        # Sort out titles (integer keys index into the tuple of column titles)
        titles = tuple(data[0].keys())
        if xtitle is None:
            if isinstance(xkey, str):
                xtitle = xkey
            elif isinstance(xkey, int):
                xtitle = titles[xkey]
        if ytitle is None:
            if isinstance(ykey, str):
                ytitle = ykey
            elif isinstance(ykey, int):
                ytitle = titles[ykey]
        zz = pd.DataFrame(zz, index=yy, columns=xx)
        return stlabmtx(zz, xtitle=xtitle, ytitle=ytitle, ztitle=ztitle)
    # Mixed cases (one key and one range) are not implemented
    else:
        print('dictarr_to_mtx: Warning, invalid keys and ranges. Using defaults')
        if xtitle is None:
            xtitle = 'xtitle'  # Default title
        if ytitle is None:
            ytitle = 'ytitle'  # Default title
        return stlabmtx(pd.DataFrame(zz), xtitle=xtitle, ytitle=ytitle, ztitle=ztitle)
def sub_lbl(data, lowp=40, highp=40, low_limit=-1e99, high_limit=1e99):
    """Subtract a robust per-row average from every row of a 2D array.

    For each row, values outside [low_limit, high_limit] are first discarded,
    then the lowest ``lowp`` percent and highest ``highp`` percent of the
    remaining values are excluded before computing the mean that is subtracted
    from the whole row.

    Parameters
    ----------
    data : numpy 2D array
        Input matrix (rows are processed independently)
    lowp, highp : float, optional
        Percentages rejected from the low/high end of the sorted values
    low_limit, high_limit : float, optional
        Absolute bounds outside of which values are ignored for the average

    Returns
    -------
    numpy.matrix
        Matrix of the same shape with the per-row mean removed
    """
    result_rows = []
    work = data.copy()  # for some reason copying first makes iteration faster
    for row in work:
        # Clamp the acceptance window to the actual row extrema.
        lo = max(row.min(), low_limit)
        hi = min(row.max(), high_limit)
        in_window = np.logical_and(lo <= row, row <= hi)
        if len(row[in_window]) == 0:
            print('sub_lbl: Warning, no values to average')
            avg = 0
        else:
            # Percentile thresholds (a significant share of the runtime).
            thr_lo = np.percentile(row[in_window], lowp)
            thr_hi = np.percentile(row[in_window], 100 - highp)
            trimmed = np.logical_and(thr_lo <= row, row <= thr_hi)
            if len(row[trimmed]) == 0:
                print('sub_lbl: Warning, no values to average')
                avg = 0
            else:
                avg = row[trimmed].mean()
        result_rows.append(row - avg)
    return np.matrix(np.squeeze(result_rows))
#Main stlabmtx_pd class
class stlabmtx():
    """stlabmtx class for spyview-like operations

    This class implements a matrix in the form of a pandas DataFrame and contains
    methods analogous to those present in spyview.

    Attributes
    ----------
    mtx : pandas.DataFrame
        Original dataframe before any processing. The dataframe indexes are considered
        the x and y ranges on the final matrix
    pmtx : pandas.DataFrame
        Processed dataframe (filters applied)
    processlist : array of str
        List of strings specifying the applied filters (in order)
    xtitle, ytitle, ztitle : str
        Titles for the x,y and z (data) axes
    xtitle0, ytitle0, ztitle0 : str
        Initial titles for the x,y and z (data) axes (so, in case they are changed, reset can recover them)
    """
    def __init__(self, mtx, xtitle='xtitle', ytitle='ytitle', ztitle = 'ztitle'):
        """stlab mtx initialization

        Takes an input DataFrame and sets up the object

        Parameters
        ----------
        mtx : pandas.DataFrame
            Input Dataframe
        xtitle, ytitle, ztitle : str
            Title for x,y and z axes
        """
        self.mtx = copy.deepcopy(mtx)
        self.mtx.index.name = str(ytitle)
        self.mtx.columns.name = str(xtitle)
        print(self.mtx.shape)
        self.processlist = []
        # Bug fix: work on a copy.  Several filters (lowpass, pixel_avg,
        # sub_lbl, ...) write through .loc[:,:] in place; sharing the object
        # with self.mtx corrupted the pristine data and broke reset().
        self.pmtx = self.mtx.copy()
        self.xtitle = str(xtitle)
        self.ytitle = str(ytitle)
        self.ztitle = str(ztitle)
        self.xtitle0 = self.xtitle
        self.ytitle0 = self.ytitle
        self.ztitle0 = self.ztitle
    def getextents(self):
        """Get the extents of the matrix

        Returns a tuple containing (xmin, xmax, ymin, ymax) from the axis ranges,
        typically to correctly scale the axes when plotting with matplotlib.pyplot.imshow

        Returns
        -------
        tuple of float
            Four element tuple containing (xmin,xmax,ymin,ymax)
        """
        xs = list(self.pmtx.columns)
        ys = list(self.pmtx.index)
        return (xs[0], xs[-1], ys[-1], ys[0])
    # Functions from spyview
    def absolute(self):
        """Absolute value filter

        Applies np.abs to all elements of the matrix. Process string :code:`abs`.
        """
        self.pmtx = np.abs(self.pmtx)
        self.processlist.append('abs')
    def crop(data, left=None, right=None, up=None, low=None):
        """Crop filter

        Crops data matrix to the given extents. Process string :code:`crop left,right,up,low`

        Parameters
        ----------
        left : int or None, optional
            New first column of cropped array. If None, is assumed to be the first column of the whole set (no crop)
        right : int or None, optional
            New last column of cropped array. If None, is assumed to be the last column of the whole set (no crop).
            When given a value, the actual index specified is not included in the crop
        up : int or None, optional
            New first row of the cropped array. If None, is assumed to be the first line of the whole set (no crop)
            When given a value, the actual index specified is not included in the crop
        low : int or None, optional
            New first row of the cropped array. If None, is assumed to be the last line of the whole set (no crop)
        """
        valdict = {'left': left, 'right': right, 'up': up, 'low': low}
        for key, val in valdict.items():
            # Bug fix: treat None (not given) like 0 (open-ended).  The
            # original fell through to int(None), raising TypeError for any
            # bound left at its default.
            if val == 0 or val is None:
                valdict[key] = None
            else:
                valdict[key] = int(val)
        data.pmtx = data.pmtx.iloc[valdict['left']:valdict['right'], valdict['up']:valdict['low']]
        for key, val in valdict.items():
            if val is None:
                valdict[key] = 0
        data.processlist.append('crop {},{},{},{}'.format(valdict['left'], valdict['right'], valdict['up'], valdict['low']))
    def flip(self, x=False, y=False):
        """Flip filter

        Reverses x and/or y axis. Process string :code:`flip x,y` (0 is false, 1 is true).

        Parameters
        ----------
        x, y : bool, optional
            If True, x or y is flipped
        """
        x = bool(x)
        y = bool(y)
        if x:
            self.pmtx = self.pmtx.iloc[:, ::-1]
        if y:
            self.pmtx = self.pmtx.iloc[::-1, :]
        self.processlist.append('flip {:d},{:d}'.format(x, y))
    def log10(self):
        """Log10 filter

        Applies np.log10 to all elements in the matrix. Process string :code:`log10`
        """
        self.pmtx = np.log10(self.pmtx)
        self.processlist.append('log10')
    def lowpass(self, x=0, y=0):
        """Low Pass filter

        Applies a gaussian filter to the data with given pixel widths. Other filters are yet to be implemented.
        Process string :code:`lowpass x,y`

        Parameters
        ----------
        x,y : int, optional
            Width of the filter in the x and y direction
        """
        # TODO implement different filter types
        self.pmtx.loc[:, :] = gaussian_filter(self.pmtx, sigma=[int(y), int(x)])
        self.processlist.append('lowpass {},{}'.format(x, y))
    def neg(self):
        """Negative filter

        Multiplies matrix by -1. Process string :code:`neg`
        """
        self.pmtx = -self.pmtx
        self.processlist.append('neg')
    def offset(self, x=0):
        """Offset filter

        Offsets data values by adding given value. Process string :code:`offset x`

        Parameters
        ----------
        x : float, optional
            Value to add to all data values
        """
        self.pmtx = self.pmtx + x
        self.processlist.append('offset {}'.format(x))
    def offset_axes(self, x=0, y=0):
        """Axes offset filter

        Offset axis values. Process string :code:`offset_axes x,y`

        Parameters
        ----------
        x, y : float, optional
            Values to add to the axes values of the matrix
        """
        self.pmtx.columns = self.pmtx.columns + x
        self.pmtx.index = self.pmtx.index + y
        self.processlist.append('offset_axes {},{}'.format(x, y))
    def outlier(self, line, vertical=1):
        """Outlier filter

        Drop a line or column from the data. Process string :code:`outlier line,vertical`

        Parameters
        ----------
        line : int
            Line or column number to drop
        vertical : {1,0}, optional
            If 1, drops a column. If 0, drops a line
        """
        # NOTE(review): with vertical=1 this maps to axis=0 (drops a row
        # label); confirm against callers whether "vertical" refers to the
        # image orientation rather than the DataFrame axis.
        axis = 1 - vertical  # swap 1 and 0 since vertical axis is 0 and horizontal is 1
        self.pmtx = self.pmtx.drop(line, axis=axis)
        self.processlist.append('outlier {},{}'.format(line, vertical))
    def pixel_avg(self, nx=0, ny=0, center=0):
        """Pixel average filter

        Performs pixel averaging on matrix. Process string :code:`pixel_avg nx,ny,center`

        Parameters
        ----------
        nx,ny : int, optional
            Width and height of averaging window
        center : {0,1}, optional
            If 1 the full window is averaged; if 0 the center point of each
            window is excluded from its own average (footprint mask).
        """
        nx = int(nx); ny = int(ny)
        if bool(center):
            self.pmtx.loc[:, :] = ndimage.generic_filter(self.pmtx, np.nanmean, size=(nx, ny), mode='constant', cval=np.NaN)
        else:
            mask = np.ones((nx, ny))
            mask[int(nx / 2), int(ny / 2)] = 0
            self.pmtx.loc[:, :] = ndimage.generic_filter(self.pmtx, np.nanmean, footprint=mask, mode='constant', cval=np.NaN)
        self.processlist.append('pixel_avg {},{},{}'.format(nx, ny, center))
    def rotate_ccw(self):
        """Rotate counter-clockwise filter

        Rotates matrix and axes counter-clockwise. Process string :code:`rotate_ccw`
        """
        self.ytitle, self.xtitle = self.xtitle, self.ytitle
        self.pmtx = self.pmtx.transpose()
        self.pmtx = self.pmtx.iloc[::-1, :]
        self.processlist.append('rotate_ccw')
    def rotate_cw(self):
        """Rotate clockwise filter

        Rotates matrix and axes clockwise. Process string :code:`rotate_cw`
        """
        self.ytitle, self.xtitle = self.xtitle, self.ytitle
        self.pmtx = self.pmtx.transpose()
        self.pmtx = self.pmtx.iloc[:, ::-1]
        self.processlist.append('rotate_cw')
    def scale_data(self, factor=1.):
        """Scale filter

        Scales all data by given factor. Process string :code:`scale x`

        Parameters
        ----------
        factor : float, optional
            Value to scale the data by
        """
        self.pmtx = factor * self.pmtx
        self.processlist.append('scale {}'.format(factor))
    def sub_lbl(self, lowp=40, highp=40, low_limit=-1e99, high_limit=1e99):
        """Subtract line by line filter

        The average value of each line is subtracted from the data. Parts of each line cut can be
        excluded using the high and low percentile options. The idea is that all points are sorted in
        increasing order and a percentage from the back and front of the list is rejected for the average
        calculation. Process string :code:`sub_lbl lowp,highp,low_limit,high_limit`

        Parameters
        ----------
        lowp : float
            Percentage of points to be rejected from the averaging on the low side.
        highp : float
            Percentage of points to be rejected from the averaging on the high side.
        low_limit : float
            Absolute value below which points are ignored for the average (and percentile calculations)
        high_limit : float
            Absolute value above which points are ignored for the average (and percentile calculations)
        """
        # Delegates to the module-level sub_lbl helper.
        self.pmtx.loc[:, :] = sub_lbl(self.pmtx.values, lowp, highp, low_limit, high_limit)
        self.processlist.append('sub_lbl {},{},{},{}'.format(lowp, highp, low_limit, high_limit))
    def sub_cbc(self, lowp=40, highp=40, low_limit=-1e99, high_limit=1e99):
        """Subtract column by column filter

        Same as :any:`sub_lbl` but done on a column by column basis. Process string :code:`sub_cbc lowp,highp,low_limit,high_limit`
        """
        self.pmtx.loc[:, :] = sub_lbl(self.pmtx.values.T, lowp, highp, low_limit, high_limit).T
        self.processlist.append('sub_cbc {},{},{},{}'.format(lowp, highp, low_limit, high_limit))
    def sub_linecut(self, pos, horizontal=1):
        """Subtract linecut filter

        Selects a line or column and subtracts it from all other lines or columns in the matrix.
        Process string :code:`sub_linecut pos,horizontal`

        Parameters
        ----------
        pos : int
            Index of line or column to be subtracted
        horizontal : {1,0}
            If 1, a line is subtracted. If 0 a column is subtracted
        """
        pos = int(pos)
        if bool(horizontal):
            v = self.pmtx.iloc[pos, :]
            self.pmtx = self.pmtx.subtract(v, axis=1)
        else:
            v = self.pmtx.iloc[:, pos]
            self.pmtx = self.pmtx.subtract(v, axis=0)
        self.processlist.append('sub_linecut {},{}'.format(pos, horizontal))
    def vi_to_iv(self, vmin, vmax, nbins):
        """vi to iv filter

        Swaps the data axis with the y axis of the matrix. For example, if the data contains the voltage and the axis the current,
        this filter replaces the voltage data with the corresponding current data and the axis with the voltage.
        Since the axes are expected to be ordered, this is not an immediate operation and may not be possible in many cases (repeated data values?).
        If one desires to do this with the x axis instead of the y, the matrix must first be transposed. After the filter is applied the transpose
        can be undone.
        Process string :code:`vi_to_iv vmin,vmax,nbins`

        Parameters
        ----------
        vmin : float
            Lower end of the new y axis
        vmax : float
            Upper end of the new y axis
        nbins : int
            Number of points in the new axis
        """
        vinterpol = np.linspace(vmin, vmax, nbins)
        pmtx = [interp1d(x=self.pmtx[column], y=self.pmtx.axes[0], bounds_error=False, fill_value=np.nan)(vinterpol) for column in self.pmtx]
        self.pmtx = pd.DataFrame(np.array(pmtx).T, index=vinterpol, columns=self.pmtx.axes[1])
        # NOTE(review): this rotation assigns the old ztitle to xtitle, which
        # looks like it should target ytitle instead -- kept as-is; confirm
        # intended title bookkeeping before changing.
        self.pmtx.index.name, self.ztitle, self.xtitle = self.ztitle, self.pmtx.index.name, self.ztitle
        self.processlist.append('vi_to_iv {},{},{}'.format(vmin, vmax, nbins))
    def xderiv(self, direction=1):
        """X derivative filter

        Apply a derivative along the lines of the matrix. Process string :code:`xderiv direction`

        Parameters
        ----------
        direction : {1,-1}
            Direction for derivative. 1 by default (normal diff derivative)
        """
        self.pmtx = xderiv_pd(self.pmtx, direction)
        self.processlist.append('xderiv {}'.format(direction))
    def yderiv(self, direction=1):
        """Y derivative filter

        Apply a derivative along the columns of the matrix. Process string :code:`yderiv direction`

        Parameters
        ----------
        direction : {1,-1}
            Direction for derivative. 1 by default (normal diff derivative)
        """
        self.pmtx = yderiv_pd(self.pmtx, direction)
        self.processlist.append('yderiv {}'.format(direction))
    def transpose(self):
        """Transpose filter

        Transposes the data matrix (and axes). Process string :code:`transpose`
        """
        self.ytitle, self.xtitle = self.xtitle, self.ytitle
        self.pmtx = self.pmtx.transpose()
        self.processlist.append('transpose')
    # Processlist
    def saveprocesslist(self, filename='./process.pl'):
        """Save applied filter list

        Saves the applied filters and parameters to a text file (process.pl in the current folder by default)

        Parameters
        ----------
        filename : str
            Name of the new file to save the list in.
        """
        # Use a context manager so the handle is closed even on write errors.
        with open(filename, 'w') as myfile:
            for line in self.processlist:
                myfile.write(line + '\n')
    def applystep(self, line):
        """Apply step from a process list string

        Takes an input string describing one filter application and applies it to the data

        Parameters
        ----------
        line : str
            String describing the desired filter to be applied
        """
        sline = line.split(' ')
        if len(sline) == 1:
            # Bug fix: strip here too.  Lines read from a process file keep
            # their trailing newline, so the original getattr lookup failed
            # for parameterless steps like "neg\n".
            func = sline[0].strip()
            pars = []
        else:
            pars = sline[1].split(',')
            func = sline[0].strip()
        # Bug fix: "func is ''" relied on CPython string interning; use a
        # plain truthiness test for blank lines.
        if not func:
            return
        pars = [float(x) for x in pars]
        method = getattr(self, func)
        print(func, pars)
        method(*pars)
        # Bug fix: the filter method already records its own process string.
        # Appending `line` here as well duplicated every entry, so delstep /
        # insertstep (which reset and re-apply) ran each filter twice.
    def applyprocesslist(self, pl):
        """Apply all steps in array of process strings

        Takes an input list of strings describing filters to be applied to the data and runs them.

        Parameters
        ----------
        pl : array of str
            Process strings to apply, in order
        """
        for line in pl:
            self.applystep(line)
    def applyprocessfile(self, filename):
        """Apply all steps in a process list file

        Takes an input file containing a process list and applies them to the data.
        Lines starting with '#' are treated as comments and skipped.

        Parameters
        ----------
        filename : str
            Process file name
        """
        with open(filename, 'r') as myfile:
            for line in myfile:
                if '#' == line[0]:
                    continue
                self.applystep(line)
    def reset(self):
        """Reset filters

        Resets all filters and returns matrix to its initial state
        """
        self.processlist = []
        self.xtitle = self.xtitle0
        self.ytitle = self.ytitle0
        # Bug fix: ztitle was not restored by the original reset.
        self.ztitle = self.ztitle0
        # Fresh copy so later in-place filters cannot corrupt self.mtx.
        self.pmtx = self.mtx.copy()
    def delstep(self, ii):
        """Removes a filter from the current process list by index

        Parameters
        ----------
        ii : int
            Index of filter to be removed from applied filters
        """
        newpl = copy.deepcopy(self.processlist)
        del newpl[ii]
        self.reset()
        self.applyprocesslist(newpl)
    def insertstep(self, ii, line):
        """Inserts new filter into process list

        Adds a new filter at a specific position in the process list

        Parameters
        ----------
        ii : int
            Index for the position of the new filter
        line : str
            Process string for the new filter
        """
        newpl = copy.deepcopy(self.processlist)
        newpl.insert(ii, line)
        self.reset()
        self.applyprocesslist(newpl)
    # Uses pickle to save to file
    def save(self, name='output'):
        """Save matrix to file

        Pickles the object and saves it to given file.

        Parameters
        ----------
        name : str
            Base filename to be used. ".mtx.pkl" will be appended to given filename
        """
        filename = name + '.mtx.pkl'
        with open(filename, 'wb') as outfile:
            pickle.dump(self, outfile, pickle.HIGHEST_PROTOCOL)
        # To load:
        # import pickle
        # with open(filename, 'rb') as input:
        #     mtx1 = pickle.load(input)
    def savemtx(self, filename='./output'):
        """Save to Spyview mtx format

        Saves current processed matrix to a spyview mtx file

        Parameters
        ----------
        filename : str
            Name of the new mtx file. ".mtx" will be appended.
        """
        filename = filename + '.mtx'
        with open(filename, 'wb') as outfile:
            # Header format:
            # Units, Dataset name, xname, xmin, xmax, yname, ymin, ymax, zname, zmin, zmax
            # nx ny nz length
            ztitle = self.ztitle
            xx = np.array(self.pmtx.columns)
            yy = np.array(self.pmtx.index)
            line = ['Units', ztitle, self.xtitle, '{:e}'.format(xx[0]), '{:e}'.format(xx[-1]), self.ytitle, '{:e}'.format(yy[0]), '{:e}'.format(yy[-1]), 'Nothing', str(0), str(1)]
            mystr = ', '.join(line)
            mystr = bytes(mystr + '\n', 'ASCII')
            outfile.write(mystr)
            mystr = str(self.pmtx.shape[1]) + ' ' + str(self.pmtx.shape[0]) + ' ' + '1 8\n'
            mystr = bytes(mystr, 'ASCII')
            outfile.write(mystr)
            # Data is written column-major (Fortran order) as float64.
            data = self.pmtx.values
            data = np.squeeze(np.asarray(np.ndarray.flatten(data, order='F')))
            print(len(data))
            s = struct.pack('d' * len(data), *data)
            outfile.write(s)
    def loadmtx(self, filename):
        """Load matrix from an existing Spyview mtx file

        Parameters
        ----------
        filename : string
            Name of the mtx file to open
        """
        with open(filename, 'rb') as infile:
            content = infile.readline()
            content = content.decode('ASCII')
            if content[:5] == 'Units':
                # Full header: titles and axis limits, then dimensions line.
                content = content.split(',')
                content = [x.strip() for x in content]
                self.ztitle0 = content[1]
                self.xtitle0 = content[2]
                self.ytitle0 = content[5]
                xlow = np.float64(content[3])
                xhigh = np.float64(content[4])
                ylow = np.float64(content[6])
                yhigh = np.float64(content[7])
                content = infile.readline()
                content = content.decode('ASCII')
                content = content.split(' ')
                nx = int(content[0])
                ny = int(content[1])
                lb = int(content[3])
                rangex0 = np.linspace(xlow, xhigh, nx)
                rangey0 = np.linspace(ylow, yhigh, ny)
            else:
                # Headerless variant: the first line holds only the dimensions.
                # Bug fix: `content` was already decoded above; decoding a
                # second time raised AttributeError on str.
                content = content.split(' ')
                nx = int(content[0])
                ny = int(content[1])
                lb = int(content[3])
                rangex0 = np.linspace(1, nx, nx)
                rangey0 = np.linspace(1, ny, ny)
            n = nx * ny
            content = infile.read()
            # lb is the bytes-per-value field: 8 = float64, 4 = float32.
            if lb == 8:
                s = struct.unpack('d' * n, content)
            elif lb == 4:
                s = struct.unpack('f' * n, content)
            s = np.asarray(s)
            s = np.matrix(np.reshape(s, (ny, nx), order='F'))
            self.mtx = pd.DataFrame(s)
            self.mtx.columns = rangex0
            self.mtx.index = rangey0
            self.reset()
stlabmtx_pd = stlabmtx
def yderiv_pd(data, direction=1):
    """Finite-difference derivative of a DataFrame along its index (rows).

    Parameters
    ----------
    data : pandas.DataFrame
        Input frame; the index supplies the y spacing
    direction : {1,-1}, optional
        Difference direction (forward for 1, backward for -1)

    Returns
    -------
    pandas.DataFrame
        Frame of row differences divided by the index spacing (one row shorter)
    """
    spacing = np.diff(data.index)
    out = data.diff(axis=0, periods=direction).dropna(axis=0)
    if direction == -1:
        spacing = -spacing
    return out.divide(spacing, axis='rows')
def xderiv_pd(data, direction=1):
    """Finite-difference derivative along the columns, via yderiv_pd on the transpose."""
    flipped = data.T
    return yderiv_pd(flipped, direction).T
def framearr_to_mtx(data, key, rangex=None, rangey=None, xkey=None, ykey=None, xtitle=None, ytitle=None, ztitle = None):
    """Converts an array of pandas DataFrame to an stlabmtx object

    Takes an array of pandas.DataFrame, typically from a measurement file, and selects the appropriate columns for
    conversion into an stlabmtx that allows spyview like operations and processing. Is essentially the same as :any:`dictarr_to_mtx`
    but adapted for pandas DataFrame.
    If neither ranges or titles are given, some defaults are filled in. The chosen data column from each data array element will be placed
    as a line in the final matrix sequentially.

    Parameters
    ----------
    data : array of pandas.DataFrame
        Input array of frames.
    key : str
        Index of the appropriate column of each frame for the data axis of the final matrix (data values for each pixel)
    xkey, ykey : str or None, optional
        Columns to use to calculate the desired x and y ranges for the final matrix. If these are provided they are also
        used as the x and y titles. x runs across the matrix columns and y along the rows. This means that if x is the "slow" variable
        in the measurement file, the output matrix will be transposed to accomodate this. The ranges are assumed to be the same for all lines.
    rangex, rangey : array of float or None, optional
        If provided, they override the xkey and ykey assignment. They should contain arrays of the correct length for use
        on the axes. These ranges will be saved along with the data (can be unevenly spaced). The ranges are assumed to be the same for all lines.
    xtitle, ytitle, ztitle : str or None, optional
        Titles for the x, y and z axes. If provided, they override the titles provided in xkey, ykey and key.

    Returns
    -------
    stlabmtx
        Resulting stlabmtx.
    """
    # Build initial matrix: each selected data column becomes a line in zz.
    zz = []
    for line in data:
        zz.append(line[key])
    zz = np.array(zz)
    if not ztitle:
        ztitle = str(key)
    # All comparisons use "is None": the original "== None" raised
    # "truth value of an array is ambiguous" whenever a range array was given.
    # No keys or ranges given:
    if rangex is None and rangey is None and xkey is None and ykey is None:
        if xtitle is None:
            xtitle = 'xtitle'  # Default title
        if ytitle is None:
            ytitle = 'ytitle'  # Default title
        zz = pd.DataFrame(zz)
        return stlabmtx(zz, xtitle=xtitle, ytitle=ytitle, ztitle=ztitle)
    # If ranges but no keys are given
    elif (xkey is None and ykey is None) and (rangex is not None and rangey is not None):
        if xtitle is None:
            xtitle = 'xtitle'  # Default title
        if ytitle is None:
            ytitle = 'ytitle'  # Default title
        zz = pd.DataFrame(zz, index=rangey, columns=rangex)
        return stlabmtx(zz, xtitle=xtitle, ytitle=ytitle, ztitle=ztitle)
    # If keys but no ranges given
    elif (xkey is not None and ykey is not None) and (rangex is None and rangey is None):
        # Take first dataset and extract the two relevant columns
        first = data[0]
        xx = first[xkey]
        yy = first[ykey]
        # Check which is slow (the one with all equal values is slow)
        xslow, yslow = (checkEqual1(xx), checkEqual1(yy))
        # Both can not be fast or slow
        if xslow == yslow:
            print('framearr_to_mtx: Warning, invalid xkey and ykey. Using defaults')
            if xtitle is None:
                xtitle = 'xtitle'  # Default title
            if ytitle is None:
                ytitle = 'ytitle'  # Default title
            return stlabmtx(pd.DataFrame(zz), xtitle=xtitle, ytitle=ytitle, ztitle=ztitle)
        # If x is slow, the matrix needs to be transposed and the x range is
        # read from the first element of each dataset.
        if xslow:
            zz = zz.transpose()
            xx = [line[xkey].iloc[0] for line in data]
        # If y is slow, the matrix is already correct.
        if yslow:
            # Bug fix: use positional .iloc[0] like the x branch; label-based
            # [0] fails for frames whose index does not start at 0.
            yy = [line[ykey].iloc[0] for line in data]
        xx = np.asarray(xx)
        yy = np.asarray(yy)
        if xtitle is None:
            xtitle = str(xkey)  # Default title
        if ytitle is None:
            ytitle = str(ykey)  # Default title
        zz = pd.DataFrame(zz, index=yy, columns=xx)
        return stlabmtx(zz, xtitle=xtitle, ytitle=ytitle, ztitle=ztitle)
    # Mixed cases (one key and one range) are not implemented
    else:
        print('framearr_to_mtx: Warning, invalid keys and ranges. Using defaults')
        if xtitle is None:
            xtitle = 'xtitle'  # Default title
        if ytitle is None:
            ytitle = 'ytitle'  # Default title
        zz = pd.DataFrame(np.matrix(zz))
        return stlabmtx(zz, xtitle=xtitle, ytitle=ytitle, ztitle=ztitle)
'''
def xderiv(data,rangex,direction=1,axis=1):
dx = np.abs(rangex[0]-rangex[1])
if direction==-1:
dx = -dx
z = np.squeeze(np.array(data))
dz = np.gradient(z, dx, axis=axis)
return np.matrix(np.squeeze(dz))
'''
# Use slow if spacing is non-uniform
'''
def xderiv_slow(data,rangex,direction=1):
mtx = data
new_mtx = []
if direction==-1:
x = rangex[::-1]
else:
x = rangex
for line in data:
z = np.squeeze(np.array(line))
dz = np.zeros(x.shape,np.float)
dz[0:-1] = np.diff(z)/np.diff(x)
dz[-1] = (z[-1] - z[-2])/(x[-1] - x[-2])
new_mtx.append(dz)
return np.matrix(new_mtx)
'''
#Main stlabmtx class
'''
class stlabmtx():
def __init__(self, mtx=np.zeros([0,0]), rangex=None, rangey=None, xtitle='xtitle', ytitle='ytitle', ztitle = 'ztitle'):
self.mtx = np.matrix(copy.deepcopy(mtx))
print(self.mtx.shape)
self.processlist = []
self.pmtx = self.mtx
if rangex is None:
self.rangex = np.arange(self.mtx.shape[1])
else:
self.rangex = np.asarray(rangex)
if rangey is None:
self.rangey = np.arange(self.mtx.shape[0])
else:
self.rangey = np.asarray(rangey)
self.xtitle=str(xtitle)
self.ytitle=str(ytitle)
self.ztitle = str(ztitle)
self.xtitle0=self.xtitle
self.ytitle0=self.ytitle
self.ztitle0=self.ztitle
self.rangex0 = self.rangex
self.rangey0 = self.rangey
def getextents(self):
return (self.rangex[0],self.rangex[-1],self.rangey[-1],self.rangey[0])
# Functions from spyview
def absolute(self):
self.pmtx = abs(self.pmtx)
self.processlist.append('abs')
def crop(data,left=None,right=None,up=None,low=None):
#TODO check
valdict={'left':left,'right':right,'up':up,'low':low}
for key,val in valdict.items():
if val==0:
valdict[key] = None
else:
valdict[key] = int(val)
data.pmtx = data.pmtx[valdict['left']:valdict['right'],valdict['up']:valdict['low']]
data.rangex = data.rangex[valdict['up']:valdict['low']]
data.rangey = data.rangey[valdict['left']:valdict['right']]
for key,val in valdict.items():
if val==None:
valdict[key] = 0
data.processlist.append('crop {},{},{},{}'.format(valdict['left'],valdict['right'],valdict['up'],valdict['low']))
def flip(self,x=False,y=False):
x=bool(x)
y=bool(y)
if x:
self.pmtx = np.fliplr(self.pmtx)
self.rangex = self.rangex[::-1]
if y:
self.pmtx = np.flipud(self.pmtx)
self.rangey = self.rangey[::-1]
self.processlist.append('flip {:d},{:d}'.format(x,y))
def log10(self):
self.pmtx = np.log10(self.pmtx)
self.processlist.append('log10')
def lowpass(self,x=0,y=0):
# TODO implement different filter types
self.pmtx = np.matrix(gaussian_filter(np.squeeze(np.asarray(self.pmtx)),sigma=[int(y),int(x)]))
self.processlist.append('lowpass {},{}'.format(x,y))
def neg(self):
self.pmtx = -self.pmtx
self.processlist.append('neg')
def offset(self,x=0):
self.pmtx = self.pmtx + x
self.processlist.append('offset {}'.format(x))
def offset_axes(self,x=0,y=0):
self.rangex+=x
self.rangey+=y
self.processlist.append('offset_axes {},{}'.format(x,y))
def outlier(self,line,vertical=1):
self.pmtx = np.delete(self.pmtx,line,axis=int(vertical))
if bool(vertical):
self.rangex = np.delete(self.rangex, line)
else:
self.rangey = np.delete(self.rangey, line)
self.processlist.append('outlier {},{}'.format(line,vertical))
def pixel_avg(self,nx=0,ny=0,center=0):
nx=int(nx); ny=int(ny)
if bool(center):
self.pmtx = ndimage.generic_filter(self.pmtx, np.nanmean, size=(nx,ny), mode='constant',cval=np.NaN)
else:
mask = np.ones((nx, ny))
mask[int(nx/2), int(ny/2)] = 0
self.pmtx = ndimage.generic_filter(self.pmtx, np.nanmean, footprint=mask, mode='constant', cval=np.NaN)
self.processlist.append('pixel_avg {},{},{}'.format(nx,ny,center))
def rotate_ccw(self):
# still lacking the switching of the axes
self.pmtx = np.rot90(self.pmtx)
self.processlist.append('rotate_ccw')
self.xtitle, self.ytitle = self.ytitle, self.xtitle
self.rangex , self.rangey = self.rangey, self.rangex[::-1]
def rotate_cw(self):
# still lacking the switching of the axes
self.pmtx = np.rot90(self.pmtx,3)
self.processlist.append('rotate_cw')
self.xtitle, self.ytitle = self.ytitle, self.xtitle
self.rangex , self.rangey = self.rangey[::-1], self.rangex
def scale_data(self,factor=1.):
self.pmtx = factor*self.pmtx
self.processlist.append('scale {}'.format(factor))
def sub_lbl(self,lowp=40, highp=40, low_limit=-1e99, high_limit=1e99):
self.pmtx = sub_lbl(self.pmtx,lowp,highp,low_limit,high_limit)
self.processlist.append('sub_lbl {},{},{},{}'.format(lowp,highp,low_limit,high_limit))
def sub_cbc(self,lowp=40, highp=40, low_limit=-1e99, high_limit=1e99):
self.pmtx = sub_lbl(self.pmtx.T,lowp,highp,low_limit,high_limit).T
self.processlist.append('sub_cbc {},{},{},{}'.format(lowp,highp,low_limit,high_limit))
def sub_linecut(self, pos, horizontal=1):
pos = int(pos)
if bool(horizontal):
v = self.pmtx[pos,:]
self.pmtx-=v
else:
v = self.pmtx[:,pos].T
mtx = self.pmtx.T - v
self.pmtx = mtx.T
self.processlist.append('sub_linecut {},{}'.format(pos,horizontal))
def xderiv(self,direction=1):
mtx = self.pmtx.copy()
self.pmtx = xderiv(mtx,self.rangex,direction)
self.processlist.append('xderiv {}'.format(direction))
def yderiv(self,direction=1):
mtx = self.pmtx.copy()
self.pmtx = xderiv(mtx,self.rangey,direction,axis=0)
self.processlist.append('yderiv {}'.format(direction))
#Use slow versions for unequally spaced ranges
def xderiv_slow(self,direction=1):
mtx = self.pmtx.copy()
self.pmtx = xderiv(mtx,self.rangex,direction)
self.processlist.append('xderiv_slow {}'.format(direction))
def yderiv_slow(self,direction=1):
mtx = self.pmtx.copy().T
self.pmtx = xderiv(mtx,self.rangey,direction).T
self.processlist.append('yderiv_slow {}'.format(direction))
def transpose(self):
self.pmtx = self.pmtx.T
self.xtitle, self.ytitle = self.ytitle, self.xtitle
self.rangex, self.rangey = self.rangey, self.rangex
self.processlist.append('transpose')
# Processlist
def saveprocesslist(self,filename = './process.pl'):
myfile = open(filename,'w')
for line in self.processlist:
myfile.write(line + '\n')
myfile.close()
def applystep(self,line):
sline = line.split(' ')
if len(sline) == 1:
func = sline[0]
pars = []
else:
pars = sline[1].split(',')
func = sline[0].strip()
if func is '':
return
else:
pars = [float(x) for x in pars]
method = getattr(self, func)
print(func,pars)
method(*pars)
def applyprocesslist(self,pl):
for line in pl:
self.applystep(line)
def applyprocessfile(self,filename):
with open(filename,'r') as myfile:
for line in myfile:
if '#' == line[0]:
continue
self.applystep(line)
def reset(self):
self.processlist = []
self.pmtx = self.mtx
self.xtitle = self.xtitle0
self.ytitle = self.ytitle0
self.rangex = self.rangex0
self.rangey = self.rangey0
def delstep(self,ii):
newpl = copy.deepcopy(self.processlist)
del newpl[ii]
self.reset()
self.applyprocesslist(newpl)
def insertstep(self,ii,line):
newpl = copy.deepcopy(self.processlist)
newpl.insert(ii,line)
self.reset()
self.applyprocesslist(newpl)
#Uses pickle to save to file
def save(self,name = 'output'):
filename = name + '.mtx.pkl'
with open(filename, 'wb') as outfile:
pickle.dump(self,outfile, pickle.HIGHEST_PROTOCOL)
#To load:
#import pickle
#with open(filename, 'rb') as input:
# mtx1 = pickle.load(input)
def savemtx(self,filename = './output'):
filename = filename + '.mtx'
with open(filename, 'wb') as outfile:
ztitle = self.ztitle
xx = self.rangex
yy = self.rangey
line = ['Units',ztitle, self.xtitle,'{:e}'.format(xx[0]),'{:e}'.format(xx[-1]), self.ytitle,'{:e}'.format(yy[0]),'{:e}'.format(yy[-1]), 'Nothing',str(0),str(1)]
mystr = ', '.join(line)
mystr = bytes(mystr + '\n', 'ASCII')
outfile.write(mystr)
mystr = str(self.pmtx.shape[1]) + ' ' + str(self.pmtx.shape[0]) + ' ' + '1 8\n'
mystr = bytes(mystr, 'ASCII')
outfile.write(mystr)
data = self.pmtx
data = np.squeeze(np.asarray(np.ndarray.flatten(data,order='F')))
print(len(data))
s = struct.pack('d'*len(data), *data)
outfile.write(s)
# Units, Data Value ,Y, 0.000000e+00, 2.001000e+03,Z, 0.000000e+00, 6.010000e+02,Nothing, 0, 1
# 2001 601 1 8
#Units, Dataset name, xname, xmin, xmax, yname, ymin, ymax, zname, zmin, zmax
#nx ny nz length
#dB, S21dB, Frequency (Hz), 6.000000e+09, 8.300000e+09, Vgate (V), 3.000000e+01, -3.000000e+01, Nothing, 0, 1
#2001 601 1 8
def loadmtx(self,filename):
with open(filename,'rb') as infile:
content = infile.readline()
content = content.decode('ASCII')
if content[:5] == 'Units':
content = content.split(',')
content = [x.strip() for x in content]
self.ztitle0 = content[1]
self.xtitle0 = content[2]
self.ytitle0 = content[5]
xlow = np.float64(content[3])
xhigh = np.float64(content[4])
ylow = np.float64(content[6])
yhigh = np.float64(content[7])
content = infile.readline()
content = content.decode('ASCII')
content = content.split(' ')
nx = int(content[0])
ny = int(content[1])
lb = int(content[3])
self.rangex0 = np.linspace(xlow,xhigh,nx)
self.rangey0 = np.linspace(ylow,yhigh,ny)
else:
content = content.decode('ASCII')
content = content.split(' ')
nx = int(content[0])
ny = int(content[1])
lb = int(content[3])
self.rangex0 = np.linspace(1,nx,nx)
self.rangey0 = np.linspace(1,ny,ny)
n = nx*ny
content = infile.read()
if lb == 8:
s = struct.unpack('d'*n, content)
elif lb == 4:
s = struct.unpack('f'*n, content)
s = np.asarray(s)
s = np.matrix(np.reshape(s,(ny,nx),order='F'))
self.mtx = s
self.reset()
'''
|
yausern/stlab
|
utils/stlabdict.py
|
Python
|
gpl-3.0
| 48,994
|
[
"Gaussian"
] |
ef23824c5b5868e1d8f3e7800dcf3003cde59e3f946d7d7358e7b7d2888bc1ea
|
# -*- coding: utf-8 -*-
"""Generate graphs with a given degree sequence or expected degree sequence.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import heapq
from itertools import combinations
import math
from operator import itemgetter
import random
import networkx as nx
from networkx.utils import random_weighted_sample
# Authors of this module, one "Name <email>" entry per line.
# NOTE: the original list was missing two commas, so adjacent string
# literals were implicitly concatenated and two pairs of authors were
# silently merged into single entries.
__author__ = "\n".join(['Aric Hagberg <aric.hagberg@gmail.com>',
                        'Pieter Swart <swart@lanl.gov>',
                        'Dan Schult <dschult@colgate.edu>',
                        'Joel Miller <joel.c.miller.research@gmail.com>',
                        'Nathan Lemons <nlemons@gmail.com>',
                        'Brian Cloteaux <brian.cloteaux@nist.gov>'])
__all__ = ['configuration_model',
'directed_configuration_model',
'expected_degree_graph',
'havel_hakimi_graph',
'directed_havel_hakimi_graph',
'degree_sequence_tree',
'random_degree_sequence_graph']
def configuration_model(deg_sequence,create_using=None,seed=None):
    """Return a random pseudograph realizing the given degree sequence.

    Each node receives a number of "stubs" equal to its requested
    degree; the stubs are shuffled and joined two at a time, so the
    result may contain self loops and parallel edges.

    Parameters
    ----------
    deg_sequence : list of integers
        Entry ``i`` is the degree of node ``i``.
    create_using : graph, optional (default MultiGraph)
        Graph instance to populate (it is cleared first).  Directed
        graphs are rejected.
    seed : hashable object, optional
        Seed for the random number generator.

    Returns
    -------
    G : MultiGraph
        A graph on nodes ``0 .. len(deg_sequence)-1`` with the given
        degree sequence (a self loop counts 2 toward a node's degree).

    Raises
    ------
    NetworkXError
        If the degree sequence has an odd sum, or ``create_using`` is
        directed.

    Notes
    -----
    As described by Newman [1]_.  A non-graphical sequence is accepted
    because loops and multi-edges are allowed; stripping them afterwards
    (``nx.Graph(G)``, ``G.remove_edges_from(G.selfloop_edges())``) will
    in general change the realized degrees.  The density of loops and
    parallel edges decreases as the number of nodes grows.

    References
    ----------
    .. [1] M.E.J. Newman, "The structure and function of complex networks",
       SIAM REVIEW 45-2, pp 167-256, 2003.
    """
    # An odd stub total can never be paired off completely.
    if not sum(deg_sequence)%2 ==0:
        raise nx.NetworkXError('Invalid degree sequence')
    if create_using is None:
        create_using = nx.MultiGraph()
    elif create_using.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")
    if seed is not None:
        random.seed(seed)
    num_nodes = len(deg_sequence)
    # Start from an empty graph that permits loops and multi-edges.
    graph = nx.empty_graph(num_nodes, create_using)
    if num_nodes == 0 or max(deg_sequence) == 0:
        # No stubs at all -- nothing to wire up.
        return graph
    # One stub per unit of degree, e.g. deg_sequence=[3,2,1] gives
    # stublist=[0,0,0,1,1,2]: node 0 appears 3 times, etc.
    stublist = [node for node in graph
                for _ in range(deg_sequence[node])]
    # Shuffle once, then pair off stubs from the end two at a time.
    random.shuffle(stublist)
    while stublist:
        first = stublist.pop()
        second = stublist.pop()
        graph.add_edge(first, second)
    graph.name = "configuration_model %d nodes %d edges"%(graph.order(),graph.size())
    return graph
def directed_configuration_model(in_degree_sequence,
                                 out_degree_sequence,
                                 create_using=None,seed=None):
    """Return a random directed pseudograph with the given degree sequences.

    In-stubs and out-stubs are created per node, shuffled independently,
    and paired into arcs, so the result may contain self loops and
    parallel edges.

    Parameters
    ----------
    in_degree_sequence : list of integers
        Entry ``i`` is the in-degree of node ``i``.
    out_degree_sequence : list of integers
        Entry ``i`` is the out-degree of node ``i``.
    create_using : graph, optional (default MultiDiGraph)
        Graph instance to populate (it is cleared first).
    seed : hashable object, optional
        Seed for the random number generator.

    Returns
    -------
    G : MultiDiGraph
        A graph on nodes ``0 .. N-1`` with the given in/out degrees,
        where ``N`` is the longer of the two sequences (the shorter one
        is zero-padded in place).

    Raises
    ------
    NetworkXError
        If the two degree sequences do not have the same sum.

    See Also
    --------
    configuration_model

    Notes
    -----
    Algorithm as described by Newman [1]_.  Removing self loops and
    parallel edges afterwards will in general change the realized
    degree sequences; this finite-size effect shrinks as the graph
    grows.

    References
    ----------
    .. [1] Newman, M. E. J. and Strogatz, S. H. and Watts, D. J.
       Random graphs with arbitrary degree distributions and their applications
       Phys. Rev. E, 64, 026118 (2001)
    """
    # Every arc consumes one out-stub and one in-stub, so the totals
    # must agree exactly.
    if sum(in_degree_sequence) != sum(out_degree_sequence):
        raise nx.NetworkXError('Invalid degree sequences. '
                               'Sequences must have equal sums.')
    if create_using is None:
        create_using = nx.MultiDiGraph()
    if seed is not None:
        random.seed(seed)
    n_in = len(in_degree_sequence)
    n_out = len(out_degree_sequence)
    # Zero-pad the shorter sequence (in place, matching the original
    # behavior) so both cover the same node set.
    if n_in > n_out:
        out_degree_sequence.extend((n_in - n_out)*[0])
    else:
        in_degree_sequence.extend((n_out - n_in)*[0])
    num_nodes = len(in_degree_sequence)
    # Start from an empty graph that permits loops and multi-edges.
    graph = nx.empty_graph(num_nodes, create_using)
    if num_nodes == 0 or max(in_degree_sequence) == 0:
        # No stubs at all -- nothing to wire up.
        return graph
    # One stub per unit of degree; node i appears in_degree_sequence[i]
    # times in the in-stub list, and likewise for out-stubs.
    in_stublist = [node for node in graph
                   for _ in range(in_degree_sequence[node])]
    out_stublist = [node for node in graph
                    for _ in range(out_degree_sequence[node])]
    # Shuffle independently, then pair stubs from the ends into arcs.
    random.shuffle(in_stublist)
    random.shuffle(out_stublist)
    while in_stublist and out_stublist:
        source = out_stublist.pop()
        target = in_stublist.pop()
        graph.add_edge(source,target)
    graph.name = "directed configuration_model %d nodes %d edges"%(graph.order(),graph.size())
    return graph
def expected_degree_graph(w, seed=None, selfloops=True):
    r"""Return a random graph with given expected degrees.
    Given a sequence of expected degrees `W=(w_0,w_1,\ldots,w_{n-1}`)
    of length `n` this algorithm assigns an edge between node `u` and
    node `v` with probability
    .. math::
       p_{uv} = \frac{w_u w_v}{\sum_k w_k} .
    Parameters
    ----------
    w : list
        The list of expected degrees.
    selfloops: bool (default=True)
        Set to False to remove the possibility of self-loop edges.
    seed : hashable object, optional
        The seed for the random number generator.
    Returns
    -------
    Graph
    Examples
    --------
    >>> z=[10 for i in range(100)]
    >>> G=nx.expected_degree_graph(z)
    Notes
    -----
    The nodes have integer labels corresponding to index of expected degrees
    input sequence.
    The complexity of this algorithm is `\mathcal{O}(n+m)` where `n` is the
    number of nodes and `m` is the expected number of edges.
    The model in [1]_ includes the possibility of self-loop edges.
    Set selfloops=False to produce a graph without self loops.
    For finite graphs this model doesn't produce exactly the given
    expected degree sequence. Instead the expected degrees are as
    follows.
    For the case without self loops (selfloops=False),
    .. math::
       E[deg(u)] = \sum_{v \ne u} p_{uv}
                = w_u \left( 1 - \frac{w_u}{\sum_k w_k} \right) .
    NetworkX uses the standard convention that a self-loop edge counts 2
    in the degree of a node, so with self loops (selfloops=True),
    .. math::
       E[deg(u)] = \sum_{v \ne u} p_{uv} + 2 p_{uu}
                = w_u \left( 1 + \frac{w_u}{\sum_k w_k} \right) .
    References
    ----------
    .. [1] Fan Chung and L. Lu, Connected components in random graphs with
       given expected degree sequences, Ann. Combinatorics, 6,
       pp. 125-145, 2002.
    .. [2] Joel Miller and Aric Hagberg,
       Efficient generation of networks with given expected degrees,
       in Algorithms and Models for the Web-Graph (WAW 2011),
       Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732,
       pp. 115-126, 2011.
    """
    n = len(w)
    G=nx.empty_graph(n)
    if n==0 or max(w)==0: # done if no edges
        return G
    if seed is not None:
        random.seed(seed)
    # rho = 1 / sum of weights; p_{uv} = w_u * w_v * rho.
    rho = 1/float(sum(w))
    # sort weights, largest first
    # preserve order of weights for integer node label mapping
    order = sorted(enumerate(w),key=itemgetter(1),reverse=True)
    # mapping: position in the sorted order -> original node label.
    mapping = dict((c,uv[0]) for c,uv in enumerate(order))
    # seq: weights in descending order (work is done in sorted index space).
    seq = [v for u,v in order]
    last=n
    if not selfloops:
        last-=1
    for u in range(last):
        # v starts at u (or u+1 if loops are disallowed); since seq is
        # sorted descending, edge probabilities only shrink as v grows.
        v = u
        if not selfloops:
            v += 1
        factor = seq[u] * rho
        p = seq[v]*factor
        if p>1:
            p = 1
        while v<n and p>0:
            if p != 1:
                # Geometric skip (algorithm of [2]_): jump ahead over
                # candidates that would all fail at probability p, instead
                # of testing each v individually.
                r = random.random()
                v += int(math.floor(math.log(r)/math.log(1-p)))
            if v < n:
                # q is the true edge probability at the landing position;
                # accept with q/p to correct for having skipped using the
                # (larger) probability p.
                q = seq[v]*factor
                if q>1:
                    q = 1
                if random.random() < q/p:
                    G.add_edge(mapping[u],mapping[v])
                v += 1
                p = q
    return G
def havel_hakimi_graph(deg_sequence,create_using=None):
    """Return a simple graph with given degree sequence constructed
    using the Havel-Hakimi algorithm.
    Parameters
    ----------
    deg_sequence: list of integers
        Each integer corresponds to the degree of a node (need not be sorted).
    create_using : graph, optional (default Graph)
        Return graph of this type. The instance will be cleared.
        Directed graphs are not allowed.
    Raises
    ------
    NetworkXException
        For a non-graphical degree sequence (i.e. one
        not realizable by some simple graph).
    Notes
    -----
    The Havel-Hakimi algorithm constructs a simple graph by
    successively connecting the node of highest degree to other nodes
    of highest degree, resorting remaining nodes by degree, and
    repeating the process. The resulting graph has a high
    degree-associativity. Nodes are labeled 0,..., len(deg_sequence)-1,
    corresponding to their position in deg_sequence.
    The basic algorithm is from Hakimi [1]_ and was generalized by
    Kleitman and Wang [2]_.
    References
    ----------
    .. [1] Hakimi S., On Realizability of a Set of Integers as
       Degrees of the Vertices of a Linear Graph. I,
       Journal of SIAM, 10(3), pp. 496-506 (1962)
    .. [2] Kleitman D.J. and Wang D.L.
       Algorithms for Constructing Graphs and Digraphs with Given Valences
       and Factors  Discrete Mathematics, 6(1), pp. 79-88 (1973)
    """
    if not nx.is_valid_degree_sequence(deg_sequence):
        raise nx.NetworkXError('Invalid degree sequence')
    if create_using is not None:
        if create_using.is_directed():
            raise nx.NetworkXError("Directed graphs are not supported")
    p = len(deg_sequence)
    G=nx.empty_graph(p,create_using)
    # num_degs[d] is the bucket of node labels whose remaining degree is d.
    num_degs = []
    for i in range(p):
        num_degs.append([])
    # dmax: largest remaining degree; dsum: total remaining degree;
    # n: number of nodes with nonzero remaining degree.
    dmax, dsum, n = 0, 0, 0
    for d in deg_sequence:
        # Process only the non-zero integers
        if d>0:
            num_degs[d].append(n)
            dmax, dsum, n = max(dmax,d), dsum+d, n+1
    # Return graph if no edges
    if n==0:
        return G
    # Scratch buffer reused each round for stubs whose degree was reduced.
    modstubs = [(0,0)]*(dmax+1)
    # Successively reduce degree sequence by removing the maximum degree
    while n > 0:
        # Retrieve the maximum degree in the sequence
        while len(num_degs[dmax]) == 0:
            dmax -= 1;
        # If there are not enough stubs to connect to, then the sequence is
        # not graphical
        if dmax > n-1:
            raise nx.NetworkXError('Non-graphical integer sequence')
        # Remove largest stub in list
        source = num_degs[dmax].pop()
        n -= 1
        # Reduce the next dmax largest stubs
        mslen = 0
        k = dmax
        for i in range(dmax):
            # Walk down to the next nonempty bucket to find the current
            # highest-degree target.
            while len(num_degs[k]) == 0:
                k -= 1
            target = num_degs[k].pop()
            G.add_edge(source, target)
            n -= 1
            # Remember targets that still have stubs left (degree k-1 > 0)
            # so they can be re-bucketed after this round.
            if k > 1:
                modstubs[mslen] = (k-1,target)
                mslen += 1
        # Add back to the list any nonzero stubs that were removed
        for i in range(mslen):
            (stubval, stubtarget) = modstubs[i]
            num_degs[stubval].append(stubtarget)
            n += 1
    G.name="havel_hakimi_graph %d nodes %d edges"%(G.order(),G.size())
    return G
def directed_havel_hakimi_graph(in_deg_sequence,
                                out_deg_sequence,
                                create_using=None):
    """Return a directed graph with the given degree sequences.
    Parameters
    ----------
    in_deg_sequence :  list of integers
        Each list entry corresponds to the in-degree of a node.
    out_deg_sequence : list of integers
        Each list entry corresponds to the out-degree of a node.
    create_using : graph, optional (default DiGraph)
        Return graph of this type. The instance will be cleared.
    Returns
    -------
    G : DiGraph
        A graph with the specified degree sequences.
        Nodes are labeled starting at 0 with an index
        corresponding to the position in deg_sequence
    Raises
    ------
    NetworkXError
        If the degree sequences are not digraphical.
    See Also
    --------
    configuration_model
    Notes
    -----
    Algorithm as described by Kleitman and Wang [1]_.
    References
    ----------
    .. [1] D.J. Kleitman and D.L. Wang
       Algorithms for Constructing Graphs and Digraphs with Given Valences
       and Factors  Discrete Mathematics, 6(1), pp. 79-88 (1973)
    """
    # NOTE(review): input validation via assert is stripped when Python
    # runs with -O; callers relying on this check should not use -O.
    assert(nx.utils.is_list_of_ints(in_deg_sequence))
    assert(nx.utils.is_list_of_ints(out_deg_sequence))
    if create_using is None:
        create_using = nx.DiGraph()
    # Process the sequences and form two heaps to store degree pairs with
    # either zero or nonzero out degrees
    sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
    maxn = max(nin, nout)
    G = nx.empty_graph(maxn,create_using)
    if maxn==0:
        return G
    maxin = 0
    # stubheap: nodes with in_deg > 0, as (-out_deg, -in_deg, node) so the
    # min-heap pops the largest out-degree first.
    # zeroheap: nodes with in_deg == 0 but out_deg > 0, as (-out_deg, node).
    stubheap, zeroheap = [ ], [ ]
    for n in range(maxn):
        in_deg, out_deg = 0, 0
        if n<nout:
            out_deg = out_deg_sequence[n]
        if n<nin:
            in_deg = in_deg_sequence[n]
        if in_deg<0 or out_deg<0:
            raise nx.NetworkXError(
                'Invalid degree sequences. Sequence values must be positive.')
        sumin, sumout, maxin = sumin+in_deg, sumout+out_deg, max(maxin, in_deg)
        if in_deg > 0:
            stubheap.append((-1*out_deg, -1*in_deg,n))
        elif out_deg > 0:
            zeroheap.append((-1*out_deg,n))
    if sumin != sumout:
        raise nx.NetworkXError(
            'Invalid degree sequences. Sequences must have equal sums.')
    heapq.heapify(stubheap)
    heapq.heapify(zeroheap)
    # Scratch buffer reused each round for sources whose stub counts shrank.
    modstubs = [(0,0,0)]*(maxin+1)
    # Successively reduce degree sequence by removing the maximum
    while stubheap:
        # Remove first value in the sequence with a non-zero in degree
        (freeout, freein, target) = heapq.heappop(stubheap)
        freein *= -1
        # Need freein distinct sources besides target itself.
        if freein > len(stubheap)+len(zeroheap):
            raise nx.NetworkXError('Non-digraphical integer sequence')
        # Attach arcs from the nodes with the most stubs
        mslen = 0
        for i in range(freein):
            # Pick whichever heap currently exposes the larger out-degree
            # (values are negated, so the smaller tuple wins).
            if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0][0]):
                (stubout, stubsource) = heapq.heappop(zeroheap)
                stubin = 0
            else:
                (stubout, stubin, stubsource) = heapq.heappop(stubheap)
            if stubout == 0:
                raise nx.NetworkXError('Non-digraphical integer sequence')
            G.add_edge(stubsource, target)
            # Check if source is now totally connected
            if stubout+1<0 or stubin<0:
                modstubs[mslen] = (stubout+1, stubin, stubsource)
                mslen += 1
        # Add the nodes back to the heaps that still have available stubs
        for i in range(mslen):
            stub = modstubs[i]
            if stub[1] < 0:
                heapq.heappush(stubheap, stub)
            else:
                heapq.heappush(zeroheap, (stub[0], stub[2]))
        # target itself may still have out-stubs; its in-stubs are all used.
        if freeout<0:
            heapq.heappush(zeroheap, (freeout, target))
    G.name="directed_havel_hakimi_graph %d nodes %d edges"%(G.order(),G.size())
    return G
def degree_sequence_tree(deg_sequence,create_using=None):
    """Make a tree for the given degree sequence.
    A tree has #nodes-#edges=1 so
    the degree sequence must have
    len(deg_sequence)-sum(deg_sequence)/2=1

    Parameters
    ----------
    deg_sequence : list of integers
        Each entry is the degree of a node.
    create_using : graph, optional (default Graph)
        Return graph of this type.  Directed graphs are not allowed.

    Raises
    ------
    NetworkXError
        If the sequence does not satisfy the tree identity above, or
        ``create_using`` is directed.
    """
    # Tree identity: |V| - |E| = 1, with |E| = sum(degrees)/2.
    if not len(deg_sequence)-sum(deg_sequence)/2.0 == 1.0:
        raise nx.NetworkXError("Degree sequence invalid")
    if create_using is not None and create_using.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")
    # single node tree
    if len(deg_sequence)==1:
        G=nx.empty_graph(0,create_using)
        return G
    # all degrees greater than 1
    deg=[s for s in deg_sequence if s>1]
    deg.sort(reverse=True)
    # make path graph as backbone
    # Path 0-1-...-(n-1); interior nodes 1..n-2 will carry the degrees > 1
    # (each already has 2 backbone edges), endpoints are degree-1 leaves.
    n=len(deg)+2
    G=nx.path_graph(n,create_using)
    last=n
    # add the leaves
    for source in range(1,n-1):
        # deg is sorted descending, so pop() hands out the smallest
        # remaining degree; subtract the 2 backbone edges already present.
        nedges=deg.pop()-2
        for target in range(last,last+nedges):
            G.add_edge(source, target)
        last+=nedges
    # in case we added one too many
    if len(G) > len(deg_sequence):
        G.remove_node(0)
    return G
def random_degree_sequence_graph(sequence, seed=None, tries=10):
    r"""Return a simple random graph with the given degree sequence.

    Repeatedly runs the sequential sampling algorithm of Bayati, Kim
    and Saberi [1]_ until it succeeds or ``tries`` attempts are
    exhausted.  If the maximum degree `d_m` in the sequence is
    `O(m^{1/4})` the algorithm produces almost uniform random graphs in
    `O(m d_m)` time, where `m` is the number of edges.

    Parameters
    ----------
    sequence : list of integers
        Sequence of degrees.
    seed : hashable object, optional
        Seed for the random number generator.
    tries : int, optional
        Maximum number of attempts to create a graph.

    Returns
    -------
    G : Graph
        A graph with the specified degree sequence; node ``i``
        corresponds to position ``i`` in the sequence.

    Raises
    ------
    NetworkXUnfeasible
        If the degree sequence is not graphical.
    NetworkXError
        If no graph is produced within ``tries`` attempts.

    See Also
    --------
    is_valid_degree_sequence, configuration_model

    Notes
    -----
    A single attempt of the generator algorithm [1]_ is not guaranteed
    to produce a graph, hence the retry loop.

    References
    ----------
    .. [1] Moshen Bayati, Jeong Han Kim, and Amin Saberi,
       A sequential algorithm for generating random graphs.
       Algorithmica, Volume 58, Number 4, 860-910,
       DOI: 10.1007/s00453-009-9340-1

    Examples
    --------
    >>> sequence = [1, 2, 2, 3]
    >>> G = nx.random_degree_sequence_graph(sequence)
    >>> sorted(d for n, d in G.degree())
    [1, 2, 2, 3]
    """
    # Constructor validates graphicality and seeds the RNG once.
    generator = DegreeSequenceRandomGraph(sequence, seed=seed)
    for _ in range(tries):
        try:
            return generator.generate()
        except nx.NetworkXUnfeasible:
            # Attempt painted itself into a corner; start over.
            pass
    raise nx.NetworkXError('failed to generate graph in %d tries'%tries)
class DegreeSequenceRandomGraph(object):
    # class to generate random graphs with a given degree sequence
    # use random_degree_sequence_graph()
    #
    # Implements the three-phase sequential algorithm of Bayati, Kim and
    # Saberi: edges are proposed at random and accepted with a probability
    # that corrects the distribution toward uniform over simple graphs
    # realizing the sequence.
    def __init__(self, degree, seed=None):
        """Validate the degree sequence and seed the RNG.

        Raises NetworkXUnfeasible if the sequence is not graphical.
        """
        if not nx.is_valid_degree_sequence(degree):
            raise nx.NetworkXUnfeasible('degree sequence is not graphical')
        if seed is not None:
            random.seed(seed)
        self.degree = list(degree)
        # node labels are integers 0,...,n-1
        self.m = sum(self.degree)/2.0 # number of edges
        try:
            self.dmax = max(self.degree) # maximum degree
        except ValueError:
            # empty degree sequence
            self.dmax = 0
    def generate(self):
        """Run one sampling attempt; may raise NetworkXUnfeasible."""
        # remaining_degree is mapping from int->remaining degree
        self.remaining_degree = dict(enumerate(self.degree))
        # add all nodes to make sure we get isolated nodes
        self.graph = nx.Graph()
        self.graph.add_nodes_from(self.remaining_degree)
        # remove zero degree nodes
        for n,d in list(self.remaining_degree.items()):
            if d == 0:
                del self.remaining_degree[n]
        if len(self.remaining_degree) > 0:
            # build graph in three phases according to how many unmatched edges
            self.phase1()
            self.phase2()
            self.phase3()
        return self.graph
    def update_remaining(self, u, v, aux_graph=None):
        """Consume one stub at each of u and v after adding edge (u, v)."""
        # decrement remaining nodes, modify auxilliary graph if in phase3
        if aux_graph is not None:
            # remove edges from auxilliary graph
            aux_graph.remove_edge(u,v)
        if self.remaining_degree[u] == 1:
            # last stub used up: drop u from the working set entirely
            del self.remaining_degree[u]
            if aux_graph is not None:
                aux_graph.remove_node(u)
        else:
            self.remaining_degree[u] -= 1
        if self.remaining_degree[v] == 1:
            del self.remaining_degree[v]
            if aux_graph is not None:
                aux_graph.remove_node(v)
        else:
            self.remaining_degree[v] -= 1
    def p(self,u,v):
        # degree probability: acceptance factor 1 - d_u d_v / 4m from the
        # Bayati-Kim-Saberi weighting
        return 1 - self.degree[u]*self.degree[v]/(4.0*self.m)
    def q(self,u,v):
        # remaining degree probability, normalized so the maximum is 1
        norm = float(max(self.remaining_degree.values()))**2
        return self.remaining_degree[u]*self.remaining_degree[v]/norm
    def suitable_edge(self):
        """Returns ``True`` if and only if an arbitrary remaining node can
        potentially be joined with some other remaining node.
        """
        nodes = iter(self.remaining_degree)
        u = next(nodes)
        return any(v not in self.graph[u] for v in nodes)
    def phase1(self):
        # choose node pairs from (degree) weighted distribution
        # Runs while many stubs remain, so collisions are rare.
        while sum(self.remaining_degree.values()) >= 2 * self.dmax**2:
            u,v = sorted(random_weighted_sample(self.remaining_degree, 2))
            if self.graph.has_edge(u,v):
                continue
            if random.random() < self.p(u,v): # accept edge
                self.graph.add_edge(u,v)
                self.update_remaining(u,v)
    def phase2(self):
        # choose remaining nodes uniformly at random and use rejection sampling
        while len(self.remaining_degree) >= 2 * self.dmax:
            norm = float(max(self.remaining_degree.values()))**2
            while True:
                # NOTE(review): random.sample on a dict keys view raises
                # TypeError on Python >= 3.9; needs list(...) there.
                u,v = sorted(random.sample(self.remaining_degree.keys(), 2))
                if self.graph.has_edge(u,v):
                    continue
                if random.random() < self.q(u,v):
                    break
            if random.random() < self.p(u,v): # accept edge
                self.graph.add_edge(u,v)
                self.update_remaining(u,v)
    def phase3(self):
        # build potential remaining edges and choose with rejection sampling
        potential_edges = combinations(self.remaining_degree, 2)
        # build auxilliary graph of potential edges not already in graph
        H = nx.Graph([(u,v) for (u,v) in potential_edges
                      if not self.graph.has_edge(u,v)])
        while self.remaining_degree:
            if not self.suitable_edge():
                # every candidate partner is already a neighbor: dead end
                raise nx.NetworkXUnfeasible('no suitable edges left')
            while True:
                u,v = sorted(random.choice(list(H.edges())))
                if random.random() < self.q(u,v):
                    break
            if random.random() < self.p(u,v): # accept edge
                self.graph.add_edge(u,v)
                self.update_remaining(u,v, aux_graph=H)
|
ltiao/networkx
|
networkx/generators/degree_seq.py
|
Python
|
bsd-3-clause
| 27,046
|
[
"Brian"
] |
bc4b4d55d62e8a98e09f9bffd2b58f03864cfd6caca97cf8dc0d9973106c8bc5
|
from typing import Optional
# Zulip Settings intended to be set by a system administrator.
#
# See http://zulip.readthedocs.io/en/latest/settings.html for
# detailed technical documentation on the Zulip settings system.
#
### MANDATORY SETTINGS
#
# These settings MUST be set in production. In a development environment,
# sensible default values will be used.
# The user-accessible Zulip hostname for this installation, e.g.
# zulip.example.com. This should match what users will put in their
# web browser. If you want to allow multiple hostnames, add the rest
# to ALLOWED_HOSTS.
#
# If you need to access the server on a specific port, you should set
# EXTERNAL_HOST to e.g. zulip.example.com:1234 here.
EXTERNAL_HOST = 'zulip.example.com'
# A comma-separated list of strings representing the host/domain names
# that your users will enter in their browsers to access your Zulip
# server. This is a security measure to prevent an attacker from
# poisoning caches and triggering password reset emails with links to
# malicious hosts by submitting requests with a fake HTTP Host
# header. See Django's documentation here:
# <https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts>.
# Zulip adds 'localhost' and '127.0.0.1' to the list automatically.
#
# The default should work unless you are using multiple hostnames or
# connecting directly to your server's IP address. If this is set
# wrong, all requests will get a 400 "Bad Request" error.
#
# Note that these should just be hostnames, without port numbers.
ALLOWED_HOSTS = [EXTERNAL_HOST.split(":")[0]]
# The email address for the person or team who maintains the Zulip
# installation. Note that this is a public-facing email address; it may
# appear on 404 pages, is used as the sender's address for many automated
# emails, and is advertised as a support address. An email address like
# support@example.com is totally reasonable, as is admin@example.com.
# Do not put a display name; e.g. 'support@example.com', not
# 'Zulip Support <support@example.com>'.
ZULIP_ADMINISTRATOR = 'zulip-admin@example.com'
# Configure the outgoing Email (aka SMTP) server below. You will need
# working SMTP to complete the installation process, in addition to
# sending email address confirmations, missed message notifications,
# onboarding follow-ups, and other user needs. If you do not have an
# SMTP server already, we recommend services intended for developers
# such as Mailgun. Detailed documentation is available at:
#
# https://zulip.readthedocs.io/en/latest/prod-email.html
#
# To configure SMTP, you will need to complete the following steps:
#
# (1) Fill out the outgoing email sending configuration below.
#
# (2) Put the SMTP password for EMAIL_HOST_USER in
# /etc/zulip/zulip-secrets.conf as e.g.:
#
# email_password = abcd1234
#
# You can quickly test your sending email configuration using:
# su zulip
# /home/zulip/deployments/current/manage.py send_test_email username@example.com
#
# A common problem is hosting provider firewalls that block outgoing SMTP traffic.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = ''
EMAIL_PORT = 587
EMAIL_USE_TLS = True
### OPTIONAL SETTINGS
# The noreply address to be used as the sender for certain generated
# emails. Messages sent to this address could contain sensitive user
# data and should not be delivered anywhere. The default is
# e.g. noreply@zulip.example.com (if EXTERNAL_HOST is
# zulip.example.com).
#NOREPLY_EMAIL_ADDRESS = 'noreply@example.com'
### AUTHENTICATION SETTINGS
#
# Enable at least one of the following authentication backends.
# See http://zulip.readthedocs.io/en/latest/prod-authentication-methods.html
# for documentation on our authentication backends.
AUTHENTICATION_BACKENDS = (
'zproject.backends.EmailAuthBackend', # Email and password; just requires SMTP setup
# 'zproject.backends.GoogleMobileOauth2Backend', # Google Apps, setup below
# 'zproject.backends.GitHubAuthBackend', # GitHub auth, setup below
# 'zproject.backends.ZulipLDAPAuthBackend', # LDAP, setup below
# 'zproject.backends.ZulipRemoteUserBackend', # Local SSO, setup docs on readthedocs
)
# To enable Google authentication, you need to do the following:
#
# (1) Visit https://console.developers.google.com, click on Credentials on
# the left sidebar and create a Oauth2 client ID
# e.g. https://zulip.example.com/accounts/login/google/done/.
#
# (2) Go to the Library (left sidebar), then under "Social APIs" click on
# "Google+ API" and click the button to enable the API.
#
# (3) put your client secret as "google_oauth2_client_secret" in
# zulip-secrets.conf, and your client ID right here:
# GOOGLE_OAUTH2_CLIENT_ID=<your client ID from Google>
# To enable GitHub authentication, you will need to do the following:
#
# (1) Register an OAuth2 application with GitHub at one of:
# https://github.com/settings/developers
# https://github.com/organizations/ORGNAME/settings/developers
# Specify e.g. https://zulip.example.com/complete/github/ as the callback URL.
#
# (2) Put your "Client ID" as SOCIAL_AUTH_GITHUB_KEY below and your
# "Client secret" as social_auth_github_secret in
# /etc/zulip/zulip-secrets.conf.
# SOCIAL_AUTH_GITHUB_KEY = <your client ID from GitHub>
#
# (3) You can also configure the GitHub integration to only allow
# members of a particular GitHub team or organization to login to your
# Zulip server using GitHub authentication; to enable this, set one of the
# two parameters below:
# SOCIAL_AUTH_GITHUB_TEAM_ID = <your team id>
# SOCIAL_AUTH_GITHUB_ORG_NAME = <your org name>
# If you are using the ZulipRemoteUserBackend authentication backend,
# set this to your domain (e.g. if REMOTE_USER is "username" and the
# corresponding email address is "username@example.com", set
# SSO_APPEND_DOMAIN = "example.com")
SSO_APPEND_DOMAIN = None # type: Optional[str]
# Support for mobile push notifications. Setting controls whether
# push notifications will be forwarded through a Zulip push
# notification bouncer server to the mobile apps. See
# https://zulip.readthedocs.io/en/latest/prod-mobile-push-notifications.html
# for information on how to sign up for and configure this.
#PUSH_NOTIFICATION_BOUNCER_URL = 'https://push.zulipchat.com'
# Controls whether session cookies expire when the browser closes
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# Session cookie expiry in seconds after the last page load
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # 2 weeks
# Password strength requirements; learn about configuration at
# http://zulip.readthedocs.io/en/latest/security-model.html.
# PASSWORD_MIN_LENGTH = 6
# PASSWORD_MIN_ZXCVBN_QUALITY = 0.5
# Controls whether Zulip sends "new login" email notifications.
#SEND_LOGIN_EMAILS = True
# Controls whether or not there is a feedback button in the UI.
ENABLE_FEEDBACK = False
# Feedback sent by your users will be sent to this email address.
FEEDBACK_EMAIL = ZULIP_ADMINISTRATOR
# Controls whether or not error reports (tracebacks) are emailed to the
# server administrators.
#ERROR_REPORTING = True
# For frontend (JavaScript) tracebacks
#BROWSER_ERROR_REPORTING = False
# If True, each log message in the server logs will identify the
# Python module where it came from. Useful for tracking down a
# mysterious log message, but a little verbose.
#LOGGING_SHOW_MODULE = False
# Controls whether or not Zulip will provide inline image preview when
# a link to an image is referenced in a message. Note: this feature
# can also be disabled in a realm's organization settings.
#INLINE_IMAGE_PREVIEW = True
# Controls whether or not Zulip will provide inline previews of
# websites that are referenced in links in messages. Note: this feature
# can also be disabled in a realm's organization settings.
#INLINE_URL_EMBED_PREVIEW = False
# Controls whether or not Zulip will parse links starting with
# "file:///" as a hyperlink (useful if you have e.g. an NFS share).
ENABLE_FILE_LINKS = False
# By default, files uploaded by users and user avatars are stored
# directly on the Zulip server. If file storage in Amazon S3 is
# desired, you can configure that as follows:
#
# (1) Set s3_key and s3_secret_key in /etc/zulip/zulip-secrets.conf to
# be the S3 access and secret keys that you want to use, and setting
# the S3_AUTH_UPLOADS_BUCKET and S3_AVATAR_BUCKET to be the S3 buckets
# you've created to store file uploads and user avatars, respectively.
# Then restart Zulip (scripts/restart-server).
#
# (2) Edit /etc/nginx/sites-available/zulip-enterprise to comment out
# the nginx configuration for /user_uploads and /user_avatars (see
# https://github.com/zulip/zulip/issues/291 for discussion of a better
# solution that won't be automatically reverted by the Zulip upgrade
# script), and then restart nginx.
LOCAL_UPLOADS_DIR = "/home/zulip/uploads"
#S3_AUTH_UPLOADS_BUCKET = ""
#S3_AVATAR_BUCKET = ""
# Maximum allowed size of uploaded files, in megabytes. DO NOT SET
# ABOVE 80MB. The file upload implementation doesn't support chunked
# uploads, so browsers will crash if you try uploading larger files.
MAX_FILE_UPLOAD_SIZE = 25
# Controls whether name changes are completely disabled for this installation
# This is useful in settings where you're syncing names from an integrated LDAP/Active Directory
NAME_CHANGES_DISABLED = False
# Controls whether users who have not uploaded an avatar will receive an avatar
# from gravatar.com.
ENABLE_GRAVATAR = True
# To override the default avatar image if ENABLE_GRAVATAR is False, place your
# custom default avatar image at /home/zulip/local-static/default-avatar.png
# and uncomment the following line.
#DEFAULT_AVATAR_URI = '/local-static/default-avatar.png'
# To access an external postgres database you should define the host name in
# REMOTE_POSTGRES_HOST, you can define the password in the secrets file in the
# property postgres_password, and the SSL connection mode in REMOTE_POSTGRES_SSLMODE
# Different options are:
# disable: I don't care about security, and I don't want to pay the overhead of encryption.
# allow: I don't care about security, but I will pay the overhead of encryption if the server insists on it.
# prefer: I don't care about encryption, but I wish to pay the overhead of encryption if the server supports it.
# require: I want my data to be encrypted, and I accept the overhead. I trust that the network will make sure
# I always connect to the server I want.
# verify-ca: I want my data encrypted, and I accept the overhead. I want to be sure that I connect to a server
# that I trust.
# verify-full: I want my data encrypted, and I accept the overhead. I want to be sure that I connect to a server
# I trust, and that it's the one I specify.
#REMOTE_POSTGRES_HOST = 'dbserver.example.com'
#REMOTE_POSTGRES_SSLMODE = 'require'
# If you want to set a Terms of Service for your server, set the path
# to your markdown file, and uncomment the following line.
#TERMS_OF_SERVICE = '/etc/zulip/terms.md'
# Similarly if you want to set a Privacy Policy.
#PRIVACY_POLICY = '/etc/zulip/privacy.md'
### TWITTER INTEGRATION
# Zulip supports showing inline Tweet previews when a tweet is linked
# to in a message. To support this, Zulip must have access to the
# Twitter API via OAuth. To obtain the various access tokens needed
# below, you must register a new application under your Twitter
# account by doing the following:
#
# 1. Log in to http://dev.twitter.com.
# 2. In the menu under your username, click My Applications. From this page, create a new application.
# 3. Click on the application you created and click "create my access token".
# 4. Fill in the values for twitter_consumer_key, twitter_consumer_secret, twitter_access_token_key,
# and twitter_access_token_secret in /etc/zulip/zulip-secrets.conf.
### EMAIL GATEWAY INTEGRATION
# The Email gateway integration supports sending messages into Zulip
# by sending an email. This is useful for receiving notifications
# from third-party services that only send outgoing notifications via
# email. Once this integration is configured, each stream will have
# an email address documented on the stream settings page, and emails
# sent to that address will be delivered into the stream.
#
# There are two ways to configure email mirroring in Zulip:
# 1. Local delivery: A MTA runs locally and passes mail directly to Zulip
# 2. Polling: Checks an IMAP inbox every minute for new messages.
#
# The local delivery configuration is preferred for production because
# it supports nicer looking email addresses and has no cron delay,
# while the polling mechanism is better for testing/developing this
# feature because it doesn't require a public-facing IP/DNS setup.
#
# The main email mirror setting is the email address pattern, where
# you specify the email address format you'd like the integration to
# use. It should be one of the following:
# %s@zulip.example.com (for local delivery)
# username+%s@example.com (for polling if EMAIL_GATEWAY_LOGIN=username@example.com)
EMAIL_GATEWAY_PATTERN = ""
#
# If you are using local delivery, EMAIL_GATEWAY_PATTERN is all you need
# to change in this file. You will also need to enable the Zulip postfix
# configuration to support local delivery by adding
# , zulip::postfix_localmail
# to puppet_classes in /etc/zulip/zulip.conf and then running
# `scripts/zulip-puppet-apply -f` to do the installation.
#
# If you are using polling, you will need to setup an IMAP email
# account dedicated to Zulip email gateway messages. The model is
# that users will send emails to that account via an address of the
# form username+%s@example.com (which is what you will set as
# EMAIL_GATEWAY_PATTERN); your email provider should deliver those
# emails to the username@example.com inbox. Then you run in a cron
# job `./manage.py email_mirror` (see puppet/zulip/files/cron.d/email-mirror),
# which will check that inbox and batch-process any new messages.
#
# You will need to configure authentication for the email mirror
# command to access the IMAP mailbox below and in zulip-secrets.conf.
#
# The IMAP login; username here and password as email_gateway_password in
# zulip-secrets.conf.
EMAIL_GATEWAY_LOGIN = ""
# The IMAP server & port to connect to
EMAIL_GATEWAY_IMAP_SERVER = ""
EMAIL_GATEWAY_IMAP_PORT = 993
# The IMAP folder name to check for emails. All emails sent to EMAIL_GATEWAY_PATTERN above
# must be delivered to this folder
EMAIL_GATEWAY_IMAP_FOLDER = "INBOX"
### LDAP integration configuration
# Zulip supports retrieving information about users via LDAP, and
# optionally using LDAP as an authentication mechanism.
#
# In either configuration, you will need to do the following:
#
# * Fill in the LDAP configuration options below so that Zulip can
# connect to your LDAP server
#
# * Setup the mapping between LDAP attributes and Zulip.
# There are three supported ways to setup the username and/or email mapping:
#
# (A) If users' email addresses are in LDAP and used as username, set
# LDAP_APPEND_DOMAIN = None
# AUTH_LDAP_USER_SEARCH to lookup users by email address
#
# (B) If LDAP only has usernames but email addresses are of the form
# username@example.com, you should set:
# LDAP_APPEND_DOMAIN = example.com and
# AUTH_LDAP_USER_SEARCH to lookup users by username
#
# (C) If LDAP username are completely unrelated to email addresses,
# you should set:
# LDAP_EMAIL_ATTR = "email"
# LDAP_APPEND_DOMAIN = None
# AUTH_LDAP_USER_SEARCH to lookup users by username
#
# You can quickly test whether your configuration works by running:
# ./manage.py query_ldap username@example.com
# From the root of your Zulip installation; if your configuration is working
# that will output the full name for your user.
#
# -------------------------------------------------------------
#
# If you are using LDAP for authentication, you will need to enable
# the zproject.backends.ZulipLDAPAuthBackend auth backend in
# AUTHENTICATION_BACKENDS above. After doing so, you should be able
# to login to Zulip by entering your email address and LDAP password
# on the Zulip login form.
#
# If you are using LDAP to populate names in Zulip, once you finish
# configuring this integration, you will need to run:
# ./manage.py sync_ldap_user_data
# To sync names for existing users; you may want to run this in a cron
# job to pick up name changes made on your LDAP server.
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
# URI of your LDAP server. If set, LDAP is used to prepopulate a user's name in
# Zulip. Example: "ldaps://ldap.example.com"
AUTH_LDAP_SERVER_URI = ""
# This DN will be used to bind to your server. If unset, anonymous
# binds are performed. If set, you need to specify the password as
# 'auth_ldap_bind_password' in zulip-secrets.conf.
AUTH_LDAP_BIND_DN = ""
# Specify the search base and the property to filter on that corresponds to the
# username.
AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
# If the value of a user's "uid" (or similar) property is not their email
# address, specify the domain to append here.
LDAP_APPEND_DOMAIN = None # type: Optional[str]
# If username and email are two different LDAP attributes, specify the
# attribute to get the user's email address from LDAP here.
LDAP_EMAIL_ATTR = None # type: Optional[str]
# This map defines how to populate attributes of a Zulip user from LDAP.
AUTH_LDAP_USER_ATTR_MAP = {
# Populate the Django user's name from the LDAP directory.
"full_name": "cn",
}
# The default CAMO_URI of '/external_content/' is served by the camo
# setup in the default Voyager nginx configuration. Setting CAMO_URI
# to '' will disable the Camo integration.
CAMO_URI = '/external_content/'
# RabbitMQ configuration
#
# By default, Zulip connects to rabbitmq running locally on the machine,
# but Zulip also supports connecting to RabbitMQ over the network;
# to use a remote RabbitMQ instance, set RABBITMQ_HOST here.
# RABBITMQ_HOST = "localhost"
# To use another rabbitmq user than the default 'zulip', set RABBITMQ_USERNAME here.
# RABBITMQ_USERNAME = 'zulip'
# Memcached configuration
#
# By default, Zulip connects to memcached running locally on the machine,
# but Zulip also supports connecting to memcached over the network;
# to use a remote Memcached instance, set MEMCACHED_LOCATION here.
# Format HOST:PORT
# MEMCACHED_LOCATION = 127.0.0.1:11211
# Redis configuration
#
# By default, Zulip connects to redis running locally on the machine,
# but Zulip also supports connecting to redis over the network;
# to use a remote Redis instance, set REDIS_HOST here.
# REDIS_HOST = '127.0.0.1'
# For a different redis port set the REDIS_PORT here.
# REDIS_PORT = 6379
# If you set redis_password in zulip-secrets.conf, Zulip will use that password
# to connect to the redis server.
# Controls whether Zulip will rate-limit user requests.
# RATE_LIMITING = True
|
amanharitsh123/zulip
|
zproject/prod_settings_template.py
|
Python
|
apache-2.0
| 19,071
|
[
"VisIt"
] |
3bdad03113dfd2709129dece9af4eb83f1aaa427f0e6240c1f84c25fef254072
|
""" Test functions for stats module
"""
from __future__ import division, print_function, absolute_import
import warnings
import re
import sys
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_raises, rand, dec)
from nose import SkipTest
import numpy
import numpy as np
from numpy import typecodes, array
from scipy._lib._version import NumpyVersion
from scipy import special
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# Generate test cases to test cdf and distribution consistency.
# Note that this list does not include all distributions.
#
# Each entry is a distribution name looked up on scipy.stats by
# test_all_distributions below and passed to stats.kstest.
dists = ['uniform','norm','lognorm','expon','beta',
         'powerlaw','bradford','burr','fisk','cauchy','halfcauchy',
         'foldcauchy','gamma','gengamma','loggamma',
         'alpha','anglit','arcsine','betaprime','dgamma',
         'exponnorm', 'exponweib','exponpow','frechet_l','frechet_r',
         'gilbrat','f','ncf','chi2','chi','nakagami','genpareto',
         'genextreme','genhalflogistic','pareto','lomax','halfnorm',
         'halflogistic','fatiguelife','foldnorm','ncx2','t','nct',
         'weibull_min','weibull_max','dweibull','maxwell','rayleigh',
         'genlogistic', 'logistic','gumbel_l','gumbel_r','gompertz',
         'hypsecant', 'laplace', 'reciprocal','triang','tukeylambda',
         'vonmises', 'vonmises_line', 'pearson3', 'gennorm', 'halfgennorm']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
    # f_gen must remain reachable from scipy.stats.distributions
    # (https://github.com/scipy/scipy/issues/3802)
    target = scipy.stats.distributions
    assert_(hasattr(target, 'f_gen'),
            msg='%s does not have attribute %s' % (target, 'f_gen'))
# check function for test generator
def check_distribution(dist, args, alpha):
    """Kolmogorov-Smirnov self-consistency check for a named distribution.

    ``dist`` is a scipy.stats distribution name; the empty-string cdf
    argument makes ``kstest`` reuse the same name for the reference cdf.
    On failure the test is rerun once to reduce spurious failures from the
    random sample; the final p-value must exceed ``alpha``.
    """
    D,pval = stats.kstest(dist,'', args=args, N=1000)
    if (pval < alpha):
        D,pval = stats.kstest(dist,'',args=args, N=1000)
        # if (pval < alpha):
        #     D,pval = stats.kstest(dist,'',args=args, N=1000)
    assert_(pval > alpha, msg="D = " + str(D) + "; pval = " + str(pval) +
            "; alpha = " + str(alpha) + "\nargs = " + str(args))
# nose test generator
def test_all_distributions():
    """Yield a KS consistency check for every name in ``dists``.

    Shape arguments are drawn at random; a few distributions need
    specially constructed arguments to land in the valid parameter space.
    """
    for dist in dists:
        distfunc = getattr(stats, dist)
        nargs = distfunc.numargs
        alpha = 0.01
        if dist == 'fatiguelife':
            # looser threshold for this distribution
            alpha = 0.001
        if dist == 'frechet':
            # NOTE(review): 'frechet' does not appear in dists (only
            # frechet_l / frechet_r), so this branch looks unreachable --
            # confirm before relying on it.
            args = tuple(2*rand(1))+(0,)+tuple(2*rand(2))
        elif dist == 'triang':
            args = tuple(rand(nargs))
        elif dist == 'reciprocal':
            # second bound must exceed the first
            vals = rand(nargs)
            vals[1] = vals[0] + 1.0
            args = tuple(vals)
        elif dist == 'vonmises':
            yield check_distribution, dist, (10,), alpha
            yield check_distribution, dist, (101,), alpha
            args = tuple(1.0+rand(nargs))
        else:
            args = tuple(1.0+rand(nargs))
        yield check_distribution, dist, args, alpha
def check_vonmises_pdf_periodic(k,l,s,x):
    """The von Mises pdf must be periodic with period 2*pi*scale."""
    dist = stats.vonmises(k, loc=l, scale=s)
    period = 2 * numpy.pi * s
    assert_almost_equal(dist.pdf(x), dist.pdf(x % period))
def check_vonmises_cdf_periodic(k,l,s,x):
    """The von Mises cdf must be periodic (mod 1) with period 2*pi*scale."""
    dist = stats.vonmises(k, loc=l, scale=s)
    period = 2 * numpy.pi * s
    assert_almost_equal(dist.cdf(x) % 1, dist.cdf(x % period) % 1)
def test_vonmises_pdf_periodic():
    """Generate periodicity checks over several shapes, locs and scales."""
    loc_scale_pairs = ((0, 1), (1, 1), (0, 10))
    for k in [0.1, 1, 101]:
        for x in [0, 1, numpy.pi, 10, 100]:
            for l, s in loc_scale_pairs:
                yield check_vonmises_pdf_periodic, k, l, s, x
            for l, s in loc_scale_pairs:
                yield check_vonmises_cdf_periodic, k, l, s, x
def test_vonmises_line_support():
    """vonmises_line is supported on the closed interval [-pi, pi]."""
    lower, upper = stats.vonmises_line.a, stats.vonmises_line.b
    assert_equal(lower, -np.pi)
    assert_equal(upper, np.pi)
def test_vonmises_numerical():
    # cdf at the distribution center must be exactly one half, even for a
    # very large concentration parameter
    frozen = stats.vonmises(800)
    assert_almost_equal(frozen.cdf(0), 0.5)
class TestRandInt(TestCase):
    """Sanity checks for the discrete uniform (randint) distribution."""

    def test_rvs(self):
        # samples must fall in [low, high) and carry an integer dtype
        draws = stats.randint.rvs(5, 30, size=100)
        assert_(numpy.all(draws < 30) & numpy.all(draws >= 5))
        assert_(len(draws) == 100)
        draws = stats.randint.rvs(5, 30, size=(2, 50))
        assert_(numpy.shape(draws) == (2, 50))
        assert_(draws.dtype.char in typecodes['AllInteger'])
        single = stats.randint.rvs(15, 46)
        assert_((single >= 15) & (single < 46))
        assert_(isinstance(single, numpy.ScalarType), msg=repr(type(single)))
        frozen_draws = stats.randint(15, 46).rvs(3)
        assert_(frozen_draws.dtype.char in typecodes['AllInteger'])

    def test_pdf(self):
        # pmf is flat at 1/(high-low) on the support, zero elsewhere
        support = numpy.r_[0:36]
        expected = numpy.where((support >= 5) & (support < 30), 1.0/(30-5), 0)
        assert_array_almost_equal(stats.randint.pmf(support, 5, 30), expected)

    def test_cdf(self):
        # cdf is a staircase matching the cumulative pmf
        xx = numpy.r_[0:36:100j]
        kk = numpy.floor(xx)
        expected = numpy.select([kk >= 30, kk >= 5],
                                [1.0, (kk-5.0+1)/(30-5.0)], 0)
        assert_array_almost_equal(stats.randint.cdf(xx, 5, 30), expected,
                                  decimal=12)
class TestBinom(TestCase):
    """Checks for the binomial distribution: sampling, degenerate pmf
    values (Ticket #1842), entropy, and warning behavior at p=0 (gh-3817).
    """
    def test_rvs(self):
        # samples stay within [0, n] and carry an integer dtype
        vals = stats.binom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.binom.rvs(10, 0.75)
        assert_(isinstance(val, int))
        val = stats.binom(10, 0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # degenerate cases p=1 and p=0; regression test for Ticket #1842
        vals1 = stats.binom.pmf(100, 100,1)
        vals2 = stats.binom.pmf(0, 100,0)
        assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
        assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
    def test_entropy(self):
        # Basic entropy tests.
        b = stats.binom(2, 0.5)
        expected_p = np.array([0.25, 0.5, 0.25])
        expected_h = -sum(xlogy(expected_p, expected_p))
        h = b.entropy()
        assert_allclose(h, expected_h)
        # a degenerate (single-outcome) distribution has zero entropy
        b = stats.binom(2, 0.0)
        h = b.entropy()
        assert_equal(h, 0.0)
        b = stats.binom(2, 1.0)
        h = b.entropy()
        assert_equal(h, 0.0)
    def test_warns_p0(self):
        # no spurious warnings are generated for p=0; gh-3817
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            assert_equal(stats.binom(n=2, p=0).mean(), 0)
            assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(TestCase):
    """Sanity checks for the Bernoulli distribution."""

    def test_rvs(self):
        # samples are 0/1 valued with an integer dtype
        draws = stats.bernoulli.rvs(0.75, size=(2, 50))
        assert_(numpy.all(draws >= 0) & numpy.all(draws <= 1))
        assert_(numpy.shape(draws) == (2, 50))
        assert_(draws.dtype.char in typecodes['AllInteger'])
        one_draw = stats.bernoulli.rvs(0.75)
        assert_(isinstance(one_draw, int))
        frozen_draws = stats.bernoulli(0.75).rvs(3)
        assert_(isinstance(frozen_draws, numpy.ndarray))
        assert_(frozen_draws.dtype.char in typecodes['AllInteger'])

    def test_entropy(self):
        # entropy of Bernoulli(p) is -p*log(p) - (1-p)*log(1-p)
        expected = -0.25*np.log(0.25) - 0.75*np.log(0.75)
        assert_allclose(stats.bernoulli(0.25).entropy(), expected)
        # degenerate cases carry no uncertainty
        assert_equal(stats.bernoulli(0.0).entropy(), 0.0)
        assert_equal(stats.bernoulli(1.0).entropy(), 0.0)
class TestNBinom(TestCase):
    """Sanity checks for the negative binomial distribution."""

    def test_rvs(self):
        # samples are non-negative with an integer dtype
        draws = stats.nbinom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(draws >= 0))
        assert_(numpy.shape(draws) == (2, 50))
        assert_(draws.dtype.char in typecodes['AllInteger'])
        one_draw = stats.nbinom.rvs(10, 0.75)
        assert_(isinstance(one_draw, int))
        frozen_draws = stats.nbinom(10, 0.75).rvs(3)
        assert_(isinstance(frozen_draws, numpy.ndarray))
        assert_(frozen_draws.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # exp(logpmf) must agree with pmf far in the tail (ticket 1779)
        assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
                        stats.nbinom.pmf(700, 721, 0.52))
        # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
        assert_equal(scipy.stats.nbinom.logpmf(0, 1, 1), 0)
class TestGeom(TestCase):
    """Checks for the geometric distribution: pmf/logpmf consistency
    (ticket 1793, gh-4028), cdf/sf and their logs, and ppf inversion.
    """
    def test_rvs(self):
        vals = stats.geom.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.geom.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.geom(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # pmf(k) = p * (1-p)**(k-1) for k = 1, 2, 3 at p = 0.5
        vals = stats.geom.pmf([1,2,3],0.5)
        assert_array_almost_equal(vals,[0.5,0.25,0.125])
    def test_logpmf(self):
        # regression test for ticket 1793
        vals1 = np.log(stats.geom.pmf([1,2,3], 0.5))
        vals2 = stats.geom.logpmf([1,2,3], 0.5)
        assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
        # regression test for gh-4028
        val = stats.geom.logpmf(1, 1)
        assert_equal(val, 0.0)
    def test_cdf_sf(self):
        # sf must be the exact complement of cdf
        vals = stats.geom.cdf([1, 2, 3], 0.5)
        vals_sf = stats.geom.sf([1, 2, 3], 0.5)
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(vals, expected)
        assert_array_almost_equal(vals_sf, 1-expected)
    def test_logcdf_logsf(self):
        vals = stats.geom.logcdf([1, 2, 3], 0.5)
        vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(vals, np.log(expected))
        assert_array_almost_equal(vals_sf, np.log1p(-expected))
    def test_ppf(self):
        # ppf inverts the cdf values checked above
        vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
        expected = array([1.0, 2.0, 3.0])
        assert_array_almost_equal(vals, expected)
class TestGennorm(TestCase):
    """gennorm must reduce to known distributions at special beta values."""

    def test_laplace(self):
        # beta=1 recovers the Laplace distribution
        grid = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(grid, 1),
                            stats.laplace.pdf(grid))

    def test_norm(self):
        # beta=2 recovers the normal distribution with scale 1/sqrt(2)
        grid = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(grid, 2),
                            stats.norm.pdf(grid, scale=2**-.5))
class TestHalfgennorm(TestCase):
    """halfgennorm must reduce to known half-line distributions."""

    def test_expon(self):
        # beta=1 recovers the exponential distribution
        grid = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(grid, 1),
                            stats.expon.pdf(grid))

    def test_halfnorm(self):
        # beta=2 recovers the half normal with scale 1/sqrt(2)
        grid = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(grid, 2),
                            stats.halfnorm.pdf(grid, scale=2**-.5))

    def test_gennorm(self):
        # halfgennorm is twice the gennorm pdf for the same beta
        grid = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(grid, .497324),
                            2*stats.gennorm.pdf(grid, .497324))
class TestTruncnorm(TestCase):
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5,0,1e-4,0.5, 1-1e-4,1,2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5,0,1e-4,0.5, 1-1e-4,1,2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_2477_large_values(self):
# Check a case that fails because of extreme tailness.
raise SkipTest('truncnorm rvs is know to fail at extreme tails')
low, high = 100, 101
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
class TestHypergeom(TestCase):
    """Checks for the hypergeometric distribution, including precision at
    large population sizes (gh-1218) and corner-case arguments (gh-2325).
    """
    def test_rvs(self):
        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
        assert_(numpy.all(vals >= 0) &
                numpy.all(vals <= 3))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.hypergeom.rvs(20, 3, 10)
        assert_(isinstance(val, int))
        val = stats.hypergeom(20, 3, 10).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_precision(self):
        # comparison number from mpmath
        M = 2500
        n = 50
        N = 500
        tot = M
        good = n
        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
    def test_args(self):
        # test correct output for corner cases of arguments
        # see gh-2325
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
    def test_cdf_above_one(self):
        # for some values of parameters, hypergeom cdf was >1, see gh-2238
        assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
    def test_precision2(self):
        # Test hypergeom precision for large numbers. See #1218.
        # Results compared with those from R.
        oranges = 9.9e4
        pears = 1.1e5
        fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
        quantile = 2e4
        res = []
        for eaten in fruits_eaten:
            res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten))
        expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
                             8.265601e-11, 0.1237904, 1])
        assert_allclose(res, expected, atol=0, rtol=5e-7)
        # Test with array_like first argument
        quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
        res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
        expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
        assert_allclose(res2, expected2, atol=0, rtol=5e-7)
    def test_entropy(self):
        # Simple tests of entropy.
        hg = stats.hypergeom(4, 1, 1)
        h = hg.entropy()
        expected_p = np.array([0.75, 0.25])
        expected_h = -np.sum(xlogy(expected_p, expected_p))
        assert_allclose(h, expected_h)
        # a single-outcome distribution has zero entropy
        hg = stats.hypergeom(1, 1, 1)
        h = hg.entropy()
        assert_equal(h, 0.0)
class TestLoggamma(TestCase):

    def test_stats(self):
        """Check loggamma moments against published reference values.

        The precomputed values are from the table in section 2.2 of
        "A Statistical Study of Log-Gamma Distribution", by Ping Shing
        Chan (thesis, McMaster University, 1993).
        """
        table = np.array([
            # c,    mean,   var,    skew,    exc. kurt.
            0.5, -1.9635, 4.9348, -1.5351, 4.0000,
            1.0, -0.5772, 1.6449, -1.1395, 2.4000,
            12.0, 2.4427, 0.0869, -0.2946, 0.1735,
        ]).reshape(-1, 5)
        for c, mean, var, skew, kurt in table:
            # Use the canonical 'mvsk' string (was 'msvk').  stats()
            # always returns moments in m, v, s, k order regardless of
            # the character order, so behavior is unchanged, but the
            # string now matches the order of the expected values and
            # the convention used throughout this file.
            computed = stats.loggamma.stats(c, moments='mvsk')
            assert_array_almost_equal(computed, [mean, var, skew, kurt],
                                      decimal=4)
class TestLogser(TestCase):
    def test_rvs(self):
        # Array draws: log-series variates are integers >= 1 with the
        # requested shape and an integer dtype.
        vals = stats.logser.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        # Scalar draw: asserted to be a plain Python int.
        # NOTE(review): numpy-version-sensitive -- newer numpy may return
        # np.int64 here; confirm against the numpy version in use.
        val = stats.logser.rvs(0.75)
        assert_(isinstance(val, int))
        # Frozen-distribution draws behave like the raw rvs call.
        val = stats.logser(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
class TestPareto(TestCase):
    def test_stats(self):
        # Check the stats() method with some simple values. Also check
        # that the calculations do not trigger RuntimeWarnings.
        # For pareto(b): the n-th moment is finite only for b > n, so
        # mean/var/skew/kurt become finite one at a time as b grows.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)

            # b = 0.5: no finite moments at all.
            m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # b = 1.0: still no finite moments (mean needs b > 1).
            m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # b = 1.5: mean is finite, higher moments are not.
            m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
            assert_equal(m, 3.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # b = 2.0: boundary case, variance still infinite.
            m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
            assert_equal(m, 2.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # b = 2.5: mean and variance finite (closed forms
            # b/(b-1) and b/((b-1)^2 (b-2))).
            m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
            assert_allclose(m, 2.5 / 1.5)
            assert_allclose(v, 2.5 / (1.5*1.5*0.5))
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # b = 3.0: skewness still undefined (needs b > 3).
            m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
            assert_allclose(m, 1.5)
            assert_allclose(v, 0.75)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # b = 3.5: skewness finite, kurtosis not (needs b > 4).
            m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
            assert_allclose(m, 3.5 / 2.5)
            assert_allclose(v, 3.5 / (2.5*2.5*1.5))
            assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
            assert_equal(k, np.nan)

            # b = 4.0: boundary case, kurtosis still undefined.
            m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
            assert_allclose(m, 4.0 / 3.0)
            assert_allclose(v, 4.0 / 18.0)
            assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
            assert_equal(k, np.nan)

            # b = 4.5: all four moments finite.
            m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
            assert_allclose(m, 4.5 / 3.5)
            assert_allclose(v, 4.5 / (3.5*3.5*2.5))
            assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
            assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
class TestGenpareto(TestCase):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
stats.genpareto._argcheck(c) # ugh
assert_equal(stats.genpareto.a, 0.)
assert_(np.isposinf(stats.genpareto.b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
stats.genpareto._argcheck(c)
assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
for dc in [1e-14, 1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
class TestPearson3(TestCase):
    def test_rvs(self):
        # Array draws have the requested shape and a float dtype.
        vals = stats.pearson3.rvs(0.1, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllFloat'])
        # Scalar draw: asserted to be a plain Python float.
        val = stats.pearson3.rvs(0.5)
        assert_(isinstance(val, float))
        # Frozen-distribution draws behave like the raw rvs call.
        val = stats.pearson3(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllFloat'])
        assert_(len(val) == 3)

    def test_pdf(self):
        # pdf for scalar and array skew arguments; reference numbers
        # are regression values (not independently derived).
        vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
                        atol=1e-6)
        vals = stats.pearson3.pdf(-3, 0.1)
        assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
        vals = stats.pearson3.pdf([-3,-2,-1,0,1], 0.1)
        assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
                                        0.39885918, 0.23413173]), atol=1e-6)

    def test_cdf(self):
        # cdf for scalar and array skew arguments; same regression style
        # as test_pdf above.
        vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
                        atol=1e-6)
        vals = stats.pearson3.cdf(-3, 0.1)
        assert_allclose(vals, [0.00082256], atol=1e-6)
        vals = stats.pearson3.cdf([-3,-2,-1,0,1], 0.1)
        assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
                               5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestPoisson(TestCase):
    def test_rvs(self):
        # Array draws: non-negative integers of the requested shape.
        vals = stats.poisson.rvs(0.5, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        # Scalar draw: asserted to be a plain Python int.
        val = stats.poisson.rvs(0.5)
        assert_(isinstance(val, int))
        # Frozen-distribution draws behave like the raw rvs call.
        val = stats.poisson(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])

    def test_stats(self):
        # Closed forms: mean = var = mu, skew = 1/sqrt(mu),
        # excess kurtosis = 1/mu.
        mu = 16.0
        result = stats.poisson.stats(mu, moments='mvsk')
        assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
class TestZipf(TestCase):
    def test_rvs(self):
        # Array draws: zipf variates are integers >= 1 with the
        # requested shape and an integer dtype.
        vals = stats.zipf.rvs(1.5, size=(2, 50))
        assert_(numpy.all(vals >= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        # Scalar draw: asserted to be a plain Python int.
        val = stats.zipf.rvs(1.5)
        assert_(isinstance(val, int))
        # Frozen-distribution draws behave like the raw rvs call.
        val = stats.zipf(1.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])

    def test_moments(self):
        # n-th moment is finite iff a > n + 1
        # a = 2.8: mean finite, variance infinite.
        m, v = stats.zipf.stats(a=2.8)
        assert_(np.isfinite(m))
        assert_equal(v, np.inf)
        # a = 4.8: skewness finite but kurtosis (needs a > 5) is not.
        s, k = stats.zipf.stats(a=4.8, moments='sk')
        assert_(not np.isfinite([s, k]).all())
class TestDLaplace(TestCase):
    def test_rvs(self):
        # Array draws have the requested shape and an integer dtype.
        vals = stats.dlaplace.rvs(1.5, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        # Scalar draw: asserted to be a plain Python int.
        val = stats.dlaplace.rvs(1.5)
        assert_(isinstance(val, int))
        # Frozen-distribution draws behave like the raw rvs call.
        val = stats.dlaplace(1.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
        # Smoke check that sampling does not return None.
        assert_(stats.dlaplace.rvs(0.8) is not None)

    def test_stats(self):
        # compare the explicit formulas w/ direct summation using pmf
        a = 1.
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')

        # Truncated support [-N, N] captures essentially all the mass
        # for a=1, so the pmf-based moment sums are accurate.
        N = 37
        xx = np.arange(-N, N+1)
        pp = dl.pmf(xx)
        m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
        # Symmetry: mean and skewness are exactly zero.
        assert_equal((m, s), (0,0))
        # Variance and excess kurtosis vs the empirical moments.
        assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)

    def test_stats2(self):
        # For a = log(2), the closed forms give var=4 and exc. kurt=3.25.
        a = np.log(2.)
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')

        assert_equal((m, s), (0.,0.))
        assert_allclose((v, k), (4., 3.25))
class TestInvGamma(TestCase):
    # NOTE(review): numpy.testing.dec was removed in modern numpy; this
    # decorator only works with the old numpy this suite targets.
    @dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
                "assert_* funcs broken with inf/nan")
    def test_invgamma_inf_gh_1866(self):
        # invgamma's moments are only finite for a>n
        # specific numbers checked w/ boost 1.54
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            # a large enough for all four moments to be finite.
            mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
            assert_allclose(mvsk,
                [0.05461496450, 0.0001723162534, 1.020362676, 2.055616582])

            # Array of shapes straddling the finiteness thresholds:
            # a=1.1 (only mean), a=3.1 (up to skew), a=5.6 (all four).
            a = [1.1, 3.1, 5.6]
            mvsk = stats.invgamma.stats(a=a, moments='mvsk')
            expected = ([10., 0.476190476, 0.2173913043],  # mmm
                        [np.inf, 0.2061430632, 0.01312749422],  # vvv
                        [np.nan, 41.95235392, 2.919025532],  # sss
                        [np.nan, np.nan, 24.51923076])  # kkk
            for x, y in zip(mvsk, expected):
                assert_almost_equal(x, y)
class TestF(TestCase):
    def test_f_moments(self):
        # n-th moment of F distributions is only finite for n < dfd / 2
        # With dfd=6.5, moments up to the 3rd exist; kurtosis does not.
        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
        assert_(np.isfinite(m))
        assert_(np.isfinite(v))
        assert_(np.isfinite(s))
        assert_(not np.isfinite(k))

    def test_moments_warnings(self):
        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')

    # NOTE(review): numpy.testing.dec was removed in modern numpy; this
    # decorator only works with the old numpy this suite targets.
    @dec.knownfailureif(True, 'f stats does not properly broadcast')
    def test_stats_broadcast(self):
        # stats do not fully broadcast just yet
        mv = stats.f.stats(dfn=11, dfd=[11, 12])
def test_rvgeneric_std():
    """Regression test for #1191: std() must broadcast over array args."""
    expected = [1.29099445, 1.22474487]  # sqrt(df/(df-2)) for df=5, 6
    assert_array_almost_equal(stats.t.std([5, 6]), expected)
class TestRvDiscrete(TestCase):
    def test_rvs(self):
        # Custom discrete distribution: sampled frequencies should land
        # within 5% of the specified probabilities.
        states = [-1,0,1,2,3,4]
        probability = [0.0,0.3,0.4,0.0,0.3,0.0]
        samples = 1000
        r = stats.rv_discrete(name='sample',values=(states,probability))
        x = r.rvs(size=samples)
        assert_(isinstance(x, numpy.ndarray))

        for s,p in zip(states,probability):
            assert_(abs(sum(x == s)/float(samples) - p) < 0.05)

        # Scalar draw: asserted to be a plain Python int.
        x = r.rvs()
        assert_(isinstance(x, int))

    def test_entropy(self):
        # Basic tests of entropy.
        # Entropy of a three-point distribution vs direct computation.
        pvals = np.array([0.25, 0.45, 0.3])
        p = stats.rv_discrete(values=([0, 1, 2], pvals))
        expected_h = -sum(xlogy(pvals, pvals))
        h = p.entropy()
        assert_allclose(h, expected_h)

        # A point mass has zero entropy (0*log(0) handled as 0).
        p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
        h = p.entropy()
        assert_equal(h, 0.0)
class TestExpon(TestCase):
    """Spot checks for the exponential distribution."""

    def test_zero(self):
        # pdf at the left edge of the support is 1 for the standard
        # (scale=1) exponential.
        assert_equal(stats.expon.pdf(0), 1)

    def test_tail(self):
        # Regression test for ticket 807: accuracy in the extreme tails.
        # cdf(x) ~ x to first order for tiny x...
        tiny = 1e-18
        assert_equal(stats.expon.cdf(tiny), tiny)
        # ...and isf/sf must round-trip exactly far into the right tail.
        far = 40
        assert_equal(stats.expon.isf(stats.expon.sf(far)), far)
class TestExponNorm(TestCase):
    def test_moments(self):
        # Some moment test cases based on non-loc/scaled formula
        def get_moms(lam, sig, mu):
            # See wikipedia for these formulae
            # where it is listed as an exponentially modified gaussian
            # Returns [mean, variance, skewness, excess kurtosis].
            opK2 = 1.0 + 1 / (lam*sig)**2
            exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
            exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
            return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]

        # scipy parameterizes exponnorm by K = 1/(lam*sig); check
        # several (mu, sig, lam) combinations against the closed forms.
        mu, sig, lam = 0, 1, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -3, 2, 0.1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = 0, 3, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -5, 11, 3.5
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))

    def test_extremes_x(self):
        # Test for extreme values against overflows
        # pdf must underflow cleanly to 0 far out in either tail.
        assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
class TestGenExpon(TestCase):
    def test_pdf_unity_area(self):
        # NOTE(review): scipy.integrate.simps was removed in SciPy 1.12
        # (renamed to `simpson`); update the import if the suite is ever
        # run against newer SciPy.
        from scipy.integrate import simps
        # PDF should integrate to one
        assert_almost_equal(simps(stats.genexpon.pdf(numpy.arange(0,10,0.01),
                                                     0.5, 0.5, 2.0),
                                  dx=0.01), 1, 1)

    def test_cdf_bounds(self):
        # CDF should always be positive
        # (and, like any cdf, bounded above by 1).
        cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(TestCase):
    """Spot checks for the exponential power distribution."""

    def test_tail(self):
        # Left-tail accuracy: cdf(x) ~ x**b for small x (here b=2,
        # so cdf(1e-10) is 1e-20).
        assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
        # sf/isf must round-trip in the right tail.
        shape = .8
        roundtrip = stats.exponpow.isf(stats.exponpow.sf(5, shape), shape)
        assert_almost_equal(roundtrip, 5)
class TestSkellam(TestCase):
    def test_pmf(self):
        # comparison to R
        # pmf over k in [-10, 14] for mu1=10, mu2=5; reference values
        # generated with R's dskellam, matched to 15 decimals.
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skpmfR = numpy.array(
                   [4.2254582961926893e-005, 1.1404838449648488e-004,
                    2.8979625801752660e-004, 6.9177078182101231e-004,
                    1.5480716105844708e-003, 3.2412274963433889e-003,
                    6.3373707175123292e-003, 1.1552351566696643e-002,
                    1.9606152375042644e-002, 3.0947164083410337e-002,
                    4.5401737566767360e-002, 6.1894328166820688e-002,
                    7.8424609500170578e-002, 9.2418812533573133e-002,
                    1.0139793148019728e-001, 1.0371927988298846e-001,
                    9.9076583077406091e-002, 8.8546660073089561e-002,
                    7.4187842052486810e-002, 5.8392772862200251e-002,
                    4.3268692953013159e-002, 3.0248159818374226e-002,
                    1.9991434305603021e-002, 1.2516877303301180e-002,
                    7.4389876226229707e-003])

        assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)

    def test_cdf(self):
        # comparison to R, only 5 decimals
        # Same grid as test_pmf; cdf reference values from R's pskellam.
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skcdfR = numpy.array(
                   [6.4061475386192104e-005, 1.7810985988267694e-004,
                    4.6790611790020336e-004, 1.1596768997212152e-003,
                    2.7077485103056847e-003, 5.9489760066490718e-003,
                    1.2286346724161398e-002, 2.3838698290858034e-002,
                    4.3444850665900668e-002, 7.4392014749310995e-002,
                    1.1979375231607835e-001, 1.8168808048289900e-001,
                    2.6011268998306952e-001, 3.5253150251664261e-001,
                    4.5392943399683988e-001, 5.5764871387982828e-001,
                    6.5672529695723436e-001, 7.4527195703032389e-001,
                    8.1945979908281064e-001, 8.7785257194501087e-001,
                    9.2112126489802404e-001, 9.5136942471639818e-001,
                    9.7136085902200120e-001, 9.8387773632530240e-001,
                    9.9131672394792536e-001])

        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(TestCase):
    """Spot checks for the log-normal distribution."""

    def test_pdf(self):
        # Regression test for Ticket #1471: pdf(0) hit a 0/0 and gave
        # nan; it must be exactly 0.  log(0) is evaluated internally,
        # hence the suppressed divide warning.
        with np.errstate(divide='ignore'):
            values = stats.lognorm.pdf([0, 0.5, 1], 1)
        expected = [0.0, 0.62749608, 0.39894228]
        assert_array_almost_equal(values, expected)
class TestBeta(TestCase):
    """Spot checks for the beta distribution."""

    def test_logpdf(self):
        # Regression test for Ticket #1326: logpdf at x=0 is a
        # 0*log(0) corner case and must not produce nan.
        assert_almost_equal(stats.beta.logpdf(0, 1, 0.5), -0.69314718056)
        assert_almost_equal(stats.beta.logpdf(0, 0.5, 1), np.inf)

    def test_logpdf_ticket_1866(self):
        # Large shape parameters: logpdf stays finite and consistent
        # with pdf (ticket 1866).
        a, b = 267, 1472
        points = np.array([0.2, 0.5, 0.6])
        frozen = stats.beta(a, b)
        assert_allclose(frozen.logpdf(points).sum(), -1201.699061824062)
        assert_allclose(frozen.pdf(points), np.exp(frozen.logpdf(points)))
class TestBetaPrime(TestCase):
    def test_logpdf(self):
        # Large shape parameters: logpdf must stay finite and agree
        # with pdf.
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.betaprime(alpha, beta)
        assert_(np.isfinite(b.logpdf(x)).all())
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))

    def test_cdf(self):
        # regression test for gh-4030: Implementation of
        # scipy.stats.betaprime.cdf()
        x = stats.betaprime.cdf(0, 0.2, 0.3)
        assert_equal(x, 0.0)

        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        cdfs = stats.betaprime.cdf(x, alpha, beta)
        assert_(np.isfinite(cdfs).all())

        # check the new cdf implementation vs generic one:
        # NOTE(review): calls the private rv_continuous._cdf_single
        # (numeric integration of the pdf) -- internal API, may change
        # between scipy versions.
        gen_cdf = stats.rv_continuous._cdf_single
        cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
        assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(TestCase):
    """Spot checks for the gamma distribution."""

    def test_pdf(self):
        # A few pdf values cross-checked against R's dgamma
        # (scale 1/5 == rate 5).
        assert_almost_equal(stats.gamma.pdf(90, 394, scale=0.2), 0.002312341)
        assert_almost_equal(stats.gamma.pdf(3, 10, scale=0.2), 0.1620358)

    def test_logpdf(self):
        # Regression test for Ticket #1326: logpdf(0, 1) is a
        # 0*log(0) corner case and must be 0, not nan.
        assert_almost_equal(stats.gamma.logpdf(0, 1), 0)
class TestChi2(TestCase):
    # Regression tests after precision improvements, ticket:1041.
    # Reference values are regression numbers, not independently
    # verified.
    def test_precision(self):
        cases = [((1000, 1000), 8.919133934753128e-003),
                 ((100, 100), 0.028162503162596778)]
        for args, expected in cases:
            assert_almost_equal(stats.chi2.pdf(*args), expected, 14)
class TestArrayArgument(TestCase):
    # Regression test for ticket:992 -- array-valued loc/scale must
    # broadcast against the requested output shape without raising.
    def test_noexception(self):
        sample_shape = (10, 5)
        rvs = stats.norm.rvs(loc=np.arange(5), scale=np.ones(5),
                             size=sample_shape)
        assert_equal(rvs.shape, sample_shape)
class TestDocstring(TestCase):
    def test_docstrings(self):
        # See ticket #761: each distribution's docstring should mention
        # its own name.  The None guard covers ``python -OO``, which
        # strips docstrings.
        for dist, name in [(stats.rayleigh, "rayleigh"),
                           (stats.bernoulli, "bernoulli")]:
            if dist.__doc__ is not None:
                self.assertTrue(name in dist.__doc__.lower())

    def test_no_name_arg(self):
        # If name is not given, construction shouldn't fail. See #1508.
        stats.rv_continuous()
        stats.rv_discrete()
class TestEntropy(TestCase):
    def test_entropy_positive(self):
        # See ticket #497
        # Relative entropy (KL divergence) of a distribution with
        # itself is 0; with a different distribution it is positive.
        pk = [0.5,0.2,0.3]
        qk = [0.1,0.25,0.65]
        eself = stats.entropy(pk,pk)
        edouble = stats.entropy(pk,qk)
        assert_(0.0 == eself)
        assert_(edouble >= 0.0)

    def test_entropy_base(self):
        # Uniform over 16 outcomes: entropy in base 2 is exactly 4 bits.
        pk = np.ones(16, float)
        S = stats.entropy(pk, base=2.)
        assert_(abs(S - 4.) < 1.e-5)

        # Changing the base rescales the result by log(2).
        qk = np.ones(16, float)
        qk[:8] = 2.
        S = stats.entropy(pk, qk)
        S2 = stats.entropy(pk, qk, base=2.)
        assert_(abs(S/S2 - np.log(2.)) < 1.e-5)

    def test_entropy_zero(self):
        # Test for PR-479
        # A zero probability contributes 0 (not nan) to the entropy;
        # input is normalized internally.
        assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
                            decimal=12)

    def test_entropy_2d(self):
        # 2-D input: entropy is computed column-wise.
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.1933259, 0.18609809])

    # NOTE(review): numpy.testing.dec was removed in modern numpy; this
    # decorator only works with the old numpy this suite targets.
    @dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
                "assert_* funcs broken with inf/nan")
    def test_entropy_2d_zero(self):
        # qk=0 where pk>0 gives infinite KL divergence for that column;
        # pk=0 at the same position makes it finite again.
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [np.inf, 0.18609809])

        pk[0][0] = 0.0
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.17403988, 0.18609809])
def TestArgsreduce():
    # NOTE(review): this is a module-level *function* with a class-style
    # name.  nose's default collection regex matches it, but pytest's
    # default (test_* functions) would silently skip it -- confirm the
    # runner before relying on this test.
    # argsreduce(cond, *args) broadcasts each arg against cond and
    # returns the elements where cond is true.
    a = array([1,3,2,1,2,3,3])
    b,c = argsreduce(a > 1, a, 2)

    assert_array_equal(b, [3,2,2,3,3])
    assert_array_equal(c, [2,2,2,2,2])

    # Scalar (true) condition: first element broadcast behavior.
    b,c = argsreduce(2 > 1, a, 2)
    assert_array_equal(b, a[0])
    assert_array_equal(c, [2])

    # All-true condition: args are returned in full.
    b,c = argsreduce(a > 0, a, 2)
    assert_array_equal(b, a)
    assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod(object):
    # NOTE: deliberately not a TestCase -- the yield-style generator
    # tests below are nose-specific and depend on test_all_distributions
    # defined elsewhere in this file.
    skip = ['ncf']

    @dec.slow
    def test_fit(self):
        def check(func, dist, args, alpha):
            # Fit each distribution to its own samples and check that
            # fit() returns the expected number of parameters.
            if dist in self.skip:
                raise SkipTest("%s fit known to fail" % dist)
            distfunc = getattr(stats, dist)
            with np.errstate(all='ignore'):
                res = distfunc.rvs(*args, **{'size':200})
                vals = distfunc.fit(res)
                vals2 = distfunc.fit(res, optimizer='powell')
            # Only check the length of the return
            # FIXME: should check the actual results to see if we are 'close'
            #   to what was created --- but what is 'close' enough
            if dist == 'frechet':
                assert_(len(vals) == len(args))
                assert_(len(vals2) == len(args))
            else:
                assert_(len(vals) == 2+len(args))
                assert_(len(vals2) == 2+len(args))

        for func, dist, args, alpha in test_all_distributions():
            yield check, func, dist, args, alpha

    @dec.slow
    def test_fix_fit(self):
        def check(func, dist, args, alpha):
            # Not sure why 'ncf', and 'beta' are failing
            # frechet has different len(args) than distfunc.numargs
            if dist in self.skip + ['frechet']:
                raise SkipTest("%s fit known to fail" % dist)
            distfunc = getattr(stats, dist)
            with np.errstate(all='ignore'):
                res = distfunc.rvs(*args, **{'size':200})
                # Fixing loc (floc) / scale (fscale) must be honored in
                # the returned parameter vector.
                vals = distfunc.fit(res,floc=0)
                vals2 = distfunc.fit(res,fscale=1)
                assert_(len(vals) == 2+len(args))
                assert_(vals[-2] == 0)
                assert_(vals2[-1] == 1)
                assert_(len(vals2) == 2+len(args))
                # Fixed shape parameters (f0/f1/f2) likewise pass
                # through unchanged.
                if len(args) > 0:
                    vals3 = distfunc.fit(res, f0=args[0])
                    assert_(len(vals3) == 2+len(args))
                    assert_(vals3[0] == args[0])
                if len(args) > 1:
                    vals4 = distfunc.fit(res, f1=args[1])
                    assert_(len(vals4) == 2+len(args))
                    assert_(vals4[1] == args[1])
                if len(args) > 2:
                    vals5 = distfunc.fit(res, f2=args[2])
                    assert_(len(vals5) == 2+len(args))
                    assert_(vals5[2] == args[2])

        for func, dist, args, alpha in test_all_distributions():
            yield check, func, dist, args, alpha

    def test_fix_fit_2args_lognorm(self):
        # Regression test for #1551.
        np.random.seed(12345)
        with np.errstate(all='ignore'):
            x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
            assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
                            [0.25888672, 0, 20], atol=1e-5)

    def test_fix_fit_norm(self):
        # Normal MLE has closed forms, so the fixed-parameter results
        # can be checked exactly.
        x = np.arange(1, 6)

        loc, scale = stats.norm.fit(x)
        assert_almost_equal(loc, 3)
        assert_almost_equal(scale, np.sqrt(2))

        loc, scale = stats.norm.fit(x, floc=2)
        assert_equal(loc, 2)
        assert_equal(scale, np.sqrt(3))

        loc, scale = stats.norm.fit(x, fscale=2)
        assert_almost_equal(loc, 3)
        assert_equal(scale, 2)

    def test_fix_fit_gamma(self):
        x = np.arange(1, 6)
        meanlog = np.log(x).mean()

        # A basic test of gamma.fit with floc=0.
        # The MLE shape satisfies log(a) - digamma(a) = log(mean) - meanlog.
        floc = 0
        a, loc, scale = stats.gamma.fit(x, floc=floc)
        s = np.log(x.mean()) - meanlog
        assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # Regression tests for gh-2514.
        # The problem was that if `floc=0` was given, any other fixed
        # parameters were ignored.
        f0 = 1
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        f0 = 2
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # loc and scale fixed.
        # The shape MLE then satisfies digamma(a) = meanlog - log(fscale).
        floc = 0
        fscale = 2
        a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
        assert_equal(loc, floc)
        assert_equal(scale, fscale)
        c = meanlog - np.log(fscale)
        assert_almost_equal(special.digamma(a), c)

    def test_fix_fit_beta(self):
        # Test beta.fit when both floc and fscale are given.

        def mlefunc(a, b, x):
            # Zeros of this function are critical points of
            # the maximum likelihood function.
            n = len(x)
            s1 = np.log(x).sum()
            s2 = np.log(1-x).sum()
            psiab = special.psi(a + b)
            func = [s1 - n * (-psiab + special.psi(a)),
                    s2 - n * (-psiab + special.psi(b))]
            return func

        # Basic test with floc and fscale given.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        assert_allclose(mlefunc(a, b, x), [0,0], atol=1e-6)

        # Basic test with f0, floc and fscale given.
        # This is also a regression test for gh-2514.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
        assert_equal(a, 2)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        da, db = mlefunc(a, b, x)
        assert_allclose(db, 0, atol=1e-5)

        # Same floc and fscale values as above, but reverse the data
        # and fix b (f1).
        x2 = 1 - x
        a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
        assert_equal(b2, 2)
        assert_equal(loc2, 0)
        assert_equal(scale2, 1)
        da, db = mlefunc(a2, b2, x2)
        assert_allclose(da, 0, atol=1e-5)
        # a2 of this test should equal b from above.
        assert_almost_equal(a2, b)

        # Check for detection of data out of bounds when floc and fscale
        # are given.
        assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
        y = np.array([0, .5, 1])
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)

        # Check that attempting to fix all the parameters raises a ValueError.
        assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
                      floc=2, fscale=3)
class TestFrozen(TestCase):
    # Test that a frozen distribution gives the same results as the original object.
    #
    # Only tested for the normal distribution (with loc and scale specified)
    # and for the gamma distribution (with a shape parameter specified).
    def test_norm(self):
        # Every public method of the frozen norm must agree with the
        # unfrozen distribution called with explicit loc/scale.
        dist = stats.norm
        frozen = stats.norm(loc=10.0, scale=3.0)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2,loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        # Support endpoints are inherited from the distribution.
        assert_equal(frozen.a, dist.a)
        assert_equal(frozen.b, dist.b)

    def test_gamma(self):
        # Same method-by-method comparison, with a shape parameter
        # instead of loc/scale.
        a = 2.0
        dist = stats.gamma
        frozen = stats.gamma(a)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, a)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(a)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(a)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(a)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(a)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(a)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, a)
        assert_equal(result_f, result)

        assert_equal(frozen.a, frozen.dist.a)
        assert_equal(frozen.b, frozen.dist.b)

    def test_regression_ticket_1293(self):
        # Create a frozen distribution.
        frozen = stats.lognorm(1)
        # Call one of its methods that does not take any keyword arguments.
        m1 = frozen.moment(2)
        # Now call a method that takes a keyword argument.
        frozen.stats(moments='mvsk')
        # Call moment(2) again.
        # After calling stats(), the following was raising an exception.
        # So this test passes if the following does not raise an exception.
        m2 = frozen.moment(2)
        # The following should also be true, of course.  But it is not
        # the focus of this test.
        assert_equal(m1, m2)

    def test_ab(self):
        # test that the support of a frozen distribution
        # (i) remains frozen even if it changes for the original one
        # (ii) is actually correct if the shape parameters are such that
        #      the values of [a, b] are not the default [0, inf]
        # take a genpareto as an example where the support
        # depends on the value of the shape parameter:
        # for c > 0: a, b = 0, inf
        # for c < 0: a, b = 0, -1/c
        # NOTE(review): relies on this scipy version mutating class-level
        # a/b -- a private implementation detail.
        rv = stats.genpareto(c=-0.1)
        a, b = rv.dist.a, rv.dist.b
        assert_equal([a, b], [0., 10.])
        assert_equal([rv.a, rv.b], [0., 10.])

        stats.genpareto.pdf(0, c=0.1)  # this changes genpareto.b
        assert_equal([rv.dist.a, rv.dist.b], [a, b])
        assert_equal([rv.a, rv.b], [a, b])

        rv1 = stats.genpareto(c=0.1)
        assert_(rv1.dist is not rv.dist)

    def test_rv_frozen_in_namespace(self):
        # Regression test for gh-3522
        assert_(hasattr(stats.distributions, 'rv_frozen'))

    def test_random_state(self):
        # only check that the random_state attribute exists,
        frozen = stats.norm()
        assert_(hasattr(frozen, 'random_state'))

        # ... that it can be set,
        frozen.random_state = 42
        assert_equal(frozen.random_state.get_state(),
                     np.random.RandomState(42).get_state())

        # ... and that .rvs method accepts it as an argument
        rndm = np.random.RandomState(1234)
        frozen.rvs(size=8, random_state=rndm)

    def test_expect(self):
        # smoke test the expect method of the frozen distribution
        # only take a gamma w/loc and scale and poisson with loc specified
        def func(x):
            # Identity integrand: expect() then computes the mean.
            return x

        gm = stats.gamma(a=2, loc=3, scale=4)
        gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
        gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
                                       lb=1, ub=2, conditional=True)
        assert_allclose(gm_val, gamma_val)

        p = stats.poisson(3, loc=4)
        p_val = p.expect(func)
        poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
        assert_allclose(p_val, poisson_val)
class TestExpect(TestCase):
# Test for expect method.
#
# Uses normal distribution and beta distribution for finite bounds, and
# hypergeom for discrete distribution with finite support
def test_norm(self):
v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
assert_almost_equal(v, 4, decimal=14)
m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
assert_almost_equal(m, 5, decimal=14)
lb = stats.norm.ppf(0.05, loc=5, scale=2)
ub = stats.norm.ppf(0.95, loc=5, scale=2)
prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
assert_almost_equal(prob90, 0.9, decimal=14)
prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
conditional=True)
assert_almost_equal(prob90c, 1., decimal=14)
def test_beta(self):
# case with finite support interval
v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10,5),
loc=5, scale=2)
assert_almost_equal(v, 1./18., decimal=13)
m = stats.beta.expect(lambda x: x, args=(10,5), loc=5., scale=2.)
assert_almost_equal(m, 19/3., decimal=13)
ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
prob90 = stats.beta.expect(lambda x: 1., args=(10,10), loc=5.,
scale=2.,lb=lb, ub=ub, conditional=False)
assert_almost_equal(prob90, 0.9, decimal=13)
prob90c = stats.beta.expect(lambda x: 1, args=(10,10), loc=5,
scale=2, lb=lb, ub=ub, conditional=True)
assert_almost_equal(prob90c, 1., decimal=13)
def test_hypergeom(self):
# test case with finite bounds
# without specifying bounds
m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
assert_almost_equal(m, m_true, decimal=13)
v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5.)
assert_almost_equal(v, v_true, decimal=14)
# with bounds, bounds equal to shifted support
v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5., lb=5, ub=13)
assert_almost_equal(v_bounds, v_true, decimal=14)
# drop boundary points
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
loc=5., lb=6, ub=12)
assert_almost_equal(prob_bounds, prob_true, decimal=13)
# conditional
prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
lb=6, ub=12, conditional=True)
assert_almost_equal(prob_bc, 1, decimal=14)
# check simple integral
prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
lb=0, ub=8)
assert_almost_equal(prob_b, 1, decimal=13)
def test_poisson(self):
# poisson, use lower bound only
prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
conditional=False)
prob_b_true = 1-stats.poisson.cdf(2,2)
assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
conditional=True)
assert_almost_equal(prob_lb, 1, decimal=14)
def test_genhalflogistic(self):
    # genhalflogistic, changes upper bound of support in _argcheck
    # regression test for gh-2622: a call with a different shape parameter
    # in between must not change the result of a repeated call (cached
    # support bounds must not leak between calls)
    halflog = stats.genhalflogistic
    # check consistency when calling expect twice with the same input
    res1 = halflog.expect(args=(1.5,))
    halflog.expect(args=(0.5,))  # intervening call with another shape
    res2 = halflog.expect(args=(1.5,))
    assert_almost_equal(res1, res2, decimal=14)
def test_rice_overflow(self):
    # rice.pdf(999, 0.74) was inf since special.i0 silently overflows
    # check that using i0e fixes it
    assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
    # expect() integrates the pdf, so these must stay finite as well
    assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
    assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
    assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
class TestNct(TestCase):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
# For negative values c and for c=0 results of rv.cdf(0) below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4,7)[:,None], np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def text_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(TestCase):

    def test_rice_zero_b(self):
        """The rice distribution must accept b=0 (cf gh-2164)."""
        xs = [0.2, 1., 5.]
        for fn in (stats.rice.pdf, stats.rice.logpdf,
                   stats.rice.cdf, stats.rice.logcdf):
            assert_(np.isfinite(fn(xs, b=0.)).all())
        quantiles = [0.1, 0.1, 0.5, 0.9]
        assert_(np.isfinite(stats.rice.ppf(quantiles, b=0.)).all())
        moments = stats.rice.stats(0, moments='mvsk')
        assert_(np.isfinite(moments).all())
        # furthermore, pdf is continuous as b -> 0:
        # rice.pdf(x, b -> 0) = x exp(-x^2/2) + O(b^2)
        # see e.g. Abramovich & Stegun 9.6.7 & 9.6.10
        eps = 1e-8
        assert_allclose(stats.rice.pdf(xs, 0), stats.rice.pdf(xs, eps),
                        atol=eps, rtol=0)

    def test_rice_rvs(self):
        """Sampling honours the requested output shape."""
        sampler = stats.rice.rvs
        assert_equal(sampler(b=3.).size, 1)
        assert_equal(sampler(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(TestCase):
    def test_erlang_runtimewarning(self):
        # erlang should generate a RuntimeWarning if a non-integer
        # shape parameter is used.
        with warnings.catch_warnings():
            # escalate RuntimeWarning to an error so assert_raises sees it
            warnings.simplefilter("error", RuntimeWarning)

            # The non-integer shape parameter 1.3 should trigger a RuntimeWarning
            assert_raises(RuntimeWarning,
                          stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)

            # Calling the fit method with `f0` set to an integer should
            # *not* trigger a RuntimeWarning. It should return the same
            # values as gamma.fit(...).
            data = [0.5, 1.0, 2.0, 4.0]
            result_erlang = stats.erlang.fit(data, f0=1)
            result_gamma = stats.gamma.fit(data, f0=1)
            assert_allclose(result_erlang, result_gamma, rtol=1e-3)
class TestExponWeib(TestCase):

    def test_pdf_logpdf(self):
        # Regression test for gh-3508.
        x, a, c = 0.1, 1.0, 100.0
        pdf_val = stats.exponweib.pdf(x, a, c)
        logpdf_val = stats.exponweib.logpdf(x, a, c)
        # Expected values were computed with mpmath.
        assert_allclose([pdf_val, logpdf_val],
                        [1.0000000000000054e-97, -223.35075402042244])

    def test_a_is_1(self):
        # For issue gh-3508: when a=1 the exponweib pdf and logpdf must
        # coincide with weibull_min.
        x = np.logspace(-4, -1, 4)
        a, c = 1, 100
        assert_allclose(stats.exponweib.pdf(x, a, c),
                        stats.weibull_min.pdf(x, c))
        assert_allclose(stats.exponweib.logpdf(x, a, c),
                        stats.weibull_min.logpdf(x, c))

    def test_a_is_1_c_is_1(self):
        # When a = 1 and c = 1, the distribution is exponential.
        x = np.logspace(-8, 1, 10)
        a = c = 1
        assert_allclose(stats.exponweib.pdf(x, a, c), stats.expon.pdf(x))
        assert_allclose(stats.exponweib.logpdf(x, a, c),
                        stats.expon.logpdf(x))
class TestRdist(TestCase):
    # Marked slow: the ppf/cdf round trip below is expensive for large c.
    @dec.slow
    def test_rdist_cdf_gh1285(self):
        # check workaround in rdist._cdf for issue gh-1285.
        distfn = stats.rdist
        values = [0.001, 0.5, 0.999]
        # cdf(ppf(q)) must round-trip back to q even for shape c = 541
        assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
                            values, decimal=5)
def test_540_567():
    """Regression test for NaNs returned by norm.cdf (tickets 540, 567)."""
    cases = [
        ((-1.7624320982,), {}, 0.03899815971089126),
        ((-1.7624320983,), {}, 0.038998159702449846),
        ((1.38629436112,), dict(loc=0.950273420309,
                                scale=0.204423758009), 0.98353464004309321),
    ]
    for args, kwargs, expected in cases:
        assert_almost_equal(stats.norm.cdf(*args, **kwargs), expected,
                            decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
    # The following was raising an exception, because _construct_default_doc()
    # did not handle the default keyword extradoc=None. See ticket #1316.
    # Constructing the generator without error is the whole test.
    g = stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
    """chi2.pdf(0, 2) must be 0.5, not nan from a 0*log(0) term."""
    value = stats.chi2.pdf(0.0, 2)
    assert_almost_equal(value, 0.5, 14)
def test_regression_tukey_lambda():
    # Make sure that Tukey-Lambda distribution correctly handles non-positive lambdas.
    x = np.linspace(-5.0, 5.0, 101)
    # divide warnings are expected for lam <= 0; silence them for the test
    olderr = np.seterr(divide='ignore')
    try:
        for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
            # for lam <= 0 the support is the whole real line, so the pdf
            # must be nonzero and finite everywhere on x
            p = stats.tukeylambda.pdf(x, lam)
            assert_((p != 0.0).all())
            assert_(~np.isnan(p).all())
        # mixed-sign column vector of shapes; lam=2.0 has bounded support,
        # so part of its pdf row is legitimately zero (checked below)
        lam = np.array([[-1.0], [0.0], [2.0]])
        p = stats.tukeylambda.pdf(x, lam)
    finally:
        np.seterr(**olderr)
    assert_(~np.isnan(p).all())
    assert_((p[0] != 0.0).all())
    assert_((p[1] != 0.0).all())
    # lam=2.0 row: nonzero inside the support, zero outside it
    assert_((p[2] != 0.0).any())
    assert_((p[2] == 0.0).any())
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_regression_ticket_1421():
    # poisson is discrete: its generated docstring must advertise pmf,
    # not the continuous-distribution pdf signature
    assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
    assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
    """A nan argument/shape must propagate nan instead of raising."""
    with np.errstate(invalid='ignore'):
        # continuous: nan shape parameter for the t distribution
        for meth in ('logcdf', 'cdf', 'logsf', 'sf', 'pdf', 'logpdf',
                     'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.t, meth)(1, np.nan)))
        # discrete: nan argument for bernoulli
        for meth in ('logcdf', 'cdf', 'logsf', 'sf', 'pmf', 'logpmf',
                     'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.bernoulli, meth)(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
    # fit() must honour frozen parameters (f0/floc/fscale).
    # NOTE: the RNG seeds and call order below are load-bearing; the fitted
    # values are compared (loosely) against the generating parameters.
    np.random.seed(5678)
    true = np.array([0.25, 0., 0.5])
    x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
    # silence divide warnings raised inside the lognorm fit
    olderr = np.seterr(divide='ignore')
    try:
        params = np.array(stats.lognorm.fit(x, floc=0.))
    finally:
        np.seterr(**olderr)
    assert_almost_equal(params, true, decimal=2)
    params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
    assert_almost_equal(params, true, decimal=2)
    params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
    assert_almost_equal(params, true, decimal=2)
    params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
    assert_almost_equal(params, true, decimal=2)
    np.random.seed(5678)
    loc = 1
    floc = 0.9
    x = stats.norm.rvs(loc, 2., size=100)
    params = np.array(stats.norm.fit(x, floc=floc))
    # with loc frozen, the ML estimate of scale is the RMS deviation
    # about floc (not about the sample mean)
    expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
    assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
    # Check the starting value works for Cauchy distribution fit.
    # NOTE: the seed is load-bearing -- expected values correspond to it.
    np.random.seed(654321)
    rvs = stats.cauchy.rvs(size=100)
    params = stats.cauchy.fit(rvs)
    # loose 1-decimal comparison: only checks the fit converges sanely
    expected = (0.045, 1.142)
    assert_almost_equal(params, expected, decimal=1)
def test_tukeylambda_stats_ticket_1545():
    """Variance and kurtosis of the Tukey lambda distribution.

    See test_tukeylamdba_stats.py for more tests.  The lam=0 values are
    exact; the others were computed with mpmath.
    """
    cases = [
        (0, [0, np.pi**2/3, 0, 1.2]),
        (3.13, [0, 0.0269220858861465102, 0, -0.898062386219224104]),
        (0.14, [0, 2.11029702221450250, 0, -0.02708377353223019456]),
    ]
    for lam, expected in cases:
        mvsk = stats.tukeylambda.stats(lam, moments='mvsk')
        assert_almost_equal(mvsk, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
    """logpmf must stay finite deep in the tail (k=1500, mu=200)."""
    tail_value = stats.poisson.logpmf(1500, 200)
    assert_(np.isfinite(tail_value))
def test_powerlaw_stats():
    """Test the powerlaw stats function (regression test for ticket 1548).

    Exact values, with mu = a/(a+1) and E[X**k] = a/(a+k):
        variance:  sigma**2 = a / ((a + 2) * (a + 1) ** 2)
        skewness:  gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
        excess kurtosis:
                   gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
    (The skewness follows from gamma_1 = mu_3 / sigma**3; the kurtosis
    from mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4)),
    see http://en.wikipedia.org/wiki/Skewness and .../Kurtosis.)
    """
    exact = {
        1.0: (0.5, 1./12, 0.0, -1.2),
        2.0: (2./3, 2./36, -0.56568542494924734, -0.6),
    }
    for a in sorted(exact):
        computed = stats.powerlaw.stats(a, moments="mvsk")
        assert_array_almost_equal(computed, exact[a])
def test_powerlaw_edge():
    """Regression test for gh-3986: logpdf at the support edge x=0, a=1."""
    edge = stats.powerlaw.logpdf(0, 1)
    assert_equal(edge, 0.0)
def test_exponpow_edge():
    """Regression test for gh-3982: pdf/logpdf of exponpow at x = 0."""
    assert_equal(stats.exponpow.logpdf(0, 1), 0.0)
    # the limit at x=0 depends on the shape parameter b
    shapes = [0.25, 1.0, 1.5]
    assert_equal(stats.exponpow.pdf(0, shapes), [np.inf, 1.0, 0.0])
    assert_equal(stats.exponpow.logpdf(0, shapes), [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
    """Regression test for gh-3985: gengamma.pdf(0, 1, 1) must be 1."""
    edge = stats.gengamma.pdf(0, 1, 1)
    assert_equal(edge, 1.0)
def test_ksone_fit_freeze():
    # Regression test for ticket #1638: ksone.fit used to hang (freeze) on
    # this data set.  Completing the call without hanging is the test; the
    # fitted values themselves are not checked.
    d = np.array(
        [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
         -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
         0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
         0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
         0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
         0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
         -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
         -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
         -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
         -0.06037974, 0.37670779, -0.21684405])
    try:
        # the fit legitimately emits numeric warnings; only the hang matters
        olderr = np.seterr(invalid='ignore')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            warnings.simplefilter('ignore', RuntimeWarning)
            stats.ksone.fit(d)
    finally:
        np.seterr(**olderr)
def test_norm_logcdf():
    """Precision of norm.logcdf deep in the lower tail (ticket 1614)."""
    points = -np.asarray(list(range(0, 120, 4)))
    # Values from R
    expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
                -131.69539607, -203.91715537, -292.09872100, -396.25241451,
                -516.38564863, -652.50322759, -804.60844201, -972.70364403,
                -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
                -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
                -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
                -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
                -6277.63751711, -6733.67260303]
    # divide-by-zero warnings are expected in the extreme tail
    with np.errstate(divide='ignore'):
        assert_allclose(stats.norm().logcdf(points), expected, atol=1e-8)
def test_levy_cdf_ppf():
    """levy.cdf accuracy for small arguments, and the ppf round trip.

    Expected values were calculated separately with mpmath, e.g.
        >>> mpmath.mp.dps = 100
        >>> x = mpmath.mp.mpf('0.01')
        >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
    """
    points = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
    reference = np.array([0.9747728793699604,
                          0.3173105078629141,
                          0.1572992070502851,
                          0.0015654022580025495,
                          1.523970604832105e-23,
                          1.795832784800726e-219])
    assert_allclose(stats.levy.cdf(points), reference, rtol=1e-10)
    # ppf(reference) should take us back to the original points
    roundtrip = stats.levy.ppf(reference)
    assert_allclose(roundtrip, points, rtol=1e-13)
def test_hypergeom_interval_1802():
    """hypergeom.interval must terminate (these inputs used to loop)."""
    # these two had endless loops
    for alpha in (.95, .945):
        assert_equal(stats.hypergeom.interval(alpha, 187601, 43192, 757),
                     (152.0, 197.0))
    # this was working also before
    assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
                 (153.0, 196.0))
    # degenerate case .a == .b
    assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
    assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
    # Check that a TypeError is raised when too many args are given to a method
    # Regression test for ticket 1815.
    x = np.linspace(0.1, 0.7, num=5)
    # gamma takes one shape parameter; positional loc/scale are allowed,
    # but mixing them with the keyword forms must raise
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)

    # These should not give errors
    stats.gamma.pdf(x, 2, 3)  # loc=3
    stats.gamma.pdf(x, 2, 3, 4)  # loc=3, scale=4
    stats.gamma.stats(2., 3)
    stats.gamma.stats(2., 3, 4)
    stats.gamma.stats(2., 3, 4, 'mv')
    stats.gamma.rvs(2., 3, 4, 5)
    stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)

    # Also for a discrete distribution
    stats.geom.pmf(x, 2, loc=3)  # no error, loc=3
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)

    # And for distributions with 0, 2 and 3 args respectively
    assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
    stats.ncf.pdf(x, 3, 4, 5, 6, 1.0)  # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
    # Trac #955 -- check that the cdf computed by special functions
    # matches the generic integrated-pdf fallback (_cdfvec)
    a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
    b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
    # loose tolerance: the two code paths agree only to ~3 digits here
    assert_allclose(a, b, rtol=1e-3, atol=0)
def test_foldnorm_zero():
    """Parameter value c=0 was not enabled, see gh-2399."""
    frozen = stats.foldnorm(0, scale=1)
    assert_equal(frozen.cdf(0), 0)  # previously returned nan
def test_stats_shapes_argcheck():
    # stats method was failing for vector shapes if some of the values
    # were outside of the allowed range, see gh-2678
    mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5)  # 0 is not a legal `a`
    mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
    # illegal leading shape value should just produce a leading nan
    mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
    assert_equal(mv2_augmented, mv3)

    mv3 = stats.lognorm.stats([2, 2.4, -1])  # -1 is not a legal shape parameter
    mv2 = stats.lognorm.stats([2, 2.4])
    # illegal trailing shape value should just produce a trailing nan
    mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
    assert_equal(mv2_augmented, mv3)

    # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
    # stats method with multiple shape parameters is not properly vectorized
    # anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes

class _distr_gen(stats.rv_continuous):
    # minimal subclass: constant (unnormalized) _pdf with one shape 'a'
    def _pdf(self, x, a):
        return 42


class _distr2_gen(stats.rv_continuous):
    # subclass defined through _cdf only (pdf derived numerically)
    def _cdf(self, x, a):
        return 42 * a + x


class _distr3_gen(stats.rv_continuous):
    # deliberately broken: _pdf and _cdf disagree on shape parameters
    def _pdf(self, x, a, b):
        return a + b

    def _cdf(self, x, a):
        # Different # of shape params from _pdf, to be able to check that
        # inspection catches the inconsistency."""
        return 42 * a + x


class _distr6_gen(stats.rv_continuous):
    # Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
    def _pdf(self, x, a, b):
        return a*x + b

    def _cdf(self, x, a, b):
        return 42 * a + x
class TestSubclassingExplicitShapes(TestCase):
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling" is
# ignoring *args and looking for ``extra_kwarg`` and using that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
def shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(TestCase):
    # Construct a distribution w/o explicit shapes parameter and test it.
    def test_only__pdf(self):
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.pdf(1, a=1), 42)

    def test_only__cdf(self):
        # _pdf is determined from _cdf by taking numerical derivative
        dummy_distr = _distr2_gen(name='dummy')
        assert_almost_equal(dummy_distr.pdf(1, a=1), 1)

    @dec.skipif(DOCSTRINGS_STRIPPED)
    def test_signature_inspection(self):
        # check that _pdf signature inspection works correctly, and is used in
        # the class docstring
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 1)
        assert_equal(dummy_distr.shapes, 'a')
        # the inferred shape 'a' must show up in the generated docstring
        res = re.findall('logpdf\(x, a, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    @dec.skipif(DOCSTRINGS_STRIPPED)
    def test_signature_inspection_2args(self):
        # same for 2 shape params and both _pdf and _cdf defined
        dummy_distr = _distr6_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 2)
        assert_equal(dummy_distr.shapes, 'a, b')
        res = re.findall('logpdf\(x, a, b, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    def test_signature_inspection_2args_incorrect_shapes(self):
        # both _pdf and _cdf defined, but shapes are inconsistent: raises
        try:
            _distr3_gen(name='dummy')
        except TypeError:
            pass
        else:
            raise AssertionError('TypeError not raised.')

    def test_defaults_raise(self):
        # default arguments should raise
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a=42):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_starargs_raise(self):
        # without explicit shapes, *args are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, *args):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_kwargs_raise(self):
        # without explicit shapes, **kwargs are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, **kwargs):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_docstrings():
    # regexes that match malformed generated docstrings: empty parameter
    # slots (',,' or '(,') or a line starting with a bare ':'
    badones = [',\s*,', '\(\s*,', '^\s*:']
    for distname in stats.__all__:
        dist = getattr(stats, distname)
        if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
            for regex in badones:
                assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
    """Infinite arguments must yield the limiting probabilities, not nan."""
    sf_at_inf = stats.skellam.sf(np.inf, 10, 11)
    assert_almost_equal(sf_at_inf, 0)
    cdf_at_inf = stats.ncx2._cdf(np.inf, 8, 0.1)
    assert_almost_equal(cdf_at_inf, 1)
def test_lomax_accuracy():
    """Regression test for gh-4033: ppf(cdf(x)) must round-trip tiny x."""
    tiny = 1e-100
    roundtrip = stats.lomax.ppf(stats.lomax.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_gompertz_accuracy():
    """Regression test for gh-4031: ppf(cdf(x)) must round-trip tiny x."""
    tiny = 1e-100
    roundtrip = stats.gompertz.ppf(stats.gompertz.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_truncexpon_accuracy():
    """Regression test for gh-4035: ppf(cdf(x)) must round-trip tiny x."""
    tiny = 1e-100
    roundtrip = stats.truncexpon.ppf(stats.truncexpon.cdf(tiny, 1), 1)
    assert_allclose(roundtrip, tiny)
def test_rayleigh_accuracy():
    """Regression test for gh-4034: isf(sf(x)) round trip in the far tail."""
    roundtrip = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
    assert_almost_equal(roundtrip, 9.0, decimal=15)
if __name__ == "__main__":
    # allow running this test file directly via numpy.testing's runner
    run_module_suite()
|
nvoron23/scipy
|
scipy/stats/tests/test_distributions.py
|
Python
|
bsd-3-clause
| 85,009
|
[
"Gaussian"
] |
66d3906ed2f622aa926efbeb639f254ae7821b676dd8d3caf3cf9397a99fcde3
|
#!/usr/bin/env python
# encoding: utf-8
"""Description: pgem parallel test on UFT test fixture.
Currently supports 4 duts in parallel.
"""
__version__ = "0.1"
__author__ = "@fanmuzhi, @boqiling"
__all__ = ["Channel", "ChannelStates"]
import sys
from UFT.devices import pwr, load, aardvark
from UFT.devices import erie
from UFT.models import DUT_STATUS, DUT, Cycle, PGEMBase, Diamond4
from UFT.backend import load_config, load_test_item, get_latest_revision
from UFT.backend.session import SessionManager
from UFT.backend import simplexml
from UFT.config import *
import threading
from Queue import Queue
import logging
import time
import os
import traceback
import datetime
logger = logging.getLogger(__name__)
class ChannelStates(object):
    """Symbolic state codes for the Channel worker's test state machine.

    The numeric values are distinct tags exchanged through the work queue;
    the constant names describe the test phase each code stands for.
    """
    EXIT = -1       # sentinel: leave the work loop
    INIT = 0x0A
    LOAD_DISCHARGE = 0x0C
    CHARGE = 0x0E
    PROGRAM_VPD = 0x0F
    CHECK_CAPACITANCE = 0x1A
    CHECK_ENCRYPTED_IC = 0x1B
    CHECK_TEMP = 0x1C
    DUT_DISCHARGE = 0x1D
    CHECK_POWER_FAIL = 0x1E
    RECHARGE = 0x1F
    CHECK_VPD = 0x10
    HOLD = 0x0D
class BOARD_STATUS(object):
    """Overall result codes for one channel/board."""
    Idle = 0  # wait to test
    Pass = 1  # pass the test
    Fail = 2  # fail in test
    Running = 8
class Channel(threading.Thread):
def __init__(self, name, barcode_list, cable_barcodes_list, capacitor_barcodes_list, mode4in1, channel_id=0):
    """initialize channel
    :param name: thread name
    :param barcode_list: list of 2D barcode of dut.
    :param cable_barcodes_list: cable barcode per slot (parallel to
        barcode_list).
    :param capacitor_barcodes_list: capacitor barcode per slot (parallel
        to barcode_list).
    :param mode4in1: True when the fixture runs master port + shared port
        ("4 in 1") mode.
    :param channel_id: channel ID, from 0 to 7
    :return: None
    """
    # channel number for mother board.
    # 8 mother boards can be stacked from 0 to 7.
    # use 1 motherboard in default.
    self.channel = channel_id
    self.channelresult = BOARD_STATUS.Idle
    self.dutnumber = 0
    # product type setting
    self.producttype=''
    # Amber 4x/e uses master port + shared port mode
    self.InMode4in1 = mode4in1
    # setup dut_list
    self.dut_list = []
    self.config_list = []
    self.barcode_list = barcode_list
    self.cable_barcodes_list = cable_barcodes_list
    self.capacitor_barcodes_list = capacitor_barcodes_list
    # progress bar, 0 to 100
    self.progressbar = 0
    # counter, to calculate charge and discharge time based on interval
    self.counter = 0
    # pre-discharge current in amps.
    # NOTE(review): the original comment said "default to 0.8A" but the
    # code sets 2.0 -- confirm which value is intended.
    self.current = 2.0
    # exit flag and queue for threading
    self.exit = False
    self.queue = Queue()
    super(Channel, self).__init__(name=name)
def read_volt(self, dut):
    """Return the measured capacitor voltage of *dut*."""
    return dut.meas_vcap()
def init(self):
    """ hardware initialize in when work loop starts.

    Brings up the Erie fixture board for this channel, then the aardvark
    adapter, DC load and power supply on top of it; switches every slot
    off; finally scans barcode_list and builds dut_list/config_list
    (None entries for empty slots).
    :return: None.
    """
    # setup load
    #self.ld.reset()
    #time.sleep(2)
    logger.info("Initiate Hardware of Channel {0}...".format(self.channel))
    # first setup erie -- each channel maps to a fixed serial port/board id
    if self.channel == 0:
        self.erie = erie.Erie(port=ERIE_NO1, boardid=1)
    elif self.channel == 1:
        self.erie = erie.Erie(port=ERIE_NO2, boardid=2)
    elif self.channel == 2:
        self.erie = erie.Erie(port=ERIE_NO3, boardid=3)
    elif self.channel == 3:
        self.erie = erie.Erie(port=ERIE_NO4, boardid=4)
    # aardvark (I2C adapter) runs through the erie board
    self.adk = aardvark.Adapter(self.erie)
    # setup load
    self.ld = load.DCLoad(self.erie)
    # setup main power supply
    self.ps = pwr.PowerSupply(self.erie)
    logger.info("mode 4in1 is {0}".format(self.InMode4in1))
    # start from a known-safe state: every slot's load off, power off, LED off
    for slot in range(TOTAL_SLOTNUM):
        self.ld.select_channel(slot)
        self.ld.input_off()
        #time.sleep(1)
        #self.ld.protect_on()
        #self.ld.change_func(load.DCLoad.ModeCURR)
        #time.sleep(1)
        self.ps.selectChannel(slot)
        self.ps.deactivateOutput()
        self.erie.LedOff(slot)
    # setup power supply
    #self.ps.selectChannel(node=PS_ADDR, ch=PS_CHAN)
    # NOTE(review): `setting` is built but the set()/activateOutput() calls
    # below are commented out -- the supply is currently left deactivated.
    setting = {"volt": PS_VOLT, "curr": PS_CURR,
               "ovp": PS_OVP, "ocp": PS_OCP}
    #self.ps.set(setting)
    #self.ps.activateOutput()
    time.sleep(1)
    #volt = self.ps.measureVolt()
    #curr = self.ps.measureCurr()
    '''
    if not ((PS_VOLT - 1) < volt < (PS_VOLT + 1)):
        self.ps.setVolt(0.0)
        logging.error("Power Supply Voltage {0} "
                      "is not in range".format(volt))
        raise AssertionError("Power supply voltage is not in range")
    if not (curr >= 0):
        self.ps.setVolt(0.0)
        logging.error("Power Supply Current {0} "
                      "is not in range".format(volt))
        raise AssertionError("Power supply current is not in range")
    '''
    # setup dut_list
    for i, bc in enumerate(self.barcode_list):
        if bc != "":
            # dut is present
            dut = PGEMBase(device=self.adk,
                           slot=i,
                           barcode=bc)
            logger.info("dut: {0} SN is {1}"
                        .format(dut.slotnum, bc))
            # 4-in-1 mode and the part number's capability must agree
            # (OVERRIDE only relaxes the "needs 4in1" direction)
            if self.InMode4in1:
                if dut.partnumber not in Mode4in1_PN:
                    raise Exception("This partnumber {0} does not support Mode4in1".format(dut.partnumber))
            else:
                if dut.partnumber in Mode4in1_PN:
                    if not OVERRIDE:
                        raise Exception("This partnumber {0} NEED Mode4in1".format(dut.partnumber))
            dut.status = DUT_STATUS.Idle
            dut.cable_barcode = self.cable_barcodes_list[i]
            dut.capacitor_barcode = self.capacitor_barcodes_list[i]
            dut.testdate = datetime.datetime.now()
            self.dut_list.append(dut)
            dut_config = load_config("sqlite:///" + CONFIG_DB,
                                     dut.partnumber, dut.revision)
            self.config_list.append(dut_config)
            self.set_productype(dut.slotnum, dut.producttype)
            # unless OVERRIDE is set, a stale config revision fails the dut
            latest_revision = get_latest_revision("sqlite:///" + CONFIG_DB,
                                                  dut.partnumber)
            logger.info("dut: {0} has the latest revision of this partnumber is {1}"
                        .format(dut.slotnum, latest_revision))
            if latest_revision != dut.revision:
                if not OVERRIDE:
                    dut.errormessage = "Not the latest revision"
                    dut.status = DUT_STATUS.Fail
            self.channelresult = BOARD_STATUS.Running
            self.dutnumber += 1
        else:
            # dut is not loaded on fixture; keep list indices aligned
            self.dut_list.append(None)
            self.config_list.append(None)
def _check_hardware_ready_(self, dut):
for i in range(5):
if dut.read_hwready():
return True
time.sleep(1)
return False
def _turn_off_load(self, slot):
self.ld.select_channel(slot)
self.ld.input_off()
if self.InMode4in1:
for i in range(1, 4):
self.ld.select_channel(slot + i)
self.ld.input_off()
def _turn_on_power(self, slot):
self.ps.selectChannel(slot)
self.ps.activateOutput()
time.sleep(0.1)
if self.InMode4in1:
for i in range(1, 4):
self.ps.selectChannel(slot + i)
self.ps.activateOutput()
time.sleep(0.1)
def _turn_off_power(self, slot):
self.ps.selectChannel(slot)
self.ps.deactivateOutput()
if self.InMode4in1:
for i in range(1, 4):
self.ps.selectChannel(slot + i)
self.ps.deactivateOutput()
def set_productype(self, port, pt):
    """Program the Erie fixture with the product family for part number *pt*.

    :param port: dut slot number passed through to ``erie.SetProType``.
    :param pt: part number string (e.g. "AGIGA9821").
    :return: None.

    Sets ``self.producttype`` as a side effect.  Unknown part numbers are
    silently ignored (unchanged from the original if-chain behaviour).
    """
    # part number -> (log label, erie type code, producttype tag);
    # replaces the original chain of non-exclusive ``if`` statements
    family_map = {
        "AGIGA9821": ("Pearl family", 0x00, 'Pearl'),
        "AGIGA9822": ("Amber family ", 0x01, 'Amber'),
        "AGIGA9823": ("Amber family ", 0x01, 'Amber'),
        "AGIGA9824": ("Amber family ", 0x01, 'Amber'),
        "AGIGA9831": ("Garnet family ", 0x02, 'Garnet'),
        "AGIGA9832": ("Pearl2 family (as Amber by temporary) ", 0x01, 'Amber2'),
        "AGIGA9834": ("Jamber family (as Amber by temporary) ", 0x01, 'Jamber'),
    }
    if pt in family_map:
        label, type_code, tag = family_map[pt]
        logger.info("dut: {0} PN: {1} setting type: {2}".format(port, pt, label))
        self.erie.SetProType(port, type_code)
        self.producttype = tag
def charge_dut(self):
    """Charge every enabled DUT until it reaches its configured Vcap window.

    Phase 1 powers on each eligible slot and marks it Charging; phase 2
    polls all duts in a loop until every one is either passed (Idle),
    failed, or skipped.  A dut passes when the charger reports done,
    Vcap lies in [Threshold, Ceiling) and the elapsed time lies in
    (min, max).  Each poll is recorded as a Cycle on the dut.
    """
    power_on_delay = False
    # phase 1: power on every dut enabled for the "Charge" test item
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Charge")
        # print dut.slotnum
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        power_on_delay = True
        self._turn_on_power(dut.slotnum)
        # start charge
        dut.status = DUT_STATUS.Charging
    all_charged = False
    self.counter = 0
    start_time = time.time()
    if power_on_delay:
        time.sleep(5)  # let supplies settle before the first poll
    # phase 2: poll until no dut is left in the Charging state
    while (not all_charged):
        all_charged = True
        for dut in self.dut_list:
            try:
                if dut is None:
                    continue
                config = load_test_item(self.config_list[dut.slotnum],
                                        "Charge")
                if (not config["enable"]):
                    continue
                if (config["stoponfail"]) & \
                        (dut.status != DUT_STATUS.Charging):
                    continue
                # voltage window and time window come from the config;
                # strip unit suffixes like "7.5V" / "2A"
                threshold = float(config["Threshold"].strip("aAvV"))
                ceiling = float(config["Ceiling"].strip("aAvV"))
                max_chargetime = config["max"]
                min_chargetime = config["min"]
                self.switch_to_dut(dut.slotnum)
                if not self._check_hardware_ready_(dut):
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "DUT is not ready."
                # record one measurement cycle for this poll
                this_cycle = Cycle()
                this_cycle.vin = dut.meas_vin()
                this_cycle.counter = self.counter
                this_cycle.time = time.time()
                temperature = dut.check_temp()
                this_cycle.temp = temperature
                this_cycle.state = "charge"
                self.counter += 1
                self.ld.select_channel(dut.slotnum)
                this_cycle.vcap = dut.meas_vcap()
                chargestatue = dut.charge_status()
                charge_time = this_cycle.time - start_time
                dut.charge_time = charge_time
                if (temperature>50 or temperature<10):
                    # out-of-range temperature: terminal failure
                    all_charged &= True
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "Temperature out of range."
                elif (charge_time > max_chargetime):
                    all_charged &= True
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "Charge Time Too Long."
                elif (chargestatue):
                    # charger reports done: verify Vcap and time windows
                    if(ceiling > this_cycle.vcap >= threshold)&(max_chargetime>dut.charge_time>min_chargetime): #dut.meas_chg_time()
                        all_charged &= True
                        dut.status = DUT_STATUS.Idle  # pass
                    else:
                        dut.status = DUT_STATUS.Fail
                        dut.errormessage = "Charge Time or Vcap failed"
                else:
                    # still charging: force another polling round
                    all_charged &= False
                dut.cycles.append(this_cycle)
                logger.info("dut: {0} status: {1} vcap: {2} "
                            "temp: {3} charged: {4} message: {5} ".
                            format(dut.slotnum, dut.status, this_cycle.vcap,
                                   this_cycle.temp, chargestatue, dut.errormessage))
            except aardvark.USBI2CAdapterException:
                # I2C failure is terminal for this dut
                logger.info("dut: {0} IIC access failed.".
                            format(dut.slotnum))
                all_charged &= True
                dut.status = DUT_STATUS.Fail
                dut.errormessage = "IIC access failed."
        if not all_charged:
            time.sleep(INTERVAL)
def hold_power_on(self):
    """Keep DUT power applied for HOLD_TIME minutes before continuing.

    Sleeps in INTERVAL-second steps; always sleeps at least one
    interval, even when HOLD_TIME is zero.
    """
    logger.info("HOLD power on for {0} minutes ".format(HOLD_TIME))
    hold_seconds = HOLD_TIME * 60
    start_time = time.time()
    while True:
        time.sleep(INTERVAL)
        if time.time() - start_time > hold_seconds:
            break
def recharge_dut(self):
    """Recharge every enabled DUT after the discharge test.

    Same structure as charge_dut() but driven by the "Recharge" config
    item, without Cycle logging, and with two extra pass-side effects:
    power is cut once the dut passes, and the dut is shut down via the
    Erie controller when the config sets Shutdown == "Yes".
    """
    power_on_delay = False
    # phase 1: power on every dut enabled for the "Recharge" item
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Recharge")
        # print dut.slotnum
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        power_on_delay = True
        self._turn_on_power(dut.slotnum)
        # start charge
        dut.status = DUT_STATUS.Charging
    all_charged = False
    self.counter = 0
    start_time = time.time()
    if power_on_delay:
        time.sleep(5)  # supply settle time before first poll
    # phase 2: poll until no dut remains in the Charging state
    while (not all_charged):
        all_charged = True
        for dut in self.dut_list:
            try:
                shutdown=False
                if dut is None:
                    continue
                config = load_test_item(self.config_list[dut.slotnum],
                                        "Recharge")
                if (not config["enable"]):
                    continue
                if (config["stoponfail"]) & \
                        (dut.status != DUT_STATUS.Charging):
                    continue
                # voltage/time windows, unit suffixes stripped
                threshold = float(config["Threshold"].strip("aAvV"))
                ceiling = float(config["Ceiling"].strip("aAvV"))
                max_chargetime = config["max"]
                min_chargetime = config["min"]
                if config.get("Shutdown",False)=="Yes":
                    shutdown=True
                self.switch_to_dut(dut.slotnum)
                if not self._check_hardware_ready_(dut):
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "DUT is not ready."
                #this_cycle = Cycle()
                #this_cycle.vin = dut.meas_vin()
                #this_cycle.counter = self.counter
                #this_cycle.time = time.time()
                temperature = dut.check_temp()
                #this_cycle.temp = temperature
                #this_cycle.state = "charge"
                self.counter += 1
                self.ld.select_channel(dut.slotnum)
                vcap = dut.meas_vcap()
                chargestatue=dut.charge_status()
                charge_time = time.time() - start_time
                dut.charge_time = charge_time
                if (temperature>50 or temperature<10):
                    all_charged &= True
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "Temperature out of range."
                elif (charge_time > max_chargetime):
                    all_charged &= True
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "Charge Time Too Long."
                elif (chargestatue):
                    # charger done: verify windows, then cut power and
                    # optionally shut the dut down before marking pass
                    if(ceiling > vcap >= threshold)&(max_chargetime>dut.charge_time>min_chargetime): #dut.meas_chg_time()
                        all_charged &= True
                        self._turn_off_power(dut.slotnum)
                        if shutdown == True:
                            self.erie.ShutdownDUT(dut.slotnum)
                        dut.status = DUT_STATUS.Idle  # pass
                    else:
                        dut.status = DUT_STATUS.Fail
                        dut.errormessage = "Charge Time or Vcap failed"
                else:
                    # still charging: another polling round needed
                    all_charged &= False
                #dut.cycles.append(this_cycle)
                logger.info("dut: {0} status: {1} vcap: {2} "
                            "temp: {3} charged: {4} message: {5} ".
                            format(dut.slotnum, dut.status, vcap,
                                   temperature, chargestatue, dut.errormessage))
            except aardvark.USBI2CAdapterException:
                logger.info("dut: {0} IIC access failed.".
                            format(dut.slotnum))
                all_charged &= True
                dut.status = DUT_STATUS.Fail
                dut.errormessage = "IIC access failed."
        if not all_charged:
            time.sleep(INTERVAL)
def discharge_dut(self):
    """Discharge every enabled DUT through the electronic load and grade it.

    Sequence: cut input power; program the load current and enable the
    load; poll each dut until Vcap drops below Threshold (pass/fail by
    time window, boost voltage and GTG pin), then run a shutdown check
    on duts whose config requests Recharge == "Yes".
    """
    power_off_delay = False
    # phase 1: remove input power from every enabled dut
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Discharge")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        power_off_delay = True
        self._turn_off_power(dut.slotnum)
    if power_off_delay:
        time.sleep(2)
    # phase 2: program the discharge current and enable the load
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Discharge")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        self.ld.select_channel(dut.slotnum)
        self.current = float(config["Current"].strip("aAvV"))
        self.ld.set_curr(self.current)  # set discharge current
        self.ld.input_on()
        if self.InMode4in1:
            for i in range(1, 4):
                self.ld.select_channel(dut.slotnum + i)
                self.current = float(config["Current"].strip("aAvV"))
                self.ld.set_curr(self.current)  # set discharge current
                self.ld.input_on()
        dut.status = DUT_STATUS.Discharging
    # start discharge cycle
    all_discharged = False
    fast_loop = False  # Garnet only: drop the INTERVAL sleep near the threshold
    start_time = time.time()
    #self.ps.setVolt(0.0)
    # phase 3: poll until no dut remains Discharging
    while (not all_discharged):
        all_discharged = True
        for dut in self.dut_list:
            try:
                if dut is None:
                    continue
                config = load_test_item(self.config_list[dut.slotnum],
                                        "Discharge")
                if (not config["enable"]):
                    continue
                if (config["stoponfail"]) & \
                        (dut.status != DUT_STATUS.Discharging):
                    continue
                threshold = float(config["Threshold"].strip("aAvV"))
                max_dischargetime = config["max"]
                min_dischargetime = config["min"]
                self.switch_to_dut(dut.slotnum)
                # cap_in_ltc = dut.meas_capacitor()
                # print cap_in_ltc
                this_cycle = Cycle()
                this_cycle.vin = dut.meas_vin()
                temperature = dut.check_temp()
                this_cycle.temp = temperature
                this_cycle.counter = self.counter
                this_cycle.time = time.time()
                this_cycle.state = "discharge"
                self.ld.select_channel(dut.slotnum)
                this_cycle.vcap = dut.meas_vcap()
                # Garnet: poll without sleeping once Vcap is within
                # 0.2 V of the threshold, for a sharper cut-off
                if (this_cycle.vcap <= threshold + 0.2) & (fast_loop == False) & (self.producttype=='Garnet'):
                    fast_loop = True
                # this_cycle.vcap = self.ld.read_volt()
                self.counter += 1
                discharge_time = this_cycle.time - start_time
                dut.discharge_time = discharge_time
                if (temperature>50 or temperature<10):
                    all_discharged &= True
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "Temperature out of range."
                    self._turn_off_load(dut.slotnum)
                elif (discharge_time > max_dischargetime):
                    all_discharged &= True
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "Discharge Time Too Long."
                    self._turn_off_load(dut.slotnum)
                elif (this_cycle.vin < 4.4):
                    # boost converter should keep Vin up during discharge
                    all_discharged &= True
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "Boost voltage error."
                    self._turn_off_load(dut.slotnum)
                elif (this_cycle.vcap < threshold):
                    # discharge complete: grade by time and GTG pin
                    all_discharged &= True
                    self._turn_off_load(dut.slotnum)
                    if (discharge_time < min_dischargetime):
                        dut.status = DUT_STATUS.Fail
                        dut.errormessage = "Discharge Time Too Short."
                    else:
                        # GTG must be deasserted once Vcap is below threshold
                        if self.erie.GetGTGPin(dut.slotnum):
                            dut.status = DUT_STATUS.Fail
                            dut.errormessage = "GTG Pin check failed"
                        else:
                            dut.status = DUT_STATUS.Idle  # pass
                elif (self.producttype=='Garnet'):
                    # Garnet bypass check while still above threshold
                    all_discharged &= False
                    if (this_cycle.vcap > 5.5):
                        if (this_cycle.vcap - this_cycle.vin >= 0.3):
                            all_discharged &= True
                            dut.status = DUT_STATUS.Fail
                            dut.errormessage = "Bypass voltage error."
                            self._turn_off_load(dut.slotnum)
                else:
                    all_discharged &= False
                dut.cycles.append(this_cycle)
                logger.info("dut: {0} status: {1} vcap: {2} vout: {3} "
                            "temp: {4} message: {5} ".
                            format(dut.slotnum, dut.status, this_cycle.vcap, this_cycle.vin,
                                   this_cycle.temp, dut.errormessage))
            except aardvark.USBI2CAdapterException:
                logger.info("dut: {0} IIC access failed.".
                            format(dut.slotnum))
                all_discharged &= True
                dut.status = DUT_STATUS.Fail
                dut.errormessage = "IIC access failed."
                self._turn_off_load(dut.slotnum)
        if not all_discharged:
            if not fast_loop:
                time.sleep(INTERVAL)
    # check shutdown function
    # phase 4a: re-mark duts whose config requests the shutdown check
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Discharge")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        if config.get("Recharge", False) == "Yes":
            dut.status = DUT_STATUS.Discharging
    # phase 4b: issue ShutdownDUT and confirm I2C goes away
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Discharge")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & \
                (dut.status != DUT_STATUS.Discharging):
            continue
        fShutdownSuccessfull = False
        self.switch_to_dut(dut.slotnum)
        self.erie.ShutdownDUT(dut.slotnum)
        try:
            dut.meas_vcap()
        except aardvark.USBI2CAdapterException:
            # if iic exception occur, DUT shutdown already
            fShutdownSuccessfull = True
        if not fShutdownSuccessfull:
            dut.errormessage = "Shutdown function error."
            dut.status = DUT_STATUS.Fail
        else:
            dut.status = DUT_STATUS.Idle
        logger.info("Shutdown process...dut: {0} "
                    "status: {1} message: {2} ".
                    format(dut.slotnum, dut.status, dut.errormessage))
def program_dut(self):
    """ program vpd of DUT.

    Six sequential steps over all enabled duts: (1) present-pin check,
    (2) power on, (3) hardware-ready check, (3a) Vin sanity check
    (each shared port individually in 4-in-1 mode), (4) write the VPD
    image (plus shared-port copies at 0x54/0x55/0x56 in 4-in-1 mode),
    (5) restore power if it was cut, (6) ready check + controller reset.
    :return: None
    """
    # STEP 1: check Present Pin first for hardware exist
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Program_VPD")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        self.switch_to_dut(dut.slotnum)
        logger.info("Check PGEM Present Pin for slot {0}".format(dut.slotnum))
        if not self.erie.GetPresentPin(dut.slotnum):
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "PGEM Connection Issue"
            logger.info("dut: {0} status: {1} message: {2} ".
                        format(dut.slotnum, dut.status, dut.errormessage))
        if self.InMode4in1:
            # every shared port must also be present
            for i in range(1, 4):
                self.switch_to_dut(dut.slotnum + i)
                logger.info("Check PGEM Present Pin for slot {0}".format(dut.slotnum + i))
                if not self.erie.GetPresentPin(dut.slotnum + i):
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "PGEM Connection Issue"
                    logger.info("dut: {0} status: {1} message: {2} ".
                                format(dut.slotnum, dut.status, dut.errormessage))
    # STEP 2: turn power on
    power_on_delay = False
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Program_VPD")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        power_on_delay = True
        self.ps.selectChannel(dut.slotnum)  # no turning on shared port power coz Vin check
        self.ps.activateOutput()
        time.sleep(0.2)
    if power_on_delay:
        time.sleep(5)  # settle before the ready check
    # STEP 3: check hardware ready
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Program_VPD")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        self.switch_to_dut(dut.slotnum)
        logger.info("Check PGEM Hardware Ready for slot {0}".format(dut.slotnum))
        try:
            if not self._check_hardware_ready_(dut):
                dut.status = DUT_STATUS.Fail
                dut.errormessage = "DUT is not ready."
        except aardvark.USBI2CAdapterException:
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "IIC access failed."
        logger.info("dut: {0} status: {1} message: {2} ".
                    format(dut.slotnum, dut.status, dut.errormessage))
    # STEP 3a: check Vin
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Program_VPD")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        self.switch_to_dut(dut.slotnum)
        vin=dut.meas_vin()
        logger.info("dut: {0} measured Vin {1}".format(dut.slotnum, vin))
        # accepted Vin band is 10..13 V
        if 13<vin or 10>vin:
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "Vin error"
        else:
            if self.InMode4in1:
                # test each shared port Vin: power it on, power the
                # previous port off, and re-measure through the dut
                for i in range(1, 4):
                    self.ps.selectChannel(dut.slotnum + i)
                    self.ps.activateOutput()
                    time.sleep(0.1)
                    self.ps.selectChannel(dut.slotnum + i - 1)
                    self.ps.deactivateOutput()
                    time.sleep(1)
                    vin=dut.meas_vin()
                    logger.info("dut: {0} measured sharded port at {1} Vin {2}".format(dut.slotnum, i, vin))
                    if 13<vin or 10>vin:
                        dut.status = DUT_STATUS.Fail
                        dut.errormessage = "Vin error"
                    time.sleep(1)
                # turn on every port
                for i in range(0, 4):
                    self.ps.selectChannel(dut.slotnum + i)
                    self.ps.activateOutput()
                    time.sleep(0.1)
    # STEP 4: Program VPD
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Program_VPD")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        self.switch_to_dut(dut.slotnum)
        dut.status = DUT_STATUS.Program_VPD
        try:
            logger.info("dut: {0} start writing...".format(dut.slotnum))
            dut.write_vpd(config["File"])
            if self.InMode4in1:
                # figure out a non-sequence writing method for shared port, yield a non-sequence cable sequence
                # all three shared addresses (0x54/0x55/0x56) must be hit
                s0 = s1 = s2 = False
                for i in range(1, 4):
                    self.switch_to_dut(dut.slotnum + i)
                    addr = dut.write_shared_vpd(config["File"])
                    logger.info("shared port: {0} writed at address 0x{1:x}...".format(dut.slotnum + i, addr))
                    if addr == 0x54:
                        s0 = True
                    elif addr == 0x55:
                        s1 = True
                    elif addr == 0x56:
                        s2 = True
                if not(s0 and s1 and s2):
                    raise aardvark.USBI2CAdapterException
            dut.program_vpd = 1
            if config.get("Flush_EE",False)=="Yes":
                self.switch_to_dut(dut.slotnum)
                dut.flush_ee()
            else:
                # no flush: power-cycle to latch the new VPD
                self._turn_off_power(dut.slotnum)
                time.sleep(1)
        except AssertionError:
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "Programming VPD Fail"
            logger.info("dut: {0} status: {1} message: {2} ".
                        format(dut.slotnum, dut.status, dut.errormessage))
        except aardvark.USBI2CAdapterException:
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "IIC access failed."
            logger.info("dut: {0} status: {1} message: {2} ".
                        format(dut.slotnum, dut.status, dut.errormessage))
    # STEP 5: turn power on again if needed
    power_on_delay = False
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Program_VPD")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Program_VPD):
            continue
        self.ps.selectChannel(dut.slotnum)
        if not self.ps.isOutputOn():
            power_on_delay = True
            self.ps.activateOutput()
            time.sleep(0.1)
        if self.InMode4in1:
            for i in range(1, 4):
                self.ps.selectChannel(dut.slotnum + i)
                self.ps.activateOutput()
                time.sleep(0.1)
    if power_on_delay:
        time.sleep(5)
    # STEP 6: check hardware ready and perform RESET
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Program_VPD")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Program_VPD):
            continue
        self.switch_to_dut(dut.slotnum)
        try:
            if not self._check_hardware_ready_(dut):
                dut.status = DUT_STATUS.Fail
                dut.errormessage = "DUT is not ready."
            else:
                self.erie.ResetDUT(dut.slotnum)
                dut.status = DUT_STATUS.Idle
        except aardvark.USBI2CAdapterException:
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "IIC access failed."
        logger.info("dut: {0} status: {1} message: {2} ".
                    format(dut.slotnum, dut.status, dut.errormessage))
def check_vpd(self):
    """Verify the programmed VPD and the HW/FW version of every enabled DUT.

    Pass 1 confirms the power-supply output is still on for each slot
    (and each shared port in 4-in-1 mode); pass 2 reads the VPD back,
    checks its consistency, rejects hardware version '255', and when
    the config carries a "FWver" entry requires an exact firmware
    version match.
    """
    # pass 1: the supply must be on before VPD can be read back
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Program_VPD")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        if dut.status != DUT_STATUS.Idle:
            continue
        self.ps.selectChannel(dut.slotnum)
        if not self.ps.isOutputOn():
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "No Power output, STOP checking VPD"
        if self.InMode4in1:
            for i in range(1, 4):
                self.switch_to_dut(dut.slotnum + i)
                self.ps.selectChannel(dut.slotnum + i)
                if not self.ps.isOutputOn():
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "No Power output, STOP checking VPD"
    # pass 2: read back and verify
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Program_VPD")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        self.switch_to_dut(dut.slotnum)
        try:
            dut.read_vpd()
            if not dut.check_vpd():
                dut.status = DUT_STATUS.Fail
                dut.errormessage = "Checking VPD error."
            else:
                dut.hwver = dut.read_hw_version()
                logger.info("dut: {0} checking hardware version = {1}".format(dut.slotnum, dut.hwver))
                # '255' (0xFF) reads as unprogrammed hardware version
                if dut.hwver=='255':
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "HW ver error."
                else:
                    dut.fwver = dut.read_fw_version()
                    logger.info("dut: {0} checking firmware version = {1}".format(dut.slotnum, dut.fwver))
                    # firmware version is only enforced when configured
                    if config.get("FWver", False):
                        if not dut.fwver==config["FWver"]:
                            dut.status = DUT_STATUS.Fail
                            dut.errormessage = "FW ver error."
        except aardvark.USBI2CAdapterException:
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "IIC access failed."
def check_temperature_dut(self):
    """
    Check that each enabled DUT's on-board IC temperature lies inside
    the (min, max) window from the "Check_Temp" config item; mark the
    dut failed otherwise.
    :return: None.
    """
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Check_Temp")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        self.switch_to_dut(dut.slotnum)
        temperature = dut.check_temp()
        within_window = config["min"] < temperature < config["max"]
        if not within_window:
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "Temperature out of range."
        logger.info("dut: {0} status: {1} message: {2} ".
                    format(dut.slotnum, dut.status, dut.errormessage))
def switch_to_dut(self, slot):
    """Route the shared I2C adapter channel to the given DUT slot."""
    self.adk.select_channel(slot)
def calculate_capacitance(self):
    """ calculate the capacitance of DUT, based on vcap list in discharging.

    Three passes: (1) confirm supply output is still on for every slot
    (shared ports too in 4-in-1 mode); (2) kick off the on-board cap
    measurement (start_cap / start_cap_ext for Jamber) and poll
    PGEMSTAT.BIT2 until each dut finishes or times out, reading the
    measured value back from VPD address 0x100 and grading it against
    the config (min, max) window; (3) final GTG register / GTG pin
    verification on all passing duts.
    :return: capacitor value
    """
    # pass 1: supply must still be on before starting the measurement
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Capacitor")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        if dut.status != DUT_STATUS.Idle:
            continue
        self.ps.selectChannel(dut.slotnum)
        if not self.ps.isOutputOn():
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "No Power output, STOP cap measure"
        if self.InMode4in1:
            for i in range(1, 4):
                self.switch_to_dut(dut.slotnum + i)
                self.ps.selectChannel(dut.slotnum + i)
                if not self.ps.isOutputOn():
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "No Power output, STOP cap measure"
    # pass 2a: start the on-board measurement on each dut
    for dut in self.dut_list:
        if dut is None:
            continue
        config = load_test_item(self.config_list[dut.slotnum],
                                "Capacitor")
        if (not config["enable"]):
            continue
        if (config["stoponfail"]) & (dut.status != DUT_STATUS.Idle):
            continue
        if dut.status != DUT_STATUS.Idle:
            continue
        self.switch_to_dut(dut.slotnum)
        try:
            if (self.producttype=='Jamber'):
                # Jamber uses the external-capacitor start command
                dut.start_cap_ext()
            else:
                dut.start_cap()
        except aardvark.USBI2CAdapterException:
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "IIC access failed."
        #time.sleep(1)
        # NOTE(review): this unconditionally overwrites the Fail status
        # set in the except branch just above -- confirm intended.
        dut.status = DUT_STATUS.Cap_Measuring
        logger.info("started cap measure")
    #close load and set PS
    #self.ld.reset()
    #time.sleep(2)
    # setup power supply
    #self.ps.selectChannel(node=PS_ADDR, ch=PS_CHAN)
    setting = {"volt": PS_VOLT, "curr": PS_CURR,
               "ovp": PS_OVP, "ocp": PS_OCP}
    #self.ps.set(setting)
    #self.ps.activateOutput()
    time.sleep(1)
    start_time = time.time()
    # pass 2b: poll all duts until measurement completes or times out
    all_cap_mears=False
    while not all_cap_mears:
        all_cap_mears=True
        for dut in self.dut_list:
            try:
                if dut is None:
                    continue
                if dut.status != DUT_STATUS.Cap_Measuring:
                    continue
                self.switch_to_dut(dut.slotnum)
                config = load_test_item(self.config_list[dut.slotnum],
                                        "Capacitor")
                # per-dut timeout, default 600 s
                if "Overtime" in config:
                    overtime=float(config["Overtime"])
                else:
                    overtime=600
                #self.adk.slave_addr = 0x14
                #val = self.adk.read_reg(0x23,0x01)[0]
                val = dut.read_PGEMSTAT(0)
                #logger.info("PGEMSTAT.BIT2: {0}".format(val))
                vcap_temp=dut.meas_vcap()
                logger.info("dut: {0} PGEMSTAT.BIT2: {1} vcap in cap calculate: {2}".format(dut.slotnum, val, vcap_temp))
                capacitor_time = time.time() - start_time
                dut.capacitor_time = capacitor_time
                if (val | 0xFB)==0xFB: #PGEMSTAT.BIT2==0 CAP MEASURE COMPLETE
                    all_cap_mears &= True
                    # measured value is published in VPD at address 0x100
                    val1 = dut.read_vpd_byaddress(0x100)[0] #`````````````````````````read cap vale from VPD``````````compare````````````````````````````
                    logger.info("capacitance_measured value: {0}".format(val1))
                    dut.capacitance_measured=val1
                    if not (config["min"] < val1 < config["max"]):
                        dut.status=DUT_STATUS.Fail
                        dut.errormessage = "Cap is over limits"
                        logger.info("dut: {0} capacitor: {1} message: {2} ".
                                    format(dut.slotnum, dut.capacitance_measured,
                                           dut.errormessage))
                    else:
                        dut.status = DUT_STATUS.Idle  # pass
                elif capacitor_time > overtime:
                    all_cap_mears &= True
                    dut.status=DUT_STATUS.Fail
                    dut.errormessage = "Cap start over time"
                    logger.info("dut: {0} capacitor: {1} message: {2} ".
                                format(dut.slotnum, dut.capacitance_measured,
                                       dut.errormessage))
                else:
                    # measurement still running: poll again
                    all_cap_mears &= False
            except aardvark.USBI2CAdapterException:
                logger.info("dut: {0} IIC access failed.".
                            format(dut.slotnum))
                all_cap_mears &= True
                dut.status = DUT_STATUS.Fail
                dut.errormessage = "IIC access failed."
        if not all_cap_mears:
            time.sleep(INTERVAL * 5)
    #check capacitance ok
    # pass 3: GTG register and GTG pin verification on passing duts
    for dut in self.dut_list:
        all_cap_ready=True  # NOTE(review): assigned but never read
        if dut is None:
            continue
        if dut.status != DUT_STATUS.Idle:
            continue
        self.switch_to_dut(dut.slotnum)
        #self.adk.slave_addr = 0x14
        #val = self.adk.read_reg(0x21,0x01)[0]
        try:
            val = dut.read_GTG(0)
            if not((val&0x02)==0x02):
                dut.status=DUT_STATUS.Fail
                dut.errormessage = "GTG.bit1 ==0 "
                logger.info("GTG.bit1 ==0")
            # check GTG_WARNING == 0x00
            #temp=self.adk.read_reg(0x22)[0]
            else:
                temp = dut.read_GTG_WARN(0)
                logger.info("GTG_Warning value: {0}".format(temp))
                if not (temp==0x00):
                    dut.status = DUT_STATUS.Fail
                    dut.errormessage = "GTG_warning != 0x00"
                else:
                    #dut.status = DUT_STATUS.Idle # pass
                    if not self.erie.GetGTGPin(dut.slotnum):
                        dut.status = DUT_STATUS.Fail
                        dut.errormessage = "GTG Pin check failed"
                    else:
                        if self.InMode4in1:
                            # every shared port's GTG pin must be high
                            all_GTG = True
                            for i in range(1, 4):
                                self.switch_to_dut(dut.slotnum + i)
                                if not self.erie.GetGTGPin(dut.slotnum + i):
                                    all_GTG &= False
                            if all_GTG:
                                dut.status = DUT_STATUS.Idle  # pass
                            else:
                                dut.status = DUT_STATUS.Fail
                                dut.errormessage = "GTG Pin check failed"
                        else:
                            dut.status = DUT_STATUS.Idle  # pass
        except aardvark.USBI2CAdapterException:
            dut.status = DUT_STATUS.Fail
            dut.errormessage = "IIC access failed."
def save_db(self):
    """Persist every tested DUT to the result database.

    Earlier rows with the same barcode are marked archived so only the
    newest record per barcode stays active.  Any failure is routed
    through self.error().
    """
    # setup database
    # db should be prepared in cli.py
    try:
        db_url = "sqlite:///" + RESULT_DB
        manager = SessionManager()
        manager.prepare_db(db_url, [DUT, Cycle])
        session = manager.get_session(db_url)
        for dut in self.dut_list:
            if dut is None:
                continue
            previous_rows = session.query(DUT).filter(
                DUT.barcode == dut.barcode, DUT.archived == 0).all()
            for old_row in previous_rows:
                old_row.archived = 1
                session.add(old_row)
                session.commit()
            dut.archived = 0
            session.add(dut)
            session.commit()
        session.close()
    except Exception as e:
        self.error(e)
def save_file(self):
    """ save dut info to xml file

    Each DUT is serialized to ``<barcode>.xml`` under RESULT_LOG; if a
    file of that name already exists, ``<barcode>(1).xml``,
    ``<barcode>(2).xml``, ... are tried until a free name is found.
    :return:
    """
    for dut in self.dut_list:
        if dut is None:
            continue
        if not os.path.exists(RESULT_LOG):
            os.makedirs(RESULT_LOG)
        filepath = os.path.join(RESULT_LOG, dut.barcode + ".xml")
        suffix = 1
        while os.path.exists(filepath):
            filename = "{0}({1}).xml".format(dut.barcode, suffix)
            filepath = os.path.join(RESULT_LOG, filename)
            suffix += 1
        result = simplexml.dumps(dut.to_dict(), "entity")
        # mode "wb" already truncates on open; the previous explicit
        # f.truncate() call was redundant and has been removed.
        with open(filepath, "wb") as f:
            f.write(result)
def prepare_to_exit(self):
    """ cleanup and save to database before exit.

    Grades the channel (Idle when nothing was tested, Fail if any dut
    failed, Pass otherwise), lights the LED on failed slots, cuts power
    on every slot and writes the XML result logs.
    :return: None
    """
    self.channelresult = (BOARD_STATUS.Idle if self.dutnumber == 0
                          else BOARD_STATUS.Pass)
    for dut in self.dut_list:
        if dut is None:
            continue
        if dut.status == DUT_STATUS.Idle:
            dut.status = DUT_STATUS.Pass
            verdict = "passed"
        else:
            # any failed dut fails the whole channel
            self.channelresult = BOARD_STATUS.Fail
            self.erie.LedOn(dut.slotnum)
            verdict = dut.errormessage
        logger.info("TEST RESULT: dut {0} ===> {1}".format(
            dut.slotnum, verdict))
    # power down every slot on the board
    for slot in range(TOTAL_SLOTNUM):
        self.ps.selectChannel(slot)
        self.ps.deactivateOutput()
    # save to xml logs
    self.save_file()
def run(self):
    """ override thread.run()

    Consumes ChannelStates commands from the queue until EXIT (or an
    unknown state) is seen.  Each regular state maps to a log message,
    a handler method and a progress-bar increment; every handler runs
    inside try/except and routes failures through self.error(), exactly
    as the original per-state branches did.
    :return: None
    """
    # state -> (log message, handler, progressbar increment)
    handlers = {
        ChannelStates.INIT: ("Channel: Initialize.", self.init, 20),
        ChannelStates.CHARGE: ("Channel: Charge DUT.", self.charge_dut, 20),
        ChannelStates.HOLD: ("Channel: Hold Power On.", self.hold_power_on, 0),
        ChannelStates.LOAD_DISCHARGE:
            ("Channel: Discharge DUT.", self.discharge_dut, 15),
        ChannelStates.PROGRAM_VPD:
            ("Channel: Program VPD.", self.program_dut, 5),
        ChannelStates.CHECK_VPD: ("Channel: Check VPD", self.check_vpd, 5),
        ChannelStates.CHECK_CAPACITANCE:
            ("Channel: Check Capacitor Value", self.calculate_capacitance, 30),
        ChannelStates.RECHARGE:
            ("Channel: Recharge DUT", self.recharge_dut, 5),
    }
    while (not self.exit):
        state = self.queue.get()
        if (state == ChannelStates.EXIT):
            try:
                self.prepare_to_exit()
                self.exit = True
                logger.info("Channel: Exit Successfully.")
            except Exception as e:
                self.error(e)
        elif state in handlers:
            message, action, progress = handlers[state]
            try:
                logger.info(message)
                action()
                self.progressbar += progress
            except Exception as e:
                self.error(e)
        else:
            logger.error("unknown dut state, exit...")
            self.exit = True
def auto_test(self):
    """Queue the standard production test sequence and start the worker.

    Order: INIT, PROGRAM_VPD, CHARGE, (HOLD when HOLD_EN),
    CHECK_CAPACITANCE, CHECK_VPD, LOAD_DISCHARGE, RECHARGE, EXIT.
    """
    sequence = [ChannelStates.INIT,
                ChannelStates.PROGRAM_VPD,
                ChannelStates.CHARGE]
    if HOLD_EN:
        sequence.append(ChannelStates.HOLD)
    sequence += [ChannelStates.CHECK_CAPACITANCE,
                 ChannelStates.CHECK_VPD,
                 ChannelStates.LOAD_DISCHARGE,
                 ChannelStates.RECHARGE,
                 ChannelStates.EXIT]
    for state in sequence:
        self.queue.put(state)
    self.start()
def empty(self):
    """Discard every state currently queued for this channel."""
    pending = self.queue.qsize()
    for _ in range(pending):
        self.queue.get()
def error(self, e):
    """Log the active exception's traceback, flag the thread to exit,
    and re-raise *e*.

    Fix: the old code called ``traceback.format_exc(exc)`` with the
    ``sys.exc_info()`` tuple, which abuses format_exc's ``limit``
    parameter and breaks on Python 3; ``format_exc()`` reads the
    current exception itself.
    """
    logger.error(traceback.format_exc())
    self.exit = True
    raise e
def quit(self):
    """Abort pending work: drop all queued states, then schedule an
    orderly EXIT so run() can clean up and stop."""
    self.empty()
    self.queue.put(ChannelStates.EXIT)
if __name__ == "__main__":
    # Manual bench harness: build one channel with a single barcode and
    # run the full automatic sequence.  The commented lines allow
    # driving individual states by hand instead of auto_test().
    logging.basicConfig(level=logging.INFO)
    # barcode = ["AGIGA9603-004BCA02144800000002-06",
    #            "AGIGA9603-004BCA02144800000002-06",
    #            "AGIGA9603-004BCA02144800000002-06",
    #            "AGIGA9603-004BCA02144800000002-06"]
    barcode = ["AGIGA9811-001BCA02143900000228-01"]
    ch = Channel(barcode_list=barcode, channel_id=0,
                 name="UFT_CHANNEL", cable_barcodes_list=[""])
    # ch.start()
    # ch.queue.put(ChannelStates.INIT)
    # ch.queue.put(ChannelStates.CHARGE)
    # ch.queue.put(ChannelStates.PROGRAM_VPD)
    # ch.queue.put(ChannelStates.CHECK_ENCRYPTED_IC)
    # ch.queue.put(ChannelStates.CHECK_TEMP)
    # ch.queue.put(ChannelStates.LOAD_DISCHARGE)
    # ch.queue.put(ChannelStates.CHECK_CAPACITANCE)
    # ch.queue.put(ChannelStates.EXIT)
    ch.auto_test()
    # ch.switch_to_mb()
    # ch.switch_to_dut(0)
    # ch.init()
    # ch.charge_dut()
    # ch.discharge_dut()
|
hardanimal/UFT_UPGEM
|
src/UFT/channel.py
|
Python
|
gpl-3.0
| 54,026
|
[
"Amber"
] |
4cc514c6ea597f06eec38b64b2a246676ec479a8011ea1f497d06670910c16ee
|
"""
refcounting
~~~~~~~~~~~
Reference count annotations for C API functions. Has the same
result as the sphinx.ext.refcounting extension but works for all
functions regardless of the signature, and the reference counting
information is written inline with the documentation instead of a
separate file.
Adds a new directive "refcounting". The directive has no content
and one required positional parameter:: "new" or "borrow".
Example:
.. cfunction:: json_t *json_object(void)
.. refcounting:: new
<description of the json_object function>
:copyright: Copyright (c) 2009, 2010 Petri Lehtinen <petri@digip.org>
:license: MIT, see LICENSE for details.
"""
from docutils import nodes
# Custom inline docutils node marking a reference-count annotation.
# Rendered like emphasis for latex/text and as <em class="refcount">
# in HTML (see the visitor functions registered in setup()).
class refcounting(nodes.emphasis): pass
def visit(self, node):
    # Generic builder visitor: render a refcounting node exactly like
    # ordinary emphasis (used for the latex and text builders).
    self.visit_emphasis(node)
def depart(self, node):
    # Departure hook matching visit() above.
    self.depart_emphasis(node)
def html_visit(self, node):
    # HTML builder visitor: wrap the annotation in <em class="refcount">
    # so it can be styled independently of ordinary emphasis.
    self.body.append(self.starttag(node, 'em', '', CLASS='refcount'))
def html_depart(self, node):
    # Close the element opened by html_visit().
    self.body.append('</em>')
def refcounting_directive(name, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine):
    """Handle the ``refcounting`` directive.

    The single required argument must be ``"new"`` or ``"borrow"``;
    anything else raises ``ValueError``.  Returns a one-element list
    containing a ``refcounting`` inline node with the matching
    annotation text.
    """
    if arguments[0] == 'borrow':
        text = 'Return value: Borrowed reference.'
    elif arguments[0] == 'new':
        text = 'Return value: New reference.'
    else:
        # Fix: the original raised `Error(...)`, a name that is never
        # defined in this module and would itself fail with NameError.
        raise ValueError('Valid arguments: new, borrow')
    return [refcounting(text, text)]
def setup(app):
    # Sphinx extension entry point: register the custom node with
    # per-builder renderers, then hook up the directive (0 content
    # lines allowed; exactly one required argument, no optional ones).
    app.add_node(refcounting,
                 html=(html_visit, html_depart),
                 latex=(visit, depart),
                 text=(visit, depart))
    app.add_directive('refcounting', refcounting_directive, 0, (1, 0, 0))
|
cloudera/avro
|
lang/c/jansson/doc/ext/refcounting.py
|
Python
|
apache-2.0
| 1,716
|
[
"VisIt"
] |
a45659c4bb401785442425593ae81bff2aeabeef8a5bbf8325946688527e43ff
|
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
"""Validator classes are used for ComplexInputs, to validate the content
"""
import logging
from pywps.validator.mode import MODE
from pywps.inout.formats import FORMATS
import mimetypes
import os
LOGGER = logging.getLogger('PYWPS')
def validategml(data_input, mode):
    """GML validation function

    :param data_input: :class:`ComplexInput`
    :param pywps.validator.mode.MODE mode:

    Validates at increasing strictness; each level overwrites the
    verdict of the previous one:

    ``MODE.NONE`` always passes; ``MODE.SIMPLE`` checks the mimetype;
    ``MODE.STRICT`` opens the file with GDAL/OGR and requires the GML
    driver; ``MODE.VERYSTRICT`` validates against the format's XML
    schema using :class:`lxml.etree`.
    """
    LOGGER.info('validating GML; Mode: {}'.format(mode))
    passed = False
    if mode >= MODE.NONE:
        passed = True
    if mode >= MODE.SIMPLE:
        mtype, _encoding = mimetypes.guess_type(data_input.file, strict=False)
        passed = data_input.data_format.mime_type in {mtype, FORMATS.GML.mime_type}
    if mode >= MODE.STRICT:
        from pywps.dependencies import ogr
        data_source = ogr.Open(data_input.file)
        if data_source:
            passed = data_source.GetDriver().GetName() == "GML"
        else:
            passed = False
    if mode >= MODE.VERYSTRICT:
        from lxml import etree
        from pywps._compat import PY2
        if PY2:
            from urllib2 import urlopen
        else:
            from urllib.request import urlopen
        try:
            schema_doc = etree.parse(urlopen(data_input.data_format.schema))
            gml_schema = etree.XMLSchema(schema_doc)
            passed = gml_schema.validate(etree.parse(data_input.stream))
        except Exception as e:
            LOGGER.warning(e)
            passed = False
    return passed
def validatexml(data_input, mode):
    """XML validation function
    :param data_input: :class:`ComplexInput`
    :param pywps.validator.mode.MODE mode:
    This function validates XML input based on given validation mode. Following
    happens, if `mode` parameter is given:
    `MODE.NONE`
    it will return always `True`
    `MODE.SIMPLE`
    the mimetype will be checked
    `MODE.STRICT` and `MODE.VERYSTRICT`
    the :class:`lxml.etree` is used along with given input `schema` and the
    XML file is properly validated against given schema.
    """
    LOGGER.info('validating XML; Mode: {}'.format(mode))
    passed = False
    if mode >= MODE.NONE:
        # No validation requested: accept unconditionally.
        passed = True
    if mode >= MODE.SIMPLE:
        name = data_input.file
        (mtype, encoding) = mimetypes.guess_type(name, strict=False)
        # NOTE(review): this compares against FORMATS.GML, not a generic XML
        # format — looks like a copy-paste from validategml; confirm intended.
        passed = data_input.data_format.mime_type in {mtype, FORMATS.GML.mime_type}
    if mode >= MODE.STRICT:
        # Validate against a locally-shipped schema (resolved relative to
        # the package's schemas directory), unlike validategml which
        # fetches the schema from a URL.
        from lxml import etree
        from pywps._compat import PY2
        if PY2:
            from urllib2 import urlopen
        else:
            from urllib.request import urlopen
        # TODO: Raise the actual validation exception to make it easier to spot the error.
        # xml = etree.parse(data_input.file)
        # schema.assertValid(xml)
        try:
            fn = os.path.join(_get_schemas_home(), data_input.data_format.schema)
            schema_doc = etree.parse(fn)
            schema = etree.XMLSchema(schema_doc)
            passed = schema.validate(etree.parse(data_input.file))
        except Exception as e:
            # Parsing/validation failures are logged and treated as invalid.
            LOGGER.warning(e)
            passed = False
    return passed
def validatejson(data_input, mode):
    """JSON validation function
    :param data_input: :class:`ComplexInput`
    :param pywps.validator.mode.MODE mode:
    This function validates JSON input based on given validation mode. Following
    happens, if `mode` parameter is given:
    `MODE.NONE`
    No validation, returns `True`.
    `MODE.SIMPLE`
    Returns `True` if the mime type is correct.
    `MODE.STRICT`
    Returns `True` if the content can be interpreted as a json object.
    """
    LOGGER.info('validating JSON; Mode: {}'.format(mode))
    passed = False
    if mode >= MODE.NONE:
        # Validation disabled: accept unconditionally.
        passed = True
    if mode >= MODE.SIMPLE:
        # Declared mime type must match the guessed type or the JSON format.
        guessed_type, _encoding = mimetypes.guess_type(data_input.file, strict=False)
        passed = data_input.data_format.mime_type in {guessed_type, FORMATS.JSON.mime_type}
    if mode >= MODE.STRICT:
        # Content check: the file must parse as JSON.
        import json
        try:
            with open(data_input.file) as handle:
                json.load(handle)
            passed = True
        except ValueError:
            passed = False
    return passed
def validategeojson(data_input, mode):
    """GeoJSON validation example
    >>> import StringIO
    >>> class FakeInput(object):
    ...     json = open('point.geojson','w')
    ...     json.write('''{"type":"Feature", "properties":{}, "geometry":{"type":"Point", "coordinates":[8.5781228542328, 22.87500500679]}, "crs":{"type":"name", "properties":{"name":"urn:ogc:def:crs:OGC:1.3:CRS84"}}}''')  # noqa
    ...     json.close()
    ...     file = 'point.geojson'
    >>> class fake_data_format(object):
    ...     mimetype = 'application/geojson'
    >>> fake_input = FakeInput()
    >>> fake_input.data_format = fake_data_format()
    >>> validategeojson(fake_input, MODE.SIMPLE)
    True
    """
    LOGGER.info('validating GeoJSON; Mode: {}'.format(mode))
    passed = False
    if mode >= MODE.NONE:
        # No validation requested: accept unconditionally.
        passed = True
    if mode >= MODE.SIMPLE:
        # Declared mime type must match the guessed type or GeoJSON format.
        name = data_input.file
        (mtype, encoding) = mimetypes.guess_type(name, strict=False)
        passed = data_input.data_format.mime_type in {mtype, FORMATS.GEOJSON.mime_type}
    if mode >= MODE.STRICT:
        # Ask GDAL/OGR to open the file and confirm the GeoJSON driver was used.
        from pywps.dependencies import ogr
        data_source = ogr.Open(data_input.file)
        if data_source:
            passed = (data_source.GetDriver().GetName() == "GeoJSON")
        else:
            passed = False
    if mode >= MODE.VERYSTRICT:
        import jsonschema
        import json
        # this code comes from
        # https://github.com/om-henners/GeoJSON_Validation/blob/master/geojsonvalidation/geojson_validation.py
        # Load the bundled GeoJSON schema and its three referenced
        # sub-schemas from the local schemas directory.
        schema_home = os.path.join(_get_schemas_home(), "geojson")
        base_schema = os.path.join(schema_home, "geojson.json")
        with open(base_schema) as fh:
            geojson_base = json.load(fh)
        with open(os.path.join(schema_home, "crs.json")) as fh:
            crs_json = json.load(fh)
        with open(os.path.join(schema_home, "bbox.json")) as fh:
            bbox_json = json.load(fh)
        with open(os.path.join(schema_home, "geometry.json")) as fh:
            geometry_json = json.load(fh)
        # Pre-seed the resolver cache so no network access is needed to
        # resolve the $ref URLs inside the base schema.
        cached_json = {
            "http://json-schema.org/geojson/crs.json": crs_json,
            "http://json-schema.org/geojson/bbox.json": bbox_json,
            "http://json-schema.org/geojson/geometry.json": geometry_json
        }
        resolver = jsonschema.RefResolver(
            "http://json-schema.org/geojson/geojson.json",
            geojson_base, store=cached_json)
        validator = jsonschema.Draft4Validator(geojson_base, resolver=resolver)
        try:
            validator.validate(json.loads(data_input.stream.read()))
            passed = True
        except jsonschema.ValidationError:
            passed = False
    return passed
def validateshapefile(data_input, mode):
    """ESRI Shapefile validation example

    :param data_input: :class:`ComplexInput` pointing at a zip archive
        expected to contain a ``.shp`` file (plus its sidecar files).
    :param pywps.validator.mode.MODE mode: validation strictness.
    :return: `True` when the input passes validation at the given mode.
    """
    LOGGER.info('validating Shapefile; Mode: {}'.format(mode))
    passed = False
    if mode >= MODE.NONE:
        # No validation requested: accept unconditionally.
        passed = True
    if mode >= MODE.SIMPLE:
        # Declared mime type must match the guessed type or the SHP format.
        name = data_input.file
        (mtype, encoding) = mimetypes.guess_type(name, strict=False)
        passed = data_input.data_format.mime_type in {mtype, FORMATS.SHP.mime_type}
    if mode >= MODE.STRICT:
        from pywps.dependencies import ogr
        import zipfile
        # Unpack the archive into the input's temp dir and look for a .shp.
        z = zipfile.ZipFile(data_input.file)
        shape_name = None
        for name in z.namelist():
            z.extract(name, data_input.tempdir)
            if os.path.splitext(name)[1].lower() == '.shp':
                shape_name = name
        if shape_name:
            data_source = ogr.Open(os.path.join(data_input.tempdir, shape_name))
            if data_source:
                passed = (data_source.GetDriver().GetName() == "ESRI Shapefile")
            else:
                # ogr.Open returns None when the file could not be opened.
                passed = False
        else:
            # BUG FIX: previously `passed` kept the SIMPLE-stage result when
            # the archive contained no .shp file at all; strict validation
            # must fail in that case.
            passed = False
    return passed
def validategeotiff(data_input, mode):
    """GeoTIFF validation example

    :param data_input: :class:`ComplexInput`
    :param pywps.validator.mode.MODE mode: validation strictness.
    :return: `True` when the input passes validation at the given mode.
    """
    # BUG FIX: the log message previously said "Shapefile".
    LOGGER.info('Validating GeoTIFF; Mode: {}'.format(mode))
    passed = False
    if mode >= MODE.NONE:
        # No validation requested: accept unconditionally.
        passed = True
    if mode >= MODE.SIMPLE:
        # Declared mime type must match the guessed type or GeoTIFF format.
        name = data_input.file
        (mtype, encoding) = mimetypes.guess_type(name, strict=False)
        passed = data_input.data_format.mime_type in {mtype, FORMATS.GEOTIFF.mime_type}
    if mode >= MODE.STRICT:
        try:
            from pywps.dependencies import gdal
            data_source = gdal.Open(data_input.file)
            if data_source:
                passed = (data_source.GetDriver().ShortName == "GTiff")
            else:
                # BUG FIX: gdal.Open returns None for unreadable files, which
                # previously raised AttributeError instead of failing cleanly.
                passed = False
        except ImportError:
            passed = False
    return passed
def validatenetcdf(data_input, mode):
    """netCDF validation.
    """
    LOGGER.info('Validating netCDF; Mode: {}'.format(mode))
    passed = False
    if mode >= MODE.NONE:
        # No validation requested: accept unconditionally.
        passed = True
    if mode >= MODE.SIMPLE:
        # Declared mime type must match the guessed type or netCDF format.
        name = data_input.file
        (mtype, encoding) = mimetypes.guess_type(name, strict=False)
        passed = data_input.data_format.mime_type in {mtype, FORMATS.NETCDF.mime_type}
    if mode >= MODE.STRICT:
        # Content check: the file must be openable by the netCDF4 library.
        try:
            from pywps.dependencies import netCDF4 as nc
            nc.Dataset(data_input.file)
            passed = True
        except ImportError as e:
            passed = False
            LOGGER.exception("ImportError while validating netCDF4 file {}:\n{}".format(data_input.file, e))
        except IOError as e:
            # Raised by netCDF4 when the file is not a valid dataset.
            passed = False
            LOGGER.exception("IOError while validating netCDF4 file {}:\n{}".format(data_input.file, e))
    return passed
def validatedods(data_input, mode):
    """OPeNDAP validation.
    """
    LOGGER.info('Validating OPeNDAP; Mode: {}'.format(mode))
    passed = False
    if mode >= MODE.NONE:
        # No validation requested: accept unconditionally.
        passed = True
    if mode >= MODE.SIMPLE:
        # OPeNDAP inputs are referenced by URL, not by local file.
        name = data_input.url
        (mtype, encoding) = mimetypes.guess_type(name, strict=False)
        passed = data_input.data_format.mime_type in {mtype, FORMATS.DODS.mime_type}
    if mode >= MODE.STRICT:
        # Content check: netCDF4 must be able to open the remote dataset.
        try:
            from pywps.dependencies import netCDF4 as nc
            nc.Dataset(data_input.url)
            passed = True
        except ImportError as e:
            passed = False
            LOGGER.exception("ImportError while validating OPeNDAP link {}:\n{}".format(data_input.url, e))
        except IOError as e:
            # Raised by netCDF4 when the URL is unreachable or not OPeNDAP.
            passed = False
            LOGGER.exception("IOError while validating OPeNDAP link {}:\n{}".format(data_input.url, e))
    return passed
def _get_schemas_home():
    """Get path to schemas directory
    """
    # The schemas directory sits one level above this module's package dir.
    module_dir = os.path.abspath(os.path.dirname(__file__))
    schema_dir = os.path.join(module_dir, os.path.pardir, "schemas")
    LOGGER.debug('Schemas directory: {}'.format(schema_dir))
    return schema_dir
if __name__ == "__main__":
    # Run the doctests from a temporary working directory so the fixture
    # files they create (e.g. point.geojson) do not pollute the source tree.
    import doctest
    from pywps.wpsserver import temp_dir
    with temp_dir() as tmp:
        os.chdir(tmp)
        doctest.testmod()
|
bird-house/PyWPS
|
pywps/validator/complexvalidator.py
|
Python
|
mit
| 11,934
|
[
"NetCDF"
] |
fa97a4f8b9b0e516dabc4a954806ac1666d6d98bd2837e5588062fe5be386802
|
import platform, os, re
from subprocess import *
from time import strftime, gmtime, ctime, localtime, asctime
from utils import colorText
TERM_COLS = 110
# Table of libMesh build-time options probed from libmesh_config.h.
# Each entry maps an option name to:
#   're_option' - regex whose group(1) captures the #define's value
#   'default'   - value assumed when the #define is absent
#   'options'   - (optional) mapping from symbolic value to the raw
#                 #define value; when absent, the raw value is used as-is
LIBMESH_OPTIONS = {
  'mesh_mode' :    { 're_option' : r'#define\s+LIBMESH_ENABLE_PARMESH\s+(\d+)',
                     'default'   : 'SERIAL',
                     'options'   :
                       {
      'PARALLEL' : '1',
      'SERIAL'   : '0'
                       }
                     },
  'unique_ids' :   { 're_option' : r'#define\s+LIBMESH_ENABLE_UNIQUE_ID\s+(\d+)',
                     'default'   : 'FALSE',
                     'options'   :
                       {
      'TRUE'  : '1',
      'FALSE' : '0'
                       }
                     },
  'dtk' :          { 're_option' : r'#define\s+LIBMESH_TRILINOS_HAVE_DTK\s+(\d+)',
                     'default'   : 'FALSE',
                     'options'   :
                       {
      'TRUE'  : '1',
      'FALSE' : '0'
                       }
                     },
  'vtk' :          { 're_option' : r'#define\s+LIBMESH_HAVE_VTK\s+(\d+)',
                     'default'   : 'FALSE',
                     'options'   :
                       {
      'TRUE'  : '1',
      'FALSE' : '0'
                       }
                     },
  'tecplot' :      { 're_option' : r'#define\s+LIBMESH_HAVE_TECPLOT_API\s+(\d+)',
                     'default'   : 'FALSE',
                     'options'   :
                       {
      'TRUE'  : '1',
      'FALSE' : '0'
                       }
                     },
  # PETSc version components are used verbatim (no symbolic mapping).
  'petsc_major' :  { 're_option' : r'#define\s+LIBMESH_DETECTED_PETSC_VERSION_MAJOR\s+(\d+)',
                     'default'   : '1'
                   },
  'petsc_minor' :  { 're_option' : r'#define\s+LIBMESH_DETECTED_PETSC_VERSION_MINOR\s+(\d+)',
                     'default'   : '1'
                   },
  'dof_id_bytes' : { 're_option' : r'#define\s+LIBMESH_DOF_ID_BYTES\s+(\d+)',
                     'default'   : '4'
                   },
  'petsc_debug' :  { 're_option' : r'#define\s+LIBMESH_PETSC_USE_DEBUG\s+(\d+)',
                     'default'   : 'FALSE',
                     'options'   : {'TRUE' : '1', 'FALSE' : '0'}
                   },
  'curl' :         { 're_option' : r'#define\s+LIBMESH_HAVE_CURL\s+(\d+)',
                     'default'   : 'FALSE',
                     'options'   : {'TRUE' : '1', 'FALSE' : '0'}
                   },
  'tbb' :          { 're_option' : r'#define\s+LIBMESH_HAVE_TBB_API\s+(\d+)',
                     'default'   : 'FALSE',
                     'options'   : {'TRUE' : '1', 'FALSE' : '0'}
                   },
}
## Run a command and return the output, or ERROR: + output if retcode != 0
def runCommand(cmd):
    """Run *cmd* through the shell and return its combined stdout/stderr.
    A non-zero exit status prefixes the output with 'ERROR: '.
    """
    proc = Popen([cmd], stdout=PIPE, stderr=STDOUT, close_fds=True, shell=True)
    captured = proc.communicate()[0]
    if proc.returncode != 0:
        captured = 'ERROR: ' + captured
    return captured
## print an optionally colorified test result
#
# The test will not be colored if
# 1) options.colored is False,
# 2) the environment variable BITTEN_NOCOLOR is true, or
# 3) the color parameter is False.
def printResult(test_name, result, timing, start, end, options, color=True):
    """Format a single test-result line, padded with dots to TERM_COLS and
    optionally colorized according to the result text."""
    f_result = ''
    # Number of filler dots between the name and the result text.
    cnt = (TERM_COLS-2) - len(test_name + result)
    color_opts = {'code' : options.code, 'colored' : options.colored}
    if color:
        any_match = False
        # Color leading paths
        m = re.search(r'(.*):(.*)', test_name)
        if m:
            test_name = colorText(m.group(1), 'CYAN', **color_opts) + ':' + m.group(2)
        # Color the Caveats CYAN
        m = re.search(r'(\[.*?\])', result)
        if m:
            any_match = True
            f_result += colorText(m.group(1), 'CYAN', **color_opts) + " "
        # Color Exodiff or CVSdiff tests YELLOW
        m = re.search('(FAILED \((?:EXODIFF|CSVDIFF)\))', result)
        if m:
            any_match = True
            f_result += colorText(m.group(1), 'YELLOW', **color_opts)
        else:
            # Color remaining FAILED tests RED
            # NOTE: this else belongs to the EXODIFF/CSVDIFF check, so a diff
            # failure is never colored twice.
            m = re.search('(FAILED \(.*\))', result)
            if m:
                any_match = True
                f_result += colorText(m.group(1), 'RED', **color_opts)
        # Color deleted tests RED
        m = re.search('(deleted) (\(.*\))', result)
        if m:
            any_match = True
            f_result += colorText(m.group(1), 'RED', **color_opts) + ' ' + m.group(2)
        # Color long running tests YELLOW
        m = re.search('(RUNNING\.\.\.)', result)
        if m:
            any_match = True
            f_result += colorText(m.group(1), 'YELLOW', **color_opts)
        # Color PBS status CYAN
        m = re.search('((?:LAUNCHED|RUNNING(?!\.)|EXITING|QUEUED))', result)
        if m:
            any_match = True
            f_result += colorText(m.group(1), 'CYAN', **color_opts)
        # Color Passed tests GREEN
        m = re.search('(OK|DRY_RUN)', result)
        if m:
            any_match = True
            f_result += colorText(m.group(1), 'GREEN', **color_opts)
        # Fall back to the raw result text when nothing above matched.
        if not any_match:
            f_result = result
        f_result = test_name + '.'*cnt + ' ' + f_result
    else:
        f_result = test_name + '.'*cnt + ' ' + result
    # Tack on the timing if it exists
    if timing:
        f_result += ' [' + '%0.3f' % float(timing) + 's]'
    if options.debug_harness:
        f_result += ' Start: ' + '%0.3f' % start + ' End: ' + '%0.3f' % end
    return f_result
## Color the error messages if the options permit, also do not color in bitten scripts because
# it messes up the trac output.
# supports weirded html for more advanced coloring schemes. \verbatim<r>,<g>,<y>,<b>\endverbatim All colors are bolded.
def getPlatforms():
    """Return the set of platform identifiers matching the host machine.
    Possible members: LINUX, DARWIN, ML, MAVERICKS, YOSEMITE, or ALL.
    """
    # platform.uname() is available on all platforms, while os.uname()
    # is not (see bugs.python.org/issue8080).
    uname = platform.uname()
    system = uname[0].upper()
    release = uname[2]
    detected = set(['ALL'])
    if system == 'DARWIN':
        detected.add('DARWIN')
        # Map the Darwin kernel major version to an OS X code name.
        for pattern, label in (("12\.", 'ML'),
                               ("13\.", 'MAVERICKS'),
                               ("14\.", 'YOSEMITE')):
            if re.match(pattern, release):
                detected.add(label)
    else:
        detected.add(system)
    return detected
def runExecutable(libmesh_dir, location, bin, args):
    """Locate a libMesh helper executable and run it with *args*,
    returning its output with trailing whitespace stripped.
    Searches the installed location first, then two uninstalled layouts."""
    # Installed location of libmesh executable
    libmesh_installed = libmesh_dir + '/' + location + '/' + bin
    # Uninstalled location of libmesh executable
    libmesh_uninstalled = libmesh_dir + '/' + bin
    # Uninstalled location of libmesh executable
    libmesh_uninstalled2 = libmesh_dir + '/contrib/bin/' + bin
    # The eventual variable we will use to refer to libmesh's executable
    libmesh_exe = ''
    if os.path.exists(libmesh_installed):
        libmesh_exe = libmesh_installed
    elif os.path.exists(libmesh_uninstalled):
        libmesh_exe = libmesh_uninstalled
    elif os.path.exists(libmesh_uninstalled2):
        libmesh_exe = libmesh_uninstalled2
    else:
        # Fatal: the harness cannot continue without the executable.
        print "Error! Could not find '" + bin + "' in any of the usual libmesh's locations!"
        exit(1)
    return runCommand(libmesh_exe + " " + args).rstrip()
def getCompilers(libmesh_dir):
    """Return the set of compiler identifiers detected from libMesh's
    C++ compiler wrapper. Possible members: GCC, INTEL, CLANG, ALL."""
    # Supported compilers are GCC, INTEL or ALL
    compilers = set(['ALL'])
    # Ask libmesh-config which C++ compiler it was built with.
    mpicxx_cmd = runExecutable(libmesh_dir, "bin", "libmesh-config", "--cxx")
    # Account for usage of distcc or ccache
    if "distcc" in mpicxx_cmd or "ccache" in mpicxx_cmd:
        # The real compiler is the last token after the wrapper prefix.
        mpicxx_cmd = mpicxx_cmd.split()[-1]
    # If mpi ic on the command, run -show to get the compiler
    if "mpi" in mpicxx_cmd:
        raw_compiler = runCommand(mpicxx_cmd + " -show")
    else:
        raw_compiler = mpicxx_cmd
    if re.match('icpc', raw_compiler) != None:
        compilers.add("INTEL")
    elif re.match('[cg]\+\+', raw_compiler) != None:
        compilers.add("GCC")
    elif re.match('clang\+\+', raw_compiler) != None:
        compilers.add("CLANG")
    return compilers
def getPetscVersion(libmesh_dir):
    """Return the PETSc version detected in libmesh_config.h as a
    'major.minor' string; exits when the version is ambiguous."""
    major_version = getLibMeshConfigOption(libmesh_dir, 'petsc_major')
    minor_version = getLibMeshConfigOption(libmesh_dir, 'petsc_minor')
    # Each probe must yield exactly one value to form an unambiguous version.
    if len(major_version) != 1 or len(minor_version) != 1:
        print "Error determining PETSC version"
        exit(1)
    return major_version.pop() + '.' + minor_version.pop()
# Break down petsc version logic in a new define
# TODO: find a way to eval() logic instead
def checkPetscVersion(checks, test):
    """Check whether the detected PETSc version satisfies a test's
    version requirements.

    checks -- dict with the detected version under 'petsc_version'
    test   -- dict with a list of requirement strings under
              'petsc_version' (e.g. 'ALL', '3.4', '>=3.3', '<3.5')
    Returns (satisfied, failing_logic_or_None, version_or_None).
    """
    # If any version of petsc works, return true immediately
    if 'ALL' in set(test['petsc_version']):
        return (True, None, None)
    # BUG FIX: initialize so an empty requirement list no longer raises
    # NameError at the final return.
    logic, version = None, None
    # Iterate through petsc versions in test[PETSC_VERSION] and match it
    # against check[PETSC_VERSION]
    for petsc_version in test['petsc_version']:
        # Split a requirement like '>=3.4' into its operator and version.
        logic, version = re.search(r'(.*?)(\d\S+)', petsc_version).groups()
        # Exact match
        if logic == '' or logic == '=':
            if version == checks['petsc_version']:
                return (True, None, version)
            else:
                return (False, '!=', version)
        # Logical match
        # NOTE(review): comparisons are lexicographic on the first three
        # characters, so two-digit minor versions (e.g. '3.10') compare
        # incorrectly — preserved as-is to match existing behavior.
        if logic == '>' and checks['petsc_version'][0:3] > version[0:3]:
            return (True, None, version)
        elif logic == '>=' and checks['petsc_version'][0:3] >= version[0:3]:
            return (True, None, version)
        elif logic == '<' and checks['petsc_version'][0:3] < version[0:3]:
            return (True, None, version)
        elif logic == '<=' and checks['petsc_version'][0:3] <= version[0:3]:
            return (True, None, version)
    return (False, logic, version)
def getLibMeshConfigOption(libmesh_dir, option):
    """Probe libmesh_config.h for the value of *option* (a key into
    LIBMESH_OPTIONS) and return the matching symbolic values as a set
    (always including 'ALL'). Exits when no config header is found."""
    # Some tests work differently with parallel mesh enabled
    # We need to detect this condition
    option_set = set(['ALL'])
    filenames = [
      libmesh_dir + '/include/base/libmesh_config.h',   # Old location
      libmesh_dir + '/include/libmesh/libmesh_config.h' # New location
      ];
    success = 0
    for filename in filenames:
        if success == 1:
            break
        try:
            f = open(filename)
            contents = f.read()
            f.close()
            # Look for the #define described by this option's regex.
            info = LIBMESH_OPTIONS[option]
            m = re.search(info['re_option'], contents)
            if m != None:
                if 'options' in info:
                    # Map the raw #define value back to its symbolic name.
                    # NOTE(review): the loop variable shadows the `option`
                    # parameter; harmless here since it is not read again.
                    for value, option in info['options'].iteritems():
                        if m.group(1) == option:
                            option_set.add(value)
                else:
                    # No symbolic mapping: the raw value replaces 'ALL'.
                    option_set.clear()
                    option_set.add(m.group(1))
            else:
                # #define absent: fall back to the documented default.
                option_set.add(info['default'])
            success = 1
        except IOError, e:
            # print "Warning: I/O Error trying to read", filename, ":", e.strerror, "... Will try other locations."
            pass
    if success == 0:
        print "Error! Could not find libmesh_config.h in any of the usual locations!"
        exit(1)
    return option_set
def getSharedOption(libmesh_dir):
    """Return {'ALL', 'DYNAMIC'} or {'ALL', 'STATIC'} depending on whether
    libMesh was built with shared libraries; exits when undeterminable."""
    # Some tests may only run properly with shared libraries on/off
    # We need to detect this condition
    shared_option = set(['ALL'])
    # Ask libtool whether shared libraries were built.
    result = runExecutable(libmesh_dir, "contrib/bin", "libtool", "--config | grep build_libtool_libs | cut -d'=' -f2")
    if re.search('yes', result) != None:
        shared_option.add('DYNAMIC')
    elif re.search('no', result) != None:
        shared_option.add('STATIC')
    else:
        # Neither no nor yes? Not possible!
        print "Error! Could not determine whether shared libraries were built."
        exit(1)
    return shared_option
|
mellis13/moose
|
python/TestHarness/util.py
|
Python
|
lgpl-2.1
| 10,908
|
[
"VTK"
] |
89d6344253afcf661dc0c28f231609e53a954c688dab539dafa0d85d8d63318a
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 14:39:00 2015
@author: alex_
"""
# Universal Imports
import numpy as np
import sunpy.map as mp
from astropy import units as u
# Visulisation Imports
from mayavi import mlab
from mayavi_seed_streamlines import SeedStreamline, Streamline
from mayavi.tools.sources import vector_field
# Module Imports
#from classes import *
#from solarbextrapolation.map3dclasses import Map3D
from solarbextrapolation.utilities import decompose_ang_len
def visualise(aMap3D, **kwargs):
    """
    Basic function for visualising a vector field from an extrapolator.
    General usage involves passing boundary map and volume vector field and
    these are then aligned and plotted in mayavi.
    The vector field will be represented by streamlines generated from the
    given (or otherwise default) seed points.
    The boundary data should be rendered in approbriate colours for the given
    map data.
    Parameters
    ----------
    aMap3D : Map3D
    The 3D vector field from the extrapolator.
    boo_debug : boolean, optional
    If set, turns on logging functionality.
    seeds : numpy.array, optional
    If set, provides a list of manual seed points in the 3D vector field.
    boundary : sunpy.map, optional
    If set, provides the 2D map to place in the visulisation at the base of
    the volume.
    unit_length : `astropy.units.quantity.Quantity`, optional
    If set, provides the length of one unit in MayaVi for scaling maps.
    boundary_unit : `astropy.units.quantity.Quantity`, optional
    If set, provides a single unit for the x/y-axes of the boundary map.
    boundary_units : list, optional
    If set, provides a list of units for the x/y-axes of the boundary map.
    volume_unit : `astropy.units.quantity.Quantity`, optional
    If set, provides a single unit for the x/y/z-axes of the 3D vector field.
    volume_units : list, optional
    If set, provides a list of units for the x/y/z-axes of the 3D vector field.
    show_boundary_axes : boolean, optional
    If set, enables the display of the boundary map axes.
    show_volume_axes : boolean, optional
    If set, enables the display of the 3D vector field axes.
    Returns
    -------
    fig : mayavi figure containing the rendered scene.
    """
    # Optional parameters
    boo_debug = kwargs.get('debug', False)
    np_seeds = kwargs.get('seeds', None)
    boundary = kwargs.get('boundary', None)
    # The "* 1.0" forces a copy so defaults are not mutated between calls.
    mayavi_unit_length = kwargs.get('unit_length', 1.0 * u.Mm) * 1.0
    boundary_unit = kwargs.get('boundary_unit', mayavi_unit_length) * 1.0
    boundary_units = kwargs.get('boundary_units', [ boundary_unit, boundary_unit, boundary_unit ])
    volume_unit = kwargs.get('volume_unit', mayavi_unit_length) * 1.0
    volume_units = kwargs.get('volume_units', [ volume_unit, volume_unit, volume_unit ])
    show_boundary_axes = kwargs.get('show_boundary_axes', True)
    show_volume_axes = kwargs.get('show_volume_axes', True)
    # Setup the arc to length equivilence
    # (angular extents on the Sun converted to metres using the
    # observer distance from the map metadata)
    obs_distance = aMap3D.dsun - aMap3D.rsun_meters
    radian_length = [ (u.radian, u.meter, lambda x: obs_distance * x, lambda x: x / obs_distance) ]
    # Slice (scale) the fields to make the vectors usable in mayavi.
    # NOTE(review): a scale of 1 keeps every point; increase to down-sample.
    int_slice_scale = 1
    print "shape: " + str(aMap3D.data.shape)
    npm_3d_sliced = aMap3D.data[::int_slice_scale,::int_slice_scale,::int_slice_scale,:]
    # Plot the main vector field (volume).
    fig = mlab.figure()
    # Make 3D coords for ever point in the 3D grid.
    x_range = u.Quantity([ decompose_ang_len(aMap3D.xobsrange[0], equivalencies=radian_length),
                           decompose_ang_len(aMap3D.xobsrange[1], equivalencies=radian_length) ])
    y_range = u.Quantity([ decompose_ang_len(aMap3D.yobsrange[0], equivalencies=radian_length),
                           decompose_ang_len(aMap3D.yobsrange[1], equivalencies=radian_length) ])
    #z_range = aMap3D.zrange.to(u.meter, equivalencies=radian_length)
    z_range = u.Quantity([ decompose_ang_len(aMap3D.zrange[0], equivalencies=radian_length),
                           decompose_ang_len(aMap3D.zrange[1], equivalencies=radian_length) ])
    # Convert physical ranges into dimensionless MayaVi scene units.
    x_range_scaled = (x_range/mayavi_unit_length).decompose().value
    y_range_scaled = (y_range/mayavi_unit_length).decompose().value
    z_range_scaled = (z_range/mayavi_unit_length).decompose().value
    # Complex-step mgrid gives exactly shape[i] points spanning each range.
    X, Y, Z = np.mgrid[x_range_scaled[0]:x_range_scaled[1]:npm_3d_sliced.shape[0]*1j,
                       y_range_scaled[0]:y_range_scaled[1]:npm_3d_sliced.shape[1]*1j,
                       z_range_scaled[0]:z_range_scaled[1]:npm_3d_sliced.shape[2]*1j]
    vec_field = vector_field(X, Y, Z, npm_3d_sliced[:,:,:,0], npm_3d_sliced[:,:,:,1], npm_3d_sliced[:,:,:,2],
                             name='Magnetic Vector Field', figure=fig)
    # The magnitude filter provides scalar values used to color streamlines.
    vec_field_mag = mlab.pipeline.extract_vector_norm(vec_field, name="Magnetic Field Magnitude")
    # Place a small outline around the data cube
    mlab.outline()
    if show_volume_axes:
        # Label axes
        axes = mlab.axes()
        # Recompute axis tick ranges in the user-selected display units.
        x_range_axis = decompose_ang_len((x_range/volume_units[0]).decompose(), equivalencies=radian_length, working_units=volume_units[0])#(x_range/volume_units[0]).decompose()
        y_range_axis = decompose_ang_len((y_range/volume_units[1]).decompose(), equivalencies=radian_length, working_units=volume_units[1])#(y_range/volume_units[1]).decompose()
        z_range_axis = decompose_ang_len((z_range/volume_units[2]).decompose(), equivalencies=radian_length, working_units=volume_units[2])#(z_range/volume_units[2]).decompose()
        if boo_debug:
            print '\n\n'
            print 'x_range: ' + str(x_range)
            print 'y_range: ' + str(y_range)
            print 'z_range: ' + str(z_range)
            print '\n\n'
            print 'x_range_axis: ' + str(x_range_axis)
            print 'y_range_axis: ' + str(y_range_axis)
            print 'z_range_axis: ' + str(z_range_axis)
            print '\n\n'
            print 'x_range_axis[0]: ' + str(x_range_axis[0])
            print 'y_range_axis[0]: ' + str(y_range_axis[0])
            print 'z_range_axis[0]: ' + str(z_range_axis[0])
            print '\nx_range_axis[0].value: ' + str(x_range_axis[0].value)
            print 'x_range_axis[0].unit: ' + str(x_range_axis[0].unit)
            print 'type(x_range_axis[0].unit): ' + str(type(x_range_axis[0].unit))
        axes.axes.ranges = np.array([ x_range_axis[0], x_range_axis[1], y_range_axis[0], y_range_axis[1], z_range_axis[0], z_range_axis[1]])
        axes.axes.use_ranges = True
        #axes.axes.ranges = np.array([ 0.0, 10.0, 0.0, 10.0, z_range_axis[0], z_range_axis[1]])
        axes.axes.x_label = 'Solar X (' + unit_label(volume_units[0]) + ')'
        axes.axes.y_label = 'Solar Y (' + unit_label(volume_units[1]) + ')'
        axes.axes.z_label = 'Z (' + unit_label(volume_units[2]) + ')'
    # Plot the seed points
    if np_seeds is None:
        # Generate a plane for the streamline seed points
        streamline = Streamline()
        vec_field_mag.add_child(streamline)
        streamline.stream_tracer.integration_direction = 'both'
        # Widget index 2 is the plane seed widget in the widget_list.
        streamline.seed.widget = streamline.seed.widget_list[2]
        streamline.seed.widget.resolution = 10
        #streamline.seed.widget.enabled = False
        #streamline.seed.widget.interactor = None
        # Some necessary points within the volume
        # (plane placed 15% of the way up the z extent, centered in x/y)
        z = (0.15 * (z_range_scaled[1] - z_range_scaled[0])) + z_range_scaled[0]
        x_mid = (x_range_scaled[0] + x_range_scaled[1])/2.0
        y_mid = (y_range_scaled[0] + y_range_scaled[1])/2.0
        # Orientate, position and scale the plane
        streamline.seed.widget.normal_to_z_axis = True
        streamline.seed.widget.center = np.array([ x_mid, y_mid, z])
        streamline.seed.widget.point1 = np.array([ x_range_scaled[1], y_range_scaled[0], z])
        streamline.seed.widget.point2 = np.array([ x_range_scaled[0], y_range_scaled[1], z])
        streamline.seed.widget.origin = np.array([ x_range_scaled[0], y_range_scaled[0], z])
        # Update the render
        scene = fig.scene
        scene.render()
    else:
        points = mlab.points3d(np_seeds[:,0], np_seeds[:,1], np_seeds[:,2])
        # Make the points smaller
        points.glyph.glyph.scale_factor = 10.0 #mayavi_scale
        # Make the points blue
        points.actor.property.color = (0.2,0,1)
        # Create the custom streamline object
        streamline = SeedStreamline(seed_points=np_seeds)
        # Add the streamline object to the plot and make it use the magentic field data,
        # by adding it as a child of the field we created earlier.
        # We add it to the magnitude field (which is in itself a child of bfield)
        # so that it picks up the scalar values and colours the lines.
        vec_field_mag.add_child(streamline)
        # Adjust some of the streamline appearance parameters
        streamline.module_manager.scalar_lut_manager.lut_mode = 'winter'#'Greys'
        streamline.stream_tracer.integration_direction = 'both'
        streamline.stream_tracer.maximum_propagation = 500.0
        streamline.update_pipeline() # This doesn't seem to work ATM
    # Add the boundary data 2D map
    if boundary:
        #x_range = boundary.xrange.to(u.meter, equivalencies=radian_length)
        x_range = u.Quantity([ decompose_ang_len(boundary.xrange[0], equivalencies=radian_length),
                               decompose_ang_len(boundary.xrange[1], equivalencies=radian_length) ])
        if boo_debug: print '\nboundary: x_range: ' + str(x_range)
        #y_range = boundary.yrange.to(u.meter, equivalencies=radian_length)
        y_range = u.Quantity([ decompose_ang_len(boundary.yrange[0], equivalencies=radian_length),
                               decompose_ang_len(boundary.yrange[1], equivalencies=radian_length) ])
        x_range_scaled = (x_range/mayavi_unit_length).decompose().value
        if boo_debug: print '\nboundary: x_range_scaled: ' + str(x_range_scaled)
        y_range_scaled = (y_range/mayavi_unit_length).decompose().value
        # Create explicit points in 3D space
        X, Y = np.mgrid[x_range_scaled[0]:x_range_scaled[1]:boundary.data.shape[0]*1j,
                        y_range_scaled[0]:y_range_scaled[1]:boundary.data.shape[1]*1j]
        # Plot and add to the current figure
        img_boundary = mlab.pipeline.array2d_source(X, Y, boundary.data, figure=fig)
        img_boundary = mlab.pipeline.image_actor(img_boundary, figure = fig)
        # Color the image according to the data
        # (sample the sunpy colormap into a 255-entry MayaVi LUT)
        mayavi_ct = boundary.plot_settings['cmap'](range(255))
        img_boundary.module_manager.scalar_lut_manager.lut.table = mayavi_ct*255
        # Legend details
        img_boundary.module_manager.scalar_lut_manager.show_legend = True #module_manager2.scalar_lut_manager.show_legend = True
        img_boundary.module_manager.scalar_lut_manager.scalar_bar_representation.position = np.array([ 0.1, 0.1 ])
        # Place a small outline around the data cube
        mlab.outline()
        # Show the axes if selected
        if show_boundary_axes:
            axes = mlab.axes()
            # Get the ranges of the boundary and scale to the selected units
            x_range = boundary.xrange.to(boundary_units[0].unit, equivalencies=radian_length)
            y_range = boundary.yrange.to(boundary_units[1].unit, equivalencies=radian_length)
            x_range_scaled = (x_range/boundary_units[0]).decompose().value
            y_range_scaled = (y_range/boundary_units[1]).decompose().value
            # Update the ranges manually to use custom units for the boundary
            axes.axes.ranges = np.array([ x_range_scaled[0], x_range_scaled[1], y_range_scaled[0], y_range_scaled[1], 0, 0])
            axes.axes.use_ranges = True
            axes.axes.x_label = 'Solar X (' + unit_label(boundary_units[0]) + ')'
            axes.axes.y_label = 'Solar Y (' + unit_label(boundary_units[1]) + ')'
    return fig
def unit_label(quantity):
    """
    Small function to return a string label that is empty if value is 1.0 or is
    the given number otherwise.
    """
    # A magnitude of exactly 1.0 is elided: show just the unit name.
    return str(quantity.unit) if quantity.value == 1.0 else str(quantity)
|
Cadair/solarbextrapolation
|
solarbextrapolation/visualisation_functions.py
|
Python
|
mit
| 12,249
|
[
"Mayavi"
] |
7175a85c93b14690fe37617863bfc078040b0ec915d1a6f47440fd171e100c1a
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
'''
This software has been developed by:
GI Genética, Fisiología e Historia Forestal
Dpto. Sistemas y Recursos Naturales
ETSI Montes, Forestal y del Medio Natural
Universidad Politécnica de Madrid
http://gfhforestal.com/
https://github.com/ggfhf/
Licence: GNU General Public Licence Version 3.
'''
#-------------------------------------------------------------------------------
'''
This file contains the functions related to the SOAPdenovo-Trans menus in console mode.
'''
#-------------------------------------------------------------------------------
import sys
import cbioinfoapp
import ccloud
import cdataset
import clib
import clog
import xlib
#-------------------------------------------------------------------------------
def build_menu_main():
    '''
    Build the menu Main.

    Runs an interactive loop: draws the top-level menu, reads an option
    and dispatches to the corresponding sub-menu until the user confirms
    exit with option X.
    '''
    while True:
        # print headers
        clib.clear_screen()
        clib.print_headers_with_environment('Main')
        # print the menu options
        print('Options:')
        print()
        print(' 1. Cloud control')
        print(' 2. RNA-seq')
        print(' 3. Datasets')
        print(' 4. Logs')
        print()
        print(' X. Exit NGScloud')
        print()
        # get the selected option
        option = input('Input the selected option: ').upper()
        # process the selected option
        if option == '1':
            build_menu_cloud_control()
        elif option == '2':
            build_menu_rnaseq()
        elif option == '3':
            build_menu_datasets()
        elif option == '4':
            build_menu_logs()
        elif option == 'X':
            # Require an explicit yes/no confirmation before leaving.
            sure = ''
            print('')
            while sure not in ['Y', 'N']:
                sure = input('Are you sure to exit NGScloud (y or n)?: ').upper()
            if sure == 'Y':
                break
#-------------------------------------------------------------------------------
def build_menu_cloud_control():
    '''
    Build the menu Cloud control.

    Interactive loop dispatching to cloud configuration, security,
    cluster/node/volume operation and software setup sub-menus.
    Option X returns to the Main menu.
    '''
    while True:
        # print headers
        clib.clear_screen()
        clib.print_headers_with_environment('Cloud control')
        # print the menu options
        print('Options:')
        print()
        print(' 1. Set environment')
        print()
        print(' 2. Configuration')
        print(' 3. Security')
        print()
        print(' 4. Cluster operation')
        print(' 5. Node operation')
        print(' 6. Volume operation')
        print()
        print(' 7. Bioinfo software setup')
        print()
        print(' 8. Open a terminal')
        print()
        print(' X. Return to menu Main')
        print()
        # get the selected option
        option = input('Input the selected option: ').upper()
        # process the selected option
        if option == '1':
            ccloud.form_set_environment()
        elif option == '2':
            build_menu_configuration()
        elif option == '3':
            build_menu_security()
        elif option == '4':
            build_menu_cluster_operation()
        elif option == '5':
            build_menu_node_operation()
        elif option == '6':
            build_menu_volume_operation()
        elif option == '7':
            build_menu_bioinfo_software_setup()
        elif option == '8':
            ccloud.form_open_terminal()
        elif option == 'X':
            break
#-------------------------------------------------------------------------------
def build_menu_configuration():
    '''
    Build the menu Configuration.

    Interactive loop exposing NGScloud config-file, template and volume
    management forms. Option X returns to the Cloud control menu.
    '''
    while True:
        # print headers
        clib.clear_screen()
        clib.print_headers_with_environment('Configuration')
        # print the menu options
        print('Options:')
        print()
        print(' 1. Recreate NGScloud config file')
        print(' 2. View NGScloud config file')
        print()
        print(' 3. List cluster templates')
        print()
        print(' 4. Update connection data and contact e-mail')
        print(' 5. Update region and zone')
        print()
        print(' 6. Link volume in a cluster template')
        print(' 7. Delink volume in a cluster template')
        print(' 8. Review volumes linked to cluster templates')
        print()
        print(' X. Return to menu Cloud control')
        print()
        # get the selected option
        option = input('Input the selected option: ').upper()
        # process the selected option
        if option == '1':
            ccloud.form_create_ngscloud_config_file(is_menu_call=True)
        elif option == '2':
            ccloud.form_view_ngscloud_config_file()
        elif option == '3':
            ccloud.form_list_templates()
        elif option == '4':
            ccloud.form_update_connection_data()
        elif option == '5':
            ccloud.form_update_region_zone()
        elif option == '6':
            ccloud.form_link_volume_to_template()
        elif option == '7':
            ccloud.form_delink_volume_from_template()
        elif option == '8':
            ccloud.form_review_volume_links()
        elif option == 'X':
            break
#-------------------------------------------------------------------------------
def build_menu_security():
    '''
    Build the menu Security.

    Loops showing the security options and running the selected form until
    the user chooses 'X' to return to the Cloud control menu.
    '''
    while True:
        # print headers
        clib.clear_screen()
        clib.print_headers_with_environment('Security')
        # print the menu options
        print('Options:')
        print()
        print('    1. List key pairs')
        print('    2. Create key pairs')
        print()
        print('    3. List cluster security groups (coming soon!)')
        print('    4. Force removal of a cluster security group (coming soon!)')
        print()
        print('    X. Return to menu Cloud control')
        print()
        # get the selected option
        option = input('Input the selected option: ').upper()
        # process the selected option
        if option == '1':
            ccloud.form_list_keypairs()
        elif option == '2':
            ccloud.form_create_keypairs()
        elif option == '3':
            # placeholder: functionality announced as "coming soon!"
            pass
        elif option == '4':
            # fixed: this branch previously tested '3' twice, so option '4'
            # was never matched; placeholder until the feature is implemented
            pass
        elif option == 'X':
            break
#-------------------------------------------------------------------------------
def build_menu_cluster_operation():
    '''
    Show the Cluster operation menu and dispatch the option selected by the
    user until 'X' returns to the Cloud control menu.
    '''
    # option key -> form to run for that option
    handlers = {
        '1': lambda: ccloud.form_list_clusters(),
        '2': lambda: ccloud.form_create_cluster(),
        '3': lambda: ccloud.form_stop_cluster(),
        '4': lambda: ccloud.form_restart_cluster(),
        '5': lambda: ccloud.form_terminate_cluster(force=False),
        '6': lambda: ccloud.form_terminate_cluster(force=True),
        '7': lambda: ccloud.form_show_cluster_composing(),
        '8': lambda: ccloud.form_show_status_batch_jobs(),
        '9': lambda: ccloud.form_kill_batch_job(),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('Cluster operation')
        # list the available options
        print('Options:')
        print()
        print('    1. List clusters')
        print()
        print('    2. Create cluster')
        print('    3. Stop cluster')
        print('    4. Restart cluster')
        print('    5. Terminate cluster')
        print()
        print('    6. Force termination of a cluster')
        print()
        print('    7. Show cluster composition')
        print()
        print('    8. Show status of batch jobs')
        print('    9. Kill batch job')
        print()
        print('    X. Return to menu Cloud Control')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_node_operation():
    '''
    Show the Node operation menu and run the selected form until the user
    chooses 'X' to return to the Cloud control menu.
    '''
    # option key -> form to run for that option
    handlers = {
        '1': lambda: ccloud.form_list_nodes(),
        '2': lambda: ccloud.form_add_node(),
        '3': lambda: ccloud.form_remove_node(),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('Node operation')
        # list the available options
        print('Options:')
        print()
        print('    1. List nodes')
        print()
        print('    2. Add node in a cluster')
        print('    3. Remove node in a cluster')
        print()
        print('    X. Return to menu Cloud Control')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_volume_operation():
    '''
    Show the Volume operation menu and run the selected form until the user
    chooses 'X' to return to the Cloud control menu.
    '''
    # option key -> form to run for that option
    handlers = {
        '1': lambda: ccloud.form_list_volumes(),
        '2': lambda: ccloud.form_create_volume(),
        '3': lambda: ccloud.form_remove_volume(),
        '4': lambda: ccloud.form_terminate_volume_creator(),
        '5': lambda: ccloud.form_mount_volume(),
        '6': lambda: ccloud.form_unmount_volume(),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('Volume operation')
        # list the available options
        print('Options:')
        print()
        print('    1. List volumes')
        print()
        print('    2. Create volume')
        print('    3. Remove volume')
        print()
        print('    4. Terminate volume creator')
        print()
        print('    5. Mount volume in a node')
        print('    6. Unmount volume in a node')
        print()
        print('    X. Return to menu Cloud Control')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_bioinfo_software_setup():
    '''
    Build the menu Bioinfo software setup.

    Shows the bioinformatics applications that can be set up on the cluster
    and runs the setup form of the selected one, until the user chooses 'X'
    to return to the Cloud control menu.
    '''
    while True:
        # print headers
        clib.clear_screen()
        clib.print_headers_with_environment('Bioinfo software setup')
        # print the menu options
        print('Options:')
        print()
        # fixed: the parenthesis of the remark was previously left unclosed
        print('    1. {0} (Python & Bioconda environments)'.format(xlib.get_miniconda3_name()))
        print()
        print('    2. {0}'.format(xlib.get_blastplus_name()))
        print('    3. {0}'.format(xlib.get_busco_name()))
        print('    4. {0}'.format(xlib.get_cd_hit_name()))
        print('    5. {0}'.format(xlib.get_detonate_name()))
        print('    6. {0}'.format(xlib.get_fastqc_name()))
        print('    7. {0}'.format(xlib.get_gmap_gsnap_name()))
        print('    8. {0}'.format(xlib.get_ngshelper_name()))
        print('    9. {0}'.format(xlib.get_quast_name()))
        print('    A. {0}'.format(xlib.get_rnaquast_name()))
        print('    B. {0}'.format(xlib.get_soapdenovotrans_name()))
        print('    C. {0}'.format(xlib.get_star_name()))
        print('    D. {0}'.format(xlib.get_transabyss_name()))
        print('    E. {0}'.format(xlib.get_transrate_name()))
        print('    F. {0}'.format(xlib.get_trimmomatic_name()))
        print('    G. {0}'.format(xlib.get_trinity_name()))
        # -- print()
        # -- print('    H. {0} & analysis packages'.format(xlib.get_r_name()))
        print()
        print('    X. Return to menu Cloud Control')
        print()
        # get the selected option
        option = input('Input the selected option: ').upper()
        # process the selected option: every option runs the generic setup
        # form with the code of the corresponding application
        if option == '1':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_miniconda3_code())
        elif option == '2':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_blastplus_code())
        elif option == '3':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_busco_code())
        elif option == '4':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_cd_hit_code())
        elif option == '5':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_detonate_code())
        elif option == '6':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_fastqc_code())
        elif option == '7':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_gmap_gsnap_code())
        elif option == '8':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_ngshelper_code())
        elif option == '9':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_quast_code())
        elif option == 'A':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_rnaquast_code())
        elif option == 'B':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_soapdenovotrans_code())
        elif option == 'C':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_star_code())
        elif option == 'D':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_transabyss_code())
        elif option == 'E':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_transrate_code())
        elif option == 'F':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_trimmomatic_code())
        elif option == 'G':
            cbioinfoapp.form_setup_bioinfo_app(xlib.get_trinity_code())
        # -- elif option == 'H':
        # --     cbioinfoapp.form_setup_bioinfo_app(xlib.get_r_code())
        elif option == 'X':
            break
#-------------------------------------------------------------------------------
def build_menu_rnaseq():
    '''
    Show the RNA-seq menu and open the submenu that matches the option
    selected by the user, until 'X' returns to the Main menu.
    '''
    # option key -> submenu builder for that option
    handlers = {
        '1': lambda: build_menu_read_quality(),
        '2': lambda: build_menu_trimming(),
        '3': lambda: build_menu_digital_normalization(),
        '4': lambda: build_menu_denovo_assembly(),
        '5': lambda: build_menu_reference_based_assembly(),
        '6': lambda: build_menu_assembly_assessment(),
        '7': lambda: build_menu_transcriptome_filtering(),
        '8': lambda: build_menu_annotation(),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('RNA-seq')
        # list the available options
        print('Options:')
        print()
        print('    1. Read quality')
        print('    2. Trimming')
        print('    3. Digital normalization')
        print()
        print('    4. De novo assembly')
        print('    5. Reference-based assembly')
        print()
        print('    6. Assembly quality and transcript quantification')
        print('    7. Transcriptome filtering')
        print()
        print('    8. Annotation')
        print()
        print('    X. Return to menu Main')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_read_quality():
    '''
    Show the Read quality menu and open the selected submenu until the user
    chooses 'X' to return to the RNA-seq menu.
    '''
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('Read quality')
        # list the available options
        print('Options:')
        print()
        print('    1. {0}'.format(xlib.get_fastqc_name()))
        print()
        print('    X. Return to menu RNA-seq')
        print()
        # read the user selection
        option = input('Input the selected option: ').upper()
        # 'X' leaves the menu; '1' opens the FastQC submenu
        if option == 'X':
            break
        if option == '1':
            build_menu_fastqc()
#-------------------------------------------------------------------------------
def build_menu_fastqc():
    '''
    Build the menu FastQC.

    Loops showing the FastQC options (recreate/edit config file, run the read
    quality process) until the user chooses 'X' to return to the Read quality
    menu.
    '''
    while True:
        # print headers
        clib.clear_screen()
        clib.print_headers_with_environment(xlib.get_fastqc_name())
        # print the menu options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run read quality process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        # fixed: this menu is entered from (and returns to) "Read quality",
        # not "Quality assessment"
        print('    X. Return to menu Read quality')
        print()
        # get the selected option
        option = input('Input the selected option: ').upper()
        # process the selected option
        if option == '1':
            cbioinfoapp.form_recreate_fastqc_config_file()
        elif option == '2':
            cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_fastqc_code())
        elif option == '3':
            cbioinfoapp.form_run_bioinfo_process(xlib.get_fastqc_code())
        elif option == 'X':
            break
#-------------------------------------------------------------------------------
def build_menu_trimming():
    '''
    Show the Trimming menu and open the selected submenu until the user
    chooses 'X' to return to the RNA-seq menu.
    '''
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('Trimming')
        # list the available options
        print('Options:')
        print()
        print('    1. {0}'.format(xlib.get_trimmomatic_name()))
        print()
        print('    X. Return to menu RNA-seq')
        print()
        # read the user selection
        option = input('Input the selected option: ').upper()
        # 'X' leaves the menu; '1' opens the Trimmomatic submenu
        if option == 'X':
            break
        if option == '1':
            build_menu_trimmomatic()
#-------------------------------------------------------------------------------
def build_menu_trimmomatic():
    '''
    Show the Trimmomatic menu and run the action matching the user's choice
    until 'X' returns to the Trimming menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_trimmomatic_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_trimmomatic_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_trimmomatic_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment(xlib.get_trimmomatic_name())
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run trimming process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Trimming')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_digital_normalization():
    '''
    Show the Digital normalization menu and open the selected submenu until
    the user chooses 'X' to return to the RNA-seq menu.
    '''
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('Digital normalization')
        # list the available options
        print('Options:')
        print()
        print('    1. {0} ({1} package)'.format(xlib.get_insilico_read_normalization_name(), xlib.get_trinity_name()))
        print()
        print('    X. Return to menu RNA-seq')
        print()
        # read the user selection
        option = input('Input the selected option: ').upper()
        # 'X' leaves the menu; '1' opens the insilico_read_normalization submenu
        if option == 'X':
            break
        if option == '1':
            build_menu_insilico_read_normalization()
#-------------------------------------------------------------------------------
def build_menu_insilico_read_normalization():
    '''
    Show the insilico_read_normalization (Trinity package) menu and run the
    action matching the user's choice until 'X' returns to the Digital
    normalization menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_insilico_read_normalization_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_insilico_read_normalization_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_insilico_read_normalization_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('{0} ({1} package)'.format(xlib.get_insilico_read_normalization_name(), xlib.get_trinity_name()))
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run digital normalization process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Digital normalization')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_denovo_assembly():
    '''
    Show the De novo assembly menu and open the selected assembler submenu
    until the user chooses 'X' to return to the RNA-seq menu.
    '''
    # option key -> assembler submenu for that option
    handlers = {
        '1': lambda: build_menu_soapdenovotrans(),
        '2': lambda: build_menu_transabyss(),
        '3': lambda: build_menu_trinity(),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('De novo assembly')
        # list the available options
        print('Options:')
        print()
        print('    1. {0}'.format(xlib.get_soapdenovotrans_name()))
        print('    2. {0}'.format(xlib.get_transabyss_name()))
        print('    3. {0}'.format(xlib.get_trinity_name()))
        print()
        print('    X. Return to menu RNA-seq')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_soapdenovotrans():
    '''
    Show the SOAPdenovo-Trans menu and run the action matching the user's
    choice until 'X' returns to the De novo assembly menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_soapdenovotrans_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_soapdenovotrans_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_soapdenovotrans_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment(xlib.get_soapdenovotrans_name())
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu De novo assembly')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_transabyss():
    '''
    Show the Trans-ABySS menu and run the action matching the user's choice
    until 'X' returns to the De novo assembly menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_transabyss_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_transabyss_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_transabyss_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment(xlib.get_transabyss_name())
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu De novo assembly')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_trinity():
    '''
    Show the Trinity menu and run the action matching the user's choice
    until 'X' returns to the De novo assembly menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_trinity_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_trinity_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_trinity_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment(xlib.get_trinity_name())
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu De novo assembly')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_reference_based_assembly():
    '''
    Show the Reference-based assembly menu and open the selected submenu
    until the user chooses 'X' to return to the RNA-seq menu.
    '''
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('Reference-based assembly')
        # list the available options
        print('Options:')
        print()
        print('    1. {0}'.format(xlib.get_star_name()))
        print()
        print('    X. Return to menu RNA-seq')
        print()
        # read the user selection
        option = input('Input the selected option: ').upper()
        # 'X' leaves the menu; '1' opens the STAR submenu
        if option == 'X':
            break
        if option == '1':
            build_menu_star()
#-------------------------------------------------------------------------------
def build_menu_star():
    '''
    Build the menu STAR.

    Loops showing the STAR options (recreate/edit config file, run the
    assembly process) until the user chooses 'X' to return to the
    Reference-based assembly menu.
    '''
    while True:
        # print headers
        clib.clear_screen()
        clib.print_headers_with_environment(xlib.get_star_name())
        # print the menu options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        # fixed: this menu is entered from (and returns to) "Reference-based
        # assembly", not "De novo assembly"
        print('    X. Return to menu Reference-based assembly')
        print()
        # get the selected option
        option = input('Input the selected option: ').upper()
        # process the selected option
        if option == '1':
            cbioinfoapp.form_recreate_star_config_file()
        elif option == '2':
            cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_star_code())
        elif option == '3':
            cbioinfoapp.form_run_bioinfo_process(xlib.get_star_code())
        elif option == 'X':
            break
#-------------------------------------------------------------------------------
def build_menu_assembly_assessment():
    '''
    Build the menu Assembly quality and transcript quantification.

    Loops showing the assessment tools and opening the submenu of the
    selected one until the user chooses 'X' to return to the RNA-seq menu.
    '''
    while True:
        # print headers
        clib.clear_screen()
        clib.print_headers_with_environment('Assembly quality and transcript quantification')
        # print the menu options
        print('Options:')
        print()
        print('    1. {0}'.format(xlib.get_busco_name()))
        print('    2. {0} ({1} package)'.format(xlib.get_gmap_name(), xlib.get_gmap_gsnap_name()))
        print('    3. {0}'.format(xlib.get_quast_name()))
        print('    4. {0}'.format(xlib.get_rnaquast_name()))
        print('    5. {0} ({1} package)'.format(xlib.get_rsem_eval_name(), xlib.get_detonate_name()))
        print('    6. {0}'.format(xlib.get_transrate_name()))
        print()
        print('    X. Return to menu RNA-seq')
        print()
        # get the selected option
        option = input('Input the selected option: ').upper()
        # process the selected option
        if option == '1':
            build_menu_busco()
        elif option == '2':
            build_menu_gmap()
        elif option == '3':
            build_menu_quast()
        # fixed: this branch previously started a new `if` chain instead of
        # continuing the `elif` chain, breaking the single-selection dispatch
        elif option == '4':
            build_menu_rnaquast()
        elif option == '5':
            build_menu_rsem_eval()
        elif option == '6':
            build_menu_transrate()
        elif option == 'X':
            break
#-------------------------------------------------------------------------------
def build_menu_busco():
    '''
    Show the BUSCO menu and run the action matching the user's choice until
    'X' returns to the Assembly quality and transcript quantification menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_busco_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_busco_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_busco_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment(xlib.get_busco_name())
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly assessment process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Assembly quality and transcript quantification')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_gmap():
    '''
    Show the GMAP (GMAP-GSNAP package) menu and run the action matching the
    user's choice until 'X' returns to the assessment menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_gmap_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_gmap_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_gmap_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('{0} ({1} package)'.format(xlib.get_gmap_name(), xlib.get_gmap_gsnap_name()))
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly assessment process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Assembly quality and transcript quantification')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_quast():
    '''
    Show the QUAST menu and run the action matching the user's choice until
    'X' returns to the Assembly quality and transcript quantification menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_quast_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_quast_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_quast_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment(xlib.get_quast_name())
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly assessment process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Assembly quality and transcript quantification')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_rnaquast():
    '''
    Show the rnaQUAST menu and run the action matching the user's choice
    until 'X' returns to the assessment menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_rnaquast_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_rnaquast_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_rnaquast_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment(xlib.get_rnaquast_name())
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly assessment process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Assembly quality and transcript quantification')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_rsem_eval():
    '''
    Show the RSEM-EVAL (DETONATE package, reference-free evaluation) menu and
    run the action matching the user's choice until 'X' returns to the
    assessment menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_rsem_eval_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_rsem_eval_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_rsem_eval_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('{0} ({1} package)'.format(xlib.get_rsem_eval_name(), xlib.get_detonate_name()))
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly assessment process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Assembly quality and transcript quantification')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_ref_eval():
    '''
    Show the REF-EVAL (DETONATE package, reference-based measures) menu and
    run the action matching the user's choice until 'X' returns to the
    assessment menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_ref_eval_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_ref_eval_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_ref_eval_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('{0} ({1} package)'.format(xlib.get_ref_eval_name(), xlib.get_detonate_name()))
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly assessment process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Assembly quality and transcript quantification')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_transrate():
    '''
    Show the Transrate menu and run the action matching the user's choice
    until 'X' returns to the assessment menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_transrate_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_transrate_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_transrate_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment(xlib.get_transrate_name())
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run assembly assessment process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Assembly quality and transcript quantification')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_transcriptome_filtering():
    '''
    Show the Transcriptome filtering menu and open the selected submenu
    until the user chooses 'X' to return to the RNA-seq menu.
    '''
    # option key -> submenu builder for that option
    handlers = {
        '1': lambda: build_menu_cd_hit_est(),
        '2': lambda: build_menu_transcript_filter(),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('Transcriptome filtering')
        # list the available options
        print('Options:')
        print()
        print('    1. {0} ({1} package)'.format(xlib.get_cd_hit_est_name(), xlib.get_cd_hit_name()))
        print('    2. {0} ({1} package)'.format(xlib.get_transcript_filter_name(), xlib.get_ngshelper_name()))
        print()
        print('    X. Return to menu RNA-seq')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_cd_hit_est():
    '''
    Show the CD-HIT-EST (CD-HIT package) menu and run the action matching the
    user's choice until 'X' returns to the Transcriptome filtering menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_cd_hit_est_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_cd_hit_est_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_cd_hit_est_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('{0} ({1} package)'.format(xlib.get_cd_hit_est_name(), xlib.get_cd_hit_name()))
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run transcriptome filtering process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Transcriptome filtering')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_transcript_filter():
    '''
    Show the transcript-filter (NGShelper package) menu and run the action
    matching the user's choice until 'X' returns to the Transcriptome
    filtering menu.
    '''
    # option key -> handler implementing that option
    handlers = {
        '1': lambda: cbioinfoapp.form_recreate_transcript_filter_config_file(),
        '2': lambda: cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_transcript_filter_code()),
        '3': lambda: cbioinfoapp.form_run_bioinfo_process(xlib.get_transcript_filter_code()),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('{0} ({1} package)'.format(xlib.get_transcript_filter_name(), xlib.get_ngshelper_name()))
        # list the available options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run transcriptome filtering process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Transcriptome filtering')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_annotation():
    '''
    Show the Annotation menu and open the selected submenu until the user
    chooses 'X' to return to the RNA-seq menu.
    '''
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('Annotation')
        # list the available options
        print('Options:')
        print()
        print('    1. {0} ({1} package)'.format(xlib.get_transcriptome_blastx_name(), xlib.get_ngshelper_name()))
        print()
        print('    X. Return to menu RNA-seq')
        print()
        # read the user selection
        option = input('Input the selected option: ').upper()
        # 'X' leaves the menu; '1' opens the transcriptome-blastx submenu
        if option == 'X':
            break
        if option == '1':
            build_menu_transcriptome_blastx()
#-------------------------------------------------------------------------------
def build_menu_transcriptome_blastx():
    '''
    Build the menu transcriptome-blastx (NGShelper package).

    Loops showing the options (recreate/edit config file, run the annotation
    process) until the user chooses 'X' to return to the Annotation menu.
    '''
    while True:
        # print headers
        clib.clear_screen()
        clib.print_headers_with_environment('{0} ({1} package)'.format(xlib.get_transcriptome_blastx_name(), xlib.get_ngshelper_name()))
        # print the menu options
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run annotation process')
        print('       (CAUTION: before running a process, the config file should be updated)')
        print()
        print('    X. Return to menu Annotation')
        print()
        # get the selected option
        option = input('Input the selected option: ').upper()
        # process the selected option
        if option == '1':
            cbioinfoapp.form_recreate_transcriptome_blastx_config_file()
        elif option == '2':
            cbioinfoapp.form_edit_bioinfo_config_file(xlib.get_transcriptome_blastx_code())
        elif option == '3':
            cbioinfoapp.form_run_bioinfo_process(xlib.get_transcriptome_blastx_code())
        elif option == 'X':
            break
#-------------------------------------------------------------------------------
def build_menu_datasets():
    '''
    Show the Datasets menu and dispatch the option selected by the user
    until 'X' returns to the Main menu.
    '''
    # option key -> handler implementing that option
    # ('1' is announced as "coming soon!" and is a no-op for now)
    handlers = {
        '1': lambda: None,
        '2': lambda: build_menu_reference_file_transfer(),
        '3': lambda: build_menu_reference_file_compression_decompression(),
        '4': lambda: cdataset.form_remove_reference_dataset(),
        '5': lambda: build_menu_database_file_transfer(),
        '6': lambda: build_menu_database_file_compression_decompression(),
        '7': lambda: cdataset.form_remove_database_dataset(),
        '8': lambda: build_menu_read_file_transfer(),
        '9': lambda: build_menu_read_file_compression_decompression(),
        'A': lambda: cdataset.form_remove_read_dataset(),
        'B': lambda: build_menu_result_file_transfer(),
        'C': lambda: build_menu_result_file_compression_decompression(),
        'D': lambda: cdataset.form_remove_result_dataset(),
        'E': lambda: cdataset.form_remove_experiment(),
    }
    while True:
        # draw the screen headers
        clib.clear_screen()
        clib.print_headers_with_environment('Datasets')
        # list the available options
        print('Options:')
        print()
        print('    1. List dataset (coming soon!)')
        print()
        print('    2. Reference dataset file transfer')
        print('    3. Reference dataset file compression/decompression')
        print('    4. Remove reference dataset')
        print()
        print('    5. Database file transfer')
        print('    6. Database file compression/decompression')
        print('    7. Remove database')
        print()
        print('    8. Read dataset file transfer')
        print('    9. Read dataset file compression/decompression')
        print('    A. Remove read dataset')
        print()
        print('    B. Result dataset file transfer')
        print('    C. Result dataset file compression/decompression')
        print('    D. Remove result dataset')
        print()
        print('    E. Remove experiment')
        print()
        print('    X. Return to menu Main')
        print()
        # read the user selection and dispatch it
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        if option in handlers:
            handlers[option]()
#-------------------------------------------------------------------------------
def build_menu_reference_file_transfer():
    '''
    Build the menu Reference dataset file transfer.
    '''
    # Dispatch table: menu key -> handler form.
    handlers = {
        '1': cdataset.form_recreate_reference_transfer_config_file,
        '2': cdataset.form_edit_reference_transfer_config_file,
        '3': cdataset.form_upload_reference_dataset,
    }
    while True:
        # draw the menu
        clib.clear_screen()
        clib.print_headers_with_environment('Reference dataset file transfer')
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Upload dataset to a cluster')
        print('       (CAUTION: before running a upload process, the corresponding config file should be updated)')
        print()
        print('    X. Return to menu Datasets')
        print()
        # read the selected option and dispatch it; unknown keys redisplay
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        action = handlers.get(option)
        if action is not None:
            action()
#-------------------------------------------------------------------------------
def build_menu_reference_file_compression_decompression():
    '''
    Build the menu Reference dataset file compression/decompression.
    '''
    # Dispatch table: menu key -> handler form.
    handlers = {
        '1': cdataset.form_recreate_reference_gzip_config_file,
        '2': cdataset.form_edit_reference_gzip_config_file,
        '3': cdataset.form_run_reference_gzip_process,
    }
    while True:
        # draw the menu
        clib.clear_screen()
        clib.print_headers_with_environment('Reference dataset file compression/decompression')
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run compression/decompression process')
        print('       (CAUTION: before running a compression/decompression process,')
        print('       the corresponding config file should be updated)')
        print()
        print('    X. Return to menu Datasets')
        print()
        # read the selected option and dispatch it; unknown keys redisplay
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        action = handlers.get(option)
        if action is not None:
            action()
#-------------------------------------------------------------------------------
def build_menu_database_file_transfer():
    '''
    Build the menu Database file transfer.
    '''
    # Dispatch table: menu key -> handler form.
    handlers = {
        '1': cdataset.form_recreate_database_transfer_config_file,
        '2': cdataset.form_edit_database_transfer_config_file,
        '3': cdataset.form_upload_database_dataset,
    }
    while True:
        # draw the menu
        clib.clear_screen()
        clib.print_headers_with_environment('Database file transfer')
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Upload dataset to a cluster')
        print('       (CAUTION: before running a upload process, the corresponding config file should be updated)')
        print()
        print('    X. Return to menu Datasets')
        print()
        # read the selected option and dispatch it; unknown keys redisplay
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        action = handlers.get(option)
        if action is not None:
            action()
#-------------------------------------------------------------------------------
def build_menu_database_file_compression_decompression():
    '''
    Build the menu Database file compression/decompression.
    '''
    # Dispatch table: menu key -> handler form.
    handlers = {
        '1': cdataset.form_recreate_database_gzip_config_file,
        '2': cdataset.form_edit_database_gzip_config_file,
        '3': cdataset.form_run_database_gzip_process,
    }
    while True:
        # draw the menu
        clib.clear_screen()
        clib.print_headers_with_environment('Database file compression/decompression')
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run compression/decompression process')
        print('       (CAUTION: before running a compression/decompression process,')
        print('       the corresponding config file should be updated)')
        print()
        print('    X. Return to menu Datasets')
        print()
        # read the selected option and dispatch it; unknown keys redisplay
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        action = handlers.get(option)
        if action is not None:
            action()
#-------------------------------------------------------------------------------
def build_menu_read_file_transfer():
    '''
    Build the menu Read dataset file transfer.
    '''
    # Dispatch table: menu key -> handler form.
    handlers = {
        '1': cdataset.form_recreate_read_transfer_config_file,
        '2': cdataset.form_edit_read_transfer_config_file,
        '3': cdataset.form_upload_read_dataset,
    }
    while True:
        # draw the menu
        clib.clear_screen()
        clib.print_headers_with_environment('Read dataset file transfer')
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Upload dataset to a cluster')
        print('       (CAUTION: before running a upload process, the corresponding config file should be updated)')
        print()
        print('    X. Return to menu Datasets')
        print()
        # read the selected option and dispatch it; unknown keys redisplay
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        action = handlers.get(option)
        if action is not None:
            action()
#-------------------------------------------------------------------------------
def build_menu_read_file_compression_decompression():
    '''
    Build the menu Read dataset file compression/decompression.
    '''
    # Dispatch table: menu key -> handler form.
    handlers = {
        '1': cdataset.form_recreate_read_gzip_config_file,
        '2': cdataset.form_edit_read_gzip_config_file,
        '3': cdataset.form_run_read_gzip_process,
    }
    while True:
        # draw the menu
        clib.clear_screen()
        clib.print_headers_with_environment('Read dataset file compression/decompression')
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run compression/decompression process')
        print('       (CAUTION: before running a compression/decompression process,')
        print('       the corresponding config file should be updated)')
        print()
        print('    X. Return to menu Datasets')
        print()
        # read the selected option and dispatch it; unknown keys redisplay
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        action = handlers.get(option)
        if action is not None:
            action()
#-------------------------------------------------------------------------------
def build_menu_result_file_transfer():
    '''
    Build the menu Result dataset file transfer.
    '''
    # Dispatch table: menu key -> handler form.
    handlers = {
        '1': cdataset.form_recreate_result_transfer_config_file,
        '2': cdataset.form_edit_result_transfer_config_file,
        '3': cdataset.form_download_result_dataset,
    }
    while True:
        # draw the menu
        clib.clear_screen()
        clib.print_headers_with_environment('Result dataset file transfer')
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Download dataset from a cluster')
        print('       (CAUTION: before running a download process, the corresponding config file should be updated)')
        print()
        print('    X. Return to menu Datasets')
        print()
        # read the selected option and dispatch it; unknown keys redisplay
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        action = handlers.get(option)
        if action is not None:
            action()
#-------------------------------------------------------------------------------
def build_menu_result_file_compression_decompression():
    '''
    Build the menu Result dataset file compression/decompression.
    '''
    # Dispatch table: menu key -> handler form.
    handlers = {
        '1': cdataset.form_recreate_result_gzip_config_file,
        '2': cdataset.form_edit_result_gzip_config_file,
        '3': cdataset.form_run_result_gzip_process,
    }
    while True:
        # draw the menu
        clib.clear_screen()
        clib.print_headers_with_environment('Result dataset file compression/decompression')
        print('Options:')
        print()
        print('    1. Recreate config file')
        print('    2. Edit config file')
        print()
        print('    3. Run compression/decompression process')
        print('       (CAUTION: before running a compression/decompression process,')
        print('       the corresponding config file should be updated)')
        print()
        print('    X. Return to menu Datasets')
        print()
        # read the selected option and dispatch it; unknown keys redisplay
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        action = handlers.get(option)
        if action is not None:
            action()
#-------------------------------------------------------------------------------
def build_menu_logs():
    '''
    Build the menu Logs.
    '''
    # Dispatch table: menu key -> handler.  Options '1' and '2' (local
    # submission logs) are not implemented yet, so they have no entry and
    # simply redraw the menu.
    handlers = {
        '3': clog.form_list_cluster_experiment_processes,
        '4': clog.form_view_cluster_experiment_process_log,
    }
    while True:
        # draw the menu
        clib.clear_screen()
        clib.print_headers_with_environment('Cluster logs')
        print('Options:')
        print()
        print('    1. List submission logs in the local computer (coming soon!)')
        print('    2. View a submission log in the local computer (coming soon!)')
        print()
        print('    3. List result logs in the cluster')
        print('    4. View a result log in the cluster')
        print()
        print('    X. Return to menu Logs')
        print()
        # read the selected option and dispatch it; unknown keys redisplay
        option = input('Input the selected option: ').upper()
        if option == 'X':
            break
        action = handlers.get(option)
        if action is not None:
            action()
#-------------------------------------------------------------------------------
# This module is a library of menu-building functions; running it directly
# only prints an informational message and exits successfully.
if __name__ == '__main__':
    print('This file contains the functions related to the SOAPdenovo-Trans menus in console mode.')
    sys.exit(0)
#-------------------------------------------------------------------------------
|
GGFHF/NGScloud
|
Package/cmenu.py
|
Python
|
gpl-3.0
| 58,331
|
[
"Bioconda"
] |
b8a8ae0318fed40f4b06e82b41ae9d23d1462f40bc22db32dcfc3f3dad4c3f2d
|
from __future__ import division
from __future__ import print_function
"""
version 1.0 26 October 2015
Posted in github for first time.
version 1.1 23 November 2015
Corrected description of the rounding off the matrix elements.
Corrected hard wrapped text that broke the script.
Added example of running program as a horizontal script.
Made code pep8 compliant (changed use of blank lines,
removed whitespaces in default arguments assignments,
inserted whitespaces after commas in lists,
removed whitespaces at the ends of lines).
Added version number.
version 1.2 23 May 2016
Edited copyright notice.
Corrected typos
version 1.3 23 July 2016
Added missing parenthesis at end of file.
Copyright Notice
================
Copyright (C) 2016 Blaine Mooers
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details:
http://www.gnu.org/licenses/.
The source code in this file is copyrighted, but you can
freely use and copy it as long as you don't change or remove any of
the copyright notices.
Blaine Mooers, PhD
blaine-mooers@ouhsc.edu
975 NE 10th St, BRC 466
University of Oklahoma Health Sciences Center,
Oklahoma City, OK, USA 73104
"""
from pymol import stored, cmd
__author__ = "Blaine Mooers"
__copyright__ = "Blaine Mooers, University of Oklahoma Health Sciences Center, Oklahoma City, OK, USA 73104"
__license__ = "GPL-3"
__version__ = "1.0.2"
__credits__ = ["William Beasley","Chiedza Kanyumbu"]
# people who reported bug fixes, made suggestions, etc.
__date__ = "30 May 2016"
__maintainer__ = "Blaine Mooers"
__email__ = "blaine-mooers@ouhsc.edu"
__status__ = "Production"
def roundview(StoredView=0, decimal_places=2, outname="roundedview.txt"):
    """
    DESCRIPTION

        Round the 18 elements of a PyMOL viewpoint matrix and print them as a
        compact, single-line "set_view (...);" command with no internal
        whitespace, so the view can be pasted into a semicolon-separated chain
        of PyMOL commands.  The line is also appended to a text file in the
        present working directory for easy copying later.

    USAGE

        roundview [StoredView [, decimal_places [, outname]]]

    ARGUMENTS

        StoredView = int: 0 (default) for the current view, or the digit of a
            view previously stored with the "view" command.
        decimal_places = int: number of digits kept right of the decimal
            point (default 2).
        outname = str: file the command line is appended to
            (default "roundedview.txt").

    EXAMPLE

        fetch 1lw9, async=0; run roundview.py; roundview 0, 1

    NOTES

        The 18 view-matrix elements are: 0-8 a column-major 3x3 rotation from
        model axes to camera axes, 9-11 the rotation origin in camera space,
        12-14 the rotation origin in model space, 15/16 the front/rear plane
        distances from the camera, and 17 the orthoscopic flag (not
        implemented in older PyMOL versions).
    """
    # PyMOL passes command-line arguments as strings; coerce them to ints.
    StoredView = int(StoredView)
    decimal_places = int(decimal_places)
    # cmd.get_view returns the 18-element viewpoint matrix as a tuple.
    m = cmd.get_view(StoredView)
    # Rounding each element (rather than string-formatting) avoids unwanted
    # whitespace in the output.
    rounded = [round(elem, decimal_places) for elem in m]
    # One continuous template: the original used a backslash continuation
    # inside the string literal, which leaked the next line's indentation
    # into the output.  The space after "set_view" is required by PyMOL.
    template = ('set_view ({0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},'
                '{11},{12},{13},{14},{15},{16},{17});')
    line = template.format(*rounded)
    # print_function is in effect at the top of this file, so print must be
    # called as a function (the original "print x.format(...)" statement was
    # a syntax error under that import).
    print(line)
    # Bug fix: honor the outname argument; it was previously ignored in
    # favor of a hard-coded filename (which matches outname's default).
    with open(outname, "a") as out_file:
        out_file.write(line + "\n")
    return

# The extend command registers roundview as a native PyMOL command.
cmd.extend("roundview", roundview)
|
MooersLab/EasyPyMOL
|
roundviewWindows.py
|
Python
|
gpl-3.0
| 5,784
|
[
"PyMOL"
] |
c6f48a71f761960b69babeb4f236d8eb984bf84aeb50eca1bf4adcac6351f84e
|
import numpy as np
import shutil
import os
import mdtraj as md
from mdtraj.utils import enter_temp_directory
from mdtraj.utils.delay_import import import_
import tempfile
from distutils.spawn import find_executable
import simtk.unit as units
PACKMOL_PATH = find_executable("packmol")
HEADER_TEMPLATE = """
# Mixture
tolerance %f
filetype pdb
output %s
add_amber_ter
"""
BOX_TEMPLATE = """
structure %s
number %d
inside box 0. 0. 0. %f %f %f
end structure
"""
def pack_box(pdb_filenames_or_trajectories, n_molecules_list, tolerance=2.0, box_size=None):
    """Run packmol to generate a box containing a mixture of molecules.

    Parameters
    ----------
    pdb_filenames_or_trajectories : list({str, Trajectory})
        List of pdb filenames or trajectories for each component of mixture.
        Trajectories are saved to temporary pdb files for packmol.
    n_molecules_list : list(int)
        The number of molecules of each mixture component.
    tolerance : float, optional, default=2.0
        The minimum spacing between molecules during packing. In ANGSTROMS!
    box_size : float, optional
        The size of the box to generate, in ANGSTROMS.  When omitted, a very
        large box is estimated from the atom counts for increased stability.

    Returns
    -------
    trj : MDTraj.Trajectory
        Single frame trajectory with mixture box.

    Notes
    -----
    packmol works in angstroms while MDTraj stores nanometers: `tolerance`
    and `box_size` are taken in angstroms, the returned trajectory is in nm.
    """
    assert len(pdb_filenames_or_trajectories) == len(n_molecules_list), "Must input same number of pdb filenames as num molecules"
    # Fail early if the packmol binary is unavailable.
    if PACKMOL_PATH is None:
        raise IOError("Packmol not found, cannot run pack_box()")
    pdb_filenames = []
    for obj in pdb_filenames_or_trajectories:
        try:  # See if MDTraj Trajectory
            # NOTE(review): tempfile.mktemp is race-prone; packmol needs a
            # plain path so it is kept, but mkstemp would be safer.
            tmp_filename = tempfile.mktemp(suffix=".pdb")
            obj.save_pdb(tmp_filename)
            pdb_filenames.append(tmp_filename)
        except AttributeError:  # Not an MDTraj Trajectory, assume filename
            pdb_filenames.append(obj)
    output_filename = tempfile.mktemp(suffix=".pdb")
    # Approximate a box edge from atom counts when none was given.
    if box_size is None:
        box_size = approximate_volume(pdb_filenames, n_molecules_list)
    # Build the packmol input: global header plus one "structure" stanza
    # per mixture component.
    header = HEADER_TEMPLATE % (tolerance, output_filename)
    for filename, n_molecules in zip(pdb_filenames, n_molecules_list):
        header = header + BOX_TEMPLATE % (filename, n_molecules, box_size, box_size, box_size)
    print(header)
    packmol_filename = tempfile.mktemp(suffix=".txt")
    with open(packmol_filename, 'w') as file_handle:
        file_handle.write(header)
    os.system("%s < %s" % (PACKMOL_PATH, packmol_filename))
    trj = md.load(output_filename)
    assert trj.topology.n_chains == sum(n_molecules_list), "Packmol error: molecules missing from output"
    # packmol drops CONECT records, so rebuild the bonds by tiling each
    # component's bond table across its copies with the right atom offset.
    top, _ = trj.top.to_dataframe()
    trj_i = [md.load(filename) for filename in pdb_filenames]
    bonds_i = [t.top.to_dataframe()[1] for t in trj_i]
    offset = 0
    bonds = []
    for i in range(len(pdb_filenames)):
        n_atoms = trj_i[i].n_atoms
        for _ in range(n_molecules_list[i]):
            bonds.extend(bonds_i[i] + offset)
            offset += n_atoms
    bonds = np.array(bonds)
    trj.top = md.Topology.from_dataframe(top, bonds)
    # packmol worked in angstroms; MDTraj stores nm, hence the / 10.
    trj.unitcell_vectors = np.array([np.eye(3)]) * box_size / 10.
    return trj
def approximate_volume(pdb_filenames, n_molecules_list, box_scaleup_factor=2.0):
    """Approximate the appropriate box size based on the number and types of atoms present.

    Parameters
    ----------
    pdb_filenames : list(str)
        List of pdb filenames for each component of mixture.
    n_molecules_list : list(int)
        The number of molecules of each mixture component.
    box_scaleup_factor : float, optional, default = 2.0
        Factor by which the estimated box size is increased

    Returns
    -------
    box_size : float
        The size of the box to generate. In ANGSTROMS.

    Notes
    -----
    By default, boxes are very large for increased stability, and therefore may
    require extra time for energy minimization and equilibration.
    """
    volume = 0.0  # in cubic angstroms
    # Idiom fix: the original bound pdb_file via enumerate() but ignored it
    # and indexed with k; zip pairs each file with its molecule count.
    for pdb_file, n_molecules in zip(pdb_filenames, n_molecules_list):
        molecule_volume = 0.0
        molecule_trj = md.load(pdb_file)
        for atom in molecule_trj.topology.atoms:
            if atom.element.symbol == 'H':
                molecule_volume += 5.0  # approximated from bondi radius = 1.06 angstroms
            else:
                molecule_volume += 15.0  # approximated from bondi radius of carbon = 1.53 angstroms
        volume += molecule_volume * n_molecules
    # Cube root of the total volume gives an edge length; scale for slack.
    box_size = volume ** (1.0 / 3.0) * box_scaleup_factor
    return box_size
def approximate_volume_by_density(smiles_strings, n_molecules_list, density=1.0, box_scaleup_factor=1.1):
    """Estimate a cubic box edge length from molecule counts, molecular
    weights (computed from SMILES via OpenEye) and a target density for the
    final solvated mixture.  If no density is given, 1 g/ml is assumed.

    Parameters
    ----------
    smiles_strings : list(str)
        List of smiles strings for each component of mixture.
    n_molecules_list : list(int)
        The number of molecules of each mixture component.
    density : float, optional, default 1.0
        Target density for final system in g/ml
    box_scaleup_factor : float, optional, default = 1.1
        Factor by which the estimated box size is increased

    Returns
    -------
    box_size : float
        The size (edge length) of the box to generate. In ANGSTROMS.

    Notes
    -----
    Boxes are only modestly large by default; this sizing has been used in
    the Mobley lab on many systems without substantial problems.
    """
    oechem = import_("openeye.oechem")
    target_density = density * units.grams / units.milliliter
    # Accumulate the total mass of the mixture, one component at a time.
    total_mass = 0.0 * units.grams / units.mole * 1. / units.AVOGADRO_CONSTANT_NA
    for smiles, count in zip(smiles_strings, n_molecules_list):
        molecule = oechem.OEMol()
        oechem.OEParseSmiles(molecule, smiles)
        weight = oechem.OECalculateMolecularWeight(molecule) * units.grams / units.mole
        total_mass += count * weight * 1. / units.AVOGADRO_CONSTANT_NA
    # Density = mass/volume, so volume = mass/density (in ml); the cube
    # root of the volume is the box edge, scaled up for slack.
    vol = total_mass / target_density
    edge = vol ** (1. / 3.)
    return edge * box_scaleup_factor / units.angstroms
def rename_water_atoms(pdb_filename, O_name='O', H1_name='H1', H2_name='H2'):
    """Rename water atoms in a PDB file (in place) to the target names,
    typically so a packmol-generated box matches what tleap expects for
    standard water models.

    Parameters
    ----------
    pdb_filename : str
        The target PDB filename to edit
    O_name : str, optional, default 'O'
        Target name for the water oxygen
    H1_name : str, optional, default 'H1'
        Target name for the first water hydrogen
    H2_name : str, optional, default 'H2'
        Target name for the second water hydrogen

    Notes
    -----
    Uses ParmEd for the edits.  A water is any 3-atom residue whose atom
    names, after stripping trailing digits, contain one 'O' and two 'H'.
    """
    parmed = import_("parmed")
    structure = parmed.load_file(pdb_filename)
    for residue in structure.residues:
        # Only 3-atom residues can be water.
        if len(residue) != 3:
            continue
        # Crude element types: atom names with trailing digits stripped
        # (PDB files carry no explicit element types).
        stripped = []
        for atom in residue.atoms:
            base = atom.name
            while base[-1].isdigit():
                base = base[:-1]
            stripped.append(base)
        if 'O' not in stripped or stripped.count('H') != 2:
            continue
        # It's water: apply the target names, hydrogens in encounter order.
        hydrogens_seen = 0
        for atom in residue.atoms:
            if 'O' in atom.name:
                atom.name = O_name
            elif 'H' in atom.name:
                atom.name = H1_name if hydrogens_seen == 0 else H2_name
                hydrogens_seen += 1
    # Write the edited structure back over the input file.
    structure.write_pdb(pdb_filename)
|
jchodera/openmoltools
|
openmoltools/packmol.py
|
Python
|
gpl-2.0
| 9,546
|
[
"MDTraj",
"OpenMM"
] |
a23b38c5eed13e5e3edf522c1b6e0853c3d5948137a6711117366e6f38178a49
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import datetime
import ajax_select.fields
import six
from django import forms
from django.conf import settings
from karaage.machines.models import Account, Machine
from karaage.people.utils import (
UsernameException,
check_username_for_new_account,
validate_username_for_new_account,
)
class MachineForm(forms.ModelForm):
    """Admin form for creating or editing a Machine record.

    Exposes only the basic hardware/accounting fields of the model.
    """
    class Meta:
        model = Machine
        fields = (
            'name', 'no_cpus', 'no_nodes', 'type',
            'start_date', 'end_date', 'pbs_server_host',
            'mem_per_core', 'scaling_factor')
class AdminAccountForm(forms.ModelForm):
    """Admin form to create or edit an Account for a given person.

    The person is fixed at construction time; the form validates that the
    requested username is acceptable and that the chosen default project
    actually belongs to that person.
    """
    username = forms.CharField(
        label=six.u("Requested username"),
        max_length=settings.USERNAME_MAX_LENGTH,
        help_text=((settings.USERNAME_VALIDATION_ERROR_MSG
                    + " and has a max length of %s.")
                   % settings.USERNAME_MAX_LENGTH))
    # Autocomplete lookup against the project channel; required.
    default_project = ajax_select.fields.AutoCompleteSelectField(
        'project', required=True)
    shell = forms.ChoiceField(choices=settings.SHELLS)
    def __init__(self, person, **kwargs):
        # Remember the account owner and the pre-edit username so clean()
        # can tell whether the username actually changed.
        self.person = person
        super(AdminAccountForm, self).__init__(**kwargs)
        self.old_username = self.instance.username
    def clean_username(self):
        """Reject usernames that fail the new-account validation rules."""
        username = self.cleaned_data['username']
        try:
            validate_username_for_new_account(self.person, username)
        except UsernameException as e:
            raise forms.ValidationError(e.args[0])
        return username
    def clean_default_project(self):
        """Ensure the selected default project belongs to the person."""
        data = self.cleaned_data
        if 'default_project' not in data:
            return data
        default_project = data['default_project']
        query = self.person.projects.filter(pk=default_project.pk)
        if query.count() == 0:
            raise forms.ValidationError(
                six.u('Person does not belong to default project.'))
        return default_project
    def clean(self):
        # Only re-check availability when the username is new or changed.
        data = self.cleaned_data
        if 'username' not in data:
            return data
        username = data['username']
        if (self.old_username is None
                or self.old_username != username):
            try:
                check_username_for_new_account(self.person, username)
            except UsernameException as e:
                raise forms.ValidationError(e.args[0])
        return data
    def save(self, **kwargs):
        """Save the account, stamping owner and creation date on first save."""
        if self.instance.pk is None:
            self.instance.person = self.person
            self.instance.date_created = datetime.date.today()
        return super(AdminAccountForm, self).save(**kwargs)
    class Meta:
        model = Account
        fields = (
            'username',
            'default_project', 'disk_quota', 'shell')
class UserAccountForm(forms.ModelForm):
    """Self-service account form: a user may only change their login shell."""
    shell = forms.ChoiceField(choices=settings.SHELLS)
    class Meta:
        model = Account
        fields = ('shell',)
class AddProjectForm(forms.Form):
    """Form to attach an account to an existing project via autocomplete."""
    project = ajax_select.fields.AutoCompleteSelectField(
        'project', required=True, label='Add to existing project')
|
brianmay/karaage
|
karaage/machines/forms.py
|
Python
|
gpl-3.0
| 3,812
|
[
"Brian"
] |
6b7498ef189e7de129a3115bf75dfd3fac73d12daa6bf804aea9fdea48d964e7
|
import modeller
import IMP
import IMP.test
import IMP.core
import IMP.modeller
from test_rsr_file_read import assertSimilarModellerIMPScores
class Tests(IMP.test.TestCase):
    """Check using Modeller restraints in IMP"""
    def test_modeller_restraints(self):
        """Check that a Modeller restraint scores identically through IMP."""
        # Build a small GGCC peptide in Modeller, with the dynamic sphere
        # (soft-sphere overlap) term disabled so only our restraint scores.
        e = modeller.environ()
        e.edat.dynamic_sphere = False
        e.libs.topology.read('${LIB}/top_heav.lib')
        e.libs.parameters.read('${LIB}/par.lib')
        modmodel = modeller.model(e)
        modmodel.build_sequence('GGCC')
        # Gaussian restraint (mean 10 A, stdev 1 A) on the distance between
        # the first and last atoms of the peptide.
        feat = modeller.features.distance(
            modmodel.atoms[0],
            modmodel.atoms[-1])
        r = modeller.forms.gaussian(feature=feat, mean=10.0, stdev=1.0,
                                    group=modeller.physical.xy_distance)
        modmodel.restraints.add(r)
        # Load the same model into IMP and wrap the Modeller restraints so
        # IMP evaluates them through its own scoring function.
        m = IMP.Model()
        protein = IMP.modeller.ModelLoader(modmodel).load_atoms(m)
        atoms = IMP.atom.get_by_type(protein, IMP.atom.ATOM_TYPE)
        r = IMP.modeller.ModellerRestraints(m, modmodel, atoms)
        sf = IMP.core.RestraintsScoringFunction([r])
        # The two packages must agree, and the absolute score must match the
        # known reference value for this geometry.
        assertSimilarModellerIMPScores(self, sf, modmodel, protein)
        self.assertAlmostEqual(sf.evaluate(False), 5.7837, delta=1e-3)
# Allow the test module to be run directly.
if __name__ == '__main__':
    IMP.test.main()
|
shanot/imp
|
modules/modeller/test/test_modeller_restraints.py
|
Python
|
gpl-3.0
| 1,315
|
[
"Gaussian"
] |
eb7083b88bc6fe927341ad1f24a76337f34af61531df38701351bb7f7c27c1d1
|
# compiler.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Provides the [sqlalchemy.sql.compiler#DefaultCompiler] class, which is
responsible for generating all SQL query strings, as well as
[sqlalchemy.sql.compiler#SchemaGenerator] and [sqlalchemy.sql.compiler#SchemaDropper]
which issue CREATE and DROP DDL for tables, sequences, and indexes.
The elements in this module are used by public-facing constructs like
[sqlalchemy.sql.expression#ClauseElement] and [sqlalchemy.engine#Engine].
While dialect authors will want to be familiar with this module for the purpose of
creating database-specific compilers and schema generators, the module
is otherwise internal to SQLAlchemy.
"""
import string, re
from sqlalchemy import schema, engine, util, exceptions
from sqlalchemy.sql import operators, functions
from sqlalchemy.sql import expression as sql
# Words that must be quoted when used as identifiers.
RESERVED_WORDS = util.Set([
    'all', 'analyse', 'analyze', 'and', 'any', 'array',
    'as', 'asc', 'asymmetric', 'authorization', 'between',
    'binary', 'both', 'case', 'cast', 'check', 'collate',
    'column', 'constraint', 'create', 'cross', 'current_date',
    'current_role', 'current_time', 'current_timestamp',
    'current_user', 'default', 'deferrable', 'desc',
    'distinct', 'do', 'else', 'end', 'except', 'false',
    'for', 'foreign', 'freeze', 'from', 'full', 'grant',
    'group', 'having', 'ilike', 'in', 'initially', 'inner',
    'intersect', 'into', 'is', 'isnull', 'join', 'leading',
    'left', 'like', 'limit', 'localtime', 'localtimestamp',
    'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
    'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
    'placing', 'primary', 'references', 'right', 'select',
    'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
    'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
    'using', 'verbose', 'when', 'where'])

# Identifiers matching LEGAL_CHARACTERS need no quoting; any identifier
# whose first character matches ILLEGAL_INITIAL_CHARACTERS must be quoted.
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
ILLEGAL_INITIAL_CHARACTERS = re.compile(r'[0-9$]')

# Recognizes ":name" bind parameters in textual SQL, and the backslash
# escape form "\:" that suppresses bind-parameter interpretation.
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE)

# Placeholder syntax for anonymous labels generated during compilation,
# e.g. "{ANON 123 foo}".
ANONYMOUS_LABEL = re.compile(r'{ANON (-?\d+) (.*?)}')

# DB-API paramstyle name -> template used to render one bind parameter.
BIND_TEMPLATES = {
    'pyformat':"%%(%(name)s)s",
    'qmark':"?",
    'format':"%%s",
    'numeric':"%(position)s",
    'named':":%(name)s"
}

# Operator function -> SQL text, or a callable taking the rendered
# operands and returning the full expression text.
OPERATORS =  {
    operators.and_ : 'AND',
    operators.or_ : 'OR',
    operators.inv : 'NOT',
    operators.add : '+',
    operators.mul : '*',
    operators.sub : '-',
    operators.div : '/',
    operators.mod : '%',
    operators.truediv : '/',
    operators.lt : '<',
    operators.le : '<=',
    operators.ne : '!=',
    operators.gt : '>',
    operators.ge : '>=',
    operators.eq : '=',
    operators.distinct_op : 'DISTINCT',
    operators.concat_op : '||',
    operators.like_op : 'LIKE',
    operators.notlike_op : 'NOT LIKE',
    operators.ilike_op : lambda x, y: "lower(%s) LIKE lower(%s)" % (x, y),
    operators.notilike_op : lambda x, y: "lower(%s) NOT LIKE lower(%s)" % (x, y),
    operators.between_op : 'BETWEEN',
    operators.in_op : 'IN',
    operators.notin_op : 'NOT IN',
    operators.comma_op : ', ',
    operators.desc_op : 'DESC',
    operators.asc_op : 'ASC',
    operators.from_ : 'FROM',
    operators.as_ : 'AS',
    operators.exists : 'EXISTS',
    operators.is_ : 'IS',
    operators.isnot : 'IS NOT'
}

# Generic SQL function -> dialect-neutral rendering ("%(expr)s" expands
# to the parenthesized argument list).
FUNCTIONS = {
    functions.coalesce : 'coalesce%(expr)s',
    functions.current_date: 'CURRENT_DATE',
    functions.current_time: 'CURRENT_TIME',
    functions.current_timestamp: 'CURRENT_TIMESTAMP',
    functions.current_user: 'CURRENT_USER',
    functions.localtime: 'LOCALTIME',
    functions.localtimestamp: 'LOCALTIMESTAMP',
    functions.sysdate: 'sysdate',
    functions.session_user :'SESSION_USER',
    functions.user: 'USER'
}
class DefaultCompiler(engine.Compiled):
    """Default implementation of Compiled.

    Compiles ClauseElements into SQL strings. Uses a similar visit
    paradigm as visitors.ClauseVisitor but implements its own traversal.
    """

    __traverse_options__ = {'column_collections':False, 'entry':True}

    # operator/function -> SQL text lookup tables; subclasses may override
    operators = OPERATORS
    functions = FUNCTIONS

    def __init__(self, dialect, statement, column_keys=None, inline=False, **kwargs):
        """Construct a new ``DefaultCompiler`` object.

        dialect
          Dialect to be used

        statement
          ClauseElement to be compiled

        column_keys
          a list of column names to be compiled into an INSERT or UPDATE
          statement.
        """
        super(DefaultCompiler, self).__init__(dialect, statement, column_keys, **kwargs)

        # if we are insert/update/delete. set to true when we visit an INSERT, UPDATE or DELETE
        self.isdelete = self.isinsert = self.isupdate = False

        # compile INSERT/UPDATE defaults/sequences inlined (no pre-execute)
        self.inline = inline or getattr(statement, 'inline', False)

        # a dictionary of bind parameter keys to _BindParamClause instances.
        self.binds = {}

        # a dictionary of _BindParamClause instances to "compiled" names that are
        # actually present in the generated SQL
        self.bind_names = {}

        # a stack. what recursive compiler doesn't have a stack ? :)
        self.stack = []

        # relates label names in the final SQL to
        # a tuple of local column/label name, ColumnElement object (if any) and TypeEngine.
        # ResultProxy uses this for type processing and column targeting
        self.result_map = {}

        # a dictionary of ClauseElement subclasses to counters, which are used to
        # generate truncated identifier names or "anonymous" identifiers such as
        # for aliases
        self.generated_ids = {}

        # paramstyle from the dialect (comes from DB-API)
        self.paramstyle = self.dialect.paramstyle

        # true if the paramstyle is positional
        self.positional = self.dialect.positional

        self.bindtemplate = BIND_TEMPLATES[self.paramstyle]

        # a list of the compiled's bind parameter names, used to help
        # formulate a positional argument list
        self.positiontup = []

        # an IdentifierPreparer that formats the quoting of identifiers
        self.preparer = self.dialect.identifier_preparer

    def compile(self):
        # render the full statement; self.string is the public result
        self.string = self.process(self.statement)

    def process(self, obj, stack=None, **kwargs):
        # dispatch to visit_<visit_name>, optionally pushing a frame onto
        # the compiler stack for the duration of the visit
        if stack:
            self.stack.append(stack)
        try:
            meth = getattr(self, "visit_%s" % obj.__visit_name__, None)
            if meth:
                return meth(obj, **kwargs)
        finally:
            if stack:
                self.stack.pop(-1)

    def is_subquery(self, select):
        # true when the current stack frame was entered as a subquery
        return self.stack and self.stack[-1].get('is_subquery')

    def get_whereclause(self, obj):
        """given a FROM clause, return an additional WHERE condition that should be
        applied to a SELECT.

        Currently used by Oracle to provide WHERE criterion for JOIN and OUTER JOIN
        constructs in non-ansi mode.
        """
        return None

    def construct_params(self, params=None):
        """return a dictionary of bind parameter keys and values"""
        if params:
            pd = {}
            for bindparam, name in self.bind_names.iteritems():
                # favor an explicitly supplied value under any of the
                # parameter's known names, else fall back to its default value
                for paramname in (bindparam, bindparam.key, bindparam.shortname, name):
                    if paramname in params:
                        pd[name] = params[paramname]
                        break
                else:
                    pd[name] = bindparam.value
            return pd
        else:
            return dict([(self.bind_names[bindparam], bindparam.value) for bindparam in self.bind_names])

    params = property(construct_params)

    def default_from(self):
        """Called when a SELECT statement has no froms, and no FROM clause is to be appended.

        Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
        """
        return ""

    def visit_grouping(self, grouping, **kwargs):
        return "(" + self.process(grouping.elem) + ")"

    def visit_label(self, label, result_map=None):
        labelname = self._truncated_identifier("colident", label.name)

        if result_map is not None:
            result_map[labelname.lower()] = (label.name, (label, label.obj, labelname), label.obj.type)

        return " ".join([self.process(label.obj), self.operator_string(operators.as_), self.preparer.format_label(label, labelname)])

    def visit_column(self, column, result_map=None, use_schema=False, **kwargs):
        # there is actually somewhat of a ruleset when you would *not* necessarily
        # want to truncate a column identifier, if its mapped to the name of a
        # physical column. but thats very hard to identify at this point, and
        # the identifier length should be greater than the id lengths of any physical
        # columns so should not matter.
        if use_schema and getattr(column, 'table', None) and getattr(column.table, 'schema', None):
            schema_prefix = self.preparer.quote(column.table, column.table.schema) + '.'
        else:
            schema_prefix = ''

        if not column.is_literal:
            name = self._truncated_identifier("colident", column.name)
        else:
            name = column.name

        if result_map is not None:
            result_map[name.lower()] = (name, (column, ), column.type)

        if column._is_oid:
            n = self.dialect.oid_column_name(column)
            if n is not None:
                if column.table is None or not column.table.named_with_column:
                    return n
                else:
                    return schema_prefix + self.preparer.quote(column.table, ANONYMOUS_LABEL.sub(self._process_anon, column.table.name)) + "." + n
            elif len(column.table.primary_key) != 0:
                # no OID support: substitute the table's first primary key column
                pk = list(column.table.primary_key)[0]
                return self.visit_column(pk, result_map=result_map, use_schema=use_schema, **kwargs)
            else:
                return None
        elif column.table is None or not column.table.named_with_column:
            if getattr(column, "is_literal", False):
                return self.escape_literal_column(name)
            else:
                return self.preparer.quote(column, name)
        else:
            if getattr(column, "is_literal", False):
                return schema_prefix + self.preparer.quote(column.table, ANONYMOUS_LABEL.sub(self._process_anon, column.table.name)) + "." + self.escape_literal_column(name)
            else:
                return schema_prefix + self.preparer.quote(column.table, ANONYMOUS_LABEL.sub(self._process_anon, column.table.name)) + "." + self.preparer.quote(column, name)

    def escape_literal_column(self, text):
        """provide escaping for the literal_column() construct."""
        # TODO: some dialects might need different behavior here
        return text.replace('%', '%%')

    def visit_fromclause(self, fromclause, **kwargs):
        return fromclause.name

    def visit_index(self, index, **kwargs):
        return index.name

    def visit_typeclause(self, typeclause, **kwargs):
        return typeclause.type.dialect_impl(self.dialect).get_col_spec()

    def visit_textclause(self, textclause, **kwargs):
        if textclause.typemap is not None:
            for colname, type_ in textclause.typemap.iteritems():
                self.result_map[colname.lower()] = (colname, None, type_)

        def do_bindparam(m):
            # replace each ":name" marker with either the declared bindparam
            # or a plain bind parameter string
            name = m.group(1)
            if name in textclause.bindparams:
                return self.process(textclause.bindparams[name])
            else:
                return self.bindparam_string(name)

        # un-escape any \:params
        return BIND_PARAMS_ESC.sub(lambda m: m.group(1),
            BIND_PARAMS.sub(do_bindparam, textclause.text)
        )

    def visit_null(self, null, **kwargs):
        return 'NULL'

    def visit_clauselist(self, clauselist, **kwargs):
        sep = clauselist.operator
        if sep is None:
            sep = " "
        elif sep == operators.comma_op:
            sep = ', '
        else:
            sep = " " + self.operator_string(clauselist.operator) + " "
        return sep.join([s for s in [self.process(c) for c in clauselist.clauses] if s is not None])

    def visit_calculatedclause(self, clause, **kwargs):
        return self.process(clause.clause_expr)

    def visit_cast(self, cast, **kwargs):
        return "CAST(%s AS %s)" % (self.process(cast.clause), self.process(cast.typeclause))

    def visit_function(self, func, result_map=None, **kwargs):
        if result_map is not None:
            result_map[func.name.lower()] = (func.name, None, func.type)

        name = self.function_string(func)

        if callable(name):
            return name(*[self.process(x) for x in func.clause_expr])
        else:
            return ".".join(func.packagenames + [name]) % {'expr':self.function_argspec(func)}

    def function_argspec(self, func):
        return self.process(func.clause_expr)

    def function_string(self, func):
        # known generic functions get their fixed SQL; anything else renders
        # as name(<args>) via the "%(expr)s" template
        return self.functions.get(func.__class__, func.name + "%(expr)s")

    def visit_compound_select(self, cs, asfrom=False, parens=True, **kwargs):
        stack_entry = {'select':cs}

        if asfrom:
            stack_entry['is_subquery'] = True
        elif self.stack and self.stack[-1].get('select'):
            stack_entry['is_subquery'] = True
        self.stack.append(stack_entry)

        text = string.join([self.process(c, asfrom=asfrom, parens=False) for c in cs.selects], " " + cs.keyword + " ")
        group_by = self.process(cs._group_by_clause, asfrom=asfrom)
        if group_by:
            text += " GROUP BY " + group_by

        text += self.order_by_clause(cs)
        text += (cs._limit or cs._offset) and self.limit_clause(cs) or ""

        self.stack.pop(-1)

        if asfrom and parens:
            return "(" + text + ")"
        else:
            return text

    def visit_unary(self, unary, **kwargs):
        s = self.process(unary.element)
        if unary.operator:
            s = self.operator_string(unary.operator) + " " + s
        if unary.modifier:
            s = s + " " + self.operator_string(unary.modifier)
        return s

    def visit_binary(self, binary, **kwargs):
        op = self.operator_string(binary.operator)
        if callable(op):
            return op(self.process(binary.left), self.process(binary.right))
        else:
            return self.process(binary.left) + " " + op + " " + self.process(binary.right)

    def operator_string(self, operator):
        return self.operators.get(operator, str(operator))

    def visit_bindparam(self, bindparam, **kwargs):
        name = self._truncate_bindparam(bindparam)
        if name in self.binds:
            existing = self.binds[name]
            if existing is not bindparam and (existing.unique or bindparam.unique):
                raise exceptions.CompileError("Bind parameter '%s' conflicts with unique bind parameter of the same name" % bindparam.key)
        self.binds[bindparam.key] = self.binds[name] = bindparam
        return self.bindparam_string(name)

    def _truncate_bindparam(self, bindparam):
        if bindparam in self.bind_names:
            return self.bind_names[bindparam]

        bind_name = bindparam.key
        bind_name = self._truncated_identifier("bindparam", bind_name)
        # add to bind_names for translation
        self.bind_names[bindparam] = bind_name

        return bind_name

    def _truncated_identifier(self, ident_class, name):
        # memoize per (class, name); over-length names are shortened with a
        # per-class hex counter suffix to stay unique
        if (ident_class, name) in self.generated_ids:
            return self.generated_ids[(ident_class, name)]

        anonname = ANONYMOUS_LABEL.sub(self._process_anon, name)

        if len(anonname) > self.dialect.max_identifier_length:
            counter = self.generated_ids.get(ident_class, 1)
            truncname = anonname[0:self.dialect.max_identifier_length - 6] + "_" + hex(counter)[2:]
            self.generated_ids[ident_class] = counter + 1
        else:
            truncname = anonname
        self.generated_ids[(ident_class, name)] = truncname
        return truncname

    def _process_anon(self, match):
        # resolve a "{ANON id derived}" placeholder to a stable generated name
        (ident, derived) = match.group(1,2)

        key = ('anonymous', ident)
        if key in self.generated_ids:
            return self.generated_ids[key]
        else:
            anonymous_counter = self.generated_ids.get(('anon_counter', derived), 1)
            newname = derived + "_" + str(anonymous_counter)
            self.generated_ids[('anon_counter', derived)] = anonymous_counter + 1
            self.generated_ids[key] = newname
            return newname

    def _anonymize(self, name):
        return ANONYMOUS_LABEL.sub(self._process_anon, name)

    def bindparam_string(self, name):
        if self.positional:
            self.positiontup.append(name)
        return self.bindtemplate % {'name':name, 'position':len(self.positiontup)}

    def visit_alias(self, alias, asfrom=False, **kwargs):
        if asfrom:
            return self.process(alias.original, asfrom=True, **kwargs) + " AS " + self.preparer.format_alias(alias, self._anonymize(alias.name))
        else:
            return self.process(alias.original, **kwargs)

    def label_select_column(self, select, column, asfrom):
        """label columns present in a select()."""
        if isinstance(column, sql._Label):
            return column

        if select.use_labels and getattr(column, '_label', None):
            return column.label(column._label)

        if \
            asfrom and \
            isinstance(column, sql._ColumnClause) and \
            not column.is_literal and \
            column.table is not None and \
            not isinstance(column.table, sql.Select):
            return column.label(column.name)
        elif not isinstance(column, (sql._UnaryExpression, sql._TextClause)) and (not hasattr(column, 'name') or isinstance(column, sql._Function)):
            return column.anon_label
        else:
            return column

    def visit_select(self, select, asfrom=False, parens=True, iswrapper=False, **kwargs):
        stack_entry = {'select':select}
        prev_entry = self.stack and self.stack[-1] or None

        # decide whether the outermost result_map should receive this
        # select's columns (only the top-level, non-wrapped select does)
        if asfrom or (prev_entry and 'select' in prev_entry):
            stack_entry['is_subquery'] = True
            if prev_entry and 'iswrapper' in prev_entry:
                column_clause_args = {'result_map':self.result_map}
            else:
                column_clause_args = {}
        elif iswrapper:
            column_clause_args = {}
            stack_entry['iswrapper'] = True
        else:
            column_clause_args = {'result_map':self.result_map}

        if self.stack and 'from' in self.stack[-1]:
            existingfroms = self.stack[-1]['from']
        else:
            existingfroms = None

        froms = select._get_display_froms(existingfroms)

        correlate_froms = util.Set()
        for f in froms:
            correlate_froms.add(f)
            correlate_froms.update(f._get_from_objects())

        # TODO: might want to propigate existing froms for select(select(select))
        # where innermost select should correlate to outermost
        # if existingfroms:
        #     correlate_froms = correlate_froms.union(existingfroms)

        stack_entry['from'] = correlate_froms
        self.stack.append(stack_entry)

        # the actual list of columns to print in the SELECT column list.
        inner_columns = util.OrderedSet()
        for co in select.inner_columns:
            l = self.label_select_column(select, co, asfrom=asfrom)
            inner_columns.add(self.process(l, **column_clause_args))

        collist = string.join(inner_columns.difference(util.Set([None])), ', ')

        text = " ".join(["SELECT"] + [self.process(x) for x in select._prefixes]) + " "
        text += self.get_select_precolumns(select)
        text += collist

        whereclause = select._whereclause

        from_strings = []
        for f in froms:
            from_strings.append(self.process(f, asfrom=True))

            # let the dialect contribute extra WHERE criterion per FROM
            # (e.g. Oracle non-ANSI joins)
            w = self.get_whereclause(f)
            if w is not None:
                if whereclause is not None:
                    whereclause = sql.and_(w, whereclause)
                else:
                    whereclause = w

        if froms:
            text += " \nFROM "
            text += string.join(from_strings, ', ')
        else:
            text += self.default_from()

        if whereclause is not None:
            t = self.process(whereclause)
            if t:
                text += " \nWHERE " + t

        group_by = self.process(select._group_by_clause)
        if group_by:
            text += " GROUP BY " + group_by

        if select._having is not None:
            t = self.process(select._having)
            if t:
                text += " \nHAVING " + t

        text += self.order_by_clause(select)
        text += (select._limit or select._offset) and self.limit_clause(select) or ""
        text += self.for_update_clause(select)

        self.stack.pop(-1)

        if asfrom and parens:
            return "(" + text + ")"
        else:
            return text

    def get_select_precolumns(self, select):
        """Called when building a ``SELECT`` statement, position is just before column list."""
        return select._distinct and "DISTINCT " or ""

    def order_by_clause(self, select):
        order_by = self.process(select._order_by_clause)
        if order_by:
            return " ORDER BY " + order_by
        else:
            return ""

    def for_update_clause(self, select):
        if select.for_update:
            return " FOR UPDATE"
        else:
            return ""

    def limit_clause(self, select):
        text = ""
        if select._limit is not None:
            text += " \n LIMIT " + str(select._limit)
        if select._offset is not None:
            if select._limit is None:
                # OFFSET without LIMIT: emit the dialect's "no limit" marker
                text += " \n LIMIT -1"
            text += " OFFSET " + str(select._offset)
        return text

    def visit_table(self, table, asfrom=False, **kwargs):
        if asfrom:
            if getattr(table, "schema", None):
                return self.preparer.quote(table, table.schema) + "." + self.preparer.quote(table, table.name)
            else:
                return self.preparer.quote(table, table.name)
        else:
            return ""

    def visit_join(self, join, asfrom=False, **kwargs):
        return (self.process(join.left, asfrom=True) + (join.isouter and " LEFT OUTER JOIN " or " JOIN ") + \
            self.process(join.right, asfrom=True) + " ON " + self.process(join.onclause))

    def visit_sequence(self, seq):
        return None

    def visit_insert(self, insert_stmt):
        self.isinsert = True
        colparams = self._get_colparams(insert_stmt)
        preparer = self.preparer

        return ("INSERT INTO %s (%s) VALUES (%s)" %
                (preparer.format_table(insert_stmt.table),
                 ', '.join([preparer.quote(c[0], c[0].name)
                            for c in colparams]),
                 ', '.join([c[1] for c in colparams])))

    def visit_update(self, update_stmt):
        self.stack.append({'from':util.Set([update_stmt.table])})

        self.isupdate = True
        colparams = self._get_colparams(update_stmt)

        text = "UPDATE " + self.preparer.format_table(update_stmt.table) + " SET " + string.join(["%s=%s" % (self.preparer.quote(c[0], c[0].name), c[1]) for c in colparams], ', ')

        if update_stmt._whereclause:
            text += " WHERE " + self.process(update_stmt._whereclause)

        self.stack.pop(-1)

        return text

    def _get_colparams(self, stmt):
        """create a set of tuples representing column/string pairs for use
        in an INSERT or UPDATE statement.
        """

        def create_bind_param(col, value):
            bindparam = sql.bindparam(col.key, value, type_=col.type)
            self.binds[col.key] = bindparam
            return self.bindparam_string(self._truncate_bindparam(bindparam))

        # columns whose values must be fetched after (postfetch) or before
        # (prefetch) statement execution
        self.postfetch = []
        self.prefetch = []

        # no parameters in the statement, no parameters in the
        # compiled params - return binds for all columns
        if self.column_keys is None and stmt.parameters is None:
            return [(c, create_bind_param(c, None)) for c in stmt.table.columns]

        # if we have statement parameters - set defaults in the
        # compiled params
        if self.column_keys is None:
            parameters = {}
        else:
            parameters = dict([(getattr(key, 'key', key), None) for key in self.column_keys])

        if stmt.parameters is not None:
            for k, v in stmt.parameters.iteritems():
                parameters.setdefault(getattr(k, 'key', k), v)

        # create a list of column assignment clauses as tuples
        values = []
        for c in stmt.table.columns:
            if c.key in parameters:
                value = parameters[c.key]
                if sql._is_literal(value):
                    value = create_bind_param(c, value)
                else:
                    self.postfetch.append(c)
                    value = self.process(value.self_group())
                values.append((c, value))
            elif isinstance(c, schema.Column):
                if self.isinsert:
                    if (c.primary_key and self.dialect.preexecute_pk_sequences and not self.inline):
                        if (((isinstance(c.default, schema.Sequence) and
                              not c.default.optional) or
                             not self.dialect.supports_pk_autoincrement) or
                            (c.default is not None and
                             not isinstance(c.default, schema.Sequence))):
                            values.append((c, create_bind_param(c, None)))
                            self.prefetch.append(c)
                    elif isinstance(c.default, schema.ColumnDefault):
                        if isinstance(c.default.arg, sql.ClauseElement):
                            values.append((c, self.process(c.default.arg.self_group())))
                            if not c.primary_key:
                                # dont add primary key column to postfetch
                                self.postfetch.append(c)
                        else:
                            values.append((c, create_bind_param(c, None)))
                            self.prefetch.append(c)
                    elif isinstance(c.default, schema.PassiveDefault):
                        if not c.primary_key:
                            self.postfetch.append(c)
                    elif isinstance(c.default, schema.Sequence):
                        proc = self.process(c.default)
                        if proc is not None:
                            values.append((c, proc))
                            if not c.primary_key:
                                self.postfetch.append(c)
                elif self.isupdate:
                    if isinstance(c.onupdate, schema.ColumnDefault):
                        if isinstance(c.onupdate.arg, sql.ClauseElement):
                            values.append((c, self.process(c.onupdate.arg.self_group())))
                            self.postfetch.append(c)
                        else:
                            values.append((c, create_bind_param(c, None)))
                            self.prefetch.append(c)
                    elif isinstance(c.onupdate, schema.PassiveDefault):
                        self.postfetch.append(c)
        return values

    def visit_delete(self, delete_stmt):
        self.stack.append({'from':util.Set([delete_stmt.table])})
        self.isdelete = True

        text = "DELETE FROM " + self.preparer.format_table(delete_stmt.table)

        if delete_stmt._whereclause:
            text += " WHERE " + self.process(delete_stmt._whereclause)

        self.stack.pop(-1)

        return text

    def visit_savepoint(self, savepoint_stmt):
        return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)

    def visit_rollback_to_savepoint(self, savepoint_stmt):
        return "ROLLBACK TO SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)

    def visit_release_savepoint(self, savepoint_stmt):
        return "RELEASE SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)

    def __str__(self):
        return self.string or ''
class DDLBase(engine.SchemaIterator):
    """Shared base for the CREATE/DROP schema emitters below."""

    def find_alterables(self, tables):
        """Return foreign-key constraints flagged ``use_alter`` whose table is
        in *tables*; these are emitted separately via ALTER TABLE."""
        collected = []

        class _Collector(schema.SchemaVisitor):
            def visit_foreign_key_constraint(self, constraint):
                if constraint.use_alter and constraint.table in tables:
                    collected.append(constraint)

        visitor = _Collector()
        for table in tables:
            for constraint in table.constraints:
                visitor.traverse(constraint)
        return collected
class SchemaGenerator(DDLBase):
    """Emit CREATE statements (tables, indexes, constraints) for a MetaData
    collection against a single connection."""

    def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs):
        super(SchemaGenerator, self).__init__(connection, **kwargs)
        self.checkfirst = checkfirst
        self.tables = tables and util.Set(tables) or None
        self.preparer = dialect.identifier_preparer
        self.dialect = dialect

    def get_column_specification(self, column, first_pk=False):
        """Return the DDL fragment for a single column; dialect-specific."""
        raise NotImplementedError()

    def visit_metadata(self, metadata):
        # with checkfirst, skip tables that already exist in the database
        collection = [t for t in metadata.table_iterator(reverse=False, tables=self.tables) if (not self.checkfirst or not self.dialect.has_table(self.connection, t.name, schema=t.schema))]
        for table in collection:
            self.traverse_single(table)
        if self.dialect.supports_alter:
            # use_alter foreign keys are added after all tables exist
            for alterable in self.find_alterables(collection):
                self.add_foreignkey(alterable)

    def visit_table(self, table):
        for listener in table.ddl_listeners['before-create']:
            listener('before-create', table, self.connection)

        for column in table.columns:
            if column.default is not None:
                self.traverse_single(column.default)

        self.append("\nCREATE TABLE " + self.preparer.format_table(table) + " (")

        separator = "\n"

        # if only one primary key, specify it along with the column
        first_pk = False
        for column in table.columns:
            self.append(separator)
            separator = ", \n"
            self.append("\t" + self.get_column_specification(column, first_pk=column.primary_key and not first_pk))
            if column.primary_key:
                first_pk = True
            for constraint in column.constraints:
                self.traverse_single(constraint)

        # On some DB order is significant: visit PK first, then the
        # other constraints (engine.ReflectionTest.testbasic failed on FB2)
        if table.primary_key:
            self.traverse_single(table.primary_key)
        for constraint in [c for c in table.constraints if c is not table.primary_key]:
            self.traverse_single(constraint)

        self.append("\n)%s\n\n" % self.post_create_table(table))
        self.execute()

        if hasattr(table, 'indexes'):
            for index in table.indexes:
                self.traverse_single(index)

        for listener in table.ddl_listeners['after-create']:
            listener('after-create', table, self.connection)

    def post_create_table(self, table):
        """Hook for dialects to append text (e.g. storage clauses) after ')'."""
        return ''

    def get_column_default_string(self, column):
        # render a server-side DEFAULT value for a passive default, or None
        if isinstance(column.default, schema.PassiveDefault):
            if isinstance(column.default.arg, basestring):
                return "'%s'" % column.default.arg
            else:
                return unicode(self._compile(column.default.arg, None))
        else:
            return None

    def _compile(self, tocompile, parameters):
        """compile the given string/parameters using this SchemaGenerator's dialect."""
        compiler = self.dialect.statement_compiler(self.dialect, tocompile, parameters)
        compiler.compile()
        return compiler

    def visit_check_constraint(self, constraint):
        self.append(", \n\t")
        if constraint.name is not None:
            self.append("CONSTRAINT %s " %
                        self.preparer.format_constraint(constraint))
        self.append(" CHECK (%s)" % constraint.sqltext)
        self.define_constraint_deferrability(constraint)

    def visit_column_check_constraint(self, constraint):
        self.append(" CHECK (%s)" % constraint.sqltext)
        self.define_constraint_deferrability(constraint)

    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return
        self.append(", \n\t")
        if constraint.name is not None:
            self.append("CONSTRAINT %s " % self.preparer.format_constraint(constraint))
        self.append("PRIMARY KEY ")
        self.append("(%s)" % ', '.join([self.preparer.quote(c, c.name) for c in constraint]))
        self.define_constraint_deferrability(constraint)

    def visit_foreign_key_constraint(self, constraint):
        if constraint.use_alter and self.dialect.supports_alter:
            # deferred: emitted later via add_foreignkey()
            return
        self.append(", \n\t ")
        self.define_foreign_key(constraint)

    def add_foreignkey(self, constraint):
        self.append("ALTER TABLE %s ADD " % self.preparer.format_table(constraint.table))
        self.define_foreign_key(constraint)
        self.execute()

    def define_foreign_key(self, constraint):
        preparer = self.preparer
        if constraint.name is not None:
            self.append("CONSTRAINT %s " %
                        preparer.format_constraint(constraint))
        table = list(constraint.elements)[0].column.table
        self.append("FOREIGN KEY(%s) REFERENCES %s (%s)" % (
            ', '.join([preparer.quote(f.parent, f.parent.name) for f in constraint.elements]),
            preparer.format_table(table),
            ', '.join([preparer.quote(f.column, f.column.name) for f in constraint.elements])
        ))
        if constraint.ondelete is not None:
            self.append(" ON DELETE %s" % constraint.ondelete)
        if constraint.onupdate is not None:
            self.append(" ON UPDATE %s" % constraint.onupdate)
        self.define_constraint_deferrability(constraint)

    def visit_unique_constraint(self, constraint):
        self.append(", \n\t")
        if constraint.name is not None:
            self.append("CONSTRAINT %s " %
                        self.preparer.format_constraint(constraint))
        self.append(" UNIQUE (%s)" % (', '.join([self.preparer.quote(c, c.name) for c in constraint])))
        self.define_constraint_deferrability(constraint)

    def define_constraint_deferrability(self, constraint):
        if constraint.deferrable is not None:
            if constraint.deferrable:
                self.append(" DEFERRABLE")
            else:
                self.append(" NOT DEFERRABLE")
        if constraint.initially is not None:
            self.append(" INITIALLY %s" % constraint.initially)

    def visit_column(self, column):
        # columns are rendered inside visit_table; nothing standalone to do
        pass

    def visit_index(self, index):
        preparer = self.preparer
        self.append("CREATE ")
        if index.unique:
            self.append("UNIQUE ")
        self.append("INDEX %s ON %s (%s)" \
                    % (preparer.format_index(index),
                       preparer.format_table(index.table),
                       string.join([preparer.quote(c, c.name) for c in index.columns], ', ')))
        self.execute()
class SchemaDropper(DDLBase):
    """Emit DROP statements for the tables and indexes of a MetaData collection."""

    def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs):
        super(SchemaDropper, self).__init__(connection, **kwargs)
        self.checkfirst = checkfirst
        self.tables = tables
        self.preparer = dialect.identifier_preparer
        self.dialect = dialect

    def visit_metadata(self, metadata):
        """Drop every selected table, removing ALTER-added foreign keys first."""
        targets = []
        for table in metadata.table_iterator(reverse=True, tables=self.tables):
            # with checkfirst, only drop tables that actually exist
            if self.checkfirst and not self.dialect.has_table(self.connection, table.name, schema=table.schema):
                continue
            targets.append(table)
        if self.dialect.supports_alter:
            for fk_constraint in self.find_alterables(targets):
                self.drop_foreignkey(fk_constraint)
        for table in targets:
            self.traverse_single(table)

    def visit_index(self, index):
        """Emit DROP INDEX for a single index."""
        self.append("\nDROP INDEX " + self.preparer.format_index(index))
        self.execute()

    def drop_foreignkey(self, constraint):
        """Remove a foreign key via ALTER TABLE ... DROP CONSTRAINT."""
        table_name = self.preparer.format_table(constraint.table)
        constraint_name = self.preparer.format_constraint(constraint)
        self.append("ALTER TABLE %s DROP CONSTRAINT %s" % (table_name, constraint_name))
        self.execute()

    def visit_table(self, table):
        """Emit DROP TABLE, firing before/after-drop listeners around it."""
        for listener in table.ddl_listeners['before-drop']:
            listener('before-drop', table, self.connection)
        for column in table.columns:
            if column.default is not None:
                self.traverse_single(column.default)
        self.append("\nDROP TABLE " + self.preparer.format_table(table))
        self.execute()
        for listener in table.ddl_listeners['after-drop']:
            listener('after-drop', table, self.connection)
class IdentifierPreparer(object):
    """Handle quoting and case-folding of identifiers based on options."""

    reserved_words = RESERVED_WORDS
    legal_characters = LEGAL_CHARACTERS
    illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS

    def __init__(self, dialect, initial_quote='"', final_quote=None, omit_schema=False):
        """Construct a new ``IdentifierPreparer`` object.

        initial_quote
          Character that begins a delimited identifier.

        final_quote
          Character that ends a delimited identifier. Defaults to `initial_quote`.

        omit_schema
          Prevent prepending schema name. Useful for databases that do
          not support schemae.
        """
        self.dialect = dialect
        self.initial_quote = initial_quote
        self.final_quote = final_quote or self.initial_quote
        self.omit_schema = omit_schema
        # memoized ident -> quoted-or-plain string decisions (see quote())
        self.__strings = {}

    def _escape_identifier(self, value):
        """Escape an identifier.

        Subclasses should override this to provide database-dependent
        escaping behavior.
        """
        return value.replace('"', '""')

    def _unescape_identifier(self, value):
        """Canonicalize an escaped identifier.

        Subclasses should override this to provide database-dependent
        unescaping behavior that reverses _escape_identifier.
        """
        return value.replace('""', '"')

    def quote_identifier(self, value):
        """Quote an identifier.

        Subclasses should override this to provide database-dependent
        quoting behavior.
        """
        return self.initial_quote + self._escape_identifier(value) + self.final_quote

    def _requires_quotes(self, value):
        """Return True if the given identifier requires quoting."""
        lc_value = value.lower()
        return (lc_value in self.reserved_words
                or self.illegal_initial_characters.match(value[0])
                or not self.legal_characters.match(unicode(value))
                or (lc_value != value))

    def quote(self, obj, ident):
        # objects with quote=True are always quoted; otherwise the decision
        # for a given ident is computed once and cached
        if getattr(obj, 'quote', False):
            return self.quote_identifier(ident)
        if ident in self.__strings:
            return self.__strings[ident]
        else:
            if self._requires_quotes(ident):
                self.__strings[ident] = self.quote_identifier(ident)
            else:
                self.__strings[ident] = ident
            return self.__strings[ident]

    def should_quote(self, object):
        return object.quote or self._requires_quotes(object.name)

    def format_sequence(self, sequence, use_schema=True):
        name = self.quote(sequence, sequence.name)
        if not self.omit_schema and use_schema and sequence.schema is not None:
            name = self.quote(sequence, sequence.schema) + "." + name
        return name

    def format_label(self, label, name=None):
        return self.quote(label, name or label.name)

    def format_alias(self, alias, name=None):
        return self.quote(alias, name or alias.name)

    def format_savepoint(self, savepoint, name=None):
        return self.quote(savepoint, name or savepoint.ident)

    def format_constraint(self, constraint):
        return self.quote(constraint, constraint.name)

    def format_index(self, index):
        return self.quote(index, index.name)

    def format_table(self, table, use_schema=True, name=None):
        """Prepare a quoted table and schema name."""
        if name is None:
            name = table.name
        result = self.quote(table, name)
        if not self.omit_schema and use_schema and getattr(table, "schema", None):
            result = self.quote(table, table.schema) + "." + result
        return result

    def format_column(self, column, use_table=False, name=None, table_name=None):
        """Prepare a quoted column name.

        deprecated. use preparer.quote(col, column.name) or combine with format_table()
        """
        if name is None:
            name = column.name
        if not getattr(column, 'is_literal', False):
            if use_table:
                return self.format_table(column.table, use_schema=False, name=table_name) + "." + self.quote(column, name)
            else:
                return self.quote(column, name)
        else:
            # literal textual elements get stuck into ColumnClause alot, which shouldnt get quoted
            if use_table:
                return self.format_table(column.table, use_schema=False, name=table_name) + "." + name
            else:
                return name

    def format_table_seq(self, table, use_schema=True):
        """Format table name and schema as a tuple."""
        # Dialects with more levels in their fully qualified references
        # ('database', 'owner', etc.) could override this and return
        # a longer sequence.
        if not self.omit_schema and use_schema and getattr(table, 'schema', None):
            return (self.quote_identifier(table.schema),
                    self.format_table(table, use_schema=False))
        else:
            return (self.format_table(table, use_schema=False), )

    def unformat_identifiers(self, identifiers):
        """Unpack 'schema.table.column'-like strings into components."""
        try:
            r = self._r_identifiers
        except AttributeError:
            # lazily build and cache the splitting regex from this
            # preparer's quote characters
            initial, final, escaped_final = \
                [re.escape(s) for s in
                 (self.initial_quote, self.final_quote,
                  self._escape_identifier(self.final_quote))]
            r = re.compile(
                r'(?:'
                r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
                r'|([^\.]+))(?=\.|$))+' %
                { 'initial': initial,
                  'final': final,
                  'escaped': escaped_final })
            self._r_identifiers = r

        return [self._unescape_identifier(i)
                for i in [a or b for a, b in r.findall(identifiers)]]
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.3-py2.5.egg/sqlalchemy/sql/compiler.py
|
Python
|
bsd-3-clause
| 43,724
|
[
"VisIt"
] |
02445c55e65611c0d6b2ae943a0ebfeac5f84c7c4963efaa3dac145c1ec3eb4b
|
from lettuce import step,before,world,after
from lettuce.django import django_url
from django.contrib.auth.models import User
from frontend.models import Feature
from codewiki.models import Scraper
@before.each_scenario
def reset_schedule(scenario):
    # Reset the fixture scraper's schedule before every scenario so tests
    # do not depend on scheduling state left over from a previous one.
    scraper = Scraper.objects.get(pk=1)
    scraper.run_interval = -1  # presumably -1 disables scheduled runs -- TODO confirm against Scraper model
    scraper.save()
@step(u'Given I am an? "([^"]*)" user')
def given_i_am_a_plan_user(step, plan):
    # Composite step: log in as the standard test user, enable the vaults
    # feature, and switch to the named billing plan (spaces stripped,
    # lowercased to match plan identifiers).
    plan = plan.replace(' ', '').lower()
    step.behave_as("""
        Given user "test" with password "pass" is logged in
        And I have the "Self Service Vaults" feature enabled
        And I am on the "%s" plan
    """ % plan)
@step(u'(?:Then|And) I should see the privacy panel')
def i_should_see_the_privacy_panel(step):
    # Passes iff the privacy status element is present on the page.
    assert world.browser.find_by_css("#privacy_status")
@step(u'(?:Then|And) I should see the button to change the privacy settings')
def i_should_see_the_button_to_change_the_privacy_settings(step):
    # Passes iff the "change privacy" control is present on the page.
    assert world.browser.find_by_css("#show_privacy_choices")
@step(u'(?:When|And) I click the privacy button')
def i_click_the_privacy_button(step):
    """Click the first privacy button in the collaboration button list."""
    privacy_button = world.browser.find_by_css("#collaboration .buttons li a").first
    privacy_button.click()
@step(u'(?:When|And) I click the change privacy button')
def i_click_the_change_privacy_button(step):
    """Click the control that opens the privacy choices."""
    change_button = world.browser.find_by_css("#show_privacy_choices").first
    change_button.click()
@step(u"(?:When|And) I visit my scraper's overview page$")
def and_i_am_on_the_scraper_overview_page(step):
world.browser.visit(django_url('/scrapers/test_scraper'))
@step(u'(?:Given|And) I am on the "([^"]*)" plan')
def i_am_on_the_plan(step, plan):
    """Move the test user's profile onto the named subscription plan."""
    test_user = User.objects.get(username='test')
    test_user.get_profile().change_plan(plan)
|
lkundrak/scraperwiki
|
web/frontend/features/privacy_steps.py
|
Python
|
agpl-3.0
| 1,714
|
[
"VisIt"
] |
9fd4a13af4c78ae3306c80af0ac31dec6b45d83727a21fe0f3b209ede2a26c24
|
__author__ = "Zhenzhou Wu"
__copyright__ = "Copyright 2012, Zhenzhou Wu"
__credits__ = ["Zhenzhou Wu"]
__license__ = "3-clause BSD"
__email__ = "hyciswu@gmail.com"
__maintainer__ = "Zhenzhou Wu"
"""
Functionality : Define the noise that is to be added to the dataset
"""
import numpy as np
class Noise(object):
    """
    Abstract base class for noise processes applied to a dataset.

    Concrete subclasses implement apply() and, where the noise is
    reversible, invert().
    """

    def apply(self, X):
        """
        Return a noisy version of X.

        PARAM:
            X : 2d numpy array of dimension number of examples by number of dimensions
        """
        raise NotImplementedError(str(type(self))+" does not implement an apply method.")

    def invert(self, X):
        """
        Remove previously applied noise from X.

        PARAM:
            X : 2d numpy array of dimension number of examples by number of dimensions
        """
        raise NotImplementedError(str(type(self))+" does not implement an invert method.")
class MaskOut(Noise):
    """
    Noise that zeroes out a random subset of the dimensions of every example.
    """

    def __init__(self, ratio=0.5):
        """
        PARAM:
            ratio : float
                Fraction of the input dimensions that is masked out.
        """
        self.ratio = ratio

    def apply(self, X):
        # Per-element keep/drop mask; each entry survives with probability
        # (1 - ratio). Saved on the instance so invert() can undo it.
        self.noise = np.random.binomial(size=X.shape, n=1, p=(1-self.ratio))
        return X * self.noise

    def invert(self, X):
        # NOTE(review): dividing by a 0/1 mask yields inf/nan for dropped
        # entries -- presumably only the surviving entries are meaningful.
        return X / self.noise
class Gaussian(Noise):
    """
    Additive element-wise Gaussian noise.
    """

    def __init__(self, std=0.01, mean=0):
        # Parameters of the normal distribution the noise is drawn from.
        self.std = std
        self.mean = mean

    def apply(self, X):
        # Perturb every entry of X with an independent N(mean, std^2) draw.
        return X + np.random.normal(loc=self.mean, scale=self.std, size=X.shape)
class BlackOut(Noise):
    """
    Noise that zeroes out whole examples (rows) at random, i.e. noise along
    the example/time dimension rather than the feature dimension.
    """

    def __init__(self, ratio=0.5):
        """
        PARAM:
            ratio : float
                Fraction of the examples that is masked out.
        """
        self.ratio = ratio

    def apply(self, X):
        # One 0/1 draw per example, broadcast across that example's columns.
        mask = np.random.binomial(size=(X.shape[0],1), n=1, p=(1-self.ratio))
        return X * mask
|
hycis/Pynet
|
pynet/datasets/dataset_noise.py
|
Python
|
apache-2.0
| 2,228
|
[
"Gaussian"
] |
ba2212aa9fff365744d26975c401ab89291809c2ced9c1fef8490876e4688a04
|
#
# This file is part of jetflows.
#
# Copyright (C) 2014, Henry O. Jacobs (hoj201@gmail.com), Stefan Sommer (sommer@di.ku.dk)
# https://github.com/nefan/jetflows.git
#
# jetflows is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jetflows is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jetflows. If not, see <http://www.gnu.org/licenses/>.
#
import matplotlib.pyplot as plt
import two_jets as tj
import numpy as np
import kernels.pyGaussian as gaussian
# Load the problem setup saved by the simulation driver (particle count,
# spatial dimension, kernel width) and propagate it both to the two_jets
# module and to its Gaussian kernel so all components agree.
R = np.load('setup.npy')
tj.N = int(R[0])
tj.DIM = int(R[1])
tj.SIGMA = R[2]
tj.gaussian.N = tj.N
tj.gaussian.DIM = tj.DIM
tj.gaussian.SIGMA = tj.SIGMA
# Plotting is always done in 2D, independent of tj.DIM.
DIM = 2
def display_velocity_field( q , p , mu_1 , mu_2 , q1=None ):
W = 5*tj.SIGMA
res = 30
N_nodes = res**DIM
store = np.outer( np.linspace(-W,W , res), np.ones(res) )
nodes = np.zeros( [N_nodes , tj.DIM] )
nodes[:,0] = np.reshape( store , N_nodes )
nodes[:,1] = np.reshape( store.T , N_nodes )
K,DK,D2K,D3K,D4K,D5K,D6K = tj.derivatives_of_kernel( nodes , q )
vel_field = np.einsum('ijab,jb->ia',K,p)\
- np.einsum('ijabc,jbc->ia',DK,mu_1)\
+ np.einsum('ijabcd,jbcd->ia',D2K,mu_2)
U = vel_field[:,0]
V = vel_field[:,1]
f = plt.figure(1)
plt.quiver( nodes[:,0] , nodes[:,1] , U , V , color='0.50' )
plt.plot(q[:,0],q[:,1],'ro')
# generate vertices of a circle
N_vert = 20
circle_verts = np.zeros( [ 2 , N_vert + 1 ] )
theta = np.linspace(0,2*np.pi, N_vert )
circle_verts[0,0:N_vert] = 0.2*np.cos(theta)
circle_verts[1,0:N_vert] = 0.2*np.sin(theta)
verts = np.zeros([2, N_vert + 1])
units = np.ones( N_vert + 1)
for i in range(0,len(q)):
plt.arrow(q[i,0], q[i,1], 0.2*p[i,0], 0.2*p[i,1],\
head_width=0.2, head_length=0.2,\
fc='b', ec='b')
if (q1 != None):
verts = np.dot(q1[i,:,:], circle_verts ) \
+ np.outer(q[i,:],units)
print np.shape( verts )
print np.shape( q1 )
plt.plot(verts[0],verts[1],'b-')
plt.axis([- W, W,- W, W ])
return f
# Replay the saved simulation states and render one velocity-field frame per
# time step into ./movie_frames/ (later assembled into a movie).
y_data = np.load('output/state_data.npy')
time_data = np.load('output/time_data.npy')
#print 'shape of y_data is ' + str( y_data.shape )
N_timestep = y_data.shape[0]
print 'generating png files'
for k in range(0,N_timestep):
    # Convert the flat state vector back to Weinstein-Darboux coordinates.
    q,q_1,q_2,p,mu_1,mu_2 = tj.state_to_weinstein_darboux( y_data[k] )
    f = display_velocity_field(q,p,mu_1,mu_2,q_1)
    # Annotate the frame with the (truncated) simulation time.
    time_s = str(time_data[k])
    plt.suptitle('t = '+ time_s[0:4] , fontsize=16 , x = 0.75 , y = 0.25 )
    fname = './movie_frames/frame_'+str(k)+'.png'
    f.savefig( fname )
    plt.close(f)
print 'done'
|
stefansommer/jetflows
|
code/generate_images.py
|
Python
|
agpl-3.0
| 3,093
|
[
"Gaussian"
] |
7ad695f16fbe48a6fdb6ecf03936154d0a21831b2f32838293220f115ecb3864
|
"""Test cases and utilities for hs_core module. See also ./tests folder."""
from dateutil import parser
import tempfile
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.messages.storage.fallback import FallbackStorage
from django.test import TestCase, RequestFactory
from hs_core.models import ResourceFile
from hs_core.hydroshare import add_resource_files
from hs_core.views.utils import create_folder, move_or_rename_file_or_folder, zip_folder, \
unzip_file, remove_folder
from hs_core.views.utils import run_ssh_command
from theme.models import UserProfile
from django_irods.icommands import SessionException
from django_irods.storage import IrodsStorage
class MockIRODSTestCaseMixin(object):
    """Mix in to allow for mock iRODS testing."""

    def setUp(self):
        """Start patchers for iRODS operations (data bags, etc.)."""
        super(MockIRODSTestCaseMixin, self).setUp()
        # Only mock iRODS operations when no local iRODS container is used.
        if settings.IRODS_HOST != 'data.local.org':
            # Imported lazily so 'mock' is only required in this branch.
            from mock import patch
            targets = (
                "hs_core.hydroshare.hs_bagit.delete_files_and_bag",
                "hs_core.hydroshare.hs_bagit.create_bag",
                "hs_core.hydroshare.hs_bagit.create_bag_files",
                "hs_core.tasks.create_bag_by_irods",
                "hs_core.hydroshare.utils.copy_resource_files_and_AVUs",
            )
            self.irods_patchers = tuple(patch(target) for target in targets)
            for patcher in self.irods_patchers:
                patcher.start()

    def tearDown(self):
        """Stop any iRODS patchers started in setUp."""
        if settings.IRODS_HOST != 'data.local.org':
            for patcher in self.irods_patchers:
                patcher.stop()
        super(MockIRODSTestCaseMixin, self).tearDown()
class TestCaseCommonUtilities(object):
    """Enable common utilities for iRODS testing."""
    def is_federated_irods_available(self):
        """Check if federated iRODS is available."""
        # Federated testing needs both the user zone and the data zone
        # containers to be the expected local hosts.
        if not settings.REMOTE_USE_IRODS or settings.HS_USER_ZONE_HOST != 'users.local.org' \
                or settings.IRODS_HOST != 'data.local.org':
            return False
        else:
            return True
    def create_irods_user_in_user_zone(self):
        """Create corresponding irods account in user zone."""
        try:
            exec_cmd = "{0} {1} {2}".format(settings.HS_USER_ZONE_PROXY_USER_CREATE_USER_CMD,
                                            self.user.username, self.user.username)
            output = run_ssh_command(host=settings.HS_USER_ZONE_HOST,
                                     uname=settings.HS_USER_ZONE_PROXY_USER,
                                     pwd=settings.HS_USER_ZONE_PROXY_USER_PWD,
                                     exec_cmd=exec_cmd)
            if output:
                if 'ERROR:' in output.upper():
                    # irods account failed to create
                    # NOTE(review): assertRaises() is given an exception
                    # *instance* here, which asserts nothing -- confirm the
                    # intended failure handling (e.g. self.fail(output)).
                    self.assertRaises(SessionException(-1, output, output))
            user_profile = UserProfile.objects.filter(user=self.user).first()
            user_profile.create_irods_user_account = True
            user_profile.save()
        except Exception as ex:
            # NOTE(review): ex.message is Python 2 only; same assertRaises
            # misuse as above.
            self.assertRaises(SessionException(-1, ex.message, ex.message))
    def delete_irods_user_in_user_zone(self):
        """Delete irods test user in user zone."""
        try:
            exec_cmd = "{0} {1}".format(settings.HS_USER_ZONE_PROXY_USER_DELETE_USER_CMD,
                                        self.user.username)
            output = run_ssh_command(host=settings.HS_USER_ZONE_HOST,
                                     uname=settings.HS_USER_ZONE_PROXY_USER,
                                     pwd=settings.HS_USER_ZONE_PROXY_USER_PWD,
                                     exec_cmd=exec_cmd)
            if output:
                if 'ERROR:' in output.upper():
                    # there is an error from icommand run, report the error
                    # NOTE(review): assertRaises misuse -- see
                    # create_irods_user_in_user_zone above.
                    self.assertRaises(SessionException(-1, output, output))
            user_profile = UserProfile.objects.filter(user=self.user).first()
            user_profile.create_irods_user_account = False
            user_profile.save()
        except Exception as ex:
            # there is an error from icommand run, report the error
            self.assertRaises(SessionException(-1, ex.message, ex.message))
    def save_files_to_user_zone(self, file_name_to_target_name_dict):
        """Save a list of files to iRODS user zone using the same IrodsStorage() object.
        :param file_name_to_target_name_dict: a dictionary in the form of {ori_file, target_file}
        where ori_file is the file to be save to, and the target_file is the full path file name
        in iRODS user zone to save ori_file to
        :return:
        """
        self.irods_storage = IrodsStorage('federated')
        # NOTE: iteritems() is Python 2 only.
        for file_name, target_name in file_name_to_target_name_dict.iteritems():
            self.irods_storage.saveFile(file_name, target_name)
    def resource_file_oprs(self):
        """Test common iRODS file operations.
        This is a common test utility function to be called by both regular folder operation
        testing and federated zone folder operation testing.
        Make sure the calling TestCase object has the following attributes defined before calling
        this method:
        self.res: resource that has been created that contains files listed in file_name_list
        self.user: owner of the resource
        self.file_name_list: a list of three file names that have been added to the res object
        self.test_file_1 needs to be present for the calling object for doing regular folder
        operations without involving federated zone so that the same opened file can be re-added
        to the resource for testing the case where zipping cannot overwrite existing file
        """
        user = self.user
        res = self.res
        file_name_list = self.file_name_list
        # create a folder, if folder is created successfully, no exception is raised, otherwise,
        # an iRODS exception will be raised which will be caught by the test runner and mark as
        # a test failure
        create_folder(res.short_id, 'data/contents/sub_test_dir')
        istorage = res.get_irods_storage()
        res_path = res.file_path
        store = istorage.listdir(res_path)
        self.assertIn('sub_test_dir', store[0], msg='resource does not contain created sub-folder')
        # rename the third file in file_name_list
        move_or_rename_file_or_folder(user, res.short_id,
                                      'data/contents/' + file_name_list[2],
                                      'data/contents/new_' + file_name_list[2])
        # move the first two files in file_name_list to the new folder
        move_or_rename_file_or_folder(user, res.short_id,
                                      'data/contents/' + file_name_list[0],
                                      'data/contents/sub_test_dir/' + file_name_list[0])
        move_or_rename_file_or_folder(user, res.short_id,
                                      'data/contents/' + file_name_list[1],
                                      'data/contents/sub_test_dir/' + file_name_list[1])
        # collect the resource's current short paths to verify the moves/renames
        updated_res_file_names = []
        for rf in ResourceFile.objects.filter(object_id=res.id):
            updated_res_file_names.append(rf.short_path)
        self.assertIn('new_' + file_name_list[2], updated_res_file_names,
                      msg="resource does not contain the updated file new_" + file_name_list[2])
        self.assertNotIn(file_name_list[2], updated_res_file_names,
                         msg='resource still contains the old file ' + file_name_list[2] +
                             ' after renaming')
        self.assertIn('sub_test_dir/' + file_name_list[0], updated_res_file_names,
                      msg='resource does not contain ' + file_name_list[0] + ' moved to a folder')
        self.assertNotIn(file_name_list[0], updated_res_file_names,
                         msg='resource still contains the old ' + file_name_list[0] +
                             'after moving to a folder')
        self.assertIn('sub_test_dir/' + file_name_list[1], updated_res_file_names,
                      msg='resource does not contain ' + file_name_list[1] +
                          'moved to a new folder')
        self.assertNotIn(file_name_list[1], updated_res_file_names,
                         msg='resource still contains the old ' + file_name_list[1] +
                             ' after moving to a folder')
        # zip the folder
        output_zip_fname, size = \
            zip_folder(user, res.short_id, 'data/contents/sub_test_dir',
                       'sub_test_dir.zip', True)
        self.assertGreater(size, 0, msg='zipped file has a size of 0')
        # Now resource should contain only two files: new_file3.txt and sub_test_dir.zip
        # since the folder is zipped into sub_test_dir.zip with the folder deleted
        self.assertEqual(res.files.all().count(), 2,
                         msg="resource file count didn't match-")
        # test unzip does not allow override of existing files
        # add an existing file in the zip to the resource
        if res.resource_federation_path:
            fed_test_file1_full_path = '/{zone}/home/{uname}/{fname}'.format(
                zone=settings.HS_USER_IRODS_ZONE, uname=user.username, fname=file_name_list[0])
            # TODO: why isn't this a method of resource?
            # TODO: Why do we repeat the resource_federation_path?
            add_resource_files(res.short_id, source_names=[fed_test_file1_full_path],
                               move=False)
        else:
            # TODO: Why isn't this a method of resource?
            add_resource_files(res.short_id, self.test_file_1)
        # TODO: use ResourceFile.create_folder, which doesn't require data/contents prefix
        create_folder(res.short_id, 'data/contents/sub_test_dir')
        # TODO: use ResourceFile.rename, which doesn't require data/contents prefix
        move_or_rename_file_or_folder(user, res.short_id,
                                      'data/contents/' + file_name_list[0],
                                      'data/contents/sub_test_dir/' + file_name_list[0])
        # Now resource should contain three files: file3_new.txt, sub_test_dir.zip, and file1.txt
        self.assertEqual(res.files.all().count(), 3, msg="resource file count didn't match")
        # unzipping with overwrite=False must fail because file1.txt already exists
        with self.assertRaises(SessionException):
            unzip_file(user, res.short_id, 'data/contents/sub_test_dir.zip', False)
        # Resource should still contain three files: file3_new.txt, sub_test_dir.zip, and file1.txt
        file_cnt = res.files.all().count()
        self.assertEqual(file_cnt, 3, msg="resource file count didn't match - " +
                                          str(file_cnt) + " != 3")
        # test unzipping the file succeeds now after deleting the existing folder
        # TODO: this causes a multiple delete because the paths are valid now.
        istorage = res.get_irods_storage()
        remove_folder(user, res.short_id, 'data/contents/sub_test_dir')
        # Now resource should contain two files: file3_new.txt and sub_test_dir.zip
        file_cnt = res.files.all().count()
        self.assertEqual(file_cnt, 2, msg="resource file count didn't match - " +
                                          str(file_cnt) + " != 2")
        unzip_file(user, res.short_id, 'data/contents/sub_test_dir.zip', True)
        # Now resource should contain three files: file1.txt, file2.txt, and file3_new.txt
        self.assertEqual(res.files.all().count(), 3, msg="resource file count didn't match")
        updated_res_file_names = []
        for rf in ResourceFile.objects.filter(object_id=res.id):
            updated_res_file_names.append(rf.short_path)
        self.assertNotIn('sub_test_dir.zip', updated_res_file_names,
                         msg="resource still contains the zip file after unzipping")
        self.assertIn('sub_test_dir/' + file_name_list[0], updated_res_file_names,
                      msg='resource does not contain unzipped file ' + file_name_list[0])
        self.assertIn('sub_test_dir/' + file_name_list[1], updated_res_file_names,
                      msg='resource does not contain unzipped file ' + file_name_list[1])
        self.assertIn('new_' + file_name_list[2], updated_res_file_names,
                      msg='resource does not contain unzipped file new_' + file_name_list[2])
        # rename a folder
        move_or_rename_file_or_folder(user, res.short_id,
                                      'data/contents/sub_test_dir', 'data/contents/sub_dir')
        updated_res_file_names = []
        for rf in ResourceFile.objects.filter(object_id=res.id):
            updated_res_file_names.append(rf.short_path)
        self.assertNotIn('sub_test_dir/' + file_name_list[0], updated_res_file_names,
                         msg='resource still contains ' + file_name_list[0] +
                             ' in the old folder after renaming')
        self.assertIn('sub_dir/' + file_name_list[0], updated_res_file_names,
                      msg='resource does not contain ' + file_name_list[0] +
                          ' in the new folder after renaming')
        self.assertNotIn('sub_test_dir/' + file_name_list[1], updated_res_file_names,
                         msg='resource still contains ' + file_name_list[1] +
                             ' in the old folder after renaming')
        self.assertIn('sub_dir/' + file_name_list[1], updated_res_file_names,
                      msg='resource does not contain ' + file_name_list[1] +
                          ' in the new folder after renaming')
        # remove a folder
        # TODO: utilize ResourceFile.remove_folder instead. Takes a short path.
        remove_folder(user, res.short_id, 'data/contents/sub_dir')
        # Now resource only contains one file
        self.assertEqual(res.files.all().count(), 1, msg="resource file count didn't match")
        updated_res_file_names = []
        for rf in ResourceFile.objects.filter(object_id=res.id):
            updated_res_file_names.append(rf.short_path)
        self.assertEqual(len(updated_res_file_names), 1)
        self.assertEqual(updated_res_file_names[0], 'new_' + file_name_list[2])
    def raster_metadata_extraction(self):
        """Test raster metadata extraction.
        This is a common test utility function to be called by both regular raster metadata
        extraction testing and federated zone raster metadata extraction testing.
        Make sure the calling TestCase object has self.resRaster attribute defined before calling
        this method which is the raster resource that has been created containing valid raster
        files.
        """
        # there should be 2 content files
        self.assertEqual(self.resRaster.files.all().count(), 2)
        # test core metadata after metadata extraction
        extracted_title = "My Test Raster Resource"
        self.assertEqual(self.resRaster.metadata.title.value, extracted_title)
        # there should be 1 creator
        self.assertEqual(self.resRaster.metadata.creators.all().count(), 1)
        # there should be 1 coverage element - box type
        self.assertEqual(self.resRaster.metadata.coverages.all().count(), 1)
        self.assertEqual(self.resRaster.metadata.coverages.all().filter(type='box').count(), 1)
        box_coverage = self.resRaster.metadata.coverages.all().filter(type='box').first()
        self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
        self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
        self.assertEqual(box_coverage.value['northlimit'], 42.11270614966863)
        self.assertEqual(box_coverage.value['eastlimit'], -111.45699925047542)
        self.assertEqual(box_coverage.value['southlimit'], 41.66222054591102)
        self.assertEqual(box_coverage.value['westlimit'], -111.81761887121905)
        # there should be 2 format elements
        self.assertEqual(self.resRaster.metadata.formats.all().count(), 2)
        self.assertEqual(self.resRaster.metadata.formats.all().filter(
            value='application/vrt').count(), 1)
        self.assertEqual(self.resRaster.metadata.formats.all().filter(
            value='image/tiff').count(), 1)
        # testing extended metadata element: original coverage
        ori_coverage = self.resRaster.metadata.originalCoverage
        self.assertNotEquals(ori_coverage, None)
        self.assertEqual(ori_coverage.value['northlimit'], 4662392.446916306)
        self.assertEqual(ori_coverage.value['eastlimit'], 461954.01909127034)
        self.assertEqual(ori_coverage.value['southlimit'], 4612592.446916306)
        self.assertEqual(ori_coverage.value['westlimit'], 432404.01909127034)
        self.assertEqual(ori_coverage.value['units'], 'meter')
        self.assertEqual(ori_coverage.value['projection'], "NAD83 / UTM zone 12N")
        self.assertEqual(ori_coverage.value['datum'], "North_American_Datum_1983")
        projection_string = u'PROJCS["NAD83 / UTM zone 12N",GEOGCS["NAD83",' \
                            u'DATUM["North_American_Datum_1983",' \
                            u'SPHEROID["GRS 1980",6378137,298.257222101,' \
                            u'AUTHORITY["EPSG","7019"]],' \
                            u'TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6269"]],' \
                            u'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],' \
                            u'UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],' \
                            u'AUTHORITY["EPSG","4269"]],PROJECTION["Transverse_Mercator"],' \
                            u'PARAMETER["latitude_of_origin",0],' \
                            u'PARAMETER["central_meridian",-111],' \
                            u'PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],' \
                            u'PARAMETER["false_northing",0],' \
                            u'UNIT["metre",1,AUTHORITY["EPSG","9001"]],' \
                            u'AXIS["Easting",EAST],AXIS["Northing",' \
                            u'NORTH],AUTHORITY["EPSG","26912"]]'
        self.assertEqual(ori_coverage.value['projection_string'], projection_string)
        # testing extended metadata element: cell information
        cell_info = self.resRaster.metadata.cellInformation
        self.assertEqual(cell_info.rows, 1660)
        self.assertEqual(cell_info.columns, 985)
        self.assertEqual(cell_info.cellSizeXValue, 30.0)
        self.assertEqual(cell_info.cellSizeYValue, 30.0)
        self.assertEqual(cell_info.cellDataType, 'Float32')
        # testing extended metadata element: band information
        self.assertEqual(self.resRaster.metadata.bandInformations.count(), 1)
        band_info = self.resRaster.metadata.bandInformations.first()
        self.assertEqual(band_info.noDataValue, '-3.40282346639e+38')
        self.assertEqual(band_info.maximumValue, '3031.44311523')
        self.assertEqual(band_info.minimumValue, '1358.33459473')
    def netcdf_metadata_extraction(self, expected_creators_count=1):
        """Test NetCDF metadata extraction.
        This is a common test utility function to be called by both regular netcdf metadata
        extraction testing and federated zone netCDF metadata extraction testing.
        Make sure the calling TestCase object has self.resNetcdf attribute defined before calling
        this method which is the netCDF resource that has been created containing valid netCDF
        files.
        """
        # there should 2 content file
        self.assertEqual(self.resNetcdf.files.all().count(), 2)
        # test core metadata after metadata extraction
        extracted_title = "Snow water equivalent estimation at TWDEF site from " \
                          "Oct 2009 to June 2010"
        self.assertEqual(self.resNetcdf.metadata.title.value, extracted_title)
        # there should be an abstract element
        self.assertNotEqual(self.resNetcdf.metadata.description, None)
        extracted_abstract = "This netCDF data is the simulation output from Utah Energy " \
                             "Balance (UEB) model.It includes the simulation result " \
                             "of snow water equivalent during the period " \
                             "Oct. 2009 to June 2010 for TWDEF site in Utah."
        self.assertEqual(self.resNetcdf.metadata.description.abstract, extracted_abstract)
        # there should be one source element
        self.assertEqual(self.resNetcdf.metadata.sources.all().count(), 1)
        # there should be one license element:
        self.assertNotEquals(self.resNetcdf.metadata.rights.statement, 1)
        # there should be one relation element
        self.assertEqual(self.resNetcdf.metadata.relations.all().filter(type='cites').count(), 1)
        # there should be creators equal to expected_creators_count
        self.assertEqual(self.resNetcdf.metadata.creators.all().count(), expected_creators_count)
        # there should be one contributor
        self.assertEqual(self.resNetcdf.metadata.contributors.all().count(), 1)
        # there should be 2 coverage element - box type and period type
        self.assertEqual(self.resNetcdf.metadata.coverages.all().count(), 2)
        self.assertEqual(self.resNetcdf.metadata.coverages.all().filter(type='box').count(), 1)
        self.assertEqual(self.resNetcdf.metadata.coverages.all().filter(type='period').count(), 1)
        box_coverage = self.resNetcdf.metadata.coverages.all().filter(type='box').first()
        self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
        self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
        self.assertEqual(box_coverage.value['northlimit'], 41.867126409)
        self.assertEqual(box_coverage.value['eastlimit'], -111.505940368)
        self.assertEqual(box_coverage.value['southlimit'], 41.8639080745)
        self.assertEqual(box_coverage.value['westlimit'], -111.51138808)
        temporal_coverage = self.resNetcdf.metadata.coverages.all().filter(type='period').first()
        self.assertEqual(parser.parse(temporal_coverage.value['start']).date(),
                         parser.parse('10/01/2009').date())
        self.assertEqual(parser.parse(temporal_coverage.value['end']).date(),
                         parser.parse('05/30/2010').date())
        # there should be 2 format elements
        self.assertEqual(self.resNetcdf.metadata.formats.all().count(), 2)
        self.assertEqual(self.resNetcdf.metadata.formats.all().
                         filter(value='text/plain').count(), 1)
        self.assertEqual(self.resNetcdf.metadata.formats.all().
                         filter(value='application/x-netcdf').count(), 1)
        # there should be one subject element
        self.assertEqual(self.resNetcdf.metadata.subjects.all().count(), 1)
        subj_element = self.resNetcdf.metadata.subjects.all().first()
        self.assertEqual(subj_element.value, 'Snow water equivalent')
        # testing extended metadata element: original coverage
        ori_coverage = self.resNetcdf.metadata.ori_coverage.all().first()
        self.assertNotEquals(ori_coverage, None)
        self.assertEqual(ori_coverage.projection_string_type, 'Proj4 String')
        proj_text = u'+proj=tmerc +y_0=0.0 +k_0=0.9996 +x_0=500000.0 +lat_0=0.0 +lon_0=-111.0'
        self.assertEqual(ori_coverage.projection_string_text, proj_text)
        self.assertEqual(ori_coverage.value['northlimit'], '4.63515e+06')
        self.assertEqual(ori_coverage.value['eastlimit'], '458010.0')
        self.assertEqual(ori_coverage.value['southlimit'], '4.63479e+06')
        self.assertEqual(ori_coverage.value['westlimit'], '457560.0')
        self.assertEqual(ori_coverage.value['units'], 'Meter')
        self.assertEqual(ori_coverage.value['projection'], 'transverse_mercator')
        # testing extended metadata element: variables
        self.assertEqual(self.resNetcdf.metadata.variables.all().count(), 5)
        # test time variable
        var_time = self.resNetcdf.metadata.variables.all().filter(name='time').first()
        self.assertNotEquals(var_time, None)
        self.assertEqual(var_time.unit, 'hours since 2009-10-1 0:0:00 UTC')
        self.assertEqual(var_time.type, 'Float')
        self.assertEqual(var_time.shape, 'time')
        self.assertEqual(var_time.descriptive_name, 'time')
        # test x variable
        var_x = self.resNetcdf.metadata.variables.all().filter(name='x').first()
        self.assertNotEquals(var_x, None)
        self.assertEqual(var_x.unit, 'Meter')
        self.assertEqual(var_x.type, 'Float')
        self.assertEqual(var_x.shape, 'x')
        self.assertEqual(var_x.descriptive_name, 'x coordinate of projection')
        # test y variable
        var_y = self.resNetcdf.metadata.variables.all().filter(name='y').first()
        self.assertNotEquals(var_y, None)
        self.assertEqual(var_y.unit, 'Meter')
        self.assertEqual(var_y.type, 'Float')
        self.assertEqual(var_y.shape, 'y')
        self.assertEqual(var_y.descriptive_name, 'y coordinate of projection')
        # test SWE variable
        var_swe = self.resNetcdf.metadata.variables.all().filter(name='SWE').first()
        self.assertNotEquals(var_swe, None)
        self.assertEqual(var_swe.unit, 'm')
        self.assertEqual(var_swe.type, 'Float')
        self.assertEqual(var_swe.shape, 'y,x,time')
        self.assertEqual(var_swe.descriptive_name, 'Snow water equivalent')
        self.assertEqual(var_swe.method, 'model simulation of UEB model')
        self.assertEqual(var_swe.missing_value, '-9999')
        # test grid mapping variable
        var_grid = self.resNetcdf.metadata.variables.all().\
            filter(name='transverse_mercator').first()
        self.assertNotEquals(var_grid, None)
        self.assertEqual(var_grid.unit, 'Unknown')
        self.assertEqual(var_grid.type, 'Unknown')
        self.assertEqual(var_grid.shape, 'Not defined')
    def timeseries_metadata_extraction(self):
        """Test timeseries metadata extraction.
        This is a common test utility function to be called by both regular timeseries metadata
        extraction testing and federated zone timeseries metadata extraction testing.
        Make sure the calling TestCase object has self.resTimeSeries attribute defined before
        calling this method which is the timeseries resource that has been created containing
        valid timeseries file.
        """
        # there should one content file
        self.assertEqual(self.resTimeSeries.files.all().count(), 1)
        # there should be one contributor element
        self.assertEqual(self.resTimeSeries.metadata.contributors.all().count(), 1)
        # test core metadata after metadata extraction
        extracted_title = "Water temperature data from the Little Bear River, UT"
        self.assertEqual(self.resTimeSeries.metadata.title.value, extracted_title)
        # there should be an abstract element
        self.assertNotEqual(self.resTimeSeries.metadata.description, None)
        extracted_abstract = "This dataset contains time series of observations of water " \
                             "temperature in the Little Bear River, UT. Data were recorded every " \
                             "30 minutes. The values were recorded using a HydroLab MS5 " \
                             "multi-parameter water quality sonde connected to a Campbell " \
                             "Scientific datalogger."
        self.assertEqual(self.resTimeSeries.metadata.description.abstract.strip(),
                         extracted_abstract)
        # there should be 2 coverage element - box type and period type
        self.assertEqual(self.resTimeSeries.metadata.coverages.all().count(), 2)
        self.assertEqual(self.resTimeSeries.metadata.coverages.all().filter(type='box').count(), 1)
        self.assertEqual(self.resTimeSeries.metadata.coverages.all().filter(
            type='period').count(), 1)
        box_coverage = self.resTimeSeries.metadata.coverages.all().filter(type='box').first()
        self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
        self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
        self.assertEqual(box_coverage.value['northlimit'], 41.718473)
        self.assertEqual(box_coverage.value['eastlimit'], -111.799324)
        self.assertEqual(box_coverage.value['southlimit'], 41.495409)
        self.assertEqual(box_coverage.value['westlimit'], -111.946402)
        temporal_coverage = self.resTimeSeries.metadata.coverages.all().filter(
            type='period').first()
        self.assertEqual(parser.parse(temporal_coverage.value['start']).date(),
                         parser.parse('01/01/2008').date())
        self.assertEqual(parser.parse(temporal_coverage.value['end']).date(),
                         parser.parse('01/31/2008').date())
        # there should be one format element
        self.assertEqual(self.resTimeSeries.metadata.formats.all().count(), 1)
        format_element = self.resTimeSeries.metadata.formats.all().first()
        self.assertEqual(format_element.value, 'application/sqlite')
        # there should be one subject element
        self.assertEqual(self.resTimeSeries.metadata.subjects.all().count(), 1)
        subj_element = self.resTimeSeries.metadata.subjects.all().first()
        self.assertEqual(subj_element.value, 'Temperature')
        # there should be a total of 7 timeseries
        self.assertEqual(self.resTimeSeries.metadata.time_series_results.all().count(), 7)
        # testing extended metadata elements
        # test 'site' - there should be 7 sites
        self.assertEqual(self.resTimeSeries.metadata.sites.all().count(), 7)
        # each site be associated with one series id
        for site in self.resTimeSeries.metadata.sites.all():
            self.assertEqual(len(site.series_ids), 1)
        # test the data for a specific site
        site = self.resTimeSeries.metadata.sites.filter(site_code='USU-LBR-Paradise').first()
        self.assertNotEqual(site, None)
        site_name = 'Little Bear River at McMurdy Hollow near Paradise, Utah'
        self.assertEqual(site.site_name, site_name)
        self.assertEqual(site.elevation_m, 1445)
        self.assertEqual(site.elevation_datum, 'NGVD29')
        self.assertEqual(site.site_type, 'Stream')
        # test 'variable' - there should be 1 variable element
        self.assertEqual(self.resTimeSeries.metadata.variables.all().count(), 1)
        variable = self.resTimeSeries.metadata.variables.all().first()
        # there should be 7 series ids associated with this one variable
        self.assertEqual(len(variable.series_ids), 7)
        # test the data for a variable
        self.assertEqual(variable.variable_code, 'USU36')
        self.assertEqual(variable.variable_name, 'Temperature')
        self.assertEqual(variable.variable_type, 'Water Quality')
        self.assertEqual(variable.no_data_value, -9999)
        self.assertEqual(variable.variable_definition, None)
        self.assertEqual(variable.speciation, 'Not Applicable')
        # test 'method' - there should be 1 method element
        self.assertEqual(self.resTimeSeries.metadata.methods.all().count(), 1)
        method = self.resTimeSeries.metadata.methods.all().first()
        # there should be 7 series ids associated with this one method element
        self.assertEqual(len(method.series_ids), 7)
        self.assertEqual(method.method_code, '28')
        method_name = 'Quality Control Level 1 Data Series created from raw QC Level 0 data ' \
                      'using ODM Tools.'
        self.assertEqual(method.method_name, method_name)
        self.assertEqual(method.method_type, 'Instrument deployment')
        method_des = 'Quality Control Level 1 Data Series created from raw QC Level 0 data ' \
                     'using ODM Tools.'
        self.assertEqual(method.method_description, method_des)
        self.assertEqual(method.method_link, None)
        # test 'processing_level' - there should be 1 processing_level element
        self.assertEqual(self.resTimeSeries.metadata.processing_levels.all().count(), 1)
        proc_level = self.resTimeSeries.metadata.processing_levels.all().first()
        # there should be 7 series ids associated with this one element
        self.assertEqual(len(proc_level.series_ids), 7)
        self.assertEqual(proc_level.processing_level_code, 1)
        self.assertEqual(proc_level.definition, 'Quality controlled data')
        explanation = 'Quality controlled data that have passed quality assurance procedures ' \
                      'such as routine estimation of timing and sensor calibration or visual ' \
                      'inspection and removal of obvious errors. An example is USGS published ' \
                      'streamflow records following parsing through USGS quality control ' \
                      'procedures.'
        self.assertEqual(proc_level.explanation, explanation)
        # test 'timeseries_result' - there should be 7 timeseries_result element
        self.assertEqual(self.resTimeSeries.metadata.time_series_results.all().count(), 7)
        ts_result = self.resTimeSeries.metadata.time_series_results.filter(
            series_ids__contains=['182d8fa3-1ebc-11e6-ad49-f45c8999816f']).first()
        self.assertNotEqual(ts_result, None)
        # there should be only 1 series id associated with this element
        self.assertEqual(len(ts_result.series_ids), 1)
        self.assertEqual(ts_result.units_type, 'Temperature')
        self.assertEqual(ts_result.units_name, 'degree celsius')
        self.assertEqual(ts_result.units_abbreviation, 'degC')
        self.assertEqual(ts_result.status, 'Unknown')
        self.assertEqual(ts_result.sample_medium, 'Surface Water')
        self.assertEqual(ts_result.value_count, 1441)
        self.assertEqual(ts_result.aggregation_statistics, 'Average')
        # test for CV lookup tables
        # there should be 23 CV_VariableType records
        self.assertEqual(self.resTimeSeries.metadata.cv_variable_types.all().count(), 23)
        # there should be 805 CV_VariableName records
        self.assertEqual(self.resTimeSeries.metadata.cv_variable_names.all().count(), 805)
        # there should be 145 CV_Speciation records
        self.assertEqual(self.resTimeSeries.metadata.cv_speciations.all().count(), 145)
        # there should be 51 CV_SiteType records
        self.assertEqual(self.resTimeSeries.metadata.cv_site_types.all().count(), 51)
        # there should be 5 CV_ElevationDatum records
        self.assertEqual(self.resTimeSeries.metadata.cv_elevation_datums.all().count(), 5)
        # there should be 25 CV_MethodType records
        self.assertEqual(self.resTimeSeries.metadata.cv_method_types.all().count(), 25)
        # there should be 179 CV_UnitsType records
        self.assertEqual(self.resTimeSeries.metadata.cv_units_types.all().count(), 179)
        # there should be 4 CV_Status records
        self.assertEqual(self.resTimeSeries.metadata.cv_statuses.all().count(), 4)
        # there should be 17 CV_Medium records
        # NOTE(review): the comment above says 17 but the assertion checks 18 --
        # confirm which is correct.
        self.assertEqual(self.resTimeSeries.metadata.cv_mediums.all().count(), 18)
        # there should be 17 CV_aggregationStatistics records
        self.assertEqual(self.resTimeSeries.metadata.cv_aggregation_statistics.all().count(), 17)
        # there should not be any UTCOffset element
        self.assertEqual(self.resTimeSeries.metadata.utc_offset, None)
class ViewTestCase(TestCase):
    """Test basic view functionality."""

    def setUp(self):
        """Create a request factory and a scratch directory for each test."""
        self.factory = RequestFactory()
        self.temp_dir = tempfile.mkdtemp()
        super(ViewTestCase, self).setUp()

    @staticmethod
    def set_request_message_attributes(request):
        """Set session and _messages attributes on *request*.

        The views under test go through the messaging middleware, so a bare
        RequestFactory request needs these attributes faked in to avoid
        errors in unit tests.
        """
        request.session = 'session'
        request._messages = FallbackStorage(request)

    @staticmethod
    def add_session_to_request(request):
        """Annotate *request* with a real session via SessionMiddleware."""
        SessionMiddleware().process_request(request)
        request.session.save()
|
ResearchSoftwareInstitute/MyHPOM
|
hs_core/testing.py
|
Python
|
bsd-3-clause
| 36,889
|
[
"NetCDF"
] |
08f84a348e2f785ffcac97f6841bd437de3b1748fe8b08e8926c3ad3799ae428
|
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt
def loss_fn(X, Y, beta):
    """Squared l2 loss of the linear model X @ beta against targets Y."""
    residual = cp.matmul(X, beta) - Y
    return cp.pnorm(residual, p=2) ** 2
def regularizer(beta):
    """Squared l2 norm of the coefficient vector (ridge penalty)."""
    coeff_norm = cp.pnorm(beta, p=2)
    return coeff_norm ** 2
def objective_fn(X, Y, beta, lambd):
    """Ridge objective: data-fit loss plus lambd-weighted l2 penalty."""
    penalty = lambd * regularizer(beta)
    return loss_fn(X, Y, beta) + penalty
def mse(X, Y, beta):
    """Mean squared error of the fitted model on the data set (X, Y)."""
    scale = 1.0 / X.shape[0]
    return scale * loss_fn(X, Y, beta).value
def generate_data(m=1000, n=30, sigma=40):
    """Generate a synthetic regression problem.

    To experiment with your own data, just replace the contents of this
    function with code that loads your dataset.

    Args
    ----
    m : int
        The number of examples.
    n : int
        The number of features per example.
    sigma : positive float
        The standard deviation of the additive noise.

    Returns
    -------
    X : np.array
        Featurized examples, shape (m, n).
    Y : np.array
        Observed labels for the examples, shape (m,).
    beta_star : np.array
        The true parameter we are trying to estimate.
    """
    true_beta = np.random.randn(n)
    # Build an ill-conditioned design matrix: take a random Gaussian matrix
    # and replace its singular values with a fixed decaying spectrum.
    raw = np.random.randn(m, n)
    left, _, right = np.linalg.svd(raw)
    k = min(m, n)
    spectrum = np.zeros((m, n))
    spectrum[:k, :k] = np.diag(np.linspace(30, 1, k))
    design = np.dot(left, np.dot(spectrum, right))
    # Corrupt the clean responses with additive Gaussian noise.
    noise = np.random.normal(0, sigma, size=m)
    labels = design.dot(true_beta) + noise
    return design, labels, true_beta
def plot_train_test_errors(train_errors, test_errors, lambd_values):
    """Plot train/test MSE as a function of the regularization weight."""
    for errors, label in ((train_errors, "Train error"),
                          (test_errors, "Test error")):
        plt.plot(lambd_values, errors, label=label)
    plt.xscale("log")
    plt.xlabel(r"$\lambda$", fontsize=16)
    plt.legend(loc="upper left")
    plt.title("Mean Squared Error (mSE)")
    plt.show()
def plot_regularization_path(lambd_values, beta_values):
    """Plot each coefficient's trajectory across the lambda sweep."""
    coeff_count = len(beta_values[0])
    for idx in range(coeff_count):
        trajectory = [beta[idx] for beta in beta_values]
        plt.plot(lambd_values, trajectory)
    plt.xlabel(r"$\lambda$", fontsize=16)
    plt.xscale("log")
    plt.title("Regularization Path")
    plt.show()
if __name__ == "__main__":
m = 1000
n = 30
sigma = 4
X, Y, beta_star = generate_data(m, n, sigma)
X_train = X[:800, :]
Y_train = Y[:800]
X_test = X[800:, :]
Y_test = Y[800:]
beta = cp.Variable(n)
lambd = cp.Parameter(nonneg=True)
problem = cp.Problem(
cp.Minimize(objective_fn(X_train, Y_train, beta, lambd)))
lambd_values = np.logspace(-2, 2, 50)
train_errors = []
test_errors = []
beta_values = []
for v in lambd_values:
lambd.value = v
problem.solve()
train_errors.append(mse(X_train, Y_train, beta))
test_errors.append(mse(X_test, Y_test, beta))
beta_values.append(beta.value)
plot_train_test_errors(train_errors, test_errors, lambd_values)
plot_regularization_path(lambd_values, beta_values)
|
SteveDiamond/cvxpy
|
examples/machine_learning/ridge_regression.py
|
Python
|
gpl-3.0
| 3,104
|
[
"Gaussian"
] |
770d5d55901570e199b5744035c620bd4bbafbfe6a99f448000308124fc93cef
|
# -*- coding: utf-8 -*-
"""Various text used throughout the website, e.g. status messages, errors, etc.
"""
# Status Messages
#################
# NOTE: in status messages, newlines are not preserved, so triple-quoted
# strings are ok

# Status message shown at settings page on first login
# (upon clicking primary email confirmation link)
WELCOME_MESSAGE = '''
<h1>Welcome to the OSF!</h1>
<p>Visit our <a href="http://help.osf.io/" target="_blank" rel="noreferrer">Guides</a> to learn about creating a project, or get inspiration from <a href="https://osf.io/explore/activity/#popularPublicProjects">popular public projects</a>.</p>
'''

# Shown after a new account signs up; {email} is interpolated by the caller
REGISTRATION_SUCCESS = '''Registration successful. Please check {email} to confirm your email address.'''

# Shown after account creation / linking through an external identity provider
EXTERNAL_LOGIN_EMAIL_CREATE_SUCCESS = '''A new OSF account has been created with your {external_id_provider} profile. Please check {email} to confirm your email address.'''
EXTERNAL_LOGIN_EMAIL_LINK_SUCCESS = '''Your OSF account has been linked with your {external_id_provider}. Please check {email} to confirm this action.'''

# Shown if registration is turned off in website.settings
REGISTRATION_UNAVAILABLE = 'Registration currently unavailable.'

ALREADY_REGISTERED = u'The email {email} has already been registered.'

# Shown after a preregistration submission is received (see prereg@cos.io)
AFTER_SUBMIT_FOR_REVIEW = 'Your submission has been received. You will be notified within ten business days regarding the status of your submission. If you have questions you may contact us at prereg@cos.io.'

# Shown if user tries to login with an email that is not yet confirmed
UNCONFIRMED = ('This login email has been registered but not confirmed. Please check your email (and spam folder).'
               ' <a href="/resend/">Click here</a> to resend your confirmation email.')

# Shown if the user's account is disabled
DISABLED = '''
Log-in failed: Deactivated account.
'''

# Shown on incorrect password attempt
LOGIN_FAILED = '''
Log-in failed. Please try again or reset your password.
'''

# Shown at login page if user tries to access a resource that requires auth
MUST_LOGIN = '''
You must log in to access this resource.
'''

# Shown on logout
LOGOUT = '''
You have successfully logged out.
'''

# Shown when a password-reset / lookup email does not match any account
EMAIL_NOT_FOUND = u'''
{email} was not found in our records.
'''

# Shown after an unregistered user claims an account and is redirected to the
# settings page
CLAIMED_CONTRIBUTOR = ('<strong>Welcome to the OSF!</strong> Edit your display name below and then check your '
                       '<a href="/dashboard/">dashboard</a> to see projects to which you have been added as a '
                       'contributor by someone else.')

# Error Pages
# ###########

# Search-related errors
SEARCH_QUERY_HELP = ('Please check our help (the question mark beside the search box) for more information '
                     'on advanced search queries.')

# Shown at error page if an expired/revoked email confirmation link is clicked
EXPIRED_EMAIL_CONFIRM_TOKEN = 'This confirmation link has expired. Please <a href="/login/">log in</a> to continue.'
INVALID_EMAIL_CONFIRM_TOKEN = 'This confirmation link is invalid. Please <a href="/login/">log in</a> to continue.'

# Account-merge flow messages
CANNOT_MERGE_ACCOUNTS_SHORT = 'Cannot Merge Accounts'
CANNOT_MERGE_ACCOUNTS_LONG = (
    'Accounts cannot be merged due to a possible conflict with add-ons. '
    'Before you continue, please <a href="/settings/addons/"> deactivate '
    'any add-ons</a> to be merged into your primary account.'
)

MERGE_COMPLETE = 'Accounts successfully merged.'
MERGE_CONFIRMATION_REQUIRED_SHORT = 'Confirmation Required: Merge Accounts'
MERGE_CONFIRMATION_REQUIRED_LONG = (
    u'<p>This email is confirmed to another account. '
    u'Would you like to merge <em>{src_user}</em> with the account '
    u'<em>{dest_user}</em>?<p>'
    u'<a class="btn btn-primary" href="?confirm_merge">Confirm merge</a> '
)

# Node Actions
AFTER_REGISTER_ARCHIVING = (
    'Files are being copied to the newly created registration, and you will receive an email '
    'notification when the copying is finished.'
)
BEFORE_REGISTER_HAS_POINTERS = (
    u'This {category} contains links to other projects. Links will be copied '
    u'into your registration, but the projects that they link to will not be '
    u'registered. If you wish to register the linked projects, you must fork '
    u'them from the original project before registering.'
)
BEFORE_FORK_HAS_POINTERS = (
    u'This {category} contains links to other projects. Links will be copied '
    u'into your fork, but the projects that they link to will not be forked. '
    u'If you wish to fork the linked projects, they need to be forked from the '
    u'original project.'
)

REGISTRATION_INFO = '''
<p>Registration creates a frozen version of the project that can never be
edited or deleted but can be withdrawn. You can register your project by
selecting a registration form, entering information about your project, and
then confirming. You will be able to continue editing the original project,
however, and the frozen version with timestamps will always be linked to
the original. Withdrawing a registration will leave behind metadata about
when the registration was created and withdrawn but removes the contents
of the registration.</p>
<ul>
<li>A registration can be made public immediately or entered into
an embargo period of up to four years. At the end of the embargo period,
the registration will automatically become public.</li>
<li>Before initiating a registration, make sure that the project is
in the state that you wish to freeze. Consider turning links into
forks.</li>
<li>Start by selecting a registration form from the list below. You can hit
your browser's back button if the selected form is not appropriate for
your use.</li>
</ul>
'''

REGISTRATION_EMBARGO_INFO = '''
You can choose whether to make your registration public immediately or
embargo it for up to four years. At the end of the embargo period the registration
is automatically made public. After becoming public, the only way to remove a
registration is to withdraw it. Withdrawn registrations show only the registration title,
contributors, and description to indicate that a registration was made and
later withdrawn.
<br /><br />
If you choose to embargo your registration, a notification will be sent to
all other project contributors. Other administrators will have 48 hours to
approve or cancel creating the registration. If any other administrator rejects the
registration, it will be canceled. If all other administrators approve or do
nothing, the registration will be confirmed and enter its embargo period.
'''

BEFORE_REGISTRATION_INFO = '''
Registration cannot be undone, and the archived content and files cannot be
deleted after registration. Please be sure the project is complete and
comprehensive for what you wish to register.
'''

# Nodes: forking, templating, linking
LINK_ACTION = 'Link to this Project'
LINK_DESCRIPTION = """
<p>Linking to this project will reference it in another project, without
creating a copy. The link will always point to the most up-to-date version.</p>
"""

TEMPLATE_ACTION = 'Copy Project Structure'
TEMPLATE_DESCRIPTION = """
<p>This option will create a new project, using this project as a template.
The new project will be structured in the same way, but contain no data.</p>
"""

FORK_ACTION = 'Fork this Project'
FORK_DESCRIPTION = """
<p>Fork this project if you plan to build upon it in your own work.
The new project will be an exact duplicate of this project's current state,
with you as the only contributor.</p>
"""

TEMPLATE_DROPDOWN_HELP = """Start typing to search. Selecting project as
template will duplicate its structure in the new project without importing the
content of that project."""

TEMPLATED_FROM_PREFIX = 'Templated from '

# MFR Error handling
ERROR_PREFIX = "Unable to render. <a href='?action=download'>Download</a> file to view it."
SUPPORT = u"Contact support@osf.io for further assistance."

# Custom Error Messages w/ support # TODO: Where are these used? See [#OSF-6101]
STATA_VERSION_ERROR = u'Version of given Stata file is not 104, 105, 108, 113 (Stata 8/9), 114 (Stata 10/11) or 115 (Stata 12)<p>{0}</p>'.format(SUPPORT)
BLANK_OR_CORRUPT_TABLE_ERROR = u'Is this a valid instance of this file type?<p>{0}</p>'.format(SUPPORT)
# Disk saving mode: shown site-wide while write-heavy features are disabled
# during a server upgrade.
# Fix: the original message duplicated the word "uploads"
# ("...uploads to OSF Storage uploads are...").
DISK_SAVING_MODE = (
    'Forks, registrations, and uploads to OSF Storage are temporarily '
    'disabled while we are undergoing a server upgrade. These features '
    'will return shortly.'
)
# Shown when adding an alternate email fails; the user must log out and
# revisit the link from the confirmation email.
CONFIRM_ALTERNATE_EMAIL_ERROR = 'The email address has <b>NOT</b> been added to your account. Please log out and revisit the link in your email. Thank you.'
|
wearpants/osf.io
|
website/language.py
|
Python
|
apache-2.0
| 8,749
|
[
"VisIt"
] |
34436ac3e14fd819d0160021b5c422d3f2e9556665d343e588b2ec75ebcfe0c6
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
***************************************************
**espresso.interaction.AngularUniqueCosineSquared**
***************************************************
"""
from espresso import pmi
from espresso.esutil import *
from espresso.interaction.AngularUniquePotential import *
from espresso.interaction.Interaction import *
from _espresso import interaction_AngularUniqueCosineSquared, \
interaction_FixedTripleAngleListAngularUniqueCosineSquared
class AngularUniqueCosineSquaredLocal(AngularUniquePotentialLocal, interaction_AngularUniqueCosineSquared):
    'The (local) AngularUniqueCosineSquared potential.'

    def __init__(self, K=1.0):
        """Initialize the local AngularUniqueCosineSquared object."""
        # Construct the C++ backend only when PMI is inactive or this rank
        # belongs to the worker CPU group.
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_AngularUniqueCosineSquared, K)
class FixedTripleAngleListAngularUniqueCosineSquaredLocal(InteractionLocal, interaction_FixedTripleAngleListAngularUniqueCosineSquared):
    'The (local) AngularUniqueCosineSquared interaction using FixedTripleAngle lists.'

    def __init__(self, system, ftcl, potential):
        """Bind the C++ interaction to *system* and a fixed-triple list."""
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedTripleAngleListAngularUniqueCosineSquared,
                    system, ftcl, potential)

    def setPotential(self, potential):
        """Forward the potential to the C++ object on worker ranks."""
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)

    def getFixedTripleList(self):
        """Return the underlying fixed-triple list (worker ranks only)."""
        pmi_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmi_active or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getFixedTripleList(self)
if pmi.isController:
    # Controller-side proxy classes: PMI forwards property access and method
    # calls to the corresponding *Local classes on the worker ranks.
    class AngularUniqueCosineSquared(AngularUniquePotential):
        'The AngularUniqueCosineSquared potential.'
        pmiproxydefs = dict(
            cls = 'espresso.interaction.AngularUniqueCosineSquaredLocal',
            pmiproperty = ['K']  # force constant, forwarded to the backend
        )

    class FixedTripleAngleListAngularUniqueCosineSquared(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.interaction.FixedTripleAngleListAngularUniqueCosineSquaredLocal',
            pmicall = ['setPotential','getFixedTripleList']
        )
|
BackupTheBerlios/espressopp
|
src/interaction/AngularUniqueCosineSquared.py
|
Python
|
gpl-3.0
| 3,264
|
[
"ESPResSo"
] |
a37b19bf9fe1b322c4eefb6dabe5304f404358e7917bee9c5a0ca9f5984370d1
|
import mxnet as mx
import logging
import os
import time
from math import sqrt
def _get_lr_scheduler(args, kv):
if 'lr_factor' not in args or args.lr_factor >= 1:
return (args.lr, None)
epoch_size = args.num_examples / args.batch_size
if 'dist' in args.kv_store:
epoch_size /= kv.num_workers
begin_epoch = args.load_epoch if args.load_epoch else 0
step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]
lr = args.lr
for s in step_epochs:
if begin_epoch >= s:
lr *= args.lr_factor
if lr != args.lr:
logging.info('Adjust learning rate to %e for epoch %d' %(lr, begin_epoch))
steps = [epoch_size * (x-begin_epoch) for x in step_epochs if x-begin_epoch > 0]
return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor))
def _load_model(args, rank=0):
if 'load_epoch' not in args or args.load_epoch is None:
return (None, None, None)
assert args.model_prefix is not None
model_prefix = args.model_prefix
if rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, rank)):
model_prefix += "-%d" % (rank)
sym, arg_params, aux_params = mx.model.load_checkpoint(
model_prefix, args.load_epoch)
logging.info('Loaded model %s_%04d.params', model_prefix, args.load_epoch)
return (sym, arg_params, aux_params)
def _save_model(args, rank=0):
if args.model_prefix is None:
return None
dst_dir = os.path.dirname(args.model_prefix)
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
return mx.callback.do_checkpoint(args.model_prefix if rank == 0 else "%s-%d" % (
args.model_prefix, rank))
def add_fit_args(parser):
    """Add the arguments required by ``fit`` to *parser*.

    parser : argparse.ArgumentParser
    Returns the 'Training' argument group so callers can extend it.
    """
    train = parser.add_argument_group('Training', 'model training')
    train.add_argument('--network', type=str,
                       help='the neural network to use')
    train.add_argument('--num-layers', type=int,
                       help='number of layers in the neural network, required by some networks such as resnet')
    train.add_argument('--gpus', type=str,
                       help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
    train.add_argument('--kv-store', type=str, default='device',
                       help='key-value store type')
    train.add_argument('--num-epochs', type=int, default=100,
                       help='max num of epochs')
    train.add_argument('--lr', type=float, default=0.1,
                       help='initial learning rate')
    train.add_argument('--lr-factor', type=float, default=0.1,
                       help='the ratio to reduce lr on each step')
    train.add_argument('--lr-step-epochs', type=str,
                       help='the epochs to reduce the lr, e.g. 30,60')
    train.add_argument('--optimizer', type=str, default='sgd',
                       help='the optimizer type')
    train.add_argument('--mom', type=float, default=0.9,
                       help='momentum for sgd')
    train.add_argument('--wd', type=float, default=0.0001,
                       help='weight decay for sgd')
    train.add_argument('--batch-size', type=int, default=128,
                       help='the batch size')
    train.add_argument('--disp-batches', type=int, default=20,
                       help='show progress for every n batches')
    train.add_argument('--model-prefix', type=str,
                       help='model prefix')
    # Consistency fix: '--monitor' was added to the bare parser instead of the
    # Training group like every other option; the parsed namespace is
    # unchanged, only the --help grouping differs.
    train.add_argument('--monitor', dest='monitor', type=int, default=0,
                       help='log network parameters every N iters if larger than 0')
    train.add_argument('--load-epoch', type=int,
                       help='load the model on an epoch using the model-load-prefix')
    train.add_argument('--top-k', type=int, default=0,
                       help='report the top-k accuracy. 0 means no report.')
    train.add_argument('--test-io', type=int, default=0,
                       help='1 means test reading speed without training')
    return train
def fit(args, network, data_loader, **kwargs):
    """Train a model.

    args : argparse returns
    network : the symbol definition of the neural network
    data_loader : function that returns the train and val data iterators
    """
    # kvstore
    kv = mx.kvstore.create(args.kv_store)
    # logging
    head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=head)
    logging.info('start with arguments %s', args)
    # data iterators
    (train, val) = data_loader(args, kv)
    if args.test_io:
        # measure raw input-pipeline speed without touching the network
        tic = time.time()
        for i, batch in enumerate(train):
            for j in batch.data:
                j.wait_to_read()
            if (i+1) % args.disp_batches == 0:
                logging.info('Batch [%d]\tSpeed: %.2f samples/sec' % (
                    i, args.disp_batches*args.batch_size/(time.time()-tic)))
                tic = time.time()
        return
    # load model (explicit params passed in take precedence over checkpoints)
    if 'arg_params' in kwargs and 'aux_params' in kwargs:
        arg_params = kwargs['arg_params']
        aux_params = kwargs['aux_params']
    else:
        sym, arg_params, aux_params = _load_model(args, kv.rank)
        # if sym is not None:
        #     assert sym.tojson() == network.tojson()
    fixed_param_names = [name for name in network.list_arguments()
                         if name.startswith('nothing')]
    # helper information
    if fixed_param_names:
        logging.info("Freezed parameters: [" + ','.join(fixed_param_names) + ']')
    # save model
    checkpoint = _save_model(args, kv.rank)
    # devices for training
    # Bug fix: the original used "args.gpus is ''", an identity comparison
    # with a string literal whose result is implementation-dependent; use
    # equality instead.
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [
        mx.gpu(int(i)) for i in args.gpus.split(',')]
    # learning rate
    lr, lr_scheduler = _get_lr_scheduler(args, kv)
    # create model
    model = mx.mod.Module(
        context=devs,
        symbol=network,
        fixed_param_names=fixed_param_names
    )
    # (removed a no-op 'lr_scheduler = lr_scheduler' self-assignment)
    optimizer_params = {
        'learning_rate': lr,
        'wd': args.wd,
        'lr_scheduler': lr_scheduler}
    if args.optimizer == 'sgd':
        optimizer_params['momentum'] = args.mom
    all_layers_pattern = ".*"
    # alternative monitor pattern: ".*conv.*" for convolution layers only
    monitor = mx.mon.Monitor(args.monitor, pattern=all_layers_pattern, stat_func=lambda x: [
        mx.nd.max(x),
        mx.nd.min(x),
        mx.nd.mean(x),
        mx.nd.norm(x) / sqrt(x.size),
    ]) if args.monitor > 0 else None
    initializer = mx.init.Xavier(
        rnd_type='gaussian', factor_type="in", magnitude=2)
    # Robustness fix: add_fit_args does not define '--pretrained', so a plain
    # 'args.pretrained' raised AttributeError for callers that never added
    # that option; tolerate its absence.
    if getattr(args, 'pretrained', None):
        initializer = mx.initializer.Load(args.pretrained, default_init=initializer, verbose=True)
    # initializer = mx.init.Xavier(factor_type="in", magnitude=2.34),
    # evaluation metrics
    eval_metrics = ['accuracy']
    if args.top_k > 0:
        eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=args.top_k))
    # callbacks that run after each batch
    batch_end_callbacks = [mx.callback.Speedometer(args.batch_size, args.disp_batches)]
    if 'batch_end_callback' in kwargs:
        cbs = kwargs['batch_end_callback']
        batch_end_callbacks += cbs if isinstance(cbs, list) else [cbs]
    # run
    model.fit(train,
              begin_epoch=args.load_epoch if args.load_epoch else 0,
              num_epoch=args.num_epochs,
              eval_data=val,
              eval_metric=eval_metrics,
              kvstore=kv,
              optimizer=args.optimizer,
              optimizer_params=optimizer_params,
              initializer=initializer,
              arg_params=arg_params,
              aux_params=aux_params,
              batch_end_callback=batch_end_callbacks,
              epoch_end_callback=checkpoint,
              allow_missing=True,
              monitor=monitor)
|
hpi-xnor/BMXNet
|
smd_hpi/examples/binary-imagenet1k/common/fit.py
|
Python
|
apache-2.0
| 7,994
|
[
"Gaussian"
] |
e1091983424a29c3b7e1e6018c3f2ba7a091755835dd555727e6e4d858dadac0
|
"""Generate PSIMs Tiles.
Run from RUN_NOON.sh for the previous UTC date."""
import datetime
import os
import sys
import numpy as np
from metpy.units import units
from pyiem import iemre
from pyiem.util import utc, ncopen, convert_value
from pyiem.meteorology import gdd
def make_netcdf(fullpath, valid, west, south):
    """Create a psims tile netCDF file and return it reopened for appending.

    fullpath: file to create; valid: UTC timestamp of the CFS run;
    west/south: the tile's lower-left corner in degrees.
    """
    nc = ncopen(fullpath, "w")
    # Dimensions
    # one time step per day from 1980-01-01 through Dec 31 of valid's year
    totaldays = (
        valid.replace(month=12, day=31)
        - valid.replace(year=1980, month=1, day=1)
    ).days + 1
    nc.createDimension("time", totaldays)
    nc.createDimension("lat", 16)  # 0.125 grid over 2 degrees
    nc.createDimension("lon", 16)
    # Coordinate Dimensions
    time = nc.createVariable("time", int, ("time",))
    time.units = "days since 1980-01-01 00:00:00"
    time[:] = np.arange(0, totaldays)
    lat = nc.createVariable("lat", float, ("lat"))
    lat.units = "degrees_north"
    lat.long_name = "latitude"
    # cell centers, offset half a grid spacing from the tile edge
    lat[:] = np.arange(south + 0.125 / 2.0, south + 2.0, 0.125)
    lon = nc.createVariable("lon", float, ("lon"))
    lon.units = "degrees_east"
    lon.long_name = "longitude"
    lon[:] = np.arange(west + 0.125 / 2.0, west + 2.0, 0.125)
    prcp = nc.createVariable(
        "prcp", float, ("time", "lat", "lon"), fill_value=1e20
    )
    prcp.units = "mm/day"
    prcp.long_name = "daily total precipitation"
    tmax = nc.createVariable(
        "tmax", float, ("time", "lat", "lon"), fill_value=1e20
    )
    tmax.units = "degrees C"
    tmax.long_name = "daily maximum temperature"
    tmin = nc.createVariable(
        "tmin", float, ("time", "lat", "lon"), fill_value=1e20
    )
    tmin.units = "degrees C"
    tmin.long_name = "daily minimum temperature"
    gddf = nc.createVariable(
        "gdd_f", float, ("time", "lat", "lon"), fill_value=1e20
    )
    gddf.units = "degrees F"
    gddf.long_name = "Growing Degree Days F (base 50 ceiling 86)"
    srad = nc.createVariable(
        "srad", float, ("time", "lat", "lon"), fill_value=1e20
    )
    srad.units = "MJ"
    srad.long_name = "daylight average incident shortwave radiation"
    # did not do vp or cropland
    nc.close()
    # reopen in append mode so the caller can fill the variables
    nc = ncopen(fullpath, "a")
    return nc
def replace_cfs(nc, valid, islice, jslice):
    """Copy CFS forecast data into the remainder of *valid*'s year.

    Fills destination time steps from the day after *valid* through Dec 31,
    reading the CFS netCDF for this model run and subsetting the grid with
    islice/jslice.
    """
    tidx0 = (valid - datetime.date(1980, 1, 1)).days
    tidx1 = (
        datetime.date(valid.year, 12, 31) - datetime.date(1980, 1, 1)
    ).days
    cfsnc = ncopen(valid.strftime("/mesonet/data/iemre/cfs_%Y%m%d%H.nc"))
    tidx = iemre.daily_offset(valid + datetime.timedelta(days=1))
    tslice = slice(tidx0 + 1, tidx1 + 1)
    # print("replace_cfs filling %s from %s" % (tslice, tidx))
    # CFS is W m-2, we want MJ
    nc.variables["srad"][tslice, :, :] = (
        cfsnc.variables["srad"][tidx:, jslice, islice] * 86400.0 / 1000000.0
    )
    highc = convert_value(
        cfsnc.variables["high_tmpk"][tidx:, jslice, islice], "degK", "degC"
    )
    lowc = convert_value(
        cfsnc.variables["low_tmpk"][tidx:, jslice, islice], "degK", "degC"
    )
    nc.variables["tmax"][tslice, :, :] = highc
    nc.variables["tmin"][tslice, :, :] = lowc
    # growing degree days derived from the converted temperatures
    nc.variables["gdd_f"][tslice, :, :] = gdd(
        units("degC") * highc, units("degC") * lowc
    )
    nc.variables["prcp"][tslice, :, :] = cfsnc.variables["p01d"][
        tidx:, jslice, islice
    ]
    cfsnc.close()
def copy_iemre(nc, fromyear, ncdate0, ncdate1, islice, jslice):
    """Copy IEMRE data from *fromyear* to **inclusive** destination dates.

    ncdate0/ncdate1 address the destination time axis (days since
    1980-01-01).  The matching steps are read from the *fromyear* IEMRE
    file: from its start when the window begins Jan 1, otherwise from
    its end.
    """
    rencfn = iemre.get_daily_ncname(fromyear)
    if not os.path.isfile(rencfn):
        print("reanalysis fn %s missing" % (rencfn,))
        return
    renc = ncopen(rencfn)
    tidx0 = (ncdate0 - datetime.date(1980, 1, 1)).days
    tidx1 = (ncdate1 - datetime.date(1980, 1, 1)).days
    tslice = slice(tidx0, tidx1 + 1)
    # time steps to fill
    tsteps = (tidx1 - tidx0) + 1
    # figure out the source slice within the single-year IEMRE file
    if ncdate0.strftime("%m%d") == "0101":
        retslice = slice(0, tsteps)
    else:
        retslice = slice(0 - tsteps, None)
    # print("copy_iemre from %s filling %s steps nc: %s iemre: %s" % (
    #     fromyear, tsteps, tslice, retslice
    # ))
    highc = convert_value(
        renc.variables["high_tmpk"][retslice, jslice, islice], "degK", "degC"
    )
    lowc = convert_value(
        renc.variables["low_tmpk"][retslice, jslice, islice], "degK", "degC"
    )
    nc.variables["tmax"][tslice, :, :] = highc
    nc.variables["tmin"][tslice, :, :] = lowc
    nc.variables["gdd_f"][tslice, :, :] = gdd(
        units("degC") * highc, units("degC") * lowc
    )
    nc.variables["prcp"][tslice, :, :] = renc.variables["p01d"][
        retslice, jslice, islice
    ]
    for rt, nt in zip(
        list(
            range(
                retslice.start, 0 if retslice.stop is None else retslice.stop
            )
        ),
        list(range(tslice.start, tslice.stop)),
    ):
        # IEMRE power_swdn is MJ, test to see if data exists
        srad = renc.variables["power_swdn"][rt, jslice, islice]
        # All or nothing: fall back to rsds when power_swdn has any gaps
        if np.isnan(np.mean(srad)) or srad.mask.any():
            # IEMRE rsds uses W m-2, we want MJ
            srad = (
                renc.variables["rsds"][rt, jslice, islice]
                * 86400.0
                / 1000000.0
            )
        nc.variables["srad"][nt, :, :] = srad
    renc.close()
def tile_extraction(nc, valid, west, south):
    """Fill the tile: IEMRE history plus CFS forecast for the current year.

    For every year since 1980, the Jan 1 .. today portion is copied from the
    *current* year's IEMRE data; the remainder of the current year comes from
    CFS, and the remainder of past years from that year's own IEMRE file.
    """
    # update model metadata
    nc.valid = "CFS model: %s" % (valid.strftime("%Y-%m-%dT%H:%M:%SZ"),)
    i, j = iemre.find_ij(west, south)
    islice = slice(i, i + 16)
    jslice = slice(j, j + 16)
    for year in range(1980, valid.year + 1):
        # Current year IEMRE should be substituted for this year's data
        # NOTE(review): assumes valid.month/valid.day exists in every year
        # (Feb 29 would raise); the two-day lag chosen in main() mitigates
        # this -- confirm.
        today = datetime.date(year, valid.month, valid.day)
        copy_iemre(
            nc, valid.year, datetime.date(year, 1, 1), today, islice, jslice
        )
        # replace CFS!
        if year == valid.year:
            replace_cfs(nc, valid.date(), islice, jslice)
        else:
            # replace rest of year with previous year
            copy_iemre(
                nc,
                year,
                today + datetime.timedelta(days=1),
                datetime.date(year, 12, 31),
                islice,
                jslice,
            )
def qc(nc):
    """Quick QC of the file: report days whose mean srad is not positive."""
    base = datetime.date(1980, 1, 1)
    for idx, offset in enumerate(nc.variables["time"][:]):
        day = base + datetime.timedelta(days=int(offset))
        mean_srad = np.mean(nc.variables["srad"][idx, :, :])
        if mean_srad > 0:
            continue
        print("ts: %s avgv: %s" % (day, mean_srad))
    print("done...")
def workflow(valid, ncfn, west, south):
    """Make the magic happen: build one tile file for this CFS run."""
    basedir = "/mesonet/share/pickup/yieldfx/cfs%02i" % (valid.hour,)
    if not os.path.isdir(basedir):
        os.makedirs(basedir)
    fullpath = "%s/%s" % (basedir, ncfn)
    nc = make_netcdf(fullpath, valid, west, south)
    tile_extraction(nc, valid, west, south)
    # qc(nc)
    nc.close()
def main(argv):
    """Go Main Go"""
    # Run for the 12z file **two days ago**: for a year without a leap day,
    # previous-year filling would otherwise ask for one day too many, which
    # currently has no data.
    if len(argv) == 4:
        today = datetime.date(int(argv[1]), int(argv[2]), int(argv[3]))
    else:
        today = datetime.date.today() - datetime.timedelta(days=2)
    for hour in [0, 6, 12, 18]:
        valid = utc(today.year, today.month, today.day, hour)
        # Create tiles covering the 12-state region.  psims divides its data
        # into 2x2-degree tiles: the first file-name number counts tiles
        # south of 90N, the second counts tiles east of 180W.
        for west in np.arange(-104, -80, 2):
            for south in np.arange(36, 50, 2):
                lat_tile = (90 - south) / 2
                lon_tile = (180 - (0 - west)) / 2 + 1
                ncfn = "clim_%04i_%04i.tile.nc4" % (lat_tile, lon_tile)
                workflow(valid, ncfn, west, south)


if __name__ == "__main__":
    main(sys.argv)
|
akrherz/iem
|
scripts/yieldfx/cfs_tiler.py
|
Python
|
mit
| 8,190
|
[
"NetCDF"
] |
95789d1531337c8a8d768b3447f82a7e68cea97e1d93d675aa1f698116074abf
|
# Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002 Bryce "Zooko" Wilcox-O'Hearn
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
__revision__ = "$Id: EGTPConstants.py,v 1.2 2002/12/02 19:58:47 myers_carpenter Exp $"

# Length of RSA public moduli in 8-bit bytes (octets).
# Note that it is allowable for some of the high order bits to be 0.  It is
# even allowable for more than 8 of those bits to be 0 without changing the
# "length" of the modulus.  This is really the log-base-2 of the size of the
# space from which we randomly choose such values, rather than the "length"
# of the binary encoding of any particular value.
# Fix: floor division keeps this an int under Python 3 (a plain '/' would
# yield the float 128.0); the value is unchanged on Python 2.
SIZE_OF_MODULAR_VALUES = 1024 // 8

# Your code should probably be written to work with any public exponent; it
# is best not to use this constant.  It exists only because mesgen currently
# uses it.
HARDCODED_RSA_PUBLIC_EXPONENT = 3

# Size of ids, secrets, random numbers, salt, and other values that must be
# universally unique, in 8-bit bytes (octets).
# You absolutely cannot change this number: it is hardcoded all over the
# place, and this variable is useful only as documentation.
SIZE_OF_UNIQS = 20
|
zooko/egtp_new
|
egtp/EGTPConstants.py
|
Python
|
lgpl-2.1
| 1,283
|
[
"VisIt"
] |
e322e181f0a2384b8ffd795b000348b99b4bc93fc7fe0e501d9d25e3db27dd62
|
# mako/pyparser.py
# Copyright (C) 2006-2013 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler
module is used.
"""
from mako import exceptions, util, compat
from mako.compat import StringIO, arg_stringname
import operator
if compat.py3k:
    # words that cannot be assigned to (notably
    # smaller than the total keys in __builtins__)
    reserved = set(['True', 'False', 'None', 'print'])

    # the "id" attribute on a function node
    arg_id = operator.attrgetter('arg')
else:
    # words that cannot be assigned to (notably
    # smaller than the total keys in __builtins__)
    reserved = set(['True', 'False', 'None'])

    # the "id" attribute on a function node
    arg_id = operator.attrgetter('id')

try:
    # Prefer the builtin _ast module (Python >= 2.5); fall back to the
    # long-deprecated ``compiler`` package when it is unavailable.
    import _ast
    util.restore__ast(_ast)
    from mako import _ast_util
except ImportError:
    _ast = None
    from compiler import parse as compiler_parse
    from compiler import visitor
def parse(code, mode='exec', **exception_kwargs):
    """Parse an expression into AST"""
    try:
        if not _ast:
            # compiler-module fallback (Python < 2.6): it wants bytes,
            # so encode unicode defensively first.
            if isinstance(code, compat.text_type):
                code = code.encode('ascii', 'backslashreplace')
            return compiler_parse(code, mode)
        return _ast_util.parse(code, '<unknown>', mode)
    except Exception:
        # Re-raise any parse failure as a template SyntaxException,
        # embedding the error type, message and a snippet of the code.
        err = compat.exception_as()
        raise exceptions.SyntaxException(
            "(%s) %s (%r)" % (
                err.__class__.__name__,
                err,
                code[0:50]
            ), **exception_kwargs)
if _ast:
    class FindIdentifiers(_ast_util.NodeVisitor):
        """AST visitor that populates ``listener.declared_identifiers``
        and ``listener.undeclared_identifiers`` for a block of code."""
        def __init__(self, listener, **exception_kwargs):
            # True while visiting inside a function/lambda body; names
            # bound there are locals, not template-level declarations.
            self.in_function = False
            self.in_assign_targets = False
            # names bound within the current (possibly nested) function scope
            self.local_ident_stack = set()
            self.listener = listener
            self.exception_kwargs = exception_kwargs
        def _add_declared(self, name):
            # Record a binding on the listener at module level, or as a
            # function-local binding otherwise.
            if not self.in_function:
                self.listener.declared_identifiers.add(name)
            else:
                self.local_ident_stack.add(name)
        def visit_ClassDef(self, node):
            # a class statement declares its name; the body is not walked
            self._add_declared(node.name)
        def visit_Assign(self, node):
            # flip around the visiting of Assign so the expression gets
            # evaluated first, in the case of a clause like "x=x+5" (x
            # is undeclared)
            self.visit(node.value)
            in_a = self.in_assign_targets
            self.in_assign_targets = True
            for n in node.targets:
                self.visit(n)
            self.in_assign_targets = in_a
        if compat.py3k:
            # ExceptHandler is in Python 2, but this block only works in
            # Python 3 (and is required there)
            def visit_ExceptHandler(self, node):
                # "except SomeType as name": the bound name is a declaration;
                # the exception type itself is a read (undeclared reference).
                if node.name is not None:
                    self._add_declared(node.name)
                if node.type is not None:
                    self.listener.undeclared_identifiers.add(node.type.id)
                for statement in node.body:
                    self.visit(statement)
        def visit_Lambda(self, node, *args):
            self._visit_function(node, True)
        def visit_FunctionDef(self, node):
            # the function name is declared in the enclosing scope, then
            # the body is walked as a nested function scope
            self._add_declared(node.name)
            self._visit_function(node, False)
        def _expand_tuples(self, args):
            # Yield argument nodes, flattening Python 2 tuple-unpacking
            # parameters such as "def f((a, b)):".
            for arg in args:
                if isinstance(arg, _ast.Tuple):
                    for n in arg.elts:
                        yield n
                else:
                    yield arg
        def _visit_function(self, node, islambda):
            # push function state onto stack. dont log any more
            # identifiers as "declared" until outside of the function,
            # but keep logging identifiers as "undeclared". track
            # argument names in each function header so they arent
            # counted as "undeclared"
            inf = self.in_function
            self.in_function = True
            local_ident_stack = self.local_ident_stack
            self.local_ident_stack = local_ident_stack.union([
                arg_id(arg) for arg in self._expand_tuples(node.args.args)
            ])
            if islambda:
                self.visit(node.body)
            else:
                for n in node.body:
                    self.visit(n)
            # pop function state: restore enclosing scope's view
            self.in_function = inf
            self.local_ident_stack = local_ident_stack
        def visit_For(self, node):
            # flip around visit: the iterable is evaluated before the
            # loop target is bound
            self.visit(node.iter)
            self.visit(node.target)
            for statement in node.body:
                self.visit(statement)
            for statement in node.orelse:
                self.visit(statement)
        def visit_Name(self, node):
            if isinstance(node.ctx, _ast.Store):
                # this is eqiuvalent to visit_AssName in
                # compiler
                self._add_declared(node.id)
            elif node.id not in reserved and node.id \
                not in self.listener.declared_identifiers and node.id \
                not in self.local_ident_stack:
                # a read of a name never bound here: report as undeclared
                self.listener.undeclared_identifiers.add(node.id)
        def visit_Import(self, node):
            # "import a.b.c [as d]" declares "d" if aliased, else "a"
            for name in node.names:
                if name.asname is not None:
                    self._add_declared(name.asname)
                else:
                    self._add_declared(name.name.split('.')[0])
        def visit_ImportFrom(self, node):
            for name in node.names:
                if name.asname is not None:
                    self._add_declared(name.asname)
                else:
                    if name.name == '*':
                        # wildcard imports make identifier tracking
                        # impossible, so they are rejected outright
                        raise exceptions.CompileException(
                            "'import *' is not supported, since all identifier "
                            "names must be explicitly declared. Please use the "
                            "form 'from <modulename> import <name1>, <name2>, "
                            "...' instead.", **self.exception_kwargs)
                    self._add_declared(name.name)
class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visit_Tuple(self, node):
for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = \
self.listener.declared_identifiers.union(
p.declared_identifiers)
self.listener.undeclared_identifiers = \
self.listener.undeclared_identifiers.union(
p.undeclared_identifiers)
class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visit_FunctionDef(self, node):
self.listener.funcname = node.name
argnames = [arg_id(arg) for arg in node.args.args]
if node.args.vararg:
argnames.append(arg_stringname(node.args.vararg))
if node.args.kwarg:
argnames.append(arg_stringname(node.args.kwarg))
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
class ExpressionGenerator(object):
def __init__(self, astnode):
self.generator = _ast_util.SourceGenerator(' ' * 4)
self.generator.visit(astnode)
def value(self):
return ''.join(self.generator.result)
else:
    class FindIdentifiers(object):
        """compiler-module (Python < 2.6) flavor of the identifier
        classifier: fills ``listener.declared_identifiers`` and
        ``listener.undeclared_identifiers``."""
        def __init__(self, listener, **exception_kwargs):
            # True while inside a function/lambda body; names bound there
            # are locals, not template-level declarations.
            self.in_function = False
            # names bound within the current (possibly nested) function scope
            self.local_ident_stack = set()
            self.listener = listener
            self.exception_kwargs = exception_kwargs
        def _add_declared(self, name):
            # record a binding at module level or as a function-local
            if not self.in_function:
                self.listener.declared_identifiers.add(name)
            else:
                self.local_ident_stack.add(name)
        def visitClass(self, node, *args):
            self._add_declared(node.name)
        def visitAssName(self, node, *args):
            self._add_declared(node.name)
        def visitAssign(self, node, *args):
            # flip around the visiting of Assign so the expression gets
            # evaluated first, in the case of a clause like "x=x+5" (x
            # is undeclared)
            self.visit(node.expr, *args)
            for n in node.nodes:
                self.visit(n, *args)
        def visitLambda(self, node, *args):
            self._visit_function(node, args)
        def visitFunction(self, node, *args):
            # the function name is declared in the enclosing scope before
            # the body is walked as a nested scope
            self._add_declared(node.name)
            self._visit_function(node, args)
        def _expand_tuples(self, args):
            # Yield argument names, flattening Python 2 tuple-unpacking
            # parameters such as "def f((a, b)):".
            for arg in args:
                if isinstance(arg, tuple):
                    for n in arg:
                        yield n
                else:
                    yield arg
        def _visit_function(self, node, args):
            # push function state onto stack. dont log any more
            # identifiers as "declared" until outside of the function,
            # but keep logging identifiers as "undeclared". track
            # argument names in each function header so they arent
            # counted as "undeclared"
            inf = self.in_function
            self.in_function = True
            local_ident_stack = self.local_ident_stack
            self.local_ident_stack = local_ident_stack.union([
                arg for arg in self._expand_tuples(node.argnames)
            ])
            for n in node.getChildNodes():
                self.visit(n, *args)
            # pop function state: restore enclosing scope's view
            self.in_function = inf
            self.local_ident_stack = local_ident_stack
        def visitFor(self, node, *args):
            # flip around visit: evaluate the iterable before the loop target
            self.visit(node.list, *args)
            self.visit(node.assign, *args)
            self.visit(node.body, *args)
        def visitName(self, node, *args):
            # a read of a name never bound here: report as undeclared
            if node.name not in reserved and node.name \
                not in self.listener.declared_identifiers and node.name \
                not in self.local_ident_stack:
                self.listener.undeclared_identifiers.add(node.name)
        def visitImport(self, node, *args):
            # "import a.b.c [as d]" declares "d" if aliased, else "a"
            for mod, alias in node.names:
                if alias is not None:
                    self._add_declared(alias)
                else:
                    self._add_declared(mod.split('.')[0])
        def visitFrom(self, node, *args):
            for mod, alias in node.names:
                if alias is not None:
                    self._add_declared(alias)
                else:
                    if mod == '*':
                        # wildcard imports make identifier tracking
                        # impossible, so they are rejected outright
                        raise exceptions.CompileException(
                            "'import *' is not supported, since all identifier "
                            "names must be explicitly declared. Please use the "
                            "form 'from <modulename> import <name1>, <name2>, "
                            "...' instead.", **self.exception_kwargs)
                    self._add_declared(mod)
        def visit(self, expr):
            visitor.walk(expr, self)  # , walker=walker())
    class FindTuple(object):
        """compiler-module flavor: expands a tuple expression into one
        code object per element, merging identifier info onto the
        listener."""
        def __init__(self, listener, code_factory, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs
            self.code_factory = code_factory
        def visitTuple(self, node, *args):
            for n in node.nodes:
                # wrap each tuple element in its own code object
                p = self.code_factory(n, **self.exception_kwargs)
                self.listener.codeargs.append(p)
                self.listener.args.append(ExpressionGenerator(n).value())
                # merge the element's identifier sets into the listener's
                self.listener.declared_identifiers = \
                    self.listener.declared_identifiers.union(
                        p.declared_identifiers)
                self.listener.undeclared_identifiers = \
                    self.listener.undeclared_identifiers.union(
                        p.undeclared_identifiers)
        def visit(self, expr):
            visitor.walk(expr, self)  # , walker=walker())
    class ParseFunc(object):
        """compiler-module flavor: records a function definition's name
        and signature details onto the listener."""
        def __init__(self, listener, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs
        def visitFunction(self, node, *args):
            # compiler.ast.Function carries these directly (unlike _ast,
            # where they live under node.args)
            self.listener.funcname = node.name
            self.listener.argnames = node.argnames
            self.listener.defaults = node.defaults
            self.listener.varargs = node.varargs
            self.listener.kwargs = node.kwargs
        def visit(self, expr):
            visitor.walk(expr, self)
class ExpressionGenerator(object):
"""given an AST node, generates an equivalent literal Python
expression."""
def __init__(self, astnode):
self.buf = StringIO()
visitor.walk(astnode, self) # , walker=walker())
def value(self):
return self.buf.getvalue()
def operator(self, op, node, *args):
self.buf.write('(')
self.visit(node.left, *args)
self.buf.write(' %s ' % op)
self.visit(node.right, *args)
self.buf.write(')')
def booleanop(self, op, node, *args):
self.visit(node.nodes[0])
for n in node.nodes[1:]:
self.buf.write(' ' + op + ' ')
self.visit(n, *args)
def visitConst(self, node, *args):
self.buf.write(repr(node.value))
def visitAssName(self, node, *args):
# TODO: figure out OP_ASSIGN, other OP_s
self.buf.write(node.name)
def visitName(self, node, *args):
self.buf.write(node.name)
def visitMul(self, node, *args):
self.operator('*', node, *args)
def visitAnd(self, node, *args):
self.booleanop('and', node, *args)
def visitOr(self, node, *args):
self.booleanop('or', node, *args)
def visitBitand(self, node, *args):
self.booleanop('&', node, *args)
def visitBitor(self, node, *args):
self.booleanop('|', node, *args)
def visitBitxor(self, node, *args):
self.booleanop('^', node, *args)
def visitAdd(self, node, *args):
self.operator('+', node, *args)
def visitGetattr(self, node, *args):
self.visit(node.expr, *args)
self.buf.write('.%s' % node.attrname)
def visitSub(self, node, *args):
self.operator('-', node, *args)
def visitNot(self, node, *args):
self.buf.write('not ')
self.visit(node.expr)
def visitDiv(self, node, *args):
self.operator('/', node, *args)
def visitFloorDiv(self, node, *args):
self.operator('//', node, *args)
def visitSubscript(self, node, *args):
self.visit(node.expr)
self.buf.write('[')
[self.visit(x) for x in node.subs]
self.buf.write(']')
def visitUnarySub(self, node, *args):
self.buf.write('-')
self.visit(node.expr)
def visitUnaryAdd(self, node, *args):
self.buf.write('-')
self.visit(node.expr)
def visitSlice(self, node, *args):
self.visit(node.expr)
self.buf.write('[')
if node.lower is not None:
self.visit(node.lower)
self.buf.write(':')
if node.upper is not None:
self.visit(node.upper)
self.buf.write(']')
def visitDict(self, node):
self.buf.write('{')
c = node.getChildren()
for i in range(0, len(c), 2):
self.visit(c[i])
self.buf.write(': ')
self.visit(c[i + 1])
if i < len(c) - 2:
self.buf.write(', ')
self.buf.write('}')
def visitTuple(self, node):
self.buf.write('(')
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i < len(c) - 1:
self.buf.write(', ')
self.buf.write(')')
def visitList(self, node):
self.buf.write('[')
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i < len(c) - 1:
self.buf.write(', ')
self.buf.write(']')
def visitListComp(self, node):
self.buf.write('[')
self.visit(node.expr)
self.buf.write(' ')
for n in node.quals:
self.visit(n)
self.buf.write(']')
def visitListCompFor(self, node):
self.buf.write(' for ')
self.visit(node.assign)
self.buf.write(' in ')
self.visit(node.list)
for n in node.ifs:
self.visit(n)
def visitListCompIf(self, node):
self.buf.write(' if ')
self.visit(node.test)
def visitCompare(self, node):
self.visit(node.expr)
for tup in node.ops:
self.buf.write(tup[0])
self.visit(tup[1])
def visitCallFunc(self, node, *args):
self.visit(node.node)
self.buf.write('(')
if len(node.args):
self.visit(node.args[0])
for a in node.args[1:]:
self.buf.write(', ')
self.visit(a)
self.buf.write(')')
def visitLambda(self, node, *args):
self.buf.write('lambda ')
argnames = list(node.argnames)
kw = arg = None
if node.kwargs > 0:
kw = argnames.pop(-1)
if node.varargs > 0:
arg = argnames.pop(-1)
if arg:
argnames.append("*%s" % arg)
if kw:
argnames.append("**%s" % kw)
self.buf.write(", ".join(argnames))
self.buf.write(': ')
self.visit(node.code)
    class walker(visitor.ASTVisitor):
        """Debugging visitor: prints each node as it is dispatched, then
        delegates to the normal ASTVisitor dispatch."""
        def dispatch(self, node, *args):
            print('Node:', str(node))
            # print "dir:", dir(node)
            return visitor.ASTVisitor.dispatch(self, node, *args)
|
adamwwt/chvac
|
venv/lib/python2.7/site-packages/mako/pyparser.py
|
Python
|
mit
| 19,261
|
[
"VisIt"
] |
342f30dbab201291c6805368a7a44a94179703ffa38344b2ac5ea556decd69b9
|
import numpy as np
from scipy.special import gammaln, hermitenorm
import scipy.stats
from scipy.misc import factorial
from nipy.testing import assert_almost_equal, dec
from nipy.algorithms.statistics import rft
#def rho(x, dim, df=np.inf):
# """
# EC densities for T and Gaussian (df=inf) random fields.
# """
#
# m = df
#
# if dim > 0:
# x = np.asarray(x, np.float64)
#--jarrod: shouldn't Q be rft.Q??
# q = Q(dim, dfd=df)(x)
#
# if np.isfinite(m):
# q *= np.power(1 + x**2/m, -(m-1)/2.)
# else:
# q *= np.exp(-x**2/2)
#
# return q * np.power(2*np.pi, -(dim+1)/2.)
# else:
# if np.isfinite(m):
# return scipy.stats.t.sf(x, df)
# else:
# return scipy.stats.norm.sf(x)
def K(dim=4, dfn=7, dfd=np.inf):
    r"""
    Determine the polynomial K in

    Worsley, K.J. (1994). 'Local maxima and the expected Euler
    characteristic of excursion sets of \chi^2, F and t fields.'
    Advances in Applied Probability, 26:13-42.

    If dfd=inf, return the limiting polynomial.

    Parameters: dim -- dimension of the field; dfn/dfd -- numerator and
    denominator degrees of freedom.  Returns an np.poly1d in the variable
    evaluated at the F statistic.
    """
    def lbinom(n, j):
        # log of the binomial coefficient C(n, j)
        return gammaln(n+1) - gammaln(j+1) - gammaln(n-j+1)
    m = dfd
    n = dfn
    D = dim
    k = np.arange(D)
    coef = 0
    # accumulate the coefficient vector in log space for stability
    for j in range(int(np.floor((D-1)/2.)+1)):
        if np.isfinite(m):
            t = (gammaln((m+n-D)/2.+j) -
                 gammaln(j+1) -
                 gammaln((m+n-D)/2.))
            t += lbinom(m-1, k-j) - k * np.log(m)
        else:
            # limiting (dfd=inf) form; patch the infinities produced by
            # factorial(k-j) when k < j back to -inf in log space
            _t = np.power(2., -j) / (factorial(k-j) * factorial(j))
            t = np.log(_t)
            t[np.isinf(_t)] = -np.inf
        t += lbinom(n-1, D-1-j-k)
        coef += (-1)**(D-1) * factorial(D-1) * np.exp(t) * np.power(-1.*n, k)
    # poly1d expects highest-order coefficient first
    return np.poly1d(coef[::-1])
def F(x, dim, dfd=np.inf, dfn=1):
    """
    EC densities for F and Chi^2 (dfd=inf) random fields.

    For dim > 0 this is the polynomial K (Worsley, 1994) times an
    exponential/power-law factor; for dim == 0 it is the marginal tail
    probability of the field.
    """
    m = float(dfd)
    n = float(dfn)
    D = float(dim)
    if dim > 0:
        x = np.asarray(x, np.float64)
        k = K(dim=dim, dfd=dfd, dfn=dfn)(x)
        if np.isfinite(m):
            f = x*n/m
            t = -np.log(1 + f) * (m+n-2.) / 2.
            t += np.log(f) * (n-D) / 2.
            t += gammaln((m+n-D)/2.) - gammaln(m/2.)
        else:
            # dfd=inf limit: n*F ~ chi^2(n)
            f = x*n
            t = np.log(f/2.) * (n-D) / 2. - f/2.
        # common (2*pi)-normalization applied to both branches
        t -= np.log(2*np.pi) * D / 2. + np.log(2) * (D-2)/2. + gammaln(n/2.)
        k *= np.exp(t)
        return k
    else:
        if np.isfinite(m):
            return scipy.stats.f.sf(x, dfn, dfd)
        else:
            # NOTE(review): this uses scipy.stats.chi.sf (chi, not chi2) in
            # the dfd=inf branch -- confirm this is the intended marginal.
            return scipy.stats.chi.sf(x, dfn)
def polyF(dim, dfd=np.inf, dfn=1):
    r"""
    Return the polynomial part of the EC density when
    evaluating the polynomial on the sqrt(F) scale (or sqrt(chi^2)=chi scale).

    The polynomial is such that, if dfd=inf, the F EC density is just

        polyF(dim,dfn=dfn)(sqrt(dfn*x)) * exp(-dfn*x/2) * (2\pi)^{-(dim+1)/2}
    """
    n = float(dfn)
    m = float(dfd)
    D = float(dim)
    p = K(dim=D, dfd=m, dfn=n)
    c = p.c
    # Take care of the powers of n (i.e. we want polynomial K evaluated
    # at */n).
    for i in range(p.order+1):
        c[i] /= np.power(n, p.order-i)
    # Now, turn it into a polynomial of x when evaluated at x**2
    # (even powers only: C[2*i] holds c[i])
    C = np.zeros((2*c.shape[0]-1,))
    for i in range(c.shape[0]):
        C[2*i] = c[i]
    # Multiply by the factor x^(dfn-dim) in front (see Theorem 4.6 of
    # Worsley (1994), cited above).
    if dim > dfn:  # divide by x^(dim-dfn)
        C = C[0:(C.shape[0] - (dim-dfn))]
    else:  # multiply by x^(dim-dfn)
        C = np.hstack([C, np.zeros((dfn-dim,))])
    # Fix up constant in front
    if np.isfinite(m):
        C *= np.exp(gammaln((m+n-D)/2.) - gammaln(m/2.)) * np.power(m, -(n-D)/2.)
    else:
        C *= np.power(2, -(n-D)/2.)
    C /= np.power(2, (dim-2)/2.) * np.exp(gammaln(n/2.))
    C *= np.sqrt(2*np.pi)
    return np.poly1d(C)
def F_alternative(x, dim, dfd=np.inf, dfn=1):
    """
    Another way to compute F EC density as a product of a
    polynomial and a power of (1+x^2/m).
    """
    num_df = float(dfn)
    den_df = float(dfd)
    x = np.asarray(x, np.float64)
    # polynomial part, evaluated on the sqrt(F) scale
    poly_part = polyF(dim=dim, dfd=dfd, dfn=dfn)(np.sqrt(num_df * x))
    # power-law factor; the dfd=inf limit degenerates to an exponential
    if np.isfinite(den_df):
        tail = np.power(1 + num_df * x / den_df, -(den_df + num_df - 2.) / 2.)
    else:
        tail = np.exp(-num_df * x / 2)
    return poly_part * tail * np.power(2 * np.pi, -(dim + 1) / 2.)
def test_polynomial1():
    # Polynomial part of Gaussian densities are Hermite polynomials.
    for dim in range(1,10):
        q = rft.Gaussian().quasi(dim)
        h = hermitenorm(dim-1)
        yield assert_almost_equal, q.c, h.c
def test_polynomial2():
    # EC density of chi^2(1) is 2 * EC density of Gaussian so
    # polynomial part is a factor of 2 as well.
    for dim in range(1,10):
        q = rft.ChiSquared(dfn=1).quasi(dim)
        h = hermitenorm(dim-1)
        yield assert_almost_equal, q.c, 2*h.c
@dec.slow
def test_polynomial3():
    # EC density of F with infinite dfd is the same as chi^2 --
    # polynomials should be the same.
    for dim in range(10):
        for dfn in range(5,10):
            q1 = rft.FStat(dfn=dfn, dfd=np.inf).quasi(dim)
            q2 = rft.ChiSquared(dfn=dfn).quasi(dim)
            yield assert_almost_equal, q1.c, q2.c
@dec.slow
def test_chi1():
    # EC density of F with infinite dfd is the same as chi^2 --
    # EC should be the same.
    x = np.linspace(0.1,10,100)
    for dim in range(10):
        for dfn in range(5,10):
            c = rft.ChiSquared(dfn=dfn)
            f = rft.FStat(dfn=dfn, dfd=np.inf)
            chi1 = c.density(dfn*x, dim)
            chi2 = f.density(x, dim)
            yield assert_almost_equal, chi1, chi2
def test_chi2():
    # NOTE(review): redefined verbatim (plus @dec.slow) at the bottom of
    # this module; only the later definition is collected by the runner.
    # Quasi-polynomial part of the chi^2 EC density should
    # be the limiting polyF.
    x = np.linspace(0.1,10,100)
    for dim in range(1,10):
        for dfn in range(5,10):
            c = rft.ChiSquared(dfn=dfn)
            p1 = c.quasi(dim=dim)
            p2 = polyF(dim=dim, dfn=dfn)
            yield assert_almost_equal, p1.c, p2.c
def test_chi3():
    # EC density of chi^2(1) is 2 * EC density of Gaussian squared so
    # EC densities factor of 2 as well.
    x = np.linspace(0.1,10,100)
    for dim in range(10):
        g = rft.Gaussian()
        c = rft.ChiSquared(dfn=1)
        ec1 = g.density(np.sqrt(x), dim)
        ec2 = c.density(x, dim)
        yield assert_almost_equal, 2*ec1, ec2
def test_T1():
    # 0-dim EC density should be the tail probability.
    x = np.linspace(0.1,10,100)
    for dfd in [40,50]:
        t = rft.TStat(dfd=dfd)
        yield assert_almost_equal, t(x), scipy.stats.t.sf(x, dfd)
    t = rft.TStat(dfd=np.inf)
    # BUG FIX: this final check was a bare tuple expression (missing
    # "yield"), so the Gaussian-limit case was silently never tested.
    yield assert_almost_equal, t(x), scipy.stats.norm.sf(x)
@dec.slow
def test_T2():
    # NOTE(review): shadowed by a second test_T2 defined later in this
    # module, so this version is never collected or run.
    # T is an F with dfn=1
    x = np.linspace(0.1,10,100)
    for dfd in [40,50,np.inf]:
        t = rft.TStat(dfd=dfd)
        f = rft.FStat(dfd=dfd, dfn=1)
        for dim in range(7):
            yield assert_almost_equal, t.density(x, dim), f.density(x**2, dim)
def test_search3():
    # In the Gaussian case, test that search and product give same results.
    search = rft.IntrinsicVolumes([3,4,5,7])
    g1 = rft.Gaussian(search=search)
    g2 = rft.Gaussian(product=search)
    x = np.linspace(0.1,10,100)
    y1 = g1(x)
    y2 = g2(x)
    assert_almost_equal(y1, y2)
def test_search():
    # Test that the search region works.
    # Closed-form expectation for IntrinsicVolumes([3,4,5]) against the
    # Gaussian EC densities.
    search = rft.IntrinsicVolumes([3,4,5])
    x = np.linspace(0.1,10,100)
    stat = rft.Gaussian(search=search)
    v1 = stat(x)
    v2 = ((5*x + 4*np.sqrt(2*np.pi)) *
          np.exp(-x**2/2.) / np.power(2*np.pi, 1.5) +
          3 * scipy.stats.norm.sf(x))
    assert_almost_equal(v1, v2)
@dec.slow
def test_search2():
    # Test that the search region works.
    # Each stat built with search=search should equal the mu-weighted sum
    # of the corresponding plain stat's EC densities.
    search = rft.IntrinsicVolumes([3,4,5])
    x = np.linspace(0.1,10,100)
    stats = [rft.Gaussian(search=search)]
    ostats = [rft.Gaussian()]
    for dfn in range(5,10):
        for dfd in [40,50,np.inf]:
            stats.append(rft.FStat(dfn=dfn, dfd=dfd, search=search))
            ostats.append(rft.FStat(dfn=dfn, dfd=dfd))
            stats.append(rft.TStat(dfd=dfd, search=search))
            ostats.append(rft.TStat(dfd=dfd))
            stats.append(rft.ChiSquared(dfn=dfn, search=search))
            ostats.append(rft.ChiSquared(dfn=dfn))
    for i in range(len(stats)):
        stat = stats[i]
        ostat = ostats[i]
        v1 = stat(x)
        v2 = 0
        for j in range(search.mu.shape[0]):
            v2 += ostat.density(x, j) * search.mu[j]
        assert_almost_equal(v1, v2)
@dec.slow
def test_T2():
    # T is an F with dfn=1
    # NOTE(review): this redefinition shadows the earlier test_T2; it
    # differs from it by the factor of 2 on the T density and the x grid.
    x = np.linspace(0,5,101)
    for dfd in [40,50,np.inf]:
        t = rft.TStat(dfd=dfd)
        f = rft.FStat(dfd=dfd, dfn=1)
        for dim in range(7):
            y = 2*t.density(x, dim)
            z = f.density(x**2, dim)
            yield assert_almost_equal, y, z
@dec.slow
def test_search1():
    # Test that the search region works.
    # stat(x, search=search) should equal the sum of stat.density(x, i)
    # weighted by the intrinsic volumes search.mu[i] (same identity that
    # test_search2 checks via the search= constructor argument).
    search = rft.IntrinsicVolumes([3,4,5])
    x = np.linspace(0.1,10,100)
    stats = [rft.Gaussian()]
    for dfn in range(5,10):
        for dfd in [40,50,np.inf]:
            stats.append(rft.FStat(dfn=dfn, dfd=dfd))
            stats.append(rft.TStat(dfd=dfd))
            stats.append(rft.ChiSquared(dfn=dfn))
    for dim in range(7):
        for stat in stats:
            v1 = stat(x, search=search)
            v2 = 0
            for i in range(search.mu.shape[0]):
                v2 += stat.density(x, i) * search.mu[i]
            # BUG FIX: v1 and v2 were computed but never compared, so this
            # test previously asserted nothing.
            yield assert_almost_equal, v1, v2
def test_search4():
    # Test that the search/product work well together
    search = rft.IntrinsicVolumes([3,4,5])
    product = rft.IntrinsicVolumes([1,2])
    x = np.linspace(0.1,10,100)
    g1 = rft.Gaussian()
    g2 = rft.Gaussian(product=product)
    y = g2(x, search=search)
    z = g1(x, search=search*product)
    assert_almost_equal(y, z)
def test_search5():
    # Test that the search/product work well together
    search = rft.IntrinsicVolumes([3,4,5])
    product = rft.IntrinsicVolumes([1,2])
    prodsearch = product * search
    x = np.linspace(0,5,101)
    g1 = rft.Gaussian()
    g2 = rft.Gaussian(product=product)
    z = 0
    # expected value: EC densities weighted by the intrinsic volumes of
    # the combined product*search region
    for i in range(prodsearch.mu.shape[0]):
        z += g1.density(x, i) * prodsearch.mu[i]
    y = g2(x, search=search)
    assert_almost_equal(y, z)
@dec.slow
def test_hotelling1():
    # Asymptotically, Hotelling is the same as F which is the same
    # as chi^2.
    x = np.linspace(0.1,10,100)
    for dim in range(7):
        for dfn in range(5,10):
            h = rft.Hotelling(k=dfn).density(x*dfn, dim)
            f = rft.FStat(dfn=dfn).density(x, dim)
            yield assert_almost_equal, h, f
@dec.slow
def test_hotelling4():
    # Hotelling T^2 should just be like taking product with sphere.
    x = np.linspace(0.1,10,100)
    for dim in range(7):
        # search region with a single nonzero intrinsic volume at `dim`
        search = rft.IntrinsicVolumes([0]*(dim) + [1])
        for k in range(5, 10):
            p = rft.spherical_search(k)
            for dfd in [np.inf,40,50]:
                f = rft.FStat(dfd=dfd, dfn=1)(x, search=p*search)
                t = 2*rft.TStat(dfd=dfd)(np.sqrt(x), search=p*search)
                h2 = 2*rft.Hotelling(k=k, dfd=dfd).density(x, dim)
                h = 2*rft.Hotelling(k=k, dfd=dfd)(x, search=search)
                yield assert_almost_equal, h, t
                yield assert_almost_equal, h, f
                yield assert_almost_equal, h, h2
    # same identity on a general search region, expanding the mu-weighted sum
    search = rft.IntrinsicVolumes([3,4,5])
    for k in range(5, 10):
        p = rft.spherical_search(k)
        for dfd in [np.inf,40,50]:
            f = rft.FStat(dfd=dfd, dfn=1)(x, search=p*search)
            h = 2*rft.Hotelling(k=k, dfd=dfd)(x, search=search)
            h2 = 0
            for i in range(search.mu.shape[0]):
                h2 += 2*rft.Hotelling(k=k, dfd=dfd).density(x, i) * search.mu[i]
            yield assert_almost_equal, h, f
            yield assert_almost_equal, h, h2
def test_hotelling2():
    # Marginally, Hotelling's T^2(k) with m degrees of freedom
    # in the denominator satisfies
    # (m-k+1)/(mk) T^2 \sim F_{k,m-k+1}.
    x = np.linspace(0.1,10,100)
    for dfn in range(6, 10):
        h = rft.Hotelling(k=dfn)(x)
        chi = rft.ChiSquared(dfn=dfn)(x)
        assert_almost_equal(h, chi)
        chi2 = scipy.stats.chi2.sf(x, dfn)
        yield assert_almost_equal, h, chi2
        # NOTE(review): `p` below is computed but unused.
        p = rft.spherical_search(dfn)
        for dfd in [40,50]:
            fac = (dfd-dfn+1.)/(dfd*dfn)
            h = rft.Hotelling(dfd=dfd,k=dfn)(x)
            f = scipy.stats.f.sf(x*fac, dfn, dfd-dfn+1)
            f2 = rft.FStat(dfd=dfd-dfn+1,dfn=dfn)(x*fac)
            yield assert_almost_equal, f2, f
            yield assert_almost_equal, h, f
@dec.slow
def test_roy1():
    # EC densities of Roy with dfn=1 should be twice EC densities
    # of Hotelling T^2's.
    x = np.linspace(0.1,10,100)
    for dfd in [40,50,np.inf]:
        for k in [1,4,6]:
            for dim in range(7):
                h = 2*rft.Hotelling(dfd=dfd,k=k).density(x, dim)
                r = rft.Roy(dfd=dfd,k=k,dfn=1).density(x, dim)
                yield assert_almost_equal, h, r
@dec.slow
def test_onesidedF():
    # EC densities of one sided F should be a difference of
    # F EC densities
    x = np.linspace(0.1,10,100)
    for dfd in [40,50,np.inf]:
        for dfn in range(2,10):
            for dim in range(7):
                f1 = rft.FStat(dfd=dfd,dfn=dfn).density(x, dim)
                f2 = rft.FStat(dfd=dfd,dfn=dfn-1).density(x, dim)
                onesided = rft.OneSidedF(dfd=dfd,dfn=dfn).density(x, dim)
                yield assert_almost_equal, onesided, 0.5*(f1-f2)
@dec.slow
def test_multivariate_forms():
    # MVform with one sphere is sqrt(chi^2), two spheres is sqrt(Roy) with
    # infinite degrees of freedom.
    x = np.linspace(0.1,10,100)
    for k1 in range(5,10):
        m = rft.MultilinearForm(k1)
        c = rft.ChiSquared(k1)
        for dim in range(7):
            mx = m.density(x, dim)
            cx = c.density(x**2, dim)
            yield assert_almost_equal, mx, cx
        for k2 in range(5,10):
            m = rft.MultilinearForm(k1,k2)
            r = rft.Roy(k=k1, dfn=k2, dfd=np.inf)
            for dim in range(7):
                mx = 2*m.density(x, dim)
                rx = r.density(x**2/k2, dim)
                yield assert_almost_equal, mx, rx
def test_scale():
    # NOTE(review): no assertion here -- this only smoke-tests that
    # scale_space runs without raising.
    a = rft.IntrinsicVolumes([2,3,4])
    b = rft.scale_space(a, [3,4], kappa=0.5)
def test_F1():
    # The two local implementations F and F_alternative should agree
    # for dim >= 1.
    x = np.linspace(0.1,10,100)
    for dim in range(1,10):
        for dfn in range(5,10):
            for dfd in [40,50,np.inf]:
                f1 = F(x, dim, dfn=dfn, dfd=dfd)
                f2 = F_alternative(x, dim, dfn=dfn, dfd=dfd)
                yield assert_almost_equal, f1, f2
@dec.slow
def test_F2():
    # rft.FStat EC densities should match the F_alternative implementation.
    x = np.linspace(0.1,10,100)
    for dim in range(3,7):
        for dfn in range(5,10):
            for dfd in [40,50,np.inf]:
                f1 = rft.FStat(dfn=dfn, dfd=dfd).density(x, dim)
                f2 = F_alternative(x, dim, dfn=dfn, dfd=dfd)
                yield assert_almost_equal, f1, f2
@dec.slow
def test_F3():
    # rft.FStat EC densities should match the local F implementation.
    x = np.linspace(0.1,10,100)
    for dim in range(3,7):
        for dfn in range(5,10):
            for dfd in [40,50,np.inf]:
                f1 = rft.FStat(dfn=dfn, dfd=dfd).density(x, dim)
                f2 = F(x, dim, dfn=dfn, dfd=dfd)
                yield assert_almost_equal, f1, f2
@dec.slow
def test_chi2():
    # NOTE(review): duplicate of the earlier test_chi2 (differing only in
    # @dec.slow); this later definition shadows the earlier one.
    # Quasi-polynomial part of the chi^2 EC density should
    # be the limiting polyF.
    x = np.linspace(0.1,10,100)
    for dim in range(1,10):
        for dfn in range(5,10):
            c = rft.ChiSquared(dfn=dfn)
            p1 = c.quasi(dim=dim)
            p2 = polyF(dim=dim, dfn=dfn)
            yield assert_almost_equal, p1.c, p2.c
|
yarikoptic/NiPy-OLD
|
nipy/algorithms/statistics/tests/test_rft.py
|
Python
|
bsd-3-clause
| 15,903
|
[
"Gaussian"
] |
9de42a0c909dd2ea33ab63e1e43981d90b9be59e254d5169d433075987799cbd
|
"""
Actions manager for transcripts ajax calls.
+++++++++++++++++++++++++++++++++++++++++++
This module does not support rollback (pressing the "Cancel" button in Studio).
All user changes are saved immediately.
"""
import copy
import json
import logging
import os
import requests
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
from six import text_type
from student.auth import has_course_author_access
from util.json_request import JsonResponse
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.video_module.transcripts_utils import (
copy_or_rename_transcript,
download_youtube_subs,
GetTranscriptsFromYouTubeException,
get_video_transcript_content,
generate_subs_from_source,
get_transcripts_from_youtube,
manage_video_subtitles_save,
remove_subs_from_store,
Transcript,
TranscriptsRequestValidationException,
youtube_video_transcript_name,
)
from xmodule.video_module.transcripts_model_utils import (
is_val_transcript_feature_enabled_for_course
)
__all__ = [
'upload_transcripts',
'download_transcripts',
'check_transcripts',
'choose_transcripts',
'replace_transcripts',
'rename_transcripts',
'save_transcripts',
]
log = logging.getLogger(__name__)
def error_response(response, message, status_code=400):
    """
    Record *message* as the response status, log it at debug level, and
    return a JsonResponse carrying the updated *response* dict.

    By default responds with HTTP 400 (Bad Request).
    """
    response['status'] = message
    log.debug(message)
    return JsonResponse(response, status_code)
@login_required
def upload_transcripts(request):
    """
    Upload transcripts for current module.

    returns: response dict::

        status: 'Success' and HTTP 200 or 'Error' and HTTP 400.
        subs: Value of uploaded and saved html5 sub field in video item.
    """
    response = {
        'status': 'Unknown server error',
        'subs': '',
    }
    # --- request validation: locator, item, file, and video list ---
    locator = request.POST.get('locator')
    if not locator:
        return error_response(response, 'POST data without "locator" form data.')
    try:
        item = _get_item(request, request.POST)
    except (InvalidKeyError, ItemNotFoundError):
        return error_response(response, "Can't find item by locator.")
    if 'transcript-file' not in request.FILES:
        return error_response(response, 'POST data without "file" form data.')
    video_list = request.POST.get('video_list')
    if not video_list:
        return error_response(response, 'POST data without video names.')
    try:
        video_list = json.loads(video_list)
    except ValueError:
        return error_response(response, 'Invalid video_list JSON.')
    # Used utf-8-sig encoding type instead of utf-8 to remove BOM(Byte Order Mark), e.g. U+FEFF
    source_subs_filedata = request.FILES['transcript-file'].read().decode('utf-8-sig')
    source_subs_filename = request.FILES['transcript-file'].name
    if '.' not in source_subs_filename:
        return error_response(response, "Undefined file extension.")
    # split the uploaded name into base name (used as the sub id) and extension
    basename = os.path.basename(source_subs_filename)
    source_subs_name = os.path.splitext(basename)[0]
    source_subs_ext = os.path.splitext(basename)[1][1:]
    if item.category != 'video':
        return error_response(response, 'Transcripts are supported only for "video" modules.')
    # Allow upload only if any video link is presented
    if video_list:
        sub_attr = source_subs_name
        try:
            # Generate and save for 1.0 speed, will create subs_sub_attr.srt.sjson subtitles file in storage.
            generate_subs_from_source({1: sub_attr}, source_subs_ext, source_subs_filedata, item)
            for video_dict in video_list:
                video_name = video_dict['video']
                # We are creating transcripts for every video source, if in future some of video sources would be deleted.
                # Updates item.sub with `video_name` on success.
                copy_or_rename_transcript(video_name, sub_attr, item, user=request.user)
            response['subs'] = item.sub
            response['status'] = 'Success'
        except Exception as ex:
            # best-effort: surface any transcript-generation failure to the client
            return error_response(response, text_type(ex))
    else:
        return error_response(response, 'Empty video sources.')
    return JsonResponse(response)
@login_required
def download_transcripts(request):
    """
    Passes to user requested transcripts file.

    Expects ``locator`` and ``subs_id`` in request.GET; returns the
    transcript converted to SRT as an attachment.

    Raises Http404 if unsuccessful.
    """
    locator = request.GET.get('locator')
    subs_id = request.GET.get('subs_id')
    if not locator:
        log.debug('GET data without "locator" property.')
        raise Http404
    try:
        item = _get_item(request, request.GET)
    except (InvalidKeyError, ItemNotFoundError):
        log.debug("Can't find item by locator.")
        raise Http404
    if item.category != 'video':
        log.debug('transcripts are supported only for video" modules.')
        raise Http404
    try:
        if not subs_id:
            raise NotFoundError
        filename = subs_id
        # BUG FIX: the asset name was the literal string
        # 'subs_(unknown).srt.sjson' -- the {filename} placeholder had been
        # lost even though .format(filename=...) was still being called.
        content_location = StaticContent.compute_location(
            item.location.course_key,
            'subs_{filename}.srt.sjson'.format(filename=filename),
        )
        sjson_transcript = contentstore().find(content_location).data
    except NotFoundError:
        # Try searching in VAL for the transcript as a last resort
        transcript = None
        if is_val_transcript_feature_enabled_for_course(item.location.course_key):
            transcript = get_video_transcript_content(
                language_code=u'en',
                edx_video_id=item.edx_video_id,
                youtube_id_1_0=item.youtube_id_1_0,
                html5_sources=item.html5_sources,
            )
        if not transcript:
            raise Http404
        filename = os.path.splitext(os.path.basename(transcript['file_name']))[0].encode('utf8')
        sjson_transcript = transcript['content']
    # convert sjson content into srt format.
    transcript_content = Transcript.convert(sjson_transcript, input_format='sjson', output_format='srt')
    if not transcript_content:
        raise Http404
    # Construct an HTTP response
    response = HttpResponse(transcript_content, content_type='application/x-subrip; charset=utf-8')
    # BUG FIX: same lost placeholder here -- every download was named
    # '(unknown).srt' regardless of the transcript requested.
    response['Content-Disposition'] = 'attachment; filename="{filename}.srt"'.format(filename=filename)
    return response
@login_required
def check_transcripts(request):
    """
    Check state of transcripts availability.

    request.GET['data'] has key `videos`, which can contain any of the following::

        [
            {u'type': u'youtube', u'video': u'OEoXaMPEzfM', u'mode': u'youtube'},
            {u'type': u'html5', u'video': u'video1', u'mode': u'mp4'}
            {u'type': u'html5', u'video': u'video2', u'mode': u'webm'}
        ]

    `type` is youtube or html5
    `video` is html5 or youtube video_id
    `mode` is youtube, mp4 or webm

    Returns transcripts_presence dict::

        html5_local: list of html5 ids, if subtitles exist locally for them;
        is_youtube_mode: bool, if we have youtube_id, and as youtube mode is of higher priority, reflect this with flag;
        youtube_local: bool, if youtube transcripts exist locally;
        youtube_server: bool, if youtube transcripts exist on server;
        youtube_diff: bool, if youtube transcripts exist on youtube server, and are different from local youtube ones;
        current_item_subs: string, value of item.sub field;
        status: string, 'Error' or 'Success';
        subs: string, new value of item.sub field, that should be set in module;
        command: string, action to front-end what to do and what to show to user.
    """
    transcripts_presence = {
        'html5_local': [],
        'html5_equal': False,
        'is_youtube_mode': False,
        'youtube_local': False,
        'youtube_server': False,
        # assume a diff until local and server copies are proven equal below
        'youtube_diff': True,
        'current_item_subs': None,
        'status': 'Error',
    }
    try:
        __, videos, item = _validate_transcripts_data(request)
    except TranscriptsRequestValidationException as e:
        return error_response(transcripts_presence, text_type(e))

    transcripts_presence['status'] = 'Success'

    # Probe the contentstore for transcripts under the item's current `sub` id.
    filename = 'subs_{0}.srt.sjson'.format(item.sub)
    content_location = StaticContent.compute_location(item.location.course_key, filename)
    try:
        local_transcripts = contentstore().find(content_location).data
        transcripts_presence['current_item_subs'] = item.sub
    except NotFoundError:
        pass

    # Check for youtube transcripts presence
    youtube_id = videos.get('youtube', None)
    if youtube_id:
        transcripts_presence['is_youtube_mode'] = True

        # youtube local: transcript stored in the contentstore under the youtube id
        filename = 'subs_{0}.srt.sjson'.format(youtube_id)
        content_location = StaticContent.compute_location(item.location.course_key, filename)
        try:
            local_transcripts = contentstore().find(content_location).data
            transcripts_presence['youtube_local'] = True
        except NotFoundError:
            log.debug("Can't find transcripts in storage for youtube id: %s", youtube_id)

        # youtube server: query YouTube's caption (TEXT_API) endpoint
        youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
        youtube_text_api['params']['v'] = youtube_id
        youtube_transcript_name = youtube_video_transcript_name(youtube_text_api)
        if youtube_transcript_name:
            youtube_text_api['params']['name'] = youtube_transcript_name
        youtube_response = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])

        if youtube_response.status_code == 200 and youtube_response.text:
            transcripts_presence['youtube_server'] = True

        # check youtube local and server transcripts for equality
        if transcripts_presence['youtube_server'] and transcripts_presence['youtube_local']:
            try:
                youtube_server_subs = get_transcripts_from_youtube(
                    youtube_id,
                    settings,
                    item.runtime.service(item, "i18n")
                )
                if json.loads(local_transcripts) == youtube_server_subs:  # check transcripts for equality
                    transcripts_presence['youtube_diff'] = False
            except GetTranscriptsFromYouTubeException:
                # best effort: leave youtube_diff at its default (True)
                pass

    # Check for html5 local transcripts presence
    html5_subs = []
    for html5_id in videos['html5']:
        filename = 'subs_{0}.srt.sjson'.format(html5_id)
        content_location = StaticContent.compute_location(item.location.course_key, filename)
        try:
            html5_subs.append(contentstore().find(content_location).data)
            transcripts_presence['html5_local'].append(html5_id)
        except NotFoundError:
            log.debug("Can't find transcripts in storage for non-youtube video_id: %s", html5_id)
        if len(html5_subs) == 2:  # check html5 transcripts for equality
            transcripts_presence['html5_equal'] = json.loads(html5_subs[0]) == json.loads(html5_subs[1])

    command, subs_to_use = _transcripts_logic(transcripts_presence, videos)
    if command == 'not_found':
        # Try searching in VAL for the transcript as a last resort
        if is_val_transcript_feature_enabled_for_course(item.location.course_key):
            video_transcript = get_video_transcript_content(
                language_code=u'en',
                edx_video_id=item.edx_video_id,
                youtube_id_1_0=item.youtube_id_1_0,
                html5_sources=item.html5_sources,
            )
            command = 'found' if video_transcript else command
    transcripts_presence.update({
        'command': command,
        'subs': subs_to_use,
    })
    return JsonResponse(transcripts_presence)
def _transcripts_logic(transcripts_presence, videos):
    """
    Decide, from the gathered `transcripts_presence` flags, what the
    front-end should do and which subs value to use.

    Returns a (`command`, `subs`) pair.

    `subs` is the new value of the item.sub field that should be set in the
    module (may be the empty string).

    `command` is one of::

        replace: replace local youtube subtitles with server one's
        found: subtitles are found
        import: import subtitles from youtube server
        choose: choose one from two html5 subtitles
        use_existing: keep the current item.sub value
        not_found: subtitles are not found
    """
    command = None
    subs = ''  # new value of item.sub field, that should be set in module

    youtube_local = transcripts_presence['youtube_local']
    youtube_server = transcripts_presence['youtube_server']
    html5_local = transcripts_presence['html5_local']

    # YouTube transcripts take priority over html5 ones by design.
    if youtube_local and youtube_server and transcripts_presence['youtube_diff']:
        # both local and server copies exist, but they differ
        command, subs = 'replace', videos['youtube']
    elif youtube_local:
        # only the local youtube copy exists
        command, subs = 'found', videos['youtube']
    elif youtube_server:
        # only the server copy exists
        command = 'import'
    elif html5_local:
        # 1 or 2 html5 transcripts are present locally
        single_or_equal = len(html5_local) == 1 or transcripts_presence['html5_equal']
        command = 'found' if single_or_equal else 'choose'
        subs = html5_local[0]
    elif transcripts_presence['current_item_subs'] and not transcripts_presence['is_youtube_mode']:
        # no html5 transcripts, but the item already carries a usable sub value
        log.debug("Command is use existing %s subs", transcripts_presence['current_item_subs'])
        command = 'use_existing'
    else:
        command = 'not_found'

    log.debug(
        "Resulted command: %s, current transcripts: %s, youtube mode: %s",
        command,
        transcripts_presence['current_item_subs'],
        transcripts_presence['is_youtube_mode']
    )
    return command, subs
@login_required
def choose_transcripts(request):
    """
    Replaces html5 subtitles, presented for both html5 sources, with chosen one.

    The transcripts of the rejected html5 source are removed from the store and
    the item's ``sub`` attribute is updated to the chosen html5_id. YouTube ids
    are left untouched.

    Returns: status `Success` and resulted item.sub value or status `Error` and HTTP 400.
    """
    response = {'status': 'Error', 'subs': ''}
    try:
        data, videos, item = _validate_transcripts_data(request)
    except TranscriptsRequestValidationException as e:
        return error_response(response, text_type(e))

    chosen_id = data.get('html5_id')  # html5_id chosen by user

    # Every html5 source other than the chosen one is rejected; drop its subs.
    rejected_ids = [video_id for video_id in videos['html5'] if video_id != chosen_id]
    if rejected_ids:
        remove_subs_from_store(rejected_ids, item)

    # Persist the chosen id only when it actually changes the item.
    if item.sub != chosen_id:
        item.sub = chosen_id
        item.save_with_metadata(request.user)

    return JsonResponse({'status': 'Success', 'subs': item.sub})
@login_required
def replace_transcripts(request):
    """
    Replaces all transcripts with youtube ones.

    Downloads subtitles from youtube and replaces all transcripts with downloaded ones.
    Returns: status `Success` and resulted item.sub value or status `Error` and HTTP 400.
    """
    response = {'status': 'Error', 'subs': ''}
    try:
        __, videos, item = _validate_transcripts_data(request)
    except TranscriptsRequestValidationException as e:
        return error_response(response, text_type(e))

    yt_id = videos['youtube']
    if not yt_id:
        return error_response(response, 'YouTube id {} is not presented in request data.'.format(yt_id))

    # Fetch the transcript from YouTube; any failure becomes an HTTP 400.
    try:
        download_youtube_subs(yt_id, item, settings)
    except GetTranscriptsFromYouTubeException as e:
        return error_response(response, text_type(e))

    # Point the item at the freshly downloaded subs and persist.
    item.sub = yt_id
    item.save_with_metadata(request.user)
    return JsonResponse({'status': 'Success', 'subs': item.sub})
def _validate_transcripts_data(request):
    """
    Validates, that request contains all proper data for transcripts processing.

    Returns tuple of 3 elements::

        data: dict, loaded json from request,
        videos: parsed `data` to useful format,
        item: video item from storage

    Raises `TranscriptsRequestValidationException` if validation is unsuccessful
    or `PermissionDenied` if user has no access.
    """
    data = json.loads(request.GET.get('data', '{}'))
    if not data:
        raise TranscriptsRequestValidationException(_('Incoming video data is empty.'))
    try:
        item = _get_item(request, data)
    except (InvalidKeyError, ItemNotFoundError):
        raise TranscriptsRequestValidationException(_("Can't find item by locator."))
    if item.category != 'video':
        raise TranscriptsRequestValidationException(_('Transcripts are supported only for "video" modules.'))

    # parse data form request.GET.['data']['video'] to useful format
    videos = {'youtube': '', 'html5': {}}
    for video_data in data.get('videos'):
        if video_data['type'] == 'youtube':
            videos['youtube'] = video_data['video']
        else:  # do not add same html5 videos
            # NOTE(review): `.get('video')` looks up the literal key 'video',
            # not `video_data['video']` — so this guard is almost always true.
            # Duplicate ids are still deduplicated by the dict assignment below
            # (last mode wins); confirm before changing.
            if videos['html5'].get('video') != video_data['video']:
                videos['html5'][video_data['video']] = video_data['mode']
    return data, videos, item
@login_required
def rename_transcripts(request):
    """
    Create copies of existing subtitles with new names of HTML5 sources.

    Old subtitles are not deleted now, because we do not have rollback functionality.
    If succeed, Item.sub will be chosen randomly from html5 video sources provided by front-end.

    Returns: status `Success` and resulted item.sub value or status `Error` and HTTP 400.
    """
    response = {'status': 'Error', 'subs': ''}
    try:
        __, videos, item = _validate_transcripts_data(request)
    except TranscriptsRequestValidationException as e:
        return error_response(response, text_type(e))

    old_name = item.sub
    for new_name in videos['html5'].keys():  # copy subtitles for every HTML5 source
        try:
            # updates item.sub with new_name if it is successful.
            copy_or_rename_transcript(new_name, old_name, item, user=request.user)
        except NotFoundError:
            # subtitles file `item.sub` is not presented in the system. Nothing to copy or rename.
            # BUG FIX: the error response was previously built but discarded, so
            # the view fell through and reported 'Success' even though the
            # transcript could not be copied. Return the HTTP 400 instead.
            return error_response(response, "Can't find transcripts in storage for {}".format(old_name))

    response['status'] = 'Success'
    response['subs'] = item.sub  # item.sub has been changed, it is not equal to old_name.
    log.debug("Updated item.sub to %s", item.sub)
    return JsonResponse(response)
@login_required
def save_transcripts(request):
    """
    Saves video module with updated values of fields.

    Returns: status `Success` or status `Error` and HTTP 400.
    """
    response = {'status': 'Error'}

    data = json.loads(request.GET.get('data', '{}'))
    if not data:
        return error_response(response, 'Incoming video data is empty.')

    try:
        item = _get_item(request, data)
    except (InvalidKeyError, ItemNotFoundError):
        return error_response(response, "Can't find item by locator.")

    metadata = data.get('metadata')
    if metadata is not None:
        new_sub = metadata.get('sub')

        # Copy every submitted metadata field onto the video module.
        for metadata_key, value in metadata.items():
            setattr(item, metadata_key, value)

        item.save_with_metadata(request.user)  # item becomes updated with new values

        if new_sub:
            manage_video_subtitles_save(item, request.user)
        else:
            # If `new_sub` is empty, it means that user explicitly does not want to use
            # transcripts for current video ids and we remove all transcripts from storage.
            current_subs = data.get('current_subs')
            if current_subs is not None:
                for sub in current_subs:
                    remove_subs_from_store(sub, item)

    response['status'] = 'Success'
    return JsonResponse(response)
def _get_item(request, data):
    """
    Obtains from 'data' the locator for an item.

    Next, gets that item from the modulestore (allowing any errors to raise up).
    Finally, verifies that the user has access to the item.

    Returns the item.
    """
    usage_key = UsageKey.from_string(data.get('locator'))
    # This is placed before has_course_author_access() to validate the location,
    # because has_course_author_access() raises an error if the location is invalid.
    item = modulestore().get_item(usage_key)

    # use the item's course_key, because the usage_key might not have the run
    if not has_course_author_access(request.user, item.location.course_key):
        raise PermissionDenied()

    return item
|
proversity-org/edx-platform
|
cms/djangoapps/contentstore/views/transcripts_ajax.py
|
Python
|
agpl-3.0
| 21,639
|
[
"FEFF"
] |
6dc90a4524f4d0b867d7156eebe06d266065c49c817b80542db942873ecda1d0
|
#!/usr/bin/env python
#coding:utf-8
# File: lang_detect_nltk.py
# Author: Dr. Ivan S. Zapreev
# Purpose: Detecting language using a stopwords based approach
# This script requires one parameter:
# ${1} - is the input file name with a UTF-8 text
# This script tries to detect the language of the text
# based on the first 1024 UTF-8 characters thereof. The
# detected text language name (ASCII) is printed to the
# standard output. If the language is not detected then
# the script exits with an error code.
# Derived: From
# http://blog.alejandronolla.com/2013/05/15/detecting-text-language-with-python-and-nltk/
#
# Visit my Linked-in profile:
# <https://nl.linkedin.com/in/zapreevis>
# Visit my GitHub:
# <https://github.com/ivan-zapreev>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Created on November 14, 2016, 11:07 AM
#
import codecs
from sys import exit
from sys import argv
from itertools import islice
#Make sure we only work with the UTF-8 strings
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
try:
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
except ImportError:
print '[!] You need to install nltk (http://nltk.org/index.html)'
exit(1)
#----------------------------------------------------------------------
def _calculate_languages_ratios(text):
    """\
    Score the given text against every stopword list shipped with nltk.

    For each language known to nltk's stopwords corpus, count how many unique
    stopwords of that language occur in the analyzed text, yielding a mapping
    that looks like {'french': 2, 'spanish': 4, 'english': 0}.

    @param text: Text whose language want to be detected
    @type text: str

    @return: Dictionary with languages and unique stopwords seen in analyzed text
    @rtype: dict
    """
    # Tokenize and lowercase in one pass; a set keeps only unique words.
    words_set = set(token.lower() for token in word_tokenize(text))

    # Per-language "score": size of the overlap with that language's stopwords.
    return {
        language: len(words_set & set(stopwords.words(language)))
        for language in stopwords.fileids()
    }
#----------------------------------------------------------------------
def detect_language(text):
    """\
    Calculate probability of given text to be written in several languages and
    return the highest scored. In case the language can not be detected the
    script exits with an error code.

    It uses a stopwords based approach, counting how many unique stopwords
    are seen in analyzed text.

    @param text: Text whose language want to be detected
    @type text: str

    @return: Most scored language guessed
    @rtype: str
    """
    ratios = _calculate_languages_ratios(text)

    # The best guess is the language whose stopword list overlaps the most.
    most_rated_language = max(ratios, key=ratios.get)

    # Check if the language was indeed detected: a score of zero means no
    # stopword of any known language occurred in the text at all.
    if ratios[most_rated_language] == 0:
        print 'Could not detect the source language'
        exit(1)

    return most_rated_language
if __name__=='__main__':
    """
    Only read a small buffer of 1024 UTF-8 characters to do
    language detection, no need to do it on an entire file.
    """
    # NOTE: the triple-quoted string above is a no-op statement used as
    # documentation. argv[1] is the UTF-8 input file name (see file header).
    text = ''
    with codecs.open(argv[1], "r", "utf-8") as file:
        text = file.read(1024)

    language = detect_language(text)

    # Return the detected language capitalizing the first letter
    print language[0].capitalize() + language[1:]
|
ivan-zapreev/Distributed-Translation-Infrastructure
|
script/text/lang_detect_nltk.py
|
Python
|
gpl-2.0
| 4,201
|
[
"VisIt"
] |
06342928b6ceae06fd7d3219ddef0a5e4b12f01142daea5f3ed410f1ec7af0e3
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, The Horizomer Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
# dependencies: scikit-bio >= 0.2.3, < 0.3.0
from unittest import TestCase, main
from shutil import rmtree
from tempfile import mkdtemp
from os import makedirs
from os.path import join, exists
import numpy
import numpy.testing as npt
import pandas as pd
from skbio.util import remove_files
import skbio.io
from horizomer.distance_method import (
preprocess_data,
parse_blast,
normalize_distances,
cluster_distances,
detect_outlier_genes,
launch_blast,
launch_diamond,
distance_method)
class DistanceMethodTests(TestCase):
""" Tests for distance-method HGT detection """
def setUp(self):
""" Set up working directory and test files
"""
self.working_dir = mkdtemp()
self.target_proteomes_dir = join(self.working_dir, "DB")
if not exists(self.target_proteomes_dir):
makedirs(self.target_proteomes_dir)
# species 1
self.species_1_fp = join(self.target_proteomes_dir, "species_1.fasta")
with open(self.species_1_fp, 'w') as tmp:
tmp.write(species_1)
# species 2
self.species_2_fp = join(self.target_proteomes_dir, "species_2.fasta")
with open(self.species_2_fp, 'w') as tmp:
tmp.write(species_2)
# species 3
self.species_3_fp = join(self.target_proteomes_dir, "species_3.fasta")
with open(self.species_3_fp, 'w') as tmp:
tmp.write(species_3)
# species 4
self.species_4_fp = join(self.target_proteomes_dir, "species_4.fasta")
with open(self.species_4_fp, 'w') as tmp:
tmp.write(species_4)
# blast alignments (query vs. all species)
self.blast_fp = join(self.working_dir, "blast.txt")
with open(self.blast_fp, 'w') as tmp:
tmp.write(blast_alignments)
# blast alignments (query vs. all species)
self.phylip_fp = join(self.working_dir, "distances.txt")
with open(self.phylip_fp, 'w') as tmp:
tmp.write(phylip_output)
# list of files to remove
self.files_to_remove = [self.species_1_fp,
self.species_2_fp,
self.species_3_fp,
self.species_4_fp,
self.blast_fp,
self.phylip_fp]
def tearDown(self):
remove_files(self.files_to_remove)
rmtree(self.working_dir)
def assert_frames_equal(self, actual, expected, use_close=False):
"""
Custom assert_frames_equal() function for testing pandas DataFrame.
See
http://nbviewer.ipython.org/gist/jiffyclub/ac2e7506428d5e1d587b
for details.
Compare DataFrame items by index and column and
raise AssertionError if any item is not equal.
Ordering is unimportant, items are compared only by label.
NaN and infinite values are supported.
Parameters
----------
actual : pandas.DataFrame
expected : pandas.DataFrame
use_close : bool, optional
If True, use numpy.testing.assert_allclose instead of
numpy.testing.assert_equal.
"""
if use_close:
comp = npt.assert_allclose
else:
comp = npt.assert_equal
assert (isinstance(actual, pd.DataFrame) and
isinstance(expected, pd.DataFrame)), \
'Inputs must both be pandas DataFrames.'
for i, exp_row in expected.iterrows():
assert i in actual.index, 'Expected row {!r} not found.'.format(i)
act_row = actual.loc[i]
for j, exp_item in exp_row.iteritems():
assert j in act_row.index, \
'Expected column {!r} not found.'.format(j)
act_item = act_row[j]
try:
comp(act_item, exp_item)
except AssertionError as e:
raise AssertionError(
e.message + '\n\nColumn: {!r}\nRow: {!r}'.format(j, i))
def test_preprocess_data(self):
""" Test functionality of preprocess_data()
"""
gene_map, ref_db, species = preprocess_data(self.working_dir,
self.target_proteomes_dir,
['fa', 'fasta', 'faa'])
gene_map_exp = {'G1_SE001': '0_0', 'G1_SE002': '1_0',
'G1_SE003': '2_0', 'G1_SE004': '3_0',
'0_0': 'G1_SE001', '1_0': 'G1_SE002',
'2_0': 'G1_SE003', '3_0': 'G1_SE004',
'G2_SE001': '0_1', 'G2_SE002': '1_1',
'G2_SE003': '2_1', 'G2_SE004': '3_1',
'0_1': 'G2_SE001', '1_1': 'G2_SE002',
'2_1': 'G2_SE003', '3_1': 'G2_SE004',
'G3_SE001': '0_2', 'G3_SE002': '1_2',
'G3_SE003': '2_2', 'G3_SE004': '3_2',
'0_2': 'G3_SE001', '1_2': 'G3_SE002',
'2_2': 'G3_SE003', '3_2': 'G3_SE004',
'G4_SE001': '0_3', 'G4_SE002': '1_3',
'G4_SE003': '2_3', 'G4_SE004': '3_3',
'0_3': 'G4_SE001', '1_3': 'G4_SE002',
'2_3': 'G4_SE003', '3_3': 'G4_SE004',
'G5_SE001': '0_4', 'G5_SE002': '1_4',
'G5_SE003': '2_4', 'G5_SE004': '3_4',
'0_4': 'G5_SE001', '1_4': 'G5_SE002',
'2_4': 'G5_SE003', '3_4': 'G5_SE004'}
ref_db_exp = {}
for seq in skbio.io.read(self.species_1_fp, format='fasta'):
ref_db_exp[seq.metadata['id']] = seq
for seq in skbio.io.read(self.species_2_fp, format='fasta'):
ref_db_exp[seq.metadata['id']] = seq
for seq in skbio.io.read(self.species_3_fp, format='fasta'):
ref_db_exp[seq.metadata['id']] = seq
for seq in skbio.io.read(self.species_4_fp, format='fasta'):
ref_db_exp[seq.metadata['id']] = seq
num_species_exp = 4
self.assertDictEqual(gene_map, gene_map_exp)
self.assertDictEqual(ref_db, ref_db_exp)
self.assertEqual(species, num_species_exp)
def test_parse_blast(self):
""" Test functionality of parse_blast()
"""
hits_exp = {'G1_SE001': ['G1_SE001', 'G1_SE002', 'G1_SE003',
'G1_SE004'],
'G4_SE001': ['G4_SE001', 'G4_SE002', 'G4_SE003',
'G4_SE004'],
'G3_SE001': ['G3_SE001', 'G3_SE002', 'G3_SE003',
'G3_SE004'],
'G5_SE001': ['G5_SE001', 'G5_SE002', 'G5_SE003',
'G5_SE004'],
'G2_SE001': ['G2_SE001', 'G2_SE002', 'G2_SE003',
'G2_SE004']}
gene_map = {'G1_SE001': '0_0', 'G1_SE002': '1_0', 'G1_SE003': '2_0',
'G1_SE004': '3_0', '0_0': 'G1_SE001', '1_0': 'G1_SE002',
'2_0': 'G1_SE003', '3_0': 'G1_SE004', 'G2_SE001': '0_1',
'G2_SE002': '1_1', 'G2_SE003': '2_1', 'G2_SE004': '3_1',
'0_1': 'G2_SE001', '1_1': 'G2_SE002', '2_1': 'G2_SE003',
'3_1': 'G2_SE004', 'G3_SE001': '0_2', 'G3_SE002': '1_2',
'G3_SE003': '2_2', 'G3_SE004': '3_2', '0_2': 'G3_SE001',
'1_2': 'G3_SE002', '2_2': 'G3_SE003', '3_2': 'G3_SE004',
'G4_SE001': '0_3', 'G4_SE002': '1_3', 'G4_SE003': '2_3',
'G4_SE004': '3_3', '0_3': 'G4_SE001', '1_3': 'G4_SE002',
'2_3': 'G4_SE003', '3_3': 'G4_SE004', 'G5_SE001': '0_4',
'G5_SE002': '1_4', 'G5_SE003': '2_4', 'G5_SE004': '3_4',
'0_4': 'G5_SE001', '1_4': 'G5_SE002', '2_4': 'G5_SE003',
'3_4': 'G5_SE004'}
hits = {}
parse_blast(self.blast_fp, hits, gene_map)
self.assertDictEqual(hits, hits_exp)
def test_normalize_distances(self):
""" Test functionality of normalize_distances()
Phylip alignments (row IDs symbolize species_gene):
2_1 0.000000 0.379562 0.473355 0.521700
3_1 0.379562 0.000000 0.587981 0.660393
0_1 0.473355 0.587981 0.000000 0.722046
1_1 0.521700 0.660393 0.722046 0.000000
Z-score normalized by rows:
[[ nan -1.33276108 0.25673322 1.07602786]
[-1.36991607 nan 0.38082407 0.989092 ]
[-1.19162122 -0.06375679 nan 1.25537801]
[-1.3488877 0.30650842 1.04237928 nan]]
Re-ordered by rows and columns to correspond to ascending species names
[[ nan 1.25537801 -1.19162122 -0.06375679]
[ 1.04237928 nan -1.3488877 0.30650842]
[ 0.25673322 1.07602786 nan -1.33276108]
[ 0.38082407 0.989092 -1.36991607 nan]]
"""
num_species = 4
i = 0
species_set_dict = {}
species_set_dict_exp = {'IIII': 1}
gene_bitvector_map = {}
gene_bitvector_map_exp = {0: 'IIII'}
full_distance_matrix = numpy.zeros(
shape=(1, num_species, num_species), dtype=float)
full_distance_matrix_exp = numpy.array(
[[[numpy.nan, 1.25537801, -1.19162122, -0.06375679],
[1.04237928, numpy.nan, -1.3488877, 0.30650842],
[0.25673322, 1.07602786, numpy.nan, -1.33276108],
[0.38082407, 0.989092, -1.36991607, numpy.nan]]])
normalize_distances(phylip_fp=self.phylip_fp,
full_distance_matrix=full_distance_matrix,
num_species=num_species,
full_distance_matrix_offset=i,
species_set_dict=species_set_dict,
gene_bitvector_map=gene_bitvector_map)
numpy.testing.assert_almost_equal(full_distance_matrix[0][0],
full_distance_matrix_exp[0][0])
self.assertDictEqual(species_set_dict, species_set_dict_exp)
self.assertDictEqual(gene_bitvector_map, gene_bitvector_map_exp)
def test_cluster_distances(self):
""" Test functionality of cluster_distances()
"""
species_set_dict = {'IIIIIIII': 100, 'IIOOOIII': 50, 'IIIIIII0': 10,
'OIOIIIII': 5, 'IIIOOIII': 8, 'OOOOOIOO': 12}
gene_clusters_list_exp = [('IIIIIIII', ['IIIIIIII', 'IIIIIII0',
'IIIOOIII', 'OIOIIIII']),
('IIOOOIII', ['IIOOOIII', 'OOOOOIOO'])]
gene_clusters_list_act = cluster_distances(
species_set_dict=species_set_dict, species_set_size=30,
hamming_distance=2)
self.assertTrue(len(gene_clusters_list_exp),
len(gene_clusters_list_act))
for core_cluster_exp in gene_clusters_list_exp:
self.assertTrue(core_cluster_exp in gene_clusters_list_act)
for core_cluster_act in gene_clusters_list_act:
self.assertTrue(core_cluster_act in gene_clusters_list_exp)
def test_detect_outlier_genes(self):
""" Test functionality of detect_outlier_genes()
"""
species_set = ['IIII']
gene_bitvector_map = {0: 'IIII', 1: 'IIII', 2: 'IIII',
3: 'IIII', 4: 'IIII'}
full_distance_matrix = numpy.array(
[[[numpy.nan, 1.20467207, 0.03920422, -1.24387629],
[0.70710678, numpy.nan, -1.41421356, 0.70710678],
[0.70710678, 1.41421356, numpy.nan, -0.70710678],
[1.24387629, 1.20467207, 0.03920422, numpy.nan]],
[[numpy.nan, 1.26889551, -1.175214, -0.09368151],
[1.16820922, numpy.nan, -1.27436935, 0.10616013],
[0.50122985, 0.89462587, numpy.nan, -1.39585572],
[0.55177142, 0.85179386, -1.40356529, numpy.nan]],
[[numpy.nan, 1.33958186, -1.06239803, -0.27718382],
[1.2373387, numpy.nan, -0.02558867, -1.21175004],
[0.284687, 1.05732936, numpy.nan, -1.34201637],
[0.78533243, 0.62588164, -1.41121407, numpy.nan]],
[[numpy.nan, 1.38826553, -0.92766886, -0.46059667],
[1.15415521, numpy.nan, -1.2848518, 0.13069659],
[0.28409367, 1.05773152, numpy.nan, -1.34182519],
[0.26316662, 1.0717693, -1.33493592, numpy.nan]],
[[numpy.nan, 1.25537801, -1.19162122, -0.06375679],
[1.04237928, numpy.nan, -1.3488877, 0.30650842],
[0.25673322, 1.07602786, numpy.nan, -1.33276108],
[0.38082407, 0.989092, -1.36991607, numpy.nan]]])
outlier_genes_exp = set([0])
outlier_genes = detect_outlier_genes(
species_set=species_set,
gene_bitvector_map=gene_bitvector_map,
full_distance_matrix=full_distance_matrix,
stdev_offset=1.5,
outlier_hgt=0.5,
num_species=4,
total_genes=5)
self.assertSetEqual(outlier_genes, outlier_genes_exp)
def test_launch_blast(self):
"""Test functionality of launch_blast()
"""
align_exp = [{'qseqid': 'G1_SE001',
'sseqid': 'G1_SE002',
'pident': 60.92,
'length': 888,
'mismatch': 328,
'gapopen': 5,
'qstart': 1,
'qend': 870,
'sstart': 1,
'send': 887,
'evalue': 0.000000e+00,
'bitscore': 1098,
'qcovs': 100},
{'qseqid': 'G2_SE001',
'sseqid': 'G2_SE002',
'pident': 53.64,
'length': 494,
'mismatch': 229,
'gapopen': 0,
'qstart': 1,
'qend': 494,
'sstart': 1,
'send': 494,
'evalue': 0.000000e+00,
'bitscore': 566,
'qcovs': 100},
{'qseqid': 'G3_SE001',
'sseqid': 'G3_SE002',
'pident': 64.66,
'length': 116,
'mismatch': 40,
'gapopen': 1,
'qstart': 1,
'qend': 115,
'sstart': 1,
'send': 116,
'evalue': 2.9999999999999994e-56,
'bitscore': 164,
'qcovs': 100},
{'qseqid': 'G4_SE001',
'sseqid': 'G4_SE002',
'pident': 48.29,
'length': 292,
'mismatch': 147,
'gapopen': 1,
'qstart': 1,
'qend': 288,
'sstart': 1,
'send': 292,
'evalue': 1.9999999999999996e-106,
'bitscore': 305,
'qcovs': 100},
{'qseqid': 'G5_SE001',
'sseqid': 'G5_SE002',
'pident': 50.00,
'length': 670,
'mismatch': 320,
'gapopen': 6,
'qstart': 2,
'qend': 663,
'sstart': 1,
'send': 663,
'evalue': 0.000000e+00,
'bitscore': 674,
'qcovs': 99}]
df_exp = pd.DataFrame(align_exp,
columns=['qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'gapopen', 'qstart', 'qend',
'sstart', 'send', 'evalue', 'bitscore',
'qcovs'])
out_file_fp = launch_blast(self.species_1_fp,
self.species_2_fp,
self.working_dir)
df_act = skbio.io.read(out_file_fp, format='blast+6',
into=pd.DataFrame,
columns=['qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'gapopen', 'qstart',
'qend', 'sstart', 'send', 'evalue',
'bitscore', 'qcovs'])
self.assert_frames_equal(df_exp, df_act)
def test_launch_diamond(self):
"""Test functionality of launch_diamond()
"""
align_exp = [{'qseqid': 'G1_SE001',
'sseqid': 'G1_SE002',
'pident': 60.9,
'length': 888,
'mismatch': 328,
'gapopen': 5,
'qstart': 1,
'qend': 870,
'sstart': 1,
'send': 887,
'evalue': 0.000000e+00,
'bitscore': 1092.8},
{'qseqid': 'G2_SE001',
'sseqid': 'G2_SE002',
'pident': 53.6,
'length': 494,
'mismatch': 229,
'gapopen': 0,
'qstart': 1,
'qend': 494,
'sstart': 1,
'send': 494,
'evalue': 3.1999999999999995e-160,
'bitscore': 550.1},
{'qseqid': 'G3_SE001',
'sseqid': 'G3_SE002',
'pident': 64.7,
'length': 116,
'mismatch': 40,
'gapopen': 1,
'qstart': 1,
'qend': 115,
'sstart': 1,
'send': 116,
'evalue': 2.2999999999999996e-45,
'bitscore': 166.4},
{'qseqid': 'G4_SE001',
'sseqid': 'G4_SE002',
'pident': 48.3,
'length': 292,
'mismatch': 147,
'gapopen': 1,
'qstart': 1,
'qend': 288,
'sstart': 1,
'send': 292,
'evalue': 4.799999999999999e-84,
'bitscore': 296.2},
{'qseqid': 'G5_SE001',
'sseqid': 'G5_SE002',
'pident': 50.1,
'length': 669,
'mismatch': 319,
'gapopen': 6,
'qstart': 2,
'qend': 662,
'sstart': 1,
'send': 662,
'evalue': 1.4999999999999994e-192,
'bitscore': 657.9}]
df_exp = pd.DataFrame(align_exp,
columns=['qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'gapopen', 'qstart', 'qend',
'sstart', 'send', 'evalue', 'bitscore'])
out_file_fp = launch_diamond(self.species_1_fp,
self.species_2_fp,
self.working_dir,
tmp_dir=self.working_dir)
df_act = skbio.io.read(out_file_fp, format='blast+6',
into=pd.DataFrame,
columns=['qseqid', 'sseqid', 'pident',
'length', 'mismatch', 'gapopen',
'qstart', 'qend', 'sstart', 'send',
'evalue', 'bitscore'])
self.assert_frames_equal(df_exp, df_act)
def test_distance_method(self):
""" Test functionality of distance_method_main()
"""
output_hgt_fp = join(self.working_dir, "hgt_result.txt")
distance_method(self.species_1_fp,
self.target_proteomes_dir,
self.working_dir,
output_hgt_fp,
'diamond')
hgt_exp = []
hgt_act = []
with open(output_hgt_fp, 'r') as output_hgt_f:
for line in output_hgt_f:
if line.startswith('#'):
continue
if line not in ['\n', '\r\n']:
hgt_act.append(line.strip().split()[0])
self.assertListEqual(hgt_exp, hgt_act)
def test_distance_method_pass_alignments(self):
    """Distance method with precomputed alignments should report no HGT.

    Same pipeline as test_distance_method(), but supplies the tabular
    BLAST alignments instead of letting the tool align from scratch.
    """
    output_hgt_fp = join(self.working_dir, "hgt_result.txt")
    distance_method(self.species_1_fp,
                    self.target_proteomes_dir,
                    self.working_dir,
                    output_hgt_fp,
                    'diamond',
                    tabular_alignments_fp=self.blast_fp)
    expected_ids = []
    observed_ids = []
    with open(output_hgt_fp, 'r') as results_f:
        for row in results_f:
            # Ignore comments and blank lines; keep the leading token
            # (the candidate gene id) of each result row.
            if row.startswith('#') or row in ('\n', '\r\n'):
                continue
            observed_ids.append(row.strip().split()[0])
    self.assertListEqual(expected_ids, observed_ids)
phylip_output = """ 4
2_1 0.000000 0.379562 0.473355 0.521700
3_1 0.379562 0.000000 0.587981 0.660393
0_1 0.473355 0.587981 0.000000 0.722046
1_1 0.521700 0.660393 0.722046 0.000000
"""
blast_alignments = """G1_SE001 G1_SE001 100.00 862 0 0 1 862 1 \
862 0.0 1803 100
G2_SE001 G2_SE001 100.00 494 0 0 1 494 1 494 0.0 1023 100
G3_SE001 G3_SE001 100.00 115 0 0 1 115 1 115 1e-85 239 100
G4_SE001 G4_SE001 100.00 288 0 0 1 288 1 288 0.0 599 100
G5_SE001 G5_SE001 100.00 663 0 0 1 663 1 663 0.0 1377 100
G1_SE001 G1_SE002 58.11 888 345 6 1 862 1 887 0.0 1048 100
G2_SE001 G2_SE002 53.64 494 229 0 1 494 1 494 0.0 566 100
G3_SE001 G3_SE002 64.66 116 40 1 1 115 1 116 3e-56 164 100
G4_SE001 G4_SE002 48.29 292 147 1 1 288 1 292 2e-106 305 100
G5_SE001 G5_SE002 50.00 670 320 6 2 663 1 663 0.0 674 99
G1_SE001 G1_SE003 66.74 869 280 4 1 862 1 867 0.0 1191 100
G2_SE001 G2_SE003 64.65 495 174 1 1 494 1 495 0.0 655 100
G3_SE001 G3_SE003 68.97 116 35 1 1 115 1 116 2e-59 172 100
G4_SE001 G4_SE003 57.40 277 113 2 1 273 1 276 1e-117 335 95
G5_SE001 G5_SE003 58.01 674 262 9 1 663 1 664 0.0 769 100
G1_SE001 G1_SE004 65.25 872 291 6 1 862 1 870 0.0 1142 100
G2_SE001 G2_SE004 59.92 494 198 0 1 494 1 494 0.0 578 100
G3_SE001 G3_SE004 66.12 121 35 2 1 115 1 121 6e-59 171 100
G4_SE001 G4_SE004 55.23 277 120 1 1 273 1 277 7e-111 318 95
G5_SE001 G5_SE004 56.06 685 270 8 1 663 1 676 0.0 738 100
G1_SE001 G1_SE004 100.00 862 0 0 1 862 1 862 0.0 1803 100
G2_SE001 G2_SE004 59.92 494 198 0 1 494 1 494 0.0 578 100
G3_SE001 G3_SE004 66.12 121 35 2 1 115 1 121 6e-59 171 100
G4_SE001 G4_SE004 55.23 277 120 1 1 273 1 277 7e-111 318 95
G5_SE001 G5_SE004 56.06 685 270 8 1 663 1 676 0.0 738 100
"""
species_1 = """>G1_SE001
FDDSSLLEIFTSNNSNSSFSEPTVQLASYAEADPVEAASLSGILGQCTRVRHMMSSVTREVMPLQSTRSAKYVGPGV
PPFATAGQGGGDEQFKMADTPCKGVKMEKLKWAEDRHKPLVFLIGDAMYLMVPAENKITQYYNGICNGAGEVWDHLF
YKAECLHCFGFVGESVAYGNNGWSVADVGTVGTKGAGYMVYESLHATIPYALNGRQTDGLRLTYEPEDGSMLAANAI
PYGCVGPDCGDIGEVQSYGQMSNLGEYHLATFKLERDKMRVSAKDAKDSEYPVDGQEGFTDSSDGKGVDVYGPGQHA
YARLVVGKRDRQHATLAEMAEDGYADKMEPRCAQQPATINYNAGEVVGEERITTDIIAREYMFTKLTWNKTSPGYNY
VGAVQSTLLDFPGLWTATNVSREEQAKIHHPEGNVPDHLFCQPNNPPRDYPAKLILFLGILTSTIKSPAETWDAGLS
GQDSKIEMVKLHPLYHIDSSYAPMLNKHSSCIGCPTPLMLPPSAGKLLMLRPHEGTTTATESESYDTGSSAKFFLCY
SPDPVEIFGVPMMQAHNYHPKSVWFHLGNVLKHLGGSKDTSWRGLIVHMPRLLLEQLDAFTELGNGNHKYDSEISND
LGTEGLVALKRRILAQAYAAPNANDFYIGHDTLCAPFIASRKIWAWGKTQVSLEKGNAWAHAVLSPWIIKKEVAQGT
AITALIKSRPIDLPGNGIIGTHHDRPIGAMMVSAKAEEALAATAALPTTALAVSYETASDARQGLIGGLHSSPQFAP
AITGLINYLIERTDNVDLHMAFYVLHVGIVPQKYLARKSTRRGTCWDMHQGCLNTACRSLPAPNAQYHIPISKTLTL
TTAMHKTCIDLAKVWLGDAGGPL
>G2_SE001
GKLKNSIIDPWPGDPFAIASSDQTAAVAIVHAASEYISHYGYGYKAQLMLKIDIQESCNANGGAGGGGCQYAWWTAW
ASIFTQSPDVSISQTRVIYFTSAALGLIGFWSMLLGGAFRGGAEAWNAKADLKQAKVKTRKSAFFNNKNELASVPDI
VLPYPADKSDSSSDMKYFGSSMSKKMIAGYTPAAARPRVTITVEELKMSAKAEYLEDLLKNSPHLHGEIGDSTEKML
LSMTAQCKCTATSEALSYKDQKGGRDAAAPKKDFHGSCGVTFPYGCCYPEKKTAADEIVNLALGVCSSNLKVLQRPG
NIKVEAYIDACVVLDGNVKTGDGESRITLDEIHPFSVLLGEGNISKKVTGTHIGSHFDSITIPIGGQFGLAGVELIT
YQADSKDAIGRAYDKKPIILWFQGVAHELGGPIVPAADETIRIPDYITFVEFKHFDPSTSVVCEDDAAKLDENDKER
ANETKVQEEHSLKAVPTRKRLGARAKSIPFEL
>G3_SE001
VVEDNNQGAPGVVQIFYGNGTLHQEDCFSGPQAIGPGDASPGTLIQVVRGRKTHTEFVNALIKGTDNAPTGERVHIQ
WGLLMPPNFLGPEVKTNLYLDKNFKCFKQFGSIQVKAS
>G4_SE001
LNMYVATSHQEFTGQLYDGKKPTPLVDSPPMNDCQRMSWLFMHTLNTRYKSNDLANGEVRLKAQKHVYVQSFKAATA
YASKVILIEVVTLEQVKSSTLALANAFEKISVAVYKQLLRYATVSETTPGSVLQVEVGARDGILFDGEMLVHSDEAN
SIWGLVLYKGSAKSKLHFGYLFPVTAVIGKVTFPKFKRHPNAGYVDGGLPALKMAFTLTFKFSSHFYPRVQDQRFKD
WINVFHVPYFWGDVKKQRALNLGSTLELLNGVVSDPCEYRLLEETGLGGKAKNAVRT
>G5_SE001
DTGFASEDYVGVEWHYTEVIVVLESPKDRRYKSTAFPKEIGCGYGTLAENRSIWERGRTEPNANELISSSPPLVFPP
IMAHAHGNLPPYGQSKWGSVTWYKLLLALIAETYAVLNVLGLTADPLPRRLGVGVTNVCGAHVLFHDEEKSKEGQTT
VKSSLLIDIEGAALKSILQYFLTEASDNGKDTKENLKPCVYEIRWMSEEGSIAVPDYLSATDFACGSGFVFLMMVNK
FGYFEIRDGQICGGEGTLLILVLGQEIPDEKYFMAKGTTFGRNDFESNAMNHQKVVMTILPNNWPISGVKTDTADQI
LDGCFGFIPLPSARKATFAVDSALGTGSHLIKRTGTNAMVIYVVIVLVCATLVPGSPNYLTGIMLSDVQLLVCDALS
DSVKWLFAPTKLLEIKPTYIVTADMHTSTETKAQKDVIGKRMDFASNGLNTRAIKIEFQLLYSMAYGFLGFLVLRAC
TCGKFAQIVNDVCATLVWGDALGYLSATNQNTLEYGTTGHSENTESFELNPYKIENQQTDEAPRIANKIALKRNGAA
AGRMAANYTLGDFYLLTEYSCNNCKVTDGAVFDYATGVERGLDRHTEVQLVTPEPLLTGEAQNKHQLAVRVGWLAYA
QFMAPPIEADVTQTSLLPASVTRGYERSGETGSGFTKTALGNEAGAQ
"""
species_2 = """>G1_SE002
FDDVSLLNIVTSDNSQTKFNVPTVQLSGVLEAEPTELAVLSAILAMCNAVRQILPGVTRDPTDLAKRRTARYVGPGV
PPFATAGQGGGDEKVDMNETPCKGFPIGQLIWAEERDKALVFLNSENVYLLVPSENKTEYLEGICNGATNVWGHLFY
RSDCLHCFALVGDSVPSGNGGWQVTDLGAVGTRATGYMIYEHLQAGIPYALDGLQTAGLRITYAPQVANMLPANAIP
YDCIGPDCGEIGEVLAYGHCSSLGEYHLESFKLERDKLKVSAKSATECEYPVGGTIQFTDTSSASGVDVYGPGHHVY
ERLVVAEKDHQLASFAELADDGHADKVDEACAEGSATIEYSKGEEKGEDVIPTTTLFAKTYMTKTVQRGKTSPGYNY
AGTVKSTLLAMPGMWVADNIAYEEQAKIHHPQGNVPNHLFCNPNNLPRDYPAKLILFLSILTAEIKSPMAVWTAGLS
GQDNRIRLIKLEALWHIDVHCAAPKPLPAPSMYLPCLKKSSIPLGVPDELMLPKATGKLLMLRTSHEKSKIATEFIA
YDSNMSVKFFLCYAPDQVEIYGEPFVQSQKSTPKAVWFHLGAFLKHLAGSKDKAWKGLIVHMPRILIEKLECFYDLG
NGNHNYDSEISNDLGTEGLVTLARRILAEAYAAPNDSEFYIGQDTLCSPFVASRKAWSWGRIQVSLEKGDEHKNAVL
SPWRIKKEVAMGAPITSLKKSGPIGLPANGIVGSHHDKPIKERVVSANATEALAALGALARTQIAASTQIASQEREG
KYGALQMTPKFAYSITGSISYPIERTDKVDILMARYVLHEGIIEQSYLARGSANRGLCWVYGQGCLNTACSPTGPTL
PAPNAQYHLPLSSTLVTESAMGKTCIDPVKGWPGDAGGPL
>G2_SE002
GKLKDSFFHPWPTDPLATSRTSQTSALAIIEMPSSYITHYGYGFWFHKMLKYNFSDSCQAKGGAGDGGCNRDWWTSW
ADCFQQNPDISVAQSRVMYVESAALGLMGFWFLHLGGAFKGGEEKWHVKTDLRCSNVAPKSAGFLKDKQRLASFPDI
RTPKGKDKSDTSTNMNYFGSLLSQRMISGYTPKAGRPRITLIVEELKMAKKVKYLTDFLKMIPHLHHANGDHEEKEL
LGLTPQAKCSATNQFSCFQHSKTGTDPAALKMAFNGSCGFTLSYQACFEYHASAGSAILHITVQICPSGLKVLKRQG
AVKVAASAEYAVVLDGLSKYYDGSSRIIADEHKPLQVLLGAGTLNNGVMGTHWGSHVESMTIAIGGEFGLASVELSS
YQADSCNALGSAYDNKPIILWFQAVAHNLGGPMIPSNNATVRIPQYVSFVEFEHFNPSTGVVCQDDQNRLDQDDRER
GDREGVQESHGLKALPTLKRLSKQALSIAFDL
>G3_SE002
VVEDNSQGAPGVVSVFFGNGTLHQADCFLGPQAVGGGDTHPGTLVQVIRGRKAHTELVNALVGTSNNAPTGDQVHIQ
WGISVTSEPMGPEASNKLFKIDKNFRCFKQFGSAEVWTS
>G4_SE002
MNMYIATADMQFTGKFHDGEKSAPALDAPALKEDEMFQWIFTHKKSTKYDSADLLKGEVQLKDRKHVYIDDFTADAA
FSSKVFVLEVASSKQTKTAALGLRMVLEKSNIAVVKERLRYATVYDKTRGTLLIVGVGYRNGVLFDGEMLVHNEEGN
AIWGLVLAKGDASTLMHVGYIVASASVIVSVTFRKFNRKPNDGYSDGGLPTLKASFSWTFRFCNHFWTEIFDQRLTR
QVQDIIAVISEPFYYSDAKRERKLHLGNTLKVLSGVVSDPCEFKLMDESKLAGAQKTLART
>G5_SE002
TGVESDSYIGEDPHYSNIVTVIDNPNEQKYKTNAFPQSMGCGYGSVAESHKIFGRGRHEPHLKHVMYSSPPFIFPPI
LSTANGNEPAYGQSQRGSAAWYKWLSKVSNVKIPLISSSSAALLVLGLTSLAVPKGLGSGWTAGCGNFVIFHGEEGN
EEGQATVASDLLIGVERGVLRAVLGYYLTESSDNKKDTEEDLRPCIYIIRWPSREGLQSVRNIVKATESAVTPGIVF
FMMISKFATFILGDGKVCGGAGMLLINILGGEIPEVKYFQAKGTTFGYGGFATGSMDHDNTVTVPPNNWPMTGGLTS
IAFQLLSDTFGFVPKPIAQRATFALEEELGTNAQSIKTTDTKALVIYVVGLIVCASLTPGQSHVLHGIILSDVRLVV
CDAASRGVQYAPTRELEIKPTYIFPTDSHESMATQAKTELLGAQGEFAANGLGIEKEHQSDYSFAYGFLGFISFRAG
TCGKFAEIVTDTCKSMVFGAQLRTLWASKETTLEYATDGHTAQSESWPLGPFKVEKRSTDEAKSVATNVGLKVNGED
AARETEEYRLGDFYLLTEYVDNCKVTEGKWLDYASSVEKGSDRHMKVQMIAPKPLISGKGRAGSQASNRVGWLEYRN
HMASPLESEVTRSHLDGACVRRGYDRVGEMGSGLTKNSLNISEAAAH
"""
species_3 = """>G1_SE003
FDDISLLEIVTSDNSNSSLSVATVQLMTYAEALPVEAASLSGQLAQCTTVRHIVSSVSRDVMPLQSTRSARYVGPEV
PPFKTAGQGGGDEEFNMSESPCKGLPMDKLKWAEERHKALVFLLGDAMFLLVPAENKTKWYKGICNGSGEVWDHLFY
KSECLHCFALVGMSVAYGNNGLQVAHVGTVGTKGAGYMIFEWLGAAIPYALNGLQTDGLRITYEPQIGAMLLANIPY
GCIGPDCGDVGEVHNYGQCSNLREYHLATKKLERDKMRVTAKDAKECEYPVEGQEGFTDSSDGSGVDVFGPGSHAFA
RLVVGERDHQHATLAELAEDGYADKMENTCAQGPGTINYNAGEEVGEEVIPTDIIARTYMLKKLQRNKTSPGYNYVG
TVRSTLLAMPGLWTATEVASDEQAKIHHPQGNVPDHLLCKPNSPKQDYPAKLIVFLGILTSTITSPAKVWDAGLSGQ
EDRIELIKLEPLYHINIDYAPLLQRHSICVGCPTPLMVEKSAGKLLMLRTPHEGSTTATEFESYDTGSSVKFFLCTS
PDAIEIYGVPLLQSHDSHPKSVWFHLGNFLKHLGGTKDTSWKGLIVHMPRFVLEKLEAFTQLGKGNHKYDSEISNDL
GTEGLIALTRLILAEASAAPQLNDVYIGQDTLCKPFIASRKAWAWGKIQISLEKGNAWKHSVLSPWIIKKEVAQGAA
VTAKTKSGPIDLPGNGIVGPHKDKPIDCRMVSAHAEEALAALAALPRTTYAVSTETAREAREGLYGALHMSPQFAPA
ITGLLNYIIERTDDVDLLMAFYVLHVGIIEQLYLARKSTNRGLCWDFGQGCLNHACLPAPNAQFHVPISKTLTLETA
LHKTCIDLLKGWLGDSSGAL
>G2_SE003
GKLKDSFFDPWPGDPLATARSHQTAALDIVHAASSYITHYGYGYKSHKMLKYDVSDSCEAKGGAGGGGCQHDGWTTW
AAIFTQSPDISIAQNRVIYFETAALGMMGFWFMELGGVFRGGEEAWTVRAELKCANGAPKKATFFTDKQKLASVPDI
VTPKGVSKFDSSTNMKYMGSVMSKKGLSGYIPAAGRPRITMEVEELKWASKAEYLEDFLKMSPVLHGEIGDHTKKEL
IGLTPQCKCEKTSQALSFKDQKSGVDPAALKKAFGHQSCGISFSYGACFEEKKAGADQIVHLSVQVCSNYLKVLKKQ
HAIKVEAYFESCVELDGLSKYYDGRSPVIIDEHRPLSVLLGAGQLTRKVTGTHIGTHISSMTIAIGGQFGLASVELW
NYQADACDAIGSAYDSEPIILWFQGAAHPLGGPMIPANDETVRVADYVTFVEFEHYDPHTGIVTEDDAAKLDQSERD
RGDETKVQEAHSLEAVPTLKRLGAQAKSVPFEL
>G3_SE003
VVEDNNQGAPGVVYVFHGNGTLHDEDCFTSPRAIGPPDSHPGTLVKVIHGRKAHTEFVNALIGGSENAPTGDRVHIQ
RGIVMPPEPLGPEGQANLDGLDKNFRCLKQLGTAEVRAS
>G4_SE003
LNMYVAKADVQFTGQLHDGKKPSPLFSAPALNDDERLEWVFTHIQSTRYNHGDLQKGQVQLKVRKHVYIQEFEADSA
FSAKIIVLEVKSLESKSAALALKAAFELSNVAVYKQRLRYAKVYAETRGVILLVQVGAREGILFDGAMLIHDDEADV
IVGLALHKGSAQSIMHIGLMFPAAAVIGKVTYCKFTRKPNDGYVNGGLPALKMAFPLSFKFSDHFFDEIQDQAFIRF
VKDWIAIVQVSYFYGDIQKQRKTHLGSTIELLSGVVSDPCEFKFLYFVLLHLRLAITFSEKQLGSLDLLSKMPTLID
ETNLAGADKNLAKT
>G5_SE003
NTGAESVPYVGEEWKYTNVVAILENPESQYYKSSAYPKEIGCAYGGRAPSHSIWERGRHEPNADHLLASSSPLIFAP
VAAHARGDEPPYGQSKWGTLTIHKWLAKTDSKLKLLLLSATYETFCVLGLTAAAVPKDLGKGVTNVCGNVVLFHDAK
GSVEGECTIKSSLLIGIEGAALREILEYFLKQASDKKKDTNTNLKPCLYIIRWASDEGLISVTNYLKATESAIVCGF
VFFMMISKLIYFVFKDGKVCGGEGMVLILILNGEIEEVKYFVAKGTTFGRNGFATNCLSYQGVMTAIPNEWPITGGL
TNTAFQLLDGSFGGVPLPTARKATFANDTSRGTNAQIIKTTDTRSELVIYVVKLIVCSTMNPGQPNLLHGIMLSDNR
LVVCNAASSGVKYTPTKLLEIKPSYYLPGDSHQSTKTKSQKEVIGLRLEFAANGLSIEVEFALSYSFGYGFLGILSL
RAANCGKFAAIVNDTCKTNTWRDQLNVIWAPAEGTLQLATTGQSENTESFDLGPYKVENKHSDEAPRIASKIGLKTN
GIDAGREPEEYAIRGDYYLLTDYCSNLEVTYGKVFDFAAGTEKGLDRHMEVQLITPSPMLSGKGKDQSVRAGWLAYA
NFMAPPLKREVTQSSLDGASVTRGYECHGEMGLSLTTTSLGITERGAQ
"""
species_4 = """>G1_SE004
FDDSSLLEIFTSNNSNSSFSEPTVQLASYAEADPVEAASLSGILGQCTRVRHMMSSVTREVMPLQSTRSAKYVGPGV
PPFATAGQGGGDEQFKMADTPCKGVKMEKLKWAEDRHKPLVFLIGDAMYLMVPAENKITQYYNGICNGAGEVWDHLF
YKAECLHCFGFVGESVAYGNNGWSVADVGTVGTKGAGYMVYESLHATIPYALNGRQTDGLRLTYEPEDGSMLAANAI
PYGCVGPDCGDIGEVQSYGQMSNLGEYHLATFKLERDKMRVSAKDAKDSEYPVDGQEGFTDSSDGKGVDVYGPGQHA
YARLVVGKRDRQHATLAEMAEDGYADKMEPRCAQQPATINYNAGEVVGEERITTDIIAREYMFTKLTWNKTSPGYNY
VGAVQSTLLDFPGLWTATNVSREEQAKIHHPEGNVPDHLFCQPNNPPRDYPAKLILFLGILTSTIKSPAETWDAGLS
GQDSKIEMVKLHPLYHIDSSYAPMLNKHSSCIGCPTPLMLPPSAGKLLMLRPHEGTTTATESESYDTGSSAKFFLCY
SPDPVEIFGVPMMQAHNYHPKSVWFHLGNVLKHLGGSKDTSWRGLIVHMPRLLLEQLDAFTELGNGNHKYDSEISND
LGTEGLVALKRRILAQAYAAPNANDFYIGHDTLCAPFIASRKIWAWGKTQVSLEKGNAWAHAVLSPWIIKKEVAQGT
AITALIKSRPIDLPGNGIIGTHHDRPIGAMMVSAKAEEALAATAALPTTALAVSYETASDARQGLIGGLHSSPQFAP
AITGLINYLIERTDNVDLHMAFYVLHVGIVPQKYLARKSTRRGTCWDMHQGCLNTACRSLPAPNAQYHIPISKTLTL
TTAMHKTCIDLAKVWLGDAGGPL
>G2_SE004
GKLKDSFFDPRPGNPLAVARSHQKAAHAIIHAASNSITHYGYGFRSHKMLKYDVSDAVEAKGGAGGGGCQHDRWKTW
AEIFTQSSDISIAQSRVVYFESAALGLMGFWFMHLGGAFHGGEKAWNVKADLKCANVAPKKATIGRWEVPREGVEDY
FTDEKKQAVFSSTDMKYMGSVMAKKGMSGYTPKAGSRRICVSVEEIKMASDAEYLEEFLKQSPHLRGEIGDHTKKQL
VGMNPQCNCSRTSKALSFKEQSGGVDPAELKKPFHGSCGVTFSYGACFEEKKSGADEIVELQVQTCVSHLKVLKREG
ALKVEAYIESCVVLDGLSKYYDGRSCVVVDEHRPLRVLLGEGTKSKKVTGTHIGTHIKSMTIAIGGQFGLASVESTS
YTADVCDAIGSAYDSKPIVLWFEGAEKEQGGTVIPSNDETARLPDYVTFVEFKHFDPSTGLLCDGVAAKLDQDEKER
GSETKVQEGHALSAVSTLKHLGAQAKSIPFEL
>G3_SE004
VVEDNNQGAPGVVSVFYGDSQLHQEDCFTSPKAIGPGDRHPGTLVQVIRGRKVHTEFTNALIGATEGARTGDRVHIQ
WGIIMEGNAAPPEPLGPESNSNLFGVDKNFRCFKQLGSAEARAS
>G4_SE004
MNMYVATSDIQFSGQVHEGKRPSPLFDSPALGDDKRLRCVVTHIQSTRWDSGDLQKGEVHLKARKHSYIQTLEADSS
FSAKVTVLDIESLSQSKSAALALRARLEKGDVAVYAQRLKSATVYAKTRGTLLLVEVGARMGILYDGEFLIHADEAN
AAVGLVLHKGSAQSVMPIGYLFPPAAVIGKVTFCKFTRAPNDGYVDGELPALKMSFALSFKFTSYFFPEVQDQSFNR
FVKNWIAIVQVAYFYGDIQRQRRTHLPDTIELLSGVVIDPCEGHLLYFVLLHLRLDITYVESRMGSLKQLTKMSTLI
DESNLSGSEKNLTST
>G5_SE004
DTGVESVEYVGEEWHYTTVVPDLENPEKENYKSSTFPKSIGCGFGNLAQSHTIGERGRHEPNQDHLLAQSPPLIFPP
VLAHAKGNEPPYGQSQWGVATWYKWLAKATSKLKLPLIASTYVLLLVLRFSALALPKDLGKGLANVCGNAVLFHDAK
GREENQAIIITSLLITIEGQALREIKDYFLTSAPTNQKDTDTNLQPCVYVMRRASEEGLISVTNYLKATASAIVCGF
VFFMMLAAFVYFKCESGKVCGREGMLQILILGREIWDGKYFLAKKTTFGTNGFATQCMDYQRVLTPLPNEQAAAPGF
CGKDHSWPINAGRTNTAFQLLDGVFGFRSLPKARKATFAVDTALGTNAELIKTADTKSLVIYLVKLITCATLMPGQP
NLLHGIMLCDTRLVVCDAASSGVKFAPCKLLEIKPNYMLPADSHTSTKTKSQKEVLGLRVEFADNGLSIQVEFALSY
SFGYGFLGFLSLRAGHCGTFAEAVNDACETIVWRDRKGVVWATNENSCAVTGHSTDTESFKLGTYNVENNHTDEAPR
IANKIGLKVTGIAAGREPEEYSIGDFYLVTNGCNNVQVIHGKIFDFASGIESGLDRHMEAQLVTPKCLLTGAGKAQV
QLSIRVGWLMYANLMAPPLKEDVHQDSLDGVSVTRGYECSGEMGLGLTETSMGITDAGAH
"""
if __name__ == '__main__':
main()
|
qiyunzhu/horizomer
|
horizomer/misc/distance-method/test_distance_method.py
|
Python
|
bsd-3-clause
| 34,762
|
[
"BLAST",
"scikit-bio"
] |
6e23c8b5cc43234c106a433b7e623f9671b73a4452d14ed39bd1e0cd1b772815
|
#!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Antonio Gonzalez Pena"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Antonio Gonzalez Pena, Kyle Patnode", "Yoshiki Vazquez-Baeza"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Antonio Gonzalez Pena"
__email__ = "antgonza@gmail.com"
from qiime.plot_taxa_summary import make_legend
from qiime.colors import get_qiime_hex_string_color
from qiime.util import parse_command_line_parameters, get_options_lookup
from qiime.util import make_option
from qiime.plot_semivariogram import fit_semivariogram, FitModel
from qiime.parse import parse_distmat, parse_mapping_file
from qiime.filter import (filter_samples_from_distance_matrix,
sample_ids_from_metadata_description)
from matplotlib import use
use('Agg', warn=False)
from pylab import (plot, xlabel, ylabel, title, savefig, ylim, xlim, legend,
show, figure)
from numpy import asarray
import os
from os.path import splitext
from StringIO import StringIO
from copy import deepcopy
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = "Fits a model between two distance matrices " +\
"and plots the result"
script_info['script_description'] = "Fits a spatial autocorrelation model " +\
"between two matrices and plots the result. This script will work with " +\
"two distance matrices but will ignore the 0s at the diagonal and the " +\
"values that go to N/A. See distance_matrix_from_mapping.py."
script_info['script_usage'] = []
script_info['script_usage'].append(("Fitting", "For this script, the user "
"supplies two distance matrices (i.e. resulting file from "
"beta_diversity.py), along with the output filename (e.g. semivariogram), "
"and the model to fit, as follows:", "%prog -x distance.txt -y unifrac.txt "
"-o semivariogram_exponential.png"))
script_info['script_usage'].append(("", "Modify the the default method to "
"gaussian", "%prog -x distance.txt -y unifrac.txt --model gaussian -o "
"semivariogram_gaussian.png"))
script_info['script_usage'].append(("Color semivariograms by a category in"
" the metadata mapping file", "Using a header name in the mapping file"
" (Time), create two separate semivariograms in the same plot, an "
"accompanying file with the color coding will be created"
"(categories_legend.eps), both the legends and the plot will be in eps "
"format.", "%prog -y unweighted_unifrac_dm.txt -x time_dm.txt --model "
"gaussian -m Fasting_Map.txt -o categories.eps -c Treatment"))
script_info['output_description'] = "The resulting output file consists of a " +\
"pdf image containing the plot between the two distances matrices and the" +\
" fitted model"
script_info['required_options'] = [
make_option('-x', '--input_path_x', type='existing_filepath',
help='path to distance matrix to be displayed in the x axis'),
make_option('-y', '--input_path_y', type='existing_filepath',
help='path to distance matrix to be displayed in the y axis'),
make_option('-o', '--output_path', type='new_path',
help='output path. directory for batch processing, ' +
'filename for single file operation'),
]
script_info['optional_options'] = [
make_option('-b', '--binning', type='string',
default=None, help='binning ranges. Format: [increment,top_limit], when ' +
'top_limit is -1=infinitum; you can specify several ranges using the same ' +
'format, i.e. [2.5,10][50,-1] will set two bins, one from 0-10 using 2.5 ' +
'size steps and from 10-inf using 50 size steps. Note that the binning is ' +
'used to clean the plots (reduce number of points) but ignored to fit the ' +
'model. [default: %default]'),
make_option('--ignore_missing_samples', help='This will overpass the error raised ' +
'when the matrices have different sizes/samples', action='store_true', default=False),
make_option(
'--x_max',
type='float',
help='x axis max limit [default: auto]',
default=None),
make_option(
'--x_min',
type='float',
help='x axis min limit [default: auto]',
default=None),
make_option(
'--y_max',
type='float',
help='y axis max limit [default: auto]',
default=None),
make_option(
'--y_min',
type='float',
help='y axis min limit [default: auto]',
default=None),
make_option(
'-X', '--x_label', default='Distance Dissimilarity (m)', type='string',
help='Label for the x axis [default: %default]'),
make_option(
'-Y', '--y_label', default='Community Dissimilarity', type='string',
help='Label for the y axis [default: %default]'),
make_option('-t', '--fig_title', default='Semivariogram', type='string',
help='Title of the plot [default: %default]'),
make_option('--dot_color', type='string', help='dot color for plot, more info:' +
' http://matplotlib.sourceforge.net/api/pyplot_api.html' +
' [default: %default]', default="white"),
make_option('--dot_marker', type='string', help='dot color for plot, more info:' +
' http://matplotlib.sourceforge.net/api/pyplot_api.html' +
' [default: %default]', default="o"),
make_option('--line_color', type='string', help='line color for plot, more info:' +
' http://matplotlib.sourceforge.net/api/pyplot_api.html' +
' [default: %default]', default="blue"),
make_option('--dot_alpha', type='float', help='alpha for dots, more info:' +
' http://matplotlib.sourceforge.net/api/pyplot_api.html' +
' [default: %default]', default=1),
make_option('--line_alpha', type='float', help='alpha for dots, more info:' +
' http://matplotlib.sourceforge.net/api/pyplot_api.html' +
' [default: %default]', default=1),
make_option('--model', type='choice',
choices=FitModel.options, default='exponential',
help='model to be fitted to the data. Valid ' +
'choices are:' + ', '.join(FitModel.options) + '. [default: %default]'),
make_option('-p', '--print_model', action='store_true',
help='Print in the title of the plot the function of the fit. ' +
'[default: %default]', default=False),
make_option('-c', '--category', type='string', help='category to color each of'
' the trajectories when you have multiple treatments [default: %default]',
default=None),
make_option('-m', '--mapping_fp', type='existing_filepath', help='metadata '
'mapping file, only used when coloring by a category, a file with the '
'legends and color coding will be created with the suffix legend '
'[default: %default]',
default=None)
]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
category = opts.category
mapping_fp = opts.mapping_fp
colors_used = []
if (category and mapping_fp is None) or (category is None and mapping_fp):
option_parser.error('If coloring by a metadata category, both the '
'category and the mapping file must be supplied.')
elif mapping_fp and category:
mapping_data, mapping_headers, _ = parse_mapping_file(open(mapping_fp,
'U'))
if category not in mapping_headers:
option_parser.error("The category supplied must exist in the "
"metadata mapping file, '%s' does not exist." % category)
index = mapping_headers.index(category)
categories = list(set([line[index] for line in mapping_data]))
list_of_plots = []
if opts.binning is None:
ranges = []
else:
# simple ranges format validation
if opts.binning.count('[') != opts.binning.count(']') or\
opts.binning.count('[') != opts.binning.count(','):
raise ValueError("The binning input has an error: '%s'; " % +
"\nthe format should be [increment1,top_limit1][increment2,top_limit2]")
# spliting in ranges
rgn_txt = opts.binning.split('][')
# removing left [ and right ]
rgn_txt[0] = rgn_txt[0][1:]
rgn_txt[-1] = rgn_txt[-1][:-1]
# converting into int
ranges = []
max = 0
for i, r in enumerate(rgn_txt):
try:
values = map(float, r.split(','))
except ValueError:
raise ValueError(
"Not a valid format for binning %s" %
opts.binning)
if len(values) != 2:
raise ValueError(
"All ranges must have only 2 values: [%s]" %
r)
elif i + 1 != len(rgn_txt):
if values[0] > values[1]:
raise ValueError(
"The bin value can't be greater than the max value: [%s]" %
r)
elif values < 0:
raise ValueError(
"This value can not be negative: [%s]" %
r)
elif max > values[1]:
raise ValueError(
"This value can not smaller than the previous one: [%s]" %
r)
else:
max = values[1]
ranges.append(values)
x_samples, x_distmtx = parse_distmat(open(opts.input_path_x, 'U'))
y_samples, y_distmtx = parse_distmat(open(opts.input_path_y, 'U'))
if opts.ignore_missing_samples:
ignoring_from_x = list(set(x_samples) - set(y_samples))
ignoring_from_y = list(set(y_samples) - set(x_samples))
if opts.verbose:
print '\nFrom %s we are ignoring: %s\n' % (opts.input_path_x, ignoring_from_x)
print '\nFrom %s we are ignoring: %s\n' % (opts.input_path_y, ignoring_from_y)
print '\nOnly using: %s\n' % (list(set(x_samples) & set(y_samples)))
x_file = StringIO(
filter_samples_from_distance_matrix((x_samples, x_distmtx), ignoring_from_x))
x_samples, x_distmtx = parse_distmat(x_file)
y_file = StringIO(
filter_samples_from_distance_matrix((y_samples, y_distmtx), ignoring_from_y))
y_samples, y_distmtx = parse_distmat(y_file)
else:
if x_distmtx.shape != y_distmtx.shape:
raise ValueError('The distance matrices have different sizes. ' +
'You can cancel this error by passing --ignore_missing_samples')
figure()
if category is None:
x_val, y_val, x_fit, y_fit, func_text = fit_semivariogram(
(x_samples, x_distmtx), (y_samples, y_distmtx), opts.model, ranges)
plot(
x_val,
y_val,
color=opts.dot_color,
marker=opts.dot_marker,
linestyle="None",
alpha=opts.dot_alpha)
plot(
x_fit,
y_fit,
linewidth=2.0,
color=opts.line_color,
alpha=opts.line_alpha)
else:
# not all the categories that are going to be enumerated are found in
# the distance matrices i.e. the mapping file is a superset that can
# contain more samples than the distance matrices
used_categories = deepcopy(categories)
for index, single_category in enumerate(categories):
good_sample_ids = sample_ids_from_metadata_description(
open(mapping_fp), '%s:%s' % (category, single_category))
try:
_y_samples, _y_distmtx = parse_distmat(StringIO(
filter_samples_from_distance_matrix((y_samples, y_distmtx),
good_sample_ids, negate=True)))
_x_samples, _x_distmtx = parse_distmat(StringIO(
filter_samples_from_distance_matrix((x_samples, x_distmtx),
good_sample_ids, negate=True)))
except ValueError:
# no samples found for this category
used_categories.remove(single_category)
continue
x_val, y_val, x_fit, y_fit, func_text = fit_semivariogram(
(_x_samples, _x_distmtx), (_y_samples, _y_distmtx),
opts.model, ranges)
# retrieve one of the colors the "QIIME" colors and add it to the
# list of used colors for the creation of the legends in the plot
color_only = get_qiime_hex_string_color(index)
colors_used.append(color_only)
plot(x_val, y_val, color=color_only, marker=opts.dot_marker,
linestyle="None", alpha=opts.dot_alpha)
plot(x_fit, y_fit, linewidth=2.0, color=color_only,
alpha=opts.line_alpha, label=single_category)
if opts.x_min is not None and opts.x_max is not None:
xlim([opts.x_min, opts.x_max])
if opts.y_min is not None and opts.y_max is not None:
ylim([opts.y_min, opts.y_max])
x_label = opts.x_label
y_label = opts.y_label
fig_title = '%s (%s)' % (opts.fig_title, opts.model)
xlabel(x_label)
ylabel(y_label)
if opts.print_model:
title(fig_title + ' ' + func_text)
else:
title(fig_title)
savefig(opts.output_path)
# print the legends after the figure is exported to avoid conflicts
if category:
# if there's a desired format, use that, else default it to png
_, extension = splitext(opts.output_path)
# remove the dot, else, make_legend will add it to the filename
extension = extension.replace('.', '')
if extension == '':
extension = 'png'
make_legend(used_categories, colors_used, 0, 0, 'black', 'white',
opts.output_path, extension, 80)
if __name__ == "__main__":
main()
|
wasade/qiime
|
scripts/plot_semivariogram.py
|
Python
|
gpl-2.0
| 14,840
|
[
"Gaussian"
] |
57feb97c871d9d6508d455ce725f3d3e65618291d59b60ac0fb9581c698b5417
|
# Copyright (C) 2013, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os
from os.path import abspath
from zeroinstall.injector import qdom
from repo import incoming, cmd, registry
from repo.cmd import update
def handle(args):
    """Import the given feed files into the appropriate local registry."""
    feed_paths = [abspath(p) for p in args.path]

    if not cmd.find_config(missing_ok = True):
        # No repo config in the current directory: find the registry that
        # owns this feed's master URL and switch to it before importing.
        with open(feed_paths[0], 'rb') as stream:
            feed_doc = qdom.parse(stream)
        master_url = incoming.get_feed_url(feed_doc, feed_paths[0])
        from_registry = registry.lookup(master_url)
        assert from_registry['type'] == 'local', 'Unsupported registry type in %s' % from_registry
        os.chdir(from_registry['path'])
        print("Adding to registry '{path}'".format(path = from_registry['path']))

    config = cmd.load_config()

    collected = []
    for feed in feed_paths:
        print("Adding", feed)
        note = incoming.process(config, feed, delete_on_success = False)
        if note:
            collected.append(note)

    update.do_update(config, messages = collected)
|
gfxmonk/0repo
|
repo/cmd/add.py
|
Python
|
lgpl-2.1
| 1,044
|
[
"VisIt"
] |
d6f833c14f4288c661d9d99e26bfcf7e9ad2cae517da853277b6cd577135f730
|
import math
import sys
import numpy
import vtk
# All returned arrays are cast into either numpy or numarray arrays
arr=numpy.array
class vtu:
"""Unstructured grid object to deal with VTK unstructured grids."""
def __init__(self, filename = None):
    """Create a vtu object, optionally loading a .vtu/.pvtu file.

    With no filename an empty vtkUnstructuredGrid is created; otherwise the
    file is read and must contain at least one point or cell.
    """
    if filename is None:
        self.ugrid = vtk.vtkUnstructuredGrid()
    else:
        self.gridreader = None
        # .pvtu is tested first: every ".pvtu" name also ends in "vtu", so
        # the order of the suffix checks matters here.
        if filename.endswith(".pvtu"):
            self.gridreader = vtk.vtkXMLPUnstructuredGridReader()
        elif filename.endswith(".vtu"):
            self.gridreader = vtk.vtkXMLUnstructuredGridReader()
        else:
            raise Exception("ERROR: don't recognise file extension" + filename)
        self.gridreader.SetFileName(filename)
        self.gridreader.Update()
        self.ugrid = self.gridreader.GetOutput()
        if self.ugrid.GetNumberOfPoints() + self.ugrid.GetNumberOfCells() == 0:
            raise Exception("ERROR: No points or cells found after loading vtu " + filename)
    self.filename = filename
def GetScalarField(self, name):
    """Return an array with the values of the named scalar field.

    The field is looked up first in the point data and then in the cell
    data; an Exception is raised when neither holds an array of that name.
    """
    # GetScalars(name) returns None when no array of that name exists.  The
    # original wrapped the lookup in bare 'except:' clauses, which also
    # swallowed unrelated errors (including KeyboardInterrupt); explicit
    # None checks express the actual failure mode.
    vtkdata = self.ugrid.GetPointData().GetScalars(name)
    if vtkdata is None:
        vtkdata = self.ugrid.GetCellData().GetScalars(name)
    if vtkdata is None:
        raise Exception("ERROR: couldn't find point or cell scalar field data with name "+name+" in file "+self.filename+".")
    return arr([vtkdata.GetTuple1(i) for i in range(vtkdata.GetNumberOfTuples())])
def GetScalarRange(self, name):
    """Return the (min, max) range of the named scalar field.

    Point data is searched before cell data; raises an Exception when the
    field is not found in either.
    """
    # Replaces the original bare 'except:' clauses with explicit None
    # checks: GetScalars(name) returns None when the array is absent.
    vtkdata = self.ugrid.GetPointData().GetScalars(name)
    if vtkdata is None:
        vtkdata = self.ugrid.GetCellData().GetScalars(name)
    if vtkdata is None:
        raise Exception("ERROR: couldn't find point or cell scalar field data with name "+name+" in file "+self.filename+".")
    return vtkdata.GetRange()
def GetVectorField(self, name):
    """Return an array with the 3-component tuples of the named vector field.

    Point data is searched before cell data; raises an Exception when the
    field is not found in either.
    """
    # NOTE(review): like the original, this looks the array up via
    # GetScalars(name); confirm GetVectors(name) was not intended.
    # The bare 'except:' clauses of the original are replaced by explicit
    # None checks (GetScalars returns None for a missing array).
    vtkdata = self.ugrid.GetPointData().GetScalars(name)
    if vtkdata is None:
        vtkdata = self.ugrid.GetCellData().GetScalars(name)
    if vtkdata is None:
        raise Exception("ERROR: couldn't find point or cell vector field data with name "+name+" in file "+self.filename+".")
    return arr([vtkdata.GetTuple3(i) for i in range(vtkdata.GetNumberOfTuples())])
def GetVectorNorm(self, name):
    """Return a field holding the Euclidean norm of the named vector field.

    Uses scipy's norm when available, otherwise a plain sum-of-squares
    fallback; one norm is produced per grid point.
    """
    vectors = self.GetVectorField(name)
    try:
        from scipy.linalg import norm
    except ImportError:
        def norm(vec):
            # Plain Euclidean norm fallback when scipy is unavailable.
            return math.sqrt(sum(component ** 2 for component in vec))
    return arr([norm(vectors[node])
                for node in range(self.ugrid.GetNumberOfPoints())])
def GetField(self, name):
    """Return an array with the values of the named field.

    Point data is searched before cell data.  9-component tuples are
    reshaped to (n, 3, 3) and 4-component tuples to (n, 2, 2) (rank-2
    tensors in 3D/2D); anything else is returned as (n, components).
    Raises an Exception when the field is not found.
    """
    # GetArray(name) returns None for a missing array; the original relied
    # on bare 'except:' clauses around the subsequent attribute access,
    # which also masked unrelated errors.
    vtkdata = self.ugrid.GetPointData().GetArray(name)
    if vtkdata is None:
        vtkdata = self.ugrid.GetCellData().GetArray(name)
    if vtkdata is None:
        raise Exception("ERROR: couldn't find point or cell field data with name "+name+" in file "+self.filename+".")
    nc = vtkdata.GetNumberOfComponents()
    nt = vtkdata.GetNumberOfTuples()
    array = arr([vtkdata.GetValue(i) for i in range(nc * nt)])
    if nc == 9:
        return array.reshape(nt, 3, 3)
    elif nc == 4:
        return array.reshape(nt, 2, 2)
    else:
        return array.reshape(nt, nc)
def GetFieldRank(self, name):
    """Return the tensor rank of the named field.

    1 component -> rank 0 (scalar); 2 or 3 -> rank 1 (vector); 4 or 9 ->
    rank 2 (tensor).  Raises an Exception for anything else or when the
    field is missing from both point and cell data.
    """
    # Explicit None checks replace the original bare 'except:' clauses
    # (GetArray returns None when no array of that name exists).
    vtkdata = self.ugrid.GetPointData().GetArray(name)
    if vtkdata is None:
        vtkdata = self.ugrid.GetCellData().GetArray(name)
    if vtkdata is None:
        raise Exception("ERROR: couldn't find point or cell field data with name "+name+" in file "+self.filename+".")
    comps = vtkdata.GetNumberOfComponents()
    if comps == 1:
        return 0
    elif comps in [2, 3]:
        return 1
    elif comps in [4, 9]:
        return 2
    else:
        raise Exception("Field rank > 2 encountered")
def Write(self, filename=[]):
    """Write the grid to a vtu (or pvtu) file.

    If no filename is specified the name of the file originally read in is
    used, thus overwriting it!  Raises an Exception when neither a filename
    nor an original file is available.
    """
    # The [] default is a sentinel meaning "use the original filename";
    # an explicit None still means "no file" and is rejected below.
    if filename == []:
        filename = self.filename
    if filename is None:
        raise Exception("No file supplied")
    if filename.endswith('pvtu'):
        writer = vtk.vtkXMLPUnstructuredGridWriter()
    else:
        writer = vtk.vtkXMLUnstructuredGridWriter()
    writer.SetFileName(filename)
    writer.SetInput(self.ugrid)
    writer.Write()
def AddScalarField(self, name, array):
    """Add a scalar field named *name* with values taken from *array*.

    The array length decides whether the field is attached to the point
    data or the cell data; any other length raises an Exception.
    """
    data = vtk.vtkDoubleArray()
    data.SetNumberOfValues(len(array))
    data.SetName(name)
    for index, value in enumerate(array):
        data.SetValue(index, value)
    # Attach to point or cell data depending on the array length.
    if len(array) == self.ugrid.GetNumberOfPoints():
        target = self.ugrid.GetPointData()
    elif len(array) == self.ugrid.GetNumberOfCells():
        target = self.ugrid.GetCellData()
    else:
        raise Exception("Length neither number of nodes nor number of cells")
    target.AddArray(data)
    target.SetActiveScalars(name)
def AddVectorField(self, name, array):
    """Add a vector field named *name* from a 2-D numpy array.

    array has one row per point (or per cell); the row length becomes the
    number of components.  Raises an Exception when the row count matches
    neither the point nor the cell count.
    """
    n = array.size
    data = vtk.vtkDoubleArray()
    data.SetNumberOfComponents(array.shape[1])
    data.SetNumberOfValues(n)
    data.SetName(name)
    # Hoist the reshape out of the loop (the original reshaped on every
    # iteration).
    flat = array.reshape(n)
    for i in range(n):
        data.SetValue(i, flat[i])
    if array.shape[0] == self.ugrid.GetNumberOfPoints():
        pointdata = self.ugrid.GetPointData()
        pointdata.AddArray(data)
        pointdata.SetActiveVectors(name)
    elif array.shape[0] == self.ugrid.GetNumberOfCells():
        celldata = self.ugrid.GetCellData()
        celldata.AddArray(data)
        # CONSISTENCY FIX: the original omitted this call in the cell-data
        # branch, unlike the point-data branch above and both branches of
        # AddScalarField — the new array was never marked active.
        celldata.SetActiveVectors(name)
    else:
        raise Exception("Length neither number of nodes nor number of cells")
def AddField(self, name, array):
    """Add a field with an arbitrary number of components under *name*.

    The leading axis of array gives the number of tuples; the product of
    the remaining axes gives the components per tuple.  Raises an
    Exception when the tuple count matches neither points nor cells.
    """
    n = array.size
    shape = arr(array.shape)
    data = vtk.vtkDoubleArray()
    # number of tuples is shape[0]; components are the product of the rest
    data.SetNumberOfComponents(shape[1:].prod())
    data.SetNumberOfValues(n)
    data.SetName(name)
    for index, value in enumerate(array.reshape(n)):
        data.SetValue(index, value)
    if shape[0] == self.ugrid.GetNumberOfPoints():
        self.ugrid.GetPointData().AddArray(data)
    elif shape[0] == self.ugrid.GetNumberOfCells():
        self.ugrid.GetCellData().AddArray(data)
    else:
        raise Exception("Length neither number of nodes nor number of cells")
def ApplyProjection(self, projection_x, projection_y, projection_z):
    """Applys a projection to the grid coordinates. This overwrites the existing values.

    Each argument is a Python expression string evaluated once per node with
    that node's x, y and z bound as locals.
    """
    npoints = self.ugrid.GetNumberOfPoints ()
    for i in range (npoints):
        # x, y, z are read implicitly by the eval()'d expressions below.
        (x,y,z) = self.ugrid.GetPoint (i)
        # SECURITY NOTE: eval() executes arbitrary code -- only pass trusted
        # expression strings to this method.
        new_x = eval (projection_x)
        new_y = eval (projection_y)
        new_z = eval (projection_z)
        self.ugrid.GetPoints ().SetPoint (i, new_x, new_y, new_z)
def ApplyCoordinateTransformation(self,f):
    """Apply the coordinate transformation *f* to every grid point in place.

    *f* is called as f(position_array, t=0) and must return the new position.
    """
    grid_points = self.ugrid.GetPoints()
    for index in range(self.ugrid.GetNumberOfPoints()):
        moved = f(arr(self.ugrid.GetPoint(index)), t=0)
        grid_points.SetPoint(index, moved[0], moved[1], moved[2])
def ApplyEarthProjection(self):
    """Assume the input geometry is the Earth in Cartesian geometry and project to longitude, latitude, depth."""
    earth_radius = 6378000.0
    rad_to_deg = 180.0 / math.pi
    points = self.ugrid.GetPoints()
    for index in range(self.ugrid.GetNumberOfPoints()):
        x, y, z = self.ugrid.GetPoint(index)
        radius = math.sqrt(x * x + y * y + z * z)
        # Depth is measured relative to the nominal Earth radius.
        depth = radius - earth_radius
        longitude = rad_to_deg * math.atan2(y, x)
        latitude = 90.0 - rad_to_deg * math.acos(z / radius)
        points.SetPoint(index, longitude, latitude, depth)
def ProbeData(self, coordinates, name):
    """Interpolate the named field at the given coordinates.

    coordinates is an (n, 3) array of probe positions.  Positions the probe
    filter marks invalid (outside the mesh) fall back to the value at the
    nearest mesh node of the original field.  Returns an array shaped
    (ntuples,) + value-shape of the field.
    """
    # Initialise locator -- used later to patch up probe points that fell
    # outside the mesh.
    locator = vtk.vtkPointLocator()
    locator.SetDataSet(self.ugrid)
    locator.SetTolerance(10.0)
    locator.Update()
    # Initialise probe: wrap the probe positions in a vtkPolyData.
    points = vtk.vtkPoints()
    points.SetDataTypeToDouble()
    ilen, jlen = coordinates.shape
    for i in range(ilen):
        points.InsertNextPoint(coordinates[i][0], coordinates[i][1], coordinates[i][2])
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)
    probe = vtk.vtkProbeFilter()
    probe.SetInput(polydata)
    probe.SetSource(self.ugrid)
    probe.Update()
    # Generate a list invalidNodes, containing a map from invalid nodes in the
    # result to their closest nodes in the input.  GetValidPoints lists the
    # valid indices in ascending order, so a single cursor suffices.
    valid_ids = probe.GetValidPoints()
    valid_loc = 0
    invalidNodes = []
    for i in range(ilen):
        if valid_ids.GetTuple1(valid_loc) == i:
            valid_loc += 1
        else:
            nearest = locator.FindClosestPoint([coordinates[i][0], coordinates[i][1], coordinates[i][2]])
            invalidNodes.append((i, nearest))
    # Get final updated values as one flat component list.
    pointdata=probe.GetOutput().GetPointData()
    vtkdata=pointdata.GetArray(name)
    nc=vtkdata.GetNumberOfComponents()
    nt=vtkdata.GetNumberOfTuples()
    array = arr([vtkdata.GetValue(i) for i in range(nt * nc)])
    # Fix the point data at invalid nodes: copy the nearest node's value from
    # the original (point or cell) field.
    if len(invalidNodes) > 0:
        try:
            oldField = self.ugrid.GetPointData().GetArray(name)
            components = oldField.GetNumberOfComponents()
        except:
            # NOTE(review): bare except -- any failure falls through to the
            # cell-data lookup, then to the explicit error below.
            try:
                oldField = self.ugrid.GetCellData().GetArray(name)
                components = oldField.GetNumberOfComponents()
            except:
                raise Exception("ERROR: couldn't find point or cell field data with name "+name+" in file "+self.filename+".")
        for invalidNode, nearest in invalidNodes:
            for comp in range(nc):
                array[invalidNode * nc + comp] = oldField.GetValue(nearest * nc + comp)
    # Reshape the flat component list into (ntuples,) + per-value shape.
    valShape = self.GetField(name)[0].shape
    array.shape = tuple([nt] + list(valShape))
    return array
def RemoveField(self, name):
    """Removes said field from the unstructured grid.

    Bug fix: fields may live in point data *or* cell data (GetFieldNames
    reports both, and VtuDiff calls this on either kind), but previously only
    point data was searched.  vtkDataSetAttributes.RemoveArray is a no-op for
    absent names, so removing from both containers is safe.
    """
    self.ugrid.GetPointData().RemoveArray(name)
    self.ugrid.GetCellData().RemoveArray(name)
def GetLocations(self):
    """Returns an array with the locations of the nodes."""
    points = self.ugrid.GetPoints()
    # A grid with no geometry has no vtkPoints object at all; treat that as
    # an empty coordinate list.
    data = vtk.vtkDoubleArray() if points is None else points.GetData()
    return arr([data.GetTuple3(i) for i in range(data.GetNumberOfTuples())])
def GetCellPoints(self, id):
    """Returns an array with the node numbers of each cell (ndglno)."""
    ids = vtk.vtkIdList()
    self.ugrid.GetCellPoints(id, ids)
    return arr([ids.GetId(position) for position in range(ids.GetNumberOfIds())])
def GetFieldNames(self):
    """Returns the names of the available fields (point data first, then cell data)."""
    names = []
    for container in (self.ugrid.GetPointData(), self.ugrid.GetCellData()):
        names.extend(container.GetArrayName(i)
                     for i in range(container.GetNumberOfArrays()))
    return names
def GetPointCells(self, id):
    """Return an array with the elements which contain a node."""
    ids = vtk.vtkIdList()
    self.ugrid.GetPointCells(id, ids)
    return arr([ids.GetId(position) for position in range(ids.GetNumberOfIds())])
def GetPointPoints(self, id):
    """Return the nodes connecting to a given node (duplicates removed)."""
    neighbours = set()
    for cell in self.GetPointCells(id):
        neighbours.update(self.GetCellPoints(cell))
    return arr(list(neighbours))
def GetDistance(self, x, y):
    """Return the distance in physical space between nodes x and y."""
    posx = self.ugrid.GetPoint(x)
    posy = self.ugrid.GetPoint(y)
    squared = sum((a - b) ** 2 for a, b in zip(posx, posy))
    return math.sqrt(squared)
def Crop(self, min_x, max_x, min_y, max_y, min_z, max_z):
    """Trim off the edges defined by a bounding box."""
    trimmer = vtk.vtkExtractUnstructuredGrid()
    trimmer.SetInput(self.ugrid)
    trimmer.SetExtent(min_x, max_x, min_y, max_y, min_z, max_z)
    trimmer.Update()
    trimmed_ug = trimmer.GetOutput()
    # Replace our grid with the cropped copy produced by the filter.
    self.ugrid = trimmed_ug
def IntegrateField(self, field):
    """
    Integrate the supplied scalar field, assuming a linear representation on a
    tetrahedral mesh. Needs numpy-izing for speed.
    """
    # Only scalar (rank-0) point-wise fields are supported.
    assert field[0].shape in [(), (1,)]
    integral = 0.0
    n_cells = self.ugrid.GetNumberOfCells()
    # In parallel runs ghost cells are duplicated across processes; skip any
    # cell with a non-zero ghost level so each cell is counted exactly once.
    vtkGhostLevels = self.ugrid.GetCellData().GetArray("vtkGhostLevels")
    for cell_no in range(n_cells):
        integrate_cell = True
        if vtkGhostLevels:
            integrate_cell = (vtkGhostLevels.GetTuple1(cell_no) == 0)
        if integrate_cell:
            Cell = self.ugrid.GetCell(cell_no)
            Cell_points = Cell.GetPoints ()
            nCell_points = Cell.GetNumberOfPoints()
            if nCell_points == 4:
                # Tetrahedron: the cell measure is its volume.
                Volume = abs(Cell.ComputeVolume(Cell_points.GetPoint(0), \
                                                Cell_points.GetPoint(1), \
                                                Cell_points.GetPoint(2), \
                                                Cell_points.GetPoint(3)))
            elif nCell_points == 3:
                # Triangle: the cell measure is its area.
                Volume = abs(Cell.TriangleArea(Cell_points.GetPoint(0), \
                                               Cell_points.GetPoint(1), \
                                               Cell_points.GetPoint(2)))
            else:
                raise Exception("Unexpected number of points: " + str(nCell_points))
            # Linear (P1) representation: each node contributes an equal share
            # of the cell measure times its nodal value.
            Cell_ids = Cell.GetPointIds()
            for point in range(Cell_ids.GetNumberOfIds()):
                PointId = Cell_ids.GetId(point)
                integral = integral + (Volume*field[PointId] / float(nCell_points))
    return integral
def GetCellVolume(self, id):
    """Return the measure of cell *id*: triangle area, or |volume| for 4-point
    cells (3-point non-triangle cells keep the original ComputeVolume call)."""
    cell = self.ugrid.GetCell(id)
    pts = cell.GetPoints()
    corners = [pts.GetPoint(i) for i in range(cell.GetNumberOfPoints())]
    if isinstance(cell, vtk.vtkTriangle):
        return cell.TriangleArea(corners[0], corners[1], corners[2])
    if len(corners) == 4:
        return abs(cell.ComputeVolume(corners[0], corners[1], corners[2], corners[3]))
    if len(corners) == 3:
        return abs(cell.ComputeVolume(corners[0], corners[1], corners[2]))
    raise Exception("Unexpected number of points")
def GetFieldIntegral(self, name):
    """Integrate the named field over the mesh."""
    field = self.GetField(name)
    return self.IntegrateField(field)
def GetFieldRms(self, name):
    """Return the volume-weighted RMS of the named scalar or vector field."""
    field = self.GetField(name)
    rank = self.GetFieldRank(name)
    if rank == 0:
        norm_field = arr([value ** 2.0 for value in field])
    elif rank == 1:
        # Matches the original behaviour: the vector norm as supplied by
        # GetVectorNorm is integrated directly.
        norm_field = self.GetVectorNorm(name)
    else:
        raise Exception("Cannot calculate norm field for field rank > 1")
    # Normalise by the integral of a unit field, i.e. the mesh volume.
    ones = arr([1.0] * len(field))
    mean_square = self.IntegrateField(norm_field) / self.IntegrateField(ones)
    return float(numpy.sqrt(mean_square))
def StructuredPointProbe(self, nx, ny, nz, bounding_box=None):
    """Probe the unstructured grid dataset using a structured points dataset.

    nx, ny, nz give the sample resolution along each axis; bounding_box is an
    optional (xmin, xmax, ymin, ymax, zmin, zmax) sequence, defaulting to the
    grid's own bounds.  Returns the probe filter's vtkStructuredPoints output.
    """
    probe = vtk.vtkProbeFilter()
    probe.SetSource(self.ugrid)
    sgrid = vtk.vtkStructuredPoints()
    # Idiom fix: compare against None with 'is', not '=='.
    if bounding_box is None:
        bbox = self.ugrid.GetBounds()
    else:
        bbox = bounding_box
    sgrid.SetOrigin([bbox[0], bbox[2], bbox[4]])
    sgrid.SetDimensions(nx, ny, nz)
    # Guard against divide-by-zero on degenerate (single-sample) axes.
    spacing = [0.0, 0.0, 0.0]
    if nx > 1: spacing[0] = (bbox[1] - bbox[0]) / (nx - 1.0)
    if ny > 1: spacing[1] = (bbox[3] - bbox[2]) / (ny - 1.0)
    if nz > 1: spacing[2] = (bbox[5] - bbox[4]) / (nz - 1.0)
    sgrid.SetSpacing(spacing)
    probe.SetInput(sgrid)
    probe.Update()
    return probe.GetOutput()
### Field manipulation methods ###
def ManipulateField(self, fieldName, manipFunc, newFieldName = None):
    """
    Generic field manipulation method. Applies the supplied manipulation function
    manipFunc to the field fieldName. manipFunc must be a function of the form:

    def manipFunc(field, index):
        # ...
        return fieldValAtIndex
    """
    field = self.GetField(fieldName)
    overwrite = newFieldName is None or fieldName == newFieldName
    if overwrite:
        # Replacing in place: drop the old array before adding the new one.
        self.RemoveField(fieldName)
        newFieldName = fieldName
    transformed = arr([manipFunc(field, index) for index in range(len(field))])
    self.AddField(newFieldName, transformed)
    return
def AddFieldToField(self, fieldName, array, newFieldName = None):
    """Element-wise addition of *array* onto the named field."""
    self.ManipulateField(fieldName,
                         lambda field, index: field[index] + array[index],
                         newFieldName)
    return
def SubFieldFromField(self, fieldName, array, newFieldName = None):
    """Element-wise subtraction of *array* from the named field."""
    self.ManipulateField(fieldName,
                         lambda field, index: field[index] - array[index],
                         newFieldName)
    return
def DotFieldWithField(self, fieldName, array, newFieldName = None):
    """
    Dot product
    """
    def dot_at(field, index):
        # Start from 0.0 so the result is a float even for integer inputs,
        # matching the original accumulator.
        return sum((component * array[index][i]
                    for i, component in enumerate(field[index])), 0.0)
    self.ManipulateField(fieldName, dot_at, newFieldName)
    return
def CrossFieldWithField(self, fieldName, array, newFieldName = None, postMultiply = True):
    """
    Cross product
    """
    def cross_at(field, index):
        if postMultiply:
            left, right = field[index], array[index]
        else:
            left, right = array[index], field[index]
        return numpy.cross(left, right)
    self.ManipulateField(fieldName, cross_at, newFieldName)
    return
def MatMulFieldWithField(self, fieldName, array, newFieldName = None, postMultiply = True):
    """
    Matrix multiplication

    Bug fix: the manipulation function referenced an undefined name ``i``
    (a guaranteed NameError at runtime) instead of its ``index`` argument,
    and misspelt ``numpy.matrix`` as ``numpy.matix``.
    """
    def ManipFunc(field, index):
        if postMultiply:
            return numpy.matrix(field[index]) * numpy.matrix(array[index])
        else:
            return numpy.matrix(array[index]) * numpy.matrix(field[index])
    self.ManipulateField(fieldName, ManipFunc, newFieldName)
    return
# Default multiplication is dot product: MulFieldByField is an alias kept for
# backwards compatibility with existing callers.
MulFieldByField = DotFieldWithField
def GetDerivative(self, name):
    """
    Returns the derivative of field 'name', a
    vector field if 'name' is scalar, and a tensor field
    if 'name' is a vector. The field 'name' has to be point-wise data.
    The returned array gives a cell-wise derivative.
    """
    cd=vtk.vtkCellDerivatives()
    cd.SetInput(self.ugrid)
    pointdata=self.ugrid.GetPointData()
    nc=pointdata.GetArray(name).GetNumberOfComponents()
    if nc==1:
        # Scalar input: the gradient is a 3-vector per cell.
        cd.SetVectorModeToComputeGradient()
        cd.SetTensorModeToPassTensors()
        pointdata.SetActiveScalars(name)
        cd.Update()
        vtkdata=cd.GetUnstructuredGridOutput().GetCellData().GetArray('ScalarGradient')
        return arr([vtkdata.GetTuple3(i) for i in range(vtkdata.GetNumberOfTuples())])
    else:
        # Vector input: the gradient is a 3x3 tensor per cell.
        cd.SetTensorModeToComputeGradient()
        cd.SetVectorModeToPassVectors()
        pointdata.SetActiveVectors(name)
        cd.Update()
        vtkdata=cd.GetUnstructuredGridOutput().GetCellData().GetArray('VectorGradient')
        return arr([vtkdata.GetTuple9(i) for i in range(vtkdata.GetNumberOfTuples())])
def GetVorticity(self, name):
    """
    Returns the vorticity of vectorfield 'name'.
    The field 'name' has to be point-wise data.
    The returned array gives a cell-wise derivative.
    """
    cd=vtk.vtkCellDerivatives()
    cd.SetInput(self.ugrid)
    pointdata=self.ugrid.GetPointData()
    cd.SetVectorModeToComputeVorticity()
    cd.SetTensorModeToPassTensors()
    pointdata.SetActiveVectors(name)
    cd.Update()
    # NOTE(review): the result is fetched via the 'VectorGradient' array;
    # some VTK versions name the vorticity output 'Vorticity' instead --
    # confirm against the VTK version in use.
    vtkdata=cd.GetUnstructuredGridOutput().GetCellData().GetArray('VectorGradient')
    return arr([vtkdata.GetTuple3(i) for i in range(vtkdata.GetNumberOfTuples())])
def CellDataToPointData(self):
    """
    Transforms all cell-wise fields in the vtu to point-wise fields.
    All existing fields will remain.
    """
    cdtpd=vtk.vtkCellDataToPointData()
    cdtpd.SetInput(self.ugrid)
    # Keep the original cell arrays alongside the interpolated point arrays.
    cdtpd.PassCellDataOn()
    cdtpd.Update()
    self.ugrid=cdtpd.GetUnstructuredGridOutput()
def VtuMatchLocations(vtu1, vtu2, tolerance = 1.0e-6):
    """
    Check that the locations in the supplied vtus match exactly, returning True if they
    match and False otherwise.
    The locations must be in the same order.
    """
    first = vtu1.GetLocations().tolist()
    second = vtu2.GetLocations()
    if len(first) != len(second):
        return False
    for point1, point2 in zip(first, second):
        if len(point1) != len(point2):
            return False
        # Any coordinate differing by more than the tolerance is a mismatch.
        if any(abs(c1 - c2) > tolerance for c1, c2 in zip(point1, point2)):
            return False
    return True
def VtuMatchLocationsArbitrary(vtu1, vtu2, tolerance = 1.0e-6):
    """
    Check that the locations in the supplied vtus match, returning True if they
    match and False otherwise.
    The locations may be in a different order.
    """
    locations1 = vtu1.GetLocations()
    locations2 = vtu2.GetLocations()
    if not locations1.shape == locations2.shape:
        return False
    for j in range(locations1.shape[1]):
        # Compute the smallest possible precision given the range of this
        # coordinate.  Fix: the numpy.float alias was removed in NumPy 1.24;
        # the builtin float it aliased is equivalent.
        epsilon = numpy.finfo(float).eps * numpy.abs(locations1[:, j]).max()
        if tolerance < epsilon:
            # The specified tolerance is smaller than achievable machine
            # precision (or something else went wrong).
            raise Exception("ERROR: specified tolerance is smaller than machine precision of given locations")
        # Keep epsilon away from zero (e.g. when every coordinate is zero).
        epsilon = max(epsilon, tolerance / 100.0)
        # Round to that many decimal places (-2 to be sure) so that noise does
        # not destabilise the lexsort below.
        locations1[:, j] = numpy.around(locations1[:, j], int(-numpy.log10(epsilon)) - 2)
        locations2[:, j] = numpy.around(locations2[:, j], int(-numpy.log10(epsilon)) - 2)
    # Lexically sort both location sets so matching points line up, then
    # compare the largest remaining difference against the tolerance.
    sort_index1 = numpy.lexsort(locations1.T)
    sort_index2 = numpy.lexsort(locations2.T)
    return numpy.allclose(locations1[sort_index1], locations2[sort_index2], atol=tolerance)
def VtuDiff(vtu1, vtu2, filename = None):
    """
    Generate a vtu with fields generated by taking the difference between the field
    values in the two supplied vtus. Fields that are not common between the two vtus
    are neglected. If the node locations of the two vtus differ, the fields of vtu2
    are probed onto the cell points of vtu1; otherwise the matching point values are
    used directly.
    """
    # Generate empty output vtu
    resultVtu = vtu()
    resultVtu.filename = filename
    # If the input vtu point locations match, do not use probe
    useProbe = not VtuMatchLocations(vtu1, vtu2)
    # Copy the grid from the first input vtu into the output vtu
    resultVtu.ugrid.DeepCopy(vtu1.ugrid)
    # Find common field names between the input vtus and generate corresponding
    # difference fields
    fieldNames1 = vtu1.GetFieldNames()
    fieldNames2 = vtu2.GetFieldNames()
    for fieldName in fieldNames1:
        if fieldName in fieldNames2:
            if useProbe:
                field2 = vtu2.ProbeData(vtu1.GetLocations(), fieldName)
            else:
                field2 = vtu2.GetField(fieldName)
            resultVtu.SubFieldFromField(fieldName, field2)
        else:
            # Fields only present in vtu1 are dropped from the result.
            resultVtu.RemoveField(fieldName)
    return resultVtu
|
TerraFERMA/TerraFERMA
|
buckettools/python/buckettools/vtktools.py
|
Python
|
lgpl-3.0
| 24,491
|
[
"VTK"
] |
eeab0566d6453a92dae1e0a276d3492e16930f4f8e62ed479ce41ee759309711
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.19225
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/movierename.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class movierename(Template):
    """Cheetah-compiled template: emits the e2simplexmlresult XML document for
    the OpenWebif movie-rename action.  Autogenerated from movierename.tmpl --
    regenerate from the template instead of hand-editing this file."""

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        # Forward construction to Template, then apply only the keyword
        # arguments the Cheetah instance initialiser understands.
        super(movierename, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        # Resolve the output transaction; a DummyTransaction buffers output to
        # a string when no real transaction is supplied.
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        # Temporarily switch to the WebSafe filter so $result/$message are
        # escaped for XML output; the previous filter is restored at the end.
        _orig_filter_95218976 = _filter
        filterName = u'WebSafe'
        if self._CHEETAH__filters.has_key("WebSafe"):
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>''')
        _v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11
        if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11.
        write(u'''</e2state>
\t<e2statetext>''')
        _v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15
        if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15.
        write(u'''</e2statetext>\t
</e2simplexmlresult>
''')
        _filter = self._CHEETAH__currentFilter = _orig_filter_95218976
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    # Class-level bookkeeping consumed by the Cheetah runtime.
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_movierename= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing to the class when the generated module is
# loaded standalone (i.e. it was not already instrumented by the compiler).
if not hasattr(movierename, '_initCheetahAttributes'):
    templateAPIClass = getattr(movierename, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(movierename)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
# Allow rendering the template directly from the command line for debugging.
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=movierename()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/web/movierename.py
|
Python
|
gpl-2.0
| 5,183
|
[
"VisIt"
] |
11c2525686dbc5f1771527dcca10b80bd094745a51b77c534aaac286bdab5062
|
# Copyright 2014 Roberto Brian Sarrionandia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
import jinja2
import os
import string
import random
import datetime
from google.appengine.ext import ndb
import tusers
from models import Tournament
from forms import ProfileForm
# Module-wide Jinja2 environment: templates are resolved relative to this
# file, and autoescaping is enabled to guard rendered values against XSS.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
class UpdateProfileHandler(webapp2.RequestHandler):
    """Displays and processes the user-profile editing form."""

    def get(self):
        """Render the profile form pre-populated with the user's details."""
        user = tusers.get_current_user()
        if not user:
            self.redirect(tusers.create_login_url(self.request.uri))
            return
        form = ProfileForm()
        form.name.data = user.full_name
        form.institution.data = user.current_institution
        form.public.data = user.public_profile
        form.phone.data = user.phone
        self._render(user, form)

    def post(self):
        """Validate the submitted form and persist the profile changes."""
        user = tusers.get_current_user()
        if not user:
            self.redirect(tusers.create_login_url(self.request.uri))
            return
        form = ProfileForm(self.request.POST)
        if not form.validate():
            # Re-show the form with its validation errors.
            self._render(user, form)
            return
        user.full_name = form.name.data
        user.current_institution = form.institution.data
        user.public_profile = form.public.data
        user.phone = form.phone.data
        user.put()
        # E-mail changes/verification are applied after the profile save.
        if form.email.data:
            user.change_email(form.email.data)
        if form.email_code.data:
            user.verify_email(form.email_code.data)
        self.redirect('/update_profile')

    def _render(self, user, form):
        """Write the update_profile template for *user* with *form*."""
        template = JINJA_ENVIRONMENT.get_template('view/update_profile.html')
        self.response.write(template.render({
            'user': user,
            'logout': tusers.create_logout_url('/'),
            'form': form,
        }))
class ClearEmailHandler(webapp2.RequestHandler):
    """Removes the user's custom e-mail address and its verified status."""

    def get(self):
        user = tusers.get_current_user()
        if not user:
            self.redirect(tusers.create_login_url(self.request.uri))
            return
        user.custom_email = None
        user.email_verified = False
        user.put()
        self.redirect('/update_profile')
# URL routes for the profile-management pages.
# NOTE(review): debug=True exposes stack traces to clients; disable for
# production deployments.
app = webapp2.WSGIApplication([
    ('/update_profile', UpdateProfileHandler),
    ('/update_profile/clear_email', ClearEmailHandler)
], debug=True)
|
sarrionandia/tournatrack
|
update_profile.py
|
Python
|
apache-2.0
| 3,096
|
[
"Brian"
] |
6f8309f43e71ac76ec902c5a77a09d0e4f62fe2cb63742df5f195fad4dd338c4
|
# -*- coding: utf-8 -*-
#slb@slb.moe
import os
import codecs
import subprocess
from time import sleep
def OpenAndReadUtf8(filepath):
    """Read *filepath* as UTF-8 and return its contents as unicode.

    On any failure a message is printed and the script pauses; the function
    then implicitly returns None (Python 2 code).
    """
    try:
        openfile = codecs.open(filepath,"r",encoding='utf-8')
        infile = openfile.read()
        openfile.close()
        return infile
    except:
        # NOTE(review): bare except also swallows e.g. KeyboardInterrupt.
        e = "\nRead file " + filepath + " error!!!\n"
        print e
        os.system("pause")
def WriteMbcs(filepath,writethings,encoding='mbcs'):
    """Write *writethings* to *filepath* using *encoding* (default Windows mbcs).

    Returns 0 on success, 1 on failure (after printing and pausing).
    """
    try:
        openfile = codecs.open(filepath,"w",encoding=encoding)
        openfile.write(writethings)
        openfile.close()
        e = "\nWrite file " + filepath + " complete!!!"
        print e
        return 0
    except:
        e = "\nWrite file " + filepath + " error!!!"
        print e
        os.system("pause")
        return 1
def hconf(conf="mr.conf"):
    """Parse a simple UTF-8 key=value config file and return it as a dict.

    '#' starts a comment; only the first '=' splits a line, so values may
    themselves contain '='.  Keys and values are whitespace-stripped.  On any
    parse error a message is printed (console codepage) and the script pauses.
    """
    confs={}
    try:
        opconf = codecs.open(conf,"r",encoding='utf-8')
        confsr = opconf.readlines()
        opconf.close()
        for cf in confsr:
            cf = cf.split(u"#")[0]   # drop trailing comment
            if cf.strip():
                cfAB = cf.strip().split(u"=",1)
                if cfAB[1]:
                    confs[cfAB[0].strip()] = cfAB[1].strip()
    except:
        # A line without '=' raises IndexError above and lands here too.
        e = conf + u" error \n\n"
        print e.encode("mbcs")
        os.system("pause")
    return confs
def filelist(sourepath,ext=None):
    """Recursively collect file paths under *sourepath*.

    When *ext* is given only files with that (case-insensitive) extension are
    returned; otherwise every file is included.
    """
    wpg = os.walk(sourepath)
    fp = []
    try:
        while True:
            tmp = wpg.next()   # Python 2 generator protocol
            for i in tmp[-1]:  # tmp[-1] is the filename list, tmp[0] the dir
                if not ext:
                    fp.append(os.path.join(tmp[0],i))
                if ext:
                    iext = i.split(".")[-1].lower()
                    ext = ext.lower()
                    if ext == iext:
                        fp.append(os.path.join(tmp[0],i))
    except StopIteration:
        pass   # walk exhausted -- normal termination
    except :
        print "filelist error"
    return fp
###############################################
def make_avs(rawfile):
    """Write an AviSynth script for *rawfile* from the seed template.

    The seed's source/subtitle placeholders are replaced with the media path
    and the sibling .ass subtitle path; output is <name>.avs next to the raw
    file, written with the Windows mbcs codec.
    """
    avsS = r".\script_seeds\seed_avs.avs"
    avsr = OpenAndReadUtf8(avsS)
    fnne = os.path.splitext(rawfile)[0]
    assfile = fnne + u".ass"
    Osfp = u"<SOURCE_FILE_PATH_DONOT_EDIT_OR_REMOVE_THIS>"
    Oafp = u"<SUBTITLE_FILE_PATH_DONOT_EDIT_OR_REMOVE_THIS>"
    avsr = avsr.replace(Osfp,rawfile)
    avsr = avsr.replace(Oafp,assfile)
    avsfile = fnne.encode("mbcs") + ".avs"
    c = WriteMbcs(avsfile,avsr)
    if c:
        print "make_avs error!"
        os.system("pause")
    else:
        print "make_avs finish"
def make_x264ripbat(rawfile):
    """Write the x264 encode batch file for *rawfile* from the seed template.

    Placeholders are replaced with the sibling .h264 output path and the .avs
    input path; the result is <name>_x264rip.bat next to the raw file.
    """
    batS = r".\script_seeds\seed_x264rip.bat"
    batr = OpenAndReadUtf8(batS)
    fnne = os.path.splitext(rawfile)[0]
    h264file = fnne + u".h264"
    avsfile = fnne + u".avs"
    Oh264fp = u"<OUTPUT_H264_FILE_PATH_DONOT_EDIT_THIS>"
    Oavsfp = u"<INPUT_AVS_FILE_PATH_DONOT_EDIT_THIS>"
    batr = batr.replace(Oh264fp,h264file)
    batr = batr.replace(Oavsfp,avsfile)
    x264ripbatfile = fnne.encode("mbcs") + "_x264rip.bat"
    c = WriteMbcs(x264ripbatfile,batr)
    if c:
        print "make_x264ripbat error!"
        os.system("pause")
    else:
        print "make_x264ripbat finish"
def make_getaudiobat(rawfile):
    """Write the audio-extraction batch file for *rawfile* from the seed template.

    Placeholders are replaced with the media path and the sibling .aac output
    path; the result is <name>_audio.bat next to the raw file.
    """
    batS = r".\script_seeds\seed_getaudio.bat"
    batr = OpenAndReadUtf8(batS)
    fnne = os.path.splitext(rawfile)[0]
    aacfile = fnne + u".aac"
    Omediafp = u"<INPUT_MEDIA_FILE_PATH_DONOT_EDIT_THIS>"
    Oaudiofp = u"<OUTPUT_AUDIO_FILE_PATH_DONOT_EDIT_THIS>"
    batr = batr.replace(Omediafp,rawfile)
    batr = batr.replace(Oaudiofp,aacfile)
    getaudiobatfile = fnne.encode("mbcs") + "_audio.bat"
    c = WriteMbcs(getaudiobatfile,batr)
    if c:
        print "make_getaudiobat error!"
        os.system("pause")
    else:
        print "make_getaudiobat finish"
def make_mergebat(rawfile):
    """Write the mux batch file for *rawfile* from the seed template.

    Placeholders are replaced with the .h264 video, .aac audio and
    <name>_muxed.mp4 output paths; the result is <name>_mux.bat next to the
    raw file.
    """
    batS = r".\script_seeds\seed_merge.bat"
    batr = OpenAndReadUtf8(batS)
    fnne = os.path.splitext(rawfile)[0]
    h264file = fnne + u".h264"
    aacfile = fnne + u".aac"
    mp4file = fnne + u"_muxed.mp4"
    Oihfp = u"<INPUT_H264_FILE_PATH_DONOT_EDIT_OR_REMOVE_THIS>"
    Oiafp = u"<INPUT_AAC_FILE_PATH_DONOT_EDIT_OR_REMOVE_THIS>"
    Oomfp = u"<OUTPUT_MERGED_FILE_PATH_DONOT_EDIT_OR_REMOVE_THIS>"
    batr = batr.replace(Oihfp,h264file)
    batr = batr.replace(Oiafp,aacfile)
    batr = batr.replace(Oomfp,mp4file)
    mergebatfile = fnne.encode("mbcs") + "_mux.bat"
    c = WriteMbcs(mergebatfile,batr)
    if c:
        print "make_mergebat error!"
        os.system("pause")
    else:
        print "make_mergebat finish"
def gocmd(batfile):
    """Run *batfile* through the shell and block until it finishes."""
    batfile = '"%s"' % batfile   # quote: the path may contain spaces
    try:
        rip = subprocess.Popen( batfile ,shell=True)
        rip.wait()
    except:
        print "\n\n\n\nerror " + "at " + batfile
        os.system("pause")
def movefile(sourefile):
    """Move the muxed <name>_muxed.mp4 into a 'riped' subdirectory.

    The subdirectory is created next to the source file if needed.
    """
    sourefile = sourefile.encode("mbcs")
    fnne = os.path.splitext(sourefile)[0]
    fnp = os.path.split(sourefile)[0]
    mp4file = fnne + "_muxed.mp4"
    newfile = os.path.split(mp4file)[1]
    newpath = fnp + "\\riped"
    try:
        os.makedirs(newpath)
        print "maked " + newpath
    except:
        pass   # directory most likely exists already
    newpath = newpath + "\\" + newfile
    import shutil
    shutil.move( mp4file , newpath )
def HandleFile(sourefile):
    """Run the full rip pipeline for one media file.

    Generates the avs/bat scripts, extracts audio, encodes video, kills any
    lingering MP_Pipeline slave process, muxes audio+video, and moves the
    result into the 'riped' subdirectory.
    """
    make_avs(sourefile)
    make_x264ripbat(sourefile)
    make_getaudiobat(sourefile)
    make_mergebat(sourefile)
    fnne = os.path.splitext(sourefile)[0].encode("mbcs")
    batfile = fnne + "_audio.bat"
    gocmd(batfile)
    batfile = fnne + "_x264rip.bat"
    gocmd(batfile)
    sleep(3)   # give the encoder time to release its helper process
    try:
        os.system("taskkill /IM MP_Pipeline.dll.slave.exe /t /f")
        sleep(3)
    except:
        print "Can not kill MP_Pipeline.dll.slave.exe,Is it stoped?\n\n"
        pass
    batfile = fnne + "_mux.bat"
    gocmd(batfile)
    movefile(sourefile)
    print sourefile.encode("mbcs") + " is Complete!!!!!!!!\n=================================================\n"
if __name__ == "__main__":
    # Batch entry point: read the working directory ("wb") and extension
    # ("ext") from mr.conf, then rip every matching media file in that tree.
    print "rip start!!!!!!!!!!!!!!!!!!!!!\n=================================================\n"
    wb = hconf()["wb"]
    ext = hconf()["ext"]   # NOTE(review): hconf() re-parses the file twice
    fl = filelist(wb,ext)
    for f in fl:
        print "Handling... ... ..."
        print f.encode("mbcs")
        HandleFile(f)
    print "\n=================================================\nRip All Over!\n\n\n\n"
    os.system("pause")
    os.system("pause")
|
staalu/mr
|
mr.py
|
Python
|
gpl-3.0
| 6,336
|
[
"MOE"
] |
e64d839f77ea01322ed4f9a136ff1dd84072c0bf7ff51f2f76e37b1f167f4560
|
# Autodetecting setup.py script for building the Python extensions
#
__version__ = "$Revision$"
import sys, os, imp, re, optparse
from glob import glob
import sysconfig
from distutils import log
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.command.build_scripts import build_scripts
from distutils.spawn import find_executable
# Were we compiled --with-pydebug or with #define Py_DEBUG?
# (sys.gettotalrefcount only exists in debug builds of CPython.)
COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount')
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
# File which contains the directory for shared mods (for sys.path fixup
# when running from the build dir, see Modules/getpath.c)
_BUILDDIR_COOKIE = "pybuilddir.txt"
def add_dir_to_list(dirlist, dir):
    """Add the directory 'dir' to the list 'dirlist' (after any relative
    directories) if:

    1) 'dir' is not already in 'dirlist'
    2) 'dir' actually exists, and is a directory.
    """
    if dir is None or not os.path.isdir(dir) or dir in dirlist:
        return
    # Insert just after the first relative entry, if there is one; otherwise
    # prepend to the list.
    for position, existing in enumerate(dirlist):
        if not os.path.isabs(existing):
            dirlist.insert(position + 1, dir)
            return
    dirlist.insert(0, dir)
def macosx_sdk_root():
    """
    Return the directory of the current OSX SDK,
    or '/' if no SDK was specified.
    """
    cflags = sysconfig.get_config_var('CFLAGS')
    match = re.search(r'-isysroot\s+(\S+)', cflags)
    return '/' if match is None else match.group(1)
def is_macosx_sdk_path(path):
    """
    Returns True if 'path' can be located in an OSX SDK
    """
    # /usr/... counts unless it is under /usr/local; /System/ and /Library/
    # always count.
    if path.startswith('/usr/'):
        return not path.startswith('/usr/local')
    return path.startswith(('/System/', '/Library/'))
def find_file(filename, std_dirs, paths):
    """Searches for the directory where a given file is located,
    and returns a possibly-empty list of additional directories, or None
    if the file couldn't be found at all.

    'filename' is the name of a file, such as readline.h or libcrypto.a.
    'std_dirs' is the list of standard system directories; if the
    file is found in one of them, no additional directives are needed.
    'paths' is a list of additional locations to check; if the file is
    found in one of them, the resulting list will contain the directory.
    """
    on_darwin = sys.platform == 'darwin'
    if on_darwin:
        # Honor the MacOSX SDK setting when one was specified: an SDK is a
        # directory mirroring the real system tree, holding only headers and
        # libraries.
        sysroot = macosx_sdk_root()

    def candidate(directory):
        # Map the lookup inside the SDK when applicable.
        if on_darwin and is_macosx_sdk_path(directory):
            return os.path.join(sysroot, directory[1:], filename)
        return os.path.join(directory, filename)

    # Standard locations need no extra directives.
    if any(os.path.exists(candidate(directory)) for directory in std_dirs):
        return []
    # Additional locations: report the first directory containing the file.
    for directory in paths:
        if os.path.exists(candidate(directory)):
            return [directory]
    # Not found anywhere.
    return None
def find_library_file(compiler, libname, std_dirs, paths):
    """Locate *libname* via the compiler and report which extra directory list
    (if any) is needed: [] when it lives in a standard directory, [dir] when
    it lives in one of 'paths', and None when it is not found at all."""
    result = compiler.find_library_file(std_dirs + paths, libname)
    if result is None:
        return None
    if sys.platform == 'darwin':
        sysroot = macosx_sdk_root()
    dirname = os.path.dirname(result)

    def matches(candidate):
        # Compare with any trailing path separator stripped, also honoring
        # the OSX SDK prefix when relevant.
        candidate = candidate.rstrip(os.sep)
        if sys.platform == 'darwin' and is_macosx_sdk_path(candidate):
            if os.path.join(sysroot, candidate[1:]) == dirname:
                return True
        return candidate == dirname

    # A hit inside a standard directory needs no additional search path ...
    for p in std_dirs:
        if matches(p):
            return []
    # ... otherwise report which additional directory supplied the library.
    for p in paths:
        if matches(p):
            return [p]
    assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
    """Return whether the module 'modname' is present in the list
    of extensions 'extlist'.
    """
    # any() short-circuits on the first match and avoids materializing an
    # intermediate filtered list; callers only use the result in boolean
    # context, so returning a bool instead of a count is backward
    # compatible.
    return any(ext.name == modname for ext in extlist)
def find_module_file(module, dirlist):
    """Find a module in a set of possible folders. If it is not found
    return the unadorned filename.
    """
    # Renamed local from 'list' to 'dirs': the original shadowed the
    # builtin 'list' inside this function.
    dirs = find_file(module, [], dirlist)
    if not dirs:
        return module
    if len(dirs) > 1:
        log.info("WARNING: multiple copies of %s found"%module)
    return os.path.join(dirs[0], module)
class PyBuildExt(build_ext):
def __init__(self, dist):
    build_ext.__init__(self, dist)
    # Names of extension modules that failed to build or to import;
    # collected during the build and reported at the end.
    self.failed = []
def build_extensions(self):
    """Detect, filter, fix up and build all extension modules, then
    print a report of modules that were missing or failed to build."""

    # Detect which modules should be compiled
    missing = self.detect_modules()

    # Remove modules that are present on the disabled list
    extensions = [ext for ext in self.extensions
                  if ext.name not in disabled_module_list]
    # move ctypes to the end, it depends on other modules
    ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
    if "_ctypes" in ext_map:
        ctypes = extensions.pop(ext_map["_ctypes"])
        extensions.append(ctypes)
    self.extensions = extensions

    # Fix up the autodetected modules, prefixing all the source files
    # with Modules/.
    srcdir = sysconfig.get_config_var('srcdir')
    if not srcdir:
        # Maybe running on Windows but not using CYGWIN?
        raise ValueError("No source directory; cannot proceed.")
    srcdir = os.path.abspath(srcdir)
    moddirlist = [os.path.join(srcdir, 'Modules')]

    # Platform-dependent module source and include directories
    platform = self.get_platform()

    # Fix up the paths for scripts, too
    self.distribution.scripts = [os.path.join(srcdir, filename)
                                 for filename in self.distribution.scripts]

    # Python header files
    headers = [sysconfig.get_config_h_filename()]
    headers += glob(os.path.join(sysconfig.get_path('include'), "*.h"))

    for ext in self.extensions[:]:
        ext.sources = [ find_module_file(filename, moddirlist)
                        for filename in ext.sources ]
        if ext.depends is not None:
            ext.depends = [find_module_file(filename, moddirlist)
                           for filename in ext.depends]
        else:
            ext.depends = []
        # re-compile extensions if a header file has been changed
        ext.depends.extend(headers)

        # If a module has already been built statically,
        # don't build it here
        if ext.name in sys.builtin_module_names:
            self.extensions.remove(ext)

    # Parse Modules/Setup and Modules/Setup.local to figure out which
    # modules are turned on in the file.
    remove_modules = []
    for filename in ('Modules/Setup', 'Modules/Setup.local'):
        input = text_file.TextFile(filename, join_lines=1)
        while 1:
            line = input.readline()
            if not line: break
            # First token on each Setup line is the module name.
            line = line.split()
            remove_modules.append(line[0])
        input.close()

    for ext in self.extensions[:]:
        if ext.name in remove_modules:
            self.extensions.remove(ext)

    # When you run "make CC=altcc" or something similar, you really want
    # those environment variables passed into the setup.py phase. Here's
    # a small set of useful ones.
    compiler = os.environ.get('CC')
    args = {}
    # unfortunately, distutils doesn't let us provide separate C and C++
    # compilers
    if compiler is not None:
        (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
        args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
    self.compiler.set_executables(**args)

    # Not only do we write the builddir cookie, but we manually install
    # the shared modules directory if it isn't already in sys.path.
    # Otherwise trying to import the extensions after building them
    # will fail.
    with open(_BUILDDIR_COOKIE, "wb") as f:
        f.write(self.build_lib.encode('utf-8', 'surrogateescape'))
    abs_build_lib = os.path.join(os.getcwd(), self.build_lib)
    if abs_build_lib not in sys.path:
        sys.path.append(abs_build_lib)

    build_ext.build_extensions(self)

    # Column width for the three-column report below.
    longest = max([len(e.name) for e in self.extensions])
    if self.failed:
        longest = max(longest, max([len(name) for name in self.failed]))

    def print_three_column(lst):
        # Print the given names sorted case-insensitively, three per row.
        lst.sort(key=str.lower)
        # guarantee zip() doesn't drop anything
        while len(lst) % 3:
            lst.append("")
        for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
            print("%-*s   %-*s   %-*s" % (longest, e, longest, f,
                                          longest, g))

    if missing:
        print()
        print("Python build finished, but the necessary bits to build "
              "these modules were not found:")
        print_three_column(missing)
        print("To find the necessary bits, look in setup.py in"
              " detect_modules() for the module's name.")
        print()

    if self.failed:
        failed = self.failed[:]
        print()
        print("Failed to build these modules:")
        print_three_column(failed)
        print()
def build_extension(self, ext):
    """Build a single extension module, then sanity-check it by
    importing the freshly built shared object.

    Failures (compile errors or import errors) are recorded in
    self.failed instead of aborting the whole build; an extension
    that built but cannot be imported is renamed '<name>_failed'."""

    if ext.name == '_ctypes':
        if not self.configure_ctypes(ext):
            return

    try:
        build_ext.build_extension(self, ext)
    except (CCompilerError, DistutilsError) as why:
        self.announce('WARNING: building of extension "%s" failed: %s' %
                      (ext.name, sys.exc_info()[1]))
        self.failed.append(ext.name)
        return
    # Workaround for Mac OS X: The Carbon-based modules cannot be
    # reliably imported into a command-line Python
    if 'Carbon' in ext.extra_link_args:
        self.announce(
            'WARNING: skipping import check for Carbon-based "%s"' %
            ext.name)
        return

    if self.get_platform() == 'darwin' and (
            sys.maxsize > 2**32 and '-arch' in ext.extra_link_args):
        # Don't bother doing an import check when an extension was
        # build with an explicit '-arch' flag on OSX. That's currently
        # only used to build 32-bit only extensions in a 4-way
        # universal build and loading 32-bit code into a 64-bit
        # process will fail.
        self.announce(
            'WARNING: skipping import check for "%s"' %
            ext.name)
        return

    # Workaround for Cygwin: Cygwin currently has fork issues when many
    # modules have been imported
    if self.get_platform() == 'cygwin':
        self.announce('WARNING: skipping import check for Cygwin-based "%s"'
            % ext.name)
        return
    ext_filename = os.path.join(
        self.build_lib,
        self.get_ext_filename(self.get_ext_fullname(ext.name)))

    # If the build directory didn't exist when setup.py was
    # started, sys.path_importer_cache has a negative result
    # cached. Clear that cache before trying to import.
    sys.path_importer_cache.clear()

    try:
        imp.load_dynamic(ext.name, ext_filename)
    except ImportError as why:
        self.failed.append(ext.name)
        self.announce('*** WARNING: renaming "%s" since importing it'
                      ' failed: %s' % (ext.name, why), level=3)
        assert not self.inplace
        basename, tail = os.path.splitext(ext_filename)
        newname = basename + "_failed" + tail
        if os.path.exists(newname):
            os.remove(newname)
        os.rename(ext_filename, newname)

        # XXX -- This relies on a Vile HACK in
        # distutils.command.build_ext.build_extension(). The
        # _built_objects attribute is stored there strictly for
        # use here.
        # If there is a failure, _built_objects may not be there,
        # so catch the AttributeError and move on.
        try:
            for filename in self._built_objects:
                os.remove(filename)
        except AttributeError:
            self.announce('unable to remove files (ignored)')
    except:
        # Any other exception during the import check is reported but
        # does not rename the module; record the failure and move on.
        exc_type, why, tb = sys.exc_info()
        self.announce('*** WARNING: importing extension "%s" '
                      'failed with %s: %s' % (ext.name, exc_type, why),
                      level=3)
        self.failed.append(ext.name)
def get_platform(self):
    """Return a canonical short platform name.

    Collapses any sys.platform value that begins with one of the
    known prefixes ('cygwin', 'darwin', 'osf1') to that prefix;
    every other platform string is returned unchanged.
    """
    current = sys.platform
    for prefix in ('cygwin', 'darwin', 'osf1'):
        if current.startswith(prefix):
            return prefix
    return current
def add_multiarch_paths(self):
    """Add Debian/Ubuntu multiarch library/include directories to the
    compiler search paths, if a multiarch triplet can be determined."""
    # Debian/Ubuntu multiarch support.
    # https://wiki.ubuntu.com/MultiarchSpec
    cc = sysconfig.get_config_var('CC')
    tmpfile = os.path.join(self.build_temp, 'multiarch')
    if not os.path.exists(self.build_temp):
        os.makedirs(self.build_temp)
    # First try asking the compiler directly (GCC's -print-multiarch).
    ret = os.system(
        '%s -print-multiarch > %s 2> /dev/null' % (cc, tmpfile))
    multiarch_path_component = ''
    try:
        if ret >> 8 == 0:
            with open(tmpfile) as fp:
                multiarch_path_component = fp.readline().strip()
    finally:
        os.unlink(tmpfile)

    if multiarch_path_component != '':
        add_dir_to_list(self.compiler.library_dirs,
                        '/usr/lib/' + multiarch_path_component)
        add_dir_to_list(self.compiler.include_dirs,
                        '/usr/include/' + multiarch_path_component)
        return

    # Fall back to dpkg-architecture when the compiler doesn't know the
    # multiarch triplet.
    if not find_executable('dpkg-architecture'):
        return
    tmpfile = os.path.join(self.build_temp, 'multiarch')
    if not os.path.exists(self.build_temp):
        os.makedirs(self.build_temp)
    ret = os.system(
        'dpkg-architecture -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
        tmpfile)
    try:
        if ret >> 8 == 0:
            with open(tmpfile) as fp:
                multiarch_path_component = fp.readline().strip()
            add_dir_to_list(self.compiler.library_dirs,
                            '/usr/lib/' + multiarch_path_component)
            add_dir_to_list(self.compiler.include_dirs,
                            '/usr/include/' + multiarch_path_component)
    finally:
        os.unlink(tmpfile)
def detect_modules(self):
# Ensure that /usr/local is always used, but the local build
# directories (i.e. '.' and 'Include') must be first. See issue
# 10520.
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
self.add_multiarch_paths()
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
# options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
if os.path.normpath(sys.prefix) != '/usr' \
and not sysconfig.get_config_var('PYTHONFRAMEWORK'):
# OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework
# (PYTHONFRAMEWORK is set) to avoid # linking problems when
# building a framework with different architectures than
# the one that is currently installed (issue #7473)
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
lib_dirs = self.compiler.library_dirs + [
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
]
inc_dirs = self.compiler.include_dirs + ['/usr/include']
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
with open(config_h) as file:
config_h_vars = sysconfig.parse_config_h(file)
platform = self.get_platform()
srcdir = sysconfig.get_config_var('srcdir')
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
# HP-UX11iv3 keeps files in lib/hpux folders.
if platform == 'hp-ux11':
lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32']
if platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
#
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses
# directories with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if platform == 'darwin':
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# time operations and variables
exts.append( Extension('time', ['timemodule.c', '_time.c'],
libraries=math_libs) )
exts.append( Extension('_datetime', ['_datetimemodule.c', '_time.c'],
libraries=math_libs) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# C-optimized pickle replacement
exts.append( Extension("_pickle", ["_pickle.c"]) )
# atexit
exts.append( Extension("atexit", ["atexitmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# profiler (_lsprof is for cProfile.py)
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
exts.append( Extension('unicodedata', ['unicodedata.c']) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
libs = []
if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)):
# May be necessary on AIX for flock function
libs = ['bsd']
exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# Memory-mapped files (also works on Win32).
exts.append( Extension('mmap', ['mmapmodule.c']) )
# Lance Ellinghaus's syslog module
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# Determine if readline is already linked against curses or tinfo.
if do_readline and find_executable('ldd'):
# Cannot use os.popen here in py3k.
tmpfile = os.path.join(self.build_temp, 'readline_termcap_lib')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system("ldd %s > %s" % (do_readline, tmpfile))
if ret >> 8 == 0:
with open(tmpfile) as fp:
for ln in fp:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
# termcap interface split out from ncurses
if 'tinfo' in ln:
readline_termcap_library = 'tinfo'
break
os.unlink(tmpfile)
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if dep_target and dep_target.split('.') < ['10', '5']:
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a staticly linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('crypt', ['cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# POSIX subprocess module helper.
exts.append( Extension('_posixsubprocess', ['_posixsubprocess.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
'/usr/local/ssl/include',
'/usr/contrib/ssl/include/'
]
ssl_incs = find_file('openssl/ssl.h', inc_dirs,
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', inc_dirs,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs,
['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
# look for the openssl version header on the compiler search path.
opensslv_h = find_file('openssl/opensslv.h', [],
inc_dirs + search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
if sys.platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
with open(name, 'r') as incfile:
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = eval(m.group(1))
except IOError as msg:
print("IOError while reading opensshv.h:", msg)
pass
#print('openssl_ver = 0x%08x' % openssl_ver)
min_openssl_ver = 0x00907000
have_any_openssl = ssl_incs is not None and ssl_libs is not None
have_usable_openssl = (have_any_openssl and
openssl_ver >= min_openssl_ver)
if have_any_openssl:
if have_usable_openssl:
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
depends = ['hashlib.h'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
print("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
min_sha2_openssl_ver = 0x00908000
if COMPILED_WITH_PYDEBUG or openssl_ver < min_sha2_openssl_ver:
# OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash
exts.append( Extension('_sha256', ['sha256module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha512', ['sha512module.c'],
depends=['hashlib.h']) )
if COMPILED_WITH_PYDEBUG or not have_usable_openssl:
# no openssl at all, use our own md5 and sha1
exts.append( Extension('_md5', ['md5module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha1', ['sha1module.c'],
depends=['hashlib.h']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module dbm/__init__.py provides an
# implementation independent wrapper for these; dbm/dumb.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (5, 1)
min_db_ver = (3, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
"""Returns a boolean if the given BerkeleyDB version is acceptable.
Args:
db_ver: A tuple of the version to verify.
"""
if not (min_db_ver <= db_ver <= max_db_ver):
return False
return True
def gen_db_minor_ver_nums(major):
if major == 4:
for x in range(max_db_ver[1]+1):
if allow_db_ver((4, x)):
yield x
elif major == 3:
for x in (3,):
if allow_db_ver((3, x)):
yield x
else:
raise ValueError("unknown major BerkeleyDB version", major)
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if sys.platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if sys.platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print("db: looking for db.h in", f)
if os.path.exists(f):
with open(f, 'rb') as file:
f = file.read()
m = re.search(br"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(br"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(br"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print("db.h:", db_ver, "patch", db_patch,
"being ignored (4.6.x must be >= 4.6.21)")
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print("db.h: found", db_ver, "in", d)
else:
# we already found a header for this library version
if db_setup_debug: print("db.h: ignoring", d)
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print("db.h: no version number version in", d)
db_found_vers = list(db_ver_inc_map.keys())
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
if sys.platform != 'darwin':
db_dirs_to_check = list(filter(os.path.isdir, db_dirs_to_check))
else:
# Same as other branch, but takes OSX SDK into account
tmp = []
for dn in db_dirs_to_check:
if is_macosx_sdk_path(dn):
if os.path.isdir(os.path.join(sysroot, dn[1:])):
tmp.append(dn)
else:
if os.path.isdir(dn):
tmp.append(dn)
db_dirs_to_check = tmp
db_dirs_to_check = tmp
# Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print("db lib: ", dblib, "not found")
except db_found:
if db_setup_debug:
print("bsddb using BerkeleyDB lib:", db_ver, dblib)
print("bsddb lib dir:", dblib_dir, " inc dir:", db_incdir)
db_incs = [db_incdir]
dblibs = [dblib]
else:
if db_setup_debug: print("db: no appropriate library found")
db_incs = None
dblibs = []
dblib_dir = None
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
if sys.platform == 'darwin':
sysroot = macosx_sdk_root()
for d_ in inc_dirs + sqlite_inc_paths:
d = d_
if sys.platform == 'darwin' and is_macosx_sdk_path(d):
d = os.path.join(sysroot, d[1:])
f = os.path.join(d, "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print("sqlite: found %s"%f)
with open(f) as file:
incf = file.read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print("%s/sqlite3.h: version %s"%(d, sqlite_version))
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print("%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION))
elif sqlite_setup_debug:
print("sqlite: %s had no SQLITE_VERSION"%(f,))
if sqlite_incdir:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
if sqlite_libfile:
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if sys.platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
# Enable support for loadable extensions in the sqlite3 module
# if --enable-loadable-sqlite-extensions configure option is used.
if '--enable-loadable-sqlite-extensions' not in sysconfig.get_config_var("CONFIG_ARGS"):
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
if sys.platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=["Modules/_sqlite",
sqlite_incdir],
library_dirs=sqlite_libdir,
runtime_library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
missing.append('_sqlite3')
dbm_order = ['gdbm']
# The standard Unix dbm module:
if platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
if arg.startswith('--with-dbmliborder=')]
if dbm_args:
dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":")
else:
dbm_order = "ndbm:gdbm:bdb".split(":")
dbmext = None
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others have -lgdbm_compat,
# others don't have either
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
elif self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
ndbm_libs = ['gdbm_compat']
else:
ndbm_libs = []
print("building dbm using ndbm")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_NDBM_H',None),
],
libraries=ndbm_libs)
break
elif cand == "gdbm":
if self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_NDBM_H', None),
],
libraries = gdbm_libs)
break
if find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_DASH_NDBM_H', None),
],
libraries = gdbm_libs)
break
elif cand == "bdb":
if db_incs is not None:
print("building dbm using bdb")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[
('HAVE_BERKDB_H', None),
('DB_DBM_HSEARCH', None),
],
libraries=dblibs)
break
if dbmext is not None:
exts.append(dbmext)
else:
missing.append('_dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if ('gdbm' in dbm_order and
self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('_gdbm', ['_gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('_gdbm')
# Unix-only modules
if platform != 'win32':
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
exts.append( Extension('resource', ['resource.c']) )
# Sun yellow pages. Some systems have the functions in libc.
if (platform not in ['cygwin', 'qnx6'] and
find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None):
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
else:
missing.append('nis')
else:
missing.extend(['nis', 'resource', 'termios'])
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
panel_library = 'panel'
if curses_library.startswith('ncurses'):
if curses_library == 'ncursesw':
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
curses_libs = [curses_library]
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif curses_library == 'curses' and platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = find_file('zlib.h', [], inc_dirs)
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
with open(zlib_h) as fp:
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if sys.platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if sys.platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('bz2', ['bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('bz2')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a group of
# developers on SourceForge; see www.libexpat.org for more information.
# The pyexpat module was written by Paul Prescod after a prototype by
# Jack Jansen. The Expat source is included in Modules/expat/. Usage
# of a system shared libexpat.so is possible with --with-system-expat
# configure option.
#
# More information on Expat can be found at www.libexpat.org.
#
if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"):
expat_inc = []
define_macros = []
expat_lib = ['expat']
expat_sources = []
expat_depends = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
]
expat_lib = []
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
expat_depends = ['expat/ascii.h',
'expat/asciitab.h',
'expat/expat.h',
'expat/expat_config.h',
'expat/expat_external.h',
'expat/internal.h',
'expat/latin1tab.h',
'expat/utf8tab.h',
'expat/xmlrole.h',
'expat/xmltok.h',
'expat/xmltok_impl.h'
]
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['pyexpat.c'] + expat_sources,
depends = expat_depends,
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
depends = ['pyexpat.c'] + expat_sources +
expat_depends,
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
elif platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
elif platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict()
libraries = []
elif platform.startswith('openbsd'):
macros = dict()
libraries = []
elif platform.startswith('netbsd'):
macros = dict()
libraries = []
else: # Linux and other unices
macros = dict()
libraries = ['rt']
if platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
'_multiprocessing/pipe_connection.c',
'_multiprocessing/socket_connection.c',
'_multiprocessing/win32_functions.c'
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/socket_connection.c'
]
if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not
sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
if sysconfig.get_config_var('WITH_THREAD'):
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=list(macros.items()),
include_dirs=["Modules/_multiprocessing"]))
else:
missing.append('_multiprocessing')
# End multiprocessing
# Platform-specific libraries
if (platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
'freebsd7', 'freebsd8')
or platform.startswith("gnukfreebsd")):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if sys.platform == 'darwin':
exts.append(
Extension('_gestalt', ['_gestalt.c'],
extra_link_args=['-framework', 'Carbon'])
)
exts.append(
Extension('_scproxy', ['_scproxy.c'],
extra_link_args=[
'-framework', 'SystemConfiguration',
'-framework', 'CoreFoundation',
]))
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
return missing
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
    """Detect and configure an AquaTk (framework) build of _tkinter.

    Looks for Tcl.framework and Tk.framework in the standard macOS
    framework directories and, when both are found, appends a
    framework-based '_tkinter' Extension to self.extensions.

    Returns 1 when the framework build was configured, 0 to tell the
    caller to fall back to the normal unix Tcl/Tk search.
    """
    # The _tkinter module, using frameworks. Since frameworks are quite
    # different the UNIX search logic is not sharable.
    from os.path import join, exists
    framework_dirs = [
        '/Library/Frameworks',
        '/System/Library/Frameworks/',
        # BUG FIX: the original used join(os.getenv('HOME'),
        # '/Library/Frameworks').  Because the second component is
        # absolute, os.path.join discards the first, so the per-user
        # frameworks directory was never searched (and an unset HOME
        # raised TypeError).  expanduser handles both cases.
        join(os.path.expanduser('~'), 'Library/Frameworks')
    ]

    sysroot = macosx_sdk_root()

    # Find the directory that contains the Tcl.framework and Tk.framework
    # bundles.
    # XXX distutils should support -F!
    for F in framework_dirs:
        # both Tcl.framework and Tk.framework should be present
        for fw in 'Tcl', 'Tk':
            if is_macosx_sdk_path(F):
                if not exists(join(sysroot, F[1:], fw + '.framework')):
                    break
            else:
                if not exists(join(F, fw + '.framework')):
                    break
        else:
            # ok, F is now directory with both frameworks. Continue
            # building
            break
    else:
        # Tk and Tcl frameworks not found. Normal "unix" tkinter search
        # will now resume.
        return 0

    # For 8.4a2, we must add -I options that point inside the Tcl and Tk
    # frameworks. In later release we should hopefully be able to pass
    # the -F option to gcc, which specifies a framework lookup path.
    include_dirs = [
        join(F, fw + '.framework', H)
        for fw in ('Tcl', 'Tk')
        for H in ('Headers', 'Versions/Current/PrivateHeaders')
    ]

    # For 8.4a2, the X11 headers are not included. Rather than include a
    # complicated search, this is a hard-coded path. It could bail out
    # if X11 libs are not found...
    include_dirs.append('/usr/X11R6/include')
    frameworks = ['-framework', 'Tcl', '-framework', 'Tk']

    # All existing framework builds of Tcl/Tk don't support 64-bit
    # architectures.
    cflags = sysconfig.get_config_vars('CFLAGS')[0]
    # FIX: raw string -- '\s'/'\w' in a plain literal only works because
    # Python passes unknown escapes through (deprecated behaviour).
    archs = re.findall(r'-arch\s+(\w+)', cflags)

    tmpfile = os.path.join(self.build_temp, 'tk.arch')
    if not os.path.exists(self.build_temp):
        os.makedirs(self.build_temp)

    # Note: cannot use os.popen or subprocess here, that
    # requires extensions that are not available here.
    if is_macosx_sdk_path(F):
        os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(os.path.join(sysroot, F[1:]), tmpfile))
    else:
        os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(F, tmpfile))

    with open(tmpfile) as fp:
        detected_archs = []
        for ln in fp:
            a = ln.split()[-1]
            if a in archs:
                detected_archs.append(ln.split()[-1])
    os.unlink(tmpfile)

    # Restrict the build to the architectures the Tk binary supports.
    for a in detected_archs:
        frameworks.append('-arch')
        frameworks.append(a)

    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)],
                    include_dirs=include_dirs,
                    libraries=[],
                    extra_compile_args=frameworks[2:],
                    extra_link_args=frameworks,
                    )
    self.extensions.append(ext)
    return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
    """Locate a unix-style Tcl/Tk installation and register _tkinter.

    On Darwin, first tries the AquaTk framework build via
    detect_tkinter_darwin(); if that succeeds nothing more is done here.
    Returns early (registering nothing) when the Tcl/Tk libraries or
    headers cannot be found.  On non-debug builds it also registers the
    xxlimited demo extension.
    """
    # The _tkinter module.
    #
    # Rather than complicate the code below, detecting and building
    # AquaTk is a separate method. Only one Tkinter will be built on
    # Darwin - either AquaTk, if it is found, or X11 based Tk.
    platform = self.get_platform()
    if (platform == 'darwin' and
        self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
        return

    # Assume we haven't found any of the libraries or include files
    # The versions with dots are used on Unix, and the versions without
    # dots on Windows, for detection by cygwin.
    tcllib = tklib = tcl_includes = tk_includes = None
    for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83',
                    '8.2', '82', '8.1', '81', '8.0', '80']:
        tklib = self.compiler.find_library_file(lib_dirs,
                                                'tk' + version)
        tcllib = self.compiler.find_library_file(lib_dirs,
                                                 'tcl' + version)
        if tklib and tcllib:
            # Exit the loop when we've found the Tcl/Tk libraries
            break

    # Now check for the header files
    if tklib and tcllib:
        # Check for the include files on Debian and {Free,Open}BSD, where
        # they're put in /usr/include/{tcl,tk}X.Y
        dotversion = version
        if '.' not in dotversion and "bsd" in sys.platform.lower():
            # OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
            # but the include subdirs are named like .../include/tcl8.3.
            dotversion = dotversion[:-1] + '.' + dotversion[-1]
        tcl_include_sub = []
        tk_include_sub = []
        for dir in inc_dirs:
            tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
            tk_include_sub += [dir + os.sep + "tk" + dotversion]
        tk_include_sub += tcl_include_sub
        tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
        tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)

    if (tcllib is None or tklib is None or
        tcl_includes is None or tk_includes is None):
        self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
        return

    # OK... everything seems to be present for Tcl/Tk.
    include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
    for dir in tcl_includes + tk_includes:
        if dir not in include_dirs:
            include_dirs.append(dir)

    # Check for various platform-specific directories
    if platform == 'sunos5':
        include_dirs.append('/usr/openwin/include')
        added_lib_dirs.append('/usr/openwin/lib')
    elif os.path.exists('/usr/X11R6/include'):
        include_dirs.append('/usr/X11R6/include')
        added_lib_dirs.append('/usr/X11R6/lib64')
        added_lib_dirs.append('/usr/X11R6/lib')
    elif os.path.exists('/usr/X11R5/include'):
        include_dirs.append('/usr/X11R5/include')
        added_lib_dirs.append('/usr/X11R5/lib')
    else:
        # Assume default location for X11
        include_dirs.append('/usr/X11/include')
        added_lib_dirs.append('/usr/X11/lib')

    # If Cygwin, then verify that X is installed before proceeding
    if platform == 'cygwin':
        x11_inc = find_file('X11/Xlib.h', [], include_dirs)
        if x11_inc is None:
            # X11 headers absent: _tkinter cannot be built at all
            return

    # Check for BLT extension
    if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                       'BLT8.0'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT8.0')
    elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                         'BLT'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT')

    # Add the Tcl/Tk libraries
    libs.append('tk'+ version)
    libs.append('tcl'+ version)

    if platform in ['aix3', 'aix4']:
        libs.append('ld')

    # Finally, link with the X11 libraries (not appropriate on cygwin)
    if platform != "cygwin":
        libs.append('X11')

    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)] + defs,
                    include_dirs = include_dirs,
                    libraries = libs,
                    library_dirs = added_lib_dirs,
                    )
    self.extensions.append(ext)

    ## # Uncomment these lines if you want to play with xxmodule.c
    ## ext = Extension('xx', ['xxmodule.c'])
    ## self.extensions.append(ext)

    # xxlimited exercises the Py_LIMITED_API stable ABI, which is only
    # meaningful on non-debug builds (no 'd' in the ABI flags).
    if 'd' not in sys.abiflags:
        ext = Extension('xxlimited', ['xxlimited.c'],
                        define_macros=[('Py_LIMITED_API', 1)])
        self.extensions.append(ext)

    # XXX handle these, but how to detect?
    # *** Uncomment and edit for PIL (TkImaging) extension only:
    # -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \
    # *** Uncomment and edit for TOGL extension only:
    # -DWITH_TOGL togl.c \
    # *** Uncomment these for TOGL extension only:
    # -lGL -lGLU -lXext -lXmu \
def configure_ctypes_darwin(self, ext):
    """Configure _ctypes on Darwin (OS X) using the preconfigured
    libffi sources shipped in Modules/_ctypes/libffi_osx.

    Extends *ext* (the _ctypes Extension) in place with the libffi
    sources and include directories, and returns True.
    """
    base = os.path.abspath(os.path.join(
        sysconfig.get_config_var('srcdir'),
        'Modules', '_ctypes', 'libffi_osx'))

    # Preconfigured sources, relative to the libffi_osx directory.
    rel_sources = ('ffi.c',
                   'x86/darwin64.S',
                   'x86/x86-darwin.S',
                   'x86/x86-ffi_darwin.c',
                   'x86/x86-ffi64.c',
                   'powerpc/ppc-darwin.S',
                   'powerpc/ppc-darwin_closure.S',
                   'powerpc/ppc-ffi_darwin.c',
                   'powerpc/ppc64-darwin_closure.S')

    # Teach the compiler to treat .S (preprocessed assembly) as a
    # C-compiler source extension.
    self.compiler.src_extensions.append('.S')

    ext.include_dirs.extend([os.path.join(base, 'include'),
                             os.path.join(base, 'powerpc')])
    ext.sources.extend(os.path.join(base, name) for name in rel_sources)
    return True
def configure_ctypes(self, ext):
    """Configure the bundled libffi for the _ctypes extension.

    When the system libffi is not being used, runs libffi's own
    configure script (only when its sources changed, or under --force),
    then loads the generated fficonfig.py and extends *ext* with the
    resulting sources, include dirs and compile flags.

    Returns True on success, False when the configure step failed.
    """
    if not self.use_system_libffi:
        if sys.platform == 'darwin':
            # Darwin uses preconfigured sources; no configure run needed.
            return self.configure_ctypes_darwin(ext)

        srcdir = sysconfig.get_config_var('srcdir')
        ffi_builddir = os.path.join(self.build_temp, 'libffi')
        ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
                                     '_ctypes', 'libffi'))
        ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')

        from distutils.dep_util import newer_group

        config_sources = [os.path.join(ffi_srcdir, fname)
                          for fname in os.listdir(ffi_srcdir)
                          if os.path.isfile(os.path.join(ffi_srcdir, fname))]
        # Only rerun libffi's configure when its inputs are newer than
        # the generated fficonfig.py (or when a rebuild is forced).
        if self.force or newer_group(config_sources,
                                     ffi_configfile):
            from distutils.dir_util import mkpath
            mkpath(ffi_builddir)
            config_args = []
            if not self.verbose:
                config_args.append("-q")

            # Pass empty CFLAGS because we'll just append the resulting
            # CFLAGS to Python's; -g or -O2 is to be avoided.
            cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
                  % (ffi_builddir, ffi_srcdir, " ".join(config_args))

            res = os.system(cmd)
            if res or not os.path.exists(ffi_configfile):
                print("Failed to configure _ctypes module")
                return False

        # fficonfig.py is exec'd into a dict; it defines at least
        # 'ffi_sources' and 'ffi_cflags' (see the reads below).
        fficonfig = {}
        with open(ffi_configfile) as f:
            exec(f.read(), globals(), fficonfig)

        # Add .S (preprocessed assembly) to C compiler source extensions.
        self.compiler.src_extensions.append('.S')

        include_dirs = [os.path.join(ffi_builddir, 'include'),
                        ffi_builddir,
                        os.path.join(ffi_srcdir, 'src')]
        extra_compile_args = fficonfig['ffi_cflags'].split()

        ext.sources.extend(os.path.join(ffi_srcdir, f) for f in
                           fficonfig['ffi_sources'])
        ext.include_dirs.extend(include_dirs)
        ext.extra_compile_args.extend(extra_compile_args)
    return True
def detect_ctypes(self, inc_dirs, lib_dirs):
    """Register the _ctypes and _ctypes_test extensions.

    Starts from the bundled libffi (configured later by
    configure_ctypes).  When Python was configured with
    --with-system-ffi, additionally searches for a usable system
    ffi.h/libffi pair and, if found, links against it and sets
    self.use_system_libffi = True.
    """
    self.use_system_libffi = False
    include_dirs = []
    extra_compile_args = []
    extra_link_args = []
    sources = ['_ctypes/_ctypes.c',
               '_ctypes/callbacks.c',
               '_ctypes/callproc.c',
               '_ctypes/stgdict.c',
               '_ctypes/cfield.c']
    depends = ['_ctypes/ctypes.h']

    if sys.platform == 'darwin':
        sources.append('_ctypes/malloc_closure.c')
        sources.append('_ctypes/darwin/dlfcn_simple.c')
        extra_compile_args.append('-DMACOSX')
        include_dirs.append('_ctypes/darwin')
        # XXX Is this still needed?
        ## extra_link_args.extend(['-read_only_relocs', 'warning'])

    elif sys.platform == 'sunos5':
        # XXX This shouldn't be necessary; it appears that some
        # of the assembler code is non-PIC (i.e. it has relocations
        # when it shouldn't. The proper fix would be to rewrite
        # the assembler code to be PIC.
        # This only works with GCC; the Sun compiler likely refuses
        # this option. If you want to compile ctypes with the Sun
        # compiler, please research a proper solution, instead of
        # finding some -z option for the Sun compiler.
        extra_link_args.append('-mimpure-text')

    elif sys.platform.startswith('hp-ux'):
        extra_link_args.append('-fPIC')

    ext = Extension('_ctypes',
                    include_dirs=include_dirs,
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args,
                    libraries=[],
                    sources=sources,
                    depends=depends)
    ext_test = Extension('_ctypes_test',
                         sources=['_ctypes/_ctypes_test.c'])
    self.extensions.extend([ext, ext_test])
    if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
        return
    if sys.platform == 'darwin':
        # OS X 10.5 comes with libffi.dylib; the include files are
        # in /usr/include/ffi
        inc_dirs.append('/usr/include/ffi')

    # NOTE(review): if LIBFFI_INCLUDEDIR is unset this yields [None],
    # which would later raise on `ffi_inc[0] + '/ffi.h'` -- presumably
    # the config var is always a string (possibly '') here; confirm.
    ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
    if not ffi_inc or ffi_inc[0] == '':
        ffi_inc = find_file('ffi.h', [], inc_dirs)
    if ffi_inc is not None:
        ffi_h = ffi_inc[0] + '/ffi.h'
        # Sanity check: make sure the located header really is libffi's.
        with open(ffi_h) as fp:
            while 1:
                line = fp.readline()
                if not line:
                    ffi_inc = None
                    break
                if line.startswith('#define LIBFFI_H'):
                    break
    ffi_lib = None
    if ffi_inc is not None:
        # Prefer the convenience/PIC variants over the plain library.
        for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'):
            if (self.compiler.find_library_file(lib_dirs, lib_name)):
                ffi_lib = lib_name
                break

    if ffi_inc and ffi_lib:
        ext.include_dirs.extend(ffi_inc)
        ext.libraries.append(ffi_lib)
        self.use_system_libffi = True
class PyBuildInstall(install):
    """install command variant used when installing Python itself."""
    # Suppress the warning about installation into the lib_dynload
    # directory, which is not in sys.path when running Python during
    # installation:
    def initialize_options (self):
        install.initialize_options(self)
        # warn_dir=0 disables distutils' "installing into a directory
        # not on sys.path" warning.
        self.warn_dir=0
class PyBuildInstallLib(install_lib):
    """install_lib variant that normalises access modes on installed
    files and directories.

    Behaves exactly like install_lib, then chmods the results: regular
    files get mode 644, shared libraries get 755, and every installed
    directory gets 755.
    """
    # EXT_SUFFIX also ends with SHLIB_SUFFIX, so a single endswith()
    # test below recognises both kinds of shared objects.
    shlib_suffix = sysconfig.get_config_var("SHLIB_SUFFIX")

    def install(self):
        installed = install_lib.install(self)
        self.set_file_modes(installed, 0o644, 0o755)
        self.set_dir_modes(self.install_dir, 0o755)
        return installed

    def set_file_modes(self, files, defaultMode, sharedLibMode):
        """chmod each non-symlink file: sharedLibMode for shared
        libraries, defaultMode for everything else."""
        if not self.is_chmod_supported() or not files:
            return
        for fname in files:
            if os.path.islink(fname):
                continue
            wanted = sharedLibMode if fname.endswith(self.shlib_suffix) \
                     else defaultMode
            log.info("changing mode of %s to %o", fname, wanted)
            if not self.dry_run:
                os.chmod(fname, wanted)

    def set_dir_modes(self, dirname, mode):
        """Recursively chmod every non-symlink directory under dirname."""
        if not self.is_chmod_supported():
            return
        for dirpath, _subdirs, _fnames in os.walk(dirname):
            if os.path.islink(dirpath):
                continue
            log.info("changing mode of %s to %o", dirpath, mode)
            if not self.dry_run:
                os.chmod(dirpath, mode)

    def is_chmod_supported(self):
        """True when the host os module offers chmod()."""
        return hasattr(os, 'chmod')
class PyBuildScripts(build_scripts):
    """build_scripts variant that version-suffixes installed scripts.

    After the normal copy step, every script is renamed in place:
    '2to3' gets the full '-X.Y' version suffix, everything else gets
    the minor-only '.Y' suffix.  Returns the renamed paths.
    """
    def copy_scripts(self):
        outfiles, updated_files = build_scripts.copy_scripts(self)
        fullversion = '-{0[0]}.{0[1]}'.format(sys.version_info)
        minoronly = '.{0[1]}'.format(sys.version_info)
        renamed = []
        renamed_updated = []
        for oldpath in outfiles:
            suffix = fullversion if oldpath.endswith('2to3') else minoronly
            newpath = oldpath + suffix
            log.info('renaming {} to {}'.format(oldpath, newpath))
            os.rename(oldpath, newpath)
            renamed.append(newpath)
            if oldpath in updated_files:
                renamed_updated.append(newpath)
        return renamed, renamed_updated
# Long-description text used for the PyPI metadata in main() (PEP 301).
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, OS/2, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""

# Trove classifiers for the PyPI metadata (non-empty lines are used).
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
    """Entry point: run distutils setup() with CPython's own metadata,
    custom build/install command classes, and the bootstrap _struct
    extension (needed so build_ext actually runs)."""
    # turn off warnings when deprecated modules are imported
    import warnings
    warnings.filterwarnings("ignore",category=DeprecationWarning)
    setup(# PyPI Metadata (PEP 301)
          name = "Python",
          version = sys.version.split()[0],
          url = "http://www.python.org/%s" % sys.version[:3],
          maintainer = "Guido van Rossum and the Python community",
          maintainer_email = "python-dev@python.org",
          description = "A high-level object-oriented programming language",
          long_description = SUMMARY.strip(),
          license = "PSF license",
          classifiers = [x for x in CLASSIFIERS.split("\n") if x],
          platforms = ["Many"],

          # Build info
          cmdclass = {'build_ext': PyBuildExt,
                      'build_scripts': PyBuildScripts,
                      'install': PyBuildInstall,
                      'install_lib': PyBuildInstallLib},
          # The struct module is defined here, because build_ext won't be
          # called unless there's at least one extension module defined.
          ext_modules=[Extension('_struct', ['_struct.c'])],

          # If you change the scripts installed here, you also need to
          # check the PyBuildScripts command above, and change the links
          # created by the bininstall target in Makefile.pre.in
          scripts = ["Tools/scripts/pydoc3", "Tools/scripts/idle3",
                     "Tools/scripts/2to3"]
          )

# --install-platlib
if __name__ == '__main__':
    main()
|
LaoZhongGu/kbengine
|
kbe/src/lib/python/setup.py
|
Python
|
lgpl-3.0
| 84,306
|
[
"VisIt"
] |
ae4606842a1bb0354f187cc1001916a82c0b40e532471803ee5e57cf96ae42a5
|
#~~~~~~~GLOBAL IMPORTS~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class BlastHit(object):
    """
    @class BlastHit
    @brief Object oriented class containing information of one blast hit
    The following instance fields are accessible:
    * q_id : Query sequence name
    * s_id : Subject sequence name
    * identity : % of identity in the hit
    * length : length of the hit
    * mis : Number of mismatches in the hit
    * gap : Number of gaps in the hit
    * q_orient : Orientation of the query along the hit (True = forward)
    * q_start : Hit start position of the query
    * q_end : Hit end position of the query
    * s_orient : Orientation of the subject along the hit (True = forward)
    * s_start : Hit start position of the subject
    * s_end : Hit end position of the subject
    * evalue : E value of the alignment
    * bscore : Bit score of the alignment
    A class list is used to track all instances generated.
    """
    #~~~~~~~CLASS FIELDS~~~~~~~#

    Instances = []  # Class field used for instance tracking
    id_count = 0    # Next unique id to hand out

    #~~~~~~~CLASS METHODS~~~~~~~#

    @classmethod
    def next_id(cls):
        """Return the next unique hit id and advance the counter."""
        cur_id = cls.id_count
        cls.id_count += 1
        return cur_id

    @classmethod
    def count_total(cls):
        """
        @return Overall number of BlastHit object in Instance list
        """
        return len(cls.Instances)

    @classmethod
    def stat_per_ref(cls):
        """
        @return Dict mapping each subject id to
                [number of hits, cumulative hit length]
        """
        d = {}
        for hit in cls.Instances:
            if hit.s_id in d:
                d[hit.s_id][0] += 1
                d[hit.s_id][1] += hit.length
            else:
                d[hit.s_id] = [1, hit.length]
        return d

    @classmethod
    def get(cls):
        """
        @return The list of all BlastHit object generated
        """
        return cls.Instances

    @classmethod
    def get_ref(cls, ref):
        """
        @param ref Name of a reference sequence in the subject database
        @return The list of all BlastHit object generated for this reference
        """
        # BUG FIX: the original compared against the string literal "ref"
        # instead of the parameter, so the filter never matched.
        return [hit for hit in cls.Instances if hit.s_id == ref]

    @classmethod
    def reset_list(cls):
        """
        Reset the instance tracking list and the id counter
        (useful between independent runs).
        """
        cls.Instances = []
        cls.id_count = 0

    #~~~~~~~FUNDAMENTAL METHODS~~~~~~~#

    def __init__(self, q_id, s_id, identity, length, mis, gap, q_start, q_end, s_start, s_end, evalue, bscore):
        """
        Create a BlastHit object which is automatically added to the class
        tracking instance list.
        @param q_id Query sequence name
        @param s_id Subject sequence name
        @param identity % of identity in the hit
        @param length length of the hit
        @param mis Number of mismatches in the hit
        @param gap Number of gaps in the hit
        @param q_start Hit start position of the query
        @param q_end Hit end position of the query
        @param s_start Hit start position of the subject
        @param s_end Hit end position of the subject
        @param evalue E value of the alignment
        @param bscore Bit score of the alignment
        """
        self.id = self.next_id()
        self.q_id = q_id
        self.s_id = s_id
        self.identity = float(identity)
        self.length = int(length)
        self.mis = int(mis)
        self.gap = int(gap)
        self.evalue = float(evalue)
        self.bscore = float(bscore)

        # Autoadapt start and end so that start is always smaller than end
        self.q_start = min(int(q_start), int(q_end))
        self.q_end = max(int(q_start), int(q_end))
        self.s_start = min(int(s_start), int(s_end))
        self.s_end = max(int(s_start), int(s_end))

        # Orientation of the query and subject along the hit, computed
        # from the ORIGINAL coordinate order. True if positive strand.
        self.q_orient = int(q_start) < int(q_end)
        self.s_orient = int(s_start) < int(s_end)

        # Add the instance to the class instance tracking list
        self.Instances.append(self)

    def __repr__(self):
        # FIX: the header line now ends with a newline, the subject line
        # reports the SUBJECT orientation (it previously reused q_orient),
        # and the "Lenght" typo is corrected.
        msg = "HIT {}\n".format(self.id)
        msg += "\tQuery\t{}:{}-{}({})\n".format(self.q_id, self.q_start, self.q_end, "+" if self.q_orient else "-")
        msg += "\tSubject\t{}:{}-{}({})\n".format(self.s_id, self.s_start, self.s_end, "+" if self.s_orient else "-")
        msg += "\tLength : {}\tIdentity : {}%\tEvalue : {}\tBit score : {}\n".format(self.length, self.identity, self.evalue, self.bscore)
        return msg

    def __str__(self):
        return "<Instance of {} from {} >\n".format(self.__class__.__name__, self.__module__)
|
a-slide/pyDNA
|
Blast/BlastHit.py
|
Python
|
gpl-2.0
| 5,084
|
[
"BLAST"
] |
8e84841cea334e6eb0de0eec914669f7b5ef59fda3a7396136651d6775963907
|
"""
Weights.
"""
__author__ = "Sergio J. Rey <srey@asu.edu> "
import copy
from os.path import basename as BASENAME
import math
import warnings
import numpy as np
import scipy.sparse
from scipy.sparse.csgraph import connected_components
#from .util import full, WSP2W resolve import cycle by
#forcing these into methods
from . import adjtools
from ..io.fileio import FileIO as popen
__all__ = ['W', 'WSP']
class W(object):
"""
Spatial weights class.
Parameters
----------
neighbors : dictionary
Key is region ID, value is a list of neighbor IDS.
Example: {'a':['b'],'b':['a','c'],'c':['b']}
weights : dictionary
Key is region ID, value is a list of edge weights.
If not supplied all edge weights are assumed to have a weight of 1.
Example: {'a':[0.5],'b':[0.5,1.5],'c':[1.5]}
id_order : list
An ordered list of ids, defines the order of
observations when iterating over W if not set,
lexicographical ordering is used to iterate and the
id_order_set property will return False. This can be
set after creation by setting the 'id_order' property.
silent_island_warning: boolean
By default pysal.lib will print a warning if the
dataset contains any disconnected observations or
islands. To silence this warning set this
parameter to True.
silent_connected_components : boolean
By default PySAL will print a warning if the
dataset contains any disconnected components in the
adjacency matrix. These are disconnected *groups*
of islands. To silence this warning set this
parameter to True.
ids : list
Values to use for keys of the neighbors and weights dicts.
Attributes (NOTE: these are described by their docstrings. to view, use the `help` function)
----------
asymmetries
cardinalities
component_labels
diagW2
diagWtW
diagWtW_WW
histogram
id2i
id_order
id_order_set
islands
max_neighbors
mean_neighbors
min_neighbors
n
n_components
neighbor_offsets
nonzero
pct_nonzero
s0
s1
s2
s2array
sd
sparse
trcW2
trcWtW
trcWtW_WW
transform
Examples
--------
>>> from pysal.lib.weights.weights import W
>>> neighbors = {0: [3, 1], 1: [0, 4, 2], 2: [1, 5], 3: [0, 6, 4], 4: [1, 3, 7, 5], 5: [2, 4, 8], 6: [3, 7], 7: [4, 6, 8], 8: [5, 7]}
>>> weights = {0: [1, 1], 1: [1, 1, 1], 2: [1, 1], 3: [1, 1, 1], 4: [1, 1, 1, 1], 5: [1, 1, 1], 6: [1, 1], 7: [1, 1, 1], 8: [1, 1]}
>>> w = W(neighbors, weights)
>>> "%.3f"%w.pct_nonzero
'29.630'
Read from external gal file
>>> import pysal.lib
>>> w = pysal.lib.io.open(pysal.lib.examples.get_path("stl.gal")).read()
>>> w.n
78
>>> "%.3f"%w.pct_nonzero
'6.542'
Set weights implicitly
>>> neighbors = {0: [3, 1], 1: [0, 4, 2], 2: [1, 5], 3: [0, 6, 4], 4: [1, 3, 7, 5], 5: [2, 4, 8], 6: [3, 7], 7: [4, 6, 8], 8: [5, 7]}
>>> w = W(neighbors)
>>> round(w.pct_nonzero,3)
29.63
>>> from pysal.lib.weights import lat2W
>>> w = lat2W(100, 100)
>>> w.trcW2
39600.0
>>> w.trcWtW
39600.0
>>> w.transform='r'
>>> round(w.trcW2, 3)
2530.722
>>> round(w.trcWtW, 3)
2533.667
Cardinality Histogram
>>> w.histogram
[(2, 4), (3, 392), (4, 9604)]
Disconnected observations (islands)
>>> from pysal.lib.weights import W
>>> w = W({1:[0],0:[1],2:[], 3:[]})
WARNING: there are 2 disconnected observations
Island ids: [2, 3]
"""
    def __init__(self, neighbors, weights=None, id_order=None,
                 silence_warnings=False, ids=None):
        # `ids` is accepted but not referenced in this constructor —
        # NOTE(review): presumably kept for API compatibility; confirm.
        # A single flag silences both island and component warnings.
        self.silent_island_warning = silence_warnings
        self.silent_connected_components = silence_warnings
        self.transformations = {}
        self.neighbors = neighbors
        if not weights:
            # No weights supplied: default every link to binary weight 1.0.
            weights = {}
            for key in neighbors:
                weights[key] = [1.] * len(neighbors[key])
        self.weights = weights
        self.transformations['O'] = self.weights.copy()  # original weights
        self.transform = 'O'
        if id_order is None:
            # Default iteration order: sorted neighbor-dict keys.
            self._id_order = list(self.neighbors.keys())
            self._id_order.sort()
            self._id_order_set = False
        else:
            self._id_order = id_order
            self._id_order_set = True
        self._reset()
        self._n = len(self.weights)
        # Warn about islands (observations with no neighbors) unless silenced.
        if self.islands and not self.silent_island_warning:
            ni = len(self.islands)
            if ni == 1:
                warnings.warn("There is one disconnected observation"
                              " (no neighbors).\nIsland id: {}"
                              .format(str(self.islands[0])),
                              stacklevel=2)
            else:
                warnings.warn("There are %d disconnected observations" % ni + ' \n '
                              " Island ids: %s" % ', '.join(str(island) for island in self.islands))
        # Separately warn when the graph splits into several connected
        # components even though no single observation is an island.
        if self.n_components > 1 and not self.islands and not self.silent_connected_components:
            warnings.warn("The weights matrix is not fully connected. There are %d components" % self.n_components)

    def _reset(self):
        """Reset properties.
        """
        # Dropping the cache forces every derived property (sparse, s0,
        # cardinalities, ...) to be recomputed on next access.
        self._cache = {}
    @classmethod
    def from_file(cls, path='', format=None, **kwargs):
        # Build a W by reading a weights file (gal/gwt/...). `popen` is the
        # module-level file-handler factory; extra kwargs pass to the reader.
        f = popen(dataPath=path, mode='r', dataFormat=format)
        w = f.read(**kwargs)
        f.close()
        return w

    @classmethod
    def from_shapefile(cls, *args, **kwargs):
        # we could also just "do the right thing," but I think it'd make sense to
        # try and get people to use `Rook.from_shapefile(shapefile)` rather than
        # W.from_shapefile(shapefile, type=`rook`), otherwise we'd need to build
        # a type dispatch table. Generic W should be for stuff we don't know
        # anything about.
        raise NotImplementedError('Use type-specific constructors, like Rook,'
                                  ' Queen, DistanceBand, or Kernel')

    @classmethod
    def from_WSP(cls, WSP, silence_warnings=True):
        # Thin wrapper over the module-level WSP2W converter.
        return WSP2W(WSP, silence_warnings=silence_warnings)

    @classmethod
    def from_adjlist(cls, adjlist, focal_col='focal',
                     neighbor_col='neighbor', weight_col=None):
        """
        Return an adjacency list representation of a weights object.

        Parameters
        ----------
        adjlist : pandas DataFrame
            adjacency list with a minimum of two columns
        focal_col : string
            name of the column with the "source" node ids
        neighbor_col : string
            name of the column with the "destination" node ids
        weight_col : string
            name of the column with the weight information. If not provided and
            the dataframe has no column named "weight" then all weights
            are assumed to be 1.
        """
        if weight_col is None:
            weight_col = 'weight'
        try_weightcol = getattr(adjlist, weight_col)
        # NOTE(review): getattr raises AttributeError when the column is
        # missing entirely; the None check below only fires when the
        # attribute exists but is None, and the fallback column name is
        # hard-coded to 'weight' regardless of weight_col — confirm intent.
        if try_weightcol is None:
            adjlist = adjlist.copy(deep=True)
            adjlist['weight'] = 1
        all_ids = set(adjlist[focal_col].tolist())
        all_ids |= set(adjlist[neighbor_col].tolist())
        grouper = adjlist.groupby(focal_col)
        neighbors = grouper[neighbor_col].apply(list).to_dict()
        weights = grouper[weight_col].apply(list).to_dict()
        # Ids that never appear as a focal observation become islands
        # (empty neighbor and weight lists).
        neighbors.update({k:[] for k in
                          all_ids.difference(list(neighbors.keys()))})
        weights.update({k:[] for k in
                        all_ids.difference(list(weights.keys()))})
        return cls(neighbors=neighbors, weights=weights)
def to_adjlist(self, remove_symmetric=False,
focal_col='focal', neighbor_col='neighbor', weight_col='weight'):
"""
Compute an adjacency list representation of a weights object.
Parameters
----------
remove_symmetric : bool
whether or not to remove ``symmetric'' entries. If the W is symmetric,
a standard ``directed'' adjacency list will contain both the forward and
backward links by default because adjacency lists are a directed
graph representation. If this is True, a W created from this adjacency list
**MAY NOT BE THE SAME** as the original W. If you would like to
consider (1,2) and (2,1) as distinct links, leave this as "False".
focal_col : string
name of the column in which to store "source" node ids.
neighbor_col : string
name of the column in which to store "destination" node ids.
weight_col : string
name of the column in which to store weight information.
"""
try:
import pandas as pd
except ImportError:
raise ImportError('pandas must be installed to use this method')
adjlist = pd.DataFrame(((idx, n,w) for idx, neighb in self
for n,w in list(neighb.items())),
columns = ('focal', 'neighbor', 'weight'))
return adjtools.filter_adjlist(adjlist) if remove_symmetric else adjlist
    def to_networkx(self):
        """
        Convert a weights object to a networkx graph.

        Returns
        -------
        a networkx graph representation of the W object
        """
        try:
            import networkx as nx
        except ImportError:
            raise ImportError("NetworkX is required to use this function.")
        # Any asymmetric weight pair implies a directed graph; a fully
        # symmetric W maps onto an undirected Graph.
        G = nx.DiGraph() if len(self.asymmetries)>0 else nx.Graph()
        return nx.from_scipy_sparse_matrix(self.sparse, create_using=G)
@classmethod
def from_networkx(cls, graph, weight_col='weight'):
"""
Convert a networkx graph to a PySAL W object.
Parameters
----------
graph : networkx graph
the graph to convert to a W
weight_col : string
if the graph is labeled, this should be the
name of the field to use as the weight for
the W.
Returns
--------
a pysal.weights.W object containing the same graph
as the networkx graph
"""
try:
import networkx as nx
except ImportError:
raise ImportError("NetworkX is required to use this function.")
sparse_matrix = nx.to_scipy_sparse_matrix(graph)
return WSP(sparse_matrix).to_W()
neighbors = dict()
weights = dict()
for focal in graph.nodes():
links = graph[focal]
neighbors.update({focal:[]})
weights.update({focal:[]})
for neighbor, weight in list(links.items()):
neighbors[focal].append(neighbor)
if weight == {}:
weights[focal].append(1)
else:
weights[focal].append(weight[weight_col])
return cls(neighbors=neighbors, weights=weights)
    @property
    def sparse(self):
        """Sparse matrix object.
        For any matrix manipulations required for w, w.sparse should be
        used. This is based on scipy.sparse.
        """
        # Built lazily and memoized; W._reset() clears the cache.
        if 'sparse' not in self._cache:
            self._sparse = self._build_sparse()
            self._cache['sparse'] = self._sparse
        return self._sparse

    @property
    def n_components(self):
        """Store whether the adjacency matrix is fully connected.
        """
        # One call computes (and caches) both the component count and the
        # per-observation labels; component_labels shares this cache.
        if 'n_components' not in self._cache:
            self._n_components, self._component_labels = connected_components(self.sparse)
            self._cache['n_components'] = self._n_components
            self._cache['component_labels'] = self._component_labels
        return self._n_components

    @property
    def component_labels(self):
        """Store the graph component in which each observation falls.
        """
        if 'component_labels' not in self._cache:
            self._n_components, self._component_labels = connected_components(self.sparse)
            self._cache['n_components'] = self._n_components
            self._cache['component_labels'] = self._component_labels
        return self._component_labels

    def _build_sparse(self):
        """Construct the sparse attribute.
        """
        # Assemble COO-style (row, col, data) triplets following id_order,
        # then build an n x n CSR matrix from them.
        row = []
        col = []
        data = []
        id2i = self.id2i
        for i, neigh_list in list(self.neighbor_offsets.items()):
            card = self.cardinalities[i]
            row.extend([id2i[i]] * card)
            col.extend(neigh_list)
            data.extend(self.weights[i])
        row = np.array(row)
        col = np.array(col)
        data = np.array(data)
        s = scipy.sparse.csr_matrix((data, (row, col)), shape=(self.n, self.n))
        return s

    @property
    def id2i(self):
        """Dictionary where the key is an ID and the value is that ID's
        index in W.id_order.
        """
        if 'id2i' not in self._cache:
            self._id2i = {}
            for i, id_i in enumerate(self._id_order):
                self._id2i[id_i] = i
            self._id2i = self._id2i  # no-op self-assignment; kept as-is
            self._cache['id2i'] = self._id2i
        return self._id2i

    @property
    def n(self):
        """Number of units.
        """
        if "n" not in self._cache:
            self._n = len(self.neighbors)
            self._cache['n'] = self._n
        return self._n
    @property
    def s0(self):
        """s0 is defined as
        .. math::
               s0=\sum_i \sum_j w_{i,j}
        """
        # Global sum of all weights.
        if 's0' not in self._cache:
            self._s0 = self.sparse.sum()
            self._cache['s0'] = self._s0
        return self._s0

    @property
    def s1(self):
        """s1 is defined as
        .. math::
               s1=1/2 \sum_i \sum_j (w_{i,j} + w_{j,i})^2
        """
        if 's1' not in self._cache:
            t = self.sparse.transpose()
            t = t + self.sparse
            t2 = t.multiply(t)  # element-wise square
            self._s1 = t2.sum() / 2.
            self._cache['s1'] = self._s1
        return self._s1

    @property
    def s2array(self):
        """Individual elements comprising s2.

        See Also
        --------
        s2
        """
        # (row sum + column sum)^2 per observation, as an (n, 1) array.
        if 's2array' not in self._cache:
            s = self.sparse
            self._s2array = np.array(s.sum(1) + s.sum(0).transpose()) ** 2
            self._cache['s2array'] = self._s2array
        return self._s2array

    @property
    def s2(self):
        """s2 is defined as
        .. math::
                s2=\sum_j (\sum_i w_{i,j} + \sum_i w_{j,i})^2
        """
        if 's2' not in self._cache:
            self._s2 = self.s2array.sum()
            self._cache['s2'] = self._s2
        return self._s2
@property
def trcW2(self):
"""Trace of :math:`WW`.
See Also
--------
diagW2
"""
if 'trcW2' not in self._cache:
self._trcW2 = self.diagW2.sum()
self._cache['trcw2'] = self._trcW2
return self._trcW2
@property
def diagW2(self):
"""Diagonal of :math:`WW`.
See Also
--------
trcW2
"""
if 'diagw2' not in self._cache:
self._diagW2 = (self.sparse * self.sparse).diagonal()
self._cache['diagW2'] = self._diagW2
return self._diagW2
    @property
    def diagWtW(self):
        """Diagonal of :math:`W^{'}W`.

        See Also
        --------
        trcWtW
        """
        if 'diagWtW' not in self._cache:
            self._diagWtW = (self.sparse.transpose() * self.sparse).diagonal()
            self._cache['diagWtW'] = self._diagWtW
        return self._diagWtW

    @property
    def trcWtW(self):
        """Trace of :math:`W^{'}W`.

        See Also
        --------
        diagWtW
        """
        if 'trcWtW' not in self._cache:
            self._trcWtW = self.diagWtW.sum()
            self._cache['trcWtW'] = self._trcWtW
        return self._trcWtW

    @property
    def diagWtW_WW(self):
        """Diagonal of :math:`W^{'}W + WW`.
        """
        if 'diagWtW_WW' not in self._cache:
            wt = self.sparse.transpose()
            w = self.sparse
            self._diagWtW_WW = (wt * w + w * w).diagonal()
            self._cache['diagWtW_WW'] = self._diagWtW_WW
        return self._diagWtW_WW

    @property
    def trcWtW_WW(self):
        """Trace of :math:`W^{'}W + WW`.
        """
        if 'trcWtW_WW' not in self._cache:
            self._trcWtW_WW = self.diagWtW_WW.sum()
            self._cache['trcWtW_WW'] = self._trcWtW_WW
        return self._trcWtW_WW

    @property
    def pct_nonzero(self):
        """Percentage of nonzero weights.
        """
        # nnz relative to the full n*n matrix, expressed in percent.
        if 'pct_nonzero' not in self._cache:
            self._pct_nonzero = 100. * self.sparse.nnz / (1. * self._n ** 2)
            self._cache['pct_nonzero'] = self._pct_nonzero
        return self._pct_nonzero
    @property
    def cardinalities(self):
        """Number of neighbors for each observation.
        """
        if 'cardinalities' not in self._cache:
            c = {}
            for i in self._id_order:
                c[i] = len(self.neighbors[i])
            self._cardinalities = c
            self._cache['cardinalities'] = self._cardinalities
        return self._cardinalities

    @property
    def max_neighbors(self):
        """Largest number of neighbors.
        """
        if 'max_neighbors' not in self._cache:
            self._max_neighbors = max(self.cardinalities.values())
            self._cache['max_neighbors'] = self._max_neighbors
        return self._max_neighbors

    @property
    def mean_neighbors(self):
        """Average number of neighbors.
        """
        if 'mean_neighbors' not in self._cache:
            self._mean_neighbors = np.mean(list(self.cardinalities.values()))
            self._cache['mean_neighbors'] = self._mean_neighbors
        return self._mean_neighbors

    @property
    def min_neighbors(self):
        """Minimum number of neighbors.
        """
        if 'min_neighbors' not in self._cache:
            self._min_neighbors = min(self.cardinalities.values())
            self._cache['min_neighbors'] = self._min_neighbors
        return self._min_neighbors

    @property
    def nonzero(self):
        """Number of nonzero weights.
        """
        if 'nonzero' not in self._cache:
            self._nonzero = self.sparse.nnz
            self._cache['nonzero'] = self._nonzero
        return self._nonzero

    @property
    def sd(self):
        """Standard deviation of number of neighbors.
        """
        if 'sd' not in self._cache:
            self._sd = np.std(list(self.cardinalities.values()))
            self._cache['sd'] = self._sd
        return self._sd

    @property
    def asymmetries(self):
        """List of id pairs with asymmetric weights.
        """
        if 'asymmetries' not in self._cache:
            self._asymmetries = self.asymmetry()
            self._cache['asymmetries'] = self._asymmetries
        return self._asymmetries

    @property
    def islands(self):
        """List of ids without any neighbors.
        """
        if 'islands' not in self._cache:
            self._islands = [i for i,
                             c in list(self.cardinalities.items()) if c == 0]
            self._cache['islands'] = self._islands
        return self._islands

    @property
    def histogram(self):
        """Cardinality histogram as a list of (cardinality, count) tuples,
        pairing each neighbor count with the number of observations that
        have that many neighbors.
        """
        if 'histogram' not in self._cache:
            ct, bin = np.histogram(list(self.cardinalities.values()),
                                   list(range(self.min_neighbors, self.max_neighbors + 2)))
            self._histogram = list(zip(bin, ct))
            self._cache['histogram'] = self._histogram
        return self._histogram
    def __getitem__(self, key):
        """Allow a dictionary like interaction with the weights class.

        Examples
        --------
        >>> from pysal.lib.weights import lat2W
        >>> w = lat2W()
        >>> w[0] == dict({1: 1.0, 5: 1.0})
        True
        """
        # w[id] -> {neighbor_id: weight, ...} for that observation.
        return dict(list(zip(self.neighbors[key], self.weights[key])))

    def __iter__(self):
        """
        Support iteration over weights.

        Examples
        --------
        >>> from pysal.lib.weights import lat2W
        >>> w=lat2W(3,3)
        >>> for i,wi in enumerate(w):
        ...     print(i,wi[0])
        ...
        0 0
        1 1
        2 2
        3 3
        4 4
        5 5
        6 6
        7 7
        8 8
        >>>
        """
        # Yields (id, {neighbor_id: weight}) pairs following id_order.
        for i in self._id_order:
            yield i, dict(list(zip(self.neighbors[i], self.weights[i])))
    def remap_ids(self, new_ids):
        '''
        In place modification throughout `W` of id values from `w.id_order` to
        `new_ids` in all
        ...

        Parameters
        ----------
        new_ids : list/ndarray
            Aligned list of new ids to be inserted. Note that first
            element of new_ids will replace first element of
            w.id_order, second element of new_ids replaces second
            element of w.id_order and so on.

        Examples
        --------
        >>> from pysal.lib.weights import lat2W
        >>> w = lat2W(3, 3)
        >>> w.id_order
        [0, 1, 2, 3, 4, 5, 6, 7, 8]
        >>> w.neighbors[0]
        [3, 1]
        >>> new_ids = ['id%i'%id for id in w.id_order]
        >>> _ = w.remap_ids(new_ids)
        >>> w.id_order
        ['id0', 'id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7', 'id8']
        >>> w.neighbors['id0']
        ['id3', 'id1']
        '''
        old_ids = self._id_order
        if len(old_ids) != len(new_ids):
            raise Exception("W.remap_ids: length of `old_ids` does not match \
            that of new_ids")
        if len(set(new_ids)) != len(new_ids):
            raise Exception("W.remap_ids: list `new_ids` contains duplicates")
        else:
            new_neighbors = {}
            new_weights = {}
            old_transformations = self.transformations['O'].copy()
            new_transformations = {}
            for o,n in zip(old_ids, new_ids):
                o_neighbors = self.neighbors[o]
                o_weights = self.weights[o]
                # Translate every old neighbor id to its positionally
                # aligned new id (old_ids.index is O(n) per neighbor).
                n_neighbors = [ new_ids[old_ids.index(j)] for j in o_neighbors]
                new_neighbors[n] = n_neighbors
                new_weights[n] = o_weights[:]
                new_transformations[n] = old_transformations[o]
            self.neighbors = new_neighbors
            self.weights = new_weights
            self.transformations["O"] = new_transformations
            # NOTE(review): this rewrites the iteration order in place via
            # the list returned by the id_order property; `id_order` here is
            # the position of each old id in the current order — confirm the
            # intended semantics against upstream before changing.
            id_order = [ self._id_order.index(o) for o in old_ids]
            for i,id_ in enumerate(id_order):
                self.id_order[id_] = new_ids[i]
            self._reset()
    def __set_id_order(self, ordered_ids):
        """
        Set the iteration order in w.
        W can be iterated over. On construction the iteration order is set to
        the lexicographic order of the keys in the w.weights dictionary. If a specific order
        is required it can be set with this method.

        Parameters
        ----------
        ordered_ids : sequence
            identifiers for observations in specified order

        Notes
        -----
        ordered_ids is checked against the ids implied by the keys in
        w.weights. If they are not equivalent sets an exception is raised and
        the iteration order is not changed.

        Examples
        --------
        >>> from pysal.lib.weights import lat2W
        >>> w=lat2W(3,3)
        >>> for i,wi in enumerate(w):
        ...     print(i, wi[0])
        ...
        0 0
        1 1
        2 2
        3 3
        4 4
        5 5
        6 6
        7 7
        8 8
        >>> w.id_order
        [0, 1, 2, 3, 4, 5, 6, 7, 8]
        >>> w.id_order=range(8,-1,-1)
        >>> list(w.id_order)
        [8, 7, 6, 5, 4, 3, 2, 1, 0]
        >>> for i,w_i in enumerate(w):
        ...     print(i,w_i[0])
        ...
        0 8
        1 7
        2 6
        3 5
        4 4
        5 3
        6 2
        7 1
        8 0
        >>>
        """
        # New order must be a permutation of the existing ids; a reorder
        # invalidates every cached derived quantity, hence _reset().
        if set(self._id_order) == set(ordered_ids):
            self._id_order = ordered_ids
            self._id_order_set = True
            self._reset()
        else:
            raise Exception('ordered_ids do not align with W ids')

    def __get_id_order(self):
        """Returns the ids for the observations in the order in which they
        would be encountered if iterating over the weights.
        """
        return self._id_order

    id_order = property(__get_id_order, __set_id_order)

    @property
    def id_order_set(self):
        """
        Returns True if user has set id_order, False if not.

        Examples
        --------
        >>> from pysal.lib.weights import lat2W
        >>> w=lat2W()
        >>> w.id_order_set
        True
        """
        return self._id_order_set

    @property
    def neighbor_offsets(self):
        """
        Given the current id_order, neighbor_offsets[id] is the offsets of the
        id's neighbors in id_order.

        Returns
        -------
        list
            offsets of the id's neighbors in id_order

        Examples
        --------
        >>> from pysal.lib.weights import W
        >>> neighbors={'c': ['b'], 'b': ['c', 'a'], 'a': ['b']}
        >>> weights ={'c': [1.0], 'b': [1.0, 1.0], 'a': [1.0]}
        >>> w=W(neighbors,weights)
        >>> w.id_order = ['a','b','c']
        >>> w.neighbor_offsets['b']
        [2, 0]
        >>> w.id_order = ['b','a','c']
        >>> w.neighbor_offsets['b']
        [2, 1]
        """
        # Cached under 'neighbors_0'; maps each id's neighbor ids to their
        # positional indices in the current id_order (used by _build_sparse).
        if "neighbors_0" not in self._cache:
            self.__neighbors_0 = {}
            id2i = self.id2i
            for j, neigh_list in list(self.neighbors.items()):
                self.__neighbors_0[j] = [id2i[neigh] for neigh in neigh_list]
            self._cache['neighbors_0'] = self.__neighbors_0
        return self.__neighbors_0
    def get_transform(self):
        """
        Getter for transform property.

        Returns
        -------
        transformation : string (or none)

        Examples
        --------
        >>> from pysal.lib.weights import lat2W
        >>> w=lat2W()
        >>> w.weights[0]
        [1.0, 1.0]
        >>> w.transform
        'O'
        >>> w.transform='r'
        >>> w.weights[0]
        [0.5, 0.5]
        >>> w.transform='b'
        >>> w.weights[0]
        [1.0, 1.0]
        >>>
        """
        return self._transform

    def set_transform(self, value="B"):
        """
        Transformations of weights.

        Notes
        -----
        Transformations are applied only to the value of the weights at
        instantiation. Chaining of transformations cannot be done on a W
        instance.

        Parameters
        ----------
        transform : string
            (not case sensitive)

        .. table::
           :widths: auto

           ================ ======================================================
           transform string value
           ================ ======================================================
           B                Binary
           R                Row-standardization (global sum=n)
           D                Double-standardization (global sum=1)
           V                Variance stabilizing
           O                Restore original transformation (from instantiation)
           ================ ======================================================

        Examples
        --------
        >>> from pysal.lib.weights import lat2W
        >>> w=lat2W()
        >>> w.weights[0]
        [1.0, 1.0]
        >>> w.transform
        'O'
        >>> w.transform='r'
        >>> w.weights[0]
        [0.5, 0.5]
        >>> w.transform='b'
        >>> w.weights[0]
        [1.0, 1.0]
        >>>
        """
        # Codes are case-insensitive; each transformation is computed from
        # the original ('O') weights, never chained, and memoized in
        # self.transformations.
        value = value.upper()
        self._transform = value
        if value in self.transformations:
            # Re-use a previously computed transformation.
            self.weights = self.transformations[value]
            self._reset()
        else:
            if value == "R":
                # row standardized weights
                weights = {}
                self.weights = self.transformations['O']
                for i in self.weights:
                    wijs = self.weights[i]
                    row_sum = sum(wijs) * 1.0
                    if row_sum == 0.0:
                        # Islands have an empty weight list, so the
                        # comprehension below performs no division; a
                        # non-empty all-zero row would still raise
                        # ZeroDivisionError here.
                        if not self.silent_island_warning:
                            print(('WARNING: ', i, ' is an island (no neighbors)'))
                    weights[i] = [wij / row_sum for wij in wijs]
                weights = weights  # no-op; kept as-is
                self.transformations[value] = weights
                self.weights = weights
                self._reset()
            elif value == "D":
                # doubly-standardized weights
                # update current chars before doing global sum
                self._reset()
                s0 = self.s0
                ws = 1.0 / s0
                weights = {}
                self.weights = self.transformations['O']
                for i in self.weights:
                    wijs = self.weights[i]
                    weights[i] = [wij * ws for wij in wijs]
                weights = weights  # no-op; kept as-is
                self.transformations[value] = weights
                self.weights = weights
                self._reset()
            elif value == "B":
                # binary transformation
                weights = {}
                self.weights = self.transformations['O']
                for i in self.weights:
                    wijs = self.weights[i]
                    weights[i] = [1.0 for wij in wijs]
                weights = weights  # no-op; kept as-is
                self.transformations[value] = weights
                self.weights = weights
                self._reset()
            elif value == "V":
                # variance stabilizing
                weights = {}
                q = {}
                k = self.cardinalities
                s = {}
                Q = 0.0
                self.weights = self.transformations['O']
                # First pass: normalize each row by its Euclidean norm and
                # accumulate the global sum Q; second pass rescales so the
                # global sum equals n.
                for i in self.weights:
                    wijs = self.weights[i]
                    q[i] = math.sqrt(sum([wij * wij for wij in wijs]))
                    s[i] = [wij / q[i] for wij in wijs]
                    Q += sum([si for si in s[i]])
                nQ = self.n / Q
                for i in self.weights:
                    weights[i] = [w * nQ for w in s[i]]
                weights = weights  # no-op; kept as-is
                self.transformations[value] = weights
                self.weights = weights
                self._reset()
            elif value == "O":
                # put weights back to original transformation
                weights = {}
                original = self.transformations[value]
                self.weights = original
                self._reset()
            else:
                raise Exception('unsupported weights transformation')

    transform = property(get_transform, set_transform)
    def asymmetry(self, intrinsic=True):
        """
        Asymmetry check.

        Parameters
        ----------
        intrinsic : boolean
            default=True

            intrinsic symmetry: :math:`w_{i,j} == w_{j,i}`

            if intrisic is False:
            symmetry is defined as :math:`i \in N_j \ AND \ j \in N_i` where
            :math:`N_j` is the set of neighbors for j.

        Returns
        -------
        asymmetries : list
            empty if no asymmetries are found
            if asymmetries, then a list of (i,j) tuples is returned

        Examples
        --------
        >>> from pysal.lib.weights import lat2W
        >>> w=lat2W(3,3)
        >>> w.asymmetry()
        []
        >>> w.transform='r'
        >>> w.asymmetry()
        [(0, 1), (0, 3), (1, 0), (1, 2), (1, 4), (2, 1), (2, 5), (3, 0), (3, 4), (3, 6), (4, 1), (4, 3), (4, 5), (4, 7), (5, 2), (5, 4), (5, 8), (6, 3), (6, 7), (7, 4), (7, 6), (7, 8), (8, 5), (8, 7)]
        >>> result = w.asymmetry(intrinsic=False)
        >>> result
        []
        >>> neighbors={0:[1,2,3], 1:[1,2,3], 2:[0,1], 3:[0,1]}
        >>> weights={0:[1,1,1], 1:[1,1,1], 2:[1,1], 3:[1,1]}
        >>> w=W(neighbors,weights)
        >>> w.asymmetry()
        [(0, 1), (1, 0)]
        """
        if intrinsic:
            # Compare actual weight values: W' - W is nonzero wherever
            # w_ij != w_ji.
            wd = self.sparse.transpose() - self.sparse
        else:
            # Compare only the neighbor structure: binarize first, then
            # restore the caller's transformation.
            transform = self.transform
            self.transform = 'b'
            wd = self.sparse.transpose() - self.sparse
            self.transform = transform
        ids = np.nonzero(wd)
        if len(ids[0]) == 0:
            return []
        else:
            ijs = list(zip(ids[0], ids[1]))
            ijs.sort()
            return ijs
    def symmetrize(self, inplace=False):
        """
        Construct a symmetric KNN weight.
        This ensures that if j is a neighbor of i, then i is also recorded
        as a neighbor of j (with the matching weight copied over). This
        returns a generic W object, since the object is no longer guaranteed
        to have k neighbors for each observation.
        """
        if not inplace:
            # Deep-copy so the original W is untouched, then symmetrize the
            # copy in place.
            neighbors = copy.deepcopy(self.neighbors)
            weights = copy.deepcopy(self.weights)
            out_W = W(neighbors, weights)
            out_W.symmetrize(inplace=True)
            return out_W
        else:
            for focal, fneighbs in list(self.neighbors.items()):
                for j, neighbor in enumerate(fneighbs):
                    neighb_neighbors = self.neighbors[neighbor]
                    if focal not in neighb_neighbors:
                        # Add the missing reverse link with the same weight.
                        self.neighbors[neighbor].append(focal)
                        self.weights[neighbor].append(self.weights[focal][j])
            self._cache = dict()
            return
def full(self):
"""
Generate a full numpy array.
Parameters
----------
self : W
spatial weights object
Returns
-------
(fullw, keys) : tuple
first element being the full numpy array and second element
keys being the ids associated with each row in the array.
Examples
--------
>>> from pysal.lib.weights import W, full
>>> neighbors = {'first':['second'],'second':['first','third'],'third':['second']}
>>> weights = {'first':[1],'second':[1,1],'third':[1]}
>>> w = W(neighbors, weights)
>>> wf, ids = full(w)
>>> wf
array([[0., 1., 0.],
[1., 0., 1.],
[0., 1., 0.]])
>>> ids
['first', 'second', 'third']
"""
wfull = np.zeros([self.n, self.n], dtype=float)
keys = list(self.neighbors.keys())
if self.id_order:
keys = self.id_order
for i, key in enumerate(keys):
n_i = self.neighbors[key]
w_i = self.weights[key]
for j, wij in zip(n_i, w_i):
c = keys.index(j)
wfull[i, c] = wij
return (wfull, keys)
    def to_WSP(self):
        '''
        Generate a WSP object.

        Returns
        -------
        implicit : pysal.lib.weights.WSP
            Thin W class

        Examples
        --------
        >>> from pysal.lib.weights import W, WSP
        >>> neighbors={'first':['second'],'second':['first','third'],'third':['second']}
        >>> weights={'first':[1],'second':[1,1],'third':[1]}
        >>> w=W(neighbors,weights)
        >>> wsp=w.to_WSP()
        >>> isinstance(wsp, WSP)
        True
        >>> wsp.n
        3
        >>> wsp.s0
        4

        See also
        --------
        WSP
        '''
        # WSP is a lightweight wrapper around the CSR matrix plus id order.
        return WSP(self.sparse, self._id_order)
    def set_shapefile(self, shapefile, idVariable=None, full=False):
        """
        Adding meta data for writing headers of gal and gwt files.

        Parameters
        ----------
        shapefile : string
            shapefile name used to construct weights
        idVariable : string
            name of attribute in shapefile to associate with ids in the weights
        full : boolean
            True - write out entire path for shapefile, False
            (default) only base of shapefile without extension
        """
        if full:
            self._shpName = shapefile
        else:
            # BASENAME is a module-level helper; keep only the file stem.
            self._shpName = BASENAME(shapefile).split(".")[0]
        self._varName = idVariable
def plot(self, gdf, indexed_on=None, ax=None, color='k',
node_kws=None, edge_kws=None):
"""
Plot spatial weights objects.
NOTE: Requires matplotlib, and implicitly requires geopandas
dataframe as input.
Parameters
---------
gdf : geopandas geodataframe
the original shapes whose topological relations are
modelled in W.
indexed_on : str
column of gdf which the weights object uses as an index.
(Default: None, so the geodataframe's index is used)
ax : matplotlib axis
axis on which to plot the weights.
(Default: None, so plots on the current figure)
color : string
matplotlib color string, will color both nodes and edges
the same by default.
node_kws : keyword argument dictionary
dictionary of keyword arguments to send to pyplot.scatter,
which provide fine-grained control over the aesthetics
of the nodes in the plot
edge_kws : keyword argument dictionary
dictionary of keyword arguments to send to pyplot.plot,
which provide fine-grained control over the aesthetics
of the edges in the plot
Returns
-------
f,ax : matplotlib figure,axis on which the plot is made.
NOTE: if you'd like to overlay the actual shapes from the
geodataframe, call gdf.plot(ax=ax) after this. To plot underneath,
adjust the z-order of the geopandas plot: gdf.plot(ax=ax,zorder=0)
Examples
--------
>>> from pysal.lib.weights.contiguity import Queen
>>> import pysal.lib as lp
>>> import geopandas
>>> gdf = geopandas.read_file(lp.examples.get_path("columbus.shp"))
>>> weights = Queen.from_dataframe(gdf)
>>> tmp = weights.plot(gdf, color='firebrickred', node_kws=dict(marker='*', color='k'))
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("W.plot depends on matplotlib.pyplot, and this was"
"not able to be imported. \nInstall matplotlib to"
"plot spatial weights.")
if ax is None:
f = plt.figure()
ax = plt.gca()
else:
f = plt.gcf()
if node_kws is not None:
if 'color' not in node_kws:
node_kws['color'] = color
else:
node_kws=dict(color=color)
if edge_kws is not None:
if 'color' not in edge_kws:
edge_kws['color'] = color
else:
edge_kws=dict(color=color)
for idx, neighbors in self:
if idx in self.islands:
continue
if indexed_on is not None:
neighbors = gdf[gdf[indexed_on].isin(neighbors)].index.tolist()
idx = gdf[gdf[indexed_on] == idx].index.tolist()[0]
centroids = gdf.loc[neighbors].centroid.apply(lambda p: (p.x, p.y))
centroids = np.vstack(centroids.values)
focal = np.hstack(gdf.loc[idx].geometry.centroid.xy)
seen = set()
for nidx, neighbor in zip(neighbors, centroids):
if (idx,nidx) in seen:
continue
ax.plot(*list(zip(focal, neighbor)), marker=None,
**edge_kws)
seen.update((idx,nidx))
seen.update((nidx,idx))
ax.scatter(gdf.centroid.apply(lambda p: p.x),
gdf.centroid.apply(lambda p: p.y),
**node_kws)
return f,ax
class WSP(object):
    """
    Thin W class for spreg.

    Parameters
    ----------
    sparse : sparse_matrix
        NxN object from scipy.sparse
    id_order : list
        An ordered list of ids, assumed to match the ordering in sparse.

    Attributes
    ----------
    n : int
        number of observations (rows of the sparse matrix)
    s0 : float
        global sum of all weights
    trcWtW_WW : float
        trace of W'W + WW

    Examples
    --------
    From GAL information

    >>> import scipy.sparse
    >>> from pysal.lib.weights import WSP
    >>> rows = [0, 1, 1, 2, 2, 3]
    >>> cols = [1, 0, 2, 1, 3, 3]
    >>> weights = [1, 0.75, 0.25, 0.9, 0.1, 1]
    >>> sparse = scipy.sparse.csr_matrix((weights, (rows, cols)), shape=(4,4))
    >>> w = WSP(sparse)
    >>> w.s0
    4.0
    >>> w.trcWtW_WW
    6.395
    >>> w.n
    4
    """
    def __init__(self, sparse, id_order=None):
        if not scipy.sparse.issparse(sparse):
            raise ValueError("must pass a scipy sparse object")
        rows, cols = sparse.shape
        if rows != cols:
            raise ValueError("Weights object must be square")
        self.sparse = sparse.tocsr()
        self.n = sparse.shape[0]
        if id_order:
            if len(id_order) != self.n:
                raise ValueError(
                    "Number of values in id_order must match shape of sparse")
        # Bug fix: id_order was previously assigned only when truthy, so
        # WSP(sp).to_W() raised AttributeError for the default id_order=None
        # path. Always store it (None when not supplied); to_W handles None.
        self.id_order = id_order
        self._cache = {}

    @property
    def s0(self):
        """s0 is defined as:
        .. math::
               s0=\sum_i \sum_j w_{i,j}
        """
        if 's0' not in self._cache:
            self._s0 = self.sparse.sum()
            self._cache['s0'] = self._s0
        return self._s0

    @property
    def trcWtW_WW(self):
        """Trace of :math:`W^{'}W + WW`.
        """
        if 'trcWtW_WW' not in self._cache:
            self._trcWtW_WW = self.diagWtW_WW.sum()
            self._cache['trcWtW_WW'] = self._trcWtW_WW
        return self._trcWtW_WW

    @property
    def diagWtW_WW(self):
        """Diagonal of :math:`W^{'}W + WW`.
        """
        if 'diagWtW_WW' not in self._cache:
            wt = self.sparse.transpose()
            w = self.sparse
            self._diagWtW_WW = (wt * w + w * w).diagonal()
            self._cache['diagWtW_WW'] = self._diagWtW_WW
        return self._diagWtW_WW

    @classmethod
    def from_W(cls, W):
        """
        Constructs a WSP object from the W's sparse matrix.

        Parameters
        ----------
        W : pysal.lib.weights.W
            a pysal weights object with a sparse form and ids

        Returns
        -------
        a WSP instance
        """
        return cls(W.sparse, id_order=W.id_order)

    def to_W(self, silence_warnings=False):
        """
        Convert a pysal WSP object (thin weights matrix) to a pysal W object.

        Parameters
        ----------
        silence_warnings : boolean
            Switch to turn off (default on) print statements
            for every observation with islands

        Returns
        -------
        w : W
            PySAL weights object

        Examples
        --------
        >>> from pysal.lib.weights import lat2SW, WSP, WSP2W

        Build a 10x10 scipy.sparse matrix for a rectangular 2x5 region of cells
        (rook contiguity), then construct a pysal.lib sparse weights object (self).

        >>> sp = lat2SW(2, 5)
        >>> self = WSP(sp)
        >>> self.n
        10
        >>> print(self.sparse[0].todense())
        [[0 1 0 0 0 1 0 0 0 0]]

        Convert this sparse weights object to a standard PySAL weights object.

        >>> w = WSP2W(self)
        >>> w.n
        10
        >>> print(w.full()[0][0])
        [0. 1. 0. 0. 0. 1. 0. 0. 0. 0.]
        """
        # Touch .sparse first so the CSR internals below are materialized.
        self.sparse
        indices = self.sparse.indices
        data = self.sparse.data
        indptr = self.sparse.indptr
        id_order = self.id_order
        if id_order:
            # replace positional indices with user IDs
            indices = [id_order[i] for i in indices]
        else:
            id_order = list(range(self.n))
        neighbors, weights = {}, {}
        # Walk the CSR row pointer to slice out each row's neighbor ids
        # and weights.
        start = indptr[0]
        for i in range(self.n):
            oid = id_order[i]
            end = indptr[i + 1]
            neighbors[oid] = indices[start:end]
            weights[oid] = data[start:end]
            start = end
        ids = copy.copy(self.id_order)
        w = W(neighbors, weights, ids,
              silence_warnings=silence_warnings)
        # Share the already-built sparse form with the new W's cache.
        w._sparse = copy.deepcopy(self.sparse)
        w._cache['sparse'] = w._sparse
        return w
|
lixun910/pysal
|
pysal/lib/weights/weights.py
|
Python
|
bsd-3-clause
| 46,693
|
[
"COLUMBUS"
] |
e413ce3661638cbe966dc12ed06429fe3386640dfdd1d959247079e2ce359ca8
|
"""@file test_bmode_noise.py
This module started out as the script used to demonstrate that B-mode shape cancellation would work
even for the COSMOS galaxies which don't have the p(|e|) you would get from a purely Gaussian random
B-mode field. Here we tested that the ranked-ordering prescription described in the handbook would
actually work.
In later tests for the validation of the GREAT3 catalogues, there were some additions to this script
to create output for the aperture mass dispersion split into E/B modes as a function of the
kmin_factor and kmax_factor input to the PowerSpectrum instance .buildGrid() method. This turned
out to be important as it leads to power leaking from the pure B-mode noise into the E-mode!
As a result, the metric calculation for Q_v was required to take into account the small quantity of
leaked B-mode noise from the galaxy intrinsic (pre-lensing) ellipticities when comparing submissions
to the reference "truth" E-mode aperture mass dispersion.
"""
import os
import pyfits
import numpy as np
import galsim
# Please modify this filepath to point to the correct location for real_galaxy_23.5_shapes.fits if
# necessary
SHAPECAT = os.path.join("..", "inputs", "galdata", "real_galaxy_23.5_shapes.fits")
RANDOM_SEED = 1335133
NGRID = 500
SIGMA_NOISE = 0.05
def generate_bmode_shears(var, ngrid, rng=None, kmax_factor=16, kmin_factor=1):
    """Generate b-mode shears, returns: g1, g2, gmag, gphi

    Parameters
    ----------
    var : float
        Amplitude of the flat B-mode power spectrum (divided by
        kmax_factor**2 inside the power function).
    ngrid : int
        Linear dimension of the square output grid.
    rng : galsim random deviate, optional
        Passed through to PowerSpectrum.buildGrid.
    kmax_factor, kmin_factor : int
        Grid extension factors passed through to buildGrid.

    Returns
    -------
    g1, g2, gmag, gphi : ngrid x ngrid arrays
        Shear components, modulus and position angle.
    """
    ps = galsim.PowerSpectrum(
        b_power_function=lambda karr : var * np.ones_like(karr) / float(kmax_factor)**2)
    g1, g2 = ps.buildGrid(
        grid_spacing=1., ngrid=ngrid, rng=rng, kmax_factor=kmax_factor, kmin_factor=kmin_factor)
    # These g1, g2 *shears* do not have to be on the unit disc, so we have to convert them to a |g|
    # like ellipticity via the result for a circle following such a shear, using Schneider (2006)
    # eq. 12.  Note, this itself will mean that the ellips are not pure B-mode!!
    # Vectorized replacement for the original elementwise double loop: apply the
    # g -> g / |g|^2 mapping only where |g| > 1, then recompute |g| there.
    gmag = np.sqrt(g1 * g1 + g2 * g2)
    over = gmag > 1.
    g1[over] = g1[over] / gmag[over]**2
    g2[over] = g2[over] / gmag[over]**2
    gmag[over] = np.sqrt(g1[over]**2 + g2[over]**2)
    gphi = .5 * np.arctan2(g2, g1)
    return g1, g2, gmag, gphi
def select_from_catalog(gmag, ngrid):
    """Returns an ngrid x ngrid array with values selected at random from the input array gmag.

    Sampling is with replacement; falls back to the stdlib RNG on numpy
    versions without ``np.random.choice``.
    """
    try:
        sample = np.random.choice(gmag, (ngrid, ngrid))
    except AttributeError:
        # Old numpy without random.choice: draw indices with the stdlib RNG
        import random
        index_pool = range(len(gmag))
        flat = [gmag[random.choice(index_pool)] for _ in range(ngrid**2)]
        sample = np.reshape(np.asarray(flat), (ngrid, ngrid))
    return sample
def match_gmags_by_rank(gmagt, gmag, ngrid):
    """Takes an array of target gmags (gmagt) and identifies the corresponding rank-sorted gmag
    for each element, then returns these as an ngrid x ngrid array.

    The element of ``gmag`` with rank r (in sorted order) is placed at the
    position where ``gmagt`` has rank r, so the output has the |g| values of
    ``gmag`` but the rank ordering (spatial pattern) of ``gmagt``.
    """
    rank_positions = np.argsort(gmagt.flatten())
    values_by_rank = np.sort(gmag.flatten())
    matched = np.empty(ngrid * ngrid)
    # Scatter: position with target-rank r receives the r-th smallest gmag
    matched[rank_positions] = values_by_rank
    return matched.reshape((ngrid, ngrid))
if __name__ == "__main__":
print "Reading shape data from "+SHAPECAT
data = pyfits.getdata(SHAPECAT)
# Select well-measured e1 and e2
do_meas = data.field('do_meas')
e1 = data.field('e1')[do_meas > -0.5]
e2 = data.field('e2')[do_meas > -0.5]
emag = np.sqrt(e1 * e1 + e2 * e2)
ephi = .5 * np.arctan2(e2, e1)
# Convert these to g1, g2
gmag = emag / (1. + np.sqrt(1. - e1 * e1 - e2 * e2))
gphi = ephi
# Catch the 16 nans (|e| > 1) and discard
gmag = gmag[~np.isnan(gmag)]
gphi = gphi[~np.isnan(gmag)]
g1 = gmag * np.cos(2. * gphi)
g2 = gmag * np.sin(2. * gphi)
# Estimate total var from each component (assume isotropic) to use when generating B-mode
# noise as a Gaussian field
gvar = g1.var() + g2.var()
# Get some target 'pure' B-mode shears
g1t, g2t, gmagt, gphit = generate_bmode_shears(gvar, NGRID)
# Set up a power spectrum estimator
my_pse = galsim.pse.PowerSpectrumEstimator(NGRID, NGRID * 180. / np.pi)
# Get the e-mode leakage of the 'target' intrinsic shape noise, present due to the unit disc
# upper limit for |g| <= 1
ell, eet, bbt, ebt = my_pse.estimate(g1t, g2t)
if not os.path.isdir('plots'): os.mkdir('plots')
import matplotlib.pyplot as plt
plt.clf()
plt.loglog(ell, eet, color='k', label='Best case intrinsic E')
plt.loglog(ell, bbt, color='r', label='Best case intrinsic B')
plt.ylabel('Power')
plt.ylim(1.e-6, 1.e1)
plt.legend()
plt.axhline(SIGMA_NOISE**2, ls='--', color='k')
plt.savefig('./plots/bestcase_bmode_shapenoise.png')
plt.clf()
gmag_selected = select_from_catalog(gmag, NGRID)
g1crude = gmag_selected * np.cos(2. * gphit)
g2crude = gmag_selected * np.sin(2. * gphit)
ell, eec, bbc, ebc = my_pse.estimate(g1crude, g2crude)
plt.loglog(ell, eec, color='k', label='Crude (unmatched |g|) intrinsic E')
plt.loglog(ell, bbc, color='r', label='Crude (unmatched |g|) intrinsic B')
plt.ylabel('Power')
plt.ylim(1.e-6, 1.e1)
plt.legend()
plt.axhline(SIGMA_NOISE**2, ls='--', color='k')
plt.savefig('./plots/crude_bmode_shapenoise.png')
plt.clf()
gmags = match_gmags_by_rank(gmagt, gmag_selected, NGRID)
g1s = gmags * np.cos(2. * gphit)
g2s = gmags * np.sin(2. * gphit)
ell, ees, bbs, ebs = my_pse.estimate(g1s, g2s)
plt.loglog(ell, ees, color='k', label='Rank matched |g| intrinsic E')
plt.loglog(ell, bbs, color='r', label='Rank matched |g| intrinsic B')
plt.ylabel('Power')
plt.ylim(1.e-6, 1.e1)
plt.legend()
plt.axhline(SIGMA_NOISE**2, ls='--', color='k')
plt.savefig('./plots/ranked_bmode_shapenoise.png')
print "Mean leaked E = "+str(ees.mean())
print "SIGMA_NOISE**2 = "+str(SIGMA_NOISE**2)
print "Ratio = "+str(SIGMA_NOISE**2 / ees.mean())
plt.clf()
plt.subplot(211)
plt.title('COSMOS fits P(|g|)')
plt.hist(gmag_selected.flatten(), range=(0, 1), bins=30)
plt.ylim(0, 22500)
plt.subplot(212)
plt.title('Gaussian field B-mode P(|g|)')
plt.hist(gmagt.flatten(), range=(0, 1), bins=30, color='r')
plt.ylim(0, 22500)
plt.xlabel('P(|g|)')
plt.savefig('./plots/pe_hist.png')
# Trying sending this to corr2
x, y = np.meshgrid(np.arange(NGRID) * 10. / float(NGRID), np.arange(NGRID) * 10. / float(NGRID))
import sys
sys.path.append(os.path.join("..", "presubmission_script"))
import presubmission
for g1, g2, typestring in zip((g1t, g1s), (g2t, g2s), ("Pure", "Ranked")):
results = presubmission.run_corr2(
x.flatten(), y.flatten(), g1, g2, np.ones(NGRID * NGRID))
ylim = 2.e-5
theta = []
mapE = []
mapB = []
mmxa = []
mmxb = []
maperr = []
for line in results:
theta.append(float(line[0]))
mapE.append(float(line[1]))
mapB.append(float(line[2]))
mmxa.append(float(line[3]))
mmxb.append(float(line[4]))
maperr.append(float(line[5]))
theta = np.asarray(theta)
mapE = np.asarray(mapE)
mapB = np.asarray(mapB)
mmxa = np.asarray(mmxa)
mmxb = np.asarray(mmxb)
maperr = np.asarray(maperr)
plt.clf()
plt.errorbar(theta, mapE, fmt="k-", yerr=maperr, lw=2., label=r"Map$^2$")
plt.xscale("log")
plt.errorbar(theta, mapB, fmt="r-", yerr=maperr, label=r"Mx$^2$", lw=2.)
plt.errorbar(theta, mmxa, fmt="g-", yerr=maperr, label="MMx(a)", lw=2.)
plt.errorbar(theta, mmxb, fmt="b-", yerr=maperr, label="MMx(b)", lw=2.)
plt.axhline(color="k", ls="--")
plt.legend()
#print mapE
#print mapB
plt.ylim(-ylim, ylim)
plt.xlabel("R [degrees]")
plt.title(typestring+" B-mode shapes")
plt.savefig("./plots/map_bmode_only_"+typestring+".png")
print mapE.mean(), np.abs(mapE).max()
print mapB.mean(), np.abs(mapB).max()
plt.show()
|
barnabytprowe/great3-public
|
tests/test_bmode_noise.py
|
Python
|
bsd-3-clause
| 8,476
|
[
"Galaxy",
"Gaussian"
] |
19413ebd098ecfed3c0b9c7ea05b0520e1793d0f8831691f9c1612905fdff298
|
from numpy import sqrt, arctan, pi, exp, random, shape
I = 1.0j  # imaginary unit, used throughout for complex phase factors
# A package for beam optics in python
def spot_size(z, zR, w0):
    """Gaussian beam radius w(z) = w0 * sqrt(1 + (z/zR)**2)."""
    ratio = z / zR
    return w0 * sqrt(1.0 + ratio * ratio)
def radius_curvature(z, zR):
    """Wavefront radius of curvature R(z) = z * (1 + (zR/z)**2).

    R diverges at the waist, so a tiny epsilon is substituted at z == 0
    to avoid a division-by-zero NaN (same hack as the original).
    """
    if z == 0:
        z = 1e-31
    ratio = zR / z
    return z * (1.0 + ratio * ratio)
def guoy_phase(z, zR):
    """Gouy phase shift: arctan(z / zR)."""
    normalized = z / zR
    return arctan(normalized)
def rayleigh_range(w0, wavelambda):
    """Rayleigh range zR = pi * w0**2 / lambda."""
    return pi * w0**2 / wavelambda
def gaussian_beam(x, y, z, E0, wavelambda, w0, k):
    """Full Gaussian beam field at (x, y, z).

    Parameters
    ----------
    E0 : electric field amplitude
    wavelambda : wavelength
    w0 : beam waist (1/e field radius and 1/e^2 intensity radius)
    k : tuple/list (kx, ky, kz)
    """
    r = sqrt(x*x + y*y)
    zR = rayleigh_range(w0, wavelambda)
    w = spot_size(z, zR, w0)
    R = radius_curvature(z, zR)
    eta = guoy_phase(z, zR)
    envelope = E0 * w0/w * exp(- r*r/(w*w))
    axial_phase = exp(-I*k[2]*z - I*k[2]*r*r/(2*R) + I*eta)
    transverse_phase = exp(I*k[0]*x + I*k[1]*y)
    # NOTE: the exp(-i*kz*z) factor causes extra phase accumulation compared
    # to the BPM; the BPM-consistent form would be
    #     w0/w * exp(-r*r/(w*w)) * exp(-1j*k*r*r/(2*R) + 1j*eta)
    # This discrepancy still needs to be sorted out for sure.
    return envelope * axial_phase * transverse_phase
def plane_wave_beam(x, y, z, A, k):
    """A simple plane wave A * exp(i k.r); mostly used for testing.

    NOTE: a more accurate model would add noise to the amplitude rather
    than use a pure plane wave (earlier noise experiments were removed).
    """
    phase = I*k[0]*x + I*k[1]*y + I*k[2]*z
    return A * exp(phase)
|
DawesLab/LabNotebooks
|
BeamOptics.py
|
Python
|
mit
| 2,036
|
[
"Gaussian"
] |
ede1896a7569065a90c63d860c7bbf9fb4b7e2500190220184501bb8f545fcdd
|
import pandas as pd
import numpy as np
from .QCBase import VarNames
class Exporter(object):
    """ Export class which writes parsed data to a certain format."""

    # Formats understood by the export methods.  "xls" and "df" added as
    # aliases so the comparisons below can actually be reached.
    valid_formats = ["pdf", "xlsx", "xls", "txt", "csv", "dataframe", "df"]

    def __init__(self, data=None):
        # Optional payload; the export methods below take their own inputs.
        self.data = data
        # for later: add pandas independent functions to export arrays to file

    def arrays_to_dframe(self, **kwargs):
        """ Using keyworded arguments, expects arrays.

        Arrays of unequal length are padded with NaN by falling back to
        pandas.Series construction.
        """
        try:
            df = pd.DataFrame(kwargs)
        except ValueError:  # if arrays do not have the same length
            d = {}
            for key, value in kwargs.items():
                d[key] = pd.Series(value)
            df = pd.DataFrame(d)
        return df

    def ExcitedStateSummary(self, results, fname="es_smry", fmt="csv",
                            ground_state=False):
        """ Exports energy related excited state quantities to file

        Parameters
        ----------
        results : CCParser.ParseContainer
            Parsing container that holds parsed values.
        fname : string
            Filename prefix.
        fmt : string
            Output format ('csv', 'xlsx'/'xls' or 'dataframe'/'df' for
            pandas.DataFrame).
        ground_state : bool
            Whether to include an empty line in the table for the ground state.

        Returns
        -------
        pandas.DataFrame
            Only when fmt is 'dataframe'/'df'; otherwise the data is written
            to file and nothing is returned.
        """
        if fmt not in Exporter.valid_formats:
            raise ValueError("File format '{0:}' not recognized or supported!".format(fmt))
        if False in getattr(results, VarNames.has_converged).data:
            raise ValueError("Not converged state detected!")
        d = {}
        # (1) Excitation energies (default minimum)
        d[VarNames.exc_energy_rel] = getattr(results, VarNames.exc_energy_rel).data
        n_states = len(d[VarNames.exc_energy_rel])
        # (2) Oscillator strengths
        if hasattr(results, VarNames.osc_str):
            d[VarNames.osc_str] = getattr(results, VarNames.osc_str).data
        # prepare MultiIndex (there has to be a better way to do that...)
        arrays = [[x for x in range(1, n_states+1)],
                  [0 for x in range(n_states)]]
        tuples = list(zip(*arrays))  # asterisk unpacks
        df1 = pd.DataFrame(d)
        df1.index = pd.MultiIndex.from_tuples(tuples, names=["State", "Row ID"])
        # (3) Amplitudes
        # BUGFIX: amp_df used to be referenced unconditionally, raising
        # NameError whenever no amplitudes had been parsed.
        if hasattr(results, VarNames.amplitudes):
            ampl = getattr(results, VarNames.amplitudes)
            pieces = [a.to_dataframe() for a in ampl]
            key = [x for x in range(1, len(pieces)+1)]
            amp_df = pd.concat(pieces, keys=key, names=["State", "Row ID"])
            df = pd.concat([df1, amp_df], axis=1)
        else:
            df = df1
        # add row to MultiIndex, see https://stackoverflow.com/q/24917700
        if ground_state:
            df.loc[(0, 0), :] = np.nan
            df.sort_index(level=0, inplace=True)
        # EXPORT TO FILE or dataframe
        fout = fname + "." + fmt
        if fmt == "csv":
            df.to_csv(fout, encoding="utf-8")
        elif fmt in ("xlsx", "xls"):
            # BUGFIX: was 'fmt == ("xlsx" or "xls")', which only ever
            # compared against "xlsx".
            writer = pd.ExcelWriter(fout)
            df.to_excel(writer, "Sheet1")
            writer.save()
        elif fmt.lower() in ("dataframe", "df"):
            # BUGFIX: was 'fmt.lower() == ("dataframe" or "df")', which only
            # ever compared against "dataframe".
            return df

    def ReducedWeights(self, results, nbsfA, extern=None, fmt="print",
                       fname="AmplAnl", silent=False):
        """ Calculate reduced weights based on fragment information.
        The reduced weight for a single excitation :math:`i \\rightarrow a` is defined as
        :math:`v_{i}^{a} = 0.5\\cdot(c_{i,A}^{2} + c_{a,A}^{2})\\cdot w_{i}^{a}`, with
        c and w being the molecular orbital coefficient and transition weight,
        respectively.
        The MO coefficients from the output first have to be transformed to an
        orthonormal basis.

        Parameters
        ----------
        results : CCParser.ParseContainer
            Container object which contains excited state amplitudes
        nbsfA : int
            Number of basis functions on System A (assumes system A comes first!)
        extern : CCParser.ParseContainer
            Optional second container which contains orthonormalisation matrix and/or MO coefficients
        fmt : string
            Output format. Available are "print", "dataframe", "xlsx" or "csv"
        fname : string
            Output file name (basename only).
        silent : bool
            Whether to ignore lengthy printouts.
        """
        # consistency (use identity comparison for None)
        has_extern = extern is not None
        if False in getattr(results, VarNames.has_converged).data:
            raise ValueError("Not converged state detected!")
        if not has_extern and not hasattr(results, VarNames.orthonorm_matrix):
            raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
        elif has_extern and not hasattr(extern, VarNames.orthonorm_matrix):
            raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
        elif not has_extern and not hasattr(results, VarNames.mo_coefficients):
            raise AttributeError("Could not find MO coefficients! Were they parsed?")
        elif has_extern and not hasattr(extern, VarNames.mo_coefficients):
            raise AttributeError("Could not find MO coefficients! Were they parsed?")
        elif not hasattr(results, VarNames.amplitudes):
            raise AttributeError("Could not find amplitudes! Were they parsed?")
        elif not hasattr(results, VarNames.n_bas):
            raise AttributeError("Could not find number of basis functions! Was it parsed?")
        else:
            # (1) Orthonormalization matrix, hardcoded last
            X = getattr(results, VarNames.orthonorm_matrix).get_last() if not \
                has_extern else getattr(extern, VarNames.orthonorm_matrix).get_last()
            X_inv = np.linalg.inv(X)
            # (2) MO coeffiecients, hardcoded last
            C = getattr(results, VarNames.mo_coefficients).get_last() if not \
                has_extern else getattr(extern, VarNames.mo_coefficients).get_last()
            C_prime = C * X_inv  # Szabo, Ostlund, page 142
            max_mo = C.shape[0]
            # (3) Amplitudes
            ampl = getattr(results, VarNames.amplitudes)
            n_states = len(ampl)
            # (4) Number of basis functions
            nbsf = getattr(results, VarNames.n_bas).get_last()
            # (5) Output accumulators, one entry per state
            sum_weights = [0 for i in range(n_states)]
            sum_redweights = [0 for i in range(n_states)]
            # --------------
            # Sum of squared orthonormal MO coefficients on fragment A / B.
            # NOTE(review): assumes the first nbsfA basis functions belong to
            # fragment A (stated in the docstring) -- confirm ordering.
            sos_A = [0 for a in range(C_prime.shape[0])]
            sos_B = [0 for a in range(C_prime.shape[0])]
            for c, vect in enumerate(C_prime):
                for n in range(nbsf):
                    if n < nbsfA:
                        sos_A[c] += vect[0,n]**2
                    else:
                        sos_B[c] += vect[0,n]**2
            for i,a in enumerate(ampl):#state
                for t in range(len(a.occ)):#transition
                    if max(a.virt[t]) > max_mo:
                        if not silent:
                            print("State {0:>2d}: Omitting transition with weight \
{1:.1%} due to missing MO coefficients.".format(i+1, a.weights[t]))
                        continue
                    if len(a.occ[t]) == 1:#single amplitudes
                        rw = 0.5*(sos_A[a.occ[t][0]-1] + sos_A[a.virt[t][0]-1]) * a.weights[t]
                    elif len(a.occ[t]) == 2:#double amplitudes
                        rw = 0.25*(sos_A[a.occ[t][0]-1] + sos_A[a.occ[t][1]-1] +
                                   sos_A[a.virt[t][0]-1] + sos_A[a.virt[t][1]-1]
                                   )*a.weights[t]
                    else:
                        raise IndexError("Currently no more than double \
amplitudes are supported!")
                    sum_weights[i] += a.weights[t]
                    sum_redweights[i] += rw
            #----------------
            # Export as
            fout = fname + "." + fmt
            d = {"State": [i+1 for i in range(n_states)],
                 "sum_weight" : sum_weights,
                 "sum_red_weight" : sum_redweights}
            df = pd.DataFrame(d)
            df = df.assign(diff=df["sum_weight"]-df["sum_red_weight"],
                           ratio=df["sum_red_weight"]/df["sum_weight"])
            if fmt == "print":
                print("State | Sum(W) | Sum(P) | Sum(W) - Sum(P) | ratio P/W |\n",50*"-")
                for i in range(n_states):
                    print(" S{0:>2d} | {1:.3f} | {2:.3f} | {3:15.3f} | {4:.1%}".format(
                        i+1, sum_weights[i], sum_redweights[i], sum_weights[i] -
                        sum_redweights[i], sum_redweights[i]/sum_weights[i]))
            elif fmt == "dataframe":
                return df
            elif fmt == "csv":
                df.to_csv(fout, encoding="utf-8")
            elif fmt == "xlsx" or fmt == "xls":
                writer = pd.ExcelWriter(fout)
                df.to_excel(writer, "Sheet1")
                writer.save()
            else:
                raise ValueError("Output format not supported!")

    def MO_Molden(self, results, atom_basis, fname="molecular_orbitals",
                  tmp_5d=True):
        """ Writes molecular orbitals to a molden file.
        Expects molecular geometry in Angstrom.
        More information on the molden format at
        http://www.cmbi.ru.nl/molden/molden_format.html

        Parameters
        ----------
        results : CCParser.ParseContainer
            Container object which holds MO coefficients.
        atom_basis : dict
            Dictionary mapping GTO exponents/coefficients to atoms. Expected
            format of dictionary entry is list of strings.
            (DOCFIX: was documented under the wrong name 'exponents'.)
        fname : string
            Output file name.
        tmp_5d : bool
            Whether to append a "[5D]" marker (spherical d functions) at the end.
        """
        from .QCBase import PeriodicTable
        import re
        C = results.C.get_last()
        xyz = results.xyz.get_last()
        en = results.mo_energies.get_last()
        PeTa = PeriodicTable()
        #TODO: Permutator needed in case of different formats (Molcas, Gaussian)
        with open(fname+".molden", "w") as out:
            out.write("[Molden Format]\n")
            # write XYZ
            out.write("[Atoms] (Angs)\n")
            for i,atom in enumerate(xyz):
                num = PeTa.get_atomic_num(atom[0])
                out.write("{0:>3}{1:7d}{2:5d}".format(atom[0], i+1, num))
                out.write("".join("{0:16.8f}".format(c) for c in atom[1:])+"\n")
            # write basis exponents
            out.write("[GTO]\n")
            for n in range(len(xyz)):
                # atom sequence number, 0
                out.write("{0:d}{1:5d}\n".format(n+1, 0))
                symb = xyz[n][0].upper()
                basis = atom_basis[symb]
                for coeff in basis:
                    # shell label, number of primitives, 1.00
                    if re.search(r"[SDPF]", coeff[0]):
                        out.write("{0:}{1:6d}{2:12.6f}\n".format(
                            coeff[0], int(coeff[1]), float(coeff[2])))
                    # exponent, contraction coefficient
                    else:
                        out.write("{0:18.8e}{1:18.8e}\n".format(
                            float(coeff[0]), float(coeff[1])))
                out.write("\n")
            for imo in range(C.shape[0]):#assumes counting from MO 1 !!
                out.write("[MO]\nSym=X\n")
                if imo < en.n_occ:#occupied
                    out.write("Ene={0:12.6f}\n".format(en.occ[imo]))
                    out.write("Spin=alpha\n")
                    out.write("Occup=1\n")
                else:#virtual
                    out.write("Ene={0:12.6f}\n".format(en.virt[imo]))
                    out.write("Spin=alpha\n")
                    out.write("Occup=0\n")
                for i in range(C.shape[1]):
                    out.write("{0:6d}{1: 22.12e}\n".format(i+1,C[imo, i]))
            if tmp_5d:
                out.write("[5D]\n")
        print("MOs written to Molden file.")
|
spectre007/CCParser
|
Export.py
|
Python
|
mit
| 12,361
|
[
"Gaussian",
"MOLCAS"
] |
9603930ac2da1d53d08e7b1db35d06a17a0e2c7cb5b97b365fee17a2ec11e1c6
|
# coding: utf-8
# # Intro to satellite data I
#
# In this notebook we take a quick look at a 5 minutes of satellite data acquired from the MODIS instrument on the Aqua polar orbiting satellite. Aqua flies in the [A-train]( <http://atrain.nasa.gov), which is formation of satellites that orbit separated by a minute or so. The granule covers the period from 20:15 to 20:20 UCT on May 15, 2016 (Julian day 136) while Aqua flew over Ft. McMurray, Alberta. I downloaded the granule from the [Laadsweb NASA site]( https://ladsweb.nascom.nasa.gov/data/search.html) and converted it from HDF4 to HDF5 format (more on [this](https://www.hdfgroup.org/h5h4-diff.html) later). The structure of HDF5 files can be explored with the [HDFViewer tool](https://www.hdfgroup.org/products/java/release/download.html) (install version 2.13 from that link). The gory details are in the [Modis Users Guide](http://clouds.eos.ubc.ca/~phil/courses/atsc301/downloads/modis_users_guide.pdf).
#
# First, download the file from our course website:
# In[1]:
from a301utils.a301_readfile import download
import h5py
filename = 'MYD021KM.A2016136.2015.006.2016138123353.h5'
download(filename)
# Here is the corresponding red,green,blue color composite for the granule.
# In[2]:
from IPython.display import Image
Image(url='http://clouds.eos.ubc.ca/~phil/courses/atsc301/downloads/aqua_136_2015.jpg',width=600)
# ### now use h5py to read some of the satellite channels
# In[3]:
# NOTE(review): no mode argument -- older h5py defaulted to append mode here;
# newer versions require an explicit mode (e.g. 'r').  Confirm h5py version.
h5_file=h5py.File(filename)
# h5 files have attributes -- stored as a dictionary
# In[4]:
print(list(h5_file.attrs.keys()))
# ### print two of the attributes
# In[5]:
print(h5_file.attrs['Earth-Sun Distance_GLOSDS'])
# In[6]:
print(h5_file.attrs['HDFEOSVersion_GLOSDS'])
# h5 files have variables -- stored in a dictionary.
# The fields are aranged in a hierarchy of groups similar to a set of nested folders
# Here is what HDFViewer reports for the structure of the "EV_1KM_Emissive" dataset, which stands for "Earth View, 1 km pixel resolution, thermal emission channels". It is showing a 3 dimensional array of integers of shape (16,2030,1354). These are radiometer counts in 16 different wavelength channels for the 2030 x 1354 pixel granule.
# In[7]:
Image('screenshots/HDF_file_structure.png')
# **Read the radiance data from MODIS_SWATH_Type_L1B/Data Fields/EV_1KM_Emissive**
# Note the correspondence between the keys and the fields you see in HDFView:
#
# Here are the top level groups:
# In[8]:
print(list(h5_file.keys()))
# and the 'MODIS_SWATH_Type_L1B' group contains 3 subgroups:
# In[9]:
print(list(h5_file['MODIS_SWATH_Type_L1B'].keys()))
# and the 'Data Fields' subgroup contains 27 more groups:
# In[10]:
print(list(h5_file['MODIS_SWATH_Type_L1B/Data Fields'].keys()))
# Print out the 16 channel numbers stored in Band_1KM_Emissive data array. The [...] means "read everything". The 16 thermal channels are channels 20-36. Their wavelength ranges and common uses are listed
# [here](https://modis.gsfc.nasa.gov/about/specifications.php)
# In[11]:
print(h5_file['MODIS_SWATH_Type_L1B/Data Fields/Band_1KM_Emissive'][...])
# **note that channel 31, which covers the wavelength range 10.78-11.28 $\mu m$ occurs at index value 10 (remember python counts from 0)**
# In[12]:
index31=10
# **the data are stored as unsigned (i.e. only positive values), 2 byte (16 bit) integers which can hold values from 0 to $2^{16}$ - 1 = 65,535.
# The ">u2" notation below for the datatype (dtype) says that the data is unsigned, 2 byte, with the most significant
# byte stored first ("big endian", which is the same way we write numbers)**
#
# (Although the 2 byte words contain 16 bits, only 12 bits are significant).
#
# (h5py let's you specify the group names one at a time, instead of using '/' to separate them. This is convenient if you are storing your field name in a variable, for example.)
# In[13]:
my_name = 'EV_1KM_Emissive'
chan31=h5_file['MODIS_SWATH_Type_L1B']['Data Fields'][my_name][index31,:,:]
print(chan31.shape,chan31.dtype)
# **Print the first 3 rows and columns**
# In[14]:
chan31[:3,:3]
# ** we need to apply a
# scale and offset to convert counts to radiance, with units of $(W\,m^{-2}\,\mu m^{-1}\,sr^{-1}$). More about the
# sr units later**
# $Data = (RawData - offset) \times scale$
#
# this information is included in the attributes of each variable.
#
# (see page 36 of the [Modis Users Guide](http://clouds.eos.ubc.ca/~phil/courses/atsc301/downloads/modis_users_guide.pdf))
# **here is the scale for all 16 channels**
# In[15]:
scale=h5_file['MODIS_SWATH_Type_L1B']['Data Fields']['EV_1KM_Emissive'].attrs['radiance_scales'][...]
print(scale)
# **and here is the offset for 16 channels**
# In[16]:
offset=h5_file['MODIS_SWATH_Type_L1B']['Data Fields']['EV_1KM_Emissive'].attrs['radiance_offsets'][...]
print(offset)
# **note that as the satellite ages and wears out, these calibration coefficients change**
# In[17]:
# calibrate channel 31: radiance = (counts - offset) * scale
chan31_calibrated =(chan31 - offset[index31])*scale[index31]
# In[18]:
get_ipython().magic('matplotlib inline')
# **histogram the raw counts -- note that hist doesn't know how to handle 2-dim arrays, so flatten to 1-d**
# In[19]:
import matplotlib.pyplot as plt
out=plt.hist(chan31.flatten())
#
# get the current axis to add title with gca()
#
ax = plt.gca()
_=ax.set(title='Aqua Modis raw counts')
# **histogram the calibrated radiances and show that they lie between
# 0-10 $W\,m^{-2}\,\mu m^{-1}\,sr^{-1}$ **
# In[20]:
import matplotlib.pyplot as plt
fig,ax = plt.subplots(1,1)
ax.hist(chan31_calibrated.flatten())
_=ax.set(xlabel='radiance $(W\,m^{-2}\,\mu m^{-1}\,sr^{-1}$)',
         title='channel 31 radiance for Aqua Modis')
# ** Next Read MODIS_SWATH_Type_L1B/Geolocation Fields/Longitude**
# note that the longitude and latitude arrays are (406,271) while the actual
# data are (2030,1354). These lat/lon arrays show only every fifth row and column to
# save space. The full lat/lon arrays are stored in a separate file.
# In[21]:
lon_data=h5_file['MODIS_SWATH_Type_L1B']['Geolocation Fields']['Longitude'][...]
lat_data=h5_file['MODIS_SWATH_Type_L1B']['Geolocation Fields']['Latitude'][...]
_=plt.plot(lon_data[:10,:10],lat_data[:10,:10],'b+')
# **Note two things: 1) the pixels overlap and 2) they don't line up on lines of constant longitude and latitude**
#
# **The pixels are also not all the same size -- this distortion is called the [bowtie effect](http://eoweb.dlr.de:8080/short_guide/D-MODIS.html)**
#
# **Next -- plotting image data**
# In[ ]:
|
a301-teaching/a301_code
|
notebooks/python/satellite_I.py
|
Python
|
mit
| 6,584
|
[
"Bowtie"
] |
3250206f9995a228881ff9a955467467554edc32a33b08cba1beebe338b44b6f
|
# -*- coding: utf-8 -*-
#
# brunel2000_interactive.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import nest.raster_plot
import pylab
# Network parameters. These are given in Brunel (2000) J.Comp.Neuro.
g = 5.0 # Ratio of IPSP to EPSP amplitude: J_I/J_E
eta = 2.0 # rate of external population in multiples of threshold rate
delay = 1.5 # synaptic delay in ms
tau_m = 20.0 # Membrane time constant in ms (DOCFIX: original comment said mV)
V_th = 20.0 # Spike threshold in mV
N_E = 8000 # number of excitatory neurons
N_I = 2000 # number of inhibitory neurons
N_neurons = N_E + N_I
C_E = int(N_E / 10) # number of excitatory synapses per neuron
C_I = int(N_I / 10) # number of inhibitory synapses per neuron
J_E = 0.1 # EPSP amplitude in mV
J_I = -g * J_E # IPSP amplitude in mV
nu_ex = eta * V_th / (J_E * C_E * tau_m) # rate of an external neuron in ms^-1
p_rate = 1000.0 * nu_ex * C_E # rate of the external population in s^-1
# Set parameters of the NEST simulation kernel
nest.SetKernelStatus({'print_time': True,
 'local_num_threads': 2})
# All neurons share the same iaf_psc_delta parameters
nest.SetDefaults('iaf_psc_delta',
 {'C_m': 1.0,
 'tau_m': tau_m,
 't_ref': 2.0,
 'E_L': 0.0,
 'V_th': V_th,
 'V_reset': 10.0})
# Create nodes -------------------------------------------------
nodes = nest.Create('iaf_psc_delta', N_neurons)
nodes_E = nodes[:N_E]
nodes_I = nodes[N_E:]
# Poisson generator mimics the external excitatory population
noise=nest.Create('poisson_generator', 1, {'rate': p_rate})
# Two spike detectors: one for excitatory, one for inhibitory recordings
spikes=nest.Create('spike_detector',2,
 [{'label': 'brunel_py_ex'},
 {'label': 'brunel_py_in'}])
spikes_E=spikes[:1]
spikes_I=spikes[1:]
# Connect nodes ------------------------------------------------
nest.CopyModel('static_synapse_hom_w',
 'excitatory',
 {'weight':J_E,
 'delay':delay})
# Each neuron receives C_E excitatory inputs drawn at random
nest.Connect(nodes_E, nodes,
 {'rule': 'fixed_indegree',
 'indegree': C_E},
 'excitatory')
nest.CopyModel('static_synapse_hom_w',
 'inhibitory',
 {'weight':J_I,
 'delay':delay})
# ... and C_I inhibitory inputs
nest.Connect(nodes_I, nodes,
 {'rule': 'fixed_indegree',
 'indegree': C_I},
 'inhibitory')
nest.Connect(noise, nodes, syn_spec='excitatory')
N_rec = 50 # Number of neurons to record from
nest.Connect(nodes_E[:N_rec], spikes_E)
nest.Connect(nodes_I[:N_rec], spikes_I)
# Simulate -----------------------------------------------------
simtime = 300. # simulation duration in ms
nest.Simulate(simtime)
# Convert event counts to mean single-neuron firing rates in Hz:
# events / (simtime in s) / number of recorded neurons
ex_events, in_events = nest.GetStatus(spikes, 'n_events')
events_to_rate = 1000. / simtime /N_rec
rate_ex = ex_events * events_to_rate
print('Excitatory rate: {:.2f} Hz'.format(rate_ex))
rate_in = in_events * events_to_rate
print('Inhibitory rate: {:.2f} Hz'.format(rate_in))
nest.raster_plot.from_device(spikes_E, hist=True)
#pylab.show()
pylab.savefig('../figures/brunel_interactive.eps')
|
magnastrazh/NEUCOGAR
|
nest/serotonin/research/C/nest-2.10.0/doc/nest_by_example/scripts/brunel2000_interactive.py
|
Python
|
gpl-2.0
| 3,535
|
[
"NEURON"
] |
190412af92e5c85367a41bccce67bb2a9338ea883170d15099ed851020141aa5
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
#
#
##############################################################################################
# preamble
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
#grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/ACCMIP-MACCity_anthrop_1960-2020/sectors/NOx/n96e/chunks/MACCity_aircraft_NO_1975-1989_n96l85.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i303: CO surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i340'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
# NOTE(review): species_name is not referenced below in this visible portion
species_name='NO_aircrft'
# this is the grid we want to regrid to, e.g. N96 ENDGame
#grd=iris.load(grid_file)[0]
#grd.coord(axis='x').guess_bounds()
#grd.coord(axis='y').guess_bounds()
# This is the original data
#ems=iris.load_cube(emissions_file)
# input file is already on the target grid, so load it directly (the
# regridding steps below are kept, commented out, for reference)
ocube=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
#nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
#nems.coord(axis='x').coord_system=grd.coord_system()
#nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
#nems.coord(axis='x').guess_bounds()
#nems.coord(axis='y').guess_bounds()
# now regrid
#ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_NO_aircrft'
ocube.long_name='NOx aircraft emissions'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='all_levels'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']='NO_aircrft'
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.4' # UM version
# NOTE(review): 'source' says 1960-2020 but the input file covers 1975-1989 -- confirm
ocube.attributes['source']='MACCity_aircraft_NO_1960-2020_n96l85.nc'
ocube.attributes['data_version']='Beta release'
# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
# this bit is annoyingly fiddly
# (points are days since 1960-01-01 on a 360-day calendar; 5385 appears to be
#  mid-January 1975 = 15*360 - 15 -- confirm against the input file)
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=numpy.array([ 5385,
 5415, 5445, 5475, 5505, 5535, 5565, 5595, 5625, 5655, 5685, 5715, 5745,
 5775, 5805, 5835, 5865, 5895, 5925, 5955, 5985, 6015, 6045, 6075, 6105,
 6135, 6165, 6195, 6225, 6255, 6285, 6315, 6345, 6375, 6405, 6435, 6465,
 6495, 6525, 6555, 6585, 6615, 6645, 6675, 6705, 6735, 6765, 6795, 6825,
 6855, 6885, 6915, 6945, 6975, 7005, 7035, 7065, 7095, 7125, 7155, 7185,
 7215, 7245, 7275, 7305, 7335, 7365, 7395, 7425, 7455, 7485, 7515, 7545,
 7575, 7605, 7635, 7665, 7695, 7725, 7755, 7785, 7815, 7845, 7875, 7905,
 7935, 7965, 7995, 8025, 8055, 8085, 8115, 8145, 8175, 8205, 8235, 8265,
 8295, 8325, 8355, 8385, 8415, 8445, 8475, 8505, 8535, 8565, 8595, 8625,
 8655, 8685, 8715, 8745, 8775, 8805, 8835, 8865, 8895, 8925, 8955, 8985,
 9015, 9045, 9075, 9105, 9135, 9165, 9195, 9225, 9255, 9285, 9315, 9345,
 9375, 9405, 9435, 9465, 9495, 9525, 9555, 9585, 9615, 9645, 9675, 9705,
 9735, 9765, 9795, 9825, 9855, 9885, 9915, 9945, 9975, 10005, 10035, 10065,
 10095, 10125, 10155, 10185, 10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425,
 10455, 10485, 10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
 10815 ])
# make z-direction. -- MOK we won't need this for aircraft emissions?
#zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
# units='1',attributes={'positive':'up'})
#ocube.add_aux_coord(zdims)
#ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
#ocube.transpose([1,0,2,3])
# guess bounds of x and y dimension
ocube.coord(axis='x').guess_bounds()
ocube.coord(axis='y').guess_bounds()
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
# MOK -- uncomment the following line:
ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([ 5385,
5415, 5445, 5475, 5505, 5535, 5565, 5595, 5625, 5655, 5685, 5715, 5745,
5775, 5805, 5835, 5865, 5895, 5925, 5955, 5985, 6015, 6045, 6075, 6105,
6135, 6165, 6195, 6225, 6255, 6285, 6315, 6345, 6375, 6405, 6435, 6465,
6495, 6525, 6555, 6585, 6615, 6645, 6675, 6705, 6735, 6765, 6795, 6825,
6855, 6885, 6915, 6945, 6975, 7005, 7035, 7065, 7095, 7125, 7155, 7185,
7215, 7245, 7275, 7305, 7335, 7365, 7395, 7425, 7455, 7485, 7515, 7545,
7575, 7605, 7635, 7665, 7695, 7725, 7755, 7785, 7815, 7845, 7875, 7905,
7935, 7965, 7995, 8025, 8055, 8085, 8115, 8145, 8175, 8205, 8235, 8265,
8295, 8325, 8355, 8385, 8415, 8445, 8475, 8505, 8535, 8565, 8595, 8625,
8655, 8685, 8715, 8745, 8775, 8805, 8835, 8865, 8895, 8925, 8955, 8985,
9015, 9045, 9075, 9105, 9135, 9165, 9195, 9225, 9255, 9285, 9315, 9345,
9375, 9405, 9435, 9465, 9495, 9525, 9555, 9585, 9615, 9645, 9675, 9705,
9735, 9765, 9795, 9825, 9855, 9885, 9915, 9945, 9975, 10005, 10035, 10065,
10095, 10125, 10155, 10185, 10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425,
10455, 10485, 10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815 ],dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want time to be cattable, as is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=False
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF4_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])
# end of script
|
acsis-project/emissions
|
emissions/python/timeslice/regrid_aircNO_n96e_360d_1975-1989.py
|
Python
|
gpl-3.0
| 8,837
|
[
"NetCDF"
] |
e035ddfe3b08da2921735f9ed775359b6e99c8efb89fbe1074ea9d3caa101ac0
|
import textwrap
import warnings
from io import StringIO
from pathlib import Path
import neurom.io as io
import neurom.io.neurolucida as nasc
import numpy as np
from mock import patch
from neurom import load_neuron
from neurom.core.dataformat import COLS
from neurom.io.datawrapper import DataWrapper
from nose.tools import eq_, ok_
from numpy.testing import assert_array_equal
DATA_PATH = Path(Path(__file__).parent, '../../../test_data')
NEUROLUCIDA_PATH = Path(DATA_PATH, 'neurolucida')
def test__match_section():
    """_match_section yields None when no key of the match dict appears early on."""
    # 'Foo' is absent from the first five elements, so there is no match.
    unmatched = [0, 1, 2, 3, 4, 'something']
    eq_(nasc._match_section(unmatched, {'Foo': 'Bar', }), None)
def test__get_tokens():
    """Tokenizer splits parentheses and keeps quoted strings (even with spaces) whole."""
    cases = [
        (u'((()))',
         ['(', '(', '(', ')', ')', ')']),
        (u'(Baz("Bar"("Foo")))',
         ['(', 'Baz', '(', '"Bar"', '(', '"Foo"', ')', ')', ')']),
        # a quoted string containing spaces must remain a single token
        (u'(Baz("Cell Bar Body"("Foo")))',
         ['(', 'Baz', '(', '"Cell Bar Body"', '(', '"Foo"', ')', ')', ')']),
    ]
    for text, expected in cases:
        eq_(list(nasc._get_tokens(StringIO(text))), expected)
def test__parse_section():
    """_parse_section turns a flat token stream into nested lists, one level per paren pair."""
    with patch('neurom.io.neurolucida._match_section') as mock_match:
        mock_match.return_value = False  # want all sections
        token_iter = iter(['(', '(', '(', ')', ')', ')'])
        section = nasc._parse_section(token_iter)
        eq_(section, [[[[]]]])
        # named tokens stay in place inside the nesting they were read from
        token_iter = iter(['(', 'Baz', '(', '"Bar"', '(', '"Foo"', ')', ')', ')'])
        section = nasc._parse_section(token_iter)
        eq_(section, [['Baz',
                       ['"Bar"',
                        ['"Foo"',
                         ]]]])
def test__parse_sections():
    """Parsing a raw Neurolucida string keeps neurite trees and skips marker blocks."""
    string_section = textwrap.dedent(
        u"""(FilledCircle
        (Color RGB (64, 0, 128))
        (Name "Marker 11")
        (Set "axons")
        ( -189.59 55.67 28.68 0.12) ; 1
        ) ; End of markers
        ( (Color Yellow)
        (Axon)
        (Set "axons")
        ( -40.54 -113.20 -36.61 0.12) ; Root
        ( -40.54 -113.20 -36.61 0.12) ; 1, R
        Generated
        ) ; End of tree
        """)
    morph_fd = StringIO(string_section)
    sections = nasc._parse_sections(morph_fd)
    eq_(len(sections), 1)  # FilledCircle is ignored
    # the literal 'Generated' token is carried through unchanged
    eq_(sections[0], [['Axon'],
                      ['-40.54', '-113.20', '-36.61', '0.12'],
                      ['-40.54', '-113.20', '-36.61', '0.12'],
                      'Generated'])
def test__flatten_section():
    """_flatten_subsection assigns sequential IDs and correct parent links,
    including bifurcations ('|' separator), a missing separator, and multifurcations."""
    #[X, Y, Z, R, TYPE, ID, PARENT_ID]
    # simple linear chain, terminated by a literal 'Generated' token
    subsection = [['0', '0', '0', '0'],
                  ['1', '1', '1', '1'],
                  ['2', '2', '2', '2'],
                  ['3', '3', '3', '3'],
                  ['4', '4', '4', '4'],
                  'Generated',
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    # correct parents
    ok_(np.allclose(ret[:, COLS.P], np.arange(-1, 4)))
    ok_(np.allclose(ret[:, COLS.ID], np.arange(0, 5)))
    # standard bifurcation: two branches split by '|', both children of row 0
    subsection = [['-1', '-1', '-1', '-1'],
                  [['0', '0', '0', '0'],
                   ['1', '1', '1', '1'],
                   ['2', '2', '2', '2'],
                   ['3', '3', '3', '3'],
                   ['4', '4', '4', '4'],
                   '|',
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'], ]
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    # correct parents
    eq_(ret[0, COLS.P], -1.)
    eq_(ret[1, COLS.P], 0.0)
    eq_(ret[6, COLS.P], 0.0)  # second branch also attaches to row 0
    ok_(np.allclose(ret[:, COLS.ID], np.arange(0, 11)))  # correct ID
    # Try a non-standard bifurcation, ie: missing '|' separator
    subsection = [['-1', '-1', '-1', '-1'],
                  [['0', '0', '0', '0'],
                   ['1', '1', '1', '1'], ]
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    eq_(ret.shape, (3, 7))
    # try multifurcation: three branches, all children of row 0
    subsection = [['-1', '-1', '-1', '-1'],
                  [['0', '0', '0', '0'],
                   ['1', '1', '1', '1'],
                   '|',
                   ['2', '2', '2', '2'],
                   ['3', '3', '3', '3'],
                   '|',
                   ['4', '4', '4', '4'],
                   ['5', '5', '5', '5'], ]
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    # correct parents
    eq_(ret[0, COLS.P], -1.)
    eq_(ret[1, COLS.P], 0.0)
    eq_(ret[3, COLS.P], 0.0)
    eq_(ret[5, COLS.P], 0.0)
    ok_(np.allclose(ret[:, COLS.ID], np.arange(0, 7)))  # correct ID
def test__extract_section():
    """Smoke test: _extract_section handles a soma section and an unknown type.

    NOTE(review): the return values are never asserted, so this only checks
    that no exception is raised — consider adding expected-output checks.
    """
    section = ['"CellBody"',
               ['CellBody'],
               ['-1', '-1', '-1', '-1'],
               ['1', '1', '1', '1'],
               ]
    section = nasc._extract_section(section)
    # unknown type
    section = ['"Foo"',
               ['Bar'],
               ['-1', '-1', '-1', '-1'],
               ['1', '1', '1', '1'],
               ]
    section = nasc._extract_section(section)
def test_sections_to_raw_data():
    """Soma + axon + dendrite (with a bifurcation) flatten to a 15x7 block;
    non-neurite sections are dropped."""
    # from my h5 example neuron
    # https://developer.humanbrainproject.eu/docs/projects/morphology-documentation/0.0.2/h5v1.html
    soma = ['"CellBody"',
            ['CellBody'],
            ['1', '1', '0', '.1'],
            ['-1', '1', '0', '.1'],
            ['-1', '-1', '0', '.1'],
            ['1', '-1', '0', '.1'],
            ]
    axon = [['Axon'],
            ['0', '5', '0', '.1'],
            ['2', '9', '0', '.1'],
            ['0', '13', '0', '.1'],
            ['2', '13', '0', '.1'],
            ['4', '13', '0', '.1'],
            ]
    dendrite = [['Dendrite'],
                ['3', '-4', '0', '.1'],
                ['3', '-6', '0', '.1'],
                ['3', '-8', '0', '.1'],
                ['3', '-10', '0', '.1'],
                [['0', '-10', '0', '.1'],
                 '|',
                 ['6', '-10', '0', '.1'],
                 ]
                ]
    fake_neurite = [['This is not ', ], ['a neurite']]
    sections = [soma, fake_neurite, axon, dendrite, ]
    raw_data = nasc._sections_to_raw_data(sections)
    eq_(raw_data.shape, (15, 7))
    ok_(np.allclose(raw_data[:, COLS.ID], np.arange(0, 15)))  # correct ID
    # 3 is ID of end of the soma, 2 sections attach to this
    # NOTE(review): ok_ takes (value, msg) — the 2 here is used as a message,
    # so only truthiness is checked; eq_ was probably intended.
    ok_(np.count_nonzero(raw_data[:, COLS.P] == 3), 2)
# what I think the
# https://developer.humanbrainproject.eu/docs/projects/morphology-documentation/0.0.2/h5v1.html
# would look like
MORPH_ASC = textwrap.dedent(
u"""\
; Generated by the hand of mgevaert
("CellBody"
(CellBody)
(1 1 0 0) ; 1, 1
(-1 1 0 0) ; 1, 2
(-1 -1 0 0) ; 1, 3
(1 -1 0 0) ; 1, 4
);
((Axon)
(0 5 0 2)
(2 9 0 2)
(0 13 0 2)
(2 13 0 2)
(4 13 0 2)
)
((Dendrite)
(3 -4 0 2)
(3 -6 0 2)
(3 -8 0 2)
(3 -10 0 2)
(
(3 -10 0 2)
(0 -10 0 2)
(-3 -10 0 2)
|
(3 -10 0 2)
(6 -10 0 2)
(9 -10 0 2)
)
)
""")
def test_read():
    """End-to-end: MORPH_ASC loads via the asc reader into a 19x7 data block,
    and the axon root points match the literal's coordinates."""
    with warnings.catch_warnings(record=True):
        rdw = io.load_data(StringIO(MORPH_ASC), reader='asc')
    raw_data = rdw.data_block
    eq_(raw_data.shape, (19, 7))
    ok_(np.allclose(raw_data[:, COLS.ID], np.arange(0, 19)))  # correct ID
    # 3 is ID of end of the soma, 2 sections attach to this
    # NOTE(review): ok_ treats the 2 as a message; eq_ was probably intended.
    ok_(np.count_nonzero(raw_data[:, COLS.P] == 3), 2)
    with warnings.catch_warnings(record=True):
        neuron = load_neuron(StringIO(MORPH_ASC), reader='asc')
    assert_array_equal(neuron.neurites[0].root_node.points[:, COLS.XYZ],
                       [[ 0., 5., 0.],
                        [ 2., 9., 0.],
                        [ 0., 13., 0.],
                        [ 2., 13., 0.],
                        [ 4., 13., 0.]])
def test_load_neurolucida_ascii():
    """Loading the sample .asc fixture yields a DataWrapper with 18 rows."""
    sample_file = Path(NEUROLUCIDA_PATH, 'sample.asc')
    with warnings.catch_warnings(record=True):
        loaded = io.load_data(sample_file)
    # renamed local (was `ascii`, which shadows the builtin)
    ok_(isinstance(loaded, DataWrapper))
    eq_(len(loaded.data_block), 18)
def test_spine():
    """Spines in the .asc file are skipped: only the backbone points remain."""
    with warnings.catch_warnings(record=True):
        n = load_neuron(Path(NEUROLUCIDA_PATH, 'spine.asc'))
    assert_array_equal(n.neurites[0].points,
                       [[ 0. , 5. , 0. , 1. ],
                        [ 2. , 9. , 0. , 1. ],
                        [ 0. , 13. , 0. , 1. ]])

    # NOTE(review): disabled check for the malformed-spine fixture; re-enable
    # once broken-spine.asc raises RawDataError as expected.
    # with warnings.catch_warnings(record=True):
    #    assert_raises(RawDataError,
    #                  load_neuron, Path(NEUROLUCIDA_PATH, 'broken-spine.asc'))
|
wizmer/NeuroM
|
neurom/io/tests/test_neurolucida.py
|
Python
|
bsd-3-clause
| 8,769
|
[
"NEURON"
] |
cffc97c1c037f3f17ba93716424d389868f346a9510c3749cf935efc7df0564e
|
''' Defines tests '''
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint, trapz
from scipy.optimize import minimize, differential_evolution
import chaospy as cp
from EntryGuidance.EntryEquations import System
from EntryGuidance.Uncertainty import getUncertainty
def Optimize():
    ''' Optimizes filter gain of a 1st order fading memory filter in an RSOCP formulation.

    Minimizes the polynomial-chaos expectation of the drag-estimation error
    (OptCostRS) over the scalar filter gain in [0, 1].
    Returns the scipy.optimize result object.
    '''
    perturb = getUncertainty()['parametric']
    bounds = [(0, 1)]
    # Bug fix: the original passed undefined names (pdf, system) as extra args.
    # OptCostRS(gain, pdf) only needs the uncertainty distribution.
    sol = differential_evolution(OptCostRS, args=(perturb,), bounds=bounds,
                                 tol=1e-1, disp=True)
    return sol
def OptCost(sample, gain):
    ''' Standard cost function. For a fixed sample we can optimize the gain '''
    # Build truth/nav systems from the sampled uncertain parameters and set
    # the fading-memory filter gain under test.
    system = System(sample)
    system.setFilterGain(gain)
    # Entry interface state (radius, lon, lat, velocity, fpa, heading, range)
    r0, theta0, phi0, v0, gamma0, psi0,s0 = (3540.0e3, np.radians(-90.07), np.radians(-43.90),
                                             5505.0, np.radians(-14.15), np.radians(4.99), 780e3)
    x0_true = np.array([r0, theta0, phi0, v0, gamma0, psi0, s0, system.truth.vehicle.mass])
    x0_nav = np.array([r0, theta0, phi0, v0, gamma0, psi0, s0, system.nav.vehicle.mass])
    RL = 1.0
    RD = 1.0
    # Stacked state: columns 0-7 truth, 8-15 nav, 16-17 lift/drag ratio estimates
    X0 = np.hstack((x0_true, x0_nav, RL, RD))
    time = np.linspace(0,250,1500)
    u = 0,0,0  # zero control input throughout
    X = odeint(system.dynamics(u), X0, time)
    # columns 8 and 11 are presumably nav radius and velocity — confirm
    # against EntryEquations.System state ordering
    Ltrue,Dtrue = system.truth.aeroforces(X[:,8],X[:,11])
    Lmodel,Dmodel = system.model.aeroforces(X[:,8], X[:,11])
    L = Lmodel*X[:,16]  # NOTE(review): computed but unused; only drag enters the cost
    D = Dmodel*X[:,17]
    # Integrated squared drag-estimation error over the trajectory
    err = (D-Dtrue)**2
    return trapz(err, time)
def OptCostRS(gain, pdf):
    """Robust-design cost: polynomial-chaos expectation of OptCost over pdf.

    gain -- fading-memory filter gain (scalar or 1-element array from the optimizer)
    pdf  -- chaospy joint distribution of the uncertain parameters
    Returns the PCE-estimated expected value of OptCost.
    """
    polynomials = cp.orth_ttr(order=2, dist=pdf)
    samples, weights = cp.generate_quadrature(order=2, domain=pdf, rule="Gaussian")
    stateTensor = [OptCost(s, gain) for s in samples.T]
    # stateTensor = pool.map(OptCost, samples.T)  # parallel alternative
    PCE = cp.fit_quadrature(polynomials, samples, weights, stateTensor)
    # Fix: compute the expectation once (was evaluated twice) and use the
    # print() call form, which is valid in both Python 2 and 3 (the original
    # Python 2 print statements are syntax errors under Python 3).
    expectation = cp.E(poly=PCE, dist=pdf)
    print("\nGain = {}".format(gain))
    print("PCE Expectation: {} ".format(expectation))
    return expectation
def testFilters(sample=None):
    """Simulate the coupled truth/nav system and plot filtered vs. true aeroforces.

    sample -- optional parameter sample; drawn from the parametric
              uncertainty distribution when omitted.
    """
    if sample is None:
        perturb = getUncertainty()['parametric']
        sample = perturb.sample()
    print sample
    system = System(sample)
    system.setFilterGain(0.9)
    # Entry interface state (radius, lon, lat, velocity, fpa, heading, range)
    r0, theta0, phi0, v0, gamma0, psi0,s0 = (3540.0e3, np.radians(-90.07), np.radians(-43.90),
                                             5505.0, np.radians(-14.15), np.radians(4.99), 780e3)
    x0_true = np.array([r0, theta0, phi0, v0, gamma0, psi0, s0, system.truth.vehicle.mass])
    x0_nav = np.array([r0, theta0, phi0, v0, gamma0, psi0, s0, system.nav.vehicle.mass])
    RL = 1.0
    RD = 1.0
    # Stacked state: columns 0-7 truth, 8-15 nav, 16-17 lift/drag ratio estimates
    X0 = np.hstack((x0_true, x0_nav, RL, RD))
    time = np.linspace(0,200,150)
    u = 0,0,0  # zero control input
    X = odeint(system.dynamics(u), X0, time)
    Ltrue,Dtrue = system.truth.aeroforces(X[:,8],X[:,11])
    Lmodel,Dmodel = system.model.aeroforces(X[:,8], X[:,11])
    # Filter-corrected model forces
    L = Lmodel*X[:,16]
    D = Dmodel*X[:,17]
    # for i,x in enumerate(X0):
    # print "state {}: {}".format(i,x)
    # print "delta CL: {}".format(sample[1])
    # print "delta CD: {}".format(sample[0])
    # plt.figure()
    # plt.plot(time,X[:,16],label = 'RL')
    # plt.plot(time,X[:,17],label = 'RD')
    # plt.plot(time,(1+sample[1])*np.ones_like(time),label = 'RL true')
    # plt.plot(time,(1+sample[0])*np.ones_like(time),label = 'RD true')
    # plt.legend(loc='best')
    plt.figure()
    plt.plot(time,Ltrue, label='Lift, Truth Model')
    plt.plot(time,L,'o',label='Lift model corrected')
    plt.plot(time,Lmodel,label='Uncorrected lift model')
    plt.legend(loc='best')
    plt.figure()
    plt.plot(time,Dtrue, label='Drag, Truth Model')
    plt.plot(time,D,'o',label='Drag model corrected by filter')
    plt.plot(time,Dmodel,label='Uncorrected drag model')
    plt.legend(loc='best')
    # plt.figure()
    # plt.plot(X[:,3],X[:,0])
    # plt.plot(X[:,11],X[:,8])
    plt.show()
    return
def testCuba():
    """Sanity check: adaptively integrate the joint PDF over its support.

    The result should be close to 1 (the normal component's tails are
    truncated at +/-0.5, ~15 sigma, so the truncation error is negligible).
    """
    from cubature import cubature as cuba
    CD = cp.Uniform(-0.10, 0.10)           # CD multiplier
    CL = cp.Uniform(-0.10, 0.10)           # CL multiplier
    rho0 = cp.Normal(0, 0.0333)            # rho0
    scaleHeight = cp.Uniform(-0.05, 0.05)  # scaleheight
    pdf = cp.J(CD, CL, rho0, scaleHeight)

    def PDF(x, *args, **kwargs):
        # cubature passes points row-wise; chaospy expects column-wise
        return pdf.pdf(np.array(x).T)

    x0 = np.array([-0.10, -0.10, -0.5, -0.05])
    xf = np.array([0.10, 0.10, 0.5, 0.05])
    P, err = cuba(PDF, ndim=4, fdim=1, xmin=x0, xmax=xf, vectorized=True, adaptive='p')
    # print() call form works under both Python 2 and 3; the original Python 2
    # print statements are syntax errors under Python 3.
    print("Multi-dimensional integral of the PDF over its support = {}".format(P[0]))
    print("Total error in integration = {}".format(err[0]))
def testPCE():
    ''' Tests chaospy against a 1-d problem with analytical solution.

    Model: dx/dt = -a*x with a ~ Uniform[0, c], x(0) = 1, so x(t) = exp(-a*t).
    Plots the error of the PCE mean/variance against the analytic moments,
    first versus polynomial order (quadrature fit), then versus sample count
    (regression fit at fixed order 5).
    '''
    # Analytic moments of exp(-a*t) for a ~ U(0, c):
    x0 = 1
    time = np.linspace(0.001,100,200)
    c = 1
    mean = (1-np.exp(-c*time))/time
    var = (1-np.exp(-2*c*time))/(2*time) - mean**2

    def xfun(t,a):
        return np.exp(-a*t)

    pdf = cp.Uniform(0,c)
    for order in [5,10,15]:
        # Bug fix: the polynomial order was hard-coded to 5, so the loop
        # variable was ignored and all three curves were identical.
        polynomials = cp.orth_ttr(order=order, dist=pdf)
        nodes, weights = cp.generate_quadrature(order=order+1, domain=pdf, rule="Gaussian")
        samples = np.array([xfun(time,node) for node in nodes.T])
        PCE = cp.fit_quadrature(polynomials,nodes,weights,samples)
        pce_mean = cp.E(poly=PCE,dist=pdf)
        pce_sigma = cp.Std(poly=PCE,dist=pdf)
        plt.figure(1)
        plt.plot(time, np.abs(mean-pce_mean), label='{} ({} points)'.format(order,nodes.shape[1]))
        plt.figure(2)
        plt.plot(time, np.abs(var-pce_sigma**2), label='{} ({} points)'.format(order,nodes.shape[1]))
    plt.legend(loc='best')
    plt.xlabel('Time')
    plt.ylabel('Variance Error')
    plt.figure(1)
    plt.legend(loc='best')
    plt.xlabel('Time')
    plt.ylabel('Mean Error')

    for n in [5,10,15, 1000]:
        # fixed order 5; vary the number of (Sobol) regression samples
        polynomials = cp.orth_ttr(order=5, dist=pdf)
        nodes = pdf.sample(n+2,'S')
        samples = np.array([xfun(time,node) for node in nodes.T])
        PCE = cp.fit_regression(polynomials, nodes, samples,rule='T')
        pce_mean = cp.E(poly=PCE,dist=pdf)
        pce_sigma = cp.Std(poly=PCE,dist=pdf)
        plt.figure(3)
        # Label fix: n+2 samples are drawn above, but labels claimed n+1.
        plt.plot(time, np.abs(mean-pce_mean), label='{} points'.format(n+2))
        plt.figure(4)
        plt.plot(time, np.abs(var-pce_sigma**2), label='{} points'.format(n+2))
    plt.legend(loc='best')
    plt.xlabel('Time')
    plt.ylabel('Variance Error')
    plt.figure(3)
    plt.legend(loc='best')
    plt.xlabel('Time')
    plt.ylabel('Mean Error')
    plt.show()
# def testLQR():
if __name__ == '__main__':
    # Ad-hoc test driver: other experiments are kept commented for reference.
    # perturb = getUncertainty()['parametric']
    # sample = [10*s for s in perturb.sample()]
    # testFilters(sample)
    # gains = np.linspace(-1., 0.99, 25)
    # JRS = [OptCostRS(gain, perturb) for gain in gains]
    # plt.plot(gains,JRS)
    # plt.show()
    testPCE()
|
CDNoyes/EDL-Py
|
TestSuite.py
|
Python
|
gpl-3.0
| 7,679
|
[
"Gaussian"
] |
e0f8e6e882b20ddd0a5cbf9f8f1f46536ae2a20a4ee0c864e2c9afe418153578
|
"""Structural variation detection for split and paired reads using lumpy.
Uses lumpyexpress for lumpy integration and samblaster for read preparation:
https://github.com/GregoryFaust/samblaster
https://github.com/arq5x/lumpy-sv
"""
import contextlib
import os
import sys
import shutil
import vcf
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.structural import shared as sshared
from bcbio.variation import effects, vcfutils, vfilter
# ## Lumpy main
def _run_lumpy(full_bams, sr_bams, disc_bams, work_dir, items):
    """Run lumpy-sv, using speedseq pipeline.

    full_bams/sr_bams/disc_bams -- per-sample full, split-read and discordant
        BAM paths, combined into one multi-sample lumpyexpress call.
    Returns (sorted VCF path, exclusion BED path or None).
    """
    batch = sshared.get_cur_batch(items)
    ext = "-%s-svs" % batch if batch else "-svs"
    out_file = os.path.join(work_dir, "%s%s.vcf"
                            % (os.path.splitext(os.path.basename(items[0]["align_bam"]))[0], ext))
    sv_exclude_bed = sshared.prepare_exclude_file(items, out_file)
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            with tx_tmpdir(items[0]) as tmpdir:
                # lumpyexpress takes comma-separated BAM lists per input type
                full_bams = ",".join(full_bams)
                sr_bams = ",".join(sr_bams)
                disc_bams = ",".join(disc_bams)
                exclude = "-x %s" % sv_exclude_bed if (sv_exclude_bed and utils.file_exists(sv_exclude_bed)) else ""
                ref_file = dd.get_ref_file(items[0])
                # use our bcbio python for runs within lumpyexpress
                curpython_dir = os.path.dirname(sys.executable)
                cmd = ("export PATH={curpython_dir}:$PATH && "
                       "lumpyexpress -v -B {full_bams} -S {sr_bams} -D {disc_bams} "
                       "{exclude} -T {tmpdir} -o {tx_out_file}")
                do.run(cmd.format(**locals()), "lumpyexpress", items[0])
    return vcfutils.sort_by_ref(out_file, items[0]), sv_exclude_bed
def _filter_by_support(in_file, data):
    """Filter call file based on supporting evidence, adding FILTER annotations to VCF.

    Marks (does not remove) calls with low total read support (SU), or calls
    lacking split-read evidence (SR) at sizes where it would be expected.
    """
    clauses = ["FORMAT/SU < 4",
               "(FORMAT/SR == 0 && FORMAT/SU < 15 && ABS(SVLEN)>50000)",
               "(FORMAT/SR == 0 && FORMAT/SU < 5 && ABS(SVLEN)<2000)",
               "(FORMAT/SR == 0 && FORMAT/SU < 15 && ABS(SVLEN)<300)"]
    rc_filter = " || ".join(clauses)
    return vfilter.hard_w_expression(in_file, rc_filter, data, name="ReadCountSupport",
                                     limit_regions=None)
def _filter_by_background(base_samples, back_samples, gt_vcfs, data):
    """Filter base samples, marking any also present in the background.

    Rewrites each base sample's VCF with an InBackground FILTER on records
    whose genotype matches a passing background record, and replaces the
    gt_vcfs entry with the bgzipped/indexed result.
    NOTE(review): record iterators are advanced in lockstep via r.next(),
    which assumes background VCFs have records aligned one-to-one with the
    base VCF — confirm. r.next() is also Python 2 only (py3: next(r)).
    """
    filtname = "InBackground"
    filtdoc = "Variant also present in background samples with same genotype"
    for base_name in base_samples:
        orig_vcf = gt_vcfs[base_name]
        out_file = "%s-backfilter.vcf" % (utils.splitext_plus(orig_vcf)[0])
        if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
            with file_transaction(data, out_file) as tx_out_file:
                with utils.open_gzipsafe(orig_vcf) as in_handle:
                    with _vcf_readers([gt_vcfs[n] for n in back_samples]) as back_readers:
                        inp = vcf.Reader(in_handle, orig_vcf)
                        # declare the new FILTER in the output header
                        inp.filters[filtname] = vcf.parser._Filter(filtname, filtdoc)
                        with open(tx_out_file, "w") as out_handle:
                            outp = vcf.Writer(out_handle, inp)
                            for rec in inp:
                                back_recs = [r.next() for r in back_readers]
                                if _genotype_in_background(rec, back_recs):
                                    rec.add_filter(filtname)
                                outp.write_record(rec)
        if utils.file_exists(out_file + ".gz"):
            out_file = out_file + ".gz"
        gt_vcfs[base_name] = vcfutils.bgzip_and_index(out_file, data["config"])
    return gt_vcfs
def _genotype_in_background(rec, back_recs):
"""Check if the genotype in the record of interest is present in the background records.
"""
def passes(rec):
return not rec.FILTER or len(rec.FILTER) == 0
return any([passes(brec) and passes(rec) and rec.samples[0].gt_alleles == brec.samples[0].gt_alleles
for brec in back_recs])
@contextlib.contextmanager
def _vcf_readers(vcf_files):
    """Context manager yielding a vcf.Reader per input file.

    All underlying handles are closed on exit; the original version leaked
    every handle if the managed block raised, since cleanup was not in a
    finally clause.
    """
    handles = []
    readers = []
    try:
        for vcf_file in vcf_files:
            in_handle = utils.open_gzipsafe(vcf_file)
            handles.append(in_handle)
            readers.append(vcf.Reader(in_handle, vcf_file))
        yield readers
    finally:
        for handle in handles:
            handle.close()
def _sv_workdir(data):
    """Return (creating if needed) the per-sample lumpy working directory."""
    work_dir = os.path.join(data["dirs"]["work"], "structural",
                            dd.get_sample_name(data), "lumpy")
    return utils.safe_makedir(work_dir)
def run(items):
    """Perform detection of structural variations with lumpy, using bwa-mem alignment.

    items -- per-sample pipeline dictionaries (must all be bwa-aligned).
    Returns the items with an added 'sv' entry describing the lumpy calls.
    """
    if not all(utils.get_in(data, ("config", "algorithm", "aligner")) in ["bwa", False, None] for data in items):
        raise ValueError("Require bwa-mem alignment input for lumpy structural variation detection")
    paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
    work_dir = _sv_workdir(paired.tumor_data if paired and paired.tumor_data else items[0])
    # collect full/split-read/discordant BAMs for a joint lumpy call
    full_bams, sr_bams, disc_bams = [], [], []
    for data in items:
        dedup_bam, sr_bam, disc_bam = sshared.get_split_discordants(data, work_dir)
        full_bams.append(dedup_bam)
        sr_bams.append(sr_bam)
        disc_bams.append(disc_bam)
    lumpy_vcf, exclude_file = _run_lumpy(full_bams, sr_bams, disc_bams, work_dir, items)
    # genotype each sample's subset of the joint calls with svtyper
    gt_vcfs = {}
    for data in items:
        sample = dd.get_sample_name(data)
        dedup_bam, sr_bam, _ = sshared.get_split_discordants(data, work_dir)
        sample_vcf = vcfutils.select_sample(lumpy_vcf, sample,
                                           utils.append_stem(lumpy_vcf, "-%s" % sample),
                                           data["config"])
        if "bnd-genotype" in dd.get_tools_on(data):
            # explicitly requested: genotype breakends too (can be slow)
            gt_vcf = _run_svtyper(sample_vcf, dedup_bam, sr_bam, exclude_file, data)
        else:
            # default: genotype only non-breakend calls, then merge BNDs back in
            std_vcf, bnd_vcf = _split_breakends(sample_vcf, data)
            std_gt_vcf = _run_svtyper(std_vcf, dedup_bam, sr_bam, exclude_file, data)
            gt_vcf = vcfutils.concat_variant_files_bcftools(
                orig_files=[std_gt_vcf, bnd_vcf],
                out_file="%s-combined.vcf.gz" % utils.splitext_plus(std_gt_vcf)[0],
                config=data["config"])
        gt_vcfs[dd.get_sample_name(data)] = _filter_by_support(gt_vcf, data)
    if paired and paired.normal_name:
        # tumor/normal: flag tumor calls also seen in the matched normal
        gt_vcfs = _filter_by_background([paired.tumor_name], [paired.normal_name], gt_vcfs, paired.tumor_data)
    out = []
    for data in items:
        if "sv" not in data:
            data["sv"] = []
        vcf_file = gt_vcfs[dd.get_sample_name(data)]
        if dd.get_svprioritize(data):
            effects_vcf, _ = effects.add_to_vcf(vcf_file, data, "snpeff")
        else:
            effects_vcf = None
        data["sv"].append({"variantcaller": "lumpy",
                           "vrn_file": effects_vcf or vcf_file,
                           "exclude_file": exclude_file})
        out.append(data)
    return out
def _split_breakends(in_file, data):
    """Skip genotyping on breakends. This is often slow in high depth regions with many breakends.

    Splits the input VCF into (standard calls, breakend calls), both
    bgzipped/indexed. Returns (std_file, bnd_file).
    """
    bnd_file = "%s-bnd.vcf.gz" % utils.splitext_plus(in_file)[0]
    std_file = "%s-std.vcf.gz" % utils.splitext_plus(in_file)[0]
    if not utils.file_uptodate(bnd_file, in_file):
        with file_transaction(data, bnd_file) as tx_out_file:
            # -i: include only breakend (BND) records
            cmd = """bcftools view -O z -o {tx_out_file} -i "SVTYPE='BND'" {in_file}"""
            do.run(cmd.format(**locals()), "Select Lumpy breakends")
    vcfutils.bgzip_and_index(bnd_file, data["config"])
    if not utils.file_uptodate(std_file, in_file):
        with file_transaction(data, std_file) as tx_out_file:
            # -e: exclude breakend records, keeping everything else
            cmd = """bcftools view -O z -o {tx_out_file} -e "SVTYPE='BND'" {in_file}"""
            do.run(cmd.format(**locals()), "Select Lumpy non-breakends")
    vcfutils.bgzip_and_index(std_file, data["config"])
    return std_file, bnd_file
def run_svtyper_prioritize(call):
    """Run svtyper on prioritized outputs, adding in typing for breakends skipped earlier.

    Returns a callable (in_file, work_dir, data) -> genotyped VCF that closes
    over the original call's exclusion file.
    """
    def _genotype(in_file, work_dir, data):
        # re-derive the deduplicated and split-read BAMs for this sample
        dedup_bam, sr_bam, _ = sshared.get_split_discordants(data, work_dir)
        return _run_svtyper(in_file, dedup_bam, sr_bam, call.get("exclude_file"), data)
    return _genotype
def _run_svtyper(in_file, full_bam, sr_bam, exclude_file, data):
    """Genotype structural variant calls with SVtyper.

    Removes calls in high depth regions to avoid slow runtimes:
    https://github.com/hall-lab/svtyper/issues/16
    Returns the reference-sorted, genotyped VCF path.
    """
    out_file = "%s-wgts.vcf.gz" % utils.splitext_plus(in_file)[0]
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(data, out_file) as tx_out_file:
            if not vcfutils.vcf_has_variants(in_file):
                # nothing to genotype; pass the empty VCF through unchanged
                shutil.copy(in_file, out_file)
            else:
                # run svtyper through the same python as this process
                python = sys.executable
                svtyper = os.path.join(os.path.dirname(sys.executable), "svtyper")
                if exclude_file and utils.file_exists(exclude_file):
                    regions_to_rm = "-T ^%s" % (exclude_file)
                else:
                    regions_to_rm = ""
                # add FILTER headers, which are lost during svtyping
                header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0]
                with open(header_file, "w") as out_handle:
                    with utils.open_gzipsafe(in_file) as in_handle:
                        for line in in_handle:
                            if not line.startswith("#"):
                                break  # past the header; stop scanning
                            if line.startswith("##FILTER"):
                                out_handle.write(line)
                    # contig lines are also needed for bcftools annotate
                    for region in ref.file_contigs(dd.get_ref_file(data), data["config"]):
                        out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size))
                cmd = ("bcftools view {in_file} {regions_to_rm} | "
                       "{python} {svtyper} -M -B {full_bam} -S {sr_bam} | "
                       "bcftools annotate -h {header_file} | "
                       "bgzip -c > {tx_out_file}")
                do.run(cmd.format(**locals()), "SV genotyping with svtyper")
    return vcfutils.sort_by_ref(out_file, data)
|
mjafin/bcbio-nextgen
|
bcbio/structural/lumpy.py
|
Python
|
mit
| 10,840
|
[
"BWA"
] |
1c8f100bc2d925ebbafb223043ad8b012da55f5ae711c0a67b96e4f314865e26
|
#!/usr/bin/env python
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Evangelos Anagnostopoulos
def parse_arguments(argv=None):
    """Parse command-line options for the random Fourier classification demo.

    argv -- optional list of argument strings; defaults to sys.argv[1:]
            (backward compatible with the original zero-argument call).
    Returns the parsed argparse.Namespace.
    """
    import argparse
    parser = argparse.ArgumentParser(description=
        "Solve binary classification problems stored in libsvm format, "
        "using Random Fourier features and SVMOcas")
    parser.add_argument('--dataset', required=True, type=str,
                        help='Path to training dataset in LibSVM format.')
    parser.add_argument('--testset', type=str,
                        help='Path to test dataset in LibSVM format.')
    parser.add_argument('-D', default=300, type=int,
                        help='The number of samples to use')
    parser.add_argument('-C', default=0.1, type=float,
                        help='SVMOcas regularization constant')
    parser.add_argument('--epsilon', default=0.01, type=float,
                        help='SVMOcas epsilon parameter')
    parser.add_argument('--width', default=8, type=float,
                        help='Width of the Gaussian Kernel to approximate')
    parser.add_argument('--dimension', type=int,
                        help='Dimension of input dataset')
    return parser.parse_args(argv)
def evaluate(predicted_labels, labels, prefix="Results"):
from shogun import PRCEvaluation, ROCEvaluation, AccuracyMeasure
prc_evaluator = PRCEvaluation()
roc_evaluator = ROCEvaluation()
acc_evaluator = AccuracyMeasure()
auPRC = prc_evaluator.evaluate(predicted_labels, labels)
auROC = roc_evaluator.evaluate(predicted_labels, labels)
acc = acc_evaluator.evaluate(predicted_labels, labels)
print ('{0}: auPRC = {1:.5f}, auROC = {2:.5f}, acc = {3:.5f} '+
'({4}% incorrectly classified)').format(
prefix, auPRC, auROC, acc, (1-acc)*100)
def load_sparse_data(filename, dimension=None):
    """Load a LibSVM-format file into sparse features plus binary labels.

    filename -- path to the LibSVM file to load.
    dimension -- optional feature-space dimension to enforce.
    Returns {'data': SparseRealFeatures, 'labels': BinaryLabels}.
    """
    # Bug fix: the original read the global args.dataset instead of the
    # filename parameter, so requesting the test set reloaded the training set.
    input_file = LibSVMFile(filename)
    sparse_feats = SparseRealFeatures()
    label_array = sparse_feats.load_with_labels(input_file)
    labels = BinaryLabels(label_array)
    if dimension is not None:
        sparse_feats.set_num_features(dimension)
    return {'data': sparse_feats, 'labels': labels}
if __name__=='__main__':
    from shogun import SparseRealFeatures, RandomFourierDotFeatures, GAUSSIAN
    from shogun import LibSVMFile, BinaryLabels, SVMOcas
    from shogun import Time
    from numpy import array
    args = parse_arguments()
    # Train: map sparse LibSVM data through random Fourier features
    # approximating a Gaussian kernel, then fit a linear SVMOcas.
    print 'Loading training data...'
    sparse_data = load_sparse_data(args.dataset,args.dimension)
    kernel_params = array([args.width], dtype=float)
    rf_feats = RandomFourierDotFeatures(sparse_data['data'], args.D, GAUSSIAN,
                                        kernel_params)
    svm = SVMOcas(args.C, rf_feats, sparse_data['labels'])
    svm.set_epsilon(args.epsilon)
    print 'Starting training.'
    timer = Time()
    svm.train()
    timer.stop()
    print 'Training completed, took {0:.2f}s.'.format(timer.time_diff_sec())
    predicted_labels = svm.apply()
    evaluate(predicted_labels, sparse_data['labels'], 'Training results')
    if args.testset!=None:
        # Reuse the SAME random coefficients so test features live in the
        # same randomized feature space as the training features.
        random_coef = rf_feats.get_random_coefficients()
        # removing current dataset from memory in order to load the test dataset,
        # to avoid running out of memory
        rf_feats = None
        svm.set_features(None)
        svm.set_labels(None)
        sparse_data = None
        print 'Loading test data...'
        sparse_data = load_sparse_data(args.testset, args.dimension)
        rf_feats = RandomFourierDotFeatures(sparse_data['data'], args.D, GAUSSIAN,
                                            kernel_params, random_coef)
        predicted_labels = svm.apply(rf_feats)
        evaluate(predicted_labels, sparse_data['labels'], 'Test results')
|
shogun-toolbox/shogun
|
applications/classification/random_fourier_classification.py
|
Python
|
bsd-3-clause
| 3,353
|
[
"Gaussian"
] |
a2112599f7ab47af96dc20d3c9bcc27b373a57c94ac2fb4c8543bdc760a1ae64
|
# -*- coding: utf-8 -*-
# Created: 2018-01-04T19:46:58 PST
ilist = {
8485: u"Extended Potion of Undead Slaying",
8486: u"Extended Potion of Nightmare Court Slaying",
8487: u"Extended Potion of Sons of Svanir Slaying",
8488: u"Extended Potion of Outlaw Slaying",
8489: u"Extended Potion of Flame Legion Slaying",
8490: u"Extended Potion of Inquest Slaying",
8492: u"Extended Potion of Dredge Slaying",
8493: u"Extended Potion of Ghost Slaying",
8576: u"Bottle of Rice Wine",
8805: u"Weak Potion of Centaur Slaying",
8806: u"Weak Potion of Outlaw Slaying",
8807: u"Weak Potion of Nightmare Court Slaying",
8808: u"Weak Potion of Sons of Svanir Slaying",
8809: u"Weak Potion of Destroyer Slaying",
8810: u"Weak Potion of Elemental Slaying",
8811: u"Weak Potion of Demon Slaying",
8812: u"Weak Potion of Inquest Slaying",
8813: u"Weak Potion of Ogre Slaying",
8814: u"Weak Potion of Ice Brood Slaying",
8815: u"Weak Potion of Grawl Slaying",
8816: u"Weak Potion of Krait Slaying",
8817: u"Weak Potion of Dredge Slaying",
8818: u"Weak Potion of Undead Slaying",
8819: u"Minor Potion of Flame Legion Slaying",
8820: u"Minor Potion of Centaur Slaying",
8821: u"Minor Potion of Outlaw Slaying",
8822: u"Minor Potion of Nightmare Court Slaying",
8823: u"Minor Potion of Sons of Svanir Slaying",
8824: u"Minor Potion of Destroyer Slaying",
8825: u"Minor Potion of Elemental Slaying",
8826: u"Minor Potion of Demon Slaying",
8827: u"Minor Potion of Inquest Slaying",
8828: u"Minor Potion of Ogre Slaying",
8829: u"Minor Potion of Ice Brood Slaying",
8830: u"Minor Potion of Grawl Slaying",
8831: u"Minor Potion of Krait Slaying",
8832: u"Minor Potion of Dredge Slaying",
8833: u"Minor Potion of Undead Slaying",
8834: u"Potion of Flame Legion Slaying",
8835: u"Potion of Centaur Slaying",
8836: u"Potion of Outlaw Slaying",
8837: u"Potion of Nightmare Court Slaying",
8838: u"Potion of Sons of Svanir Slaying",
8839: u"Potion of Destroyer Slaying",
8840: u"Potion of Elemental Slaying",
8841: u"Potion of Demon Slaying",
8842: u"Potion of Inquest Slaying",
8843: u"Potion of Ogre Slaying",
8844: u"Potion of Ice Brood Slaying",
8845: u"Potion of Grawl Slaying",
8846: u"Potion of Krait Slaying",
8847: u"Potion of Dredge Slaying",
8848: u"Potion of Undead Slaying",
8849: u"Strong Potion of Flame Legion Slaying",
8850: u"Strong Potion of Centaur Slaying",
8851: u"Strong Potion of Outlaw Slaying",
8852: u"Strong Potion of Nightmare Court Slaying",
8853: u"Strong Potion of Sons of Svanir Slaying",
8854: u"Strong Potion of Destroyer Slaying",
8855: u"Strong Potion of Elemental Slaying",
8856: u"Strong Potion of Demon Slaying",
8857: u"Strong Potion of Inquest Slaying",
8858: u"Strong Potion of Ogre Slaying",
8859: u"Strong Potion of Ice Brood Slaying",
8860: u"Strong Potion of Grawl Slaying",
8861: u"Strong Potion of Krait Slaying",
8862: u"Strong Potion of Dredge Slaying",
8863: u"Strong Potion of Undead Slaying",
8864: u"Potent Potion of Flame Legion Slaying",
8865: u"Potent Potion of Centaur Slaying",
8866: u"Potent Potion of Outlaw Slaying",
8867: u"Potent Potion of Nightmare Court Slaying",
8868: u"Potent Potion of Sons of Svanir Slaying",
8869: u"Potent Potion of Destroyer Slaying",
8870: u"Potent Potion of Elemental Slaying",
8871: u"Potent Potion of Demon Slaying",
8872: u"Potent Potion of Inquest Slaying",
8873: u"Potent Potion of Ogre Slaying",
8874: u"Potent Potion of Ice Brood Slaying",
8875: u"Potent Potion of Grawl Slaying",
8876: u"Potent Potion of Krait Slaying",
8877: u"Potent Potion of Dredge Slaying",
8878: u"Potent Potion of Undead Slaying",
8879: u"Powerful Potion of Flame Legion Slaying",
8880: u"Powerful Potion of Centaur Slaying",
8881: u"Powerful Potion of Outlaw Slaying",
8882: u"Powerful Potion of Nightmare Court Slaying",
8883: u"Powerful Potion of Sons of Svanir Slaying",
8884: u"Powerful Potion of Destroyer Slaying",
8885: u"Powerful Potion of Elemental Slaying",
8886: u"Powerful Potion of Demon Slaying",
8887: u"Powerful Potion of Inquest Slaying",
8888: u"Powerful Potion of Ogre Slaying",
8889: u"Powerful Potion of Ice Brood Slaying",
8890: u"Powerful Potion of Grawl Slaying",
8891: u"Powerful Potion of Krait Slaying",
8892: u"Powerful Potion of Dredge Slaying",
8893: u"Powerful Potion of Undead Slaying",
9404: u"8 Slot Rawhide Leather Pack",
9406: u"8 Slot Jute Bag",
9409: u"10 Slot Wool Bag",
9411: u"12 Slot Cotton Bag",
9413: u"15 Slot Linen Bag",
9414: u"10 Slot Thin Leather Pack",
9416: u"12 Slot Coarse Leather Pack",
9418: u"15 Slot Rugged Leather Pack",
9420: u"8 Slot Reinforced Bronze Box",
9421: u"10 Slot Iron Box",
9422: u"12 Slot Steel Box",
9423: u"15 Slot Darksteel Box",
9426: u"8 Slot Invisible Pack",
9427: u"8 Slot Oiled Pack",
9429: u"8 Slot Equipment Box",
9430: u"8 Slot Safe Box",
9431: u"Rough Sharpening Stone",
9433: u"Simple Sharpening Stone",
9436: u"Standard Sharpening Stone",
9437: u"10 Slot Invisible Pack",
9438: u"Quality Sharpening Stone",
9439: u"10 Slot Oiled Pack",
9440: u"Hardened Sharpening Stone",
9443: u"Superior Sharpening Stone",
9444: u"12 Slot Invisible Pack",
9445: u"12 Slot Oiled Pack",
9448: u"15 Slot Invisible Pack",
9449: u"15 Slot Oiled Pack",
9451: u"10 Slot Equipment Box",
9452: u"Apprentice Maintenance Oil",
9453: u"Journeyman Maintenance Oil",
9454: u"10 Slot Safe Box",
9456: u"Standard Maintenance Oil",
9458: u"Artisan Maintenance Oil",
9459: u"15 Slot Equipment Box",
9460: u"Quality Maintenance Oil",
9461: u"Master Maintenance Oil",
9462: u"15 Slot Safe Box",
9464: u"Apprentice Tuning Crystal",
9466: u"12 Slot Safe Box",
9467: u"Journeyman Tuning Crystal",
9468: u"12 Slot Equipment Box",
9469: u"Standard Tuning Crystal",
9472: u"Artisan Tuning Crystal",
9473: u"Quality Tuning Crystal",
9476: u"Master Tuning Crystal",
9478: u"8 Slot Craftsman's Bag",
9480: u"8 Slot Invisible Bag",
9482: u"10 Slot Craftsman's Bag",
9484: u"10 Slot Invisible Bag",
9486: u"12 Slot Craftsman's Bag",
9488: u"12 Slot Invisible Bag",
9491: u"15 Slot Invisible Bag",
9494: u"15 Slot Craftsman's Bag",
9566: u"18 Slot Silk Bag",
9567: u"18 Slot Craftsman's Bag",
9569: u"18 Slot Invisible Bag",
9571: u"20 Slot Gossamer Bag",
9572: u"20 Slot Craftsman's Bag",
9574: u"20 Slot Invisible Bag",
9576: u"18 Slot Thick Leather Pack",
9579: u"18 Slot Invisible Pack",
9580: u"18 Slot Oiled Pack",
9581: u"20 Slot Hardened Leather Pack",
9584: u"20 Slot Invisible Pack",
9585: u"20 Slot Oiled Pack",
9586: u"18 Slot Mithril Box",
9588: u"18 Slot Equipment Box",
9589: u"18 Slot Safe Box",
9591: u"20 Slot Orichalcum Box",
9593: u"20 Slot Equipment Box",
9594: u"20 Slot Safe Box",
9817: u"Recipe: Ravaging Intricate Wool Insignia",
9818: u"Recipe: Rejuvenating Intricate Wool Insignia",
9819: u"Recipe: Honed Intricate Wool Insignia",
9820: u"Recipe: Hunter's Intricate Wool Insignia",
9821: u"Recipe: Strong Intricate Wool Insignia",
9822: u"Recipe: Vigorous Intricate Wool Insignia",
9823: u"Recipe: Hearty Intricate Wool Insignia",
9824: u"Recipe: Ravaging Intricate Cotton Insignia",
9825: u"Recipe: Rejuvenating Intricate Cotton Insignia",
9826: u"Recipe: Honed Intricate Cotton Insignia",
9827: u"Recipe: Hunter's Intricate Cotton Insignia",
9828: u"Recipe: Strong Intricate Cotton Insignia",
9829: u"Recipe: Vigorous Intricate Cotton Insignia",
9830: u"Recipe: Hearty Intricate Cotton Insignia",
9831: u"Recipe: Carrion Intricate Linen Insignia",
9832: u"Recipe: Cleric's Intricate Linen Insignia",
9833: u"Recipe: Assassin's Intricate Linen Insignia",
9834: u"Recipe: Berserker's Intricate Linen Insignia",
9835: u"Recipe: Valkyrie Intricate Linen Insignia",
9836: u"Recipe: Rampager's Intricate Linen Insignia",
9837: u"Recipe: Knight's Intricate Linen Insignia",
9838: u"Recipe: Carrion Intricate Silk Insignia",
9839: u"Recipe: Cleric's Intricate Silk Insignia",
9840: u"Recipe: Assassin's Intricate Silk Insignia",
9841: u"Recipe: Berserker's Intricate Silk Insignia",
9842: u"Recipe: Valkyrie Intricate Silk Insignia",
9843: u"Recipe: Rampager's Intricate Silk Insignia",
9844: u"Recipe: Knight's Intricate Silk Insignia",
9845: u"Recipe: Carrion Intricate Gossamer Insignia",
9846: u"Recipe: Cleric's Intricate Gossamer Insignia",
9847: u"Recipe: Assassin's Intricate Gossamer Insignia",
9848: u"Recipe: Berserker's Intricate Gossamer Insignia",
9849: u"Recipe: Valkyrie Intricate Gossamer Insignia",
9850: u"Recipe: Rampager's Intricate Gossamer Insignia",
9851: u"Recipe: Knight's Intricate Gossamer Insignia",
9852: u"Recipe: Ravaging Iron Imbued Inscription",
9853: u"Recipe: Rejuvenating Iron Imbued Inscription",
9854: u"Recipe: Honed Iron Imbued Inscription",
9855: u"Recipe: Hunter's Iron Imbued Inscription",
9856: u"Recipe: Strong Iron Imbued Inscription",
9857: u"Recipe: Vigorous Iron Imbued Inscription",
9858: u"Recipe: Hearty Iron Imbued Inscription",
9859: u"Recipe: Ravaging Steel Imbued Inscription",
9860: u"Recipe: Rejuvenating Steel Imbued Inscription",
9861: u"Recipe: Honed Steel Imbued Inscription",
9862: u"Recipe: Hunter's Steel Imbued Inscription",
9863: u"Recipe: Strong Steel Imbued Inscription",
9864: u"Recipe: Vigorous Steel Imbued Inscription",
9865: u"Recipe: Hearty Steel Imbued Inscription",
9866: u"Recipe: Carrion Darksteel Imbued Inscription",
9867: u"Recipe: Knight's Darksteel Imbued Inscription",
9868: u"Recipe: Cleric's Darksteel Imbued Inscription",
9869: u"Recipe: Assassin's Darksteel Imbued Inscription",
9870: u"Recipe: Berserker's Darksteel Imbued Inscription",
9871: u"Recipe: Valkyrie Darksteel Imbued Inscription",
9872: u"Recipe: Rampager's Darksteel Imbued Inscription",
9873: u"Recipe: Carrion Mithril Imbued Inscription",
9874: u"Recipe: Cleric's Mithril Imbued Inscription",
9875: u"Recipe: Assassin's Mithril Imbued Inscription",
9876: u"Recipe: Berserker's Mithril Imbued Inscription",
9877: u"Recipe: Valkyrie Mithril Imbued Inscription",
9878: u"Recipe: Rampager's Mithril Imbued Inscription",
9879: u"Recipe: Knight's Mithril Imbued Inscription",
9880: u"Recipe: Carrion Orichalcum Imbued Inscription",
9881: u"Recipe: Cleric's Orichalcum Imbued Inscription",
9882: u"Recipe: Assassin's Orichalcum Imbued Inscription",
9883: u"Recipe: Berserker's Orichalcum Imbued Inscription",
9884: u"Recipe: Valkyrie Orichalcum Imbued Inscription",
9885: u"Recipe: Rampager's Orichalcum Imbued Inscription",
9886: u"Recipe: Knight's Orichalcum Imbued Inscription",
9887: u"Recipe: Embellished Intricate Topaz Jewel",
9888: u"Recipe: Embellished Intricate Spinel Jewel",
9889: u"Recipe: Embellished Intricate Peridot Jewel",
9890: u"Recipe: Embellished Intricate Sunstone Jewel",
9891: u"Recipe: Embellished Intricate Carnelian Jewel",
9892: u"Recipe: Embellished Intricate Amethyst Jewel",
9893: u"Recipe: Embellished Intricate Lapis Jewel",
9894: u"Recipe: Embellished Gilded Topaz Jewel",
9895: u"Recipe: Embellished Gilded Spinel Jewel",
9896: u"Recipe: Embellished Gilded Peridot Jewel",
9897: u"Recipe: Embellished Gilded Sunstone Jewel",
9898: u"Recipe: Embellished Gilded Carnelian Jewel",
9899: u"Recipe: Embellished Gilded Amethyst Jewel",
9900: u"Recipe: Embellished Gilded Lapis Jewel",
9901: u"Recipe: Embellished Ornate Chrysocola Jewel",
9902: u"Recipe: Embellished Ornate Sapphire Jewel",
9903: u"Recipe: Embellished Ornate Opal Jewel",
9904: u"Recipe: Embellished Ornate Ruby Jewel",
9905: u"Recipe: Embellished Ornate Beryl Jewel",
9906: u"Recipe: Embellished Ornate Coral Jewel",
9907: u"Recipe: Embellished Ornate Emerald Jewel",
9908: u"Recipe: Embellished Brilliant Chrysocola Jewel",
9909: u"Recipe: Embellished Brilliant Sapphire Jewel",
9910: u"Recipe: Embellished Brilliant Opal Jewel",
9911: u"Recipe: Embellished Brilliant Ruby Jewel",
9912: u"Recipe: Embellished Brilliant Beryl Jewel",
9913: u"Recipe: Embellished Brilliant Coral Jewel",
9914: u"Recipe: Embellished Brilliant Emerald Jewel",
9941: u"Recipe: Corrupted Skeggox",
9942: u"Recipe: Corrupted Shard",
9943: u"Recipe: Corrupted Artifact",
9944: u"Recipe: Corrupted Avenger",
9945: u"Recipe: Corrupted Sledgehammer",
9946: u"Recipe: Corrupted Harpoon Gun",
9947: u"Recipe: Corrupted Greatbow",
9948: u"Recipe: Corrupted Cudgel",
9949: u"Recipe: Corrupted Revolver",
9950: u"Recipe: Corrupted Blaster",
9951: u"Recipe: Corrupted Scepter",
9952: u"Recipe: Corrupted Bulwark",
9953: u"Recipe: Corrupted Short Bow",
9954: u"Recipe: Corrupted Spear",
9955: u"Recipe: Corrupted Branch",
9956: u"Recipe: Corrupted Blade",
9957: u"Recipe: Corrupted Wartorch",
9958: u"Recipe: Corrupted Trident",
9959: u"Recipe: Corrupted Harbinger",
10254: u"Recipe: Extended Potion of Ghost Slaying",
10255: u"Recipe: Extended Potion of Outlaw Slaying",
10256: u"Recipe: Extended Potion of Dredge Slaying",
10257: u"Recipe: Extended Potion of Nightmare Court Slaying",
10258: u"Recipe: Extended Potion of Flame Legion Slaying",
10259: u"Recipe: Extended Potion of Undead Slaying",
10260: u"Recipe: Extended Potion of Sons of Svanir Slaying",
10261: u"Recipe: Extended Potion of Inquest Slaying",
10262: u"Mighty Chain Coat",
10263: u"Vital Chain Coat",
10264: u"Mighty Chain Coat",
10265: u"Vital Chain Coat",
10266: u"Precise Chain Coat",
10267: u"Resilient Chain Coat",
10268: u"Mighty Chain Boots",
10269: u"Vital Chain Boots",
10270: u"Mighty Chain Boots",
10271: u"Vital Chain Boots",
10272: u"Precise Chain Boots",
10273: u"Resilient Chain Boots",
10274: u"Mighty Chain Gauntlets",
10275: u"Vital Chain Gauntlets",
10276: u"Mighty Chain Gauntlets",
10277: u"Vital Chain Gauntlets",
10278: u"Precise Chain Gauntlets",
10279: u"Resilient Chain Gauntlets",
10280: u"Mighty Chain Legs",
10281: u"Vital Chain Legs",
10282: u"Mighty Chain Legs",
10283: u"Vital Chain Legs",
10284: u"Precise Chain Legs",
10285: u"Resilient Chain Legs",
10286: u"Vigorous Scale Boots",
10287: u"Vigorous Scale Boots",
10289: u"Strong Scale Boots",
10291: u"Honed Scale Boots",
10292: u"Strong Scale Boots",
10293: u"Hearty Scale Boots",
10294: u"Honed Scale Boots",
10295: u"Vigorous Scale Coat",
10296: u"Vigorous Scale Coat",
10298: u"Strong Scale Coat",
10300: u"Honed Scale Coat",
10301: u"Strong Scale Coat",
10302: u"Hearty Scale Coat",
10303: u"Honed Scale Coat",
10304: u"Vigorous Scale Gauntlets",
10305: u"Vigorous Scale Gauntlets",
10307: u"Strong Scale Gauntlets",
10309: u"Honed Scale Gauntlets",
10310: u"Strong Scale Gauntlets",
10311: u"Hearty Scale Gauntlets",
10312: u"Honed Scale Gauntlets",
10313: u"Vigorous Scale Legs",
10314: u"Vigorous Scale Legs",
10316: u"Strong Scale Legs",
10318: u"Honed Scale Legs",
10319: u"Strong Scale Legs",
10320: u"Hearty Scale Legs",
10321: u"Honed Scale Legs",
10323: u"Strong Splint Greaves",
10324: u"Hearty Splint Greaves",
10325: u"Honed Splint Greaves",
10326: u"Ravaging Splint Greaves",
10327: u"Honed Splint Greaves",
10329: u"Ravaging Splint Greaves",
10330: u"Strong Splint Greaves",
10331: u"Hearty Splint Greaves",
10332: u"Vigorous Splint Greaves",
10333: u"Vigorous Splint Greaves",
10334: u"Strong Splint Coat",
10335: u"Hearty Splint Coat",
10336: u"Honed Splint Coat",
10338: u"Ravaging Splint Coat",
10339: u"Honed Splint Coat",
10341: u"Ravaging Splint Coat",
10342: u"Strong Splint Coat",
10343: u"Hearty Splint Coat",
10344: u"Vigorous Splint Coat",
10345: u"Vigorous Splint Coat",
10346: u"Strong Splint Gauntlets",
10347: u"Hearty Splint Gauntlets",
10348: u"Honed Splint Gauntlets",
10350: u"Ravaging Splint Gauntlets",
10351: u"Honed Splint Gauntlets",
10353: u"Ravaging Splint Gauntlets",
10354: u"Strong Splint Gauntlets",
10355: u"Hearty Splint Gauntlets",
10356: u"Vigorous Splint Gauntlets",
10357: u"Vigorous Splint Gauntlets",
10358: u"Strong Splint Legs",
10359: u"Hearty Splint Legs",
10360: u"Honed Splint Legs",
10362: u"Ravaging Splint Legs",
10363: u"Honed Splint Legs",
10365: u"Ravaging Splint Legs",
10366: u"Strong Splint Legs",
10367: u"Hearty Splint Legs",
10368: u"Vigorous Splint Legs",
10369: u"Vigorous Splint Legs",
10370: u"Strong Splint Pauldrons",
10371: u"Hearty Splint Pauldrons",
10372: u"Honed Splint Pauldrons",
10374: u"Ravaging Splint Pauldrons",
10375: u"Honed Splint Pauldrons",
10377: u"Ravaging Splint Pauldrons",
10378: u"Strong Splint Pauldrons",
10379: u"Hearty Splint Pauldrons",
10380: u"Vigorous Splint Pauldrons",
10381: u"Vigorous Splint Pauldrons",
10382: u"Healing Chain Boots",
10383: u"Precise Chain Boots",
10384: u"Resilient Chain Boots",
10385: u"Healing Chain Coat",
10386: u"Precise Chain Coat",
10387: u"Resilient Chain Coat",
10388: u"Healing Chain Gauntlets",
10389: u"Precise Chain Gauntlets",
10390: u"Resilient Chain Gauntlets",
10391: u"Healing Chain Legs",
10392: u"Precise Chain Legs",
10393: u"Resilient Chain Legs",
10394: u"Mighty Chain Helm",
10395: u"Vital Chain Helm",
10396: u"Mighty Chain Helm",
10397: u"Vital Chain Helm",
10398: u"Precise Chain Helm",
10399: u"Resilient Chain Helm",
10400: u"Healing Chain Helm",
10401: u"Precise Chain Helm",
10402: u"Resilient Chain Helm",
10404: u"Vigorous Scale Helm",
10405: u"Strong Scale Helm",
10407: u"Strong Scale Helm",
10408: u"Vigorous Scale Helm",
10409: u"Honed Scale Helm",
10410: u"Honed Scale Helm",
10411: u"Hearty Scale Helm",
10413: u"Strong Splint Helm",
10414: u"Hearty Splint Helm",
10415: u"Ravaging Splint Helm",
10416: u"Honed Splint Helm",
10417: u"Ravaging Splint Helm",
10418: u"Honed Splint Helm",
10420: u"Strong Splint Helm",
10421: u"Vigorous Splint Helm",
10422: u"Hearty Splint Helm",
10423: u"Vigorous Splint Helm",
10424: u"Malign Chain Pauldrons",
10425: u"Mighty Chain Pauldrons",
10426: u"Vital Chain Pauldrons",
10427: u"Healing Chain Pauldrons",
10428: u"Precise Chain Pauldrons",
10429: u"Resilient Chain Pauldrons",
10430: u"Malign Chain Pauldrons",
10431: u"Healing Chain Pauldrons",
10432: u"Mighty Chain Pauldrons",
10433: u"Precise Chain Pauldrons",
10434: u"Resilient Chain Pauldrons",
10435: u"Vital Chain Pauldrons",
10436: u"Ravaging Scale Pauldrons",
10438: u"Vigorous Scale Pauldrons",
10439: u"Strong Scale Pauldrons",
10440: u"Honed Scale Pauldrons",
10441: u"Hearty Scale Pauldrons",
10442: u"Ravaging Scale Pauldrons",
10444: u"Vigorous Scale Pauldrons",
10445: u"Strong Scale Pauldrons",
10446: u"Honed Scale Pauldrons",
10447: u"Hearty Scale Pauldrons",
10448: u"Malign Chain Legs",
10449: u"Healing Chain Legs",
10450: u"Malign Chain Legs",
10451: u"Ravaging Scale Legs",
10452: u"Hearty Scale Legs",
10453: u"Ravaging Scale Legs",
10454: u"Malign Chain Boots",
10455: u"Healing Chain Boots",
10456: u"Malign Chain Boots",
10457: u"Ravaging Scale Boots",
10458: u"Hearty Scale Boots",
10459: u"Ravaging Scale Boots",
10460: u"Malign Chain Gauntlets",
10461: u"Healing Chain Gauntlets",
10462: u"Malign Chain Gauntlets",
10463: u"Ravaging Scale Gauntlets",
10464: u"Hearty Scale Gauntlets",
10465: u"Ravaging Scale Gauntlets",
10466: u"Malign Chain Coat",
10467: u"Healing Chain Coat",
10468: u"Malign Chain Coat",
10469: u"Ravaging Scale Coat",
10470: u"Hearty Scale Coat",
10471: u"Ravaging Scale Coat",
10472: u"Malign Chain Helm",
10473: u"Healing Chain Helm",
10474: u"Malign Chain Helm",
10475: u"Ravaging Scale Helm",
10476: u"Hearty Scale Helm",
10477: u"Ravaging Scale Helm",
10478: u"Valkyrie Tempered Scale Pauldrons",
10479: u"Carrion Tempered Scale Pauldrons",
10480: u"Knight's Tempered Scale Pauldrons",
10481: u"Rampager's Tempered Scale Pauldrons",
10482: u"Berserker's Tempered Scale Pauldrons",
10483: u"Cleric's Tempered Scale Pauldrons",
10485: u"Berserker's Tempered Scale Pauldrons",
10486: u"Cleric's Tempered Scale Pauldrons",
10487: u"Valkyrie Tempered Scale Pauldrons",
10488: u"Carrion Tempered Scale Pauldrons",
10489: u"Knight's Tempered Scale Pauldrons",
10490: u"Rampager's Tempered Scale Pauldrons",
10492: u"Carrion Barbaric Pauldrons",
10493: u"Knight's Barbaric Pauldrons",
10494: u"Valkyrie Barbaric Pauldrons",
10495: u"Cleric's Barbaric Pauldrons",
10497: u"Berserker's Barbaric Pauldrons",
10498: u"Cleric's Barbaric Pauldrons",
10499: u"Valkyrie Barbaric Pauldrons",
10500: u"Carrion Barbaric Pauldrons",
10501: u"Knight's Barbaric Pauldrons",
10502: u"Berserker's Barbaric Pauldrons",
10503: u"Rampager's Barbaric Pauldrons",
10505: u"Rampager's Barbaric Pauldrons",
10506: u"Carrion Draconic Pauldrons",
10507: u"Cleric's Draconic Pauldrons",
10509: u"Berserker's Draconic Pauldrons",
10510: u"Rampager's Draconic Pauldrons",
10511: u"Knight's Draconic Pauldrons",
10512: u"Valkyrie Draconic Pauldrons",
10513: u"Valkyrie Tempered Scale Legs",
10514: u"Carrion Tempered Scale Legs",
10515: u"Knight's Tempered Scale Legs",
10516: u"Rampager's Tempered Scale Legs",
10517: u"Berserker's Tempered Scale Legs",
10518: u"Cleric's Tempered Scale Legs",
10520: u"Berserker's Tempered Scale Legs",
10521: u"Cleric's Tempered Scale Legs",
10522: u"Valkyrie Tempered Scale Legs",
10523: u"Carrion Tempered Scale Legs",
10524: u"Knight's Tempered Scale Legs",
10525: u"Rampager's Tempered Scale Legs",
10527: u"Carrion Barbaric Legplates",
10528: u"Knight's Barbaric Legplates",
10529: u"Valkyrie Barbaric Legplates",
10530: u"Cleric's Barbaric Legplates",
10532: u"Berserker's Barbaric Legplates",
10533: u"Cleric's Barbaric Legplates",
10534: u"Valkyrie Barbaric Legplates",
10535: u"Carrion Barbaric Legplates",
10536: u"Knight's Barbaric Legplates",
10537: u"Berserker's Barbaric Legplates",
10538: u"Rampager's Barbaric Legplates",
10540: u"Rampager's Barbaric Legplates",
10541: u"Carrion Draconic Legs",
10542: u"Cleric's Draconic Legs",
10544: u"Berserker's Draconic Legs",
10545: u"Rampager's Draconic Legs",
10546: u"Knight's Draconic Legs",
10547: u"Valkyrie Draconic Legs",
10548: u"Valkyrie Tempered Scale Greaves",
10549: u"Carrion Tempered Scale Greaves",
10550: u"Knight's Tempered Scale Greaves",
10551: u"Rampager's Tempered Scale Greaves",
10552: u"Berserker's Tempered Scale Greaves",
10553: u"Cleric's Tempered Scale Greaves",
10555: u"Berserker's Tempered Scale Greaves",
10556: u"Cleric's Tempered Scale Greaves",
10557: u"Valkyrie Tempered Scale Greaves",
10558: u"Carrion Tempered Scale Greaves",
10559: u"Knight's Tempered Scale Greaves",
10560: u"Rampager's Tempered Scale Greaves",
10562: u"Carrion Barbaric Boots",
10563: u"Knight's Barbaric Boots",
10564: u"Valkyrie Barbaric Boots",
10565: u"Cleric's Barbaric Boots",
10567: u"Berserker's Barbaric Boots",
10568: u"Cleric's Barbaric Boots",
10569: u"Valkyrie Barbaric Boots",
10570: u"Carrion Barbaric Boots",
10571: u"Knight's Barbaric Boots",
10572: u"Berserker's Barbaric Boots",
10573: u"Rampager's Barbaric Boots",
10575: u"Rampager's Barbaric Boots",
10576: u"Carrion Draconic Boots",
10577: u"Cleric's Draconic Boots",
10579: u"Berserker's Draconic Boots",
10580: u"Rampager's Draconic Boots",
10581: u"Knight's Draconic Boots",
10582: u"Valkyrie Draconic Boots",
10583: u"Valkyrie Tempered Scale Gauntlets",
10584: u"Carrion Tempered Scale Gauntlets",
10585: u"Knight's Tempered Scale Gauntlets",
10586: u"Rampager's Tempered Scale Gauntlets",
10587: u"Berserker's Tempered Scale Gauntlets",
10588: u"Cleric's Tempered Scale Gauntlets",
10590: u"Berserker's Tempered Scale Gauntlets",
10591: u"Cleric's Tempered Scale Gauntlets",
10592: u"Valkyrie Tempered Scale Gauntlets",
10593: u"Carrion Tempered Scale Gauntlets",
10594: u"Knight's Tempered Scale Gauntlets",
10595: u"Rampager's Tempered Scale Gauntlets",
10597: u"Carrion Barbaric Gloves",
10598: u"Knight's Barbaric Gloves",
10599: u"Valkyrie Barbaric Gloves",
10600: u"Cleric's Barbaric Gloves",
10602: u"Berserker's Barbaric Gloves",
10603: u"Cleric's Barbaric Gloves",
10604: u"Valkyrie Barbaric Gloves",
10605: u"Carrion Barbaric Gloves",
10606: u"Knight's Barbaric Gloves",
10607: u"Berserker's Barbaric Gloves",
10608: u"Rampager's Barbaric Gloves",
10610: u"Rampager's Barbaric Gloves",
10611: u"Carrion Draconic Gauntlets",
10612: u"Cleric's Draconic Gauntlets",
10614: u"Berserker's Draconic Gauntlets",
10615: u"Rampager's Draconic Gauntlets",
10616: u"Knight's Draconic Gauntlets",
10617: u"Valkyrie Draconic Gauntlets",
10618: u"Valkyrie Tempered Scale Chestplate",
10619: u"Carrion Tempered Scale Chestplate",
10620: u"Knight's Tempered Scale Chestplate",
10621: u"Rampager's Tempered Scale Chestplate",
10622: u"Berserker's Tempered Scale Chestplate",
10623: u"Cleric's Tempered Scale Chestplate",
10625: u"Berserker's Tempered Scale Chestplate",
10626: u"Cleric's Tempered Scale Chestplate",
10627: u"Valkyrie Tempered Scale Chestplate",
10628: u"Carrion Tempered Scale Chestplate",
10629: u"Knight's Tempered Scale Chestplate",
10630: u"Rampager's Tempered Scale Chestplate",
10632: u"Carrion Draconic Coat",
10633: u"Cleric's Draconic Coat",
10635: u"Berserker's Draconic Coat",
10636: u"Rampager's Draconic Coat",
10637: u"Knight's Draconic Coat",
10638: u"Valkyrie Draconic Coat",
10639: u"Carrion Barbaric Coat",
10640: u"Knight's Barbaric Coat",
10641: u"Valkyrie Barbaric Coat",
10642: u"Cleric's Barbaric Coat",
10644: u"Berserker's Barbaric Coat",
10645: u"Cleric's Barbaric Coat",
10646: u"Valkyrie Barbaric Coat",
10647: u"Carrion Barbaric Coat",
10648: u"Knight's Barbaric Coat",
10649: u"Berserker's Barbaric Coat",
10650: u"Rampager's Barbaric Coat",
10652: u"Rampager's Barbaric Coat",
10653: u"Valkyrie Tempered Scale Helm",
10654: u"Carrion Tempered Scale Helm",
10655: u"Knight's Tempered Scale Helm",
10656: u"Rampager's Tempered Scale Helm",
10657: u"Berserker's Tempered Scale Helm",
10658: u"Cleric's Tempered Scale Helm",
10660: u"Berserker's Tempered Scale Helm",
10661: u"Cleric's Tempered Scale Helm",
10662: u"Valkyrie Tempered Scale Helm",
10663: u"Carrion Tempered Scale Helm",
10664: u"Knight's Tempered Scale Helm",
10665: u"Rampager's Tempered Scale Helm",
10667: u"Carrion Barbaric Helm",
10668: u"Knight's Barbaric Helm",
10669: u"Valkyrie Barbaric Helm",
10670: u"Cleric's Barbaric Helm",
10672: u"Berserker's Barbaric Helm",
10673: u"Cleric's Barbaric Helm",
10674: u"Valkyrie Barbaric Helm",
10675: u"Carrion Barbaric Helm",
10676: u"Knight's Barbaric Helm",
10677: u"Berserker's Barbaric Helm",
10678: u"Rampager's Barbaric Helm",
10680: u"Rampager's Barbaric Helm",
10681: u"Carrion Draconic Helm",
10682: u"Cleric's Draconic Helm",
10684: u"Berserker's Draconic Helm",
10685: u"Rampager's Draconic Helm",
10686: u"Knight's Draconic Helm",
10687: u"Valkyrie Draconic Helm",
10688: u"Carrion Gladiator Pauldrons",
10689: u"Cleric's Gladiator Pauldrons",
10691: u"Berserker's Gladiator Pauldrons",
10692: u"Rampager's Gladiator Pauldrons",
10693: u"Knight's Gladiator Pauldrons",
10694: u"Valkyrie Gladiator Pauldrons",
10695: u"Carrion Gladiator Legplates",
10696: u"Cleric's Gladiator Legplates",
10698: u"Berserker's Gladiator Legplates",
10699: u"Rampager's Gladiator Legplates",
10700: u"Knight's Gladiator Legplates",
10701: u"Valkyrie Gladiator Legplates",
10702: u"Carrion Gladiator Boots",
10703: u"Cleric's Gladiator Boots",
10705: u"Berserker's Gladiator Boots",
10706: u"Rampager's Gladiator Boots",
10707: u"Knight's Gladiator Boots",
10708: u"Valkyrie Gladiator Boots",
10709: u"Carrion Gladiator Gauntlets",
10710: u"Cleric's Gladiator Gauntlets",
10712: u"Berserker's Gladiator Gauntlets",
10713: u"Rampager's Gladiator Gauntlets",
10714: u"Knight's Gladiator Gauntlets",
10715: u"Valkyrie Gladiator Gauntlets",
10716: u"Carrion Gladiator Chestplate",
10717: u"Cleric's Gladiator Chestplate",
10719: u"Berserker's Gladiator Chestplate",
10720: u"Rampager's Gladiator Chestplate",
10721: u"Knight's Gladiator Chestplate",
10722: u"Valkyrie Gladiator Chestplate",
10723: u"Carrion Gladiator Helm",
10724: u"Cleric's Gladiator Helm",
10726: u"Berserker's Gladiator Helm",
10727: u"Rampager's Gladiator Helm",
10728: u"Knight's Gladiator Helm",
10729: u"Valkyrie Gladiator Helm",
10730: u"Mighty Chain Legs",
10731: u"Mighty Chain Boots",
10732: u"Mighty Chain Gauntlets",
10733: u"Mighty Chain Coat",
10734: u"Rejuvenating Scale Pauldrons",
10735: u"Rejuvenating Scale Pauldrons",
10736: u"Rejuvenating Splint Pauldrons",
10737: u"Rejuvenating Splint Pauldrons",
10738: u"Rejuvenating Scale Legs",
10739: u"Rejuvenating Scale Legs",
10740: u"Rejuvenating Splint Legs",
10741: u"Rejuvenating Splint Legs",
10742: u"Rejuvenating Scale Boots",
10743: u"Rejuvenating Scale Boots",
10744: u"Rejuvenating Splint Greaves",
10745: u"Rejuvenating Splint Greaves",
10746: u"Rejuvenating Scale Gauntlets",
10747: u"Rejuvenating Scale Gauntlets",
10748: u"Rejuvenating Splint Gauntlets",
10749: u"Rejuvenating Splint Gauntlets",
10750: u"Rejuvenating Scale Coat",
10751: u"Rejuvenating Scale Coat",
10752: u"Rejuvenating Splint Coat",
10753: u"Rejuvenating Splint Coat",
10754: u"Rejuvenating Scale Helm",
10755: u"Rejuvenating Scale Helm",
10756: u"Rejuvenating Splint Helm",
10757: u"Rejuvenating Splint Helm",
10758: u"Ravaging Gladiator Pauldrons",
10759: u"Rejuvenating Gladiator Pauldrons",
10760: u"Honed Gladiator Pauldrons",
10762: u"Strong Gladiator Pauldrons",
10763: u"Vigorous Gladiator Pauldrons",
10764: u"Hearty Gladiator Pauldrons",
10765: u"Ravaging Gladiator Pauldrons",
10766: u"Rejuvenating Gladiator Pauldrons",
10767: u"Honed Gladiator Pauldrons",
10769: u"Strong Gladiator Pauldrons",
10770: u"Vigorous Gladiator Pauldrons",
10771: u"Hearty Gladiator Pauldrons",
10772: u"Carrion Gladiator Pauldrons",
10773: u"Cleric's Gladiator Pauldrons",
10775: u"Berserker's Gladiator Pauldrons",
10776: u"Valkyrie Gladiator Pauldrons",
10777: u"Rampager's Gladiator Pauldrons",
10778: u"Knight's Gladiator Pauldrons",
10779: u"Ravaging Gladiator Legplates",
10780: u"Rejuvenating Gladiator Legplates",
10781: u"Honed Gladiator Legplates",
10783: u"Strong Gladiator Legplates",
10784: u"Vigorous Gladiator Legplates",
10785: u"Hearty Gladiator Legplates",
10786: u"Ravaging Gladiator Legplates",
10787: u"Rejuvenating Gladiator Legplates",
10788: u"Honed Gladiator Legplates",
10790: u"Strong Gladiator Legplates",
10791: u"Vigorous Gladiator Legplates",
10792: u"Hearty Gladiator Legplates",
10793: u"Carrion Gladiator Legplates",
10794: u"Cleric's Gladiator Legplates",
10796: u"Berserker's Gladiator Legplates",
10797: u"Valkyrie Gladiator Legplates",
10798: u"Rampager's Gladiator Legplates",
10799: u"Knight's Gladiator Legplates",
10800: u"Ravaging Gladiator Boots",
10801: u"Rejuvenating Gladiator Boots",
10802: u"Honed Gladiator Boots",
10804: u"Strong Gladiator Boots",
10805: u"Vigorous Gladiator Boots",
10806: u"Hearty Gladiator Boots",
10807: u"Ravaging Gladiator Boots",
10808: u"Rejuvenating Gladiator Boots",
10809: u"Honed Gladiator Boots",
10811: u"Strong Gladiator Boots",
10812: u"Vigorous Gladiator Boots",
10813: u"Hearty Gladiator Boots",
10814: u"Carrion Gladiator Boots",
10815: u"Cleric's Gladiator Boots",
10817: u"Berserker's Gladiator Boots",
10818: u"Valkyrie Gladiator Boots",
10819: u"Rampager's Gladiator Boots",
10820: u"Knight's Gladiator Boots",
10821: u"Ravaging Gladiator Gauntlets",
10822: u"Rejuvenating Gladiator Gauntlets",
10823: u"Honed Gladiator Gauntlets",
10825: u"Strong Gladiator Gauntlets",
10826: u"Vigorous Gladiator Gauntlets",
10827: u"Hearty Gladiator Gauntlets",
10828: u"Ravaging Gladiator Gauntlets",
10829: u"Rejuvenating Gladiator Gauntlets",
10830: u"Honed Gladiator Gauntlets",
10832: u"Strong Gladiator Gauntlets",
10833: u"Vigorous Gladiator Gauntlets",
10834: u"Hearty Gladiator Gauntlets",
10835: u"Carrion Gladiator Gauntlets",
10836: u"Cleric's Gladiator Gauntlets",
10838: u"Berserker's Gladiator Gauntlets",
10839: u"Valkyrie Gladiator Gauntlets",
10840: u"Rampager's Gladiator Gauntlets",
10841: u"Knight's Gladiator Gauntlets",
10842: u"Ravaging Gladiator Chestplate",
10843: u"Rejuvenating Gladiator Chestplate",
10844: u"Honed Gladiator Chestplate",
10846: u"Strong Gladiator Chestplate",
10847: u"Vigorous Gladiator Chestplate",
10848: u"Hearty Gladiator Chestplate",
10849: u"Ravaging Gladiator Chestplate",
10850: u"Rejuvenating Gladiator Chestplate",
10851: u"Honed Gladiator Chestplate",
10853: u"Strong Gladiator Chestplate",
10854: u"Vigorous Gladiator Chestplate",
10855: u"Hearty Gladiator Chestplate",
10856: u"Carrion Gladiator Chestplate",
10857: u"Cleric's Gladiator Chestplate",
10859: u"Berserker's Gladiator Chestplate",
10860: u"Valkyrie Gladiator Chestplate",
10861: u"Rampager's Gladiator Chestplate",
10862: u"Knight's Gladiator Chestplate",
10863: u"Ravaging Gladiator Helm",
10864: u"Rejuvenating Gladiator Helm",
10865: u"Honed Gladiator Helm",
10867: u"Strong Gladiator Helm",
10868: u"Vigorous Gladiator Helm",
10869: u"Hearty Gladiator Helm",
10870: u"Ravaging Gladiator Helm",
10871: u"Rejuvenating Gladiator Helm",
10872: u"Honed Gladiator Helm",
10874: u"Strong Gladiator Helm",
10875: u"Vigorous Gladiator Helm",
10876: u"Hearty Gladiator Helm",
10877: u"Carrion Gladiator Helm",
10878: u"Cleric's Gladiator Helm",
10880: u"Berserker's Gladiator Helm",
10881: u"Valkyrie Gladiator Helm",
10882: u"Rampager's Gladiator Helm",
10883: u"Knight's Gladiator Helm",
10884: u"Mighty Embroidered Coat",
10885: u"Vital Embroidered Coat",
10886: u"Mighty Embroidered Coat",
10887: u"Vital Embroidered Coat",
10888: u"Precise Embroidered Coat",
10889: u"Resilient Embroidered Coat",
10890: u"Precise Embroidered Coat",
10891: u"Resilient Embroidered Coat",
10892: u"Mighty Embroidered Sandals",
10893: u"Vital Embroidered Sandals",
10894: u"Mighty Embroidered Sandals",
10895: u"Vital Embroidered Sandals",
10896: u"Precise Embroidered Sandals",
10897: u"Resilient Embroidered Sandals",
10898: u"Precise Embroidered Sandals",
10899: u"Resilient Embroidered Sandals",
10900: u"Mighty Embroidered Wristguards",
10901: u"Vital Embroidered Wristguards",
10902: u"Mighty Embroidered Wristguards",
10903: u"Vital Embroidered Wristguards",
10904: u"Precise Embroidered Wristguards",
10905: u"Resilient Embroidered Wristguards",
10906: u"Precise Embroidered Wristguards",
10907: u"Resilient Embroidered Wristguards",
10908: u"Mighty Embroidered Pants",
10909: u"Vital Embroidered Pants",
10910: u"Mighty Embroidered Pants",
10911: u"Vital Embroidered Pants",
10912: u"Precise Embroidered Pants",
10913: u"Resilient Embroidered Pants",
10914: u"Precise Embroidered Pants",
10915: u"Resilient Embroidered Pants",
10916: u"Vigorous Student Shoes",
10917: u"Vigorous Student Coat",
10918: u"Vigorous Student Gloves",
10919: u"Vigorous Student Leggings",
10920: u"Vigorous Student Shoes",
10921: u"Vigorous Student Coat",
10922: u"Vigorous Student Gloves",
10923: u"Vigorous Student Leggings",
10924: u"Healing Embroidered Coat",
10925: u"Healing Embroidered Sandals",
10926: u"Healing Embroidered Wristguards",
10927: u"Healing Embroidered Pants",
10930: u"Honed Student Shoes",
10931: u"Strong Student Shoes",
10932: u"Strong Student Shoes",
10933: u"Hearty Student Shoes",
10934: u"Honed Student Shoes",
10936: u"Strong Student Coat",
10938: u"Honed Student Coat",
10939: u"Strong Student Coat",
10940: u"Hearty Student Coat",
10941: u"Honed Student Coat",
10943: u"Strong Student Gloves",
10945: u"Honed Student Gloves",
10946: u"Strong Student Gloves",
10947: u"Hearty Student Gloves",
10948: u"Honed Student Gloves",
10950: u"Strong Student Leggings",
10952: u"Honed Student Leggings",
10953: u"Strong Student Leggings",
10954: u"Hearty Student Leggings",
10955: u"Honed Student Leggings",
10956: u"Strong Acolyte Boots",
10957: u"Hearty Acolyte Boots",
10958: u"Honed Acolyte Boots",
10960: u"Ravaging Acolyte Boots",
10961: u"Honed Acolyte Boots",
10963: u"Ravaging Acolyte Boots",
10964: u"Strong Acolyte Boots",
10965: u"Hearty Acolyte Boots",
10966: u"Vigorous Acolyte Boots",
10967: u"Vigorous Acolyte Boots",
10968: u"Strong Acolyte Coat",
10969: u"Hearty Acolyte Coat",
10970: u"Honed Acolyte Coat",
10972: u"Ravaging Acolyte Coat",
10973: u"Honed Acolyte Coat",
10975: u"Ravaging Acolyte Coat",
10976: u"Strong Acolyte Coat",
10977: u"Hearty Acolyte Coat",
10978: u"Vigorous Acolyte Coat",
10979: u"Vigorous Acolyte Coat",
10980: u"Strong Acolyte Gloves",
10981: u"Hearty Acolyte Gloves",
10982: u"Honed Acolyte Gloves",
10984: u"Ravaging Acolyte Gloves",
10985: u"Honed Acolyte Gloves",
10987: u"Ravaging Acolyte Gloves",
10988: u"Strong Acolyte Gloves",
10989: u"Hearty Acolyte Gloves",
10990: u"Vigorous Acolyte Gloves",
10991: u"Vigorous Acolyte Gloves",
10992: u"Strong Acolyte Pants",
10993: u"Hearty Acolyte Pants",
10994: u"Honed Acolyte Pants",
10996: u"Ravaging Acolyte Pants",
10997: u"Honed Acolyte Pants",
10999: u"Ravaging Acolyte Pants",
11000: u"Strong Acolyte Pants",
11001: u"Hearty Acolyte Pants",
11002: u"Vigorous Acolyte Pants",
11003: u"Vigorous Acolyte Pants",
11004: u"Strong Acolyte Mantle",
11005: u"Hearty Acolyte Mantle",
11006: u"Honed Acolyte Mantle",
11008: u"Ravaging Acolyte Mantle",
11009: u"Honed Acolyte Mantle",
11011: u"Ravaging Acolyte Mantle",
11012: u"Strong Acolyte Mantle",
11013: u"Hearty Acolyte Mantle",
11014: u"Vigorous Acolyte Mantle",
11015: u"Vigorous Acolyte Mantle",
11016: u"Malign Embroidered Coat",
11017: u"Malign Embroidered Coat",
11018: u"Healing Embroidered Coat",
11019: u"Malign Embroidered Sandals",
11020: u"Malign Embroidered Sandals",
11021: u"Healing Embroidered Sandals",
11022: u"Malign Embroidered Wristguards",
11023: u"Malign Embroidered Wristguards",
11024: u"Healing Embroidered Wristguards",
11025: u"Malign Embroidered Pants",
11026: u"Malign Embroidered Pants",
11027: u"Healing Embroidered Pants",
11028: u"Hearty Student Shoes",
11029: u"Ravaging Student Shoes",
11030: u"Ravaging Student Shoes",
11031: u"Hearty Student Coat",
11032: u"Ravaging Student Coat",
11033: u"Ravaging Student Coat",
11034: u"Hearty Student Gloves",
11035: u"Ravaging Student Gloves",
11036: u"Ravaging Student Gloves",
11037: u"Hearty Student Leggings",
11038: u"Ravaging Student Leggings",
11039: u"Ravaging Student Leggings",
11040: u"Malign Embroidered Mask",
11041: u"Mighty Embroidered Mask",
11042: u"Vital Embroidered Mask",
11043: u"Healing Embroidered Mask",
11044: u"Precise Embroidered Mask",
11045: u"Resilient Embroidered Mask",
11046: u"Malign Embroidered Mask",
11047: u"Healing Embroidered Mask",
11048: u"Mighty Embroidered Mask",
11049: u"Precise Embroidered Mask",
11050: u"Resilient Embroidered Mask",
11051: u"Vital Embroidered Mask",
11052: u"Malign Embroidered Mantle",
11053: u"Mighty Embroidered Mantle",
11054: u"Vital Embroidered Mantle",
11055: u"Healing Embroidered Mantle",
11056: u"Precise Embroidered Mantle",
11057: u"Resilient Embroidered Mantle",
11058: u"Malign Embroidered Mantle",
11059: u"Healing Embroidered Mantle",
11060: u"Mighty Embroidered Mantle",
11061: u"Precise Embroidered Mantle",
11062: u"Resilient Embroidered Mantle",
11063: u"Vital Embroidered Mantle",
11064: u"Ravaging Student Circlet",
11066: u"Vigorous Student Circlet",
11067: u"Strong Student Circlet",
11068: u"Honed Student Circlet",
11069: u"Hearty Student Circlet",
11070: u"Ravaging Student Circlet",
11072: u"Vigorous Student Circlet",
11073: u"Strong Student Circlet",
11074: u"Honed Student Circlet",
11075: u"Hearty Student Circlet",
11076: u"Ravaging Student Mantle",
11078: u"Vigorous Student Mantle",
11079: u"Strong Student Mantle",
11080: u"Honed Student Mantle",
11081: u"Hearty Student Mantle",
11082: u"Ravaging Student Mantle",
11084: u"Vigorous Student Mantle",
11085: u"Strong Student Mantle",
11086: u"Honed Student Mantle",
11087: u"Hearty Student Mantle",
11088: u"Ravaging Acolyte Mask",
11089: u"Vigorous Acolyte Mask",
11091: u"Strong Acolyte Mask",
11092: u"Honed Acolyte Mask",
11093: u"Hearty Acolyte Mask",
11094: u"Ravaging Acolyte Mask",
11095: u"Strong Acolyte Mask",
11096: u"Vigorous Acolyte Mask",
11098: u"Honed Acolyte Mask",
11099: u"Hearty Acolyte Mask",
11100: u"Carrion Exalted Boots",
11101: u"Cleric's Exalted Boots",
11103: u"Berserker's Exalted Boots",
11104: u"Rampager's Exalted Boots",
11105: u"Knight's Exalted Boots",
11106: u"Valkyrie Exalted Boots",
11107: u"Carrion Feathered Boots",
11108: u"Knight's Feathered Boots",
11109: u"Valkyrie Feathered Boots",
11110: u"Cleric's Feathered Boots",
11112: u"Berserker's Feathered Boots",
11113: u"Cleric's Feathered Boots",
11114: u"Valkyrie Feathered Boots",
11115: u"Carrion Feathered Boots",
11116: u"Knight's Feathered Boots",
11117: u"Berserker's Feathered Boots",
11118: u"Rampager's Feathered Boots",
11120: u"Rampager's Feathered Boots",
11121: u"Carrion Masquerade Boots",
11122: u"Cleric's Masquerade Boots",
11124: u"Berserker's Masquerade Boots",
11125: u"Rampager's Masquerade Boots",
11126: u"Knight's Masquerade Boots",
11127: u"Valkyrie Masquerade Boots",
11128: u"Valkyrie Winged Boots",
11129: u"Carrion Winged Boots",
11130: u"Knight's Winged Boots",
11131: u"Rampager's Winged Boots",
11132: u"Berserker's Winged Boots",
11133: u"Cleric's Winged Boots",
11135: u"Berserker's Winged Boots",
11136: u"Cleric's Winged Boots",
11137: u"Valkyrie Winged Boots",
11138: u"Carrion Winged Boots",
11139: u"Knight's Winged Boots",
11140: u"Rampager's Winged Boots",
11142: u"Carrion Exalted Pants",
11143: u"Cleric's Exalted Pants",
11145: u"Berserker's Exalted Pants",
11146: u"Rampager's Exalted Pants",
11147: u"Knight's Exalted Pants",
11148: u"Valkyrie Exalted Pants",
11149: u"Carrion Feathered Pants",
11150: u"Knight's Feathered Pants",
11151: u"Valkyrie Feathered Pants",
11152: u"Cleric's Feathered Pants",
11154: u"Berserker's Feathered Pants",
11155: u"Cleric's Feathered Pants",
11156: u"Valkyrie Feathered Pants",
11157: u"Carrion Feathered Pants",
11158: u"Knight's Feathered Pants",
11159: u"Berserker's Feathered Pants",
11160: u"Rampager's Feathered Pants",
11162: u"Rampager's Feathered Pants",
11163: u"Carrion Masquerade Leggings",
11164: u"Cleric's Masquerade Leggings",
11166: u"Berserker's Masquerade Leggings",
11167: u"Rampager's Masquerade Leggings",
11168: u"Knight's Masquerade Leggings",
11169: u"Valkyrie Masquerade Leggings",
11170: u"Valkyrie Winged Pants",
11171: u"Carrion Winged Pants",
11172: u"Knight's Winged Pants",
11173: u"Rampager's Winged Pants",
11174: u"Berserker's Winged Pants",
11175: u"Cleric's Winged Pants",
11177: u"Berserker's Winged Pants",
11178: u"Cleric's Winged Pants",
11179: u"Valkyrie Winged Pants",
11180: u"Carrion Winged Pants",
11181: u"Knight's Winged Pants",
11182: u"Rampager's Winged Pants",
11184: u"Carrion Exalted Masque",
11185: u"Cleric's Exalted Masque",
11187: u"Berserker's Exalted Masque",
11188: u"Rampager's Exalted Masque",
11189: u"Knight's Exalted Masque",
11190: u"Valkyrie Exalted Masque",
11191: u"Carrion Feathered Headpiece",
11192: u"Knight's Feathered Headpiece",
11193: u"Valkyrie Feathered Headpiece",
11194: u"Cleric's Feathered Headpiece",
11196: u"Berserker's Feathered Headpiece",
11197: u"Cleric's Feathered Headpiece",
11198: u"Valkyrie Feathered Headpiece",
11199: u"Carrion Feathered Headpiece",
11200: u"Knight's Feathered Headpiece",
11201: u"Berserker's Feathered Headpiece",
11202: u"Rampager's Feathered Headpiece",
11204: u"Rampager's Feathered Headpiece",
11205: u"Carrion Masquerade Mask",
11206: u"Cleric's Masquerade Mask",
11208: u"Berserker's Masquerade Mask",
11209: u"Rampager's Masquerade Mask",
11210: u"Knight's Masquerade Mask",
11211: u"Valkyrie Masquerade Mask",
11212: u"Valkyrie Winged Headpiece",
11213: u"Carrion Winged Headpiece",
11214: u"Knight's Winged Headpiece",
11215: u"Rampager's Winged Headpiece",
11216: u"Berserker's Winged Headpiece",
11217: u"Cleric's Winged Headpiece",
11219: u"Berserker's Winged Headpiece",
11220: u"Cleric's Winged Headpiece",
11221: u"Valkyrie Winged Headpiece",
11222: u"Carrion Winged Headpiece",
11223: u"Knight's Winged Headpiece",
11224: u"Rampager's Winged Headpiece",
11226: u"Carrion Exalted Gloves",
11227: u"Cleric's Exalted Gloves",
11229: u"Berserker's Exalted Gloves",
11230: u"Rampager's Exalted Gloves",
11231: u"Knight's Exalted Gloves",
11232: u"Valkyrie Exalted Gloves",
11233: u"Carrion Feathered Gloves",
11234: u"Knight's Feathered Gloves",
11235: u"Valkyrie Feathered Gloves",
11236: u"Cleric's Feathered Gloves",
11238: u"Berserker's Feathered Gloves",
11239: u"Cleric's Feathered Gloves",
11240: u"Valkyrie Feathered Gloves",
11241: u"Carrion Feathered Gloves",
11242: u"Knight's Feathered Gloves",
11243: u"Berserker's Feathered Gloves",
11244: u"Rampager's Feathered Gloves",
11246: u"Rampager's Feathered Gloves",
11247: u"Carrion Masquerade Gloves",
11248: u"Cleric's Masquerade Gloves",
11250: u"Berserker's Masquerade Gloves",
11251: u"Rampager's Masquerade Gloves",
11252: u"Knight's Masquerade Gloves",
11253: u"Valkyrie Masquerade Gloves",
11254: u"Valkyrie Winged Gloves",
11255: u"Carrion Winged Gloves",
11256: u"Knight's Winged Gloves",
11257: u"Rampager's Winged Gloves",
11258: u"Berserker's Winged Gloves",
11259: u"Cleric's Winged Gloves",
11261: u"Berserker's Winged Gloves",
11262: u"Cleric's Winged Gloves",
11263: u"Valkyrie Winged Gloves",
11264: u"Carrion Winged Gloves",
11265: u"Knight's Winged Gloves",
11266: u"Rampager's Winged Gloves",
11268: u"Carrion Exalted Coat",
11269: u"Cleric's Exalted Coat",
11271: u"Berserker's Exalted Coat",
11272: u"Rampager's Exalted Coat",
11273: u"Knight's Exalted Coat",
11274: u"Valkyrie Exalted Coat",
11275: u"Carrion Feathered Vestments",
11276: u"Knight's Feathered Vestments",
11277: u"Valkyrie Feathered Vestments",
11278: u"Cleric's Feathered Vestments",
11280: u"Berserker's Feathered Vestments",
11281: u"Cleric's Feathered Vestments",
11282: u"Valkyrie Feathered Vestments",
11283: u"Carrion Feathered Vestments",
11284: u"Knight's Feathered Vestments",
11285: u"Berserker's Feathered Vestments",
11286: u"Rampager's Feathered Vestments",
11288: u"Rampager's Feathered Vestments",
11289: u"Carrion Masquerade Raiments",
11290: u"Cleric's Masquerade Raiments",
11292: u"Berserker's Masquerade Raiments",
11293: u"Rampager's Masquerade Raiments",
11294: u"Knight's Masquerade Raiments",
11295: u"Valkyrie Masquerade Raiments",
11296: u"Valkyrie Winged Tunic",
11297: u"Carrion Winged Tunic",
11298: u"Knight's Winged Tunic",
11299: u"Rampager's Winged Tunic",
11300: u"Berserker's Winged Tunic",
11301: u"Cleric's Winged Tunic",
11303: u"Berserker's Winged Tunic",
11304: u"Cleric's Winged Tunic",
11305: u"Valkyrie Winged Tunic",
11306: u"Carrion Winged Tunic",
11307: u"Knight's Winged Tunic",
11308: u"Rampager's Winged Tunic",
11310: u"Valkyrie Winged Mantle",
11311: u"Carrion Winged Mantle",
11312: u"Knight's Winged Mantle",
11313: u"Rampager's Winged Mantle",
11314: u"Berserker's Winged Mantle",
11315: u"Cleric's Winged Mantle",
11317: u"Berserker's Winged Mantle",
11318: u"Cleric's Winged Mantle",
11319: u"Valkyrie Winged Mantle",
11320: u"Carrion Winged Mantle",
11321: u"Knight's Winged Mantle",
11322: u"Rampager's Winged Mantle",
11324: u"Carrion Feathered Mantle",
11325: u"Knight's Feathered Mantle",
11326: u"Valkyrie Feathered Mantle",
11327: u"Cleric's Feathered Mantle",
11329: u"Berserker's Feathered Mantle",
11330: u"Cleric's Feathered Mantle",
11331: u"Valkyrie Feathered Mantle",
11332: u"Carrion Feathered Mantle",
11333: u"Knight's Feathered Mantle",
11334: u"Berserker's Feathered Mantle",
11335: u"Rampager's Feathered Mantle",
11337: u"Rampager's Feathered Mantle",
11338: u"Carrion Masquerade Mantle",
11339: u"Cleric's Masquerade Mantle",
11341: u"Berserker's Masquerade Mantle",
11342: u"Rampager's Masquerade Mantle",
11343: u"Knight's Masquerade Mantle",
11344: u"Valkyrie Masquerade Mantle",
11345: u"Carrion Exalted Mantle",
11346: u"Cleric's Exalted Mantle",
11348: u"Berserker's Exalted Mantle",
11349: u"Rampager's Exalted Mantle",
11350: u"Knight's Exalted Mantle",
11351: u"Valkyrie Exalted Mantle",
11352: u"Mighty Embroidered Sandals",
11353: u"Mighty Embroidered Coat",
11354: u"Mighty Embroidered Pants",
11355: u"Mighty Embroidered Wristguards",
11356: u"Rejuvenating Student Mantle",
11357: u"Rejuvenating Student Mantle",
11358: u"Rejuvenating Acolyte Mantle",
11359: u"Rejuvenating Acolyte Mantle",
11360: u"Rejuvenating Acolyte Boots",
11361: u"Rejuvenating Acolyte Boots",
11362: u"Rejuvenating Student Shoes",
11363: u"Rejuvenating Student Shoes",
11364: u"Rejuvenating Acolyte Coat",
11365: u"Rejuvenating Acolyte Coat",
11366: u"Rejuvenating Student Coat",
11367: u"Rejuvenating Student Coat",
11368: u"Rejuvenating Acolyte Mask",
11369: u"Rejuvenating Acolyte Mask",
11370: u"Rejuvenating Student Circlet",
11371: u"Rejuvenating Student Circlet",
11372: u"Rejuvenating Acolyte Pants",
11373: u"Rejuvenating Acolyte Pants",
11374: u"Rejuvenating Student Leggings",
11375: u"Rejuvenating Student Leggings",
11376: u"Rejuvenating Acolyte Gloves",
11377: u"Rejuvenating Acolyte Gloves",
11378: u"Rejuvenating Student Gloves",
11379: u"Rejuvenating Student Gloves",
11380: u"Ravaging Masquerade Mantle",
11381: u"Rejuvenating Masquerade Mantle",
11382: u"Honed Masquerade Mantle",
11384: u"Strong Masquerade Mantle",
11385: u"Vigorous Masquerade Mantle",
11386: u"Hearty Masquerade Mantle",
11387: u"Ravaging Masquerade Mantle",
11388: u"Rejuvenating Masquerade Mantle",
11389: u"Honed Masquerade Mantle",
11391: u"Strong Masquerade Mantle",
11392: u"Vigorous Masquerade Mantle",
11393: u"Hearty Masquerade Mantle",
11394: u"Carrion Masquerade Mantle",
11395: u"Cleric's Masquerade Mantle",
11397: u"Valkyrie Masquerade Mantle",
11398: u"Berserker's Masquerade Mantle",
11399: u"Rampager's Masquerade Mantle",
11400: u"Knight's Masquerade Mantle",
11401: u"Carrion Masquerade Boots",
11402: u"Cleric's Masquerade Boots",
11404: u"Valkyrie Masquerade Boots",
11405: u"Berserker's Masquerade Boots",
11406: u"Rampager's Masquerade Boots",
11407: u"Knight's Masquerade Boots",
11408: u"Ravaging Masquerade Boots",
11409: u"Rejuvenating Masquerade Boots",
11410: u"Honed Masquerade Boots",
11412: u"Strong Masquerade Boots",
11413: u"Vigorous Masquerade Boots",
11414: u"Hearty Masquerade Boots",
11415: u"Ravaging Masquerade Boots",
11416: u"Rejuvenating Masquerade Boots",
11417: u"Honed Masquerade Boots",
11419: u"Strong Masquerade Boots",
11420: u"Vigorous Masquerade Boots",
11421: u"Hearty Masquerade Boots",
11422: u"Carrion Masquerade Raiments",
11423: u"Cleric's Masquerade Raiments",
11425: u"Valkyrie Masquerade Raiments",
11426: u"Berserker's Masquerade Raiments",
11427: u"Rampager's Masquerade Raiments",
11428: u"Knight's Masquerade Raiments",
11429: u"Ravaging Masquerade Raiments",
11430: u"Rejuvenating Masquerade Raiments",
11431: u"Honed Masquerade Raiments",
11433: u"Strong Masquerade Raiments",
11434: u"Vigorous Masquerade Raiments",
11435: u"Hearty Masquerade Raiments",
11436: u"Ravaging Masquerade Raiments",
11437: u"Rejuvenating Masquerade Raiments",
11438: u"Honed Masquerade Raiments",
11440: u"Strong Masquerade Raiments",
11441: u"Vigorous Masquerade Raiments",
11442: u"Hearty Masquerade Raiments",
11443: u"Carrion Masquerade Mask",
11444: u"Cleric's Masquerade Mask",
11446: u"Valkyrie Masquerade Mask",
11447: u"Berserker's Masquerade Mask",
11448: u"Rampager's Masquerade Mask",
11449: u"Knight's Masquerade Mask",
11450: u"Ravaging Masquerade Mask",
11451: u"Rejuvenating Masquerade Mask",
11452: u"Honed Masquerade Mask",
11454: u"Strong Masquerade Mask",
11455: u"Vigorous Masquerade Mask",
11456: u"Hearty Masquerade Mask",
11457: u"Ravaging Masquerade Mask",
11458: u"Rejuvenating Masquerade Mask",
11459: u"Honed Masquerade Mask",
11461: u"Strong Masquerade Mask",
11462: u"Vigorous Masquerade Mask",
11463: u"Hearty Masquerade Mask",
11464: u"Carrion Masquerade Leggings",
11465: u"Cleric's Masquerade Leggings",
11467: u"Valkyrie Masquerade Leggings",
11468: u"Berserker's Masquerade Leggings",
11469: u"Rampager's Masquerade Leggings",
11470: u"Knight's Masquerade Leggings",
11471: u"Ravaging Masquerade Leggings",
11472: u"Rejuvenating Masquerade Leggings",
11473: u"Honed Masquerade Leggings",
11475: u"Strong Masquerade Leggings",
11476: u"Vigorous Masquerade Leggings",
11477: u"Hearty Masquerade Leggings",
11478: u"Ravaging Masquerade Leggings",
11479: u"Rejuvenating Masquerade Leggings",
11480: u"Honed Masquerade Leggings",
11482: u"Strong Masquerade Leggings",
11483: u"Vigorous Masquerade Leggings",
11484: u"Hearty Masquerade Leggings",
11485: u"Carrion Masquerade Gloves",
11486: u"Cleric's Masquerade Gloves",
11488: u"Valkyrie Masquerade Gloves",
11489: u"Berserker's Masquerade Gloves",
11490: u"Rampager's Masquerade Gloves",
11491: u"Knight's Masquerade Gloves",
11492: u"Ravaging Masquerade Gloves",
11493: u"Rejuvenating Masquerade Gloves",
11494: u"Honed Masquerade Gloves",
11496: u"Strong Masquerade Gloves",
11497: u"Vigorous Masquerade Gloves",
11498: u"Hearty Masquerade Gloves",
11499: u"Ravaging Masquerade Gloves",
11500: u"Rejuvenating Masquerade Gloves",
11501: u"Honed Masquerade Gloves",
11503: u"Strong Masquerade Gloves",
11504: u"Vigorous Masquerade Gloves",
11505: u"Hearty Masquerade Gloves",
11506: u"Mighty Seeker Boots",
11507: u"Vital Seeker Boots",
11508: u"Mighty Seeker Boots",
11509: u"Vital Seeker Boots",
11510: u"Precise Seeker Boots",
11511: u"Resilient Seeker Boots",
11512: u"Healing Seeker Boots",
11513: u"Precise Seeker Boots",
11514: u"Resilient Seeker Boots",
11516: u"Vigorous Outlaw Boots",
11517: u"Strong Outlaw Boots",
11519: u"Strong Outlaw Boots",
11520: u"Vigorous Outlaw Boots",
11521: u"Honed Outlaw Boots",
11522: u"Honed Outlaw Boots",
11523: u"Hearty Outlaw Boots",
11525: u"Strong Leather Shoes",
11526: u"Hearty Leather Shoes",
11527: u"Ravaging Leather Shoes",
11528: u"Honed Leather Shoes",
11529: u"Ravaging Leather Shoes",
11530: u"Honed Leather Shoes",
11532: u"Strong Leather Shoes",
11533: u"Vigorous Leather Shoes",
11534: u"Hearty Leather Shoes",
11535: u"Vigorous Leather Shoes",
11536: u"Mighty Seeker Coat",
11537: u"Vital Seeker Coat",
11538: u"Mighty Seeker Coat",
11539: u"Vital Seeker Coat",
11540: u"Precise Seeker Coat",
11541: u"Resilient Seeker Coat",
11542: u"Healing Seeker Coat",
11543: u"Precise Seeker Coat",
11544: u"Resilient Seeker Coat",
11546: u"Vigorous Outlaw Coat",
11547: u"Strong Outlaw Coat",
11549: u"Strong Outlaw Coat",
11550: u"Vigorous Outlaw Coat",
11551: u"Honed Outlaw Coat",
11552: u"Honed Outlaw Coat",
11553: u"Hearty Outlaw Coat",
11555: u"Strong Leather Coat",
11556: u"Hearty Leather Coat",
11557: u"Ravaging Leather Coat",
11558: u"Honed Leather Coat",
11559: u"Ravaging Leather Coat",
11560: u"Honed Leather Coat",
11562: u"Strong Leather Coat",
11563: u"Vigorous Leather Coat",
11564: u"Hearty Leather Coat",
11565: u"Vigorous Leather Coat",
11566: u"Mighty Seeker Gloves",
11567: u"Vital Seeker Gloves",
11568: u"Mighty Seeker Gloves",
11569: u"Vital Seeker Gloves",
11570: u"Precise Seeker Gloves",
11571: u"Resilient Seeker Gloves",
11572: u"Healing Seeker Gloves",
11573: u"Precise Seeker Gloves",
11574: u"Resilient Seeker Gloves",
11576: u"Vigorous Outlaw Gloves",
11577: u"Strong Outlaw Gloves",
11579: u"Strong Outlaw Gloves",
11580: u"Vigorous Outlaw Gloves",
11581: u"Honed Outlaw Gloves",
11582: u"Honed Outlaw Gloves",
11583: u"Hearty Outlaw Gloves",
11585: u"Strong Leather Gloves",
11586: u"Hearty Leather Gloves",
11587: u"Ravaging Leather Gloves",
11588: u"Honed Leather Gloves",
11589: u"Ravaging Leather Gloves",
11590: u"Honed Leather Gloves",
11592: u"Strong Leather Gloves",
11593: u"Vigorous Leather Gloves",
11594: u"Hearty Leather Gloves",
11595: u"Vigorous Leather Gloves",
11596: u"Mighty Seeker Pants",
11597: u"Vital Seeker Pants",
11598: u"Mighty Seeker Pants",
11599: u"Vital Seeker Pants",
11600: u"Precise Seeker Pants",
11601: u"Resilient Seeker Pants",
11602: u"Healing Seeker Pants",
11603: u"Precise Seeker Pants",
11604: u"Resilient Seeker Pants",
11606: u"Vigorous Outlaw Pants",
11607: u"Strong Outlaw Pants",
11609: u"Strong Outlaw Pants",
11610: u"Vigorous Outlaw Pants",
11611: u"Honed Outlaw Pants",
11612: u"Honed Outlaw Pants",
11613: u"Hearty Outlaw Pants",
11615: u"Strong Leather Pants",
11616: u"Hearty Leather Pants",
11617: u"Ravaging Leather Pants",
11618: u"Honed Leather Pants",
11619: u"Ravaging Leather Pants",
11620: u"Honed Leather Pants",
11622: u"Strong Leather Pants",
11623: u"Vigorous Leather Pants",
11624: u"Hearty Leather Pants",
11625: u"Vigorous Leather Pants",
11627: u"Strong Leather Shoulders",
11628: u"Hearty Leather Shoulders",
11629: u"Honed Leather Shoulders",
11630: u"Ravaging Leather Shoulders",
11631: u"Honed Leather Shoulders",
11633: u"Ravaging Leather Shoulders",
11634: u"Strong Leather Shoulders",
11635: u"Hearty Leather Shoulders",
11636: u"Vigorous Leather Shoulders",
11637: u"Vigorous Leather Shoulders",
11638: u"Mighty Seeker Mask",
11639: u"Vital Seeker Mask",
11640: u"Mighty Seeker Mask",
11641: u"Vital Seeker Mask",
11642: u"Precise Seeker Mask",
11643: u"Resilient Seeker Mask",
11644: u"Healing Seeker Mask",
11645: u"Precise Seeker Mask",
11646: u"Resilient Seeker Mask",
11648: u"Vigorous Outlaw Mask",
11649: u"Strong Outlaw Mask",
11651: u"Strong Outlaw Mask",
11652: u"Vigorous Outlaw Mask",
11653: u"Honed Outlaw Mask",
11654: u"Honed Outlaw Mask",
11655: u"Hearty Outlaw Mask",
11656: u"Ravaging Leather Mask",
11657: u"Strong Leather Mask",
11658: u"Hearty Leather Mask",
11660: u"Honed Leather Mask",
11661: u"Ravaging Leather Mask",
11663: u"Honed Leather Mask",
11664: u"Strong Leather Mask",
11665: u"Vigorous Leather Mask",
11666: u"Hearty Leather Mask",
11667: u"Vigorous Leather Mask",
11668: u"Malign Seeker Pants",
11669: u"Healing Seeker Pants",
11670: u"Malign Seeker Pants",
11671: u"Ravaging Outlaw Pants",
11672: u"Ravaging Outlaw Pants",
11673: u"Hearty Outlaw Pants",
11674: u"Malign Seeker Coat",
11675: u"Healing Seeker Coat",
11676: u"Malign Seeker Coat",
11677: u"Ravaging Outlaw Coat",
11678: u"Ravaging Outlaw Coat",
11679: u"Hearty Outlaw Coat",
11680: u"Malign Seeker Gloves",
11681: u"Healing Seeker Gloves",
11682: u"Malign Seeker Gloves",
11683: u"Ravaging Outlaw Gloves",
11684: u"Ravaging Outlaw Gloves",
11685: u"Hearty Outlaw Gloves",
11686: u"Malign Seeker Boots",
11687: u"Healing Seeker Boots",
11688: u"Malign Seeker Boots",
11689: u"Ravaging Outlaw Boots",
11690: u"Ravaging Outlaw Boots",
11691: u"Hearty Outlaw Boots",
11692: u"Mighty Seeker Shoulders",
11693: u"Vital Seeker Shoulders",
11694: u"Healing Seeker Shoulders",
11695: u"Precise Seeker Shoulders",
11696: u"Resilient Seeker Shoulders",
11697: u"Malign Seeker Shoulders",
11698: u"Malign Seeker Shoulders",
11699: u"Healing Seeker Shoulders",
11700: u"Mighty Seeker Shoulders",
11701: u"Precise Seeker Shoulders",
11702: u"Resilient Seeker Shoulders",
11703: u"Vital Seeker Shoulders",
11704: u"Ravaging Outlaw Shoulders",
11706: u"Vigorous Outlaw Shoulders",
11707: u"Strong Outlaw Shoulders",
11708: u"Honed Outlaw Shoulders",
11709: u"Hearty Outlaw Shoulders",
11710: u"Ravaging Outlaw Shoulders",
11712: u"Vigorous Outlaw Shoulders",
11713: u"Strong Outlaw Shoulders",
11714: u"Honed Outlaw Shoulders",
11715: u"Hearty Outlaw Shoulders",
11716: u"Malign Seeker Mask",
11717: u"Healing Seeker Mask",
11718: u"Malign Seeker Mask",
11719: u"Ravaging Outlaw Mask",
11720: u"Ravaging Outlaw Mask",
11721: u"Hearty Outlaw Mask",
11722: u"Valkyrie Rascal Pants",
11723: u"Carrion Rascal Pants",
11724: u"Knight's Rascal Pants",
11725: u"Rampager's Rascal Pants",
11726: u"Berserker's Rascal Pants",
11727: u"Cleric's Rascal Pants",
11729: u"Berserker's Rascal Pants",
11730: u"Cleric's Rascal Pants",
11731: u"Valkyrie Rascal Pants",
11732: u"Carrion Rascal Pants",
11733: u"Knight's Rascal Pants",
11734: u"Rampager's Rascal Pants",
11736: u"Carrion Prowler Pants",
11737: u"Knight's Prowler Pants",
11738: u"Valkyrie Prowler Pants",
11739: u"Cleric's Prowler Pants",
11741: u"Berserker's Prowler Pants",
11742: u"Cleric's Prowler Pants",
11743: u"Valkyrie Prowler Pants",
11744: u"Carrion Prowler Pants",
11745: u"Knight's Prowler Pants",
11746: u"Berserker's Prowler Pants",
11747: u"Rampager's Prowler Pants",
11749: u"Rampager's Prowler Pants",
11750: u"Carrion Noble Pants",
11751: u"Cleric's Noble Pants",
11753: u"Berserker's Noble Pants",
11754: u"Rampager's Noble Pants",
11755: u"Knight's Noble Pants",
11756: u"Valkyrie Noble Pants",
11757: u"Carrion Emblazoned Pants",
11758: u"Cleric's Emblazoned Pants",
11760: u"Berserker's Emblazoned Pants",
11761: u"Rampager's Emblazoned Pants",
11762: u"Knight's Emblazoned Pants",
11763: u"Valkyrie Emblazoned Pants",
11764: u"Valkyrie Rascal Coat",
11765: u"Carrion Rascal Coat",
11766: u"Knight's Rascal Coat",
11767: u"Rampager's Rascal Coat",
11768: u"Berserker's Rascal Coat",
11769: u"Cleric's Rascal Coat",
11771: u"Berserker's Rascal Coat",
11772: u"Cleric's Rascal Coat",
11773: u"Valkyrie Rascal Coat",
11774: u"Carrion Rascal Coat",
11775: u"Knight's Rascal Coat",
11776: u"Rampager's Rascal Coat",
11778: u"Carrion Prowler Coat",
11779: u"Knight's Prowler Coat",
11780: u"Valkyrie Prowler Coat",
11781: u"Cleric's Prowler Coat",
11783: u"Berserker's Prowler Coat",
11784: u"Cleric's Prowler Coat",
11785: u"Valkyrie Prowler Coat",
11786: u"Carrion Prowler Coat",
11787: u"Knight's Prowler Coat",
11788: u"Berserker's Prowler Coat",
11789: u"Rampager's Prowler Coat",
11791: u"Rampager's Prowler Coat",
11792: u"Carrion Noble Coat",
11793: u"Cleric's Noble Coat",
11795: u"Berserker's Noble Coat",
11796: u"Rampager's Noble Coat",
11797: u"Knight's Noble Coat",
11798: u"Valkyrie Noble Coat",
11799: u"Carrion Emblazoned Coat",
11800: u"Cleric's Emblazoned Coat",
11802: u"Berserker's Emblazoned Coat",
11803: u"Rampager's Emblazoned Coat",
11804: u"Knight's Emblazoned Coat",
11805: u"Valkyrie Emblazoned Coat",
11806: u"Valkyrie Rascal Gloves",
11807: u"Carrion Rascal Gloves",
11808: u"Knight's Rascal Gloves",
11809: u"Rampager's Rascal Gloves",
11810: u"Berserker's Rascal Gloves",
11811: u"Cleric's Rascal Gloves",
11813: u"Berserker's Rascal Gloves",
11814: u"Cleric's Rascal Gloves",
11815: u"Valkyrie Rascal Gloves",
11816: u"Carrion Rascal Gloves",
11817: u"Knight's Rascal Gloves",
11818: u"Rampager's Rascal Gloves",
11820: u"Carrion Prowler Gloves",
11821: u"Knight's Prowler Gloves",
11822: u"Valkyrie Prowler Gloves",
11823: u"Cleric's Prowler Gloves",
11825: u"Berserker's Prowler Gloves",
11826: u"Cleric's Prowler Gloves",
11827: u"Valkyrie Prowler Gloves",
11828: u"Carrion Prowler Gloves",
11829: u"Knight's Prowler Gloves",
11830: u"Berserker's Prowler Gloves",
11831: u"Rampager's Prowler Gloves",
11833: u"Rampager's Prowler Gloves",
11834: u"Carrion Noble Gloves",
11835: u"Cleric's Noble Gloves",
11837: u"Berserker's Noble Gloves",
11838: u"Rampager's Noble Gloves",
11839: u"Knight's Noble Gloves",
11840: u"Valkyrie Noble Gloves",
11841: u"Carrion Emblazoned Gloves",
11842: u"Cleric's Emblazoned Gloves",
11844: u"Berserker's Emblazoned Gloves",
11845: u"Rampager's Emblazoned Gloves",
11846: u"Knight's Emblazoned Gloves",
11847: u"Valkyrie Emblazoned Gloves",
11848: u"Valkyrie Rascal Boots",
11849: u"Carrion Rascal Boots",
11850: u"Knight's Rascal Boots",
11851: u"Rampager's Rascal Boots",
11852: u"Berserker's Rascal Boots",
11853: u"Cleric's Rascal Boots",
11855: u"Berserker's Rascal Boots",
11856: u"Cleric's Rascal Boots",
11857: u"Valkyrie Rascal Boots",
11858: u"Carrion Rascal Boots",
11859: u"Knight's Rascal Boots",
11860: u"Rampager's Rascal Boots",
11862: u"Carrion Prowler Boots",
11863: u"Knight's Prowler Boots",
11864: u"Valkyrie Prowler Boots",
11865: u"Cleric's Prowler Boots",
11867: u"Berserker's Prowler Boots",
11868: u"Cleric's Prowler Boots",
11869: u"Valkyrie Prowler Boots",
11870: u"Carrion Prowler Boots",
11871: u"Knight's Prowler Boots",
11872: u"Berserker's Prowler Boots",
11873: u"Rampager's Prowler Boots",
11875: u"Rampager's Prowler Boots",
11876: u"Carrion Noble Boots",
11877: u"Cleric's Noble Boots",
11879: u"Berserker's Noble Boots",
11880: u"Rampager's Noble Boots",
11881: u"Knight's Noble Boots",
11882: u"Valkyrie Noble Boots",
11883: u"Carrion Emblazoned Boots",
11884: u"Cleric's Emblazoned Boots",
11886: u"Berserker's Emblazoned Boots",
11887: u"Rampager's Emblazoned Boots",
11888: u"Knight's Emblazoned Boots",
11889: u"Valkyrie Emblazoned Boots",
11890: u"Valkyrie Rascal Shoulders",
11891: u"Carrion Rascal Shoulders",
11892: u"Knight's Rascal Shoulders",
11893: u"Rampager's Rascal Shoulders",
11894: u"Berserker's Rascal Shoulders",
11895: u"Cleric's Rascal Shoulders",
11897: u"Berserker's Rascal Shoulders",
11898: u"Cleric's Rascal Shoulders",
11899: u"Valkyrie Rascal Shoulders",
11900: u"Carrion Rascal Shoulders",
11901: u"Knight's Rascal Shoulders",
11902: u"Rampager's Rascal Shoulders",
11904: u"Carrion Prowler Shoulders",
11905: u"Knight's Prowler Shoulders",
11906: u"Valkyrie Prowler Shoulders",
11907: u"Cleric's Prowler Shoulders",
11909: u"Berserker's Prowler Shoulders",
11910: u"Cleric's Prowler Shoulders",
11911: u"Valkyrie Prowler Shoulders",
11912: u"Carrion Prowler Shoulders",
11913: u"Knight's Prowler Shoulders",
11914: u"Berserker's Prowler Shoulders",
11915: u"Rampager's Prowler Shoulders",
11917: u"Rampager's Prowler Shoulders",
11918: u"Carrion Noble Shoulders",
11919: u"Cleric's Noble Shoulders",
11921: u"Berserker's Noble Shoulders",
11922: u"Rampager's Noble Shoulders",
11923: u"Knight's Noble Shoulders",
11924: u"Valkyrie Noble Shoulders",
11925: u"Carrion Emblazoned Shoulders",
11926: u"Cleric's Emblazoned Shoulders",
11928: u"Berserker's Emblazoned Shoulders",
11929: u"Rampager's Emblazoned Shoulders",
11930: u"Knight's Emblazoned Shoulders",
11931: u"Valkyrie Emblazoned Shoulders",
11932: u"Valkyrie Rascal Mask",
11933: u"Carrion Rascal Mask",
11934: u"Knight's Rascal Mask",
11935: u"Rampager's Rascal Mask",
11936: u"Berserker's Rascal Mask",
11937: u"Cleric's Rascal Mask",
11939: u"Berserker's Rascal Mask",
11940: u"Cleric's Rascal Mask",
11941: u"Valkyrie Rascal Mask",
11942: u"Carrion Rascal Mask",
11943: u"Knight's Rascal Mask",
11944: u"Rampager's Rascal Mask",
11946: u"Carrion Prowler Mask",
11947: u"Knight's Prowler Mask",
11948: u"Valkyrie Prowler Mask",
11949: u"Cleric's Prowler Mask",
11951: u"Berserker's Prowler Mask",
11952: u"Cleric's Prowler Mask",
11953: u"Valkyrie Prowler Mask",
11954: u"Carrion Prowler Mask",
11955: u"Knight's Prowler Mask",
11956: u"Berserker's Prowler Mask",
11957: u"Rampager's Prowler Mask",
11959: u"Rampager's Prowler Mask",
11960: u"Carrion Noble Mask",
11961: u"Cleric's Noble Mask",
11963: u"Berserker's Noble Mask",
11964: u"Rampager's Noble Mask",
11965: u"Knight's Noble Mask",
11966: u"Valkyrie Noble Mask",
11967: u"Carrion Emblazoned Helm",
11968: u"Cleric's Emblazoned Helm",
11970: u"Berserker's Emblazoned Helm",
11971: u"Rampager's Emblazoned Helm",
11972: u"Knight's Emblazoned Helm",
11973: u"Valkyrie Emblazoned Helm",
11974: u"Mighty Seeker Pants",
11975: u"Mighty Seeker Coat",
11976: u"Mighty Seeker Gloves",
11977: u"Mighty Seeker Boots",
11978: u"Rejuvenating Outlaw Pants",
11979: u"Rejuvenating Outlaw Pants",
11980: u"Rejuvenating Leather Pants",
11981: u"Rejuvenating Leather Pants",
11982: u"Rejuvenating Outlaw Coat",
11983: u"Rejuvenating Outlaw Coat",
11984: u"Rejuvenating Leather Coat",
11985: u"Rejuvenating Leather Coat",
11986: u"Rejuvenating Outlaw Gloves",
11987: u"Rejuvenating Outlaw Gloves",
11988: u"Rejuvenating Leather Gloves",
11989: u"Rejuvenating Leather Gloves",
11990: u"Rejuvenating Outlaw Boots",
11991: u"Rejuvenating Outlaw Boots",
11992: u"Rejuvenating Leather Shoes",
11993: u"Rejuvenating Leather Shoes",
11994: u"Rejuvenating Outlaw Shoulders",
11995: u"Rejuvenating Outlaw Shoulders",
11996: u"Rejuvenating Leather Shoulders",
11997: u"Rejuvenating Leather Shoulders",
11998: u"Rejuvenating Leather Mask",
11999: u"Rejuvenating Leather Mask",
12000: u"Rejuvenating Outlaw Mask",
12001: u"Rejuvenating Outlaw Mask",
12002: u"Ravaging Noble Pants",
12003: u"Rejuvenating Noble Pants",
12004: u"Honed Noble Pants",
12006: u"Strong Noble Pants",
12007: u"Vigorous Noble Pants",
12008: u"Hearty Noble Pants",
12009: u"Ravaging Noble Pants",
12010: u"Rejuvenating Noble Pants",
12011: u"Honed Noble Pants",
12013: u"Strong Noble Pants",
12014: u"Vigorous Noble Pants",
12015: u"Hearty Noble Pants",
12016: u"Carrion Noble Pants",
12017: u"Cleric's Noble Pants",
12019: u"Valkyrie Noble Pants",
12020: u"Berserker's Noble Pants",
12021: u"Rampager's Noble Pants",
12022: u"Knight's Noble Pants",
12023: u"Ravaging Noble Coat",
12024: u"Rejuvenating Noble Coat",
12025: u"Honed Noble Coat",
12027: u"Strong Noble Coat",
12028: u"Vigorous Noble Coat",
12029: u"Hearty Noble Coat",
12030: u"Ravaging Noble Coat",
12031: u"Rejuvenating Noble Coat",
12032: u"Honed Noble Coat",
12034: u"Strong Noble Coat",
12035: u"Vigorous Noble Coat",
12036: u"Hearty Noble Coat",
12037: u"Carrion Noble Coat",
12038: u"Cleric's Noble Coat",
12040: u"Valkyrie Noble Coat",
12041: u"Berserker's Noble Coat",
12042: u"Rampager's Noble Coat",
12043: u"Knight's Noble Coat",
12044: u"Ravaging Noble Gloves",
12045: u"Rejuvenating Noble Gloves",
12046: u"Honed Noble Gloves",
12048: u"Strong Noble Gloves",
12049: u"Vigorous Noble Gloves",
12050: u"Hearty Noble Gloves",
12051: u"Ravaging Noble Gloves",
12052: u"Rejuvenating Noble Gloves",
12053: u"Honed Noble Gloves",
12055: u"Strong Noble Gloves",
12056: u"Vigorous Noble Gloves",
12057: u"Hearty Noble Gloves",
12058: u"Carrion Noble Gloves",
12059: u"Cleric's Noble Gloves",
12061: u"Valkyrie Noble Gloves",
12062: u"Berserker's Noble Gloves",
12063: u"Rampager's Noble Gloves",
12064: u"Knight's Noble Gloves",
12065: u"Ravaging Noble Boots",
12066: u"Rejuvenating Noble Boots",
12067: u"Honed Noble Boots",
12069: u"Strong Noble Boots",
12070: u"Vigorous Noble Boots",
12071: u"Hearty Noble Boots",
12072: u"Ravaging Noble Boots",
12073: u"Rejuvenating Noble Boots",
12074: u"Honed Noble Boots",
12076: u"Strong Noble Boots",
12077: u"Vigorous Noble Boots",
12078: u"Hearty Noble Boots",
12079: u"Carrion Noble Boots",
12080: u"Cleric's Noble Boots",
12082: u"Valkyrie Noble Boots",
12083: u"Berserker's Noble Boots",
12084: u"Rampager's Noble Boots",
12085: u"Knight's Noble Boots",
12086: u"Ravaging Noble Shoulders",
12087: u"Rejuvenating Noble Shoulders",
12088: u"Honed Noble Shoulders",
12090: u"Strong Noble Shoulders",
12091: u"Vigorous Noble Shoulders",
12092: u"Hearty Noble Shoulders",
12093: u"Ravaging Noble Shoulders",
12094: u"Rejuvenating Noble Shoulders",
12095: u"Honed Noble Shoulders",
12097: u"Strong Noble Shoulders",
12098: u"Vigorous Noble Shoulders",
12099: u"Hearty Noble Shoulders",
12100: u"Carrion Noble Shoulders",
12101: u"Cleric's Noble Shoulders",
12103: u"Valkyrie Noble Shoulders",
12104: u"Berserker's Noble Shoulders",
12105: u"Rampager's Noble Shoulders",
12106: u"Knight's Noble Shoulders",
12107: u"Ravaging Noble Mask",
12108: u"Rejuvenating Noble Mask",
12109: u"Honed Noble Mask",
12111: u"Strong Noble Mask",
12112: u"Vigorous Noble Mask",
12113: u"Hearty Noble Mask",
12114: u"Ravaging Noble Mask",
12115: u"Rejuvenating Noble Mask",
12116: u"Honed Noble Mask",
12118: u"Strong Noble Mask",
12119: u"Vigorous Noble Mask",
12120: u"Hearty Noble Mask",
12121: u"Carrion Noble Mask",
12122: u"Cleric's Noble Mask",
12124: u"Valkyrie Noble Mask",
12125: u"Berserker's Noble Mask",
12126: u"Rampager's Noble Mask",
12127: u"Knight's Noble Mask",
12128: u"Omnomberry",
12129: u"Spicy Meat Kabob",
12130: u"Roasted Rutabaga",
12132: u"Loaf of Bread",
12134: u"Carrot",
12135: u"Potato",
12136: u"Bag of Flour",
12137: u"Glass of Buttermilk",
12138: u"Stick of Butter",
12141: u"Tomato",
12142: u"Onion",
12143: u"Egg",
12144: u"Snow Truffle",
12145: u"Rice Ball",
12146: u"Spinach Salad",
12147: u"Mushroom",
12148: u"Spicy Flank Steak",
12151: u"Packet of Baking Powder",
12152: u"Packet of Yeast",
12153: u"Packet of Salt",
12155: u"Bag of Sugar",
12156: u"Jug of Water",
12157: u"Jar of Vinegar",
12158: u"Jar of Vegetable Oil",
12159: u"Cheese Wedge",
12161: u"Beet",
12162: u"Turnip",
12163: u"Head of Garlic",
12165: u"Apple",
12166: u"Ball of Dough",
12167: u"Bowl of Staple Soup Vegetables",
12168: u"Bowl of White Frosting",
12169: u"Bowl of Baker's Dry Ingredients",
12170: u"Bowl of Baker's Wet Ingredients",
12171: u"Pasta Noodles",
12172: u"Jar of Tomato Sauce",
12173: u"Bowl of Sage Stuffing",
12174: u"Bowl of Tangy Sautee Mix",
12175: u"Bottle of Ascalonian Dressing",
12176: u"Bottle of Simple Dressing",
12177: u"Pile of Cinnamon and Sugar",
12178: u"Pile of Salt and Pepper",
12179: u"Pile of Simple Stew Herbs",
12180: u"Pile of Ascalonian Herbs",
12181: u"Pile of Tangy Seasoning",
12182: u"Buttermilk Biscuit",
12184: u"Cup of Potato Fries",
12186: u"Cheese Triangle",
12187: u"Slice of Buttered Toast",
12188: u"Bowl of Simple Salad",
12190: u"Marinated Mushroom",
12191: u"Mashed Potato",
12194: u"Slice of Garlic Bread",
12195: u"Meatball",
12196: u"Bowl of Ascalonian Salad",
12199: u"Bowl of Red Meat Stock",
12200: u"Bowl of Hearty Red Meat Stew",
12201: u"Bowl of Simple Meat Stew",
12202: u"Bowl of Poultry Stock",
12204: u"Bowl of Poultry Noodle Soup",
12205: u"Bowl of Simple Poultry Soup",
12206: u"Bowl of Vegetable Stock",
12207: u"Bowl of Simple Vegetable Soup",
12208: u"Deluxe Burger",
12209: u"Grilled Poultry",
12210: u"Hamburger",
12212: u"Roasted Meaty Sandwich",
12213: u"Cheeseburger",
12214: u"Poultry Piccata",
12215: u"Stuffed Pepper",
12216: u"Turnip Casserole",
12217: u"Plate of Pasta with Tomato Sauce",
12218: u"Cheese Pizza",
12221: u"Meatball Dinner",
12222: u"Chocolate Cake",
12223: u"Apple Pie",
12224: u"White Cake",
12225: u"Cinnamon Apple",
12226: u"Strawberries and Biscuits",
12227: u"Sugar Cookie",
12228: u"Cup of Banana Cream Pie Filling",
12229: u"Chocolate Bar",
12230: u"Banana Cream Pie",
12232: u"Green Bean",
12234: u"Vanilla Bean",
12235: u"Bell Pepper",
12236: u"Black Peppercorn",
12237: u"Black Bean",
12238: u"Head of Lettuce",
12239: u"Kidney Bean",
12240: u"Celery Stalk",
12241: u"Spinach Leaf",
12243: u"Sage Leaf",
12244: u"Oregano Leaf",
12245: u"Basil Leaf",
12246: u"Parsley Leaf",
12247: u"Bay Leaf",
12248: u"Thyme Leaf",
12249: u"Nutmeg Seed",
12250: u"Walnut",
12251: u"Banana",
12252: u"Lemon",
12253: u"Strawberry",
12254: u"Raspberry",
12255: u"Blueberry",
12256: u"Cumin",
12258: u"Cinnamon Stick",
12260: u"Bowl of Simple Chili Base",
12261: u"Bowl of Cherry Pie Filling",
12262: u"Bowl of Grape Pie Filling",
12263: u"Ball of Cookie Dough",
12267: u"Bowl of Roux",
12268: u"Bowl of Cream Soup Base",
12269: u"Bowl of Blueberry Pie Filling",
12270: u"Bowl of Chocolate Frosting",
12271: u"Bottle of Soy Sauce",
12272: u"Pile of Divinity Fair Herbs",
12273: u"Minotaur Steak",
12275: u"Bottle of Sesame Ginger Sauce",
12276: u"Pile of Paprika",
12278: u"Pile of Simple Chili Seasoning",
12279: u"Grilled Mushroom",
12280: u"Loaf of Banana Bread",
12281: u"Bowl of Mashed Yams",
12282: u"Divinity Stuffed Mushroom",
12283: u"Grilled Portobello Mushroom",
12284: u"Bowl of Spiced Mashed Yams",
12285: u"Loaf of Zucchini Bread",
12286: u"Bowl of Salsa",
12288: u"Bowl of Avocado Salsa",
12289: u"Lemon Bar",
12290: u"Bowl of Coleslaw",
12291: u"Bowl of Sauteed Zucchini with Nutmeg",
12293: u"Bowl of Tomato Soup",
12294: u"Bowl of Clam Chowder",
12295: u"Bowl of Creamy Portobello Soup",
12296: u"Bowl of Tomato Zucchini Soup",
12297: u"Bowl of Fancy Creamy Mushroom Soup",
12298: u"Bowl of Dilled Clam Chowder",
12299: u"Bowl of Meat and Bean Chili",
12300: u"Bowl of Simple Bean Chili",
12301: u"Sage Stuffed Poultry",
12302: u"Bowl of Cabbage Stirfry",
12303: u"Veggie Burger",
12304: u"Filet of Sesame Roasted Meat",
12305: u"Sesame Roasted Dinner",
12306: u"Filet of Rosemary Roasted Meat",
12307: u"Pepper Steak Dinner",
12308: u"Bowl of Apple Sauce",
12309: u"Apple Tart",
12311: u"Bowl of Blueberry Apple Compote",
12312: u"Strawberry Tart",
12313: u"Bowl of Strawberry Apple Compote",
12314: u"Chocolate Chip Cookie",
12315: u"Caramel",
12316: u"Blueberry Pie",
12317: u"Caramel Apple",
12318: u"Chocolate Cherry",
12319: u"Cherry Cookie",
12320: u"Cherry Pie",
12322: u"Grape Pie",
12323: u"Cherry Almond Bar",
12324: u"Bag of Starch",
12325: u"Bowl of Sour Cream",
12327: u"Clam",
12328: u"Ginger Root",
12329: u"Yam",
12330: u"Zucchini",
12331: u"Chili Pepper",
12332: u"Head of Cabbage",
12333: u"Kale Leaf",
12334: u"Portobello Mushroom",
12335: u"Rosemary Sprig",
12336: u"Dill Sprig",
12337: u"Almond",
12338: u"Cherry",
12339: u"Lime",
12340: u"Avocado",
12341: u"Grape",
12342: u"Sesame Seed",
12344: u"Blueberry Cookie",
12345: u"Strawberry Cookie",
12346: u"Veggie Pizza",
12347: u"Clam Cake",
12348: u"Bowl of Beet and Bean Stew",
12349: u"Bowl of Savory Spinach and Poultry Soup",
12350: u"Coconut",
12351: u"Orange",
12353: u"Bowl of Simple Meat Chili",
12354: u"Meat Pie",
12355: u"Blueberry Tart",
12356: u"Chocolate Banana",
12357: u"Strawberry Pie",
12358: u"Spinach Burger",
12359: u"Pepper Steak",
12360: u"Sage Stuffed Mushroom",
12361: u"Bowl of Bean Salad",
12362: u"Bowl of Garlic Spinach Sautee",
12363: u"Bowl of Fancy Bean Chili",
12364: u"Bowl of Cherry Vanilla Compote",
12365: u"Cherry Tart",
12366: u"Chocolate Cherry Cake",
12367: u"Dilled Poultry Piccata",
12368: u"Mushroom Pizza",
12369: u"Bowl of Krytan Meatball Dinner",
12370: u"Stuffed Zucchini",
12371: u"Spicy Lime Steak",
12372: u"Yam Fritter",
12373: u"Loaf of Rosemary Bread",
12374: u"Bowl of Kale and Poultry Soup",
12375: u"Bowl of Kale Soup",
12376: u"Bowl of Zucchini Chili",
12377: u"Bowl of Chili and Avocado",
12378: u"Bowl of Yam Soup",
12379: u"Bowl of Meat and Cabbage Stew",
12380: u"Orange Coconut Bar",
12381: u"Chocolate Orange",
12382: u"Chocolate Mint Cookie",
12383: u"Blackberry Cookie",
12384: u"Pumpkin Pie",
12385: u"Blackberry Pie",
12386: u"Bowl of Blackberry Pear Compote",
12387: u"Ginger Pear Tart",
12388: u"Orange Coconut Cake",
12389: u"Plate of Citrus Poultry with Almonds",
12390: u"Horseradish Burger",
12391: u"Fancy Veggie Pizza",
12392: u"Bowl of Mushroom Risotto",
12393: u"Plate of Citrus Clove Meat",
12394: u"Plate of Coriander Crusted Meat",
12395: u"Plate of Roast Meat with Mint Sauce",
12396: u"Plate of Coriander Crusted Meat Dinner",
12397: u"Bowl of Avocado Stirfry",
12398: u"Bowl of Chickpea Salad",
12399: u"Chickpea Fritter",
12400: u"Slice of Pumpkin Bread",
12401: u"Bowl of Pesto Pasta Salad",
12402: u"Slice of Spiced Bread",
12403: u"Eztlitl Stuffed Mushroom",
12404: u"Bowl of Cabbage and Chickpea Salad",
12405: u"Bowl of Cauliflower Sautee",
12406: u"Bowl of Hummus",
12407: u"Bowl of Spiced Meat and Cabbage Stew",
12408: u"Bowl of Spiced Veggie Chili",
12409: u"Bowl of Spiced Meat Chili",
12410: u"Bowl of Pumpkin Bisque",
12411: u"Bowl of Cauliflower Soup",
12412: u"Bowl of Chickpea Soup",
12413: u"Bowl of Hearty Poultry Soup",
12414: u"Bowl of Chickpea and Poultry Soup",
12415: u"Raspberry Peach Bar",
12416: u"Chocolate Raspberry Cream",
12417: u"Chocolate Raspberry Cookie",
12418: u"Peach Cookie",
12419: u"Peach Pie",
12420: u"Mixed Berry Pie",
12421: u"Raspberry Peach Compote",
12422: u"Peach Tart",
12423: u"Chocolate Raspberry Cake",
12424: u"Bowl of Poultry Tarragon Pasta",
12425: u"Tarragon Stuffed Poultry",
12426: u"Spicy Cheeseburger",
12427: u"Super Veggie Pizza",
12428: u"Bowl of Mushroom and Asparagus Risotto",
12429: u"Spicier Flank Steak",
12430: u"Plate of Steak and Asparagus",
12431: u"Plate of Roast Meat with Braised Leeks",
12432: u"Plate of Steak and Asparagus Dinner",
12433: u"Bowl of Eggplant Stirfry",
12434: u"Bowl of Asparagus and Sage Salad",
12435: u"Eggplant Fritter",
12436: u"Loaf of Raspberry Peach Bread",
12437: u"Loaf of Tarragon Bread",
12438: u"Roasted Parsnip",
12439: u"Spicy Stuffed Mushroom",
12440: u"Bowl of Mango Salsa",
12441: u"Bowl of Eggplant Sautee",
12442: u"Roasted Artichoke",
12443: u"Bowl of Poultry and Winter Vegetable Soup",
12444: u"Bowl of Poultry and Leek Soup",
12445: u"Bowl of Snow Truffle Soup",
12446: u"Bowl of Spicy Veggie Chili",
12447: u"Bowl of Spicy Meat Chili",
12448: u"Bowl of Potato and Leek Soup",
12449: u"Bowl of Butternut Squash Soup",
12450: u"Bowl of Artichoke Soup",
12451: u"Bowl of Meat and Winter Vegetable Stew",
12487: u"Bowl of Curry Pumpkin Soup",
12489: u"Bowl of Herbed Meat Stock",
12490: u"Bowl of Herbed Poultry Stock",
12491: u"Bowl of Herbed Vegetable Stock",
12492: u"Sesame Seed Bun",
12493: u"Bowl of Chocolate Cherry Frosting",
12494: u"Bowl of Dilled Cream Sauce",
12495: u"Bowl of Stirfry Base",
12496: u"Pile of Stirfry Spice Mix",
12497: u"Bowl of Chocolate Raspberry Frosting",
12498: u"Bowl of Peach Pie Filling",
12499: u"Bowl of Mixed Berry Pie Filling",
12500: u"Bowl of Tarragon Cream Sauce",
12501: u"Bowl of Winter Vegetable Mix",
12502: u"Eggplant",
12503: u"Peach",
12504: u"Cayenne Pepper",
12505: u"Asparagus Spear",
12506: u"Tarragon Leaves",
12507: u"Parsnip",
12508: u"Leek",
12510: u"Lotus Root",
12511: u"Butternut Squash",
12512: u"Artichoke",
12513: u"Bowl of Strawberry Pie Filling",
12514: u"Pear",
12515: u"Chickpea",
12516: u"Pinenut",
12517: u"Shallot",
12518: u"Horseradish Root",
12519: u"Pile of Pumpkin Pie Spice",
12520: u"Bowl of Pumpkin Pie Filling",
12521: u"Jar of Orange Sauce",
12522: u"Orange Cake",
12523: u"Bowl of Orange Coconut Frosting",
12524: u"Bowl of Blackberry Pie Filling",
12525: u"Bowl of Eztlitl Stuffing",
12526: u"Bowl of Pesto",
12527: u"Jar of Mint Sauce",
12528: u"Bowl of Fancy Tangy Sautee Mix",
12529: u"Bowl of Risotto Base",
12530: u"Jar of Citrus Cream Sauce",
12531: u"Coriander Seed",
12532: u"Head of Cauliflower",
12533: u"Green Onion",
12534: u"Clove",
12535: u"Rutabaga",
12536: u"Mint Leaf",
12537: u"Blackberry",
12538: u"Sugar Pumpkin",
12543: u"Mango",
12548: u"Bowl of Simple Stirfry",
12549: u"Chili Pepper Popper",
12729: u"Bowl of Basic Vegetable Soup",
12730: u"Bowl of Onion Soup",
12731: u"Bowl of Basic Poultry Soup",
12732: u"Bowl of Green Bean Stew",
12733: u"Slice of Cinnamon Toast",
12734: u"Cinnamon Pinwheel",
12735: u"Bowl of Sauteed Carrots",
12736: u"Onion Ring",
12737: u"Grilled Steak",
12738: u"Lemon Tart",
12741: u"Unidentified Brown Dye",
12743: u"Unidentified Green Dye",
12744: u"Unidentified Orange Dye",
12745: u"Unidentified Purple Dye",
12746: u"Unidentified Red Dye",
12747: u"Unidentified Yellow Dye",
12748: u"Unidentified Blue Dye",
12749: u"Unidentified Brown Dye",
12750: u"Unidentified Gray Dye",
12751: u"Unidentified Green Dye",
12752: u"Unidentified Orange Dye",
12753: u"Unidentified Purple Dye",
12754: u"Unidentified Red Dye",
12755: u"Unidentified Yellow Dye",
12756: u"Unidentified Blue Dye",
12757: u"Unidentified Brown Dye",
12758: u"Unidentified Gray Dye",
12759: u"Unidentified Green Dye",
12760: u"Unidentified Orange Dye",
12761: u"Unidentified Purple Dye",
12762: u"Unidentified Red Dye",
12763: u"Unidentified Yellow Dye",
12806: u"Copper Hook",
12807: u"Gold Hook",
12808: u"Mithril Hook",
12810: u"Platinum Hook",
12811: u"Silver Hook",
12812: u"Copper Setting",
12813: u"Gold Setting",
12814: u"Mithril Setting",
12816: u"Platinum Setting",
12817: u"Silver Setting",
12818: u"Copper Chain",
12819: u"Gold Chain",
12820: u"Mithril Chain",
12822: u"Platinum Chain",
12823: u"Silver Chain",
12824: u"Copper Band",
12825: u"Gold Band",
12826: u"Mithril Band",
12828: u"Platinum Band",
12829: u"Silver Band",
12830: u"Copper Filigree",
12831: u"Silver Filigree",
12832: u"Gold Filigree",
12833: u"Mithril Filigree",
12835: u"Platinum Filigree",
12836: u"Bronze Greatsword Blade",
12837: u"Darksteel Greatsword Blade",
12838: u"Iron Greatsword Blade",
12839: u"Mithril Greatsword Blade",
12840: u"Orichalcum Greatsword Blade",
12841: u"Steel Greatsword Blade",
12842: u"Bronze Sword Hilt",
12843: u"Darksteel Sword Hilt",
12844: u"Iron Sword Hilt",
12845: u"Mithril Sword Hilt",
12846: u"Orichalcum Sword Hilt",
12847: u"Steel Sword Hilt",
12848: u"Bronze Axe Blade",
12849: u"Darksteel Axe Blade",
12850: u"Iron Axe Blade",
12851: u"Mithril Axe Blade",
12852: u"Orichalcum Axe Blade",
12853: u"Steel Axe Blade",
12854: u"Bronze Dagger Blade",
12855: u"Darksteel Dagger Blade",
12856: u"Iron Dagger Blade",
12857: u"Mithril Dagger Blade",
12858: u"Orichalcum Dagger Blade",
12859: u"Steel Dagger Blade",
12860: u"Bronze Hammer Head",
12861: u"Darksteel Hammer Head",
12862: u"Iron Hammer Head",
12863: u"Mithril Hammer Head",
12864: u"Orichalcum Hammer Head",
12865: u"Steel Hammer Head",
12866: u"Bronze Sword Blade",
12867: u"Darksteel Sword Blade",
12868: u"Iron Sword Blade",
12869: u"Mithril Sword Blade",
12870: u"Orichalcum Sword Blade",
12871: u"Steel Sword Blade",
12872: u"Bronze Mace Head",
12873: u"Darksteel Mace Head",
12874: u"Iron Mace Head",
12875: u"Mithril Mace Head",
12876: u"Orichalcum Mace Head",
12877: u"Steel Mace Head",
12878: u"Bronze Dagger Hilt",
12879: u"Darksteel Dagger Hilt",
12880: u"Iron Dagger Hilt",
12881: u"Mithril Dagger Hilt",
12882: u"Orichalcum Dagger Hilt",
12883: u"Steel Dagger Hilt",
12884: u"Bronze Greatsword Hilt",
12885: u"Darksteel Greatsword Hilt",
12886: u"Iron Greatsword Hilt",
12887: u"Mithril Greatsword Hilt",
12888: u"Orichalcum Greatsword Hilt",
12889: u"Steel Greatsword Hilt",
12890: u"Small Soft Haft",
12891: u"Small Green Haft",
12892: u"Small Ancient Haft",
12893: u"Small Seasoned Haft",
12894: u"Small Elder Haft",
12895: u"Small Hard Haft",
12896: u"Large Elder Haft",
12897: u"Large Green Haft",
12898: u"Large Hard Haft",
12899: u"Large Ancient Haft",
12900: u"Large Soft Haft",
12901: u"Large Seasoned Haft",
12902: u"Bronze Shield Backing",
12903: u"Darksteel Shield Backing",
12904: u"Iron Shield Backing",
12905: u"Mithril Shield Backing",
12906: u"Orichalcum Shield Backing",
12907: u"Steel Shield Backing",
12908: u"Bronze Shield Boss",
12909: u"Darksteel Shield Boss",
12910: u"Iron Shield Boss",
12911: u"Mithril Shield Boss",
12912: u"Orichalcum Shield Boss",
12913: u"Steel Shield Boss",
12914: u"Bronze Rifle Barrel",
12915: u"Darksteel Rifle Barrel",
12916: u"Iron Rifle Barrel",
12917: u"Mithril Rifle Barrel",
12918: u"Orichalcum Rifle Barrel",
12919: u"Steel Rifle Barrel",
12920: u"Bronze Pistol Barrel",
12921: u"Darksteel Pistol Barrel",
12922: u"Iron Pistol Barrel",
12923: u"Mithril Pistol Barrel",
12924: u"Orichalcum Pistol Barrel",
12925: u"Steel Pistol Barrel",
12926: u"Bronze Warhorn Mouthpiece",
12927: u"Darksteel Warhorn Mouthpiece",
12928: u"Iron Warhorn Mouthpiece",
12929: u"Mithril Warhorn Mouthpiece",
12930: u"Orichalcum Warhorn Mouthpiece",
12931: u"Steel Warhorn Mouthpiece",
12932: u"Bronze Horn",
12933: u"Darksteel Horn",
12934: u"Iron Horn",
12935: u"Mithril Horn",
12936: u"Orichalcum Horn",
12937: u"Steel Horn",
12938: u"Elder Longbow Stave",
12939: u"Green Longbow Stave",
12940: u"Hard Longbow Stave",
12941: u"Ancient Longbow Stave",
12942: u"Soft Longbow Stave",
12943: u"Seasoned Longbow Stave",
12944: u"Elder Short-Bow Stave",
12945: u"Green Short-Bow Stave",
12946: u"Hard Short-Bow Stave",
12947: u"Ancient Short-Bow Stave",
12948: u"Soft Short-Bow Stave",
12949: u"Seasoned Short-Bow Stave",
12950: u"Elder Rifle Stock",
12951: u"Green Rifle Stock",
12952: u"Hard Rifle Stock",
12953: u"Ancient Rifle Stock",
12954: u"Soft Rifle Stock",
12955: u"Seasoned Rifle Stock",
12956: u"Elder Pistol Frame",
12957: u"Green Pistol Frame",
12958: u"Hard Pistol Frame",
12959: u"Ancient Pistol Frame",
12960: u"Soft Pistol Frame",
12961: u"Seasoned Pistol Frame",
12962: u"Thin String",
12963: u"Hardened String",
12964: u"Rawhide String",
12965: u"Rugged String",
12966: u"Thick String",
12967: u"Coarse String",
12968: u"Green Torch Handle",
12969: u"Soft Torch Handle",
12970: u"Elder Staff Shaft",
12971: u"Green Staff Shaft",
12972: u"Hard Staff Shaft",
12973: u"Ancient Staff Shaft",
12974: u"Soft Staff Shaft",
12975: u"Seasoned Staff Shaft",
12976: u"Ancient Scepter Rod",
12977: u"Elder Scepter Rod",
12978: u"Green Scepter Rod",
12979: u"Hard Scepter Rod",
12980: u"Seasoned Scepter Rod",
12981: u"Soft Scepter Rod",
12982: u"Ancient Focus Casing",
12983: u"Elder Focus Casing",
12984: u"Green Focus Casing",
12985: u"Hard Focus Casing",
12986: u"Seasoned Focus Casing",
12987: u"Soft Focus Casing",
12988: u"Orichalcum Plated Dowel",
12989: u"Mithril Plated Dowel",
12990: u"Bronze Plated Dowel",
12991: u"Darksteel Plated Dowel",
12992: u"Steel Plated Dowel",
12993: u"Iron Plated Dowel",
12994: u"Bronze Spear Head",
12995: u"Darksteel Spear Head",
12996: u"Iron Spear Head",
12997: u"Mithril Spear Head",
12998: u"Orichalcum Spear Head",
12999: u"Steel Spear Head",
13000: u"Bronze Trident Head",
13001: u"Darksteel Trident Head",
13002: u"Iron Trident Head",
13003: u"Mithril Trident Head",
13004: u"Orichalcum Trident Head",
13005: u"Steel Trident Head",
13006: u"Rune of Holding",
13007: u"Major Rune of Holding",
13008: u"Greater Rune of Holding",
13009: u"Superior Rune of Holding",
13010: u"Minor Rune of Holding",
13011: u"Seasoned Torch Handle",
13012: u"Hard Torch Handle",
13013: u"Elder Torch Handle",
13014: u"Ancient Torch Handle",
13015: u"Jute Tunic Panel",
13016: u"Jute Tunic Lining",
13017: u"Jute Wristguard Strap",
13018: u"Jute Wristguard Padding",
13019: u"Jute Breeches Panel",
13020: u"Jute Breeches Lining",
13021: u"Jute Sandal Upper",
13022: u"Jute Sandal Sole",
13023: u"Jute Headpiece Strap",
13024: u"Jute Headpiece Padding",
13025: u"Jute Epaulet Padding",
13026: u"Jute Epaulet Panel",
13027: u"Wool Vestments Panel",
13028: u"Wool Vestments Lining",
13029: u"Wool Gloves Panel",
13030: u"Wool Gloves Padding",
13031: u"Wool Trouser Panel",
13032: u"Wool Trouser Lining",
13033: u"Wool Footwear Sole",
13034: u"Wool Footwear Upper",
13035: u"Wool Headpiece Strap",
13036: u"Wool Headpiece Padding",
13037: u"Wool Epaulet Padding",
13038: u"Wool Epaulet Panel",
13039: u"Cotton Coat Panel",
13040: u"Cotton Coat Lining",
13041: u"Cotton Gloves Panel",
13042: u"Cotton Gloves Padding",
13043: u"Cotton Pants Panel",
13044: u"Cotton Pants Lining",
13045: u"Cotton Shoe Sole",
13046: u"Cotton Shoe Upper",
13047: u"Cotton Helm Strap",
13048: u"Cotton Helm Padding",
13049: u"Cotton Epaulet Padding",
13050: u"Cotton Epaulet Panel",
13051: u"Ancient Harpoon",
13052: u"Elder Harpoon",
13053: u"Green Harpoon",
13054: u"Hard Harpoon",
13055: u"Seasoned Harpoon",
13056: u"Soft Harpoon",
13057: u"Bronze Torch Head",
13058: u"Darksteel Torch Head",
13059: u"Iron Torch Head",
13060: u"Mithril Torch Head",
13061: u"Orichalcum Torch Head",
13062: u"Steel Torch Head",
13063: u"Coarse Boot Sole",
13064: u"Coarse Shoulderguard Panel",
13065: u"Coarse Boot Upper",
13066: u"Coarse Longcoat Padding",
13067: u"Coarse Longcoat Panel",
13068: u"Coarse Glove Lining",
13069: u"Coarse Glove Panel",
13070: u"Coarse Goggle Padding",
13071: u"Coarse Goggle Strap",
13072: u"Coarse Trouser Padding",
13073: u"Coarse Trouser Panel",
13074: u"Coarse Shoulderguard Padding",
13075: u"Thin Boot Sole",
13076: u"Thin Glove Strap",
13077: u"Thin Shoulderguard Panel",
13078: u"Thin Boot Upper",
13079: u"Thin Jerkin Padding",
13080: u"Thin Jerkin Panel",
13081: u"Thin Glove Lining",
13082: u"Thin Mask Panel",
13083: u"Thin Mask Strap",
13084: u"Thin Legging Padding",
13085: u"Thin Legging Panel",
13086: u"Thin Shoulderguard Padding",
13087: u"Rawhide Legging Strap",
13088: u"Rawhide Legging Panel",
13089: u"Rawhide Mask Padding",
13090: u"Rawhide Mask Strap",
13091: u"Rawhide Boot Upper",
13092: u"Rawhide Shoulderguard Padding",
13093: u"Rawhide Chestguard Padding",
13094: u"Rawhide Chestguard Panel",
13095: u"Rawhide Glove Lining",
13096: u"Rawhide Boot Sole",
13097: u"Rawhide Shoulderguard Panel",
13098: u"Rawhide Glove Strap",
13099: u"Bronze Chain Legging Lining",
13100: u"Bronze Chain Leggings Panel",
13101: u"Bronze Helmet Lining",
13102: u"Bronze Helmet Casing",
13103: u"Bronze Chain Boot Lining",
13104: u"Bronze Pauldron Lining",
13105: u"Bronze Chain Chest Padding",
13106: u"Bronze Chain Chest Panel",
13107: u"Bronze Chain Glove Lining",
13108: u"Bronze Chain Boot Panel",
13109: u"Bronze Pauldron Casing",
13110: u"Bronze Chain Glove Panel",
13111: u"Iron Scale Boot Lining",
13112: u"Iron Scale Armguard Panel",
13113: u"Iron Pauldron Casing",
13114: u"Iron Scale Boot Panel",
13115: u"Iron Scale Chest Padding",
13116: u"Iron Scale Chest Panel",
13117: u"Iron Scale Armguard Lining",
13118: u"Iron Casque Lining",
13119: u"Iron Casque Casing",
13120: u"Iron Scale Legging Lining",
13121: u"Iron Scale Legging Panel",
13122: u"Iron Pauldron Lining",
13123: u"Steel Splint Boot Casing",
13124: u"Steel Splint Pauldron Casing",
13125: u"Steel Splint Boot Lining",
13126: u"Steel Splint Chestplate Padding",
13127: u"Steel Splint Chestplate Panel",
13128: u"Steel Splint Gauntlet Lining",
13129: u"Steel Splint Gauntlet Plates",
13130: u"Steel Splint Helmet Lining",
13131: u"Steel Splint Helmet Casing",
13132: u"Steel Splint Legging Lining",
13133: u"Steel Splint Legging Panel",
13134: u"Steel Splint Pauldron Lining",
13135: u"Darksteel Boot Casing",
13136: u"Darksteel Pauldron Casing",
13137: u"Darksteel Boot Lining",
13138: u"Darksteel Chestplate Padding",
13139: u"Darksteel Chestplate Panel",
13140: u"Darksteel Gauntlet Lining",
13141: u"Darksteel Gauntlet Plates",
13142: u"Darksteel Helmet Lining",
13143: u"Darksteel Helmet Casing",
13144: u"Darksteel Legging Lining",
13145: u"Darksteel Legging Panel",
13146: u"Darksteel Pauldron Lining",
13147: u"Mithril Boot Casing",
13148: u"Mithril Pauldron Casing",
13149: u"Mithril Boot Lining",
13150: u"Mithril Chestplate Padding",
13151: u"Mithril Chestplate Panel",
13152: u"Mithril Gauntlet Lining",
13153: u"Mithril Gauntlet Plates",
13154: u"Mithril Helmet Lining",
13155: u"Mithril Helmet Casing",
13156: u"Mithril Legging Lining",
13157: u"Mithril Legging Panel",
13158: u"Mithril Pauldron Lining",
13159: u"Orichalcum Boot Casing",
13160: u"Orichalcum Pauldron Casing",
13161: u"Orichalcum Boot Lining",
13162: u"Orichalcum Chestplate Padding",
13163: u"Orichalcum Chestplate Panel",
13164: u"Orichalcum Gauntlet Lining",
13165: u"Orichalcum Gauntlet Plates",
13166: u"Orichalcum Helmet Lining",
13167: u"Orichalcum Helmet Casing",
13168: u"Orichalcum Legging Lining",
13169: u"Orichalcum Legging Panel",
13170: u"Orichalcum Pauldron Lining",
13171: u"Linen Coat Panel",
13172: u"Linen Coat Lining",
13173: u"Linen Gloves Panel",
13174: u"Linen Gloves Padding",
13175: u"Linen Pants Panel",
13176: u"Linen Pants Lining",
13177: u"Linen Shoe Sole",
13178: u"Linen Shoe Upper",
13179: u"Linen Helm Strap",
13180: u"Linen Helm Padding",
13181: u"Linen Epaulet Padding",
13182: u"Linen Epaulet Panel",
13183: u"Silk Coat Panel",
13184: u"Silk Coat Lining",
13185: u"Silk Gloves Panel",
13186: u"Silk Gloves Padding",
13187: u"Silk Pants Panel",
13188: u"Silk Pants Lining",
13189: u"Silk Shoe Sole",
13190: u"Silk Shoe Upper",
13191: u"Silk Helm Strap",
13192: u"Silk Helm Padding",
13193: u"Silk Epaulet Padding",
13194: u"Silk Epaulet Panel",
13195: u"Gossamer Coat Panel",
13196: u"Gossamer Coat Lining",
13197: u"Gossamer Gloves Panel",
13198: u"Gossamer Gloves Padding",
13199: u"Gossamer Pants Panel",
13200: u"Gossamer Pants Lining",
13201: u"Gossamer Shoe Sole",
13202: u"Gossamer Shoe Upper",
13203: u"Gossamer Helm Strap",
13204: u"Gossamer Helm Padding",
13205: u"Gossamer Epaulet Padding",
13206: u"Gossamer Epaulet Panel",
13207: u"Rugged Boot Sole",
13208: u"Rugged Shoulderguard Panel",
13209: u"Rugged Boot Upper",
13210: u"Rugged Longcoat Padding",
13211: u"Rugged Longcoat Panel",
13212: u"Rugged Glove Lining",
13213: u"Rugged Glove Panel",
13214: u"Rugged Goggle Padding",
13215: u"Rugged Goggle Strap",
13216: u"Rugged Trouser Padding",
13217: u"Rugged Trouser Panel",
13218: u"Rugged Shoulderguard Padding",
13219: u"Thick Boot Sole",
13220: u"Thick Shoulderguard Panel",
13221: u"Thick Boot Upper",
13222: u"Thick Longcoat Padding",
13223: u"Thick Longcoat Panel",
13224: u"Thick Glove Lining",
13225: u"Thick Glove Panel",
13226: u"Thick Mask Padding",
13227: u"Thick Mask Strap",
13228: u"Thick Trouser Padding",
13229: u"Thick Trouser Panel",
13230: u"Thick Shoulderguard Padding",
13231: u"Hardened Boot Sole",
13232: u"Hardened Shoulderguard Panel",
13233: u"Hardened Boot Upper",
13234: u"Hardened Longcoat Padding",
13235: u"Hardened Longcoat Panel",
13236: u"Hardened Glove Lining",
13237: u"Hardened Glove Panel",
13238: u"Hardened Helmet Padding",
13239: u"Hardened Helmet Strap",
13240: u"Hardened Trouser Padding",
13241: u"Hardened Trouser Panel",
13242: u"Hardened Shoulderguard Padding",
13243: u"Ancient Focus Core",
13244: u"Elder Focus Core",
13245: u"Green Focus Core",
13246: u"Hard Focus Core",
13247: u"Seasoned Focus Core",
13248: u"Soft Focus Core",
13249: u"Green Trident Shaft",
13250: u"Hard Trident Shaft",
13251: u"Soft Trident Shaft",
13252: u"Elder Trident Shaft",
13253: u"Ancient Trident Shaft",
13254: u"Seasoned Trident Shaft",
13255: u"Ancient Scepter Core",
13256: u"Elder Scepter Core",
13257: u"Green Scepter Core",
13258: u"Hard Scepter Core",
13259: u"Seasoned Scepter Core",
13260: u"Soft Scepter Core",
13261: u"Ancient Staff Head",
13262: u"Elder Staff Head",
13263: u"Green Staff Head",
13264: u"Hard Staff Head",
13265: u"Seasoned Staff Head",
13266: u"Soft Staff Head",
13267: u"Turquoise Copper Amulet",
13268: u"Tiger's Eye Copper Stud",
13269: u"Tiger's Eye Copper Ring",
13270: u"Turquoise Copper Ring",
13271: u"Malachite Copper Ring",
13273: u"Garnet Copper Amulet",
13275: u"Garnet Copper Stud",
13277: u"Garnet Copper Ring",
13278: u"Garnet Copper Ring",
13279: u"Turquoise Copper Ring",
13280: u"Malachite Copper Ring",
13281: u"Tiger's Eye Copper Ring",
13282: u"Amethyst Silver Band",
13283: u"Garnet Copper Amulet",
13284: u"Tiger's Eye Copper Amulet",
13285: u"Turquoise Copper Stud",
13286: u"Malachite Copper Stud",
13287: u"Turquoise Copper Stud",
13288: u"Garnet Copper Stud",
13289: u"Tiger's Eye Copper Stud",
13290: u"Malachite Copper Stud",
13291: u"Amethyst Silver Earring",
13293: u"Malachite Copper Amulet",
13294: u"Amethyst Silver Pendant",
13296: u"Turquoise Copper Amulet",
13301: u"Carnelian Silver Ring",
13302: u"Carnelian Silver Ring",
13303: u"Peridot Silver Ring",
13304: u"Carnelian Silver Amulet",
13305: u"Carnelian Silver Amulet",
13306: u"Peridot Silver Amulet",
13307: u"Carnelian Silver Stud",
13308: u"Carnelian Silver Stud",
13309: u"Peridot Silver Earring",
13310: u"Peridot Silver Ring",
13311: u"Lapis Silver Ring",
13312: u"Peridot Silver Pendant",
13313: u"Lapis Silver Amulet",
13314: u"Peridot Silver Earring",
13315: u"Lapis Silver Earring",
13316: u"Carnelian Gold Band",
13317: u"Peridot Gold Ring",
13318: u"Amethyst Gold Ring",
13319: u"Topaz Gold Ring",
13320: u"Lapis Gold Ring",
13321: u"Carnelian Gold Pendant",
13322: u"Peridot Gold Amulet",
13323: u"Amethyst Gold Amulet",
13324: u"Topaz Gold Amulet",
13325: u"Lapis Gold Amulet",
13326: u"Carnelian Gold Earring",
13327: u"Peridot Gold Earring",
13328: u"Amethyst Gold Earring",
13329: u"Topaz Gold Earring",
13330: u"Lapis Gold Earring",
13331: u"Carnelian Gold Band",
13332: u"Peridot Gold Ring",
13333: u"Lapis Gold Ring",
13335: u"Topaz Gold Ring",
13336: u"Amethyst Gold Ring",
13337: u"Carnelian Gold Pendant",
13338: u"Peridot Gold Amulet",
13339: u"Lapis Gold Amulet",
13341: u"Topaz Gold Amulet",
13342: u"Amethyst Gold Amulet",
13343: u"Carnelian Gold Earring",
13344: u"Peridot Gold Earring",
13345: u"Lapis Gold Earring",
13347: u"Topaz Gold Earring",
13348: u"Amethyst Gold Earring",
13352: u"Amethyst Silver Band",
13353: u"Pearl Copper Ring",
13354: u"Pearl Copper Amulet",
13355: u"Amethyst Silver Pendant",
13356: u"Pearl Copper Stud",
13357: u"Amethyst Silver Earring",
13359: u"Tiger's Eye Copper Amulet",
13360: u"Malachite Copper Amulet",
13361: u"Amber Copper Amulet",
13362: u"Pearl Copper Amulet",
13363: u"Amber Copper Amulet",
13364: u"Amber Copper Stud",
13365: u"Amber Copper Stud",
13366: u"Pearl Copper Stud",
13367: u"Amber Copper Ring",
13368: u"Pearl Copper Ring",
13369: u"Amber Copper Ring",
13370: u"Topaz Silver Band",
13371: u"Lapis Silver Ring",
13372: u"Topaz Silver Band",
13373: u"Topaz Silver Earring",
13374: u"Lapis Silver Earring",
13375: u"Topaz Silver Earring",
13376: u"Topaz Silver Pendant",
13377: u"Lapis Silver Amulet",
13378: u"Topaz Silver Pendant",
13379: u"Beryl Platinum Ring",
13380: u"Chrysocola Platinum Ring",
13381: u"Emerald Platinum Ring",
13382: u"Coral Platinum Ring",
13383: u"Ruby Platinum Ring",
13384: u"Sapphire Platinum Ring",
13386: u"Ruby Platinum Ring",
13387: u"Sapphire Platinum Ring",
13388: u"Beryl Platinum Ring",
13389: u"Chrysocola Platinum Ring",
13390: u"Emerald Platinum Ring",
13391: u"Coral Platinum Ring",
13393: u"Chrysocola Mithril Ring",
13394: u"Emerald Mithril Ring",
13395: u"Beryl Mithril Ring",
13396: u"Sapphire Mithril Ring",
13398: u"Ruby Mithril Ring",
13399: u"Sapphire Mithril Ring",
13400: u"Beryl Mithril Ring",
13401: u"Chrysocola Mithril Ring",
13402: u"Emerald Mithril Ring",
13403: u"Ruby Mithril Ring",
13404: u"Coral Mithril Ring",
13406: u"Coral Mithril Ring",
13407: u"Chrysocola Mithril Ring",
13408: u"Sapphire Mithril Ring",
13410: u"Ruby Mithril Ring",
13411: u"Coral Mithril Ring",
13412: u"Emerald Mithril Ring",
13413: u"Beryl Mithril Ring",
13421: u"Beryl Platinum Earring",
13422: u"Chrysocola Platinum Earring",
13423: u"Emerald Platinum Earring",
13424: u"Coral Platinum Earring",
13425: u"Ruby Platinum Earring",
13426: u"Sapphire Platinum Earring",
13428: u"Ruby Platinum Earring",
13429: u"Sapphire Platinum Earring",
13430: u"Beryl Platinum Earring",
13431: u"Chrysocola Platinum Earring",
13432: u"Emerald Platinum Earring",
13433: u"Coral Platinum Earring",
13435: u"Chrysocola Mithril Earring",
13436: u"Emerald Mithril Earring",
13437: u"Beryl Mithril Earring",
13438: u"Sapphire Mithril Earring",
13440: u"Ruby Mithril Earring",
13441: u"Coral Mithril Earring",
13442: u"Chrysocola Mithril Earring",
13443: u"Sapphire Mithril Earring",
13445: u"Ruby Mithril Earring",
13446: u"Coral Mithril Earring",
13447: u"Emerald Mithril Earring",
13448: u"Beryl Mithril Earring",
13449: u"Chrysocola Mithril Earring",
13450: u"Sapphire Mithril Earring",
13452: u"Ruby Mithril Earring",
13453: u"Coral Mithril Earring",
13454: u"Emerald Mithril Earring",
13455: u"Beryl Mithril Earring",
13463: u"Beryl Platinum Amulet",
13464: u"Chrysocola Platinum Amulet",
13465: u"Emerald Platinum Amulet",
13466: u"Coral Platinum Amulet",
13467: u"Ruby Platinum Amulet",
13468: u"Sapphire Platinum Amulet",
13470: u"Ruby Platinum Amulet",
13471: u"Sapphire Platinum Amulet",
13472: u"Beryl Platinum Amulet",
13473: u"Chrysocola Platinum Amulet",
13474: u"Emerald Platinum Amulet",
13475: u"Coral Platinum Amulet",
13477: u"Chrysocola Mithril Amulet",
13478: u"Emerald Mithril Amulet",
13479: u"Beryl Mithril Amulet",
13480: u"Sapphire Mithril Amulet",
13482: u"Ruby Mithril Amulet",
13483: u"Sapphire Mithril Amulet",
13484: u"Beryl Mithril Amulet",
13485: u"Chrysocola Mithril Amulet",
13486: u"Emerald Mithril Amulet",
13487: u"Ruby Mithril Amulet",
13488: u"Coral Mithril Amulet",
13490: u"Coral Mithril Amulet",
13491: u"Chrysocola Mithril Amulet",
13492: u"Sapphire Mithril Amulet",
13494: u"Ruby Mithril Amulet",
13495: u"Coral Mithril Amulet",
13496: u"Emerald Mithril Amulet",
13497: u"Beryl Mithril Amulet",
13505: u"Spinel Silver Ring",
13506: u"Spinel Silver Ring",
13507: u"Spinel Gold Ring",
13508: u"Spinel Gold Ring",
13509: u"Spinel Silver Earring",
13510: u"Spinel Silver Earring",
13511: u"Spinel Gold Earring",
13512: u"Spinel Gold Earring",
13513: u"Spinel Silver Amulet",
13514: u"Spinel Silver Amulet",
13515: u"Spinel Gold Amulet",
13516: u"Spinel Gold Amulet",
13520: u"Topaz Silver Band",
13521: u"Spinel Silver Ring",
13522: u"Peridot Silver Ring",
13524: u"Carnelian Silver Ring",
13525: u"Amethyst Silver Band",
13526: u"Lapis Silver Ring",
13527: u"Topaz Gold Ring",
13528: u"Spinel Gold Ring",
13529: u"Peridot Gold Ring",
13531: u"Carnelian Gold Band",
13532: u"Amethyst Gold Ring",
13533: u"Lapis Gold Ring",
13534: u"Chrysocola Platinum Ring",
13535: u"Sapphire Platinum Ring",
13537: u"Beryl Platinum Ring",
13538: u"Ruby Platinum Ring",
13539: u"Coral Platinum Ring",
13540: u"Emerald Platinum Ring",
13541: u"Topaz Silver Earring",
13542: u"Spinel Silver Earring",
13543: u"Peridot Silver Earring",
13545: u"Carnelian Silver Stud",
13546: u"Amethyst Silver Earring",
13547: u"Lapis Silver Earring",
13548: u"Topaz Gold Earring",
13549: u"Spinel Gold Earring",
13550: u"Peridot Gold Earring",
13552: u"Carnelian Gold Earring",
13553: u"Amethyst Gold Earring",
13554: u"Lapis Gold Earring",
13555: u"Chrysocola Platinum Earring",
13556: u"Sapphire Platinum Earring",
13558: u"Beryl Platinum Earring",
13559: u"Ruby Platinum Earring",
13560: u"Coral Platinum Earring",
13561: u"Emerald Platinum Earring",
13562: u"Topaz Silver Pendant",
13563: u"Spinel Silver Amulet",
13564: u"Peridot Silver Pendant",
13566: u"Carnelian Silver Amulet",
13567: u"Amethyst Silver Pendant",
13568: u"Lapis Silver Amulet",
13569: u"Topaz Gold Amulet",
13570: u"Spinel Gold Amulet",
13571: u"Peridot Gold Amulet",
13573: u"Carnelian Gold Pendant",
13574: u"Amethyst Gold Amulet",
13575: u"Lapis Gold Amulet",
13576: u"Chrysocola Platinum Amulet",
13577: u"Sapphire Platinum Amulet",
13579: u"Beryl Platinum Amulet",
13580: u"Ruby Platinum Amulet",
13581: u"Coral Platinum Amulet",
13582: u"Emerald Platinum Amulet",
13583: u"Carrion Primordus Staff",
13584: u"Cleric's Primordus Staff",
13586: u"Valkyrie Primordus Staff",
13587: u"Berserker's Primordus Staff",
13588: u"Rampager's Primordus Staff",
13589: u"Knight's Primordus Staff",
13590: u"Carrion Primordus Trident",
13591: u"Cleric's Primordus Trident",
13593: u"Valkyrie Primordus Trident",
13594: u"Berserker's Primordus Trident",
13595: u"Rampager's Primordus Trident",
13596: u"Knight's Primordus Trident",
13597: u"Carrion Primordus Scepter",
13598: u"Cleric's Primordus Scepter",
13600: u"Valkyrie Primordus Scepter",
13601: u"Berserker's Primordus Scepter",
13602: u"Rampager's Primordus Scepter",
13603: u"Knight's Primordus Scepter",
13604: u"Carrion Primordus Focus",
13605: u"Cleric's Primordus Focus",
13607: u"Valkyrie Primordus Focus",
13608: u"Berserker's Primordus Focus",
13609: u"Rampager's Primordus Focus",
13610: u"Knight's Primordus Focus",
13611: u"Carrion Primordus Short Bow",
13612: u"Cleric's Primordus Short Bow",
13614: u"Valkyrie Primordus Short Bow",
13615: u"Berserker's Primordus Short Bow",
13616: u"Rampager's Primordus Short Bow",
13617: u"Knight's Primordus Short Bow",
13618: u"Carrion Primordus Warhorn",
13619: u"Cleric's Primordus Warhorn",
13621: u"Valkyrie Primordus Warhorn",
13622: u"Berserker's Primordus Warhorn",
13623: u"Rampager's Primordus Warhorn",
13624: u"Knight's Primordus Warhorn",
13625: u"Carrion Primordus Torch",
13626: u"Cleric's Primordus Torch",
13628: u"Valkyrie Primordus Torch",
13629: u"Berserker's Primordus Torch",
13630: u"Rampager's Primordus Torch",
13631: u"Knight's Primordus Torch",
13632: u"Carrion Primordus Longbow",
13633: u"Cleric's Primordus Longbow",
13635: u"Valkyrie Primordus Longbow",
13636: u"Berserker's Primordus Longbow",
13637: u"Rampager's Primordus Longbow",
13638: u"Knight's Primordus Longbow",
13639: u"Carrion Primordus Pistol",
13640: u"Cleric's Primordus Pistol",
13642: u"Valkyrie Primordus Pistol",
13643: u"Berserker's Primordus Pistol",
13644: u"Rampager's Primordus Pistol",
13645: u"Knight's Primordus Pistol",
13646: u"Carrion Primordus Rifle",
13647: u"Cleric's Primordus Rifle",
13649: u"Valkyrie Primordus Rifle",
13650: u"Berserker's Primordus Rifle",
13651: u"Rampager's Primordus Rifle",
13652: u"Knight's Primordus Rifle",
13653: u"Carrion Primordus Harpoon Gun",
13654: u"Cleric's Primordus Harpoon Gun",
13656: u"Valkyrie Primordus Harpoon Gun",
13657: u"Berserker's Primordus Harpoon Gun",
13658: u"Rampager's Primordus Harpoon Gun",
13659: u"Knight's Primordus Harpoon Gun",
13660: u"Carrion Primordus Dagger",
13661: u"Cleric's Primordus Dagger",
13663: u"Berserker's Primordus Dagger",
13664: u"Valkyrie Primordus Dagger",
13665: u"Rampager's Primordus Dagger",
13666: u"Knight's Primordus Dagger",
13667: u"Carrion Primordus Sword",
13668: u"Cleric's Primordus Sword",
13670: u"Berserker's Primordus Sword",
13671: u"Valkyrie Primordus Sword",
13672: u"Rampager's Primordus Sword",
13673: u"Knight's Primordus Sword",
13674: u"Carrion Primordus Mace",
13675: u"Cleric's Primordus Mace",
13677: u"Berserker's Primordus Mace",
13678: u"Valkyrie Primordus Mace",
13679: u"Rampager's Primordus Mace",
13680: u"Knight's Primordus Mace",
13681: u"Carrion Primordus Shield",
13682: u"Cleric's Primordus Shield",
13684: u"Berserker's Primordus Shield",
13685: u"Valkyrie Primordus Shield",
13686: u"Rampager's Primordus Shield",
13687: u"Knight's Primordus Shield",
13688: u"Carrion Primordus Axe",
13689: u"Cleric's Primordus Axe",
13691: u"Berserker's Primordus Axe",
13692: u"Valkyrie Primordus Axe",
13693: u"Rampager's Primordus Axe",
13694: u"Knight's Primordus Axe",
13695: u"Carrion Primordus Greatsword",
13696: u"Cleric's Primordus Greatsword",
13698: u"Berserker's Primordus Greatsword",
13699: u"Valkyrie Primordus Greatsword",
13700: u"Rampager's Primordus Greatsword",
13701: u"Knight's Primordus Greatsword",
13702: u"Carrion Primordus Maul",
13703: u"Cleric's Primordus Maul",
13705: u"Berserker's Primordus Maul",
13706: u"Valkyrie Primordus Maul",
13707: u"Rampager's Primordus Maul",
13708: u"Knight's Primordus Maul",
13709: u"Carrion Primordus Pike",
13710: u"Cleric's Primordus Pike",
13712: u"Berserker's Primordus Pike",
13713: u"Valkyrie Primordus Pike",
13714: u"Rampager's Primordus Pike",
13715: u"Knight's Primordus Pike",
13717: u"Mighty Green Wood Staff",
13718: u"Vital Green Wood Staff",
13719: u"Mighty Green Wood Staff",
13720: u"Vital Green Wood Staff",
13721: u"Precise Green Wood Staff",
13722: u"Resilient Green Wood Staff",
13723: u"Precise Green Wood Staff",
13724: u"Resilient Green Wood Staff",
13725: u"Vigorous Soft Wood Staff",
13726: u"Vigorous Soft Wood Staff",
13727: u"Mighty Green Wood Scepter",
13728: u"Vital Green Wood Scepter",
13729: u"Mighty Green Wood Scepter",
13730: u"Vital Green Wood Scepter",
13731: u"Precise Green Wood Scepter",
13732: u"Resilient Green Wood Scepter",
13733: u"Precise Green Wood Scepter",
13734: u"Resilient Green Wood Scepter",
13735: u"Vigorous Soft Wood Scepter",
13736: u"Vigorous Soft Wood Scepter",
13737: u"Mighty Green Wood Focus",
13738: u"Vital Green Wood Focus",
13739: u"Mighty Green Wood Focus",
13740: u"Vital Green Wood Focus",
13741: u"Precise Green Wood Focus",
13742: u"Resilient Green Wood Focus",
13743: u"Precise Green Wood Focus",
13744: u"Resilient Green Wood Focus",
13745: u"Vigorous Soft Wood Focus",
13746: u"Vigorous Soft Wood Focus",
13747: u"Mighty Green Wood Trident",
13748: u"Vital Green Wood Trident",
13749: u"Mighty Green Wood Trident",
13750: u"Vital Green Wood Trident",
13751: u"Precise Green Wood Trident",
13752: u"Resilient Green Wood Trident",
13753: u"Precise Green Wood Trident",
13754: u"Resilient Green Wood Trident",
13755: u"Vigorous Soft Wood Trident",
13756: u"Vigorous Soft Wood Trident",
13757: u"Healing Green Wood Staff",
13759: u"Strong Soft Wood Staff",
13760: u"Strong Soft Wood Staff",
13762: u"Honed Soft Wood Staff",
13763: u"Honed Soft Wood Staff",
13764: u"Hearty Soft Wood Staff",
13765: u"Strong Seasoned Wood Staff",
13766: u"Hearty Seasoned Wood Staff",
13767: u"Ravaging Seasoned Wood Staff",
13768: u"Honed Seasoned Wood Staff",
13770: u"Ravaging Seasoned Wood Staff",
13771: u"Honed Seasoned Wood Staff",
13773: u"Strong Seasoned Wood Staff",
13774: u"Vigorous Seasoned Wood Staff",
13775: u"Hearty Seasoned Wood Staff",
13776: u"Vigorous Seasoned Wood Staff",
13777: u"Healing Green Wood Trident",
13779: u"Strong Soft Wood Trident",
13780: u"Strong Soft Wood Trident",
13782: u"Honed Soft Wood Trident",
13783: u"Honed Soft Wood Trident",
13784: u"Hearty Soft Wood Trident",
13785: u"Strong Seasoned Wood Trident",
13786: u"Hearty Seasoned Wood Trident",
13787: u"Ravaging Seasoned Wood Trident",
13788: u"Honed Seasoned Wood Trident",
13790: u"Ravaging Seasoned Wood Trident",
13791: u"Honed Seasoned Wood Trident",
13793: u"Strong Seasoned Wood Trident",
13794: u"Vigorous Seasoned Wood Trident",
13795: u"Hearty Seasoned Wood Trident",
13796: u"Vigorous Seasoned Wood Trident",
13797: u"Healing Green Wood Scepter",
13799: u"Strong Soft Wood Scepter",
13800: u"Strong Soft Wood Scepter",
13802: u"Honed Soft Wood Scepter",
13803: u"Honed Soft Wood Scepter",
13804: u"Hearty Soft Wood Scepter",
13805: u"Strong Seasoned Wood Scepter",
13806: u"Hearty Seasoned Wood Scepter",
13807: u"Ravaging Seasoned Wood Scepter",
13808: u"Honed Seasoned Wood Scepter",
13810: u"Ravaging Seasoned Wood Scepter",
13811: u"Honed Seasoned Wood Scepter",
13813: u"Strong Seasoned Wood Scepter",
13814: u"Vigorous Seasoned Wood Scepter",
13815: u"Hearty Seasoned Wood Scepter",
13816: u"Vigorous Seasoned Wood Scepter",
13817: u"Healing Green Wood Focus",
13819: u"Strong Soft Wood Focus",
13820: u"Strong Soft Wood Focus",
13821: u"Honed Soft Wood Focus",
13823: u"Honed Soft Wood Focus",
13824: u"Hearty Soft Wood Focus",
13826: u"Strong Seasoned Wood Focus",
13827: u"Hearty Seasoned Wood Focus",
13828: u"Honed Seasoned Wood Focus",
13829: u"Ravaging Seasoned Wood Focus",
13830: u"Honed Seasoned Wood Focus",
13832: u"Ravaging Seasoned Wood Focus",
13833: u"Strong Seasoned Wood Focus",
13834: u"Hearty Seasoned Wood Focus",
13835: u"Vigorous Seasoned Wood Focus",
13836: u"Vigorous Seasoned Wood Focus",
13837: u"Malign Green Wood Staff",
13838: u"Malign Green Wood Staff",
13839: u"Healing Green Wood Staff",
13840: u"Ravaging Soft Wood Staff",
13841: u"Hearty Soft Wood Staff",
13842: u"Ravaging Soft Wood Staff",
13843: u"Malign Green Wood Trident",
13844: u"Malign Green Wood Trident",
13845: u"Healing Green Wood Trident",
13846: u"Ravaging Soft Wood Trident",
13847: u"Hearty Soft Wood Trident",
13848: u"Ravaging Soft Wood Trident",
13849: u"Malign Green Wood Scepter",
13850: u"Malign Green Wood Scepter",
13851: u"Healing Green Wood Scepter",
13852: u"Ravaging Soft Wood Scepter",
13853: u"Hearty Soft Wood Scepter",
13854: u"Ravaging Soft Wood Scepter",
13855: u"Malign Green Wood Focus",
13856: u"Malign Green Wood Focus",
13857: u"Healing Green Wood Focus",
13858: u"Ravaging Soft Wood Focus",
13859: u"Hearty Soft Wood Focus",
13860: u"Ravaging Soft Wood Focus",
13861: u"Berserker's Hard Wood Staff",
13862: u"Cleric's Hard Wood Staff",
13863: u"Rampager's Hard Wood Staff",
13865: u"Valkyrie Hard Wood Staff",
13866: u"Carrion Hard Wood Staff",
13867: u"Knight's Hard Wood Staff",
13868: u"Berserker's Hard Wood Staff",
13869: u"Cleric's Hard Wood Staff",
13870: u"Rampager's Hard Wood Staff",
13872: u"Valkyrie Hard Wood Staff",
13873: u"Carrion Hard Wood Staff",
13874: u"Knight's Hard Wood Staff",
13875: u"Cleric's Elder Wood Staff",
13876: u"Berserker's Elder Wood Staff",
13877: u"Rampager's Elder Wood Staff",
13878: u"Carrion Elder Wood Staff",
13880: u"Knight's Elder Wood Staff",
13881: u"Valkyrie Elder Wood Staff",
13882: u"Carrion Elder Wood Staff",
13883: u"Cleric's Elder Wood Staff",
13885: u"Berserker's Elder Wood Staff",
13886: u"Rampager's Elder Wood Staff",
13887: u"Knight's Elder Wood Staff",
13888: u"Valkyrie Elder Wood Staff",
13889: u"Carrion Krait Crook",
13890: u"Cleric's Krait Crook",
13892: u"Berserker's Krait Crook",
13893: u"Rampager's Krait Crook",
13894: u"Knight's Krait Crook",
13895: u"Valkyrie Krait Crook",
13896: u"Carrion Pearl Quarterstaff",
13897: u"Cleric's Pearl Quarterstaff",
13899: u"Berserker's Pearl Quarterstaff",
13900: u"Rampager's Pearl Quarterstaff",
13901: u"Knight's Pearl Quarterstaff",
13902: u"Valkyrie Pearl Quarterstaff",
13903: u"Carrion Pearl Rod",
13904: u"Cleric's Pearl Rod",
13906: u"Berserker's Pearl Rod",
13907: u"Rampager's Pearl Rod",
13908: u"Knight's Pearl Rod",
13909: u"Valkyrie Pearl Rod",
13910: u"Cleric's Elder Wood Scepter",
13911: u"Berserker's Elder Wood Scepter",
13912: u"Rampager's Elder Wood Scepter",
13913: u"Carrion Elder Wood Scepter",
13915: u"Knight's Elder Wood Scepter",
13916: u"Valkyrie Elder Wood Scepter",
13917: u"Carrion Elder Wood Scepter",
13918: u"Cleric's Elder Wood Scepter",
13920: u"Berserker's Elder Wood Scepter",
13921: u"Rampager's Elder Wood Scepter",
13922: u"Knight's Elder Wood Scepter",
13923: u"Valkyrie Elder Wood Scepter",
13924: u"Carrion Krait Wand",
13925: u"Cleric's Krait Wand",
13927: u"Berserker's Krait Wand",
13928: u"Rampager's Krait Wand",
13929: u"Knight's Krait Wand",
13930: u"Valkyrie Krait Wand",
13931: u"Berserker's Hard Wood Scepter",
13932: u"Cleric's Hard Wood Scepter",
13933: u"Rampager's Hard Wood Scepter",
13935: u"Valkyrie Hard Wood Scepter",
13936: u"Carrion Hard Wood Scepter",
13937: u"Knight's Hard Wood Scepter",
13938: u"Berserker's Hard Wood Scepter",
13939: u"Cleric's Hard Wood Scepter",
13940: u"Rampager's Hard Wood Scepter",
13942: u"Valkyrie Hard Wood Scepter",
13943: u"Carrion Hard Wood Scepter",
13944: u"Knight's Hard Wood Scepter",
13945: u"Berserker's Hard Wood Focus",
13946: u"Cleric's Hard Wood Focus",
13947: u"Rampager's Hard Wood Focus",
13949: u"Valkyrie Hard Wood Focus",
13950: u"Carrion Hard Wood Focus",
13951: u"Knight's Hard Wood Focus",
13952: u"Berserker's Hard Wood Focus",
13953: u"Cleric's Hard Wood Focus",
13954: u"Rampager's Hard Wood Focus",
13956: u"Valkyrie Hard Wood Focus",
13957: u"Carrion Hard Wood Focus",
13958: u"Knight's Hard Wood Focus",
13959: u"Cleric's Elder Wood Focus",
13960: u"Berserker's Elder Wood Focus",
13961: u"Rampager's Elder Wood Focus",
13962: u"Carrion Elder Wood Focus",
13964: u"Knight's Elder Wood Focus",
13965: u"Valkyrie Elder Wood Focus",
13966: u"Carrion Elder Wood Focus",
13967: u"Cleric's Elder Wood Focus",
13969: u"Berserker's Elder Wood Focus",
13970: u"Rampager's Elder Wood Focus",
13971: u"Knight's Elder Wood Focus",
13972: u"Valkyrie Elder Wood Focus",
13973: u"Carrion Krait Star",
13974: u"Cleric's Krait Star",
13976: u"Berserker's Krait Star",
13977: u"Rampager's Krait Star",
13978: u"Knight's Krait Star",
13979: u"Valkyrie Krait Star",
13980: u"Carrion Pearl Conch",
13981: u"Cleric's Pearl Conch",
13983: u"Berserker's Pearl Conch",
13984: u"Rampager's Pearl Conch",
13985: u"Knight's Pearl Conch",
13986: u"Valkyrie Pearl Conch",
13987: u"Carrion Pearl Trident",
13988: u"Cleric's Pearl Trident",
13990: u"Berserker's Pearl Trident",
13991: u"Rampager's Pearl Trident",
13992: u"Knight's Pearl Trident",
13993: u"Valkyrie Pearl Trident",
13994: u"Cleric's Elder Wood Trident",
13995: u"Berserker's Elder Wood Trident",
13996: u"Rampager's Elder Wood Trident",
13997: u"Carrion Elder Wood Trident",
13999: u"Knight's Elder Wood Trident",
14000: u"Valkyrie Elder Wood Trident",
14001: u"Carrion Elder Wood Trident",
14002: u"Cleric's Elder Wood Trident",
14004: u"Berserker's Elder Wood Trident",
14005: u"Rampager's Elder Wood Trident",
14006: u"Knight's Elder Wood Trident",
14007: u"Valkyrie Elder Wood Trident",
14008: u"Carrion Krait Trident",
14009: u"Cleric's Krait Trident",
14011: u"Berserker's Krait Trident",
14012: u"Rampager's Krait Trident",
14013: u"Knight's Krait Trident",
14014: u"Valkyrie Krait Trident",
14015: u"Berserker's Hard Wood Trident",
14016: u"Cleric's Hard Wood Trident",
14017: u"Rampager's Hard Wood Trident",
14019: u"Valkyrie Hard Wood Trident",
14020: u"Carrion Hard Wood Trident",
14021: u"Knight's Hard Wood Trident",
14022: u"Berserker's Hard Wood Trident",
14023: u"Cleric's Hard Wood Trident",
14024: u"Rampager's Hard Wood Trident",
14026: u"Valkyrie Hard Wood Trident",
14027: u"Carrion Hard Wood Trident",
14028: u"Knight's Hard Wood Trident",
14031: u"Mighty Green Wood Staff",
14032: u"Mighty Green Wood Trident",
14033: u"Mighty Green Wood Scepter",
14034: u"Mighty Green Wood Focus",
14035: u"Rejuvenating Soft Wood Staff",
14036: u"Rejuvenating Soft Wood Staff",
14037: u"Rejuvenating Seasoned Wood Staff",
14038: u"Rejuvenating Seasoned Wood Staff",
14039: u"Rejuvenating Soft Wood Trident",
14040: u"Rejuvenating Soft Wood Trident",
14041: u"Rejuvenating Seasoned Wood Trident",
14042: u"Rejuvenating Seasoned Wood Trident",
14043: u"Rejuvenating Soft Wood Scepter",
14044: u"Rejuvenating Soft Wood Scepter",
14045: u"Rejuvenating Seasoned Wood Scepter",
14046: u"Rejuvenating Seasoned Wood Scepter",
14047: u"Rejuvenating Soft Wood Focus",
14048: u"Rejuvenating Soft Wood Focus",
14049: u"Rejuvenating Seasoned Wood Focus",
14050: u"Rejuvenating Seasoned Wood Focus",
14051: u"Ravaging Bandit Spire",
14052: u"Rejuvenating Bandit Spire",
14053: u"Strong Bandit Spire",
14054: u"Vigorous Bandit Spire",
14056: u"Honed Bandit Spire",
14057: u"Hearty Bandit Spire",
14058: u"Ravaging Dredge Pillar",
14059: u"Rejuvenating Dredge Pillar",
14060: u"Strong Dredge Pillar",
14061: u"Vigorous Dredge Pillar",
14063: u"Honed Dredge Pillar",
14064: u"Hearty Dredge Pillar",
14065: u"Carrion Ogre Warstaff",
14066: u"Cleric's Ogre Warstaff",
14068: u"Valkyrie Ogre Warstaff",
14069: u"Berserker's Ogre Warstaff",
14070: u"Rampager's Ogre Warstaff",
14071: u"Knight's Ogre Warstaff",
14072: u"Ravaging Bandit Trident",
14073: u"Rejuvenating Bandit Trident",
14074: u"Strong Bandit Trident",
14075: u"Vigorous Bandit Trident",
14077: u"Honed Bandit Trident",
14078: u"Hearty Bandit Trident",
14079: u"Ravaging Dredge Trident",
14080: u"Rejuvenating Dredge Trident",
14081: u"Strong Dredge Trident",
14082: u"Vigorous Dredge Trident",
14084: u"Honed Dredge Trident",
14085: u"Hearty Dredge Trident",
14086: u"Carrion Ogre Trident",
14087: u"Cleric's Ogre Trident",
14089: u"Valkyrie Ogre Trident",
14090: u"Berserker's Ogre Trident",
14091: u"Rampager's Ogre Trident",
14092: u"Knight's Ogre Trident",
14093: u"Ravaging Bandit Baton",
14094: u"Rejuvenating Bandit Baton",
14095: u"Strong Bandit Baton",
14096: u"Vigorous Bandit Baton",
14098: u"Honed Bandit Baton",
14099: u"Hearty Bandit Baton",
14100: u"Ravaging Dredge Baton",
14101: u"Rejuvenating Dredge Baton",
14102: u"Strong Dredge Baton",
14103: u"Vigorous Dredge Baton",
14105: u"Honed Dredge Baton",
14106: u"Hearty Dredge Baton",
14107: u"Carrion Ogre Truncheon",
14108: u"Cleric's Ogre Truncheon",
14110: u"Valkyrie Ogre Truncheon",
14111: u"Berserker's Ogre Truncheon",
14112: u"Rampager's Ogre Truncheon",
14113: u"Knight's Ogre Truncheon",
14114: u"Ravaging Bandit Focus",
14115: u"Rejuvenating Bandit Focus",
14116: u"Strong Bandit Focus",
14117: u"Vigorous Bandit Focus",
14119: u"Honed Bandit Focus",
14120: u"Hearty Bandit Focus",
14121: u"Ravaging Dredge Canary",
14122: u"Rejuvenating Dredge Canary",
14123: u"Strong Dredge Canary",
14124: u"Vigorous Dredge Canary",
14126: u"Honed Dredge Canary",
14127: u"Hearty Dredge Canary",
14128: u"Carrion Ogre Effigy",
14129: u"Cleric's Ogre Effigy",
14131: u"Valkyrie Ogre Effigy",
14132: u"Berserker's Ogre Effigy",
14133: u"Rampager's Ogre Effigy",
14134: u"Knight's Ogre Effigy",
14154: u"Mighty Green Wood Longbow",
14155: u"Precise Green Wood Longbow",
14156: u"Vital Green Wood Longbow",
14157: u"Resilient Green Wood Longbow",
14158: u"Vital Green Wood Longbow",
14159: u"Mighty Green Wood Longbow",
14160: u"Precise Green Wood Longbow",
14161: u"Resilient Green Wood Longbow",
14162: u"Vigorous Soft Wood Longbow",
14163: u"Vigorous Soft Wood Longbow",
14164: u"Mighty Green Wood Short Bow",
14165: u"Vital Green Wood Short Bow",
14166: u"Mighty Green Wood Short Bow",
14167: u"Vital Green Wood Short Bow",
14168: u"Precise Green Wood Short Bow",
14169: u"Resilient Green Wood Short Bow",
14170: u"Precise Green Wood Short Bow",
14171: u"Resilient Green Wood Short Bow",
14172: u"Vigorous Soft Wood Short Bow",
14173: u"Vigorous Soft Wood Short Bow",
14174: u"Mighty Bronze Rifle",
14175: u"Vital Bronze Rifle",
14176: u"Precise Bronze Rifle",
14177: u"Resilient Bronze Rifle",
14178: u"Precise Bronze Rifle",
14179: u"Resilient Bronze Rifle",
14180: u"Mighty Bronze Rifle",
14181: u"Vital Bronze Rifle",
14182: u"Vigorous Iron Rifle",
14183: u"Vigorous Iron Rifle",
14184: u"Mighty Bronze Pistol",
14185: u"Vital Bronze Pistol",
14186: u"Precise Bronze Pistol",
14187: u"Resilient Bronze Pistol",
14188: u"Mighty Bronze Pistol",
14189: u"Vital Bronze Pistol",
14190: u"Precise Bronze Pistol",
14191: u"Resilient Bronze Pistol",
14192: u"Vigorous Iron Pistol",
14193: u"Vigorous Iron Pistol",
14194: u"Mighty Green Wood Torch",
14195: u"Vital Green Wood Torch",
14196: u"Mighty Green Wood Torch",
14197: u"Vital Green Wood Torch",
14198: u"Precise Green Wood Torch",
14199: u"Resilient Green Wood Torch",
14200: u"Precise Green Wood Torch",
14201: u"Resilient Green Wood Torch",
14202: u"Vigorous Soft Wood Torch",
14203: u"Vigorous Soft Wood Torch",
14204: u"Mighty Green Wood Warhorn",
14205: u"Vital Green Wood Warhorn",
14206: u"Mighty Green Wood Warhorn",
14207: u"Vital Green Wood Warhorn",
14208: u"Precise Green Wood Warhorn",
14209: u"Resilient Green Wood Warhorn",
14210: u"Precise Green Wood Warhorn",
14211: u"Resilient Green Wood Warhorn",
14212: u"Vigorous Soft Wood Warhorn",
14213: u"Vigorous Soft Wood Warhorn",
14214: u"Mighty Green Wood Harpoon Gun",
14215: u"Vital Green Wood Harpoon Gun",
14216: u"Mighty Green Wood Harpoon Gun",
14217: u"Vital Green Wood Harpoon Gun",
14218: u"Precise Green Wood Harpoon Gun",
14219: u"Resilient Green Wood Harpoon Gun",
14220: u"Precise Green Wood Harpoon Gun",
14221: u"Resilient Green Wood Harpoon Gun",
14222: u"Vigorous Soft Wood Harpoon Gun",
14223: u"Vigorous Soft Wood Harpoon Gun",
14224: u"Healing Green Wood Short Bow",
14226: u"Strong Soft Wood Short Bow",
14227: u"Strong Soft Wood Short Bow",
14228: u"Honed Soft Wood Short Bow",
14230: u"Honed Soft Wood Short Bow",
14231: u"Hearty Soft Wood Short Bow",
14232: u"Strong Seasoned Wood Short Bow",
14233: u"Hearty Seasoned Wood Short Bow",
14234: u"Ravaging Seasoned Wood Short Bow",
14235: u"Honed Seasoned Wood Short Bow",
14237: u"Ravaging Seasoned Wood Short Bow",
14238: u"Honed Seasoned Wood Short Bow",
14240: u"Strong Seasoned Wood Short Bow",
14241: u"Vigorous Seasoned Wood Short Bow",
14242: u"Hearty Seasoned Wood Short Bow",
14243: u"Vigorous Seasoned Wood Short Bow",
14244: u"Healing Green Wood Warhorn",
14246: u"Strong Soft Wood Warhorn",
14247: u"Strong Soft Wood Warhorn",
14248: u"Honed Soft Wood Warhorn",
14250: u"Honed Soft Wood Warhorn",
14251: u"Hearty Soft Wood Warhorn",
14252: u"Strong Seasoned Wood Warhorn",
14253: u"Hearty Seasoned Wood Warhorn",
14254: u"Ravaging Seasoned Wood Warhorn",
14255: u"Honed Seasoned Wood Warhorn",
14257: u"Ravaging Seasoned Wood Warhorn",
14258: u"Honed Seasoned Wood Warhorn",
14260: u"Strong Seasoned Wood Warhorn",
14261: u"Vigorous Seasoned Wood Warhorn",
14262: u"Hearty Seasoned Wood Warhorn",
14263: u"Vigorous Seasoned Wood Warhorn",
14264: u"Healing Green Wood Torch",
14266: u"Strong Soft Wood Torch",
14267: u"Strong Soft Wood Torch",
14268: u"Honed Soft Wood Torch",
14270: u"Honed Soft Wood Torch",
14271: u"Hearty Soft Wood Torch",
14272: u"Strong Seasoned Wood Torch",
14273: u"Hearty Seasoned Wood Torch",
14274: u"Ravaging Seasoned Wood Torch",
14275: u"Honed Seasoned Wood Torch",
14277: u"Ravaging Seasoned Wood Torch",
14278: u"Honed Seasoned Wood Torch",
14280: u"Strong Seasoned Wood Torch",
14281: u"Vigorous Seasoned Wood Torch",
14282: u"Hearty Seasoned Wood Torch",
14283: u"Vigorous Seasoned Wood Torch",
14284: u"Healing Green Wood Longbow",
14286: u"Strong Soft Wood Longbow",
14287: u"Strong Soft Wood Longbow",
14288: u"Honed Soft Wood Longbow",
14290: u"Honed Soft Wood Longbow",
14291: u"Hearty Soft Wood Longbow",
14292: u"Strong Seasoned Wood Longbow",
14293: u"Hearty Seasoned Wood Longbow",
14294: u"Ravaging Seasoned Wood Longbow",
14295: u"Honed Seasoned Wood Longbow",
14297: u"Ravaging Seasoned Wood Longbow",
14298: u"Honed Seasoned Wood Longbow",
14300: u"Strong Seasoned Wood Longbow",
14301: u"Vigorous Seasoned Wood Longbow",
14302: u"Hearty Seasoned Wood Longbow",
14303: u"Vigorous Seasoned Wood Longbow",
14304: u"Healing Bronze Pistol",
14306: u"Strong Iron Pistol",
14307: u"Strong Iron Pistol",
14308: u"Honed Iron Pistol",
14310: u"Honed Iron Pistol",
14311: u"Hearty Iron Pistol",
14312: u"Strong Steel Pistol",
14313: u"Hearty Steel Pistol",
14314: u"Ravaging Steel Pistol",
14315: u"Honed Steel Pistol",
14317: u"Ravaging Steel Pistol",
14318: u"Honed Steel Pistol",
14320: u"Strong Steel Pistol",
14321: u"Vigorous Steel Pistol",
14322: u"Hearty Steel Pistol",
14323: u"Vigorous Steel Pistol",
14324: u"Healing Bronze Rifle",
14326: u"Strong Iron Rifle",
14327: u"Strong Iron Rifle",
14328: u"Honed Iron Rifle",
14330: u"Honed Iron Rifle",
14331: u"Hearty Iron Rifle",
14332: u"Strong Steel Rifle",
14333: u"Hearty Steel Rifle",
14334: u"Ravaging Steel Rifle",
14335: u"Honed Steel Rifle",
14337: u"Ravaging Steel Rifle",
14338: u"Honed Steel Rifle",
14340: u"Strong Steel Rifle",
14341: u"Vigorous Steel Rifle",
14342: u"Hearty Steel Rifle",
14343: u"Vigorous Steel Rifle",
14344: u"Healing Green Wood Harpoon Gun",
14346: u"Strong Soft Wood Harpoon Gun",
14347: u"Strong Soft Wood Harpoon Gun",
14348: u"Honed Soft Wood Harpoon Gun",
14350: u"Honed Soft Wood Harpoon Gun",
14351: u"Hearty Soft Wood Harpoon Gun",
14352: u"Strong Seasoned Wood Harpoon Gun",
14353: u"Hearty Seasoned Wood Harpoon Gun",
14354: u"Ravaging Seasoned Wood Harpoon Gun",
14355: u"Honed Seasoned Wood Harpoon Gun",
14357: u"Ravaging Seasoned Wood Harpoon Gun",
14358: u"Honed Seasoned Wood Harpoon Gun",
14360: u"Strong Seasoned Wood Harpoon Gun",
14361: u"Vigorous Seasoned Wood Harpoon Gun",
14362: u"Hearty Seasoned Wood Harpoon Gun",
14363: u"Vigorous Seasoned Wood Harpoon Gun",
14364: u"Malign Green Wood Short Bow",
14365: u"Malign Green Wood Short Bow",
14366: u"Healing Green Wood Short Bow",
14367: u"Ravaging Soft Wood Short Bow",
14368: u"Hearty Soft Wood Short Bow",
14369: u"Ravaging Soft Wood Short Bow",
14370: u"Malign Green Wood Warhorn",
14371: u"Malign Green Wood Warhorn",
14372: u"Healing Green Wood Warhorn",
14373: u"Ravaging Soft Wood Warhorn",
14374: u"Hearty Soft Wood Warhorn",
14375: u"Ravaging Soft Wood Warhorn",
14376: u"Malign Green Wood Torch",
14377: u"Malign Green Wood Torch",
14378: u"Healing Green Wood Torch",
14379: u"Ravaging Soft Wood Torch",
14380: u"Hearty Soft Wood Torch",
14381: u"Ravaging Soft Wood Torch",
14382: u"Malign Green Wood Longbow",
14383: u"Malign Green Wood Longbow",
14384: u"Healing Green Wood Longbow",
14385: u"Ravaging Soft Wood Longbow",
14386: u"Hearty Soft Wood Longbow",
14387: u"Ravaging Soft Wood Longbow",
14388: u"Malign Bronze Pistol",
14389: u"Malign Bronze Pistol",
14390: u"Healing Bronze Pistol",
14391: u"Ravaging Iron Pistol",
14392: u"Hearty Iron Pistol",
14393: u"Ravaging Iron Pistol",
14394: u"Malign Bronze Rifle",
14395: u"Malign Bronze Rifle",
14396: u"Healing Bronze Rifle",
14397: u"Ravaging Iron Rifle",
14398: u"Hearty Iron Rifle",
14399: u"Ravaging Iron Rifle",
14400: u"Malign Green Wood Harpoon Gun",
14401: u"Malign Green Wood Harpoon Gun",
14402: u"Healing Green Wood Harpoon Gun",
14403: u"Ravaging Soft Wood Harpoon Gun",
14404: u"Hearty Soft Wood Harpoon Gun",
14405: u"Ravaging Soft Wood Harpoon Gun",
14406: u"Carrion Hard Wood Longbow",
14407: u"Carrion Hard Wood Longbow",
14408: u"Valkyrie Hard Wood Longbow",
14409: u"Valkyrie Hard Wood Longbow",
14410: u"Knight's Hard Wood Longbow",
14411: u"Knight's Hard Wood Longbow",
14414: u"Rampager's Hard Wood Longbow",
14415: u"Rampager's Hard Wood Longbow",
14416: u"Cleric's Hard Wood Longbow",
14417: u"Cleric's Hard Wood Longbow",
14418: u"Berserker's Hard Wood Longbow",
14419: u"Berserker's Hard Wood Longbow",
14420: u"Cleric's Krait Recurve Bow",
14422: u"Berserker's Krait Recurve Bow",
14423: u"Rampager's Krait Recurve Bow",
14424: u"Cleric's Elder Wood Longbow",
14426: u"Berserker's Elder Wood Longbow",
14427: u"Rampager's Elder Wood Longbow",
14428: u"Carrion Krait Recurve Bow",
14430: u"Cleric's Elder Wood Longbow",
14431: u"Berserker's Elder Wood Longbow",
14432: u"Rampager's Elder Wood Longbow",
14433: u"Carrion Elder Wood Longbow",
14434: u"Carrion Elder Wood Longbow",
14435: u"Knight's Elder Wood Longbow",
14436: u"Valkyrie Elder Wood Longbow",
14437: u"Knight's Krait Recurve Bow",
14438: u"Knight's Elder Wood Longbow",
14439: u"Valkyrie Krait Recurve Bow",
14440: u"Valkyrie Elder Wood Longbow",
14441: u"Cleric's Pearl Stinger",
14443: u"Berserker's Pearl Stinger",
14444: u"Rampager's Pearl Stinger",
14445: u"Carrion Pearl Stinger",
14446: u"Knight's Pearl Stinger",
14447: u"Valkyrie Pearl Stinger",
14448: u"Carrion Pearl Needler",
14449: u"Cleric's Pearl Needler",
14451: u"Berserker's Pearl Needler",
14452: u"Rampager's Pearl Needler",
14453: u"Knight's Pearl Needler",
14454: u"Valkyrie Pearl Needler",
14455: u"Carrion Elder Wood Short Bow",
14456: u"Knight's Elder Wood Short Bow",
14457: u"Valkyrie Elder Wood Short Bow",
14458: u"Cleric's Elder Wood Short Bow",
14460: u"Berserker's Elder Wood Short Bow",
14461: u"Rampager's Elder Wood Short Bow",
14462: u"Carrion Elder Wood Short Bow",
14463: u"Cleric's Elder Wood Short Bow",
14465: u"Berserker's Elder Wood Short Bow",
14466: u"Rampager's Elder Wood Short Bow",
14467: u"Knight's Elder Wood Short Bow",
14468: u"Valkyrie Elder Wood Short Bow",
14469: u"Carrion Krait Short Bow",
14470: u"Cleric's Krait Short Bow",
14472: u"Berserker's Krait Short Bow",
14473: u"Rampager's Krait Short Bow",
14474: u"Knight's Krait Short Bow",
14475: u"Valkyrie Krait Short Bow",
14476: u"Carrion Hard Wood Short Bow",
14477: u"Knight's Hard Wood Short Bow",
14478: u"Valkyrie Hard Wood Short Bow",
14479: u"Cleric's Hard Wood Short Bow",
14481: u"Berserker's Hard Wood Short Bow",
14482: u"Rampager's Hard Wood Short Bow",
14483: u"Rampager's Hard Wood Short Bow",
14484: u"Knight's Hard Wood Short Bow",
14485: u"Valkyrie Hard Wood Short Bow",
14486: u"Carrion Hard Wood Short Bow",
14487: u"Cleric's Hard Wood Short Bow",
14489: u"Berserker's Hard Wood Short Bow",
14490: u"Carrion Pearl Siren",
14491: u"Cleric's Pearl Siren",
14493: u"Berserker's Pearl Siren",
14494: u"Rampager's Pearl Siren",
14495: u"Knight's Pearl Siren",
14496: u"Valkyrie Pearl Siren",
14497: u"Carrion Elder Wood Warhorn",
14498: u"Knight's Elder Wood Warhorn",
14499: u"Valkyrie Elder Wood Warhorn",
14500: u"Cleric's Elder Wood Warhorn",
14502: u"Berserker's Elder Wood Warhorn",
14503: u"Rampager's Elder Wood Warhorn",
14504: u"Carrion Elder Wood Warhorn",
14505: u"Cleric's Elder Wood Warhorn",
14507: u"Berserker's Elder Wood Warhorn",
14508: u"Rampager's Elder Wood Warhorn",
14509: u"Knight's Elder Wood Warhorn",
14510: u"Valkyrie Elder Wood Warhorn",
14511: u"Carrion Krait Whelk",
14512: u"Cleric's Krait Whelk",
14514: u"Berserker's Krait Whelk",
14515: u"Rampager's Krait Whelk",
14516: u"Knight's Krait Whelk",
14517: u"Valkyrie Krait Whelk",
14518: u"Carrion Hard Wood Warhorn",
14519: u"Knight's Hard Wood Warhorn",
14520: u"Valkyrie Hard Wood Warhorn",
14521: u"Cleric's Hard Wood Warhorn",
14523: u"Berserker's Hard Wood Warhorn",
14524: u"Rampager's Hard Wood Warhorn",
14525: u"Rampager's Hard Wood Warhorn",
14526: u"Knight's Hard Wood Warhorn",
14527: u"Valkyrie Hard Wood Warhorn",
14528: u"Carrion Hard Wood Warhorn",
14529: u"Cleric's Hard Wood Warhorn",
14531: u"Berserker's Hard Wood Warhorn",
14532: u"Carrion Hard Wood Torch",
14533: u"Knight's Hard Wood Torch",
14534: u"Valkyrie Hard Wood Torch",
14535: u"Cleric's Hard Wood Torch",
14537: u"Berserker's Hard Wood Torch",
14538: u"Rampager's Hard Wood Torch",
14539: u"Rampager's Hard Wood Torch",
14540: u"Knight's Hard Wood Torch",
14541: u"Valkyrie Hard Wood Torch",
14542: u"Carrion Hard Wood Torch",
14543: u"Cleric's Hard Wood Torch",
14545: u"Berserker's Hard Wood Torch",
14546: u"Carrion Elder Wood Torch",
14547: u"Knight's Elder Wood Torch",
14548: u"Valkyrie Elder Wood Torch",
14549: u"Cleric's Elder Wood Torch",
14551: u"Berserker's Elder Wood Torch",
14552: u"Rampager's Elder Wood Torch",
14553: u"Carrion Elder Wood Torch",
14554: u"Cleric's Elder Wood Torch",
14556: u"Berserker's Elder Wood Torch",
14557: u"Rampager's Elder Wood Torch",
14558: u"Knight's Elder Wood Torch",
14559: u"Valkyrie Elder Wood Torch",
14560: u"Carrion Krait Brazier",
14561: u"Cleric's Krait Brazier",
14563: u"Berserker's Krait Brazier",
14564: u"Rampager's Krait Brazier",
14565: u"Knight's Krait Brazier",
14566: u"Valkyrie Krait Brazier",
14567: u"Carrion Pearl Brazier",
14568: u"Cleric's Pearl Brazier",
14570: u"Berserker's Pearl Brazier",
14571: u"Rampager's Pearl Brazier",
14572: u"Knight's Pearl Brazier",
14573: u"Valkyrie Pearl Brazier",
14574: u"Carrion Pearl Handcannon",
14575: u"Cleric's Pearl Handcannon",
14577: u"Berserker's Pearl Handcannon",
14578: u"Rampager's Pearl Handcannon",
14579: u"Knight's Pearl Handcannon",
14580: u"Valkyrie Pearl Handcannon",
14581: u"Carrion Mithril Pistol",
14582: u"Knight's Mithril Pistol",
14583: u"Valkyrie Mithril Pistol",
14584: u"Cleric's Mithril Pistol",
14586: u"Berserker's Mithril Pistol",
14587: u"Rampager's Mithril Pistol",
14588: u"Carrion Mithril Pistol",
14589: u"Cleric's Mithril Pistol",
14591: u"Berserker's Mithril Pistol",
14592: u"Rampager's Mithril Pistol",
14593: u"Knight's Mithril Pistol",
14594: u"Valkyrie Mithril Pistol",
14595: u"Carrion Krait Handgun",
14596: u"Cleric's Krait Handgun",
14598: u"Berserker's Krait Handgun",
14599: u"Rampager's Krait Handgun",
14600: u"Knight's Krait Handgun",
14601: u"Valkyrie Krait Handgun",
14602: u"Carrion Darksteel Pistol",
14603: u"Knight's Darksteel Pistol",
14604: u"Valkyrie Darksteel Pistol",
14605: u"Cleric's Darksteel Pistol",
14607: u"Berserker's Darksteel Pistol",
14608: u"Rampager's Darksteel Pistol",
14609: u"Rampager's Darksteel Pistol",
14610: u"Knight's Darksteel Pistol",
14611: u"Valkyrie Darksteel Pistol",
14612: u"Carrion Darksteel Pistol",
14613: u"Cleric's Darksteel Pistol",
14615: u"Berserker's Darksteel Pistol",
14616: u"Carrion Darksteel Rifle",
14617: u"Knight's Darksteel Rifle",
14618: u"Valkyrie Darksteel Rifle",
14619: u"Cleric's Darksteel Rifle",
14621: u"Berserker's Darksteel Rifle",
14622: u"Rampager's Darksteel Rifle",
14623: u"Rampager's Darksteel Rifle",
14624: u"Knight's Darksteel Rifle",
14625: u"Valkyrie Darksteel Rifle",
14626: u"Carrion Darksteel Rifle",
14627: u"Cleric's Darksteel Rifle",
14629: u"Berserker's Darksteel Rifle",
14630: u"Carrion Mithril Rifle",
14631: u"Knight's Mithril Rifle",
14632: u"Valkyrie Mithril Rifle",
14633: u"Cleric's Mithril Rifle",
14635: u"Berserker's Mithril Rifle",
14636: u"Rampager's Mithril Rifle",
14637: u"Carrion Mithril Rifle",
14638: u"Cleric's Mithril Rifle",
14640: u"Berserker's Mithril Rifle",
14641: u"Rampager's Mithril Rifle",
14642: u"Knight's Mithril Rifle",
14643: u"Valkyrie Mithril Rifle",
14644: u"Carrion Krait Shooter",
14645: u"Cleric's Krait Shooter",
14647: u"Berserker's Krait Shooter",
14648: u"Rampager's Krait Shooter",
14649: u"Knight's Krait Shooter",
14650: u"Valkyrie Krait Shooter",
14651: u"Carrion Pearl Blunderbuss",
14652: u"Cleric's Pearl Blunderbuss",
14654: u"Berserker's Pearl Blunderbuss",
14655: u"Rampager's Pearl Blunderbuss",
14656: u"Knight's Pearl Blunderbuss",
14657: u"Valkyrie Pearl Blunderbuss",
14658: u"Carrion Pearl Speargun",
14659: u"Cleric's Pearl Speargun",
14661: u"Berserker's Pearl Speargun",
14662: u"Rampager's Pearl Speargun",
14663: u"Knight's Pearl Speargun",
14664: u"Valkyrie Pearl Speargun",
14665: u"Carrion Elder Wood Harpoon Gun",
14666: u"Knight's Elder Wood Harpoon Gun",
14667: u"Valkyrie Elder Wood Harpoon Gun",
14668: u"Cleric's Elder Wood Harpoon Gun",
14670: u"Berserker's Elder Wood Harpoon Gun",
14671: u"Rampager's Elder Wood Harpoon Gun",
14672: u"Carrion Elder Wood Harpoon Gun",
14673: u"Cleric's Elder Wood Harpoon Gun",
14675: u"Berserker's Elder Wood Harpoon Gun",
14676: u"Rampager's Elder Wood Harpoon Gun",
14677: u"Knight's Elder Wood Harpoon Gun",
14678: u"Valkyrie Elder Wood Harpoon Gun",
14679: u"Carrion Krait Harpoon Gun",
14680: u"Cleric's Krait Harpoon Gun",
14682: u"Berserker's Krait Harpoon Gun",
14683: u"Rampager's Krait Harpoon Gun",
14684: u"Knight's Krait Harpoon Gun",
14685: u"Valkyrie Krait Harpoon Gun",
14686: u"Carrion Hard Wood Harpoon Gun",
14687: u"Knight's Hard Wood Harpoon Gun",
14688: u"Valkyrie Hard Wood Harpoon Gun",
14689: u"Cleric's Hard Wood Harpoon Gun",
14691: u"Berserker's Hard Wood Harpoon Gun",
14692: u"Rampager's Hard Wood Harpoon Gun",
14693: u"Rampager's Hard Wood Harpoon Gun",
14694: u"Knight's Hard Wood Harpoon Gun",
14695: u"Valkyrie Hard Wood Harpoon Gun",
14696: u"Carrion Hard Wood Harpoon Gun",
14697: u"Cleric's Hard Wood Harpoon Gun",
14699: u"Berserker's Hard Wood Harpoon Gun",
14700: u"Mighty Green Wood Short Bow",
14701: u"Mighty Green Wood Warhorn",
14702: u"Mighty Green Wood Torch",
14703: u"Mighty Green Wood Longbow",
14704: u"Mighty Bronze Pistol",
14705: u"Mighty Bronze Rifle",
14706: u"Mighty Green Wood Harpoon Gun",
14707: u"Rejuvenating Soft Wood Short Bow",
14708: u"Rejuvenating Soft Wood Short Bow",
14709: u"Rejuvenating Seasoned Wood Short Bow",
14710: u"Rejuvenating Seasoned Wood Short Bow",
14711: u"Rejuvenating Soft Wood Warhorn",
14712: u"Rejuvenating Soft Wood Warhorn",
14713: u"Rejuvenating Seasoned Wood Warhorn",
14714: u"Rejuvenating Seasoned Wood Warhorn",
14715: u"Rejuvenating Soft Wood Torch",
14716: u"Rejuvenating Soft Wood Torch",
14717: u"Rejuvenating Seasoned Wood Torch",
14718: u"Rejuvenating Seasoned Wood Torch",
14719: u"Rejuvenating Soft Wood Longbow",
14720: u"Rejuvenating Soft Wood Longbow",
14721: u"Rejuvenating Seasoned Wood Longbow",
14722: u"Rejuvenating Seasoned Wood Longbow",
14723: u"Rejuvenating Iron Pistol",
14724: u"Rejuvenating Iron Pistol",
14725: u"Rejuvenating Steel Pistol",
14726: u"Rejuvenating Steel Pistol",
14727: u"Rejuvenating Iron Rifle",
14728: u"Rejuvenating Iron Rifle",
14729: u"Rejuvenating Steel Rifle",
14730: u"Rejuvenating Steel Rifle",
14731: u"Rejuvenating Soft Wood Harpoon Gun",
14732: u"Rejuvenating Soft Wood Harpoon Gun",
14733: u"Rejuvenating Seasoned Wood Harpoon Gun",
14734: u"Rejuvenating Seasoned Wood Harpoon Gun",
14735: u"Ravaging Bandit Short Bow",
14736: u"Rejuvenating Bandit Short Bow",
14737: u"Honed Bandit Short Bow",
14739: u"Strong Bandit Short Bow",
14740: u"Vigorous Bandit Short Bow",
14741: u"Hearty Bandit Short Bow",
14742: u"Ravaging Dredge Short Bow",
14743: u"Rejuvenating Dredge Short Bow",
14744: u"Honed Dredge Short Bow",
14746: u"Strong Dredge Short Bow",
14747: u"Vigorous Dredge Short Bow",
14748: u"Hearty Dredge Short Bow",
14749: u"Carrion Ogre Short Bow",
14750: u"Cleric's Ogre Short Bow",
14752: u"Valkyrie Ogre Short Bow",
14753: u"Berserker's Ogre Short Bow",
14754: u"Rampager's Ogre Short Bow",
14755: u"Knight's Ogre Short Bow",
14756: u"Ravaging Bandit Bugle",
14757: u"Rejuvenating Bandit Bugle",
14758: u"Honed Bandit Bugle",
14760: u"Strong Bandit Bugle",
14761: u"Vigorous Bandit Bugle",
14762: u"Hearty Bandit Bugle",
14763: u"Ravaging Dredge Double Horn",
14764: u"Rejuvenating Dredge Double Horn",
14765: u"Honed Dredge Double Horn",
14767: u"Strong Dredge Double Horn",
14768: u"Vigorous Dredge Double Horn",
14769: u"Hearty Dredge Double Horn",
14770: u"Carrion Ogre Harbinger",
14771: u"Cleric's Ogre Harbinger",
14773: u"Valkyrie Ogre Harbinger",
14774: u"Berserker's Ogre Harbinger",
14775: u"Rampager's Ogre Harbinger",
14776: u"Knight's Ogre Harbinger",
14777: u"Ravaging Bandit Torch",
14778: u"Rejuvenating Bandit Torch",
14779: u"Honed Bandit Torch",
14781: u"Strong Bandit Torch",
14782: u"Vigorous Bandit Torch",
14783: u"Hearty Bandit Torch",
14784: u"Ravaging Dredge Lamp",
14785: u"Rejuvenating Dredge Lamp",
14786: u"Honed Dredge Lamp",
14788: u"Strong Dredge Lamp",
14789: u"Vigorous Dredge Lamp",
14790: u"Hearty Dredge Lamp",
14791: u"Carrion Ogre Blaze",
14792: u"Cleric's Ogre Blaze",
14794: u"Valkyrie Ogre Blaze",
14795: u"Berserker's Ogre Blaze",
14796: u"Rampager's Ogre Blaze",
14797: u"Knight's Ogre Blaze",
14798: u"Ravaging Bandit Longbow",
14799: u"Rejuvenating Bandit Longbow",
14800: u"Honed Bandit Longbow",
14802: u"Strong Bandit Longbow",
14803: u"Vigorous Bandit Longbow",
14804: u"Hearty Bandit Longbow",
14805: u"Ravaging Dredge Reflex Bow",
14806: u"Rejuvenating Dredge Reflex Bow",
14807: u"Honed Dredge Reflex Bow",
14809: u"Strong Dredge Reflex Bow",
14810: u"Vigorous Dredge Reflex Bow",
14811: u"Hearty Dredge Reflex Bow",
14812: u"Carrion Ogre Hornbow",
14813: u"Cleric's Ogre Hornbow",
14815: u"Valkyrie Ogre Hornbow",
14816: u"Berserker's Ogre Hornbow",
14817: u"Rampager's Ogre Hornbow",
14818: u"Knight's Ogre Hornbow",
14819: u"Ravaging Bandit Revolver",
14820: u"Rejuvenating Bandit Revolver",
14821: u"Honed Bandit Revolver",
14823: u"Strong Bandit Revolver",
14824: u"Vigorous Bandit Revolver",
14825: u"Hearty Bandit Revolver",
14826: u"Ravaging Dredge Firearm",
14827: u"Rejuvenating Dredge Firearm",
14828: u"Honed Dredge Firearm",
14830: u"Strong Dredge Firearm",
14831: u"Vigorous Dredge Firearm",
14832: u"Hearty Dredge Firearm",
14833: u"Carrion Ogre Flintlock",
14834: u"Cleric's Ogre Flintlock",
14836: u"Valkyrie Ogre Flintlock",
14837: u"Berserker's Ogre Flintlock",
14838: u"Rampager's Ogre Flintlock",
14839: u"Knight's Ogre Flintlock",
14840: u"Ravaging Bandit Musket",
14841: u"Rejuvenating Bandit Musket",
14842: u"Honed Bandit Musket",
14844: u"Strong Bandit Musket",
14845: u"Vigorous Bandit Musket",
14846: u"Hearty Bandit Musket",
14847: u"Ravaging Dredge Boomstick",
14848: u"Rejuvenating Dredge Boomstick",
14849: u"Honed Dredge Boomstick",
14851: u"Strong Dredge Boomstick",
14852: u"Vigorous Dredge Boomstick",
14853: u"Hearty Dredge Boomstick",
14854: u"Carrion Ogre Blaster",
14855: u"Cleric's Ogre Blaster",
14857: u"Valkyrie Ogre Blaster",
14858: u"Berserker's Ogre Blaster",
14859: u"Rampager's Ogre Blaster",
14860: u"Knight's Ogre Blaster",
14861: u"Ravaging Bandit Harpoon Gun",
14862: u"Rejuvenating Bandit Harpoon Gun",
14863: u"Honed Bandit Harpoon Gun",
14865: u"Strong Bandit Harpoon Gun",
14866: u"Vigorous Bandit Harpoon Gun",
14867: u"Hearty Bandit Harpoon Gun",
14868: u"Ravaging Dredge Harpoon Gun",
14869: u"Rejuvenating Dredge Harpoon Gun",
14870: u"Honed Dredge Harpoon Gun",
14872: u"Strong Dredge Harpoon Gun",
14873: u"Vigorous Dredge Harpoon Gun",
14874: u"Hearty Dredge Harpoon Gun",
14875: u"Carrion Ogre Harpoon Gun",
14876: u"Cleric's Ogre Harpoon Gun",
14878: u"Valkyrie Ogre Harpoon Gun",
14879: u"Berserker's Ogre Harpoon Gun",
14880: u"Rampager's Ogre Harpoon Gun",
14881: u"Knight's Ogre Harpoon Gun",
14920: u"Valkyrie Corrupted Skeggox",
14921: u"Valkyrie Corrupted Shard",
14922: u"Valkyrie Corrupted Artifact",
14923: u"Valkyrie Corrupted Avenger",
14924: u"Valkyrie Corrupted Sledgehammer",
14925: u"Valkyrie Corrupted Harpoon Gun",
14926: u"Valkyrie Corrupted Greatbow",
14927: u"Valkyrie Corrupted Cudgel",
14928: u"Valkyrie Corrupted Revolver",
14929: u"Valkyrie Corrupted Blaster",
14930: u"Valkyrie Corrupted Scepter",
14931: u"Valkyrie Corrupted Bulwark",
14932: u"Valkyrie Corrupted Short Bow",
14933: u"Valkyrie Corrupted Spear",
14934: u"Valkyrie Corrupted Branch",
14935: u"Valkyrie Corrupted Blade",
14936: u"Valkyrie Corrupted Wartorch",
14937: u"Valkyrie Corrupted Trident",
14938: u"Valkyrie Corrupted Harbinger",
14939: u"Mighty Bronze Axe",
14940: u"Vital Bronze Axe",
14941: u"Mighty Bronze Axe",
14942: u"Vital Bronze Axe",
14943: u"Precise Bronze Axe",
14944: u"Resilient Bronze Axe",
14945: u"Precise Bronze Axe",
14946: u"Resilient Bronze Axe",
14947: u"Vigorous Iron Axe",
14948: u"Vigorous Iron Axe",
14949: u"Mighty Bronze Sword",
14950: u"Vital Bronze Sword",
14951: u"Mighty Bronze Sword",
14952: u"Vital Bronze Sword",
14953: u"Precise Bronze Sword",
14954: u"Resilient Bronze Sword",
14955: u"Precise Bronze Sword",
14956: u"Resilient Bronze Sword",
14957: u"Vigorous Iron Sword",
14958: u"Vigorous Iron Sword",
14959: u"Mighty Bronze Mace",
14960: u"Vital Bronze Mace",
14961: u"Mighty Bronze Mace",
14962: u"Vital Bronze Mace",
14963: u"Precise Bronze Mace",
14964: u"Resilient Bronze Mace",
14965: u"Precise Bronze Mace",
14966: u"Resilient Bronze Mace",
14967: u"Vigorous Iron Mace",
14968: u"Vigorous Iron Mace",
14969: u"Mighty Bronze Shield",
14970: u"Vital Bronze Shield",
14971: u"Mighty Bronze Shield",
14972: u"Vital Bronze Shield",
14973: u"Precise Bronze Shield",
14974: u"Resilient Bronze Shield",
14975: u"Precise Bronze Shield",
14976: u"Resilient Bronze Shield",
14977: u"Vigorous Iron Shield",
14978: u"Vigorous Iron Shield",
14979: u"Mighty Bronze Dagger",
14980: u"Vital Bronze Dagger",
14981: u"Mighty Bronze Dagger",
14982: u"Vital Bronze Dagger",
14983: u"Precise Bronze Dagger",
14984: u"Resilient Bronze Dagger",
14985: u"Precise Bronze Dagger",
14986: u"Resilient Bronze Dagger",
14987: u"Vigorous Iron Dagger",
14988: u"Vigorous Iron Dagger",
14989: u"Mighty Bronze Greatsword",
14990: u"Vital Bronze Greatsword",
14991: u"Mighty Bronze Greatsword",
14992: u"Vital Bronze Greatsword",
14993: u"Precise Bronze Greatsword",
14994: u"Resilient Bronze Greatsword",
14995: u"Precise Bronze Greatsword",
14996: u"Resilient Bronze Greatsword",
14997: u"Vigorous Iron Greatsword",
14998: u"Vigorous Iron Greatsword",
14999: u"Mighty Bronze Hammer",
15000: u"Vital Bronze Hammer",
15001: u"Mighty Bronze Hammer",
15002: u"Vital Bronze Hammer",
15003: u"Precise Bronze Hammer",
15004: u"Resilient Bronze Hammer",
15005: u"Precise Bronze Hammer",
15006: u"Resilient Bronze Hammer",
15007: u"Vigorous Iron Hammer",
15008: u"Vigorous Iron Hammer",
15009: u"Mighty Bronze Spear",
15010: u"Vital Bronze Spear",
15011: u"Mighty Bronze Spear",
15012: u"Vital Bronze Spear",
15013: u"Precise Bronze Spear",
15014: u"Resilient Bronze Spear",
15015: u"Precise Bronze Spear",
15016: u"Resilient Bronze Spear",
15017: u"Vigorous Iron Spear",
15018: u"Vigorous Iron Spear",
15019: u"Healing Bronze Dagger",
15021: u"Strong Iron Dagger",
15022: u"Strong Iron Dagger",
15023: u"Honed Iron Dagger",
15025: u"Honed Iron Dagger",
15026: u"Hearty Iron Dagger",
15027: u"Strong Steel Dagger",
15028: u"Hearty Steel Dagger",
15029: u"Ravaging Steel Dagger",
15030: u"Honed Steel Dagger",
15032: u"Ravaging Steel Dagger",
15033: u"Honed Steel Dagger",
15035: u"Strong Steel Dagger",
15036: u"Vigorous Steel Dagger",
15037: u"Hearty Steel Dagger",
15038: u"Vigorous Steel Dagger",
15039: u"Healing Bronze Sword",
15041: u"Strong Iron Sword",
15042: u"Strong Iron Sword",
15043: u"Honed Iron Sword",
15045: u"Honed Iron Sword",
15046: u"Hearty Iron Sword",
15047: u"Strong Steel Sword",
15048: u"Hearty Steel Sword",
15049: u"Ravaging Steel Sword",
15050: u"Honed Steel Sword",
15052: u"Ravaging Steel Sword",
15053: u"Honed Steel Sword",
15055: u"Strong Steel Sword",
15056: u"Vigorous Steel Sword",
15057: u"Hearty Steel Sword",
15058: u"Vigorous Steel Sword",
15059: u"Healing Bronze Mace",
15061: u"Strong Iron Mace",
15062: u"Strong Iron Mace",
15063: u"Honed Iron Mace",
15065: u"Honed Iron Mace",
15066: u"Hearty Iron Mace",
15067: u"Strong Steel Mace",
15068: u"Hearty Steel Mace",
15069: u"Ravaging Steel Mace",
15070: u"Honed Steel Mace",
15072: u"Ravaging Steel Mace",
15073: u"Honed Steel Mace",
15075: u"Strong Steel Mace",
15076: u"Vigorous Steel Mace",
15077: u"Hearty Steel Mace",
15078: u"Vigorous Steel Mace",
15079: u"Healing Bronze Shield",
15081: u"Strong Iron Shield",
15082: u"Strong Iron Shield",
15083: u"Honed Iron Shield",
15085: u"Honed Iron Shield",
15086: u"Hearty Iron Shield",
15087: u"Strong Steel Shield",
15088: u"Hearty Steel Shield",
15089: u"Ravaging Steel Shield",
15090: u"Honed Steel Shield",
15092: u"Ravaging Steel Shield",
15093: u"Honed Steel Shield",
15095: u"Strong Steel Shield",
15096: u"Vigorous Steel Shield",
15097: u"Hearty Steel Shield",
15098: u"Vigorous Steel Shield",
15099: u"Healing Bronze Axe",
15101: u"Strong Iron Axe",
15102: u"Strong Iron Axe",
15103: u"Honed Iron Axe",
15105: u"Honed Iron Axe",
15106: u"Hearty Iron Axe",
15107: u"Strong Steel Axe",
15108: u"Hearty Steel Axe",
15109: u"Ravaging Steel Axe",
15110: u"Honed Steel Axe",
15112: u"Ravaging Steel Axe",
15113: u"Honed Steel Axe",
15115: u"Strong Steel Axe",
15116: u"Vigorous Steel Axe",
15117: u"Hearty Steel Axe",
15118: u"Vigorous Steel Axe",
15119: u"Healing Bronze Greatsword",
15121: u"Strong Iron Greatsword",
15122: u"Strong Iron Greatsword",
15123: u"Honed Iron Greatsword",
15125: u"Honed Iron Greatsword",
15126: u"Hearty Iron Greatsword",
15127: u"Strong Steel Greatsword",
15128: u"Hearty Steel Greatsword",
15129: u"Ravaging Steel Greatsword",
15130: u"Honed Steel Greatsword",
15132: u"Ravaging Steel Greatsword",
15133: u"Honed Steel Greatsword",
15135: u"Strong Steel Greatsword",
15136: u"Vigorous Steel Greatsword",
15137: u"Hearty Steel Greatsword",
15138: u"Vigorous Steel Greatsword",
15139: u"Healing Bronze Hammer",
15141: u"Strong Iron Hammer",
15142: u"Strong Iron Hammer",
15143: u"Honed Iron Hammer",
15145: u"Honed Iron Hammer",
15146: u"Hearty Iron Hammer",
15147: u"Strong Steel Hammer",
15148: u"Hearty Steel Hammer",
15149: u"Ravaging Steel Hammer",
15150: u"Honed Steel Hammer",
15152: u"Ravaging Steel Hammer",
15153: u"Honed Steel Hammer",
15155: u"Strong Steel Hammer",
15156: u"Vigorous Steel Hammer",
15157: u"Hearty Steel Hammer",
15158: u"Vigorous Steel Hammer",
15159: u"Healing Bronze Spear",
15161: u"Strong Iron Spear",
15162: u"Strong Iron Spear",
15163: u"Honed Iron Spear",
15165: u"Honed Iron Spear",
15166: u"Hearty Iron Spear",
15167: u"Strong Steel Spear",
15168: u"Hearty Steel Spear",
15169: u"Ravaging Steel Spear",
15170: u"Honed Steel Spear",
15172: u"Ravaging Steel Spear",
15173: u"Honed Steel Spear",
15175: u"Strong Steel Spear",
15176: u"Vigorous Steel Spear",
15177: u"Hearty Steel Spear",
15178: u"Vigorous Steel Spear",
15179: u"Malign Bronze Dagger",
15180: u"Malign Bronze Dagger",
15181: u"Healing Bronze Dagger",
15182: u"Ravaging Iron Dagger",
15183: u"Hearty Iron Dagger",
15184: u"Ravaging Iron Dagger",
15185: u"Malign Bronze Sword",
15186: u"Malign Bronze Sword",
15187: u"Healing Bronze Sword",
15188: u"Ravaging Iron Sword",
15189: u"Hearty Iron Sword",
15190: u"Ravaging Iron Sword",
15191: u"Malign Bronze Mace",
15192: u"Malign Bronze Mace",
15193: u"Healing Bronze Mace",
15194: u"Ravaging Iron Mace",
15195: u"Hearty Iron Mace",
15196: u"Ravaging Iron Mace",
15197: u"Malign Bronze Shield",
15198: u"Malign Bronze Shield",
15199: u"Healing Bronze Shield",
15200: u"Ravaging Iron Shield",
15201: u"Hearty Iron Shield",
15202: u"Ravaging Iron Shield",
15203: u"Malign Bronze Axe",
15204: u"Malign Bronze Axe",
15205: u"Healing Bronze Axe",
15206: u"Ravaging Iron Axe",
15207: u"Hearty Iron Axe",
15208: u"Ravaging Iron Axe",
15209: u"Malign Bronze Greatsword",
15210: u"Malign Bronze Greatsword",
15211: u"Healing Bronze Greatsword",
15212: u"Ravaging Iron Greatsword",
15213: u"Hearty Iron Greatsword",
15214: u"Ravaging Iron Greatsword",
15215: u"Malign Bronze Hammer",
15216: u"Malign Bronze Hammer",
15217: u"Healing Bronze Hammer",
15218: u"Ravaging Iron Hammer",
15219: u"Hearty Iron Hammer",
15220: u"Ravaging Iron Hammer",
15221: u"Malign Bronze Spear",
15222: u"Malign Bronze Spear",
15223: u"Healing Bronze Spear",
15224: u"Ravaging Iron Spear",
15225: u"Hearty Iron Spear",
15226: u"Ravaging Iron Spear",
15227: u"Valkyrie Darksteel Axe",
15228: u"Carrion Darksteel Axe",
15229: u"Knight's Darksteel Axe",
15230: u"Berserker's Darksteel Axe",
15231: u"Cleric's Darksteel Axe",
15232: u"Rampager's Darksteel Axe",
15234: u"Carrion Darksteel Dagger",
15235: u"Knight's Darksteel Dagger",
15236: u"Valkyrie Darksteel Dagger",
15237: u"Cleric's Darksteel Dagger",
15239: u"Berserker's Darksteel Dagger",
15240: u"Rampager's Darksteel Dagger",
15241: u"Carrion Darksteel Dagger",
15242: u"Cleric's Darksteel Dagger",
15244: u"Berserker's Darksteel Dagger",
15245: u"Rampager's Darksteel Dagger",
15246: u"Knight's Darksteel Dagger",
15247: u"Valkyrie Darksteel Dagger",
15248: u"Carrion Mithril Dagger",
15249: u"Knight's Mithril Dagger",
15250: u"Valkyrie Mithril Dagger",
15251: u"Cleric's Mithril Dagger",
15253: u"Berserker's Mithril Dagger",
15254: u"Rampager's Mithril Dagger",
15255: u"Carrion Mithril Dagger",
15256: u"Cleric's Mithril Dagger",
15258: u"Berserker's Mithril Dagger",
15259: u"Rampager's Mithril Dagger",
15260: u"Knight's Mithril Dagger",
15261: u"Valkyrie Mithril Dagger",
15262: u"Carrion Krait Ripper",
15263: u"Cleric's Krait Ripper",
15265: u"Berserker's Krait Ripper",
15266: u"Rampager's Krait Ripper",
15267: u"Knight's Krait Ripper",
15268: u"Valkyrie Krait Ripper",
15269: u"Carrion Pearl Carver",
15270: u"Cleric's Pearl Carver",
15272: u"Berserker's Pearl Carver",
15273: u"Rampager's Pearl Carver",
15274: u"Knight's Pearl Carver",
15275: u"Valkyrie Pearl Carver",
15276: u"Carrion Darksteel Sword",
15277: u"Carrion Darksteel Sword",
15278: u"Knight's Darksteel Sword",
15279: u"Knight's Darksteel Sword",
15280: u"Valkyrie Darksteel Sword",
15281: u"Valkyrie Darksteel Sword",
15282: u"Cleric's Darksteel Sword",
15283: u"Cleric's Darksteel Sword",
15286: u"Berserker's Darksteel Sword",
15287: u"Berserker's Darksteel Sword",
15288: u"Rampager's Darksteel Sword",
15289: u"Rampager's Darksteel Sword",
15290: u"Carrion Mithril Sword",
15291: u"Knight's Mithril Sword",
15292: u"Valkyrie Mithril Sword",
15293: u"Cleric's Mithril Sword",
15295: u"Berserker's Mithril Sword",
15296: u"Rampager's Mithril Sword",
15297: u"Carrion Mithril Sword",
15298: u"Cleric's Mithril Sword",
15300: u"Berserker's Mithril Sword",
15301: u"Rampager's Mithril Sword",
15302: u"Knight's Mithril Sword",
15303: u"Valkyrie Mithril Sword",
15304: u"Carrion Krait Machete",
15305: u"Cleric's Krait Machete",
15307: u"Berserker's Krait Machete",
15308: u"Rampager's Krait Machete",
15309: u"Knight's Krait Machete",
15310: u"Valkyrie Krait Machete",
15311: u"Carrion Pearl Sabre",
15312: u"Cleric's Pearl Sabre",
15314: u"Berserker's Pearl Sabre",
15315: u"Rampager's Pearl Sabre",
15316: u"Knight's Pearl Sabre",
15317: u"Valkyrie Pearl Sabre",
15318: u"Carrion Darksteel Mace",
15319: u"Knight's Darksteel Mace",
15320: u"Valkyrie Darksteel Mace",
15321: u"Cleric's Darksteel Mace",
15323: u"Berserker's Darksteel Mace",
15324: u"Rampager's Darksteel Mace",
15325: u"Carrion Darksteel Mace",
15326: u"Cleric's Darksteel Mace",
15328: u"Berserker's Darksteel Mace",
15329: u"Rampager's Darksteel Mace",
15330: u"Knight's Darksteel Mace",
15331: u"Valkyrie Darksteel Mace",
15332: u"Carrion Mithril Mace",
15333: u"Knight's Mithril Mace",
15334: u"Valkyrie Mithril Mace",
15335: u"Cleric's Mithril Mace",
15337: u"Berserker's Mithril Mace",
15338: u"Rampager's Mithril Mace",
15339: u"Carrion Mithril Mace",
15340: u"Cleric's Mithril Mace",
15342: u"Berserker's Mithril Mace",
15343: u"Rampager's Mithril Mace",
15344: u"Knight's Mithril Mace",
15345: u"Valkyrie Mithril Mace",
15346: u"Carrion Krait Morning Star",
15347: u"Cleric's Krait Morning Star",
15349: u"Berserker's Krait Morning Star",
15350: u"Rampager's Krait Morning Star",
15351: u"Knight's Krait Morning Star",
15352: u"Valkyrie Krait Morning Star",
15353: u"Carrion Pearl Bludgeoner",
15354: u"Cleric's Pearl Bludgeoner",
15356: u"Berserker's Pearl Bludgeoner",
15357: u"Rampager's Pearl Bludgeoner",
15358: u"Knight's Pearl Bludgeoner",
15359: u"Valkyrie Pearl Bludgeoner",
15360: u"Carrion Darksteel Shield",
15361: u"Knight's Darksteel Shield",
15362: u"Valkyrie Darksteel Shield",
15363: u"Cleric's Darksteel Shield",
15365: u"Berserker's Darksteel Shield",
15366: u"Rampager's Darksteel Shield",
15367: u"Carrion Darksteel Shield",
15368: u"Cleric's Darksteel Shield",
15370: u"Berserker's Darksteel Shield",
15371: u"Rampager's Darksteel Shield",
15372: u"Knight's Darksteel Shield",
15373: u"Valkyrie Darksteel Shield",
15374: u"Carrion Mithril Shield",
15375: u"Knight's Mithril Shield",
15376: u"Valkyrie Mithril Shield",
15377: u"Cleric's Mithril Shield",
15379: u"Berserker's Mithril Shield",
15380: u"Rampager's Mithril Shield",
15381: u"Carrion Mithril Shield",
15382: u"Cleric's Mithril Shield",
15384: u"Berserker's Mithril Shield",
15385: u"Rampager's Mithril Shield",
15386: u"Knight's Mithril Shield",
15387: u"Valkyrie Mithril Shield",
15388: u"Carrion Krait Shell",
15389: u"Cleric's Krait Shell",
15391: u"Berserker's Krait Shell",
15392: u"Rampager's Krait Shell",
15393: u"Knight's Krait Shell",
15394: u"Valkyrie Krait Shell",
15395: u"Carrion Pearl Shell",
15396: u"Cleric's Pearl Shell",
15398: u"Berserker's Pearl Shell",
15399: u"Rampager's Pearl Shell",
15400: u"Knight's Pearl Shell",
15401: u"Valkyrie Pearl Shell",
15402: u"Carrion Darksteel Axe",
15403: u"Knight's Darksteel Axe",
15404: u"Valkyrie Darksteel Axe",
15405: u"Cleric's Darksteel Axe",
15407: u"Berserker's Darksteel Axe",
15408: u"Rampager's Darksteel Axe",
15409: u"Carrion Mithril Axe",
15410: u"Knight's Mithril Axe",
15411: u"Valkyrie Mithril Axe",
15412: u"Cleric's Mithril Axe",
15414: u"Berserker's Mithril Axe",
15415: u"Rampager's Mithril Axe",
15416: u"Carrion Mithril Axe",
15417: u"Cleric's Mithril Axe",
15419: u"Berserker's Mithril Axe",
15420: u"Rampager's Mithril Axe",
15421: u"Knight's Mithril Axe",
15422: u"Valkyrie Mithril Axe",
15423: u"Carrion Krait Battleaxe",
15424: u"Cleric's Krait Battleaxe",
15426: u"Berserker's Krait Battleaxe",
15427: u"Rampager's Krait Battleaxe",
15428: u"Knight's Krait Battleaxe",
15429: u"Valkyrie Krait Battleaxe",
15430: u"Carrion Pearl Reaver",
15431: u"Cleric's Pearl Reaver",
15433: u"Berserker's Pearl Reaver",
15434: u"Rampager's Pearl Reaver",
15435: u"Knight's Pearl Reaver",
15436: u"Valkyrie Pearl Reaver",
15437: u"Carrion Darksteel Greatsword",
15438: u"Knight's Darksteel Greatsword",
15439: u"Valkyrie Darksteel Greatsword",
15440: u"Cleric's Darksteel Greatsword",
15442: u"Berserker's Darksteel Greatsword",
15443: u"Rampager's Darksteel Greatsword",
15444: u"Carrion Darksteel Greatsword",
15445: u"Cleric's Darksteel Greatsword",
15447: u"Berserker's Darksteel Greatsword",
15448: u"Rampager's Darksteel Greatsword",
15449: u"Knight's Darksteel Greatsword",
15450: u"Valkyrie Darksteel Greatsword",
15451: u"Carrion Mithril Greatsword",
15452: u"Knight's Mithril Greatsword",
15453: u"Valkyrie Mithril Greatsword",
15454: u"Cleric's Mithril Greatsword",
15456: u"Berserker's Mithril Greatsword",
15457: u"Rampager's Mithril Greatsword",
15458: u"Carrion Mithril Greatsword",
15459: u"Cleric's Mithril Greatsword",
15461: u"Berserker's Mithril Greatsword",
15462: u"Rampager's Mithril Greatsword",
15463: u"Knight's Mithril Greatsword",
15464: u"Valkyrie Mithril Greatsword",
15465: u"Carrion Krait Slayer",
15466: u"Cleric's Krait Slayer",
15468: u"Berserker's Krait Slayer",
15469: u"Rampager's Krait Slayer",
15470: u"Knight's Krait Slayer",
15471: u"Valkyrie Krait Slayer",
15472: u"Carrion Pearl Broadsword",
15473: u"Cleric's Pearl Broadsword",
15475: u"Berserker's Pearl Broadsword",
15476: u"Rampager's Pearl Broadsword",
15477: u"Knight's Pearl Broadsword",
15478: u"Valkyrie Pearl Broadsword",
15479: u"Carrion Darksteel Hammer",
15480: u"Knight's Darksteel Hammer",
15481: u"Valkyrie Darksteel Hammer",
15482: u"Cleric's Darksteel Hammer",
15484: u"Berserker's Darksteel Hammer",
15485: u"Rampager's Darksteel Hammer",
15486: u"Carrion Darksteel Hammer",
15487: u"Cleric's Darksteel Hammer",
15489: u"Berserker's Darksteel Hammer",
15490: u"Rampager's Darksteel Hammer",
15491: u"Knight's Darksteel Hammer",
15492: u"Valkyrie Darksteel Hammer",
15493: u"Carrion Mithril Hammer",
15494: u"Knight's Mithril Hammer",
15495: u"Valkyrie Mithril Hammer",
15496: u"Cleric's Mithril Hammer",
15498: u"Berserker's Mithril Hammer",
15499: u"Rampager's Mithril Hammer",
15500: u"Carrion Mithril Hammer",
15501: u"Cleric's Mithril Hammer",
15503: u"Berserker's Mithril Hammer",
15504: u"Rampager's Mithril Hammer",
15505: u"Knight's Mithril Hammer",
15506: u"Valkyrie Mithril Hammer",
15507: u"Carrion Krait Warhammer",
15508: u"Cleric's Krait Warhammer",
15510: u"Berserker's Krait Warhammer",
15511: u"Rampager's Krait Warhammer",
15512: u"Knight's Krait Warhammer",
15513: u"Valkyrie Krait Warhammer",
15514: u"Carrion Pearl Crusher",
15515: u"Cleric's Pearl Crusher",
15517: u"Berserker's Pearl Crusher",
15518: u"Rampager's Pearl Crusher",
15519: u"Knight's Pearl Crusher",
15520: u"Valkyrie Pearl Crusher",
15521: u"Carrion Darksteel Spear",
15522: u"Knight's Darksteel Spear",
15523: u"Valkyrie Darksteel Spear",
15524: u"Cleric's Darksteel Spear",
15526: u"Berserker's Darksteel Spear",
15527: u"Rampager's Darksteel Spear",
15528: u"Carrion Darksteel Spear",
15529: u"Cleric's Darksteel Spear",
15531: u"Berserker's Darksteel Spear",
15532: u"Rampager's Darksteel Spear",
15533: u"Knight's Darksteel Spear",
15534: u"Valkyrie Darksteel Spear",
15535: u"Carrion Mithril Spear",
15536: u"Knight's Mithril Spear",
15537: u"Valkyrie Mithril Spear",
15538: u"Cleric's Mithril Spear",
15540: u"Berserker's Mithril Spear",
15541: u"Rampager's Mithril Spear",
15542: u"Carrion Mithril Spear",
15543: u"Cleric's Mithril Spear",
15545: u"Berserker's Mithril Spear",
15546: u"Rampager's Mithril Spear",
15547: u"Knight's Mithril Spear",
15548: u"Valkyrie Mithril Spear",
15549: u"Carrion Krait Pilum",
15550: u"Cleric's Krait Pilum",
15552: u"Berserker's Krait Pilum",
15553: u"Rampager's Krait Pilum",
15554: u"Knight's Krait Pilum",
15555: u"Valkyrie Krait Pilum",
15556: u"Carrion Pearl Impaler",
15557: u"Cleric's Pearl Impaler",
15559: u"Berserker's Pearl Impaler",
15560: u"Rampager's Pearl Impaler",
15561: u"Knight's Pearl Impaler",
15562: u"Valkyrie Pearl Impaler",
15563: u"Mighty Bronze Dagger",
15564: u"Mighty Bronze Sword",
15565: u"Mighty Bronze Mace",
15566: u"Mighty Bronze Shield",
15567: u"Mighty Bronze Axe",
15568: u"Mighty Bronze Greatsword",
15569: u"Mighty Bronze Hammer",
15570: u"Mighty Bronze Spear",
15571: u"Rejuvenating Iron Dagger",
15572: u"Rejuvenating Iron Dagger",
15573: u"Rejuvenating Steel Dagger",
15574: u"Rejuvenating Steel Dagger",
15575: u"Rejuvenating Iron Sword",
15576: u"Rejuvenating Iron Sword",
15577: u"Rejuvenating Steel Sword",
15578: u"Rejuvenating Steel Sword",
15579: u"Rejuvenating Iron Mace",
15580: u"Rejuvenating Iron Mace",
15581: u"Rejuvenating Steel Mace",
15582: u"Rejuvenating Steel Mace",
15583: u"Rejuvenating Iron Shield",
15584: u"Rejuvenating Iron Shield",
15585: u"Rejuvenating Steel Shield",
15586: u"Rejuvenating Steel Shield",
15587: u"Rejuvenating Iron Axe",
15588: u"Rejuvenating Iron Axe",
15589: u"Rejuvenating Steel Axe",
15590: u"Rejuvenating Steel Axe",
15591: u"Rejuvenating Iron Greatsword",
15592: u"Rejuvenating Iron Greatsword",
15593: u"Rejuvenating Steel Greatsword",
15594: u"Rejuvenating Steel Greatsword",
15595: u"Rejuvenating Iron Hammer",
15596: u"Rejuvenating Iron Hammer",
15597: u"Rejuvenating Steel Hammer",
15598: u"Rejuvenating Steel Hammer",
15599: u"Rejuvenating Iron Spear",
15600: u"Rejuvenating Iron Spear",
15601: u"Rejuvenating Steel Spear",
15602: u"Rejuvenating Steel Spear",
15603: u"Ravaging Bandit Shiv",
15604: u"Rejuvenating Bandit Shiv",
15605: u"Honed Bandit Shiv",
15607: u"Strong Bandit Shiv",
15608: u"Vigorous Bandit Shiv",
15609: u"Hearty Bandit Shiv",
15610: u"Ravaging Dredge Bloodletter",
15611: u"Rejuvenating Dredge Bloodletter",
15612: u"Honed Dredge Bloodletter",
15614: u"Strong Dredge Bloodletter",
15615: u"Vigorous Dredge Bloodletter",
15616: u"Hearty Dredge Bloodletter",
15617: u"Carrion Ogre Dirk",
15618: u"Cleric's Ogre Dirk",
15620: u"Berserker's Ogre Dirk",
15621: u"Valkyrie Ogre Dirk",
15622: u"Rampager's Ogre Dirk",
15623: u"Knight's Ogre Dirk",
15624: u"Ravaging Bandit Slicer",
15625: u"Rejuvenating Bandit Slicer",
15626: u"Honed Bandit Slicer",
15628: u"Strong Bandit Slicer",
15629: u"Vigorous Bandit Slicer",
15630: u"Hearty Bandit Slicer",
15631: u"Ravaging Dredge Edge",
15632: u"Rejuvenating Dredge Edge",
15633: u"Honed Dredge Edge",
15635: u"Strong Dredge Edge",
15636: u"Vigorous Dredge Edge",
15637: u"Hearty Dredge Edge",
15638: u"Carrion Ogre Scimitar",
15639: u"Cleric's Ogre Scimitar",
15641: u"Berserker's Ogre Scimitar",
15642: u"Valkyrie Ogre Scimitar",
15643: u"Rampager's Ogre Scimitar",
15644: u"Knight's Ogre Scimitar",
15645: u"Ravaging Bandit Mallet",
15646: u"Rejuvenating Bandit Mallet",
15647: u"Honed Bandit Mallet",
15649: u"Strong Bandit Mallet",
15650: u"Vigorous Bandit Mallet",
15651: u"Hearty Bandit Mallet",
15652: u"Ravaging Dredge Flanged Mace",
15653: u"Rejuvenating Dredge Flanged Mace",
15654: u"Honed Dredge Flanged Mace",
15656: u"Strong Dredge Flanged Mace",
15657: u"Vigorous Dredge Flanged Mace",
15658: u"Hearty Dredge Flanged Mace",
15659: u"Carrion Ogre Bludgeoner",
15660: u"Cleric's Ogre Bludgeoner",
15662: u"Berserker's Ogre Bludgeoner",
15663: u"Valkyrie Ogre Bludgeoner",
15664: u"Rampager's Ogre Bludgeoner",
15665: u"Knight's Ogre Bludgeoner",
15666: u"Ravaging Bandit Ward",
15667: u"Rejuvenating Bandit Ward",
15668: u"Honed Bandit Ward",
15670: u"Strong Bandit Ward",
15671: u"Vigorous Bandit Ward",
15672: u"Hearty Bandit Ward",
15673: u"Ravaging Dredge Barricade",
15674: u"Rejuvenating Dredge Barricade",
15675: u"Honed Dredge Barricade",
15677: u"Strong Dredge Barricade",
15678: u"Vigorous Dredge Barricade",
15679: u"Hearty Dredge Barricade",
15680: u"Carrion Ogre Bulwark",
15681: u"Cleric's Ogre Bulwark",
15683: u"Berserker's Ogre Bulwark",
15684: u"Valkyrie Ogre Bulwark",
15685: u"Rampager's Ogre Bulwark",
15686: u"Knight's Ogre Bulwark",
15687: u"Ravaging Bandit Cleaver",
15688: u"Rejuvenating Bandit Cleaver",
15689: u"Honed Bandit Cleaver",
15691: u"Strong Bandit Cleaver",
15692: u"Vigorous Bandit Cleaver",
15693: u"Hearty Bandit Cleaver",
15694: u"Ravaging Dredge Bonehewer",
15695: u"Rejuvenating Dredge Bonehewer",
15696: u"Honed Dredge Bonehewer",
15698: u"Strong Dredge Bonehewer",
15699: u"Vigorous Dredge Bonehewer",
15700: u"Hearty Dredge Bonehewer",
15701: u"Carrion Ogre Cleaver",
15702: u"Cleric's Ogre Cleaver",
15704: u"Berserker's Ogre Cleaver",
15705: u"Valkyrie Ogre Cleaver",
15706: u"Rampager's Ogre Cleaver",
15707: u"Knight's Ogre Cleaver",
15708: u"Ravaging Bandit Sunderer",
15709: u"Rejuvenating Bandit Sunderer",
15710: u"Honed Bandit Sunderer",
15712: u"Strong Bandit Sunderer",
15713: u"Vigorous Bandit Sunderer",
15714: u"Hearty Bandit Sunderer",
15715: u"Ravaging Dredge Sunderer",
15716: u"Rejuvenating Dredge Sunderer",
15717: u"Honed Dredge Sunderer",
15719: u"Strong Dredge Sunderer",
15720: u"Vigorous Dredge Sunderer",
15721: u"Hearty Dredge Sunderer",
15722: u"Carrion Ogre Longsword",
15723: u"Cleric's Ogre Longsword",
15725: u"Berserker's Ogre Longsword",
15726: u"Valkyrie Ogre Longsword",
15727: u"Rampager's Ogre Longsword",
15728: u"Knight's Ogre Longsword",
15729: u"Ravaging Bandit Demolisher",
15730: u"Rejuvenating Bandit Demolisher",
15731: u"Honed Bandit Demolisher",
15733: u"Strong Bandit Demolisher",
15734: u"Vigorous Bandit Demolisher",
15735: u"Hearty Bandit Demolisher",
15736: u"Ravaging Dredge Pulverizer",
15737: u"Rejuvenating Dredge Pulverizer",
15738: u"Honed Dredge Pulverizer",
15740: u"Strong Dredge Pulverizer",
15741: u"Vigorous Dredge Pulverizer",
15742: u"Hearty Dredge Pulverizer",
15743: u"Carrion Ogre Breaker",
15744: u"Cleric's Ogre Breaker",
15746: u"Berserker's Ogre Breaker",
15747: u"Valkyrie Ogre Breaker",
15748: u"Rampager's Ogre Breaker",
15749: u"Knight's Ogre Breaker",
15750: u"Ravaging Bandit Spear",
15751: u"Rejuvenating Bandit Spear",
15752: u"Honed Bandit Spear",
15754: u"Strong Bandit Spear",
15755: u"Vigorous Bandit Spear",
15756: u"Hearty Bandit Spear",
15757: u"Ravaging Dredge Spear",
15758: u"Rejuvenating Dredge Spear",
15759: u"Honed Dredge Spear",
15761: u"Strong Dredge Spear",
15762: u"Vigorous Dredge Spear",
15763: u"Hearty Dredge Spear",
15764: u"Carrion Ogre Javelin",
15765: u"Cleric's Ogre Javelin",
15767: u"Berserker's Ogre Javelin",
15768: u"Valkyrie Ogre Javelin",
15769: u"Rampager's Ogre Javelin",
15770: u"Knight's Ogre Javelin",
19663: u"Bottle of Elonian Wine",
19676: u"Icy Runestone",
19679: u"Bronze Ingot",
19680: u"Copper Ingot",
19681: u"Darksteel Ingot",
19682: u"Gold Ingot",
19683: u"Iron Ingot",
19684: u"Mithril Ingot",
19685: u"Orichalcum Ingot",
19686: u"Platinum Ingot",
19687: u"Silver Ingot",
19688: u"Steel Ingot",
19697: u"Copper Ore",
19698: u"Gold Ore",
19699: u"Iron Ore",
19700: u"Mithril Ore",
19701: u"Orichalcum Ore",
19702: u"Platinum Ore",
19703: u"Silver Ore",
19704: u"Lump of Tin",
19709: u"Elder Wood Plank",
19710: u"Green Wood Plank",
19711: u"Hard Wood Plank",
19712: u"Ancient Wood Plank",
19713: u"Soft Wood Plank",
19714: u"Seasoned Wood Plank",
19718: u"Jute Scrap",
19719: u"Rawhide Leather Section",
19720: u"Bolt of Jute",
19721: u"Glob of Ectoplasm",
19722: u"Elder Wood Log",
19723: u"Green Wood Log",
19724: u"Hard Wood Log",
19725: u"Ancient Wood Log",
19726: u"Soft Wood Log",
19727: u"Seasoned Wood Log",
19728: u"Thin Leather Section",
19729: u"Thick Leather Section",
19730: u"Coarse Leather Section",
19731: u"Rugged Leather Section",
19732: u"Hardened Leather Section",
19733: u"Cured Thin Leather Square",
19734: u"Cured Coarse Leather Square",
19735: u"Cured Thick Leather Square",
19736: u"Cured Rugged Leather Square",
19737: u"Cured Hardened Leather Square",
19738: u"Stretched Rawhide Leather Square",
19739: u"Wool Scrap",
19740: u"Bolt of Wool",
19741: u"Cotton Scrap",
19742: u"Bolt of Cotton",
19743: u"Linen Scrap",
19744: u"Bolt of Linen",
19745: u"Gossamer Scrap",
19746: u"Bolt of Gossamer",
19747: u"Bolt of Silk",
19748: u"Silk Scrap",
19750: u"Lump of Coal",
19757: u"Elder Wood Dowel",
19758: u"Green Wood Dowel",
19759: u"Hard Wood Dowel",
19760: u"Seasoned Wood Dowel",
19761: u"Soft Wood Dowel",
19762: u"Berserker's Elder Inscription",
19763: u"Mighty Green Inscription",
19764: u"Knight's Hard Inscription",
19765: u"Vigorous Soft Inscription",
19766: u"Berserker's Mithril Plated Inscription",
19767: u"Mighty Bronze Plated Inscription",
19768: u"Strong Steel Plated Inscription",
19769: u"Vigorous Iron Plated Inscription",
19770: u"Strong Seasoned Inscription",
19771: u"Rampager's Mithril Plated Inscription",
19772: u"Precise Bronze Plated Inscription",
19773: u"Knight's Darksteel Plated Inscription",
19774: u"Honed Steel Plated Inscription",
19775: u"Rampager's Elder Inscription",
19776: u"Precise Green Inscription",
19777: u"Honed Seasoned Inscription",
19778: u"Cleric's Mithril Plated Inscription",
19779: u"Vital Bronze Plated Inscription",
19780: u"Valkyrie Darksteel Plated Inscription",
19781: u"Hearty Steel Plated Inscription",
19782: u"Cleric's Elder Inscription",
19783: u"Vital Green Inscription",
19784: u"Valkyrie Mithril Plated Inscription",
19785: u"Resilient Bronze Plated Inscription",
19786: u"Valkyrie Elder Inscription",
19787: u"Resilient Green Inscription",
19788: u"Hunter's Seasoned Inscription",
19789: u"Spool of Wool Thread",
19790: u"Spool of Gossamer Thread",
19791: u"Spool of Silk Thread",
19792: u"Spool of Jute Thread",
19793: u"Spool of Linen Thread",
19794: u"Spool of Cotton Thread",
19795: u"Mighty Jute Insignia",
19796: u"Precise Jute Insignia",
19797: u"Resilient Jute Insignia",
19798: u"Vital Jute Insignia",
19799: u"Mighty Embroidered Jute Insignia",
19800: u"Precise Embroidered Jute Insignia",
19801: u"Resilient Embroidered Jute Insignia",
19802: u"Vital Embroidered Jute Insignia",
19803: u"Vigorous Embroidered Wool Insignia",
19804: u"Vigorous Wool Insignia",
19805: u"Healing Jute Insignia",
19806: u"Hunter's Wool Insignia",
19807: u"Hunter's Embroidered Wool Insignia",
19808: u"Strong Wool Insignia",
19809: u"Honed Wool Insignia",
19810: u"Strong Embroidered Wool Insignia",
19811: u"Honed Embroidered Wool Insignia",
19812: u"Honed Cotton Insignia",
19813: u"Strong Cotton Insignia",
19814: u"Hearty Cotton Insignia",
19815: u"Strong Embroidered Cotton Insignia",
19816: u"Honed Embroidered Cotton Insignia",
19817: u"Hunter's Cotton Insignia",
19818: u"Vigorous Cotton Insignia",
19819: u"Hearty Embroidered Cotton Insignia",
19820: u"Hunter's Embroidered Cotton Insignia",
19821: u"Vigorous Embroidered Cotton Insignia",
19822: u"Healing Green Inscription",
19823: u"Strong Iron Plated Inscription",
19824: u"Honed Iron Plated Inscription",
19825: u"Hunter's Iron Plated Inscription",
19826: u"Strong Soft Inscription",
19827: u"Hunter's Soft Inscription",
19828: u"Hearty Soft Inscription",
19829: u"Honed Soft Inscription",
19830: u"Hearty Seasoned Inscription",
19831: u"Ravaging Seasoned Inscription",
19832: u"Ravaging Steel Plated Inscription",
19833: u"Hunter's Steel Plated Inscription",
19834: u"Vigorous Seasoned Inscription",
19835: u"Vigorous Steel Plated Inscription",
19836: u"Carrion Darksteel Plated Inscription",
19837: u"Berserker's Darksteel Plated Inscription",
19838: u"Valkyrie Hard Inscription",
19839: u"Carrion Hard Inscription",
19840: u"Rampager's Hard Inscription",
19841: u"Cleric's Hard Inscription",
19842: u"Berserker's Hard Inscription",
19843: u"Assassin's Mithril Plated Inscription",
19844: u"Carrion Mithril Plated Inscription",
19845: u"Knight's Mithril Plated Inscription",
19846: u"Carrion Elder Inscription",
19847: u"Knight's Elder Inscription",
19848: u"Assassin's Elder Inscription",
19849: u"Berserker's Embroidered Linen Insignia",
19850: u"Berserker's Linen Insignia",
19851: u"Cleric's Linen Insignia",
19852: u"Rampager's Linen Insignia",
19853: u"Assassin's Silk Insignia",
19854: u"Assassin's Embroidered Silk Insignia",
19855: u"Berserker's Embroidered Silk Insignia",
19856: u"Valkyrie Embroidered Silk Insignia",
19857: u"Rampager's Embroidered Silk Insignia",
19858: u"Knight's Embroidered Silk Insignia",
19859: u"Carrion Embroidered Silk Insignia",
19860: u"Cleric's Embroidered Silk Insignia",
19861: u"Berserker's Silk Insignia",
19862: u"Valkyrie Silk Insignia",
19863: u"Rampager's Silk Insignia",
19864: u"Knight's Silk Insignia",
19865: u"Carrion Silk Insignia",
19866: u"Cleric's Silk Insignia",
19867: u"Ravaging Cotton Insignia",
19868: u"Ravaging Embroidered Cotton Insignia",
19869: u"Healing Embroidered Jute Insignia",
19870: u"Malign Jute Insignia",
19871: u"Malign Embroidered Jute Insignia",
19872: u"Ravaging Wool Insignia",
19873: u"Ravaging Embroidered Wool Insignia",
19874: u"Hearty Wool Insignia",
19875: u"Hearty Embroidered Wool Insignia",
19876: u"Assassin's Linen Insignia",
19877: u"Cleric's Embroidered Linen Insignia",
19878: u"Assassin's Embroidered Linen Insignia",
19879: u"Rampager's Embroidered Linen Insignia",
19880: u"Carrion Intricate Silk Insignia",
19881: u"Cleric's Intricate Silk Insignia",
19882: u"Assassin's Intricate Silk Insignia",
19883: u"Berserker's Intricate Silk Insignia",
19884: u"Rampager's Intricate Silk Insignia",
19885: u"Knight's Intricate Silk Insignia",
19886: u"Valkyrie Intricate Silk Insignia",
19887: u"Malign Green Inscription",
19888: u"Malign Bronze Plated Inscription",
19889: u"Healing Bronze Plated Inscription",
19890: u"Ravaging Iron Plated Inscription",
19891: u"Hearty Iron Plated Inscription",
19892: u"Ravaging Soft Inscription",
19893: u"Cleric's Darksteel Plated Inscription",
19894: u"Assassin's Darksteel Plated Inscription",
19895: u"Rampager's Darksteel Plated Inscription",
19896: u"Assassin's Hard Inscription",
19897: u"Carrion Mithril Imbued Inscription",
19898: u"Cleric's Mithril Imbued Inscription",
19899: u"Assassin's Mithril Imbued Inscription",
19900: u"Berserker's Mithril Imbued Inscription",
19901: u"Rampager's Mithril Imbued Inscription",
19902: u"Knight's Mithril Imbued Inscription",
19903: u"Valkyrie Mithril Imbued Inscription",
19904: u"Carrion Linen Insignia",
19905: u"Knight's Linen Insignia",
19906: u"Valkyrie Linen Insignia",
19907: u"Carrion Embroidered Linen Insignia",
19908: u"Knight's Embroidered Linen Insignia",
19909: u"Valkyrie Embroidered Linen Insignia",
19910: u"Carrion Intricate Gossamer Insignia",
19911: u"Cleric's Intricate Gossamer Insignia",
19912: u"Assassin's Intricate Gossamer Insignia",
19913: u"Berserker's Intricate Gossamer Insignia",
19914: u"Rampager's Intricate Gossamer Insignia",
19915: u"Knight's Intricate Gossamer Insignia",
19916: u"Valkyrie Intricate Gossamer Insignia",
19917: u"Carrion Orichalcum Imbued Inscription",
19918: u"Cleric's Orichalcum Imbued Inscription",
19919: u"Assassin's Orichalcum Imbued Inscription",
19920: u"Berserker's Orichalcum Imbued Inscription",
19921: u"Rampager's Orichalcum Imbued Inscription",
19922: u"Knight's Orichalcum Imbued Inscription",
19923: u"Valkyrie Orichalcum Imbued Inscription",
19924: u"Lump of Primordium",
19925: u"Obsidian Shard",
19926: u"Rejuvenating Soft Inscription",
19927: u"Rejuvenating Iron Plated Inscription",
19928: u"Rejuvenating Seasoned Inscription",
19929: u"Rejuvenating Steel Plated Inscription",
19930: u"Rejuvenating Wool Insignia",
19931: u"Rejuvenating Embroidered Wool Insignia",
19932: u"Rejuvenating Cotton Insignia",
19933: u"Rejuvenating Embroidered Cotton Insignia",
19934: u"Ravaging Iron Imbued Inscription",
19935: u"Rejuvenating Iron Imbued Inscription",
19936: u"Honed Iron Imbued Inscription",
19937: u"Hunter's Iron Imbued Inscription",
19938: u"Strong Iron Imbued Inscription",
19939: u"Vigorous Iron Imbued Inscription",
19940: u"Hearty Iron Imbued Inscription",
19941: u"Ravaging Steel Imbued Inscription",
19942: u"Rejuvenating Steel Imbued Inscription",
19943: u"Honed Steel Imbued Inscription",
19944: u"Hunter's Steel Imbued Inscription",
19945: u"Strong Steel Imbued Inscription",
19946: u"Vigorous Steel Imbued Inscription",
19947: u"Hearty Steel Imbued Inscription",
19948: u"Carrion Darksteel Imbued Inscription",
19949: u"Cleric's Darksteel Imbued Inscription",
19950: u"Assassin's Darksteel Imbued Inscription",
19951: u"Berserker's Darksteel Imbued Inscription",
19952: u"Valkyrie Darksteel Imbued Inscription",
19953: u"Rampager's Darksteel Imbued Inscription",
19954: u"Knight's Darksteel Imbued Inscription",
19955: u"Ravaging Intricate Wool Insignia",
19956: u"Rejuvenating Intricate Wool Insignia",
19957: u"Honed Intricate Wool Insignia",
19958: u"Hunter's Intricate Wool Insignia",
19959: u"Strong Intricate Wool Insignia",
19960: u"Vigorous Intricate Wool Insignia",
19961: u"Hearty Intricate Wool Insignia",
19962: u"Ravaging Intricate Cotton Insignia",
19963: u"Rejuvenating Intricate Cotton Insignia",
19964: u"Honed Intricate Cotton Insignia",
19965: u"Hunter's Intricate Cotton Insignia",
19966: u"Strong Intricate Cotton Insignia",
19967: u"Vigorous Intricate Cotton Insignia",
19968: u"Hearty Intricate Cotton Insignia",
19969: u"Carrion Intricate Linen Insignia",
19970: u"Cleric's Intricate Linen Insignia",
19971: u"Assassin's Intricate Linen Insignia",
19972: u"Berserker's Intricate Linen Insignia",
19973: u"Valkyrie Intricate Linen Insignia",
19974: u"Rampager's Intricate Linen Insignia",
19975: u"Knight's Intricate Linen Insignia",
20314: u"Mystery Tonic",
20315: u"Mystery Tonic",
20318: u"Mystery Tonic",
20323: u"Unidentified Dye",
21156: u"Arrow Cart Blueprints",
21157: u"Ballista Blueprints",
21158: u"Catapult Blueprints",
21161: u"Flame Ram Blueprints",
24272: u"Pile of Glittering Dust",
24273: u"Pile of Shimmering Dust",
24274: u"Pile of Radiant Dust",
24275: u"Pile of Luminous Dust",
24276: u"Pile of Incandescent Dust",
24277: u"Pile of Crystalline Dust",
24278: u"Tiny Venom Sac",
24279: u"Small Venom Sac",
24280: u"Venom Sac",
24281: u"Full Venom Sac",
24282: u"Potent Venom Sac",
24283: u"Powerful Venom Sac",
24284: u"Tiny Scale",
24285: u"Small Scale",
24286: u"Scale",
24287: u"Smooth Scale",
24288: u"Large Scale",
24289: u"Armored Scale",
24290: u"Vial of Weak Blood",
24291: u"Vial of Thin Blood",
24292: u"Vial of Blood",
24293: u"Vial of Thick Blood",
24294: u"Vial of Potent Blood",
24295: u"Vial of Powerful Blood",
24296: u"Tiny Totem",
24297: u"Small Totem",
24298: u"Totem",
24299: u"Intricate Totem",
24300: u"Elaborate Totem",
24301: u"Charged Sliver",
24302: u"Charged Fragment",
24304: u"Charged Core",
24305: u"Charged Lodestone",
24307: u"Onyx Fragment",
24308: u"Onyx Shard",
24309: u"Onyx Core",
24310: u"Onyx Lodestone",
24312: u"Molten Fragment",
24313: u"Molten Shard",
24314: u"Molten Core",
24315: u"Molten Lodestone",
24316: u"Glacial Sliver",
24317: u"Glacial Fragment",
24319: u"Glacial Core",
24320: u"Glacial Lodestone",
24321: u"Destroyer Sliver",
24322: u"Destroyer Fragment",
24323: u"Destroyer Shard",
24324: u"Destroyer Core",
24325: u"Destroyer Lodestone",
24327: u"Crystal Fragment",
24329: u"Crystal Core",
24330: u"Crystal Lodestone",
24331: u"Pile of Soiled Essence",
24332: u"Pile of Foul Essence",
24333: u"Pile of Filthy Essence",
24334: u"Pile of Vile Essence",
24335: u"Pile of Putrid Essence",
24336: u"Corrupted Sliver",
24337: u"Corrupted Fragment",
24338: u"Corrupted Shard",
24339: u"Corrupted Core",
24340: u"Corrupted Lodestone",
24341: u"Large Bone",
24342: u"Bone Chip",
24343: u"Bone Shard",
24344: u"Bone",
24345: u"Heavy Bone",
24346: u"Tiny Claw",
24347: u"Small Claw",
24348: u"Claw",
24349: u"Sharp Claw",
24350: u"Large Claw",
24351: u"Vicious Claw",
24352: u"Tiny Fang",
24353: u"Small Fang",
24354: u"Fang",
24355: u"Sharp Fang",
24356: u"Large Fang",
24357: u"Vicious Fang",
24358: u"Ancient Bone",
24359: u"Slab of Red Meat",
24360: u"Slab of Poultry Meat",
24363: u"Engraved Totem",
24464: u"Garnet Pebble",
24465: u"Turquoise Pebble",
24466: u"Malachite Pebble",
24467: u"Tiger's Eye Pebble",
24468: u"Peridot Nugget",
24469: u"Carnelian Nugget",
24470: u"Lapis Nugget",
24471: u"Sunstone Nugget",
24472: u"Carnelian Lump",
24473: u"Emerald Crystal",
24474: u"Ruby Crystal",
24475: u"Sapphire Crystal",
24477: u"Adorned Amber Jewel",
24478: u"Intricate Carnelian Jewel",
24479: u"Gilded Carnelian Jewel",
24480: u"Adorned Malachite Jewel",
24481: u"Intricate Peridot Jewel",
24482: u"Gilded Peridot Jewel",
24483: u"Adorned Garnet Jewel",
24484: u"Intricate Amethyst Jewel",
24485: u"Adorned Turquoise Jewel",
24486: u"Intricate Lapis Jewel",
24487: u"Gilded Lapis Jewel",
24488: u"Adorned Tiger's Eye Jewel",
24489: u"Intricate Topaz Jewel",
24490: u"Gilded Topaz Jewel",
24492: u"Brilliant Chrysocola Jewel",
24493: u"Brilliant Emerald Jewel",
24494: u"Brilliant Ruby Jewel",
24495: u"Brilliant Sapphire Jewel",
24500: u"Pearl",
24501: u"Amethyst Nugget",
24502: u"Silver Doubloon",
24503: u"Sunstone Lump",
24504: u"Peridot Lump",
24506: u"Topaz Lump",
24507: u"Lapis Lump",
24508: u"Ruby Orb",
24509: u"Coral Tentacle",
24510: u"Coral Orb",
24511: u"Chrysocola Crystal",
24512: u"Chrysocola Orb",
24515: u"Emerald Orb",
24516: u"Sapphire Orb",
24519: u"Beryl Crystal",
24520: u"Beryl Orb",
24521: u"Opal Crystal",
24522: u"Opal Orb",
24526: u"Spinel Lump",
24527: u"Amethyst Lump",
24534: u"Amber Pebble",
24535: u"Topaz Nugget",
24536: u"Adorned Pearl",
24537: u"Intricate Sunstone Jewel",
24538: u"Gilded Amethyst Jewel",
24539: u"Gilded Sunstone Jewel",
24540: u"Brilliant Beryl Jewel",
24541: u"Brilliant Coral Jewel",
24542: u"Brilliant Opal Jewel",
24562: u"Superior Sigil of Strength",
24563: u"Major Sigil of Strength",
24564: u"Minor Sigil of Strength",
24565: u"Minor Sigil of Frailty",
24566: u"Major Sigil of Frailty",
24567: u"Superior Sigil of Frailty",
24584: u"Superior Sigil of Benevolence",
24585: u"Minor Sigil of Benevolence",
24586: u"Major Sigil of Benevolence",
24587: u"Minor Sigil of Speed",
24588: u"Major Sigil of Speed",
24589: u"Superior Sigil of Speed",
24590: u"Major Sigil of Luck",
24591: u"Superior Sigil of Luck",
24608: u"Major Sigil of Doom",
24609: u"Superior Sigil of Doom",
24637: u"Minor Sigil of Paralyzation",
24638: u"Major Sigil of Paralyzation",
24639: u"Superior Sigil of Paralyzation",
24649: u"Minor Sigil of Icebrood Slaying",
24650: u"Major Sigil of Icebrood Slaying",
24651: u"Superior Sigil of Icebrood Slaying",
24652: u"Minor Sigil of Destroyer Slaying",
24653: u"Major Sigil of Destroyer Slaying",
24654: u"Superior Sigil of Destroyer Slaying",
24661: u"Superior Sigil of Elemental Slaying",
24662: u"Major Sigil of Elemental Slaying",
24663: u"Minor Sigil of Elemental Slaying",
24718: u"Minor Rune of Speed",
24719: u"Major Rune of Speed",
24720: u"Superior Rune of Speed",
24726: u"Superior Rune of the Rata Sum",
24729: u"Superior Rune of Hoelbrak",
24732: u"Superior Rune of Divinity",
24735: u"Superior Rune of the Grove",
24741: u"Superior Rune of the Citadel",
24742: u"Minor Rune of the Earth",
24743: u"Major Rune of the Earth",
24744: u"Superior Rune of the Earth",
24745: u"Minor Rune of the Fire",
24746: u"Major Rune of the Fire",
24747: u"Superior Rune of the Fire",
24748: u"Minor Rune of the Air",
24749: u"Major Rune of the Air",
24750: u"Superior Rune of the Air",
24751: u"Minor Rune of the Ice",
24752: u"Major Rune of the Ice",
24753: u"Superior Rune of the Ice",
24754: u"Minor Rune of the Ogre",
24755: u"Major Rune of the Ogre",
24756: u"Superior Rune of the Ogre",
24757: u"Superior Rune of the Undead",
24758: u"Major Rune of the Undead",
24759: u"Minor Rune of the Undead",
24760: u"Minor Rune of the Krait",
24761: u"Major Rune of the Krait",
24762: u"Superior Rune of the Krait",
24772: u"Gold Doubloon",
24773: u"Platinum Doubloon",
24780: u"Minor Rune of the Privateer",
24781: u"Major Rune of the Privateer",
24782: u"Superior Rune of the Privateer",
24789: u"Minor Rune of the Wurm",
24790: u"Major Rune of the Wurm",
24791: u"Superior Rune of the Wurm",
24792: u"Minor Rune of Svanir",
24793: u"Major Rune of Svanir",
24794: u"Superior Rune of Svanir",
24798: u"Minor Rune of the Elementalist",
24799: u"Major Rune of the Elementalist",
24800: u"Superior Rune of the Elementalist",
24801: u"Minor Rune of the Mesmer",
24802: u"Major Rune of the Mesmer",
24803: u"Superior Rune of the Mesmer",
24804: u"Minor Rune of the Necromancer",
24805: u"Major Rune of the Necromancer",
24806: u"Superior Rune of the Necromancer",
24810: u"Minor Rune of the Engineer",
24811: u"Major Rune of the Engineer",
24812: u"Superior Rune of the Engineer",
24813: u"Minor Rune of the Ranger",
24814: u"Major Rune of the Ranger",
24815: u"Superior Rune of the Ranger",
24816: u"Minor Rune of the Thief",
24817: u"Major Rune of the Thief",
24818: u"Superior Rune of the Thief",
24819: u"Minor Rune of the Warrior",
24820: u"Major Rune of the Warrior",
24821: u"Superior Rune of the Warrior",
24822: u"Minor Rune of the Guardian",
24823: u"Major Rune of the Guardian",
24824: u"Superior Rune of the Guardian",
24825: u"Minor Rune of the Trooper",
24826: u"Major Rune of the Trooper",
24827: u"Superior Rune of the Trooper",
24828: u"Minor Rune of the Adventurer",
24829: u"Major Rune of the Adventurer",
24830: u"Superior Rune of the Adventurer",
24831: u"Minor Rune of the Brawler",
24832: u"Major Rune of the Brawler",
24833: u"Superior Rune of the Brawler",
24834: u"Minor Rune of the Scholar",
24835: u"Major Rune of the Scholar",
24836: u"Superior Rune of the Scholar",
24837: u"Minor Rune of the Water",
24838: u"Major Rune of the Water",
24839: u"Superior Rune of the Water",
24861: u"Minor Sigil of Luck",
24862: u"Minor Sigil of Doom",
24863: u"Minor Sigil of Celerity",
24864: u"Major Sigil of Celerity",
24865: u"Superior Sigil of Celerity",
24866: u"Minor Sigil of Impact",
24867: u"Major Sigil of Impact",
24868: u"Superior Sigil of Impact",
24869: u"Rune of Life",
24870: u"Chrysocola Shard",
24871: u"Emerald Shard",
24872: u"Beryl Shard",
24873: u"Ruby Shard",
24874: u"Coral Chunk",
24875: u"Opal Shard",
24876: u"Sapphire Shard",
24877: u"Ornate Ruby Jewel",
24878: u"Ornate Chrysocola Jewel",
24879: u"Ornate Coral Jewel",
24880: u"Ornate Opal Jewel",
24881: u"Ornate Emerald Jewel",
24882: u"Ornate Sapphire Jewel",
24883: u"Ornate Beryl Jewel",
24884: u"Copper Doubloon",
24889: u"Spinel Nugget",
24896: u"Intricate Spinel Jewel",
24897: u"Gilded Spinel Jewel",
24898: u"Embellished Intricate Amethyst Jewel",
24899: u"Embellished Intricate Carnelian Jewel",
24900: u"Embellished Intricate Lapis Jewel",
24901: u"Embellished Intricate Peridot Jewel",
24902: u"Embellished Intricate Spinel Jewel",
24903: u"Embellished Intricate Sunstone Jewel",
24904: u"Embellished Intricate Topaz Jewel",
24905: u"Embellished Gilded Amethyst Jewel",
24906: u"Embellished Gilded Carnelian Jewel",
24907: u"Embellished Gilded Lapis Jewel",
24908: u"Embellished Gilded Peridot Jewel",
24909: u"Embellished Gilded Spinel Jewel",
24910: u"Embellished Gilded Sunstone Jewel",
24911: u"Embellished Gilded Topaz Jewel",
24912: u"Embellished Ornate Beryl Jewel",
24913: u"Embellished Ornate Chrysocola Jewel",
24914: u"Embellished Ornate Coral Jewel",
24915: u"Embellished Ornate Emerald Jewel",
24916: u"Embellished Ornate Opal Jewel",
24917: u"Embellished Ornate Ruby Jewel",
24918: u"Embellished Ornate Sapphire Jewel",
24919: u"Embellished Brilliant Beryl Jewel",
24920: u"Embellished Brilliant Chrysocola Jewel",
24921: u"Embellished Brilliant Coral Jewel",
24922: u"Embellished Brilliant Emerald Jewel",
24923: u"Embellished Brilliant Opal Jewel",
24924: u"Embellished Brilliant Ruby Jewel",
24925: u"Embellished Brilliant Sapphire Jewel",
24959: u"Shaman's Etched Skeggox of Rage",
24999: u"Shaman's Etched Shard of Rage",
25039: u"Shaman's Etched Artifact of Rage",
25079: u"Shaman's Etched Avenger of Rage",
25119: u"Shaman's Etched Sledgehammer of Rage",
25159: u"Shaman's Etched Harpoon of Rage",
25199: u"Shaman's Etched Greatbow of Rage",
25239: u"Shaman's Etched Cudgel of Rage",
25279: u"Shaman's Etched Revolver of Rage",
25319: u"Shaman's Etched Blaster of Rage",
25359: u"Shaman's Etched Scepter of Rage",
25399: u"Shaman's Etched Bulwark of Rage",
25439: u"Shaman's Etched Short Bow of Rage",
25479: u"Shaman's Etched Speargun of Rage",
25519: u"Shaman's Etched Branch of Rage",
25559: u"Shaman's Etched Blade of Rage",
25599: u"Shaman's Etched Wartorch of Rage",
25639: u"Shaman's Etched Trident of Rage",
25679: u"Shaman's Etched Harbinger of Rage",
36041: u"Piece of Candy Corn",
36042: u"Minor Rune of the Mad King",
36043: u"Major Rune of the Mad King",
36044: u"Superior Rune of the Mad King",
36045: u"Recipe: Major Rune of the Mad King",
36046: u"Recipe: Superior Rune of the Mad King",
36047: u"Recipe: Major Rune of the Mad King",
36048: u"Recipe: Superior Rune of the Mad King",
36049: u"Recipe: Major Rune of the Mad King",
36050: u"Recipe: Superior Rune of the Mad King",
36052: u"Bowl of Candy Corn Glaze",
36053: u"Superior Sigil of the Night",
36055: u"Minor Sigil of the Night",
36059: u"Plastic Fangs",
36060: u"Chattering Skull",
36061: u"Nougat Center",
36063: u"Recipe: Superior Sigil of the Night",
36065: u"Recipe: Superior Sigil of the Night",
36067: u"Recipe: Superior Sigil of the Night",
36073: u"Candied Apple",
36074: u"Bowl of Candy Corn Custard",
36075: u"Candy Corn Cake",
36076: u"Strawberry Ghost",
36077: u"Piece of Candy Corn Almond Brittle",
36078: u"Candy Corn Cookie",
36079: u"Glazed Pumpkin Pie",
36082: u"Glazed Chocolate Raspberry Cookie",
36091: u"Weak Potion of Halloween Slaying",
36092: u"Powerful Potion of Halloween Slaying",
36093: u"Strong Potion of Halloween Slaying",
36094: u"Potent Potion of Halloween Slaying",
36095: u"Minor Potion of Halloween Slaying",
36096: u"Potion of Halloween Slaying",
36097: u"Multicolored Ooze Tonic",
36101: u"Recipe: Candy Corn Almond Brittle",
36102: u"Recipe: Strawberry Ghost",
36103: u"Recipe: Candy Corn Custard",
36115: u"Ruminant Tonic",
36731: u"Passion Fruit",
36732: u"Apothecary's Draconic Pauldrons",
36733: u"Apothecary's Prowler Boots",
36734: u"Apothecary's Mithril Axe",
36735: u"Apothecary's Elder Wood Focus",
36736: u"Apothecary's Elder Wood Longbow",
36738: u"Apothecary's Feathered Boots",
36739: u"Apothecary's Elder Wood Longbow",
36741: u"Passiflora Mithril Ring",
36742: u"Apothecary's Feathered Boots",
36743: u"Apothecary's Prowler Boots",
36744: u"Apothecary's Elder Wood Focus",
36745: u"Apothecary's Mithril Axe",
36746: u"Apothecary's Gladiator Pauldrons",
36747: u"Apothecary's Masquerade Boots",
36748: u"Apothecary's Krait Battleaxe",
36749: u"Apothecary's Barbaric Pauldrons",
36750: u"Apothecary's Krait Recurve Bow",
36751: u"Passiflora Mithril Ring",
36752: u"Apothecary's Noble Boots",
36753: u"Passion Fruit Bar",
36754: u"Apothecary's Krait Star",
36755: u"Bottle of Tropical Dressing",
36756: u"Apothecary's Barbaric Pauldrons",
36757: u"Apothecary's Emblazoned Boots",
36758: u"Apothecary's Exalted Boots",
36759: u"Apothecary's Pearl Stinger",
36760: u"Passiflora Mithril Ring",
36761: u"Apothecary's Pearl Reaver",
36762: u"Apothecary's Pearl Conch",
36763: u"Apothecary's Prowler Coat",
36764: u"Apothecary's Mithril Pistol",
36765: u"Apothecary's Draconic Legs",
36767: u"Bowl of Salad a la Consortium",
36768: u"Apothecary's Mithril Dagger",
36769: u"Apothecary's Feathered Vestments",
36770: u"Apothecary's Elder Wood Scepter",
36771: u"Apothecary's Mithril Dagger",
36772: u"Apothecary's Prowler Coat",
36773: u"Apothecary's Elder Wood Scepter",
36774: u"Apothecary's Gladiator Legplates",
36775: u"Apothecary's Feathered Vestments",
36776: u"Apothecary's Mithril Pistol",
36777: u"Orange Passion Fruit Tart",
36778: u"Passiflora Mithril Earring",
36779: u"Apothecary's Krait Ripper",
36780: u"Apothecary's Krait Wand",
36781: u"Apothecary's Krait Handgun",
36782: u"Raspberry Passion Fruit Compote",
36783: u"Apothecary's Noble Coat",
36784: u"Passiflora Mithril Earring",
36785: u"Apothecary's Barbaric Legplates",
36786: u"Apothecary's Masquerade Raiments",
36787: u"Apothecary's Pearl Handcannon",
36788: u"Apothecary's Exalted Coat",
36789: u"Apothecary's Barbaric Legplates",
36790: u"Passiflora Mithril Earring",
36791: u"Apothecary's Pearl Rod",
36792: u"Apothecary's Emblazoned Coat",
36794: u"Apothecary's Pearl Carver",
36795: u"Apothecary's Feathered Gloves",
36796: u"Bottle of Passion Fruit Sauce",
36797: u"Apothecary's Mithril Greatsword",
36798: u"Apothecary's Elder Wood Staff",
36799: u"Apothecary's Prowler Gloves",
36800: u"Apothecary's Mithril Rifle",
36802: u"Apothecary's Draconic Helm",
36803: u"Apple Passion Fruit Pie",
36804: u"Apothecary's Elder Wood Staff",
36805: u"Passiflora Mithril Amulet",
36806: u"Apothecary's Gladiator Helm",
36807: u"Apothecary's Prowler Gloves",
36808: u"Apothecary's Mithril Rifle",
36809: u"Apothecary's Feathered Gloves",
36810: u"Apothecary's Mithril Greatsword",
36811: u"Passiflora Mithril Amulet",
36812: u"Apothecary's Krait Shooter",
36813: u"Apothecary's Krait Crook",
36814: u"Apothecary's Masquerade Gloves",
36815: u"Apothecary's Barbaric Helm",
36816: u"Apothecary's Krait Slayer",
36817: u"Bowl of Grilled Bananas with Passion Fruit Sauce",
36818: u"Apothecary's Noble Gloves",
36819: u"Apothecary's Barbaric Helm",
36820: u"Apothecary's Pearl Blunderbuss",
36821: u"Cherry Passion Fruit Cake",
36822: u"Apothecary's Emblazoned Gloves",
36823: u"Passiflora Mithril Amulet",
36824: u"Apothecary's Pearl Quarterstaff",
36825: u"Apothecary's Pearl Broadsword",
36826: u"Apothecary's Exalted Gloves",
36827: u"Apothecary's Draconic Gauntlets",
36828: u"Passion Fruit Coconut Cookie",
36829: u"Apothecary's Elder Wood Trident",
36830: u"Apothecary's Elder Wood Short Bow",
36831: u"Apothecary's Prowler Mask",
36832: u"Apothecary's Feathered Headpiece",
36833: u"Apothecary's Mithril Hammer",
36834: u"Apothecary's Prowler Mask",
36835: u"Stuffed Artichoke with Tropical Dressing",
36836: u"Apothecary's Elder Wood Trident",
36837: u"Apothecary's Elder Wood Short Bow",
36838: u"Apothecary's Feathered Headpiece",
36839: u"Apothecary's Gladiator Gauntlets",
36840: u"Apothecary's Mithril Hammer",
36842: u"Apothecary's Noble Mask",
36843: u"Apothecary's Barbaric Gloves",
36844: u"Apothecary's Masquerade Mask",
36845: u"Apothecary's Krait Short Bow",
36846: u"Apothecary's Krait Trident",
36847: u"Apothecary's Krait Warhammer",
36848: u"Apothecary's Exalted Masque",
36849: u"Apothecary's Emblazoned Helm",
36850: u"Apothecary's Barbaric Gloves",
36851: u"Apothecary's Pearl Crusher",
36852: u"Apothecary's Pearl Trident",
36853: u"Apothecary's Pearl Needler",
36854: u"Apothecary's Mithril Mace",
36855: u"Apothecary's Feathered Pants",
36856: u"Apothecary's Elder Wood Harpoon Gun",
36857: u"Apothecary's Draconic Coat",
36858: u"Apothecary's Prowler Pants",
36859: u"Apothecary's Primordus Focus",
36860: u"Apothecary's Gladiator Chestplate",
36861: u"Apothecary's Prowler Pants",
36862: u"Apothecary's Mithril Mace",
36863: u"Apothecary's Primordus Scepter",
36864: u"Apothecary's Elder Wood Harpoon Gun",
36865: u"Apothecary's Feathered Pants",
36866: u"Apothecary's Masquerade Leggings",
36867: u"Apothecary's Noble Pants",
36868: u"Apothecary's Primordus Staff",
36869: u"Apothecary's Krait Morning Star",
36870: u"Apothecary's Barbaric Coat",
36871: u"Apothecary's Krait Harpoon Gun",
36872: u"Apothecary's Barbaric Coat",
36873: u"Apothecary's Primordus Trident",
36874: u"Apothecary's Emblazoned Pants",
36875: u"Apothecary's Pearl Speargun",
36876: u"Apothecary's Exalted Pants",
36877: u"Apothecary's Pearl Bludgeoner",
36878: u"Apothecary's Draconic Boots",
36879: u"Apothecary's Elder Wood Torch",
36880: u"Apothecary's Feathered Mantle",
36881: u"Apothecary's Mithril Shield",
36882: u"Apothecary's Prowler Shoulders",
36883: u"Apothecary's Mithril Shield",
36884: u"Apothecary's Elder Wood Torch",
36885: u"Apothecary's Prowler Shoulders",
36886: u"Apothecary's Feathered Mantle",
36887: u"Apothecary's Gladiator Boots",
36888: u"Apothecary's Krait Brazier",
36889: u"Apothecary's Krait Shell",
36890: u"Apothecary's Barbaric Boots",
36891: u"Apothecary's Noble Shoulders",
36892: u"Apothecary's Masquerade Mantle",
36893: u"Apothecary's Exalted Mantle",
36894: u"Apothecary's Pearl Brazier",
36895: u"Apothecary's Pearl Shell",
36896: u"Apothecary's Emblazoned Shoulders",
36897: u"Apothecary's Barbaric Boots",
36898: u"Apothecary's Mithril Spear",
36899: u"Apothecary's Elder Wood Warhorn",
36900: u"Apothecary's Mithril Spear",
36901: u"Apothecary's Elder Wood Warhorn",
36902: u"Apothecary's Krait Pilum",
36903: u"Apothecary's Krait Whelk",
36904: u"Apothecary's Pearl Impaler",
36905: u"Apothecary's Pearl Siren",
36906: u"Apothecary's Primordus Longbow",
36907: u"Apothecary's Mithril Sword",
36908: u"Apothecary's Primordus Pistol",
36909: u"Apothecary's Mithril Sword",
36910: u"Apothecary's Primordus Rifle",
36911: u"Apothecary's Krait Machete",
36912: u"Apothecary's Primordus Short Bow",
36913: u"Apothecary's Pearl Sabre",
36914: u"Apothecary's Primordus Harpoon Gun",
36915: u"Apothecary's Primordus Axe",
36916: u"Apothecary's Primordus Dagger",
36917: u"Apothecary's Primordus Torch",
36918: u"Apothecary's Primordus Warhorn",
36919: u"Apothecary's Primordus Greatsword",
36920: u"Apothecary's Primordus Maul",
36921: u"Apothecary's Primordus Mace",
36922: u"Apothecary's Primordus Shield",
36923: u"Apothecary's Primordus Pike",
36924: u"Apothecary's Primordus Sword",
37174: u"Apothecary's Silk Insignia",
37175: u"Apothecary's Embroidered Silk Insignia",
37176: u"Apothecary's Intricate Silk Insignia",
37177: u"Apothecary's Intricate Gossamer Insignia",
37178: u"Apothecary's Elder Inscription",
37179: u"Apothecary's Mithril Plated Inscription",
37180: u"Apothecary's Mithril Imbued Inscription",
37181: u"Apothecary's Orichalcum Imbued Inscription",
37897: u"Karka Shell",
37907: u"Passion Flower",
37908: u"Embellished Brilliant Passion Flower",
37909: u"Brilliant Passion Flower",
38014: u"Vial of Condensed Mists Essence",
38116: u"Endless Toy Ventari Tonic",
38117: u"Endless Plush Griffon Tonic",
38118: u"Endless Toy Golem Tonic",
38119: u"Endless Toy Soldier Tonic",
38120: u"Endless Princess Doll Tonic",
38138: u"Adorned Snowflake",
38139: u"Intricate Snowflake",
38140: u"Gilded Snowflake",
38141: u"Ornate Snowflake",
38142: u"Brilliant Snowflake",
38145: u"Snowflake Mithril Amulet",
38146: u"Snowflake Platinum Amulet",
38147: u"Snowflake Gold Amulet",
38148: u"Snowflake Silver Amulet",
38149: u"Snowflake Copper Amulet",
38150: u"Snowflake Copper Earring",
38151: u"Snowflake Silver Earring",
38152: u"Snowflake Gold Earring",
38153: u"Snowflake Platinum Earring",
38154: u"Snowflake Mithril Earring",
38156: u"Snowflake Copper Ring",
38157: u"Snowflake Silver Ring",
38158: u"Snowflake Gold Ring",
38159: u"Snowflake Platinum Ring",
38160: u"Snowflake Mithril Ring",
38162: u"Giver's Intricate Gossamer Insignia",
38163: u"Giver's Embroidered Cotton Insignia",
38164: u"Giver's Embroidered Wool Insignia",
38165: u"Giver's Embroidered Jute Insignia",
38166: u"Giver's Embroidered Silk Insignia",
38167: u"Giver's Embroidered Linen Insignia",
38168: u"Giver's Draconic Pauldrons",
38169: u"Giver's Gladiator Pauldrons",
38170: u"Giver's Gladiator Pauldrons",
38171: u"Giver's Splint Pauldrons",
38172: u"Giver's Scale Pauldrons",
38173: u"Giver's Draconic Legs",
38174: u"Giver's Gladiator Legplates",
38175: u"Giver's Gladiator Legplates",
38176: u"Giver's Splint Legs",
38177: u"Giver's Scale Legs",
38178: u"Giver's Draconic Helm",
38179: u"Giver's Gladiator Helm",
38180: u"Giver's Gladiator Helm",
38181: u"Giver's Splint Helm",
38182: u"Giver's Scale Helm",
38183: u"Giver's Draconic Gauntlets",
38184: u"Giver's Gladiator Gauntlets",
38185: u"Giver's Gladiator Gauntlets",
38186: u"Giver's Splint Gauntlets",
38187: u"Giver's Scale Gauntlets",
38188: u"Giver's Draconic Coat",
38189: u"Giver's Gladiator Chestplate",
38190: u"Giver's Gladiator Chestplate",
38191: u"Giver's Splint Coat",
38192: u"Giver's Scale Coat",
38193: u"Giver's Draconic Boots",
38194: u"Giver's Gladiator Boots",
38195: u"Giver's Gladiator Boots",
38196: u"Giver's Splint Greaves",
38197: u"Giver's Scale Boots",
38198: u"Giver's Chain Boots",
38199: u"Giver's Chain Coat",
38200: u"Giver's Chain Gauntlets",
38201: u"Giver's Chain Helm",
38202: u"Giver's Chain Legs",
38203: u"Giver's Chain Pauldrons",
38204: u"Minor Rune of Altruism",
38205: u"Major Rune of Altruism",
38206: u"Superior Rune of Altruism",
38207: u"Recipe: Giver's Intricate Gossamer Insignia",
38208: u"Recipe: Giver's Intricate Silk Insignia",
38209: u"Recipe: Giver's Intricate Linen Insignia",
38211: u"Bowl of Peach Raspberry Swirl Ice Cream",
38212: u"Bowl of Mint Chocolate Chip Ice Cream",
38213: u"Bowl of Ginger-Lime Ice Cream",
38214: u"Bowl of Blueberry Chocolate Chunk Ice Cream",
38215: u"Bowl of Chocolate Chip Ice Cream",
38216: u"Bowl of Ice Cream Base",
38217: u"Giver's Exalted Mantle",
38218: u"Giver's Masquerade Mantle",
38219: u"Giver's Masquerade Mantle",
38220: u"Giver's Acolyte Mantle",
38221: u"Giver's Student Mantle",
38222: u"Giver's Exalted Pants",
38223: u"Giver's Masquerade Leggings",
38224: u"Giver's Masquerade Leggings",
38225: u"Giver's Acolyte Pants",
38226: u"Giver's Student Leggings",
38227: u"Giver's Exalted Masque",
38228: u"Giver's Masquerade Mask",
38229: u"Giver's Masquerade Mask",
38230: u"Giver's Acolyte Mask",
38231: u"Giver's Student Circlet",
38232: u"Giver's Exalted Gloves",
38233: u"Giver's Masquerade Gloves",
38234: u"Giver's Masquerade Gloves",
38235: u"Giver's Acolyte Gloves",
38236: u"Giver's Student Gloves",
38237: u"Giver's Exalted Coat",
38238: u"Giver's Masquerade Raiments",
38239: u"Giver's Masquerade Raiments",
38240: u"Giver's Acolyte Coat",
38241: u"Giver's Student Coat",
38242: u"Giver's Exalted Boots",
38243: u"Giver's Masquerade Boots",
38244: u"Giver's Masquerade Boots",
38245: u"Giver's Acolyte Boots",
38246: u"Giver's Student Shoes",
38247: u"Giver's Embroidered Sandals",
38248: u"Giver's Embroidered Coat",
38249: u"Giver's Embroidered Wristguards",
38250: u"Giver's Embroidered Mask",
38251: u"Giver's Embroidered Pants",
38252: u"Giver's Embroidered Mantle",
38253: u"Giver's Emblazoned Shoulders",
38254: u"Giver's Noble Shoulders",
38255: u"Giver's Noble Shoulders",
38256: u"Giver's Leather Shoulders",
38257: u"Giver's Outlaw Shoulders",
38258: u"Giver's Emblazoned Pants",
38259: u"Giver's Noble Pants",
38260: u"Giver's Noble Pants",
38261: u"Giver's Leather Pants",
38262: u"Giver's Outlaw Pants",
38263: u"Giver's Emblazoned Helm",
38264: u"Giver's Noble Mask",
38265: u"Giver's Noble Mask",
38266: u"Giver's Leather Mask",
38267: u"Giver's Outlaw Mask",
38268: u"Giver's Emblazoned Gloves",
38269: u"Giver's Noble Gloves",
38270: u"Giver's Noble Gloves",
38271: u"Giver's Leather Gloves",
38272: u"Giver's Outlaw Gloves",
38273: u"Giver's Emblazoned Coat",
38274: u"Giver's Noble Coat",
38275: u"Giver's Noble Coat",
38276: u"Giver's Leather Coat",
38277: u"Giver's Outlaw Coat",
38278: u"Giver's Emblazoned Boots",
38279: u"Giver's Noble Boots",
38280: u"Giver's Noble Boots",
38281: u"Giver's Leather Shoes",
38282: u"Giver's Outlaw Boots",
38283: u"Giver's Seeker Boots",
38284: u"Giver's Seeker Coat",
38285: u"Giver's Seeker Gloves",
38286: u"Giver's Seeker Mask",
38287: u"Giver's Seeker Pants",
38288: u"Giver's Seeker Shoulders",
38295: u"Recipe: Giver's Darksteel Imbued Inscription",
38296: u"Recipe: Giver's Mithril Imbued Inscription",
38297: u"Recipe: Bringer's Orichalcum-Imbued Inscription",
38308: u"Bowl of Candy Corn Ice Cream",
38315: u"Giver's Iron Axe",
38316: u"Giver's Iron Axe",
38317: u"Bringer's Ogre Cleaver",
38318: u"Bringer's Krait Battleaxe",
38319: u"Bringer's Pearl Reaver",
38320: u"Bringer's Krait Machete",
38321: u"Bringer's Pearl Sabre",
38322: u"Bringer's Ogre Scimitar",
38323: u"Giver's Iron Sword",
38324: u"Giver's Iron Sword",
38325: u"Bringer's Pearl Impaler",
38326: u"Bringer's Krait Pilum",
38327: u"Bringer's Ogre Javelin",
38328: u"Giver's Iron Spear",
38329: u"Giver's Iron Spear",
38330: u"Bringer's Pearl Shell",
38331: u"Bringer's Krait Shell",
38332: u"Bringer's Ogre Bulwark",
38333: u"Giver's Iron Shield",
38334: u"Giver's Iron Shield",
38335: u"Bringer's Pearl Bludgeoner",
38336: u"Bringer's Mithril Mace",
38337: u"Bringer's Darksteel Mace",
38338: u"Giver's Iron Mace",
38339: u"Giver's Bandit Mallet",
38340: u"Bringer's Pearl Crusher",
38341: u"Bringer's Mithril Hammer",
38342: u"Bringer's Darksteel Hammer",
38343: u"Giver's Iron Hammer",
38344: u"Giver's Iron Hammer",
38345: u"Bringer's Pearl Broadsword",
38346: u"Bringer's Mithril Greatsword",
38347: u"Bringer's Darksteel Greatsword",
38348: u"Giver's Iron Greatsword",
38349: u"Giver's Iron Greatsword",
38350: u"Bringer's Pearl Carver",
38351: u"Bringer's Krait Ripper",
38352: u"Bringer's Ogre Dirk",
38353: u"Giver's Iron Dagger",
38354: u"Giver's Iron Dagger",
38355: u"Giver's Bronze Axe",
38356: u"Giver's Bronze Dagger",
38357: u"Giver's Bronze Greatsword",
38358: u"Giver's Bronze Hammer",
38359: u"Giver's Bronze Mace",
38360: u"Giver's Bronze Shield",
38361: u"Giver's Bronze Sword",
38362: u"Giver's Bronze Spear",
38363: u"Giver's Green Wood Longbow",
38364: u"Giver's Soft Wood Longbow",
38365: u"Giver's Soft Wood Longbow",
38366: u"Bringer's Ogre Hornbow",
38367: u"Bringer's Krait Recurve Bow",
38368: u"Bringer's Pearl Stinger",
38369: u"Giver's Bronze Pistol",
38370: u"Giver's Iron Pistol",
38371: u"Giver's Iron Pistol",
38372: u"Bringer's Ogre Flintlock",
38373: u"Bringer's Krait Handgun",
38374: u"Bringer's Pearl Handcannon",
38375: u"Giver's Bronze Rifle",
38376: u"Giver's Iron Rifle",
38377: u"Giver's Iron Rifle",
38378: u"Bringer's Ogre Blaster",
38379: u"Bringer's Krait Shooter",
38380: u"Bringer's Pearl Blunderbuss",
38381: u"Giver's Green Wood Short Bow",
38382: u"Giver's Soft Wood Short Bow",
38383: u"Giver's Soft Wood Short Bow",
38384: u"Bringer's Ogre Short Bow",
38385: u"Bringer's Krait Short Bow",
38386: u"Bringer's Pearl Needler",
38387: u"Giver's Green Wood Harpoon Gun",
38388: u"Giver's Soft Wood Harpoon Gun",
38389: u"Giver's Soft Wood Harpoon Gun",
38390: u"Bringer's Ogre Harpoon Gun",
38391: u"Bringer's Krait Harpoon Gun",
38392: u"Bringer's Pearl Speargun",
38393: u"Giver's Green Wood Torch",
38394: u"Giver's Soft Wood Torch",
38395: u"Giver's Soft Wood Torch",
38396: u"Bringer's Ogre Blaze",
38397: u"Bringer's Krait Brazier",
38398: u"Bringer's Pearl Brazier",
38399: u"Giver's Green Wood Warhorn",
38400: u"Giver's Soft Wood Warhorn",
38401: u"Giver's Soft Wood Warhorn",
38402: u"Bringer's Ogre Harbinger",
38403: u"Bringer's Krait Whelk",
38404: u"Bringer's Pearl Siren",
38405: u"Giver's Green Wood Focus",
38406: u"Giver's Soft Wood Focus",
38407: u"Giver's Soft Wood Focus",
38408: u"Bringer's Ogre Effigy",
38409: u"Bringer's Krait Star",
38410: u"Bringer's Pearl Conch",
38411: u"Giver's Green Wood Scepter",
38412: u"Giver's Soft Wood Scepter",
38413: u"Giver's Soft Wood Scepter",
38414: u"Bringer's Ogre Truncheon",
38415: u"Bringer's Krait Wand",
38416: u"Bringer's Pearl Rod",
38417: u"Giver's Green Wood Staff",
38418: u"Giver's Soft Wood Staff",
38419: u"Giver's Soft Wood Staff",
38420: u"Bringer's Ogre Warstaff",
38421: u"Bringer's Krait Crook",
38422: u"Bringer's Pearl Quarterstaff",
38423: u"Giver's Green Wood Trident",
38424: u"Giver's Soft Wood Trident",
38425: u"Giver's Soft Wood Trident",
38426: u"Bringer's Ogre Trident",
38427: u"Bringer's Krait Trident",
38428: u"Bringer's Pearl Trident",
38429: u"Giver's Green Inscription",
38430: u"Giver's Iron-Plated Inscription",
38431: u"Giver's Steel-Plated Inscription",
38432: u"Giver's Mithril Imbued Inscription",
38433: u"Giver's Darksteel Imbued Inscription",
38434: u"Bringer's Orichalcum-Imbued Inscription",
41374: u"Sentinel's Barbaric Boots",
41375: u"Sentinel's Barbaric Boots",
41376: u"Sentinel's Gladiator Boots",
41377: u"Sentinel's Draconic Boots",
41378: u"Sentinel's Barbaric Coat",
41379: u"Sentinel's Barbaric Coat",
41380: u"Sentinel's Gladiator Chestplate",
41381: u"Sentinel's Draconic Coat",
41382: u"Sentinel's Barbaric Gloves",
41383: u"Sentinel's Barbaric Gloves",
41384: u"Sentinel's Gladiator Gauntlets",
41385: u"Sentinel's Draconic Gauntlets",
41386: u"Sentinel's Barbaric Helm",
41387: u"Sentinel's Barbaric Helm",
41388: u"Sentinel's Gladiator Helm",
41389: u"Sentinel's Draconic Helm",
41390: u"Sentinel's Barbaric Legplates",
41391: u"Sentinel's Barbaric Legplates",
41392: u"Sentinel's Gladiator Legplates",
41393: u"Sentinel's Draconic Legs",
41394: u"Sentinel's Barbaric Pauldrons",
41395: u"Sentinel's Barbaric Pauldrons",
41396: u"Sentinel's Gladiator Pauldrons",
41397: u"Sentinel's Draconic Pauldrons",
41398: u"Sentinel's Feathered Boots",
41399: u"Sentinel's Feathered Boots",
41400: u"Sentinel's Masquerade Boots",
41401: u"Sentinel's Exalted Boots",
41402: u"Sentinel's Feathered Vestments",
41403: u"Sentinel's Feathered Vestments",
41404: u"Sentinel's Masquerade Raiments",
41405: u"Sentinel's Exalted Coat",
41406: u"Sentinel's Feathered Gloves",
41407: u"Sentinel's Feathered Gloves",
41408: u"Sentinel's Masquerade Gloves",
41409: u"Sentinel's Exalted Gloves",
41410: u"Sentinel's Feathered Headpiece",
41411: u"Sentinel's Feathered Headpiece",
41412: u"Sentinel's Masquerade Mask",
41413: u"Sentinel's Exalted Masque",
41414: u"Sentinel's Feathered Pants",
41415: u"Sentinel's Feathered Pants",
41416: u"Sentinel's Masquerade Leggings",
41417: u"Sentinel's Exalted Pants",
41418: u"Sentinel's Feathered Mantle",
41419: u"Sentinel's Feathered Mantle",
41420: u"Sentinel's Masquerade Mantle",
41421: u"Sentinel's Exalted Mantle",
41422: u"Sentinel's Prowler Boots",
41423: u"Sentinel's Prowler Boots",
41424: u"Sentinel's Noble Boots",
41425: u"Sentinel's Emblazoned Boots",
41426: u"Sentinel's Prowler Coat",
41427: u"Sentinel's Prowler Coat",
41428: u"Sentinel's Noble Coat",
41429: u"Sentinel's Emblazoned Coat",
41430: u"Sentinel's Prowler Gloves",
41431: u"Sentinel's Prowler Gloves",
41432: u"Sentinel's Noble Gloves",
41433: u"Sentinel's Emblazoned Gloves",
41434: u"Sentinel's Prowler Mask",
41435: u"Sentinel's Prowler Mask",
41436: u"Sentinel's Noble Mask",
41437: u"Sentinel's Emblazoned Helm",
41438: u"Sentinel's Prowler Pants",
41439: u"Sentinel's Prowler Pants",
41440: u"Sentinel's Noble Pants",
41441: u"Sentinel's Emblazoned Pants",
41442: u"Sentinel's Prowler Shoulders",
41443: u"Sentinel's Prowler Shoulders",
41444: u"Sentinel's Noble Shoulders",
41445: u"Sentinel's Emblazoned Shoulders",
41446: u"Azurite Mithril Amulet",
41447: u"Azurite Mithril Amulet",
41448: u"Azurite Mithril Amulet",
41450: u"Azurite Mithril Earring",
41451: u"Azurite Mithril Earring",
41452: u"Azurite Mithril Earring",
41454: u"Azurite Mithril Ring",
41455: u"Azurite Mithril Ring",
41456: u"Azurite Mithril Ring",
41458: u"Sentinel's Primordus Axe",
41459: u"Sentinel's Primordus Dagger",
41460: u"Sentinel's Primordus Focus",
41461: u"Sentinel's Primordus Greatsword",
41462: u"Sentinel's Primordus Maul",
41463: u"Sentinel's Primordus Harpoon Gun",
41464: u"Sentinel's Primordus Longbow",
41465: u"Sentinel's Primordus Mace",
41466: u"Sentinel's Primordus Pistol",
41467: u"Sentinel's Primordus Rifle",
41468: u"Sentinel's Primordus Scepter",
41469: u"Sentinel's Primordus Shield",
41470: u"Sentinel's Primordus Short Bow",
41471: u"Sentinel's Primordus Pike",
41472: u"Sentinel's Primordus Staff",
41473: u"Sentinel's Primordus Sword",
41474: u"Sentinel's Primordus Torch",
41475: u"Sentinel's Primordus Trident",
41476: u"Sentinel's Primordus Warhorn",
41477: u"Sentinel's Elder Wood Focus",
41478: u"Sentinel's Elder Wood Focus",
41479: u"Sentinel's Krait Star",
41480: u"Sentinel's Pearl Conch",
41481: u"Sentinel's Elder Wood Scepter",
41482: u"Sentinel's Elder Wood Scepter",
41483: u"Sentinel's Krait Wand",
41484: u"Sentinel's Pearl Rod",
41485: u"Sentinel's Elder Wood Staff",
41486: u"Sentinel's Elder Wood Staff",
41487: u"Sentinel's Krait Crook",
41488: u"Sentinel's Pearl Quarterstaff",
41489: u"Sentinel's Elder Wood Trident",
41490: u"Sentinel's Elder Wood Trident",
41491: u"Sentinel's Krait Trident",
41492: u"Sentinel's Pearl Trident",
41493: u"Sentinel's Elder Wood Longbow",
41494: u"Sentinel's Elder Wood Longbow",
41495: u"Sentinel's Krait Recurve Bow",
41496: u"Sentinel's Pearl Stinger",
41497: u"Sentinel's Mithril Pistol",
41498: u"Sentinel's Mithril Pistol",
41499: u"Sentinel's Krait Handgun",
41500: u"Sentinel's Pearl Handcannon",
41501: u"Sentinel's Mithril Rifle",
41502: u"Sentinel's Mithril Rifle",
41503: u"Sentinel's Krait Shooter",
41504: u"Sentinel's Pearl Blunderbuss",
41505: u"Sentinel's Elder Wood Short Bow",
41506: u"Sentinel's Elder Wood Short Bow",
41507: u"Sentinel's Krait Short Bow",
41508: u"Sentinel's Pearl Needler",
41509: u"Sentinel's Elder Wood Harpoon Gun",
41510: u"Sentinel's Elder Wood Harpoon Gun",
41511: u"Sentinel's Krait Harpoon Gun",
41512: u"Sentinel's Pearl Speargun",
41513: u"Sentinel's Elder Wood Torch",
41514: u"Sentinel's Elder Wood Torch",
41515: u"Sentinel's Krait Brazier",
41516: u"Sentinel's Pearl Brazier",
41517: u"Sentinel's Elder Wood Warhorn",
41518: u"Sentinel's Elder Wood Warhorn",
41519: u"Sentinel's Krait Whelk",
41520: u"Sentinel's Pearl Siren",
41521: u"Sentinel's Mithril Axe",
41522: u"Sentinel's Mithril Axe",
41523: u"Sentinel's Krait Battleaxe",
41524: u"Sentinel's Pearl Reaver",
41525: u"Sentinel's Mithril Dagger",
41526: u"Sentinel's Mithril Dagger",
41527: u"Sentinel's Krait Ripper",
41528: u"Sentinel's Pearl Carver",
41529: u"Sentinel's Mithril Greatsword",
41530: u"Sentinel's Mithril Greatsword",
41531: u"Sentinel's Krait Slayer",
41532: u"Sentinel's Pearl Broadsword",
41533: u"Sentinel's Mithril Hammer",
41534: u"Sentinel's Mithril Hammer",
41535: u"Sentinel's Krait Warhammer",
41536: u"Sentinel's Pearl Crusher",
41537: u"Sentinel's Mithril Mace",
41538: u"Sentinel's Mithril Mace",
41539: u"Sentinel's Krait Morning Star",
41540: u"Sentinel's Pearl Bludgeoner",
41541: u"Sentinel's Mithril Shield",
41542: u"Sentinel's Mithril Shield",
41543: u"Sentinel's Krait Shell",
41544: u"Sentinel's Pearl Shell",
41545: u"Sentinel's Mithril Spear",
41546: u"Sentinel's Mithril Spear",
41547: u"Sentinel's Krait Pilum",
41548: u"Sentinel's Pearl Impaler",
41549: u"Sentinel's Mithril Sword",
41550: u"Sentinel's Mithril Sword",
41551: u"Sentinel's Krait Machete",
41552: u"Sentinel's Pearl Sabre",
41553: u"Sentinel's Silk Insignia",
41554: u"Sentinel's Embroidered Silk Insignia",
41555: u"Sentinel's Intricate Silk Insignia",
41556: u"Sentinel's Intricate Gossamer Insignia",
41557: u"Sentinel's Elder Inscription",
41558: u"Sentinel's Mithril Plated Inscription",
41559: u"Sentinel's Mithril Imbued Inscription",
41560: u"Sentinel's Orichalcum Imbued Inscription",
42006: u"Azurite Crystal",
42007: u"Brilliant Azurite Jewel",
42009: u"Embellished Brilliant Azurite Jewel",
42403: u"Infinite Molten Berserker Tonic",
42404: u"Recipe: Infinite Molten Berserker Tonic",
43319: u"Piece of Zhaitaffy",
43351: u"Aetherized Tonic",
43352: u"Infinite Aetherized Tonic",
43449: u"Potent Master Tuning Crystal",
43450: u"Potent Master Maintenance Oil",
43451: u"Potent Superior Sharpening Stone",
43482: u"Recipe: Potent Master Maintenance Oil",
43483: u"Recipe: Potent Superior Sharpening Stones",
43484: u"Recipe: Potent Master Tuning Crystals",
43557: u"Recipe: Infinite Aetherized Tonic",
43773: u"Quartz Crystal",
44642: u"Watchwork Portal Device",
44646: u"Recipe: Watchwork Portal Device",
44941: u"Watchwork Sprocket",
45027: u"Twisted Watchwork Portal Device",
45028: u"Recipe: Twisted Watchwork Portal Device",
45061: u"Candy Corn Gold Amulet",
45062: u"Candy Corn Silver Pendant",
45598: u"Hunter's Scale Pauldrons",
45599: u"Hunter's Scale Pauldrons",
45600: u"Hunter's Gladiator Pauldrons",
45601: u"Hunter's Splint Pauldrons",
45602: u"Hunter's Splint Pauldrons",
45603: u"Hunter's Gladiator Pauldrons",
45604: u"Assassin's Tempered Scale Pauldrons",
45605: u"Assassin's Tempered Scale Pauldrons",
45606: u"Assassin's Gladiator Pauldrons",
45607: u"Assassin's Barbaric Pauldrons",
45608: u"Assassin's Barbaric Pauldrons",
45609: u"Assassin's Gladiator Pauldrons",
45610: u"Assassin's Draconic Pauldrons",
45611: u"Hunter's Scale Legs",
45612: u"Hunter's Scale Legs",
45613: u"Hunter's Gladiator Legplates",
45614: u"Hunter's Splint Legs",
45615: u"Hunter's Splint Legs",
45616: u"Hunter's Gladiator Legplates",
45617: u"Assassin's Tempered Scale Legs",
45618: u"Assassin's Tempered Scale Legs",
45619: u"Assassin's Gladiator Legplates",
45620: u"Assassin's Barbaric Legplates",
45621: u"Assassin's Barbaric Legplates",
45622: u"Assassin's Gladiator Legplates",
45623: u"Assassin's Draconic Legs",
45624: u"Hunter's Scale Boots",
45625: u"Hunter's Scale Boots",
45626: u"Hunter's Gladiator Boots",
45627: u"Hunter's Splint Greaves",
45628: u"Hunter's Splint Greaves",
45629: u"Hunter's Gladiator Boots",
45630: u"Assassin's Tempered Scale Greaves",
45631: u"Assassin's Tempered Scale Greaves",
45632: u"Assassin's Gladiator Boots",
45633: u"Assassin's Barbaric Boots",
45634: u"Assassin's Barbaric Boots",
45635: u"Assassin's Gladiator Boots",
45636: u"Assassin's Draconic Boots",
45637: u"Hunter's Scale Gauntlets",
45638: u"Hunter's Scale Gauntlets",
45639: u"Hunter's Gladiator Gauntlets",
45640: u"Hunter's Splint Gauntlets",
45641: u"Hunter's Splint Gauntlets",
45642: u"Hunter's Gladiator Gauntlets",
45643: u"Assassin's Tempered Scale Gauntlets",
45644: u"Assassin's Tempered Scale Gauntlets",
45645: u"Assassin's Gladiator Gauntlets",
45646: u"Assassin's Barbaric Gloves",
45647: u"Assassin's Barbaric Gloves",
45648: u"Assassin's Gladiator Gauntlets",
45649: u"Assassin's Draconic Gauntlets",
45650: u"Hunter's Scale Coat",
45651: u"Hunter's Scale Coat",
45652: u"Hunter's Gladiator Chestplate",
45653: u"Hunter's Splint Coat",
45654: u"Hunter's Splint Coat",
45655: u"Hunter's Gladiator Chestplate",
45656: u"Assassin's Tempered Scale Chestplate",
45657: u"Assassin's Tempered Scale Chestplate",
45658: u"Assassin's Gladiator Chestplate",
45659: u"Assassin's Draconic Coat",
45660: u"Assassin's Barbaric Coat",
45661: u"Assassin's Barbaric Coat",
45662: u"Assassin's Gladiator Chestplate",
45663: u"Hunter's Scale Helm",
45664: u"Hunter's Scale Helm",
45665: u"Hunter's Gladiator Helm",
45666: u"Hunter's Splint Helm",
45667: u"Hunter's Splint Helm",
45668: u"Hunter's Gladiator Helm",
45669: u"Assassin's Tempered Scale Helm",
45670: u"Assassin's Tempered Scale Helm",
45671: u"Assassin's Gladiator Helm",
45672: u"Assassin's Barbaric Helm",
45673: u"Assassin's Barbaric Helm",
45674: u"Assassin's Gladiator Helm",
45675: u"Assassin's Draconic Helm",
45676: u"Hunter's Student Mantle",
45677: u"Hunter's Student Mantle",
45678: u"Hunter's Masquerade Mantle",
45679: u"Hunter's Acolyte Mantle",
45680: u"Hunter's Acolyte Mantle",
45681: u"Hunter's Masquerade Mantle",
45682: u"Assassin's Winged Mantle",
45683: u"Assassin's Winged Mantle",
45684: u"Assassin's Masquerade Mantle",
45685: u"Assassin's Feathered Mantle",
45686: u"Assassin's Feathered Mantle",
45687: u"Assassin's Masquerade Mantle",
45688: u"Assassin's Exalted Mantle",
45689: u"Assassin's Exalted Boots",
45690: u"Assassin's Feathered Boots",
45691: u"Assassin's Feathered Boots",
45692: u"Assassin's Masquerade Boots",
45693: u"Assassin's Winged Boots",
45694: u"Assassin's Winged Boots",
45695: u"Assassin's Masquerade Boots",
45696: u"Hunter's Acolyte Boots",
45697: u"Hunter's Acolyte Boots",
45698: u"Hunter's Masquerade Boots",
45699: u"Hunter's Student Shoes",
45700: u"Hunter's Student Shoes",
45701: u"Hunter's Masquerade Boots",
45702: u"Assassin's Exalted Coat",
45703: u"Assassin's Feathered Vestments",
45704: u"Assassin's Feathered Vestments",
45705: u"Assassin's Masquerade Raiments",
45706: u"Assassin's Winged Tunic",
45707: u"Assassin's Winged Tunic",
45708: u"Assassin's Masquerade Raiments",
45709: u"Hunter's Acolyte Coat",
45710: u"Hunter's Acolyte Coat",
45711: u"Hunter's Masquerade Raiments",
45712: u"Hunter's Student Coat",
45713: u"Hunter's Student Coat",
45714: u"Hunter's Masquerade Raiments",
45715: u"Assassin's Exalted Masque",
45716: u"Assassin's Feathered Headpiece",
45717: u"Assassin's Feathered Headpiece",
45718: u"Assassin's Masquerade Mask",
45719: u"Assassin's Winged Headpiece",
45720: u"Assassin's Winged Headpiece",
45721: u"Assassin's Masquerade Mask",
45722: u"Hunter's Acolyte Mask",
45723: u"Hunter's Acolyte Mask",
45724: u"Hunter's Masquerade Mask",
45725: u"Hunter's Student Circlet",
45726: u"Hunter's Student Circlet",
45727: u"Hunter's Masquerade Mask",
45728: u"Assassin's Exalted Pants",
45729: u"Assassin's Feathered Pants",
45730: u"Assassin's Feathered Pants",
45731: u"Assassin's Masquerade Leggings",
45732: u"Assassin's Winged Pants",
45733: u"Assassin's Winged Pants",
45734: u"Assassin's Masquerade Leggings",
45735: u"Hunter's Acolyte Pants",
45736: u"Hunter's Acolyte Pants",
45737: u"Hunter's Masquerade Leggings",
45738: u"Hunter's Student Leggings",
45739: u"Hunter's Student Leggings",
45740: u"Hunter's Masquerade Leggings",
45741: u"Assassin's Exalted Gloves",
45742: u"Assassin's Feathered Gloves",
45743: u"Assassin's Feathered Gloves",
45744: u"Assassin's Masquerade Gloves",
45745: u"Assassin's Winged Gloves",
45746: u"Assassin's Winged Gloves",
45747: u"Assassin's Masquerade Gloves",
45748: u"Hunter's Acolyte Gloves",
45749: u"Hunter's Acolyte Gloves",
45750: u"Hunter's Masquerade Gloves",
45751: u"Hunter's Student Gloves",
45752: u"Hunter's Student Gloves",
45753: u"Hunter's Masquerade Gloves",
45754: u"Hunter's Outlaw Pants",
45755: u"Hunter's Outlaw Pants",
45756: u"Hunter's Noble Pants",
45757: u"Hunter's Leather Pants",
45758: u"Hunter's Leather Pants",
45759: u"Hunter's Noble Pants",
45760: u"Assassin's Rascal Pants",
45761: u"Assassin's Rascal Pants",
45762: u"Assassin's Noble Pants",
45763: u"Assassin's Prowler Pants",
45764: u"Assassin's Prowler Pants",
45765: u"Assassin's Noble Pants",
45766: u"Assassin's Emblazoned Pants",
45767: u"Hunter's Outlaw Coat",
45768: u"Hunter's Outlaw Coat",
45769: u"Hunter's Noble Coat",
45770: u"Hunter's Leather Coat",
45771: u"Hunter's Leather Coat",
45772: u"Hunter's Noble Coat",
45773: u"Assassin's Rascal Coat",
45774: u"Assassin's Rascal Coat",
45775: u"Assassin's Noble Coat",
45776: u"Assassin's Prowler Coat",
45777: u"Assassin's Prowler Coat",
45778: u"Assassin's Noble Coat",
45779: u"Assassin's Emblazoned Coat",
45780: u"Hunter's Outlaw Gloves",
45781: u"Hunter's Outlaw Gloves",
45782: u"Hunter's Noble Gloves",
45783: u"Hunter's Leather Gloves",
45784: u"Hunter's Leather Gloves",
45785: u"Hunter's Noble Gloves",
45786: u"Assassin's Rascal Gloves",
45787: u"Assassin's Rascal Gloves",
45788: u"Assassin's Noble Gloves",
45789: u"Assassin's Prowler Gloves",
45790: u"Assassin's Prowler Gloves",
45791: u"Assassin's Noble Gloves",
45792: u"Assassin's Emblazoned Gloves",
45793: u"Hunter's Outlaw Boots",
45794: u"Hunter's Outlaw Boots",
45795: u"Hunter's Noble Boots",
45796: u"Hunter's Leather Shoes",
45797: u"Hunter's Leather Shoes",
45798: u"Hunter's Noble Boots",
45799: u"Assassin's Rascal Boots",
45800: u"Assassin's Rascal Boots",
45801: u"Assassin's Noble Boots",
45802: u"Assassin's Prowler Boots",
45803: u"Assassin's Prowler Boots",
45804: u"Assassin's Noble Boots",
45805: u"Assassin's Emblazoned Boots",
45806: u"Hunter's Outlaw Shoulders",
45807: u"Hunter's Outlaw Shoulders",
45808: u"Hunter's Noble Shoulders",
45809: u"Hunter's Leather Shoulders",
45810: u"Hunter's Leather Shoulders",
45811: u"Hunter's Noble Shoulders",
45812: u"Assassin's Rascal Shoulders",
45813: u"Assassin's Rascal Shoulders",
45814: u"Assassin's Noble Shoulders",
45815: u"Assassin's Prowler Shoulders",
45816: u"Assassin's Prowler Shoulders",
45817: u"Assassin's Noble Shoulders",
45818: u"Assassin's Emblazoned Shoulders",
45819: u"Hunter's Leather Mask",
45820: u"Hunter's Leather Mask",
45821: u"Hunter's Noble Mask",
45822: u"Hunter's Outlaw Mask",
45823: u"Hunter's Outlaw Mask",
45824: u"Hunter's Noble Mask",
45825: u"Assassin's Rascal Mask",
45826: u"Assassin's Rascal Mask",
45827: u"Assassin's Noble Mask",
45828: u"Assassin's Prowler Mask",
45829: u"Assassin's Prowler Mask",
45830: u"Assassin's Noble Mask",
45831: u"Assassin's Emblazoned Helm",
45903: u"Sunstone Silver Band",
45904: u"Sunstone Silver Band",
45905: u"Sunstone Silver Band",
45906: u"Sunstone Gold Ring",
45907: u"Sunstone Gold Ring",
45908: u"Sunstone Gold Ring",
45909: u"Opal Platinum Ring",
45910: u"Opal Platinum Ring",
45911: u"Opal Platinum Ring",
45912: u"Opal Mithril Ring",
45913: u"Opal Mithril Ring",
45914: u"Opal Mithril Ring",
45916: u"Sunstone Silver Earring",
45917: u"Sunstone Silver Earring",
45918: u"Sunstone Silver Earring",
45919: u"Sunstone Gold Earring",
45920: u"Sunstone Gold Earring",
45921: u"Sunstone Gold Earring",
45922: u"Opal Platinum Earring",
45923: u"Opal Platinum Earring",
45924: u"Opal Platinum Earring",
45925: u"Opal Mithril Earring",
45926: u"Opal Mithril Earring",
45927: u"Opal Mithril Earring",
45929: u"Sunstone Silver Pendant",
45930: u"Sunstone Silver Pendant",
45931: u"Sunstone Silver Pendant",
45932: u"Sunstone Gold Amulet",
45933: u"Sunstone Gold Amulet",
45934: u"Sunstone Gold Amulet",
45935: u"Opal Platinum Amulet",
45936: u"Opal Platinum Amulet",
45937: u"Opal Platinum Amulet",
45938: u"Opal Mithril Amulet",
45939: u"Opal Mithril Amulet",
45940: u"Opal Mithril Amulet",
45943: u"Assassin's Primordus Axe",
45944: u"Assassin's Primordus Dagger",
45945: u"Assassin's Primordus Focus",
45946: u"Assassin's Primordus Greatsword",
45947: u"Assassin's Primordus Maul",
45948: u"Assassin's Primordus Harpoon Gun",
45949: u"Assassin's Primordus Longbow",
45950: u"Assassin's Primordus Mace",
45951: u"Assassin's Primordus Pistol",
45952: u"Assassin's Primordus Rifle",
45953: u"Assassin's Primordus Scepter",
45954: u"Assassin's Primordus Shield",
45955: u"Assassin's Primordus Short Bow",
45956: u"Assassin's Primordus Pike",
45957: u"Assassin's Primordus Staff",
45958: u"Assassin's Primordus Sword",
45959: u"Assassin's Primordus Torch",
45960: u"Assassin's Primordus Trident",
45961: u"Assassin's Primordus Warhorn",
45962: u"Rabid Pearl Quarterstaff",
45963: u"Dire Pearl Quarterstaff",
45964: u"Magi's Pearl Quarterstaff",
45965: u"Soldier's Pearl Quarterstaff",
45966: u"Settler's Pearl Quarterstaff",
45967: u"Cavalier's Pearl Quarterstaff",
45968: u"Shaman's Pearl Quarterstaff",
45969: u"Rabid Pearl Trident",
45970: u"Dire Pearl Trident",
45971: u"Magi's Pearl Trident",
45972: u"Soldier's Pearl Trident",
45973: u"Settler's Pearl Trident",
45974: u"Cavalier's Pearl Trident",
45975: u"Shaman's Pearl Trident",
45976: u"Rabid Pearl Rod",
45977: u"Dire Pearl Rod",
45978: u"Magi's Pearl Rod",
45979: u"Soldier's Pearl Rod",
45980: u"Settler's Pearl Rod",
45981: u"Cavalier's Pearl Rod",
45982: u"Shaman's Pearl Rod",
45983: u"Rabid Pearl Conch",
45984: u"Dire Pearl Conch",
45985: u"Magi's Pearl Conch",
45986: u"Soldier's Pearl Conch",
45987: u"Settler's Pearl Conch",
45988: u"Cavalier's Pearl Conch",
45989: u"Shaman's Pearl Conch",
45990: u"Hunter's Soft Wood Staff",
45991: u"Hunter's Soft Wood Staff",
45992: u"Hunter's Bandit Spire",
45993: u"Hunter's Seasoned Wood Staff",
45994: u"Hunter's Seasoned Wood Staff",
45995: u"Hunter's Dredge Pillar",
45996: u"Assassin's Hard Wood Staff",
45997: u"Assassin's Hard Wood Staff",
45998: u"Assassin's Ogre Warstaff",
45999: u"Assassin's Elder Wood Staff",
46000: u"Assassin's Elder Wood Staff",
46001: u"Assassin's Krait Crook",
46002: u"Assassin's Pearl Quarterstaff",
46003: u"Hunter's Soft Wood Trident",
46004: u"Hunter's Soft Wood Trident",
46005: u"Hunter's Bandit Trident",
46006: u"Hunter's Seasoned Wood Trident",
46007: u"Hunter's Seasoned Wood Trident",
46008: u"Hunter's Dredge Trident",
46009: u"Assassin's Pearl Trident",
46010: u"Assassin's Elder Wood Trident",
46011: u"Assassin's Elder Wood Trident",
46012: u"Assassin's Krait Trident",
46013: u"Assassin's Hard Wood Trident",
46014: u"Assassin's Hard Wood Trident",
46015: u"Assassin's Ogre Trident",
46016: u"Hunter's Soft Wood Scepter",
46017: u"Hunter's Soft Wood Scepter",
46018: u"Hunter's Bandit Baton",
46019: u"Hunter's Seasoned Wood Scepter",
46020: u"Hunter's Seasoned Wood Scepter",
46021: u"Hunter's Dredge Baton",
46022: u"Assassin's Hard Wood Scepter",
46023: u"Assassin's Hard Wood Scepter",
46024: u"Assassin's Ogre Truncheon",
46025: u"Assassin's Elder Wood Scepter",
46026: u"Assassin's Elder Wood Scepter",
46027: u"Assassin's Krait Wand",
46028: u"Assassin's Pearl Rod",
46029: u"Hunter's Soft Wood Focus",
46030: u"Hunter's Soft Wood Focus",
46031: u"Hunter's Bandit Focus",
46032: u"Hunter's Seasoned Wood Focus",
46033: u"Hunter's Seasoned Wood Focus",
46034: u"Hunter's Dredge Canary",
46035: u"Assassin's Hard Wood Focus",
46036: u"Assassin's Hard Wood Focus",
46037: u"Assassin's Ogre Effigy",
46038: u"Assassin's Elder Wood Focus",
46039: u"Assassin's Elder Wood Focus",
46040: u"Assassin's Krait Star",
46041: u"Assassin's Pearl Conch",
46061: u"Rabid Pearl Needler",
46062: u"Dire Pearl Needler",
46063: u"Magi's Pearl Needler",
46064: u"Soldier's Pearl Needler",
46065: u"Settler's Pearl Needler",
46066: u"Cavalier's Pearl Needler",
46067: u"Shaman's Pearl Needler",
46068: u"Rabid Pearl Siren",
46069: u"Dire Pearl Siren",
46070: u"Magi's Pearl Siren",
46071: u"Soldier's Pearl Siren",
46072: u"Settler's Pearl Siren",
46073: u"Cavalier's Pearl Siren",
46074: u"Shaman's Pearl Siren",
46075: u"Rabid Pearl Brazier",
46076: u"Dire Pearl Brazier",
46077: u"Magi's Pearl Brazier",
46078: u"Soldier's Pearl Brazier",
46079: u"Settler's Pearl Brazier",
46080: u"Cavalier's Pearl Brazier",
46081: u"Shaman's Pearl Brazier",
46082: u"Rabid Pearl Stinger",
46083: u"Dire Pearl Stinger",
46084: u"Magi's Pearl Stinger",
46085: u"Soldier's Pearl Stinger",
46086: u"Settler's Pearl Stinger",
46087: u"Cavalier's Pearl Stinger",
46088: u"Shaman's Pearl Stinger",
46089: u"Rabid Pearl Handcannon",
46090: u"Dire Pearl Handcannon",
46091: u"Magi's Pearl Handcannon",
46092: u"Soldier's Pearl Handcannon",
46093: u"Settler's Pearl Handcannon",
46094: u"Cavalier's Pearl Handcannon",
46095: u"Shaman's Pearl Handcannon",
46096: u"Rabid Pearl Blunderbuss",
46097: u"Dire Pearl Blunderbuss",
46098: u"Magi's Pearl Blunderbuss",
46099: u"Soldier's Pearl Blunderbuss",
46100: u"Settler's Pearl Blunderbuss",
46101: u"Cavalier's Pearl Blunderbuss",
46102: u"Shaman's Pearl Blunderbuss",
46103: u"Rabid Pearl Speargun",
46104: u"Dire Pearl Speargun",
46105: u"Magi's Pearl Speargun",
46106: u"Soldier's Pearl Speargun",
46107: u"Settler's Pearl Speargun",
46108: u"Cavalier's Pearl Speargun",
46109: u"Shaman's Pearl Speargun",
46110: u"Hunter's Soft Wood Short Bow",
46111: u"Hunter's Soft Wood Short Bow",
46112: u"Hunter's Bandit Short Bow",
46113: u"Hunter's Seasoned Wood Short Bow",
46114: u"Hunter's Seasoned Wood Short Bow",
46115: u"Hunter's Dredge Short Bow",
46116: u"Assassin's Pearl Needler",
46117: u"Assassin's Elder Wood Short Bow",
46118: u"Assassin's Elder Wood Short Bow",
46119: u"Assassin's Krait Short Bow",
46120: u"Assassin's Hard Wood Short Bow",
46121: u"Assassin's Hard Wood Short Bow",
46122: u"Assassin's Ogre Short Bow",
46123: u"Hunter's Soft Wood Warhorn",
46124: u"Hunter's Soft Wood Warhorn",
46125: u"Hunter's Bandit Bugle",
46126: u"Hunter's Seasoned Wood Warhorn",
46127: u"Hunter's Seasoned Wood Warhorn",
46128: u"Hunter's Dredge Double Horn",
46129: u"Assassin's Pearl Siren",
46130: u"Assassin's Elder Wood Warhorn",
46131: u"Assassin's Elder Wood Warhorn",
46132: u"Assassin's Krait Whelk",
46133: u"Assassin's Hard Wood Warhorn",
46134: u"Assassin's Hard Wood Warhorn",
46135: u"Assassin's Ogre Harbinger",
46136: u"Hunter's Soft Wood Torch",
46137: u"Hunter's Soft Wood Torch",
46138: u"Hunter's Bandit Torch",
46139: u"Hunter's Seasoned Wood Torch",
46140: u"Hunter's Seasoned Wood Torch",
46141: u"Hunter's Dredge Lamp",
46142: u"Assassin's Hard Wood Torch",
46143: u"Assassin's Hard Wood Torch",
46144: u"Assassin's Ogre Blaze",
46145: u"Assassin's Elder Wood Torch",
46146: u"Assassin's Elder Wood Torch",
46147: u"Assassin's Krait Brazier",
46148: u"Assassin's Pearl Brazier",
46149: u"Hunter's Soft Wood Longbow",
46150: u"Hunter's Soft Wood Longbow",
46151: u"Hunter's Bandit Longbow",
46152: u"Hunter's Seasoned Wood Longbow",
46153: u"Hunter's Seasoned Wood Longbow",
46154: u"Hunter's Dredge Reflex Bow",
46155: u"Assassin's Hard Wood Longbow",
46156: u"Assassin's Hard Wood Longbow",
46157: u"Assassin's Ogre Hornbow",
46158: u"Assassin's Elder Wood Longbow",
46159: u"Assassin's Elder Wood Longbow",
46160: u"Assassin's Krait Recurve Bow",
46161: u"Assassin's Pearl Stinger",
46162: u"Hunter's Iron Pistol",
46163: u"Hunter's Iron Pistol",
46164: u"Hunter's Bandit Revolver",
46165: u"Hunter's Steel Pistol",
46166: u"Hunter's Steel Pistol",
46167: u"Hunter's Dredge Firearm",
46168: u"Assassin's Pearl Handcannon",
46169: u"Assassin's Mithril Pistol",
46170: u"Assassin's Mithril Pistol",
46171: u"Assassin's Krait Handgun",
46172: u"Assassin's Darksteel Pistol",
46173: u"Assassin's Darksteel Pistol",
46174: u"Assassin's Ogre Flintlock",
46175: u"Hunter's Iron Rifle",
46176: u"Hunter's Iron Rifle",
46177: u"Hunter's Bandit Musket",
46178: u"Hunter's Steel Rifle",
46179: u"Hunter's Steel Rifle",
46180: u"Hunter's Dredge Boomstick",
46181: u"Assassin's Darksteel Rifle",
46182: u"Assassin's Darksteel Rifle",
46183: u"Assassin's Ogre Blaster",
46184: u"Assassin's Mithril Rifle",
46185: u"Assassin's Mithril Rifle",
46186: u"Assassin's Krait Shooter",
46187: u"Assassin's Pearl Blunderbuss",
46188: u"Hunter's Soft Wood Harpoon Gun",
46189: u"Hunter's Soft Wood Harpoon Gun",
46190: u"Hunter's Bandit Harpoon Gun",
46191: u"Hunter's Seasoned Wood Harpoon Gun",
46192: u"Hunter's Seasoned Wood Harpoon Gun",
46193: u"Hunter's Dredge Harpoon Gun",
46194: u"Assassin's Pearl Speargun",
46195: u"Assassin's Elder Wood Harpoon Gun",
46196: u"Assassin's Elder Wood Harpoon Gun",
46197: u"Assassin's Krait Harpoon Gun",
46198: u"Assassin's Hard Wood Harpoon Gun",
46199: u"Assassin's Hard Wood Harpoon Gun",
46200: u"Assassin's Ogre Harpoon Gun",
46201: u"Rabid Pearl Carver",
46202: u"Dire Pearl Carver",
46203: u"Magi's Pearl Carver",
46204: u"Soldier's Pearl Carver",
46205: u"Settler's Pearl Carver",
46206: u"Cavalier's Pearl Carver",
46207: u"Shaman's Pearl Carver",
46208: u"Rabid Pearl Sabre",
46209: u"Dire Pearl Sabre",
46210: u"Magi's Pearl Sabre",
46211: u"Soldier's Pearl Sabre",
46212: u"Settler's Pearl Sabre",
46213: u"Cavalier's Pearl Sabre",
46214: u"Shaman's Pearl Sabre",
46215: u"Rabid Pearl Bludgeoner",
46216: u"Dire Pearl Bludgeoner",
46217: u"Magi's Pearl Bludgeoner",
46218: u"Soldier's Pearl Bludgeoner",
46219: u"Settler's Pearl Bludgeoner",
46220: u"Cavalier's Pearl Bludgeoner",
46221: u"Shaman's Pearl Bludgeoner",
46222: u"Rabid Pearl Shell",
46223: u"Dire Pearl Shell",
46224: u"Magi's Pearl Shell",
46225: u"Soldier's Pearl Shell",
46226: u"Settler's Pearl Shell",
46227: u"Cavalier's Pearl Shell",
46228: u"Shaman's Pearl Shell",
46229: u"Rabid Pearl Reaver",
46230: u"Dire Pearl Reaver",
46231: u"Magi's Pearl Reaver",
46232: u"Soldier's Pearl Reaver",
46233: u"Settler's Pearl Reaver",
46234: u"Cavalier's Pearl Reaver",
46235: u"Shaman's Pearl Reaver",
46236: u"Rabid Pearl Broadsword",
46237: u"Dire Pearl Broadsword",
46238: u"Magi's Pearl Broadsword",
46239: u"Soldier's Pearl Broadsword",
46240: u"Settler's Pearl Broadsword",
46241: u"Cavalier's Pearl Broadsword",
46242: u"Shaman's Pearl Broadsword",
46243: u"Rabid Pearl Crusher",
46244: u"Dire Pearl Crusher",
46245: u"Magi's Pearl Crusher",
46246: u"Soldier's Pearl Crusher",
46247: u"Settler's Pearl Crusher",
46248: u"Cavalier's Pearl Crusher",
46249: u"Shaman's Pearl Crusher",
46250: u"Rabid Pearl Impaler",
46251: u"Dire Pearl Impaler",
46252: u"Magi's Pearl Impaler",
46253: u"Soldier's Pearl Impaler",
46254: u"Settler's Pearl Impaler",
46255: u"Cavalier's Pearl Impaler",
46256: u"Shaman's Pearl Impaler",
46257: u"Hunter's Iron Dagger",
46258: u"Hunter's Iron Dagger",
46259: u"Hunter's Bandit Shiv",
46260: u"Hunter's Steel Dagger",
46261: u"Hunter's Steel Dagger",
46262: u"Hunter's Dredge Bloodletter",
46263: u"Assassin's Darksteel Dagger",
46264: u"Assassin's Darksteel Dagger",
46265: u"Assassin's Ogre Dirk",
46266: u"Assassin's Mithril Dagger",
46267: u"Assassin's Mithril Dagger",
46268: u"Assassin's Krait Ripper",
46269: u"Assassin's Pearl Carver",
46270: u"Hunter's Iron Sword",
46271: u"Hunter's Iron Sword",
46272: u"Hunter's Bandit Slicer",
46273: u"Hunter's Steel Sword",
46274: u"Hunter's Steel Sword",
46275: u"Hunter's Dredge Edge",
46276: u"Assassin's Darksteel Sword",
46277: u"Assassin's Darksteel Sword",
46278: u"Assassin's Ogre Scimitar",
46279: u"Assassin's Mithril Sword",
46280: u"Assassin's Mithril Sword",
46281: u"Assassin's Krait Machete",
46282: u"Assassin's Pearl Sabre",
46283: u"Hunter's Iron Mace",
46284: u"Hunter's Iron Mace",
46285: u"Hunter's Bandit Mallet",
46286: u"Hunter's Steel Mace",
46287: u"Hunter's Steel Mace",
46288: u"Hunter's Dredge Flanged Mace",
46289: u"Assassin's Darksteel Mace",
46290: u"Assassin's Darksteel Mace",
46291: u"Assassin's Ogre Bludgeoner",
46292: u"Assassin's Mithril Mace",
46293: u"Assassin's Mithril Mace",
46294: u"Assassin's Krait Morning Star",
46295: u"Assassin's Pearl Bludgeoner",
46296: u"Hunter's Iron Shield",
46297: u"Hunter's Iron Shield",
46298: u"Hunter's Bandit Ward",
46299: u"Hunter's Steel Shield",
46300: u"Hunter's Steel Shield",
46301: u"Hunter's Dredge Barricade",
46302: u"Assassin's Darksteel Shield",
46303: u"Assassin's Darksteel Shield",
46304: u"Assassin's Ogre Bulwark",
46305: u"Assassin's Mithril Shield",
46306: u"Assassin's Mithril Shield",
46307: u"Assassin's Krait Shell",
46308: u"Assassin's Pearl Shell",
46309: u"Hunter's Iron Axe",
46310: u"Hunter's Iron Axe",
46311: u"Hunter's Bandit Cleaver",
46312: u"Hunter's Steel Axe",
46313: u"Hunter's Steel Axe",
46314: u"Hunter's Dredge Bonehewer",
46315: u"Assassin's Darksteel Axe",
46316: u"Assassin's Darksteel Axe",
46317: u"Assassin's Ogre Cleaver",
46318: u"Assassin's Mithril Axe",
46319: u"Assassin's Mithril Axe",
46320: u"Assassin's Krait Battleaxe",
46321: u"Assassin's Pearl Reaver",
46322: u"Hunter's Iron Greatsword",
46323: u"Hunter's Iron Greatsword",
46324: u"Hunter's Bandit Sunderer",
46325: u"Hunter's Steel Greatsword",
46326: u"Hunter's Steel Greatsword",
46327: u"Hunter's Dredge Sunderer",
46328: u"Assassin's Darksteel Greatsword",
46329: u"Assassin's Darksteel Greatsword",
46330: u"Assassin's Ogre Longsword",
46331: u"Assassin's Mithril Greatsword",
46332: u"Assassin's Mithril Greatsword",
46333: u"Assassin's Krait Slayer",
46334: u"Assassin's Pearl Broadsword",
46335: u"Hunter's Iron Hammer",
46336: u"Hunter's Iron Hammer",
46337: u"Hunter's Bandit Demolisher",
46338: u"Hunter's Steel Hammer",
46339: u"Hunter's Steel Hammer",
46340: u"Hunter's Dredge Pulverizer",
46341: u"Assassin's Darksteel Hammer",
46342: u"Assassin's Darksteel Hammer",
46343: u"Assassin's Ogre Breaker",
46344: u"Assassin's Mithril Hammer",
46345: u"Assassin's Mithril Hammer",
46346: u"Assassin's Krait Warhammer",
46347: u"Assassin's Pearl Crusher",
46348: u"Hunter's Iron Spear",
46349: u"Hunter's Iron Spear",
46350: u"Hunter's Bandit Spear",
46351: u"Hunter's Steel Spear",
46352: u"Hunter's Steel Spear",
46353: u"Hunter's Dredge Spear",
46354: u"Assassin's Darksteel Spear",
46355: u"Assassin's Darksteel Spear",
46356: u"Assassin's Ogre Javelin",
46357: u"Assassin's Mithril Spear",
46358: u"Assassin's Mithril Spear",
46359: u"Assassin's Krait Pilum",
46360: u"Assassin's Pearl Impaler",
46681: u"Glob of Dark Matter",
46684: u"Shaman's Orichalcum Imbued Inscription",
46685: u"Cavalier's Orichalcum Imbued Inscription",
46686: u"Rabid Orichalcum Imbued Inscription",
46687: u"Magi's Orichalcum Imbued Inscription",
46688: u"Soldier's Orichalcum Imbued Inscription",
46689: u"Settler's Orichalcum Imbued Inscription",
46690: u"Dire Orichalcum Imbued Inscription",
46708: u"Shaman's Intricate Gossamer Insignia",
46709: u"Cavalier's Intricate Gossamer Insignia",
46710: u"Rabid Intricate Gossamer Insignia",
46711: u"Magi's Intricate Gossamer Insignia",
46712: u"Soldier's Intricate Gossamer Insignia",
46713: u"Settler's Intricate Gossamer Insignia",
46730: u"Bloodstone Brick",
46731: u"Pile of Bloodstone Dust",
46732: u"Dragonite Ingot",
46733: u"Dragonite Ore",
46734: u"Empyreal Star",
46735: u"Empyreal Fragment",
46736: u"Spiritwood Plank",
46738: u"Deldrimor Steel Ingot",
46739: u"Elonian Leather Square",
46740: u"Spool of Silk Weaving Thread",
46741: u"Bolt of Damask",
46742: u"Lump of Mithrillium",
46744: u"Glob of Elder Spirit Residue",
46745: u"Spool of Thick Elonian Cord",
46747: u"Thermocatalytic Reagent",
48580: u"Rabid Draconic Pauldrons",
48581: u"Dire Draconic Pauldrons",
48582: u"Magi's Draconic Pauldrons",
48583: u"Soldier's Draconic Pauldrons",
48584: u"Settler's Draconic Pauldrons",
48585: u"Cavalier's Draconic Pauldrons",
48586: u"Shaman's Draconic Pauldrons",
48587: u"Rabid Draconic Legs",
48588: u"Dire Draconic Legs",
48589: u"Magi's Draconic Legs",
48590: u"Soldier's Draconic Legs",
48591: u"Settler's Draconic Legs",
48592: u"Cavalier's Draconic Legs",
48593: u"Shaman's Draconic Legs",
48594: u"Rabid Draconic Boots",
48595: u"Dire Draconic Boots",
48596: u"Magi's Draconic Boots",
48597: u"Soldier's Draconic Boots",
48598: u"Settler's Draconic Boots",
48599: u"Cavalier's Draconic Boots",
48600: u"Shaman's Draconic Boots",
48601: u"Rabid Draconic Gauntlets",
48602: u"Dire Draconic Gauntlets",
48603: u"Magi's Draconic Gauntlets",
48604: u"Soldier's Draconic Gauntlets",
48605: u"Settler's Draconic Gauntlets",
48606: u"Cavalier's Draconic Gauntlets",
48607: u"Shaman's Draconic Gauntlets",
48608: u"Rabid Draconic Coat",
48609: u"Dire Draconic Coat",
48610: u"Magi's Draconic Coat",
48611: u"Soldier's Draconic Coat",
48612: u"Settler's Draconic Coat",
48613: u"Cavalier's Draconic Coat",
48614: u"Shaman's Draconic Coat",
48615: u"Rabid Draconic Helm",
48616: u"Dire Draconic Helm",
48617: u"Magi's Draconic Helm",
48618: u"Soldier's Draconic Helm",
48619: u"Settler's Draconic Helm",
48620: u"Cavalier's Draconic Helm",
48621: u"Shaman's Draconic Helm",
48622: u"Rabid Exalted Mantle",
48623: u"Dire Exalted Mantle",
48624: u"Magi's Exalted Mantle",
48625: u"Soldier's Exalted Mantle",
48626: u"Settler's Exalted Mantle",
48627: u"Cavalier's Exalted Mantle",
48628: u"Shaman's Exalted Mantle",
48629: u"Rabid Exalted Boots",
48630: u"Dire Exalted Boots",
48631: u"Magi's Exalted Boots",
48632: u"Soldier's Exalted Boots",
48633: u"Settler's Exalted Boots",
48634: u"Cavalier's Exalted Boots",
48635: u"Shaman's Exalted Boots",
48636: u"Rabid Exalted Coat",
48637: u"Dire Exalted Coat",
48638: u"Magi's Exalted Coat",
48639: u"Soldier's Exalted Coat",
48640: u"Settler's Exalted Coat",
48641: u"Cavalier's Exalted Coat",
48642: u"Shaman's Exalted Coat",
48643: u"Rabid Exalted Masque",
48644: u"Dire Exalted Masque",
48645: u"Magi's Exalted Masque",
48646: u"Soldier's Exalted Masque",
48647: u"Settler's Exalted Masque",
48648: u"Cavalier's Exalted Masque",
48649: u"Shaman's Exalted Masque",
48650: u"Rabid Exalted Pants",
48651: u"Dire Exalted Pants",
48652: u"Magi's Exalted Pants",
48653: u"Soldier's Exalted Pants",
48654: u"Settler's Exalted Pants",
48655: u"Cavalier's Exalted Pants",
48656: u"Shaman's Exalted Pants",
48657: u"Rabid Exalted Gloves",
48658: u"Dire Exalted Gloves",
48659: u"Magi's Exalted Gloves",
48660: u"Soldier's Exalted Gloves",
48661: u"Settler's Exalted Gloves",
48662: u"Cavalier's Exalted Gloves",
48663: u"Shaman's Exalted Gloves",
48664: u"Rabid Emblazoned Pants",
48665: u"Dire Emblazoned Pants",
48666: u"Magi's Emblazoned Pants",
48667: u"Soldier's Emblazoned Pants",
48668: u"Settler's Emblazoned Pants",
48669: u"Cavalier's Emblazoned Pants",
48670: u"Shaman's Emblazoned Pants",
48671: u"Rabid Emblazoned Coat",
48672: u"Dire Emblazoned Coat",
48673: u"Magi's Emblazoned Coat",
48674: u"Soldier's Emblazoned Coat",
48675: u"Settler's Emblazoned Coat",
48676: u"Cavalier's Emblazoned Coat",
48677: u"Shaman's Emblazoned Coat",
48678: u"Rabid Emblazoned Gloves",
48679: u"Dire Emblazoned Gloves",
48680: u"Magi's Emblazoned Gloves",
48681: u"Soldier's Emblazoned Gloves",
48682: u"Settler's Emblazoned Gloves",
48683: u"Cavalier's Emblazoned Gloves",
48684: u"Shaman's Emblazoned Gloves",
48685: u"Rabid Emblazoned Boots",
48686: u"Dire Emblazoned Boots",
48687: u"Magi's Emblazoned Boots",
48688: u"Soldier's Emblazoned Boots",
48689: u"Settler's Emblazoned Boots",
48690: u"Cavalier's Emblazoned Boots",
48691: u"Shaman's Emblazoned Boots",
48692: u"Rabid Emblazoned Shoulders",
48693: u"Dire Emblazoned Shoulders",
48694: u"Magi's Emblazoned Shoulders",
48695: u"Soldier's Emblazoned Shoulders",
48696: u"Settler's Emblazoned Shoulders",
48697: u"Cavalier's Emblazoned Shoulders",
48698: u"Shaman's Emblazoned Shoulders",
48699: u"Rabid Emblazoned Helm",
48700: u"Dire Emblazoned Helm",
48701: u"Magi's Emblazoned Helm",
48702: u"Soldier's Emblazoned Helm",
48703: u"Settler's Emblazoned Helm",
48704: u"Cavalier's Emblazoned Helm",
48705: u"Shaman's Emblazoned Helm",
48805: u"High-Quality Plastic Fangs",
48806: u"Tyria's Best Nougat Center",
48807: u"Gibbering Skull",
48884: u"Pristine Toxic Spore Sample",
48907: u"Superior Rune of Antitoxin",
48908: u"Recipe: Superior Rune of Antitoxin",
48909: u"Recipe: Superior Rune of Antitoxin",
48910: u"Recipe: Superior Rune of Antitoxin",
48911: u"Superior Sigil of Torment",
48912: u"Recipe: Superior Sigil of Torment",
48913: u"Recipe: Superior Sigil of Torment",
48914: u"Recipe: Superior Sigil of Torment",
48915: u"Toxic Sharpening Stone",
48916: u"Toxic Maintenance Oil",
48917: u"Toxic Focusing Crystal",
48918: u"Recipe: Toxic Sharpening Stone",
48919: u"Recipe: Toxic Maintenance Oil",
48920: u"Recipe: Toxic Focusing Crystal",
49295: u"Recipe: Endless Toy Ventari Tonic",
49296: u"Recipe: Endless Toy Soldier Tonic",
49297: u"Recipe: Endless Princess Doll Tonic",
49298: u"Recipe: Endless Plush Griffon Tonic",
49299: u"Recipe: Endless Toy Golem Tonic",
49424: u"+1 Agony Infusion",
49425: u"+2 Agony Infusion",
49426: u"+3 Agony Infusion",
49427: u"+4 Agony Infusion",
49428: u"+5 Agony Infusion",
49429: u"+6 Agony Infusion",
49430: u"+7 Agony Infusion",
49431: u"+8 Agony Infusion",
49432: u"+9 Agony Infusion",
49433: u"+10 Agony Infusion",
49434: u"+11 Agony Infusion",
49435: u"+12 Agony Infusion",
49436: u"+13 Agony Infusion",
49437: u"+14 Agony Infusion",
49438: u"+15 Agony Infusion",
49439: u"+16 Agony Infusion",
49440: u"+17 Agony Infusion",
49441: u"+18 Agony Infusion",
49442: u"+19 Agony Infusion",
49443: u"+20 Agony Infusion",
49444: u"+21 Agony Infusion",
49522: u"Dire Intricate Gossamer Insignia",
49733: u"Zealot's Draconic Boots",
49734: u"Recipe: Zealot's Orichalcum Imbued Inscription",
49735: u"Recipe: Zealot's Intricate Gossamer Insignia",
49737: u"Recipe: Watchwork Mechanism",
49741: u"Recipe: Zealot's Draconic Boots",
49742: u"Recipe: Zealot's Draconic Coat",
49743: u"Recipe: Zealot's Draconic Gauntlets",
49744: u"Recipe: Zealot's Draconic Helm",
49745: u"Recipe: Zealot's Draconic Legs",
49746: u"Recipe: Zealot's Draconic Pauldrons",
49747: u"Recipe: Zealot's Exalted Boots",
49748: u"Recipe: Zealot's Exalted Coat",
49749: u"Recipe: Zealot's Exalted Gloves",
49750: u"Recipe: Zealot's Exalted Masque",
49751: u"Recipe: Zealot's Exalted Pants",
49752: u"Recipe: Zealot's Exalted Mantle",
49753: u"Recipe: Zealot's Emblazoned Boots",
49754: u"Recipe: Zealot's Emblazoned Coat",
49755: u"Recipe: Zealot's Emblazoned Gloves",
49756: u"Recipe: Zealot's Emblazoned Helm",
49757: u"Recipe: Zealot's Emblazoned Pants",
49758: u"Recipe: Zealot's Emblazoned Shoulders",
49759: u"Recipe: Zealot's Pearl Conch",
49760: u"Recipe: Zealot's Pearl Rod",
49761: u"Recipe: Zealot's Pearl Quarterstaff",
49762: u"Recipe: Zealot's Pearl Trident",
49763: u"Recipe: Zealot's Pearl Speargun",
49764: u"Recipe: Zealot's Pearl Stinger",
49765: u"Recipe: Zealot's Pearl Handcannon",
49766: u"Recipe: Zealot's Pearl Blunderbuss",
49767: u"Recipe: Zealot's Pearl Needler",
49768: u"Recipe: Zealot's Pearl Brazier",
49769: u"Recipe: Zealot's Pearl Siren",
49770: u"Recipe: Zealot's Pearl Reaver",
49771: u"Recipe: Zealot's Pearl Carver",
49772: u"Recipe: Zealot's Pearl Broadsword",
49773: u"Recipe: Zealot's Pearl Crusher",
49774: u"Recipe: Zealot's Pearl Bludgeoner",
49775: u"Recipe: Zealot's Pearl Shell",
49776: u"Recipe: Zealot's Pearl Impaler",
49777: u"Recipe: Zealot's Pearl Sabre",
49781: u"Zealot's Pearl Quarterstaff",
49782: u"Watchwork Mechanism",
49783: u"Zealot's Draconic Coat",
49784: u"Zealot's Draconic Gauntlets",
49785: u"Zealot's Draconic Helm",
49786: u"Zealot's Draconic Legs",
49787: u"Zealot's Draconic Pauldrons",
49788: u"Zealot's Exalted Boots",
49789: u"Zealot's Exalted Coat",
49790: u"Zealot's Exalted Gloves",
49791: u"Zealot's Exalted Masque",
49792: u"Zealot's Exalted Pants",
49793: u"Zealot's Exalted Mantle",
49794: u"Zealot's Emblazoned Boots",
49795: u"Zealot's Emblazoned Coat",
49796: u"Zealot's Emblazoned Gloves",
49797: u"Zealot's Emblazoned Helm",
49798: u"Zealot's Emblazoned Pants",
49799: u"Zealot's Emblazoned Shoulders",
49803: u"Zealot's Pearl Broadsword",
49804: u"Zealot's Pearl Conch",
49805: u"Zealot's Pearl Rod",
49806: u"Zealot's Pearl Crusher",
49807: u"Zealot's Pearl Trident",
49808: u"Zealot's Pearl Speargun",
49809: u"Zealot's Pearl Stinger",
49810: u"Zealot's Pearl Handcannon",
49811: u"Zealot's Pearl Blunderbuss",
49812: u"Zealot's Pearl Needler",
49813: u"Zealot's Pearl Brazier",
49814: u"Zealot's Pearl Siren",
49815: u"Zealot's Pearl Reaver",
49816: u"Zealot's Pearl Carver",
49818: u"Zealot's Pearl Bludgeoner",
49819: u"Zealot's Pearl Shell",
49820: u"Zealot's Pearl Impaler",
49821: u"Zealot's Pearl Sabre",
49865: u"Zealot's Orichalcum Imbued Inscription",
49866: u"Zealot's Intricate Gossamer Insignia",
50140: u"Swelter's Gait",
50141: u"Bella's Crown",
50142: u"Swelter's Gait",
50143: u"Brightcarved Breastplate",
50144: u"Barca's Gambit",
50145: u"Groaning Gloves",
50146: u"Barca's Studs",
50147: u"Pike's Ambition",
50148: u"Barca's Crest",
50149: u"Barca's Baldric",
50150: u"Clark's Chukkas",
50151: u"Sepulchre Greaves",
50152: u"Barca's Sleeve",
50153: u"Dual Peaks",
50154: u"Gieve's Plate",
50155: u"Clark's Chukkas",
50156: u"Duty-Bound Duffers",
50157: u"Gieve's Plate",
50158: u"Duty-Bound Brigandine",
50159: u"Duty-Bound Bracers",
50160: u"Duty-Bound Domeplate",
50161: u"Duty-Bound Thigh Guards",
50162: u"Duty-Bound Platepads",
50163: u"Massey's Gauntlet",
50164: u"Boots of Barca",
50165: u"Massey's Gauntlet",
50166: u"Brightcarved Breastplate",
50167: u"Groaning Gloves",
50168: u"Pike's Ambition",
50169: u"Sepulchre Greaves",
50170: u"Dual Peaks",
50171: u"Bella's Crown",
50172: u"Mail of Mac Quen",
50173: u"Boots of Barca",
50174: u"Swelter's Gait",
50175: u"Barca's Gambit",
50176: u"Brightcarved Breastplate",
50177: u"Mail of Mac Quen",
50178: u"Groaning Gloves",
50179: u"Barca's Studs",
50180: u"Barca's Crest",
50181: u"Pike's Ambition",
50182: u"Sepulchre Greaves",
50183: u"Barca's Baldric",
50184: u"Barca's Sleeve",
50185: u"Dual Peaks",
50186: u"Miucha's Mantle",
50187: u"Miucha's Mantle",
50188: u"Dashing Tricorner",
50189: u"Many-Buckled Swash",
50190: u"Ostholz's Kickers",
50191: u"Trackless Boots",
50192: u"Ostholz's Duster",
50193: u"Amnemoi's Robe",
50194: u"Ostholz's Work Gloves",
50195: u"Inconnu's Paw",
50196: u"Ostholz's Rag",
50197: u"Mask of 1,000 Faces",
50198: u"Ostholz's Britches",
50199: u"Inconspicuous Dungarees",
50200: u"Highsteppers",
50201: u"Ostholz's Brace",
50202: u"Obfuscation Ward",
50203: u"Many-Buckled Swash",
50204: u"Irresistible Javalinas",
50205: u"Irresistible Javalinas",
50206: u"Trackless Boots",
50207: u"Amnemoi's Robe",
50208: u"Inconnu's Paw",
50209: u"Dashing Tricorner",
50210: u"Mask of 1,000 Faces",
50211: u"Inconspicuous Dungarees",
50212: u"Highsteppers",
50213: u"Obfuscation Ward",
50214: u"Stiletto Jodhpurs",
50215: u"Stiletto Jodhpurs",
50216: u"Trackless Boots",
50217: u"Ostholz's Kickers",
50218: u"Ragamuffin Toeshoes",
50219: u"Ostholz's Duster",
50220: u"Ragamuffin Cape",
50221: u"Amnemoi's Robe",
50222: u"Ostholz's Work Gloves",
50223: u"Inconnu's Paw",
50224: u"Ragamuffin Grips",
50225: u"Ostholz's Rag",
50226: u"Breezy Backplate",
50227: u"Mask of 1,000 Faces",
50228: u"Ragamuffin Bandana",
50229: u"Inconspicuous Dungarees",
50230: u"Ragamuffin Slacks",
50231: u"Ostholz's Britches",
50232: u"Breezy Backplate",
50233: u"Ostholz's Brace",
50234: u"Ragamuffin Pads",
50235: u"Obfuscation Ward",
50342: u"Ineffable Orichalcum Plated Inscription",
50343: u"Ineffable Orichalcum Imbued Inscription",
50347: u"Ineffable Seasoned Inscription",
50349: u"Ineffable Hard Inscription",
50351: u"Ineffable Darksteel Plated Inscription",
50352: u"Ineffable Elder Inscription",
50353: u"Ineffable Mithril Plated Inscription",
50355: u"Ineffable Mithril Imbued Inscription",
50358: u"Ineffable Cotton Insignia",
50360: u"Ineffable Linen Insignia",
50362: u"Ineffable Intricate Linen Insignia",
50363: u"Ineffable Silk Insignia",
50364: u"Ineffable Embroidered Silk Insignia",
50366: u"Ineffable Intricate Silk Insignia",
50367: u"Ineffable Embroidered Gossamer Insignia",
50368: u"Ineffable Intricate Gossamer Insignia",
50370: u"Homespun Espadrilles",
50371: u"Cloven Hooves",
50372: u"Blessed Steps",
50373: u"Rattling Cage",
50374: u"Vesper Vest",
50375: u"Imuthi's Thorax",
50376: u"Kid Gloves",
50377: u"Clarion Clasps",
50378: u"Cabras Cowl",
50379: u"Anointed Diadem",
50380: u"Witherbreeches",
50381: u"Cumulus Habit",
50382: u"Dessicated Pauldrons",
50383: u"Pious Pinion",
50384: u"Imuthi's Tarsals",
50385: u"Imuthi's Thorax",
50386: u"Imuthi's Phalanges",
50387: u"Imuthi's Phalanges",
50388: u"Blessed Steps",
50389: u"Vesper Vest",
50390: u"Clarion Clasps",
50391: u"Anointed Diadem",
50392: u"Cumulus Habit",
50393: u"Pious Pinion",
50394: u"Imuthi's Tarsals",
50395: u"Imuthi's Occipitus",
50396: u"Imuthi's Occipitus",
50397: u"Homespun Garb",
50398: u"Homespun Mittens",
50399: u"Homespun Hood",
50400: u"Homespun Trousers",
50401: u"Homespun Shoulderpads",
50402: u"Imuthi's Femurs",
50403: u"Imuthi's Femurs",
50404: u"Cloven Hooves",
50405: u"Blessed Steps",
50406: u"Vesper Vest",
50407: u"Rattling Cage",
50408: u"Kid Gloves",
50409: u"Clarion Clasps",
50410: u"Cabras Cowl",
50411: u"Anointed Diadem",
50412: u"Witherbreeches",
50413: u"Cumulus Habit",
50414: u"Pious Pinion",
50415: u"Dessicated Pauldrons",
50416: u"Imuthi's Coracoid",
50417: u"Imuthi's Coracoid",
50418: u"Handcrafted Gladium Prod",
50419: u"Handcrafted Spiritcatcher",
50420: u"Handcrafted Bent Circuit",
50421: u"Handcrafted Wiseclaw",
50422: u"Handcrafted Yew Rod",
50423: u"Handcrafted Gladium Prod",
50424: u"Handcrafted Eagle's Perch",
50425: u"Handcrafted Impulse Spire",
50426: u"Handcrafted Speakerpride",
50427: u"Handcrafted Hand Furnace",
50428: u"Handcrafted Bent Circuit",
50429: u"Handcrafted Smodur's Charge",
50430: u"Handcrafted Stagsign",
50431: u"Handcrafted Radiance",
50432: u"Handcrafted Impulse Spire",
50433: u"Handcrafted Tally Reckoner",
50434: u"Handcrafted Nucleus",
50435: u"Handcrafted Runerod",
50436: u"Handcrafted Weftwand",
50437: u"Handcrafted Supplejack",
50438: u"Handcrafted Discharging Pole",
50439: u"Handcrafted Nucleus",
50440: u"Handcrafted Runerod",
50441: u"Handcrafted Discharging Pole",
50442: u"Handcrafted Bombard",
50443: u"Handcrafted Chromecurve",
50444: u"Handcrafted Gunnarsong",
50445: u"Handcrafted Elkslayer",
50446: u"Handcrafted Ravengift",
50447: u"Handcrafted Pulleybow",
50448: u"Handcrafted Deerchaser",
50449: u"Handcrafted Barking Dog",
50450: u"Handcrafted Boregun",
50451: u"Handcrafted Griffoncrack",
50452: u"Handcrafted Barking Wolf",
50453: u"Handcrafted Bombard",
50454: u"Handcrafted Thunderbluss",
50455: u"Handcrafted Hearthfriend",
50456: u"Handcrafted Honor of Njal",
50457: u"Handcrafted Legion's Light",
50458: u"Handcrafted Skaaldsinger",
50459: u"Handcrafted Charr Alarm Clock",
50460: u"Handcrafted Wayfinder",
50461: u"Handcrafted Big Grin",
50462: u"Handcrafted Chromecurve",
50463: u"Handcrafted Pulleybow",
50464: u"Handcrafted Little Smirk",
50465: u"Handcrafted Boregun",
50466: u"Handcrafted Glaukon's Greeting",
50467: u"Handcrafted Gritted Bayonet",
50468: u"Handcrafted Legion's Light",
50469: u"Handcrafted Titan's Brand",
50470: u"Handcrafted Charr Alarm Clock",
50471: u"Handcrafted Bellower",
50472: u"Handcrafted Blind Ed's Longbow",
50473: u"Handcrafted Housebreaker",
50474: u"Handcrafted Boltcaster",
50475: u"Handcrafted Sparrowcatcher",
50476: u"Handcrafted Gravelgun",
50477: u"Handcrafted Rhedo's Revenge",
50478: u"Handcrafted Poacher's Shot",
50479: u"Handcrafted Rock Carbine",
50480: u"Handcrafted Processional Flame",
50481: u"Handcrafted Phosphor Brand",
50482: u"Handcrafted Ram's Sigh",
50483: u"Handcrafted Cragvox",
50484: u"Handcrafted Housebreaker",
50485: u"Handcrafted Boltcaster",
50486: u"Handcrafted Gravelgun",
50487: u"Handcrafted Rock Carbine",
50488: u"Handcrafted Phosphor Brand",
50489: u"Handcrafted Cragvox",
50586: u"Handcrafted War Auger",
50587: u"Handcrafted Serrated Fate",
50588: u"Handcrafted Teardrawer",
50589: u"Handcrafted Scramasax",
50590: u"Handcrafted Problem Solver",
50591: u"Handcrafted Oxenblade",
50592: u"Handcrafted Woundfire",
50593: u"Handcrafted Steamsmash",
50594: u"Handcrafted Great Sledge",
50595: u"Handcrafted Barrowbuilder",
50596: u"Handcrafted War Auger",
50597: u"Handcrafted Dreadbell",
50598: u"Handcrafted Eye of Astrix",
50599: u"Handcrafted Swordshame",
50600: u"Handcrafted Geared Maw",
50601: u"Handcrafted Dun Targe",
50602: u"Handcrafted Chainblade",
50603: u"Handcrafted Warleek",
50604: u"Handcrafted Leopard's Bite",
50605: u"Handcrafted Cauterizer",
50606: u"Handcrafted Grawlgrinder",
50607: u"Handcrafted Razor Shard",
50608: u"Handcrafted Serrated Fate",
50609: u"Handcrafted Problem Solver",
50610: u"Handcrafted Fury Unleashed",
50611: u"Handcrafted Steamsmash",
50612: u"Handcrafted Thermaul",
50613: u"Handcrafted Forge Gavel",
50614: u"Handcrafted Tarnished Hewer",
50615: u"Handcrafted Woundwolf",
50616: u"Handcrafted Chiminea Ward",
50617: u"Handcrafted Geared Maw",
50618: u"Handcrafted Distemper",
50619: u"Handcrafted Chainblade",
50620: u"Handcrafted Francisca",
50621: u"Handcrafted Grawlgrinder",
50622: u"Handcrafted Handspar",
50623: u"Handcrafted Forkfang",
50624: u"Handcrafted Li'l Letter Opener",
50625: u"Handcrafted Vein Breaker",
50626: u"Handcrafted Great Fullsword",
50627: u"Handcrafted Seeger's Union",
50628: u"Handcrafted Pulse Hammer",
50629: u"Handcrafted Slate-Bound Star",
50630: u"Handcrafted Waring's Regulator",
50631: u"Handcrafted Improvised Bulwark",
50632: u"Handcrafted Immutabilis",
50633: u"Handcrafted Tuneblade",
50634: u"Handcrafted Moa Carver",
50635: u"Handcrafted Tarnished Hewer",
50636: u"Handcrafted Forkfang",
50637: u"Handcrafted Vein Breaker",
50638: u"Handcrafted Pulse Hammer",
50639: u"Handcrafted Slate-Bound Star",
50640: u"Handcrafted Immutabilis",
50641: u"Handcrafted Tuneblade",
62885: u"Gourmet Chef's Backpack",
62886: u"Simple Armorsmith's Backpack",
62887: u"Intricate Armorsmith's Backpack",
62888: u"Sturdy Armorsmith's Backpack",
62889: u"Elegant Armorsmith's Backpack",
62890: u"Elegant Armorsmith's Backpack",
62891: u"Practical Armorsmith's Backpack",
62892: u"Basic Chef's Backpack",
62893: u"Ornate Artificer's Backpack",
62894: u"Simple Artificer's Backpack",
62895: u"Intricate Artificer's Backpack",
62896: u"Sturdy Artificer's Backpack",
62897: u"Elegant Artificer's Backpack",
62898: u"Elegant Artificer's Backpack",
62899: u"Practical Artificer's Backpack",
62900: u"Savory Chef's Backpack",
62901: u"Ornate Huntsman's Backpack",
62902: u"Simple Huntsman's Backpack",
62903: u"Intricate Huntsman's Backpack",
62904: u"Sturdy Huntsman's Backpack",
62905: u"Elegant Huntsman's Backpack",
62906: u"Elegant Huntsman's Backpack",
62907: u"Practical Huntsman's Backpack",
62908: u"Intricate Leatherworker's Backpack",
62909: u"Ornate Jeweler's Backpack",
62910: u"Simple Jeweler's Backpack",
62911: u"Intricate Jeweler's Backpack",
62912: u"Sturdy Jeweler's Backpack",
62915: u"Practical Jeweler's Backpack",
62916: u"Practical Leatherworker's Backpack",
62917: u"Ornate Armorsmith's Backpack",
62918: u"Simple Chef's Backpack",
62919: u"Hearty Chef's Backpack",
62920: u"Ornate Tailor's Backpack",
62923: u"Practical Weaponsmith's Backpack",
62924: u"Intricate Tailor's Backpack",
62925: u"Ornate Leatherworker's Backpack",
62926: u"Simple Leatherworker's Backpack",
62927: u"Elegant Tailor's Backpack",
62928: u"Sturdy Leatherworker's Backpack",
62929: u"Elegant Leatherworker's Backpack",
62930: u"Elegant Leatherworker's Backpack",
62931: u"Practical Tailor's Backpack",
62933: u"Elegant Weaponsmith's Backpack",
62934: u"Simple Tailor's Backpack",
62935: u"Sturdy Weaponsmith's Backpack",
62936: u"Sturdy Tailor's Backpack",
62937: u"Intricate Weaponsmith's Backpack",
62938: u"Elegant Weaponsmith's Backpack",
62939: u"Simple Weaponsmith's Backpack",
62940: u"Ornate Weaponsmith's Backpack",
62942: u"Crafter's Backpack Frame",
62943: u"Simple Artificer's Tools",
62944: u"Sturdy Huntsman's Tools",
62945: u"Practical Huntsman's Tools",
62946: u"Intricate Huntsman's Tools",
62947: u"Ornate Huntsman's Tools",
62948: u"Elegant Huntsman's Tools",
62949: u"Simple Weaponsmith's Tools",
62950: u"Sturdy Weaponsmith's Tools",
62951: u"Practical Weaponsmith's Tools",
62952: u"Intricate Weaponsmith's Tools",
62953: u"Ornate Weaponsmith's Tools",
62954: u"Elegant Weaponsmith's Tools",
62955: u"Practical Leatherworker's Tools",
62956: u"Sturdy Leatherworker's Tools",
62957: u"Simple Leatherworker's Tools",
62958: u"Intricate Leatherworker's Tools",
62959: u"Ornate Leatherworker's Tools",
62960: u"Elegant Leatherworker's Tools",
62961: u"Simple Armorsmith's Tools",
62962: u"Sturdy Armorsmith's Tools",
62963: u"Practical Armorsmith's Tools",
62964: u"Intricate Armorsmith's Tools",
62965: u"Ornate Armorsmith's Tools",
62966: u"Elegant Armorsmith's Tools",
62967: u"Simple Huntsman's Tools",
62968: u"Sturdy Artificer's Tools",
62969: u"Practical Artificer's Tools",
62970: u"Intricate Artificer's Tools",
62971: u"Ornate Artificer's Tools",
62972: u"Elegant Artificer's Tools",
62973: u"Simple Tailor's Tools",
62974: u"Sturdy Tailor's Tools",
62975: u"Practical Tailor's Tools",
62976: u"Intricate Tailor's Tools",
62977: u"Ornate Tailor's Tools",
62978: u"Elegant Tailor's Tools",
62979: u"Simple Jeweler's Tools",
62980: u"Sturdy Jeweler's Tools",
62981: u"Practical Jeweler's Tools",
62982: u"Intricate Jeweler's Tools",
62983: u"Ornate Jeweler's Tools",
62985: u"Simple Chef's Tools",
62986: u"Basic Chef's Tools",
62987: u"Savory Chef's Tools",
62988: u"Hearty Chef's Tools",
62989: u"Gourmet Chef's Tools",
66610: u"Cleric's Adventurer's Spectacles",
66612: u"Rampager's Adventurer's Scarf",
66613: u"Cleric's Adventurer's Spectacles",
66616: u"Adventurer's Mantle",
66617: u"Adventurer's Mantle",
66618: u"Rampager's Adventurer's Scarf",
66620: u"Adventurer's Mantle",
66623: u"Rampager's Adventurer's Scarf",
66625: u"Cleric's Adventurer's Spectacles",
66637: u"Piece of Ambrite",
66650: u"Sheet of Ambrite",
67177: u"Recipe: Superior Rune of the Trapper",
67178: u"Recipe: Superior Rune of Radiance",
67179: u"Recipe: Superior Sigil of Incapacitation",
67180: u"Recipe: Superior Sigil of Cleansing",
67181: u"Recipe: Superior Sigil of Cruelty",
67182: u"Recipe: Superior Rune of Evasion",
67339: u"Superior Rune of the Trapper",
67340: u"Superior Sigil of Cleansing",
67341: u"Superior Sigil of Cruelty",
67342: u"Superior Rune of Radiance",
67343: u"Superior Sigil of Incapacitation",
67344: u"Superior Rune of Evasion",
67366: u"Recipe: Pumpkin Oil",
67367: u"Lump of Crystallized Nougat",
67368: u"Sharpening Skull",
67369: u"Recipe: Crystallized Nougat",
67371: u"Flask of Pumpkin Oil",
67377: u"Vial of Maize Balm",
67379: u"Bottle of Batwing Brew",
67380: u"Recipe: Batwing Brew",
67381: u"Recipe: Maize Balm",
67382: u"Recipe: Sharpening Skull",
67522: u"Bountiful Tuning Crystal",
67524: u"Furious Tuning Crystal",
67528: u"Bountiful Maintenance Oil",
67529: u"Furious Maintenance Oil",
67530: u"Furious Sharpening Stone",
67531: u"Bountiful Sharpening Stone",
67832: u"Sheet of Charged Ambrite",
67912: u"Superior Rune of the Defender",
67913: u"Superior Sigil of Blight",
67961: u"Recipe: Bountiful Tuning Crystal",
67962: u"Recipe: Bountiful Sharpening Stone",
67963: u"Recipe: Furious Tuning Crystal",
67964: u"Recipe: Furious Maintenance Oil",
67965: u"Recipe: Furious Sharpening Stone",
67966: u"Recipe: Bountiful Maintenance Oil",
68063: u"Amalgamated Gemstone",
68482: u"Recipe: Superior Rune of the Defender",
68483: u"Recipe: Superior Sigil of Blight",
68620: u"Lucky Great Ram Lantern",
68942: u"Evergreen Lodestone",
68944: u"Auric Sliver",
69370: u"Superior Rune of the Revenant",
70420: u"Bronze Chisel",
70437: u"Soft Wood Pulp",
70447: u"Frog's Breath",
70454: u"Basic Finishing Kit",
70468: u"Alpha Wolf Spirit",
70489: u"Master's Finishing Kit",
70493: u"Essence of Prey",
70522: u"Oiled Orichalcum Shield Boss",
70537: u"Bauxite Ore",
70592: u"Oiled Orichalcum Trident Head",
70612: u"Oiled Orichalcum Boot Lining",
70647: u"Crystalline Bottle",
70658: u"Essence of Natural Protection",
70666: u"Iron Craftsman's Hammer",
70670: u"Soft Wood Pen",
70673: u"Fine Book",
70681: u"Iron Chisel",
70682: u"Oiled Hardened Glove Panel",
70714: u"Sheet of Fine Paper",
70728: u"Oiled Orichalcum Warhorn Mouthpiece",
70730: u"Essence of Ancient Knowledge",
70750: u"Oiled Ancient Torch Handle",
70757: u"Expertise in Trident Crafting",
70759: u"Giant Mushroom Spore",
70763: u"Spirit of the Chaos Gun Experiment",
70765: u"Fine Scribing Kit",
70791: u"Feldspar Core",
70798: u"Expertise in Daysword Crafting",
70799: u"Oiled Gossamer Shoe Upper",
70820: u"Shard of Glory",
70842: u"Mordrem Lodestone",
70850: u"Expertise in Pistol Crafting",
70861: u"Jar of Wurmswort",
70883: u"Writ of Studied Strength",
70891: u"Oiled Orichalcum Legging Lining",
70926: u"Simple Finishing Kit",
70931: u"Bag of Radiant Blotting Powder",
70940: u"Raw Honey",
70955: u"Jungle Grass Seed",
70956: u"Nickel Ore",
70957: u"Maguuma Lily",
71036: u"Essence of Fish",
71049: u"Resonating Core",
71070: u"Seasoned Wood Pen",
71098: u"Oiled Orichalcum Spear Head",
71112: u"Sheet of Medium Sandpaper",
71136: u"Sheet of Quality Paper",
71137: u"Infinitely Spiraled Device",
71146: u"Fine Finishing Kit",
71155: u"Oiled Hardened String",
71159: u"Mithril Craftsman's Hammer",
71171: u"Fine Ink Set",
71195: u"Oiled Gossamer Pant Panel",
71201: u"Boar Bristle",
71203: u"Spirit of the Spark Experiment",
71220: u"Minor Sigil of Agility",
71225: u"Wool Patch",
71277: u"Expertise in Harpoon Gun Crafting",
71307: u"Jute Patch",
71312: u"Oiled Ancient Staff Shaft",
71331: u"Oiled Orichalcum Dagger Blade",
71336: u"Elder Wood Pulp",
71350: u"Oiled Orichalcum Pauldron Casing",
71352: u"Oiled Orichalcum Greatsword Blade",
71398: u"Ascalonian Lumber Core",
71412: u"Oiled Hardened Helmet Padding",
71428: u"Resonating Sliver",
71437: u"Essence of the Bonfire",
71446: u"Deadly Nightshade",
71449: u"Oiled Hardened Trouser Panel",
71473: u"Badge of Tribute",
71514: u"Writ of Studied Accuracy",
71574: u"Oiled Orichalcum Sword Blade",
71581: u"Memory of Battle",
71583: u"Copper Nib",
71613: u"Platinum Nib",
71641: u"Pile of Coarse Sand",
71661: u"Oiled Ancient Harpoon",
71668: u"Darksteel Chisel",
71671: u"Spirit of the Kudzu Experiment",
71692: u"Pouch of Red Pigment",
71702: u"Sheet of Smooth Paper",
71720: u"Spirit of the Venom Experiment",
71723: u"Spirit of The Energizer Experiment",
71724: u"Coarse Book Cover",
71730: u"Bag of Radiant Energy",
71736: u"Vial of Manganese Dioxide",
71776: u"Elder Wood Pen",
71793: u"Oiled Ancient Pistol Frame",
71852: u"Essence of the End",
71861: u"Oiled Ancient Scepter Rod",
71873: u"Essence of Chickens and Eggs",
71875: u"Oiled Hardened Longcoat Padding",
71879: u"Oiled Orichalcum Helmet Lining",
71908: u"Glob of Blue Ooze",
71928: u"Writ of Basic Speed",
71938: u"Fine Book Cover",
71952: u"Pouch of Yellow Pigment",
72009: u"Oiled Orichalcum Horn",
72010: u"Major Sigil of Concentration",
72022: u"Rough Book Cover",
72024: u"Oiled Gossamer Coat Lining",
72048: u"Writ of Strength",
72060: u"Oiled Orichalcum Pauldron Lining",
72092: u"Superior Sigil of Agility",
72126: u"Spirit of the Rodgort's Flame Experiment",
72129: u"Steel Chisel",
72146: u"Oiled Gossamer Pant Lining",
72154: u"Hylek Dart Poison Gland",
72194: u"Silk Patch",
72195: u"Oiled Orichalcum Helmet Casing",
72209: u"Hard Wood Pen",
72258: u"Oiled Ancient Trident Shaft",
72265: u"Copper Reinforcing Plate",
72291: u"Writ of Accuracy",
72293: u"Auric Crown",
72296: u"Vial of Enchanted Water",
72313: u"Oiled Hardened Trouser Padding",
72337: u"Essence of Sharks",
72339: u"Superior Sigil of Concentration",
72349: u"Basic Ink Set",
72428: u"Oiled Orichalcum Pistol Barrel",
72433: u"Bag of Glittering Blotting Powder",
72449: u"Darksteel Craftsman's Hammer",
72458: u"Spirit of Research",
72462: u"Sheet of Fine Sandpaper",
72484: u"Intricate Scribe's Backpack",
72497: u"Smooth Book Cover",
72537: u"Mithril Chisel",
72549: u"Mithril Nib",
72553: u"Oiled Hardened Glove Lining",
72563: u"Writ of Basic Malice",
72572: u"Writ of Calculated Malice",
72579: u"Sheet of Coarse Paper",
72622: u"Expertise in Advanced Axe Crafting",
72624: u"Jar of Wolf Pheromones",
72673: u"Oiled Ancient Longbow Stave",
72752: u"Cotton Patch",
72766: u"Essence of Hope",
72778: u"Spirit of The Lover Experiment",
72781: u"Green Wood Pen",
72795: u"Gold Nib",
72802: u"Pruning Shear",
72807: u"Writ of Learned Strength",
72813: u"Writ of Malice",
72821: u"Writ of Calculated Accuracy",
72826: u"Bag of Luminous Blotting Powder",
72846: u"Essence of the Hunt",
72892: u"Oiled Hardened Boot Upper",
72920: u"Sheet of Coarse Sandpaper",
72925: u"Journeyman's Scribing Kit",
72955: u"Basic Scribing Kit",
72961: u"Expertise in Longbow Crafting",
72995: u"Expertise in Scepter Crafting",
73017: u"Ascalonian Blueprint",
73027: u"Simple Scribe's Backpack",
73034: u"Vial of Linseed Oil",
73056: u"Oiled Orichalcum Chestplate Panel",
73081: u"Steel Reinforcing Plate",
73111: u"Essence of Villains",
73117: u"Spirit of The Bard Experiment",
73120: u"Mithril Reinforcing Plate",
73139: u"Pristine Feather",
73186: u"Quality Book Cover",
73199: u"Recipe: 20-Slot Equipment Pact Box",
73211: u"Steel Craftsman's Hammer",
73217: u"Oiled Gossamer Helm Padding",
73231: u"Vial of Green Goo",
73243: u"Oiled Gossamer Glove Panel",
73248: u"Stabilizing Matrix",
73264: u"Golden Oxide Compound",
73286: u"Writ of Basic Strength",
73289: u"Minor Sigil of Concentration",
73296: u"Oiled Orichalcum Sword Hilt",
73332: u"Essence of Audacity",
73369: u"Energized Branded Crystal",
73417: u"Oiled Gossamer Helm Strap",
73430: u"Tattered Bat Wing",
73444: u"Bag of Shimmering Energy",
73453: u"Ornate Scribe's Tools",
73471: u"Bag of Shimmering Blotting Powder",
73473: u"Expertise in Sword Crafting",
73499: u"Oiled Small Ancient Haft",
73503: u"Sabotaged Weapon Parts",
73504: u"Sawgill Mushroom",
73512: u"Bag of Incandescent Energy",
73524: u"Spirit of the Dusk Experiment",
73555: u"Oiled Ancient Rifle Stock",
73582: u"Mosquito Blood",
73584: u"Palm Lumber Core",
73595: u"Writ of Basic Accuracy",
73615: u"Spirit of The Hunter Experiment",
73665: u"Oiled Orichalcum Legging Panel",
73671: u"Essence of Dragons",
73733: u"Oiled Gossamer Epaulet Padding",
73753: u"Essence of Blooms",
73764: u"Ley-Line Mercuric Compound",
73804: u"Expertise in Hammer Crafting",
73806: u"Oiled Hardened Shoulderguard Panel",
73827: u"Simple Scribe's Tools",
73841: u"Spirit of the Zap Experiment",
73874: u"Oiled Orichalcum Rifle Barrel",
73881: u"Michotl Tribe's Herbs",
73891: u"Spirit of the Storm Experiment",
73903: u"Oiled Hardened Boot Sole",
73944: u"Oiled Orichalcum Axe Blade",
73955: u"Expertise in Runed Staff Crafting",
73964: u"Journeyman's Ink Set",
73980: u"Oiled Gossamer Epaulet Panel",
74016: u"Nuhoch Saliva",
74032: u"Spirit of The Colossus Experiment",
74090: u"Pile of Flax Seeds",
74091: u"Oiled Large Ancient Haft",
74158: u"Spirit of The Device",
74171: u"Expertise in Focus Crafting",
74202: u"Barbed Thorn",
74237: u"Vial of Cobalt Salts",
74253: u"Fire Bug Larva",
74296: u"Crate of Training Supplies",
74326: u"Superior Sigil of Transference",
74328: u"Leaf Fossil",
74335: u"Oiled Orichalcum Shield Backing",
74341: u"Oiled Orichalcum Gauntlet Lining",
74358: u"Basic Book",
74445: u"Simple Book",
74525: u"20-Slot Equipment Pact Box",
74544: u"Essence of Industry",
74596: u"Bloomhunger Sap",
74637: u"Bag of Dolyak Chow",
74643: u"Essence of the Colossal",
74662: u"Spirit of the Dawn Experiment",
74683: u"Lamp Finial",
74719: u"Bag of Incandescent Blotting Powder",
74763: u"Tub of Wood Glue",
74768: u"Journeyman's Finishing Kit",
74788: u"Expertise in Torch Crafting",
74822: u"Expertise in Spear Crafting",
74825: u"Oiled Ancient Scepter Core",
74846: u"Expertise in Warhorn Crafting",
74847: u"Major Sigil of Agility",
74850: u"Intricate Scribe's Tools",
74852: u"Sheet of Rough Paper",
74877: u"Master's Ink Set",
74920: u"Writ of Studied Speed",
74982: u"Pouch of Brown Pigment",
75000: u"Linen Rope",
75001: u"Ornate Scribe's Backpack",
75043: u"Essence of Tentacles",
75049: u"Oiled Ancient Focus Casing",
75051: u"Writ of Studied Malice",
75075: u"Lump of Glass",
75087: u"Essence of Elegance",
75095: u"Expertise in Shield Crafting",
75123: u"Oiled Ancient Focus Core",
75134: u"Oiled Orichalcum Chestplate Padding",
75169: u"Oiled Orichalcum Gauntlet Plates",
75181: u"Silver Nib",
75228: u"Essence of Love",
75232: u"Sun God's Vial",
75237: u"Spirit of the Rage Experiment",
75241: u"Flax Fiber",
75246: u"Spirit of The Chosen Experiment",
75270: u"Pouch of Orange Pigment",
75272: u"Black Powder",
75288: u"Glob of Yellow Ooze",
75321: u"Oiled Orichalcum Torch Head",
75414: u"Glob of Green Ooze",
75498: u"Pile of Beryl Dust",
75506: u"Oiled Hardened Shoulderguard Padding",
75534: u"Spirit of the Tooth of Frostfang Experiment",
75535: u"Spirit of The Legend Experiment",
75570: u"Simple Ink Set",
75606: u"Oiled Orichalcum Boot Casing",
75610: u"Writ of Learned Accuracy",
75612: u"Sheet of Aurillium",
75623: u"Major Sigil of Transference",
75633: u"Sturdy Scribe's Backpack",
75638: u"Coastal Lumber Core",
75648: u"Essence of Light",
75694: u"Pouch of Blue Pigment",
75696: u"Oiled Gossamer Coat Panel",
75698: u"Oiled Ancient Staff Head",
75738: u"Sheet of Superfine Sandpaper",
75739: u"Hard Wood Pulp",
75762: u"Bag of Mortar",
75769: u"Essence of Artistry",
75784: u"Box of Banner Supplies",
75801: u"Essence of Meteorology",
75857: u"Resonating Fragment",
75862: u"Pouch of White Pigment",
75900: u"Bronze Craftsman's Hammer",
75939: u"Essence of Technology",
75963: u"Minor Sigil of Transference",
75976: u"Charged Auric Particles",
75978: u"Oiled Hardened Helmet Strap",
75982: u"Expertise in Advanced Pistol Crafting",
75989: u"Experimental Reactor",
76051: u"Expertise in Axe Crafting",
76076: u"Jar of Paint Base",
76084: u"Oiled Orichalcum Greatsword Hilt",
76093: u"Expertise in Rifle Crafting",
76116: u"Spirit of the Ravenswood Branch",
76131: u"Pumpkin Smasher",
76133: u"Adorned Book",
76146: u"Linen Supply Sack",
76164: u"Oiled Orichalcum Hammer Head",
76167: u"Oiled Gossamer Shoe Sole",
76179: u"Freshwater Pearl",
76209: u"Zinc Ore",
76216: u"Linen Patch",
76232: u"Oiled Ancient Short-Bow Stave",
76281: u"Practical Scribe's Tools",
76297: u"Master's Scribing Kit",
76354: u"Spirit of the Carcharias Experiment",
76374: u"Glob of Red Ooze",
76411: u"Seasoned Wood Pulp",
76453: u"Strategic Defense Map of the Mists",
76460: u"Expertise in Dagger Crafting",
76478: u"Writ of Learned Malice",
76518: u"Simple Scribing Kit",
76523: u"Mamnoon Aloe",
76614: u"Gossamer Patch",
76682: u"Expertise in Short Bow Crafting",
76708: u"Green Wood Pulp",
76799: u"Pouch of Green Pigment",
76806: u"Essence of Concoctions",
76826: u"Oiled Orichalcum Dagger Hilt",
76839: u"Milling Basin",
76858: u"Oiled Gossamer Glove Padding",
76876: u"Sheet of Extra Coarse Sandpaper",
76903: u"Fountain Torch",
77018: u"Expertise in Mace Crafting",
77026: u"Spirit of the Howl Experiment",
77064: u"Oiled Orichalcum Mace Head",
77071: u"Elaborate Book",
77089: u"Expertise in Nightsword Crafting",
77112: u"Pouch of Purple Pigment",
77128: u"Writ of Calculated Strength",
77139: u"Expertise in Staff Crafting",
77174: u"Sturdy Scribe's Tools",
77186: u"Oiled Hardened Longcoat Panel",
77190: u"Essence of Ancient Mysticism",
77223: u"Practical Scribe's Backpack",
77256: u"Milling Stone",
77567: u"Tuning Icicle",
77569: u"Tin of Fruitcake",
77576: u"Mug of Eggnog",
77604: u"Wintersday Gift",
77610: u"Recipe: Tuning Icicle",
77619: u"Recipe: Tin of Fruitcake",
77632: u"Peppermint Oil",
77651: u"Candy Cane",
77653: u"Recipe: Peppermint Oil",
77685: u"Lucky Great Monkey Lantern",
78230: u"Skelk Liver",
78417: u"Oily Fish Meat",
78544: u"Expertise in Advanced Short Bow Crafting",
78548: u"Spirit of the Hunt",
78566: u"Infused Oily Fish Meat",
78604: u"Superior Skelk Liver",
78685: u"Essence of the Hunt",
79453: u"Tribute to Endeavor",
79845: u"Tribute to Friendship",
80076: u"Lucky Great Rooster Lantern",
80201: u"Tribute to the Man o' War",
81163: u"Tribute to Liturgy",
81871: u"Tribute to Arah",
81974: u"Tribute to the Queen",
82100: u"Recipe: Mordant Infantry Bow",
82117: u"Warbeast Gossamer Helm Strap",
82118: u"Encased Matrix",
82125: u"Warbeast Hardened Boot Sole",
82129: u"Recipe: Mordant Edge",
82150: u"Warbeast Boots",
82156: u"Harrier's Bounty Hunter's Breastplate",
82183: u"Recipe: Mordant Sickle",
82205: u"Warbeast Hardened Trouser Padding",
82207: u"Mordant Infantry Bow",
82208: u"Recipe: Bounty Hunter's Breastplate",
82255: u"Recipe: Sunspear Horn",
82259: u"Recipe: Bounty Hunter's Greaves",
82265: u"Recipe: Sunspear Recurve",
82266: u"Warbeast Hardened Glove Panel",
82273: u"Recipe: Bounty Hunter's Helmet",
82274: u"Recipe: Spearmarshal's Pauldrons",
82311: u"Harrier's Bounty Hunter's Shoulderpads",
82321: u"Harrier's Bounty Hunter's Mantle",
82323: u"Warbeast Gossamer Coat Panel",
82326: u"Warbeast Gossamer Epaulet Panel",
82329: u"Harrier's Bounty Hunter's Greaves",
82332: u"Warbeast Helmet",
82370: u"Recipe: Spearmarshal's Boots",
82378: u"Spearmarshal's Mantle",
82383: u"Spearmarshal's Tassets",
82384: u"Warbeast Gossamer Pant Panel",
82385: u"Warbeast Gossamer Glove Panel",
82415: u"Warbeast Orichalcum Helmet Casing",
82421: u"Recipe: Bounty Hunter's Pauldrons",
82453: u"Recipe: Sunspear Carver",
82469: u"Recipe: Harrier's Warbeast Shoes",
82470: u"Recipe: Harrier's Warbeast Jerkin",
82474: u"Recipe: Harrier's Warbeast Mask",
82491: u"Recipe: Bounty Hunter's Gauntlets",
82493: u"Recipe: Harrier's Warbeast Breastplate",
82501: u"Warbeast Hardened Longcoat Padding",
82523: u"Embossed Matrix",
82550: u"Recipe: Harrier's Warbeast Gauntlets",
82551: u"Warbeast Orichalcum Boot Casing",
82571: u"Harrier's Bounty Hunter's Pauldrons",
82573: u"Spearmarshal's Mask",
82579: u"Spearmarshal's Shoes",
82582: u"Sliver of Twitching Forgemetal",
82584: u"Recipe: Bounty Hunter's Jerkin",
82590: u"Warbeast Gloves",
82601: u"Recipe: Spearmarshal's Mask",
82603: u"Harrier's Bounty Hunter's Gloves",
82625: u"Recipe: Superior Rune of the Mirage",
82633: u"Superior Rune of the Holosmith",
82635: u"Warbeast Gossamer Shoe Upper",
82647: u"Spearmarshal's Jerkin",
82656: u"Elonian Matrix",
82659: u"Recipe: Mordant Cesta",
82663: u"Recipe: Mordant Longbow",
82668: u"Recipe: Mordant Scutum",
82678: u"Pulsing Brandspark",
82688: u"Recipe: Spearmarshal's Tasset",
82694: u"Recipe: Superior Rune of the Spellbreaker",
82709: u"Recipe: Superior Rune of the Firebrand",
82713: u"Recipe: Superior Rune of the Scourge",
82725: u"Recipe: Superior Rune of the Deadeye",
82730: u"Recipe: Mordant Warclub",
82737: u"Recipe: Sunspear Sidearm",
82744: u"Warbeast Hardened Shoulderguard Panel",
82749: u"Warbeast Orichalcum Gauntlet Lining",
82754: u"Warbeast Greaves",
82758: u"Warbeast Shoulderpads",
82760: u"Recipe: Superior Rune of the Soulbeast",
82765: u"Sunspear Runestone",
82771: u"Spearmarshal's Pants",
82773: u"Warbeast Jerkin",
82785: u"Mordant Trumpet",
82791: u"Superior Rune of the Deadeye",
82796: u"Oiled Forged Scrap",
82800: u"Mordant Scutum",
82822: u"Spearmarshal's Gauntlets",
82828: u"Mordant Revolver",
82836: u"Warbeast Pants",
82853: u"Recipe: Harrier's Warbeast Greaves",
82859: u"Warbeast Mask",
82882: u"Insignia of the Harrier",
82885: u"Warbeast Pauldrons",
82887: u"Spearmarshal's Vambraces",
82900: u"Recipe: Spearmarshal's Helm",
82961: u"Recipe: Mordant Slicer",
82978: u"Recipe: Sunspear Rod",
82990: u"Spearmarshal's Pauldrons",
83005: u"Warbeast Orichalcum Chestplate Padding",
83025: u"Harrier's Bounty Hunter's Vestments",
83070: u"Recipe: Spearmarshal's Breastplate",
83078: u"Recipe: Sunspear Wallshield",
83101: u"Recipe: Spearmarshal's Pants",
83103: u"Eye of Kormir",
83106: u"Recipe: Harrier's Warbeast Gloves",
83124: u"Recipe: Spearmarshal's Leggings",
83141: u"Recipe: Harrier's Warbeast Pauldrons",
83146: u"Mordant Brazier",
83154: u"Recipe: Spearmarshal's Mantle",
83184: u"Recipe: Sunspear Cutlass",
83187: u"Recipe: Bounty Hunter's Leggings",
83214: u"Warbeast Hardened Longcoat Panel",
83239: u"Warbeast Vestments",
83264: u"Insignia of the Spearmarshal",
83284: u"Ley-Infused Sand",
83319: u"Warbeast Hardened Helmet Strap",
83336: u"Spearmarshal's Breastplate",
83337: u"Recipe: Harrier's Warbeast Shoulders",
83338: u"Superior Rune of the Firebrand",
83342: u"Warbeast Orichalcum Gauntlet Plates",
83351: u"Recipe: Bounty Hunter's Mantle",
83355: u"Spearmarshal's Cowl",
83361: u"Recipe: Superior Rune of the Renegade",
83371: u"Recipe: Bounty Hunter's Tassets",
83391: u"Harrier's Bounty Hunter's Helmet",
83392: u"Warbeast Leggings",
83416: u"Recipe: Sunspear Firelight",
83423: u"Superior Rune of the Weaver",
83430: u"Recipe: Bounty Hunter's Gloves",
83433: u"Recipe: Bounty Hunter's Shoulderpads",
83438: u"Spearmarshal's Gloves",
83440: u"Warbeast Breastplate",
83450: u"Recipe: Bounty Hunter's Cowl",
83453: u"Mordant Slayer",
83471: u"Harrier's Bounty Hunter's Cowl",
83474: u"Warbeast Orichalcum Helmet Lining",
83502: u"Superior Rune of the Renegade",
83531: u"Recipe: Sunspear Smasher",
83537: u"Recipe: Harrier's Warbeast Cowl",
83538: u"Recipe: Bounty Hunter's Vestments",
83559: u"Warbeast Gossamer Shoe Sole",
83565: u"Harrier's Bounty Hunter's Jerkin",
83594: u"Warbeast Mantle",
83616: u"Warbeast Gossamer Pant Lining",
83626: u"Harrier's Bounty Hunter's Boots",
83663: u"Superior Rune of the Scourge",
83693: u"Recipe: Harrier's Warbeast Pants",
83696: u"Recipe: Mordant Trumpet",
83700: u"Recipe: Harrier's Warbeast Helmet",
83709: u"Harrier's Bounty Hunter's Vambraces",
83717: u"Warbeast Vambraces",
83731: u"Recipe: Mordant Crusher",
83735: u"Mordant Slicer",
83741: u"Harrier's Bounty Hunter's Leggings",
83748: u"Recipe: Sunspear Runestone",
83757: u"Congealed Putrescence",
83771: u"Warbeast Shoes",
83781: u"Spearmarshal's Boots",
83792: u"Warbeast Gauntlets",
83803: u"Recipe: Bounty Hunter's Mask",
83813: u"Warbeast Orichalcum Pauldron Lining",
83814: u"Recipe: Sunspear Standard",
83841: u"Recipe: Spearmarshal's Cowl",
83842: u"Recipe: Mordant Slayer",
83848: u"Recipe: Mordant Crosier",
83856: u"Recipe: Sunspear Matchlock",
83873: u"Harrier's Bounty Hunter's Mask",
83874: u"Recipe: Harrier's Warbeast Mantle",
83875: u"Recipe: Harrier's Warbeast Vambraces",
83883: u"Warbeast Orichalcum Boot Lining",
83908: u"Recipe: Harrier's Warbeast Tassets",
83916: u"Warbeast Hardened Trouser Panel",
83926: u"Inscription of the Spearmarshal",
83931: u"Warbeast Hardened Shoulderguard Padding",
83934: u"Mordant Sword",
83944: u"Recipe: Mordant Brazier",
83964: u"Superior Rune of the Soulbeast",
83970: u"Recipe: Sunspear Greatblade",
83972: u"Harrier's Bounty Hunter's Tassets",
83974: u"Mordant Inscription",
84012: u"Warbeast Gossamer Helm Padding",
84021: u"Recipe: Mordant Key",
84061: u"Warbeast Orichalcum Pauldron Casing",
84070: u"Recipe: Spearmarshal's Jerkin",
84107: u"Recipe: Harrier's Warbeast Leggings",
84125: u"Recipe: Spearmarshal's Vambraces",
84127: u"Superior Rune of the Mirage",
84131: u"Spearmarshal's Greaves",
84135: u"Recipe: Bounty Hunter's Shoes",
84136: u"Recipe: Bounty Hunter's Vambraces",
84150: u"Recipe: Harrier's Warbeast Vestments",
84153: u"Recipe: Spearmarshal's Vestments",
84189: u"Spearmarshal's Shoulderpads",
84191: u"Warbeast Gossamer Glove Padding",
84197: u"Warbeast Orichalcum Chestplate Panel",
84210: u"Warbeast Orichalcum Legging Panel",
84221: u"Recipe: Spearmarshal's Shoes",
84226: u"Warbeast Hardened Glove Lining",
84231: u"Recipe: Spearmarshal's Greaves",
84247: u"Recipe: Bounty Hunter's Pants",
84275: u"Recipe: Sunspear Pocketbow",
84277: u"Warbeast Gossamer Epaulet Padding",
84288: u"Recipe: Mordant Inscription",
84308: u"Harrier's Bounty Hunter's Pants",
84309: u"Recipe: Mordant Revolver",
84316: u"Spearmarshal's Vestments",
84373: u"Mordant Sickle",
84383: u"Spearmarshal's Helmet",
84403: u"Warbeast Hardened Boot Upper",
84406: u"Spearmarshal's Leggings",
84454: u"Recipe: Mordant Bonespitter",
84509: u"Harrier's Bounty Hunter's Gauntlets",
84539: u"Warbeast Orichalcum Legging Lining",
84541: u"Harrier's Bounty Hunter's Shoes",
84560: u"Recipe: Harrier's Warbeast Boots",
84572: u"Recipe: Superior Rune of the Weaver",
84602: u"Warbeast Hardened Helmet Padding",
84607: u"Recipe: Spearmarshal's Gloves",
84623: u"Warbeast Gossamer Coat Lining",
84635: u"Recipe: Spearmarshal's Shoulderpads",
84677: u"Warbeast Cowl",
84683: u"Recipe: Bounty Hunter's Boots",
84698: u"Recipe: Sunspear Thrasher",
84707: u"Mordant Bonespitter",
84729: u"Recipe: Sunspear Warsickle",
84744: u"Recipe: Spearmarshal's Gauntlets",
84745: u"Warbeast Tassets",
84749: u"Superior Rune of the Spellbreaker",
84751: u"Recipe: Superior Rune of the Holosmith",
84752: u"Gold Bound Matrix",
85379: u"Lunatic Templar Breastplate",
85382: u"Recipe: Lunatic Noble Shoulders",
85385: u"Lunatic Acolyte Boots",
85386: u"Recipe: Lunatic Acolyte Coat",
85389: u"Recipe: Lunatic Noble Gloves",
85390: u"Recipe: Lunatic Noble Mask",
85392: u"Lunatic Acolyte Mantle",
85395: u"Recipe: Lunatic Templar Pauldrons",
85405: u"Lunatic Gossamer Insignia",
85407: u"Lunatic Noble Gloves",
85410: u"Recipe: Lunatic Templar Greaves",
85411: u"Lunatic Templar Tassets",
85412: u"Recipe: Lunatic Noble Coat",
85415: u"Lunatic Templar Greaves",
85420: u"Lunatic Noble Pants",
85425: u"Lunatic Templar Gauntlets",
85427: u"Recipe: Lunatic Templar Gauntlets",
85432: u"Recipe: Lunatic Acolyte Boots",
85439: u"Recipe: Lunatic Acolyte Mantle",
85443: u"Recipe: Lunatic Gossamer Insignia",
85445: u"Lunatic Templar Helm",
85449: u"Lunatic Noble Coat",
85463: u"Lunatic Noble Shoulders",
85464: u"Lunatic Acolyte Mask",
85474: u"Recipe: Lunatic Acolyte Pants",
85477: u"Lunatic Acolyte Coat",
85486: u"Recipe: Lunatic Templar Tassets",
85487: u"Recipe: Lunatic Noble Pants",
85489: u"Recipe: Lunatic Templar Breastplate",
85490: u"Lunatic Acolyte Pants",
85497: u"Lunatic Noble Mask",
85498: u"Lunatic Templar Pauldrons",
85500: u"Recipe: Lunatic Templar Helm",
85503: u"Lunatic Acolyte Gloves",
85513: u"Recipe: Lunatic Noble Boots",
85515: u"Recipe: Lunatic Acolyte Gloves",
85519: u"Lunatic Noble Boots",
85520: u"Recipe: Lunatic Acolyte Mask",
85642: u"Marshal's Astral Revolver",
85670: u"Recipe: Astral Knobkerrie",
85713: u"Superior Rune of the Stars",
85715: u"Recipe: Astral Avenger",
85726: u"Recipe: Astral Apparatus",
85738: u"Recipe: Astral Revolver",
85760: u"Marshal's Astral Cleaver",
85788: u"Marshal's Astral Beacon",
85804: u"Marshal's Astral Razor",
85818: u"Recipe: Astral Beacon",
85820: u"Recipe: Astral Cleaver",
85828: u"Kralkatite Ingot",
85834: u"Recipe: Superior Rune of the Stars",
85844: u"Recipe: Corsair Sharpening Stone",
85849: u"Recipe: Astral Razor",
85857: u"Marshal's Astral Short Bow",
85866: u"Recipe: Corsair Maintenance Oil",
85902: u"Recipe: Superior Sigil of the Stars",
85919: u"Marshal's Astral Longbow",
85929: u"Recipe: Astral Spire",
85968: u"Recipe: Astral Khopesh",
85985: u"Recipe: Astral Disk",
86001: u"Recipe: Astral Short Bow",
86012: u"Marshal's Astral Spire",
86016: u"Corsair Maintenance Oil",
86066: u"Marshal's Astral Khopesh",
86069: u"Kralkatite Ore",
86092: u"Marshal's Astral Harbinger",
86095: u"Recipe: Astral Scepter",
86105: u"Marshal's Astral Orrery",
86107: u"Recipe: Astral Orrery",
86159: u"Recipe: Astral Longbow",
86170: u"Superior Sigil of the Stars",
86203: u"Marshal's Astral Avenger",
86214: u"Recipe: Astral Cannon",
86250: u"Marshal's Astral Apparatus",
86266: u"Tribute to the Dark Arts",
86269: u"Powdered Rose Quartz",
86283: u"Recipe: Kralkatite Ingot",
86287: u"Corsair Tuning Crystal",
86315: u"Recipe: Corsair Tuning Crystal",
86317: u"Marshal's Astral Cannon",
86321: u"Recipe: Astral Harbinger",
86337: u"Marshal's Astral Disk",
86355: u"Marshal's Astral Scepter",
86376: u"Marshal's Astral Knobkerrie",
86378: u"Corsair Sharpening Stone",
86403: u"Bringer's Draconic Pauldrons",
86413: u"Giver's Pearl Handcannon",
86416: u"Bringer's Emblazoned Shoulders",
86425: u"Bringer's Emblazoned Gloves",
86428: u"Bringer's Draconic Boots",
86429: u"Bringer's Exalted Pants",
86441: u"Giver's Pearl Broadsword",
86447: u"Giver's Pearl Trident",
86448: u"Bringer's Exalted Gloves",
86462: u"Giver's Pearl Blunderbuss",
86464: u"Giver's Pearl Speargun",
86466: u"Giver's Pearl Reaver",
86500: u"Bringer's Emblazoned Helm",
86508: u"Bringer's Draconic Legs",
86511: u"Bringer's Exalted Boots",
86513: u"Bringer's Draconic Coat",
86517: u"Giver's Pearl Brazier",
86522: u"Giver's Pearl Quarterstaff",
86529: u"Bringer's Emblazoned Pants",
86563: u"Bringer's Draconic Gauntlets",
86565: u"Bringer's Exalted Mantle",
86582: u"Giver's Pearl Rod",
86584: u"Giver's Pearl Bludgeoner",
86585: u"Bringer's Intricate Gossamer Insignia",
86589: u"Giver's Pearl Conch",
86600: u"Bringer's Emblazoned Boots",
86601: u"Snowflake",
86603: u"Giver's Pearl Crusher",
86611: u"Giver's Pearl Carver",
86627: u"Snow Diamond",
86643: u"Recipe: Bringer's Intricate Gossamer Insignia",
86657: u"Bringer's Exalted Coat",
86659: u"Bringer's Draconic Helm",
86677: u"Giver's Pearl Siren",
86680: u"Giver's Pearl Shell",
86685: u"Giver's Orichalcum-Imbued Inscription",
86695: u"Giver's Pearl Sabre",
86708: u"Bringer's Exalted Masque",
86718: u"Giver's Pearl Needler",
86720: u"Giver's Pearl Impaler",
86727: u"Giver's Pearl Stinger",
86728: u"Bringer's Emblazoned Coat",
86734: u"Recipe: Giver's Orichalcum-Imbued Inscription",
1000043: u"Guild Catapult",
1000051: u"Vault Transport",
1000073: u"+5% Experience Public Banner",
1000074: u"Guild Banquet",
1000084: u"+5% Karma Banner",
1000085: u"Road Marker",
1000086: u"+10% Magic Find Banner",
1000087: u"+10% Gathering Bonus Banner",
1000093: u"+15% Gathering and 10% Swiftness Banner",
1000094: u"+10% Kill Gold and 15% Magic Find Banner",
1000095: u"+10% Karma and 10% Experience Banner",
1000096: u"Guild Heroes Banner",
1000100: u"+5% Gold from Kills Banner",
1000114: u"Guild World Event",
1000121: u"Crooked Mushroom",
1000126: u"Bonfire",
1000131: u"Lion Statue",
1000132: u"Creepy Jack-o'-Lantern",
1000134: u"Thin Candlestick",
1000136: u"Summit Banner",
1000139: u"Throne",
1000143: u"Loaded Wagon",
1000144: u"Cheery Balloon Bundle",
1000145: u"Sylvari Summit Banner",
1000147: u"Iron Guards",
1000148: u"Campfire",
1000149: u"Potted Slender Fern Tree",
1000150: u"Hedge",
1000153: u"Broken Ascalonian Pillar",
1000154: u"Lattice Planter with Loosestrife",
1000155: u"Head Topiary",
1000164: u"Distressed Lion Statue",
1000171: u"Mists Griffon Statue",
1000172: u"Plush Armchair",
1000173: u"Guild Initiative Banner",
1000175: u"Fancy Table",
1000177: u"Festival Tent",
1000178: u"Emergency Waypoint",
1000179: u"Blue Cushion",
1000183: u"Auto Turrets",
1000185: u"Green Cushion",
1000186: u"Guild Ballista Blueprints",
1000190: u"Massive Balloon Bouquet",
1000193: u"Potted Shaggy Palm",
1000195: u"Red Pirate Flag",
1000196: u"Mists Minotaur Statue",
1000198: u"Bowl Topiary",
1000199: u"Lattice Planter with Red Petunias",
1000200: u"Potted Jungle Grass",
1000202: u"Basic Table",
1000205: u"Demolished Mast Pole",
1000206: u"Potted Sprouting Night Thistle",
1000209: u"Basic Shrub",
1000212: u"Empty Square Planter",
1000214: u"Human Summit Banner",
1000216: u"Lattice Planter with Daisies",
1000217: u"Red Flag",
1000218: u"Rectangular Planter",
1000222: u"Dune Roller",
1000223: u"Basic Planter",
1000224: u"Basic Bookshelf",
1000226: u"Blue Pirate Flag",
1000228: u"Worn Arch",
1000230: u"Gold Pillar",
1000235: u"Lattice Planter with Blue Petunias",
1000239: u"Simple Table",
1000243: u"Potted Blue Moa Fern",
1000244: u"Mists Rock Dog Statue",
1000247: u"Broken Sandstone Pillar",
1000248: u"Large Festival Tent",
1000249: u"Charr Statue",
1000250: u"Gold Wall",
1000252: u"Lattice Planter",
1000254: u"White Flag",
1000255: u"Potted Night Thistle Bud",
1000257: u"Fine Armor Stand",
1000263: u"Broken Mast Pole",
1000265: u"Potted Mature Night Thistle",
1000270: u"Block Topiary",
1000273: u"Guild Barstool",
1000276: u"Yellow Cushion",
1000277: u"Festive Balloon Bundle",
1000278: u"Capped Gold Pillar",
1000279: u"Jack-o'-Lantern",
1000281: u"Potted Broad Paddlefrond",
1000283: u"Square Guild Bar",
1000286: u"Norn Summit Flag",
1000291: u"Red Cushion",
1000293: u"Fancy Round Table",
1000294: u"Guild Chair",
1000295: u"Lit Wagon",
1000297: u"Tall Lattice",
1000305: u"Hedge Pillar",
1000306: u"Sabotage Depot",
1000307: u"Armored Dolyaks",
1000312: u"Square Planter",
1000315: u"Lattice",
1000317: u"Griffon Statue",
1000319: u"Guild Stool",
1000325: u"Rustic Brazier",
1000327: u"Guild Flame Ram Blueprint",
1000329: u"Hardened Siege",
1000330: u"Potted Night Thistle",
1000333: u"Fun Balloon Bundle",
1000337: u"Potted Djinn's Tongue",
1000343: u"Human Summit Flag",
1000345: u"Centaur Banner",
1000352: u"Basic Flagpole",
1000365: u"Packed Dolyaks",
1000369: u"Sandstone Pillar",
1000370: u"Hedge Planter",
1000372: u"Fuchsia Balloon",
1000383: u"Invulnerable Dolyaks",
1000386: u"Armor Stand",
1000388: u"Keg Rack",
1000389: u"Hardened Gates",
1000393: u"Asuran Summit Banner",
1000395: u"Ascalonian Lamp",
1000399: u"Turtle Banner",
1000400: u"Red Balloon",
1000401: u"Fire Circle",
1000403: u"White Balloon",
1000405: u"Squat Thorny Mushroom",
1000409: u"Potted Tree",
1000411: u"Crooked Thorny Mushroom",
1000412: u"Mists Drake Statue",
1000413: u"Basic Chair",
1000414: u"Purple Balloon",
1000418: u"Presence of the Keep",
1000426: u"Potted Cypress",
1000430: u"Charr Summit Flag",
1000431: u"Plush Sofa",
1000432: u"Green Balloon",
1000436: u"Potted Petticoat Palm",
1000437: u"Basic Candle",
1000438: u"Guild Bench",
1000448: u"Highback Chair",
1000449: u"Charr Summit Banner",
1000453: u"Refined Street Lamp",
1000456: u"Potted Maguuma Lily (Double Bloom)",
1000457: u"Potted Gold Fern",
1000459: u"Mast Pole",
1000460: u"Keg",
1000463: u"Potted Shrub",
1000470: u"Short Guild Bar",
1000471: u"Ceramic Planter",
1000475: u"Sphere Topiary",
1000479: u"Potted Croton",
1000480: u"Square Firepit",
1000481: u"Red Throw Pillow",
1000483: u"Minor Supply Drop",
1000485: u"Summit Flag",
1000488: u"Potted Palm",
1000492: u"Marriner Statue",
1000496: u"Potted Fern Tree",
1000503: u"Blue Balloon",
1000505: u"Guild Arrow Cart Blueprints",
1000508: u"Yellow Balloon",
1000510: u"Square Candlestick",
1000511: u"Wagon",
1000513: u"Invulnerable Fortifications",
1000516: u"Basic Crate",
1000518: u"Large Square Pillar",
1000519: u"Potted Fan Palm",
1000521: u"Body Topiary",
1000525: u"Potted Tall Cypress",
1000526: u"Simple Shelf",
1000528: u"Library Shelf",
1000531: u"Row of Candles",
1000532: u"Potted Junglerice",
1000534: u"Thorny Jack-o'-Lantern",
1000537: u"Pillar Candle",
1000540: u"Divinity Street Lamp",
1000542: u"Asuran Summit Flag",
1000548: u"Basic Pedestal",
1000549: u"Vined Lattice",
1000551: u"Potted Maguuma Lily (Triple Bloom)",
1000553: u"Potted Lady Palm",
1000554: u"Short Guild Banquet Table",
1000556: u"Pumpkin",
1000559: u"Chilling Fog",
1000560: u"Orange Balloon",
1000561: u"Square Cabinet",
1000562: u"Speedy Dolyaks",
1000567: u"Green Pirate Flag",
1000569: u"Worn Pillar",
1000573: u"Hedge Corner",
1000574: u"Basic Column",
1000575: u"Potted Reaching Gold Fern",
1000577: u"Ascalonian Pillar",
1000578: u"Thorny Mushroom",
1000582: u"Basic Torch",
1000583: u"Watchtower",
1000584: u"Potted Bamboo Cluster",
1000587: u"Mushroom",
1000589: u"Basic Boulder",
1000590: u"Dragon Banner",
1000591: u"Potted Paddlefrond",
1000595: u"Immense Lion Statue",
1000596: u"Gold Firepit",
1000599: u"Fancy Chair",
1000601: u"Basic Basket",
1000606: u"Square Guild Banquet Table",
1000612: u"Broken Square Pillar",
1000614: u"Potted Bamboo",
1000617: u"Sylvari Summit Flag",
1000618: u"Potted Maguuma Lily",
1000619: u"Spire Topiary",
1000620: u"Basic Tree",
1000626: u"Divinity Lamp",
1000627: u"Lattice Planter with Orange Petunias",
1000631: u"Pew",
1000634: u"Bundle of Corn Stalks",
1000639: u"Norn Summit Banner",
1000642: u"Elaborate Sandstone Pillar",
1000643: u"Fancy Armchair",
1000645: u"Scarecrow",
1000661: u"Snow Pile",
1000662: u"Wintersday Tree",
1000663: u"Holiday Wreath",
1000664: u"Festive Streetlamp",
1000666: u"Snow Maker",
1000668: u"Gorseval Tentacle",
1000669: u"Bronze Slothasor Trophy",
1000670: u"Silver Tequatl Trophy",
1000671: u"Silver Slothasor Trophy",
1000672: u"Bronze Vale Guardian Trophy",
1000674: u"Slothasor Mushroom",
1000675: u"Vale Guardian Pieces",
1000676: u"Chak Gerent Eye",
1000677: u"Sabetha Flamethrower Fragment",
1000679: u"Bronze Sabetha Trophy",
1000680: u"Red Festival Umbrella",
1000681: u"Tequatl Tailbone",
1000683: u"Red Lantern",
1000684: u"Monkey Statue",
1000685: u"Shatterer Crystal",
1000686: u"Bronze Triple Trouble Trophy",
1000687: u"White Mantle Abomination Crystal",
1000688: u"Bronze Chak Gerent Trophy",
1000689: u"Silver Triple Trouble Trophy",
1000690: u"Triple Trouble Tooth",
1000692: u"Silver Vale Guardian Trophy",
1000694: u"Silver Mordremoth Trophy",
1000695: u"Firecracker",
1000696: u"Bronze Shatterer Trophy",
1000697: u"Silver White Mantle Abomination Trophy",
1000698: u"Lunar Arch",
1000699: u"Red Festival Tent",
1000700: u"Bronze Gorseval Trophy",
1000704: u"Silver Shatterer Trophy",
1000705: u"Silver Sabetha Trophy",
1000706: u"Bronze Tequatl Trophy",
1000707: u"Bronze White Mantle Abomination Trophy",
1000708: u"Bronze Mordremoth Trophy",
1000709: u"Ram Statue",
1000711: u"Silver Gorseval Trophy",
1000712: u"Silver Chak Gerent Trophy",
1000714: u"Mordremoth Mandible",
1000719: u"Super Pagoda Column",
1000720: u"Super Tree",
1000721: u"Super Cloud",
1000722: u"Painting of Moto",
1000723: u"Super Rock Platform",
1000724: u"Super Rock Wall",
1000725: u"Super Mountain",
1000726: u"Unimpressive King Frog",
1000727: u"Super Rock",
1000728: u"Super Small Rock",
1000729: u"Super King Frog",
1000730: u"Super Rock Ramp",
1000749: u"Bronze Xera Trophy",
1000750: u"Bronze Keep Construct Trophy",
1000752: u"Xera's Ribbon Scrap",
1000753: u"Silver Keep Construct Trophy",
1000755: u"Silver Siege the Stronghold Trophy",
1000757: u"Bloodstone Turret Fragment",
1000759: u"Keep Construct Rubble",
1000760: u"Bronze Siege the Stronghold Trophy",
1000761: u"Silver Xera Trophy",
1000764: u"Basic Grave Marker",
1000766: u"Large Crystal Block of the Solid Ocean",
1000767: u"Female Norn Holo-Dancer",
1000769: u"Fragments of the Solid Ocean",
1000771: u"Wave of the Solid Ocean",
1000772: u"Male Norn Holo-Dancer",
1000776: u"Chunk of the Solid Ocean",
1000777: u"Spire of the Solid Ocean",
1000780: u"Large Block of the Solid Ocean",
1000786: u"Block of the Solid Ocean",
1000788: u"Crystal Block of the Solid Ocean",
1000791: u"Eternal Flame",
1000792: u"Demon Statue",
1000793: u"Mausoleum",
1000794: u"Hanging Tree",
1000795: u"Large Hemisphere of Snow",
1000796: u"Cuboid of Snow",
1000797: u"Large Wedge of Snow",
1000798: u"Cube of Snow",
1000800: u"Large Cuboid of Snow",
1000801: u"Large Cube of Snow",
1000802: u"Hemisphere of Snow",
1000803: u"Wedge of Snow",
1000804: u"Rooster Statue",
1000805: u"Impaled Prisoner",
1000806: u"Fragment of Saul's Burden",
1000807: u"Silver Mursaat Overseer Trophy",
1000811: u"Silver Samarog Trophy",
1000812: u"Cairn the Indomitable Shard",
1000813: u"Rec Room Floor Tile",
1000818: u"Bronze Cairn the Indomitable Trophy",
1000820: u"Bronze Mursaat Overseer Trophy",
1000823: u"Silver Deimos Trophy",
1000825: u"Silver Cairn the Indomitable Trophy",
1000826: u"Bronze Samarog Trophy",
1000829: u"Bronze Deimos Trophy",
1000833: u"Super Flower",
1000836: u"Super Mushroom",
1000837: u"Super Forest House",
1000839: u"Super Campfire",
1000843: u"Super Pine Tree",
1000873: u"Elonian Wood Table",
1000876: u"Tall Elonian Column",
1000879: u"Elonian Stone Tower",
1000880: u"Weathered Elonian Column",
1000882: u"Elonian Hawk Statue",
1000886: u"Elonian Railing",
1000888: u"Weathered Elonian Obelisk",
1000889: u"Elonian Snake Statue",
1000890: u"Weathered Elonian Arch",
1000891: u"Elonian Wood Chair",
1000897: u"Short Elonian Column",
1000903: u"Elonian Lattice",
1000908: u"Ghostly Dining Table",
1000909: u"Spooky Dining Chair",
1000910: u"Spooky Dining Table",
1000911: u"Ghostly Dining Chair",
1000912: u"Haunted Armchair",
1000914: u"Bronze Statue of Grenth Trophy",
1000917: u"Dhuum's Token",
1000919: u"Desmina's Token",
1000922: u"Bronze River of Souls Trophy",
1000923: u"Silver Statue of Grenth Trophy",
1000932: u"Silver Dhuum Trophy",
1000934: u"Silver River of Souls Trophy",
1000935: u"Statue of Grenth Token",
1000940: u"Silver Desmina Trophy",
1000941: u"Bronze Dhuum Trophy",
1000942: u"Bronze Desmina Trophy",
1000944: u"River of Souls Token",
1000946: u"Green Wintersday Gift",
1000947: u"Antler Pattern Snowflake Platform",
1000951: u"Arrow Pattern Snowflake Platform",
1000952: u"Blue Wintersday Gift",
1000954: u"White Wintersday Gift",
1000955: u"Tray of Eggnog",
}
|
xanthics/gw2craft-python3
|
auto_gen/Items_en.py
|
Python
|
mit
| 316,034
|
[
"Amber",
"CRYSTAL"
] |
d25bb3ec07735aa9b6d68f3ff47c67a7a33866633be8fa706d454576ee67e08d
|
"""
Recreate the trajectory fragments that led to the discovery of a snapshot,
specified by the tuple (epoch, trajectory, snapshot) and write as a pdb file
"""
from __future__ import print_function
import os
import sys
import argparse
import glob
import itertools
from AdaptivePELE.utilities import utilities
from AdaptivePELE.atomset import atomset
# Python 2/3 compatibility shim: `basestring` only exists in Python 2.
# On Python 3 it raises NameError, so bind it to `str` — the
# isinstance(snapshots[0], basestring) check in main() relies on it.
try:
    basestring
except NameError:
    basestring = str
def parseArguments():
    """
    Parse the command-line options.

    :returns: int, int, str, str, str, str, bool -- trajectory number,
        snapshot number, path to the epoch, output path where to write the
        files, name of the output pdb, name of the topology file, and whether
        to force pdb extraction
    """
    # NOTE: the previous description ("conformation network") was copied from
    # a different analysis script; this one backtracks a snapshot's pathway.
    desc = "Backtrack the trajectory fragments that led to a snapshot and write them as a pdb file\n"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("epoch", type=str, help="Path to the epoch to search the snapshot")
    parser.add_argument("trajectory", type=int, help="Trajectory number")
    parser.add_argument("snapshot", type=int, help="Snapshot to select (in accepted steps)")
    parser.add_argument("-o", type=str, default=None, help="Output path where to write the files")
    parser.add_argument("--name", type=str, default="pathway.pdb", help="Name of the pdb to write the files")
    parser.add_argument("--top", type=str, default=None, help="Name of the pdb topology for loading non-pdb trajectories")
    parser.add_argument("--use_pdb", action="store_true", help="Force to use extraction for pdb. Only useful in case of having a pdb with .xtc extension")
    args = parser.parse_args()
    return args.trajectory, args.snapshot, args.epoch, args.o, args.name, args.top, args.use_pdb
def main(trajectory, snapshot, epoch, outputPath, out_filename, topology, use_pdb=False):
    """
    Backtrack the trajectory fragments that led to the snapshot identified by
    (epoch, trajectory, snapshot) and write them to a single pdb file.

    :param trajectory: trajectory number of the snapshot of interest
    :type trajectory: int
    :param snapshot: snapshot number (in accepted steps) within that trajectory
    :type snapshot: int
    :param epoch: path to the epoch folder where the snapshot was found
    :type epoch: str
    :param outputPath: folder where to write the file (None -> current dir)
    :type outputPath: str
    :param out_filename: name of the pdb file to write
    :type out_filename: str
    :param topology: path to a pdb topology for loading non-pdb trajectories
    :type topology: str
    :param use_pdb: accepted for interface compatibility; not used in this
        function's visible body — TODO confirm against utilities.getSnapshots
    :type use_pdb: bool
    """
    if outputPath is not None:
        # Normalize to a trailing separator and create the folder if missing
        outputPath = os.path.join(outputPath, "")
        if not os.path.exists(outputPath):
            os.makedirs(outputPath)
    else:
        outputPath = ""
    if topology is not None:
        topology = utilities.getTopologyObject(topology)
    else:
        topology = None
    topology_contents = None
    if os.path.exists(outputPath+out_filename):
        # If the specified name exists, append a number to distinguish the files
        name, ext = os.path.splitext(out_filename)
        out_filename = "".join([name, "_%d", ext])
        i = 1
        while os.path.exists(outputPath+out_filename % i):
            i += 1
        out_filename %= i
    pathway = []
    # Strip out trailing backslash if present
    pathPrefix, epoch = os.path.split(epoch.rstrip("/"))
    sys.stderr.write("Creating pathway...\n")
    # Walk backwards epoch by epoch until epoch 0 is reached
    while True:
        filename = glob.glob(os.path.join(pathPrefix, epoch, "*traj*_%d.*" % trajectory))
        if not filename:
            raise ValueError("Trajectory %s not found!" % os.path.join(pathPrefix, epoch, "*traj*_%d.*" % trajectory))
        snapshots = utilities.getSnapshots(filename[0])
        if epoch == '0':
            initial = 0
        else:
            # avoid repeating the initial snapshot
            initial = 1
        if topology is not None:
            topology_contents = topology.getTopology(int(epoch), trajectory)
        if not isinstance(snapshots[0], basestring):
            # Non-pdb frames: convert each one to pdb text via atomset.PDB
            new_snapshots = []
            for i in range(initial, snapshot+1):
                PDB = atomset.PDB()
                PDB.initialise(snapshots[i], topology=topology_contents)
                new_snapshots.append(PDB.pdb)
            snapshots = new_snapshots
        else:
            snapshots = snapshots[initial:snapshot+1]
        # Prepend so the final pathway is ordered from epoch 0 forwards
        pathway.insert(0, snapshots)
        if epoch == '0':
            # Once we get to epoch 0, we just need to append the trajectory
            # where the cluster was found and we can break out of the loop
            break
        # Map this trajectory back to the (epoch, trajectory, snapshot) it was
        # spawned from, using the processorMapping.txt bookkeeping file
        procMapping = open(os.path.join(pathPrefix, epoch, "processorMapping.txt")).read().rstrip().split(':')
        epoch, trajectory, snapshot = map(int, procMapping[trajectory-1][1:-1].split(','))
        epoch = str(epoch)
    sys.stderr.write("Writing pathway...\n")
    # Append mode is safe: out_filename was made unique above if it existed
    with open(outputPath+out_filename, "a") as f:
        if topology:
            # Quick fix to avoid problems when visualizing with PyMol
            f.write("ENDMDL\nMODEL 2\n".join(itertools.chain.from_iterable(pathway)))
        else:
            f.write("ENDMDL\n".join(itertools.chain.from_iterable(pathway)))
# Script entry point: parse the CLI options and reconstruct the pathway.
if __name__ == "__main__":
    traj, num_snapshot, num_epoch, output_path, output_filename, top, force_use_pdb = parseArguments()
    main(traj, num_snapshot, num_epoch, output_path, output_filename, top, force_use_pdb)
|
AdaptivePELE/AdaptivePELE
|
AdaptivePELE/analysis/backtrackAdaptiveTrajectory.py
|
Python
|
mit
| 4,654
|
[
"PyMOL"
] |
ac0bc2256c71c9ee127f3dfb60afb4dd8c2a866c3d9f9cc316e6ba773157607e
|
## Automatically adapted for numpy.oldnumeric Mar 26, 2007 by alter_code1.py
## generate random orientations of receptor and ligand
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
##
## $Revision$
## last $Author$
## last $Date$
"""
Create Complexes with random orientation from a receptor and ligand structure.
"""
from Biskit.Dock.Complex import Complex
import Biskit.mathUtils as ma
import Biskit.molUtils as mol
import Biskit.tools as t
import numpy.oldnumeric.random_array as ra
import numpy.oldnumeric as N
from Biskit import Xplorer, PCRModel
import tempfile
class ComplexRandomizer:
    """
    Create Biskit.Dock.Complex(es) with random orientation.

    The receptor and ligand are centered at the origin, the ligand is placed
    with a random rotation at a random point on a sphere whose radius is the
    mean of the two maximal center-to-atom distances, and the resulting
    complex is rigid-body minimized with Xplor (see ComplexMinimizer).
    """

    def __init__( self, mrec, mlig, rec_out=None, lig_out=None, debug=0 ):
        """
        @param mrec: receptor model
        @type mrec: PCRModel
        @param mlig: ligand model
        @type mlig: PCRModel
        @param rec_out: rec output (default: None)
        @type rec_out: str
        @param lig_out: lig output (default: None)
        @type lig_out: str
        @param debug: 1, keep temporary xplor files (default: 0)
        @type debug: 1|0
        """
        ## rec and lig with centered coordinates
        self.rec = self.__center_model( mrec )
        self.lig = self.__center_model( mlig )

        ## this way they will be unique in ComplexList
        if rec_out:
            self.rec.saveAs( rec_out )
        if lig_out:
            self.lig.saveAs( lig_out )

        ## get max. center-to-atom distances
        self.d_max_rec = self.__max_distance( self.rec )
        self.d_max_lig = self.__max_distance( self.lig )

        ## temporary file receiving the xplor minimization log
        self.xp_log = tempfile.mktemp('rb_min_xplor_log')

        ## keep temporary xplor files
        self.debug = debug

    def __center_model( self, model ):
        """
        Translate PDBModel so that its center is in 0,0,0.

        @param model: model to center
        @type model: PDBModel

        @return: PDBModel (clone of model)
        @rtype: PDBModel
        """
        r = model.clone()
        ## drop waters before computing the center of mass
        r.keep( N.nonzero( N.logical_not( r.maskH2O() ) ) )
        center = r.centerOfMass()
        r.setXyz( r.getXyz() - center )
        return r

    def __max_distance( self, model ):
        """
        Largest center to any other atom distance.

        @param model: model with centered coordinates
        @type model: PDBModel

        @return: largest distance
        @rtype: float
        """
        center = model.centerOfMass()
        dist = N.sqrt( N.sum( ( model.getXyz()-center )**2 , 1 ) )
        return max( dist )

    def __random_translation( self ):
        """
        Random translation on a sphere around 0,0,0 with fixed radius.
        The radius is the sum of the (max) radius of receptor and ligand.

        @return: translation array 3 x 1 of float
        @rtype: array
        """
        radius = (self.d_max_rec + self.d_max_lig) / 2.0
        ## random direction; scale the vector onto the sphere surface
        xyz = ra.random( 3 ) - 0.5
        scale = radius*1.0 / N.sqrt( N.sum( xyz**2 ) )
        return scale * xyz

    def __random_matrix( self ):
        """
        Random rotation matrix.

        @return: 4 x 4 array of float, random rotation and translation matrix
        @rtype: array
        """
        r = ma.randomRotation()
        ## identity rotation, useful for debugging:
        ## r = N.array([[1,0,0],[0,1,0],[0,0,1]],'f')
        t = self.__random_translation()

        ## create 3 x 4 matrix: 0:3, 0:3 contains rot; 3,0:3 contains trans
        result = N.concatenate( (r, N.transpose( [ t.tolist() ] )), 1)

        ## make it square
        result = N.concatenate( (result, N.array([[0,0,0,1]], N.Float32)), 0 )

        return result

    def random_complex_remote( self ):
        """
        Create a complex where the receptor and ligand have random
        orientations but are spaced within contact distance.

        @return: rec & lig spaced r_rec + r_lig apart in random orientation
        @rtype: Complex
        """
        return Complex( self.rec, self.lig,
                        ligMatrix= self.__random_matrix() )

    def __minimize_complex( self, com ):
        """
        Use Xplor to rigid body minimize the random complex.

        NOTE(review): this method is not called from the visible code path
        (random_complex builds its own ComplexMinimizer), and the positional
        t.tempDir() argument would land in ComplexMinimizer's `debug`
        parameter — verify before reviving it.

        @param com: random complex
        @type com: Complex
        """
        xp = ComplexMinimizer( com, t.tempDir(), log=self.xp_log )
        xp.run()

    def random_complex( self, inp_mirror=None ):
        """
        @return: randomized and minimized complex
        @rtype: Complex
        """
        ## rigid-body minimize the randomly oriented pair with Xplor
        self.cm = ComplexMinimizer( self.random_complex_remote(),
                                    debug=self.debug )
        self.cm.run( inp_mirror=inp_mirror )

        ## recover the ligand transformation from the minimized structures
        com = Complex( self.rec, self.lig )
        rt = com.extractLigandMatrix( self.cm.lig )
        com.setLigMatrix( rt )
        return com
class ComplexMinimizer( Xplorer ):
    """
    Rigid-body minimize receptor and ligand of a Complex
    using soft vdW pot. Runs the rb_minimize_complex.inp Xplor script.
    """

    def __init__( self, com, debug=0, **params ):
        """
        @param com: complex to minimize
        @type com: Complex
        @param debug: keep temporary files (default: 0)
        @type debug: 1|0
        @param params: additional keyword arguments passed on to Xplorer
        """
        self.com = com

        self.rec_psf = com.rec().getPsfFile()
        self.lig_psf = com.lig().getPsfFile()

        recCode = com.rec().getPdbCode()
        ligCode = com.lig().getPdbCode()

        ## temporary pdb files exchanged with the Xplor process
        self.rec_in = tempfile.mktemp( recCode + ".pdb" )
        self.lig_in = tempfile.mktemp( ligCode + ".pdb" )

        self.lig_out = tempfile.mktemp( "lig_out.pdb" )
        self.rec_out = tempfile.mktemp( "rec_out.pdb" )

        ## Xplor input template and parameter file shipped with Biskit
        self.inp_template = t.dataRoot() +\
             '/xplor/rb_minimize_complex.inp'

        self.param19 = t.dataRoot() + \
             '/xplor/toppar/param19.pro'

        self.result = None

        Xplorer.__init__( self, self.inp_template, debug=debug, **params )

    def prepare( self ):
        """
        Prepare for calculation. Write input files.
        """
        self.com.rec().writePdb( self.rec_in )
        self.com.lig().writePdb( self.lig_in )

    def cleanup( self ):
        """
        Remove temporary files.
        """
        Xplorer.cleanup( self )

        if not self.debug:
            t.tryRemove( self.rec_in )
            t.tryRemove( self.lig_in )

            t.tryRemove( self.rec_out )
            t.tryRemove( self.lig_out )

    def finish( self ):
        """
        When done, collect the minimized models from the output pdb files.
        """
        self.rec = PCRModel( self.com.rec_model.getPsfFile(), self.rec_out )
        self.lig = PCRModel( self.com.lig_model.getPsfFile(), self.lig_out )
#############
## TESTING
#############
import Biskit.test as BT
class Test(BT.BiskitTest):
    """Test case

    The test generates 3 random complexes. In interactive mode,
    the 3 complexes are displayed as movie in Pymol. They are
    written out as Amber trajectory if debug=True.
    """

    ## requires the external Xplor executable and takes a long time
    TAGS = [ BT.EXE, BT.LONG ]

    def prepare(self):
        ## temporary output files for the optional trajectory dump
        import tempfile
        self.f_pfb = tempfile.mktemp('_test.pdb')
        self.f_crd = tempfile.mktemp('_test.crd')

    def cleanUp(self):
        t.tryRemove( self.f_pfb )
        t.tryRemove( self.f_crd )

    def test_ComplexRandomizer(self):
        """Dock.ComplexRandomizer test"""
        from Biskit import Trajectory

        if self.local:
            print "\nLoading Rec and Lig files ...",

        ## barnase / barstar test structures shipped with Biskit
        rec_pdb = t.testRoot() + '/rec/1A2P.pdb'
        lig_pdb = t.testRoot() + '/lig/1A19.pdb'

        rec_psf = t.testRoot() + '/rec/1A2P.psf'
        lig_psf = t.testRoot() + '/lig/1A19.psf'

        rec = PCRModel( rec_psf, rec_pdb )
        lig = PCRModel( lig_psf, lig_pdb )

        if self.local:
            print "Initializing Randomizer..."

        self.cr = ComplexRandomizer( rec, lig, debug=self.DEBUG )

        if self.local:
            print "Creating 3 random complexes..."

        cs = [ self.cr.random_complex() for i in range(3) ]

        self.traj = Trajectory( [ c.model() for c in cs ] )

        if self.local:
            self.display( self.traj )
            globals().update( locals() )

        self.assertEqual( len(self.traj), 3 )

    def display(self, traj ):
        """Display random complexes as trajectory in Pymol.
        Only run in local interactive mode.
        """
        from Biskit import Pymoler

        print "activate debug switch to get random complexes written to disc!"
        if self.DEBUG:
            print "writing random complex as trajectory to file..."
            traj.ref.writePdb( self.f_pfb )
            traj.writeCrd( self.f_crd )
            print 'Wrote reference pdb file to: %s' % self.f_pfb
            print 'Wrote crd file to: %s' % self.f_crd

        self.pm = Pymoler( full=0 )
        mname = self.pm.addMovie( [ traj[i] for i in range(len(traj)) ] )
        self.pm.add('hide all')
        self.pm.add('show cartoon')
        self.pm.add('spectrum')
        self.pm.add('mplay')
        self.pm.run()
if __name__ == '__main__':
    # Run this module's test case directly via the Biskit test runner.
    BT.localTest()
|
ostrokach/biskit
|
Biskit/Dock/ComplexRandomizer.py
|
Python
|
gpl-3.0
| 9,607
|
[
"Amber",
"PyMOL"
] |
f3bbc5ade2db4b40b4913967dd4c67cc14ec2401ccf5e93d9f04000862e4700f
|
"""
# Copyright (C) 2007 Nathan Ramella (nar@remix.net)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Nathan Ramella <nar@remix.net> or visit http://www.liveapi.org
"""
#import sys
#import Live
#path = "/Users/ST8/Production/Arduinome/Dev/LiveOSC"
#errorLog = open(path + "/stderr.txt", "w")
#errorLog.write("Starting Error Log")
#sys.stderr = errorLog
#stdoutLog = open(path + "/stdout.txt", "w")
#stdoutLog.write("Starting Standard Out Log")
#sys.stdout = stdoutLog
from LiveOSC import LiveOSC
def create_instance(c_instance):
    """Entry point called by Ableton Live's control-surface loader.

    Live hands us its internal c_instance handle; we wrap it in the
    LiveOSC control surface and return that object to Live.
    """
    surface = LiveOSC(c_instance)
    return surface
|
avroshk/VRDAW
|
VRDAW_working/__init__.py
|
Python
|
gpl-3.0
| 1,297
|
[
"VisIt"
] |
a1f5fdbe081e79cc76fe60e83cebfafd418fa59b01c50a4acc22dcf800185d53
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
from spack import *
class PyMixedhtseq(PythonPackage):
    """HTSeq for mixed single and paired end reads"""

    homepage = "https://github.com/schae234/MixedHTSeq"
    url = "https://github.com/schae234/MixedHTSeq/archive/v0.1.0.tar.gz"

    # Only one (deprecated) release exists upstream.
    version('0.1.0', sha256='234689c8743ae2ba7ad13bc1809a5248184a4b8d16112d5413e09164ab67e157', deprecated=True)

    # Upstream is Python 2 only (2.5 - 2.7).
    depends_on('python@2.5:2.8', type=('build', 'run'))
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-htseq', type=('build', 'run'))
    depends_on('py-ipython', type=('build', 'run'))
    depends_on('py-pandas', type=('build', 'run'))
    depends_on('py-setuptools', type='build')

    @run_after('install')
    def install_scripts(self):
        # The upstream setup.py does not install the scripts/ directory,
        # so rewrite each script's shebang to this build's python and
        # copy the scripts into the package's bin/ ourselves.
        shebang = '#!{0}\n'.format(self.spec['python'].command)
        for fname in glob.glob('scripts/*.py'):
            # Drop any existing shebang line (filter_file blanks it) ...
            filter_file('^#!.*', '', fname)
            with open(fname, 'r') as orig:
                fdata = orig.read()
            # ... then prepend the correct one.
            with open(fname, 'w') as new:
                new.write(shebang + fdata)
            set_executable(fname)
        mkdirp(self.prefix.bin)
        install_tree('scripts', self.prefix.bin)
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-mixedhtseq/package.py
|
Python
|
lgpl-2.1
| 1,373
|
[
"HTSeq"
] |
10dbcd5da68fc474fe6062b4c6b30dd24cbffee6a86a09fda411d70753c500b4
|
#!/usr/bin/env python
"""
VMat Class
@author: Alicia Schep, Greenleaf Lab, Stanford University
"""
#Import necessary python modules
from scipy import signal, ndimage
import numpy as np
from copy import copy
import matplotlib.pyplot as plt
class VMat_Error(Exception):
    """Error raised by VMat operations on inconsistent input."""

    def __init__(self, value):
        # Keep the offending value around for callers that inspect it.
        self.value = value

    def __str__(self):
        # Render via repr() so string payloads appear quoted.
        return repr(self.value)
class VMat:
    """Class for storing and processing V-plot matrix

    A V-plot is a 2D histogram: rows are insert sizes (lower..upper-1),
    columns are positions relative to a central dyad (-w..+w).
    NOTE(review): this module is written for Python 2 -- several methods
    rely on integer division of ints (e.g. ``mat.shape[1]/2``) and on
    ``map`` returning a list.
    """
    def __init__(self, mat, lower, upper):
        """
        Assumes Vplot is centered!

        Inputs:
            mat = matrix (as numpy array)
            lower = lower bound of insert sizes represented by mat
            upper = upper bound of insert sizes represented by mat

        Raises VMat_Error if the number of rows does not match the
        insert-size range.
        """
        if mat.shape[0]!=upper-lower:
            raise VMat_Error("mat shape is not consistent with insert limits")
        self.mat = mat
        self.upper = upper
        self.lower = lower
        # Half-width of the window around the center column.
        # Assumes an odd number of columns (2*w + 1) and Python 2
        # integer division -- TODO confirm with callers.
        self.w = mat.shape[1]/2
    def trim(self,lower,upper,w):
        """reduce the size of the vplot

        lower is new lower bound
        upper is new upper bound
        w is new flanking region around center
        """
        # Convert the requested bounds into row/column slice indices.
        up = upper-self.lower
        dn = lower-self.lower
        left = self.w - w
        right = self.w + w + 1
        if up > self.mat.shape[0] or dn < 0 or left < 0 or right > self.mat.shape[1]:
            raise VMat_Error("Mat is smaller than desired trim")
        self.mat = self.mat[dn:up,left:right]
        self.lower = lower
        self.upper = upper
        self.w = w
    def symmetrize(self):
        """Force the V-plot to be symmetric"""
        # Odd and even insert sizes are mirrored differently because the
        # center of an even-length insert falls between base pairs.
        for j in range(self.lower,self.upper):
            i=j-self.lower
            if j%2==1:
                lefthalf = (self.mat[i,:(self.w+1)]+self.mat[i,self.w:][::-1])*0.5
                self.mat[i,:] = np.hstack((lefthalf,lefthalf[:-1][::-1]))
            else:
                righthalf = (self.mat[i,(self.w):-1]+self.mat[i,:self.w][::-1])*0.5
                self.mat[i,:] = np.hstack((righthalf[::-1],righthalf,self.mat[i,-1]))
    def flip(self, mode = 'same'):
        """Flip V-plot

        mode 'same' keeps the matrix width (the last column of even rows
        is filled with an average since no mirror value exists);
        mode 'valid' drops the outermost columns instead.
        """
        if mode == 'same':
            new = np.zeros(self.mat.shape)
            for j in range(self.lower,self.upper):
                i = j - self.lower
                if j%2==1:
                    new[i,:] = self.mat[i,][::-1]
                else:
                    new[i,:-1] = self.mat[i,:-1][::-1]
                    #for -1 postion don't actually have values
                    new[i,-1] = np.mean([self.mat[i,-1],self.mat[i,1]])
            self.mat = new
        elif mode == 'valid':
            new = np.zeros((self.mat.shape[0],self.mat.shape[1]-2))
            for j in range(self.lower,self.upper):
                i = j - self.lower
                if j%2==1:
                    new[i,:] = self.mat[i,1:-1][::-1]
                else:
                    new[i,:] = self.mat[i,:-1][::-1][1:]
            self.mat = new
            # width shrank by one column on each side
            self.w += -1
        else:
            raise Exception("Mode must be one of 'same' or 'valid'")
    def smooth(self, sd = 1):
        """smooth v-plot using gaussian kernel

        sd is the standard deviation of the 2D Gaussian (in bins).
        """
        self.mat = ndimage.filters.gaussian_filter(self.mat,sd,
                                                   mode='constant')
    def smooth1d(self, sd = 1, axis = 1):
        """smooth v-plot along one axis only

        axis=1 (default) smooths along positions; axis=0 along insert
        sizes.
        """
        self.mat = ndimage.filters.gaussian_filter1d(self.mat,sd,axis,
                                                     mode='nearest')
    def norm(self):
        """normalize v matrix so that signal minus even background will be 1 divided by base pairs in window"""
        # tmp1: matrix as a probability distribution; tmp2: flat
        # (uniform) background with the same total mass.
        tmp1 = self.mat / np.sum(self.mat)
        tmp2 = np.ones(self.mat.shape) * (1.0 / self.mat.size)
        self.mat = self.mat / (np.sum(self.mat * tmp1)- np.sum(self.mat * tmp2))
        # NOTE(review): the final 10.0 scaling factor's rationale is not
        # visible here -- presumably an empirical scaling; confirm.
        self.mat = (self.mat / self.mat.shape[1]) * 10.0
    def norm_y(self,dist):
        """normalize vplot so insertsize matches supplied distribution

        dist must provide get(size=...) returning the desired frequency
        for that insert size.
        """
        for i in range(self.mat.shape[0]):
            self.mat[i] = self.mat[i] * (dist.get(size = i + self.lower)/ np.sum(self.mat[i]))
    def converto1d(self):
        """convert the 2d matrix to a 1d representation of insertions

        Each matrix cell contributes its value to the two insertion end
        points of the fragment it represents; odd-length inserts split
        each end point's weight across two adjacent positions.
        Result is stored (normalized to sum 1) in self.one_d.
        """
        # Python 2 integer division throughout the index arithmetic.
        self.one_d = np.zeros(self.upper + self.upper%2 +2*self.w+1)
        center = self.upper/2 + self.w
        for j in range(self.mat.shape[0]):
            for i in range(self.mat.shape[1]):
                ilen=j+self.lower
                val = copy(self.mat[j,i])
                if ilen%2==0:
                    self.one_d[center-(self.w-i)-(ilen/2)]+= val
                    self.one_d[center-(self.w-i)+(ilen/2)]+= val
                else:
                    self.one_d[center-(self.w-i)-(ilen/2)]+= val * 0.5
                    self.one_d[center-(self.w-i)+(ilen/2)]+= val * 0.5
                    self.one_d[center-(self.w-i)-(ilen/2+1)]+= val * 0.5
                    self.one_d[center-(self.w-i)+(ilen/2+1)]+= val * 0.5
        self.one_d = self.one_d / sum(self.one_d)
    def plot(self, mat=None, title=None, filename=None):
        """Plot current main matrix or specified matrix (of same dimensions)

        Saves to filename when given (and closes the figure); otherwise
        shows the figure interactively.
        """
        if mat is None:
            mat=self.mat
        elif mat.shape!=(self.upper-self.lower,self.w*2+1):
            raise VMat_Error("dimensions of input mat should match \
                dim of vmat")
        fig = plt.figure()
        plt.imshow(mat,origin="lower",interpolation='nearest',
                   extent=[-self.w,self.w,self.lower,self.upper-1])
        plt.xlabel("Position relative to dyad")
        plt.ylabel("Insert size")
        if title:
            plt.title(title)
        plt.colorbar(shrink=0.8)
        if filename:
            fig.savefig(filename)
            plt.close(fig)
        else:
            fig.show()
    def plot_1d(self,filename=None):
        """plot the 1d insertion representation of the matrix

        Requires converto1d() to have been called first (uses
        self.one_d). Dashed lines mark +/-73 bp, i.e. the canonical
        nucleosome half-width.
        """
        fig = plt.figure()
        xlim = len(self.one_d)/2
        plt.plot(range(-xlim,xlim+1),self.one_d)
        plt.vlines(-73,0,max(self.one_d)*1.1,linestyles='dashed')
        plt.vlines(73,0,max(self.one_d)*1.1,linestyles='dashed')
        plt.xlabel("Position relative to dyad")
        plt.ylabel("Insertion Frequency")
        if filename:
            fig.savefig(filename)
            plt.close(fig)
            #Also save text output!
            filename2 = ".".join(filename.split(".")[:-1]+['txt'])
            np.savetxt(filename2,self.one_d,delimiter="\t")
        else:
            fig.show()
    def plot_insertsize(self,filename=None):
        """plot the insert size disribution in the main matrix"""
        fig = plt.figure()
        # Marginal over positions, normalized to a frequency.
        ins = np.sum(self.mat,axis=1)
        ins = ins/sum(ins)
        plt.plot(range(self.lower,self.upper),ins)
        plt.xlabel("Insert Size")
        plt.ylabel("Frequency")
        if filename:
            fig.savefig(filename)
            plt.close(fig)
            #Also save text output!
            filename2 = ".".join(filename.split(".")[:-1]+['txt'])
            np.savetxt(filename2,ins,delimiter="\t")
        else:
            fig.show()
    def save(self,filename):
        """write text output description of VMat object attributes

        Format is the one read back by VMat.open(): '#'-tagged section
        headers followed by the values / tab-separated matrix rows.
        """
        out=open(filename,'w')
        out.write('#VMat Descriptor File\n')
        out.write('#Contains VMat and pertinent information\n')
        out.write('#lower\n')
        out.write(str(self.lower)+'\n')
        out.write('#upper\n')
        out.write(str(self.upper)+'\n')
        out.write('#mat\n')
        for row in self.mat:
            out.write("\t".join(map(str,row))+'\n')
        out.close()
    @staticmethod
    def open(filename):
        """Create VMat object from text descriptor file

        Parses the '#'-tagged sections written by save(). Raises
        VMat_Error if a required section is missing.
        """
        infile = open(filename,'r')
        # Simple line-oriented state machine: section headers switch the
        # state, data lines are interpreted according to it.
        state = ''
        mat = []
        for line in infile:
            if '#lower' in line:
                state = 'lower'
            elif '#upper' in line:
                state = 'upper'
            elif '#mat' in line:
                state = 'mat'
            elif '#' in line:
                state = 'other'
            elif state == 'lower':
                lower = int(line.strip('\n'))
            elif state == 'upper':
                upper = int(line.strip('\n'))
            elif state == 'mat':
                # Python 2: map() returns a list here.
                mat.append(map(float,line.strip('\n').split('\t')))
        try:
            new = VMat(np.array(mat), lower, upper)
        except NameError:
            # lower/upper never assigned -> descriptor file incomplete
            raise VMat_Error("VMat decriptor file appeas to be missing some\
                needed components")
        infile.close()
        return new
|
GreenleafLab/NucleoATAC
|
pyatac/VMat.py
|
Python
|
mit
| 8,649
|
[
"Gaussian"
] |
a6a4b9bb535320538afb3842363669efa2abf8c232174858f8dfb226fd90d68f
|
"""Module which contains all spells that check something in a nif file."""
# --------------------------------------------------------------------------
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, NIF File Format Library and Tools.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
# --------------------------------------------------------------------------
from contextlib import closing
from itertools import repeat
import tempfile
from pyffi.formats.nif import NifFormat
import pyffi.spells.nif
import pyffi.utils.tristrip # for check_tristrip
class SpellReadWrite(pyffi.spells.nif.NifSpell):
    """Like the original read-write spell, but with additional file size
    check."""

    SPELLNAME = "check_readwrite"

    def datainspect(self):
        """Only process nifs if they have all admissible block types.

        Note that the default rule is to process a nif if it has at
        least one admissible block type, but for read write spells it
        makes more sense to impose all.
        """
        return all(self.toaster.is_admissible_branch_class(header_type)
                   for header_type in self.header_types)

    def dataentry(self):
        # Empty strings are not written back; each one accounts for a
        # known 4-byte size difference (see comparison below).
        num_empty_strings = len([s for s in self.data.header.strings if not s])
        self.toaster.msgblockbegin("writing to temporary file")

        f_tmp = tempfile.TemporaryFile()
        try:
            self.data.write(f_tmp)
            # comparing the files will usually be different because
            # blocks may have been written back in a different order,
            # so cheaply just compare file sizes
            self.toaster.msg("comparing file sizes")
            # seek(0, 2) moves to end-of-file, so tell() gives the size.
            self.stream.seek(0, 2)
            f_tmp.seek(0, 2)
            if self.stream.tell() != f_tmp.tell():
                self.toaster.msg("original size: %i" % self.stream.tell())
                self.toaster.msg("written size:  %i" % f_tmp.tell())
                # could be due to empty strings
                if self.stream.tell() - f_tmp.tell() == 4 * num_empty_strings:
                    self.toaster.msg("difference due to %i empty string(s)"
                                     % num_empty_strings)
                else:
                    # dump the rewritten data for offline inspection
                    f_tmp.seek(0)
                    f_debug = open("debug.nif", "wb")
                    f_debug.write(f_tmp.read(-1))
                    f_debug.close()
                    raise Exception('write check failed: file sizes differ (written file saved as debug.nif for inspection)')
        finally:
            f_tmp.close()

        self.toaster.msgblockend()

        # spell is finished: prevent recursing into the tree
        return False
class SpellNodeNamesByFlag(pyffi.spells.nif.NifSpell):
    """Collect, over all processed nif files, which node names occur
    with which flag values, and print the summary when toasting ends."""

    SPELLNAME = "check_nodenamesbyflag"

    @classmethod
    def toastentry(cls, toaster):
        # Shared accumulator: flags value -> list of node names seen.
        toaster.flagdict = {}
        return True

    @classmethod
    def toastexit(cls, toaster):
        # Final report: one line per distinct flags value.
        for flag, names in toaster.flagdict.items():
            toaster.msg("%s %s" % (flag, names))

    def datainspect(self):
        # Skip files that contain no NiNode blocks at all.
        return self.inspectblocktype(NifFormat.NiNode)

    def branchinspect(self, branch):
        # stick to main tree
        return isinstance(branch, NifFormat.NiAVObject)

    def branchentry(self, branch):
        if not isinstance(branch, NifFormat.NiAVObject):
            return False
        # Record this node's name under its flags value, once.
        names = self.toaster.flagdict.setdefault(branch.flags, [])
        if branch.name not in names:
            names.append(branch.name)
        return True
class SpellCompareSkinData(pyffi.spells.nif.NifSpell):
    """This spell compares skinning data with a reference nif."""

    SPELLNAME = "check_compareskindata"

    # helper functions (to compare with custom tolerance)
    @staticmethod
    def are_vectors_equal(oldvec, newvec, tolerance=0.01):
        # max absolute component-wise difference below tolerance
        return (max([abs(x-y)
                     for (x,y) in zip(oldvec.as_list(), newvec.as_list())])
                < tolerance)

    @staticmethod
    def are_matrices_equal(oldmat, newmat, tolerance=0.01):
        # max absolute element-wise difference below tolerance
        return (max([max([abs(x-y)
                          for (x,y) in zip(oldrow, newrow)])
                     for (oldrow, newrow) in zip(oldmat.as_list(),
                                                 newmat.as_list())])
                < tolerance)

    @staticmethod
    def are_floats_equal(oldfloat, newfloat, tolerance=0.01):
        return abs(oldfloat - newfloat) < tolerance

    @classmethod
    def toastentry(cls, toaster):
        """Read reference nif file given as argument."""
        # if no argument given, do not apply spell
        if not toaster.options.get("arg"):
            return False
        # read reference nif
        toaster.refdata = NifFormat.Data()
        with closing(open(toaster.options["arg"], "rb")) as reffile:
            toaster.refdata.read(reffile)
        # find bone data in reference nif
        # each entry: (skeleton root, skin data, bone node, bone data)
        toaster.refbonedata = []
        for refgeom in toaster.refdata.get_global_iterator():
            if (isinstance(refgeom, NifFormat.NiGeometry)
                and refgeom.skin_instance and refgeom.skin_instance.data):
                toaster.refbonedata += list(zip(
                    repeat(refgeom.skin_instance.skeleton_root),
                    repeat(refgeom.skin_instance.data),
                    refgeom.skin_instance.bones,
                    refgeom.skin_instance.data.bone_list))
        # only apply spell if the reference nif has bone data
        return bool(toaster.refbonedata)

    def datainspect(self):
        return self.inspectblocktype(NifFormat.NiSkinData)

    def branchinspect(self, branch):
        # stick to main tree
        return isinstance(branch, NifFormat.NiAVObject)

    def branchentry(self, branch):
        if (isinstance(branch, NifFormat.NiGeometry)
            and branch.skin_instance and branch.skin_instance.data):
            # pair up each bone of this geometry with its skin data
            for skelroot, skeldata, bonenode, bonedata in zip(
                repeat(branch.skin_instance.skeleton_root),
                repeat(branch.skin_instance.data),
                branch.skin_instance.bones,
                branch.skin_instance.data.bone_list):
                # find the matching bone (by name) in the reference nif
                for refskelroot, refskeldata, refbonenode, refbonedata \
                    in self.toaster.refbonedata:
                    if bonenode.name == refbonenode.name:
                        self.toaster.msgblockbegin("checking bone %s"
                                                   % bonenode.name)
                        # check that skeleton roots are identical
                        if skelroot.name == refskelroot.name:
                            # no extra transform
                            branchtransform_extra = NifFormat.Matrix44()
                            branchtransform_extra.set_identity()
                        else:
                            self.toaster.msg(
                                "skipping: skeleton roots are not identical")
                            self.toaster.msgblockend()
                            continue

                            # NOTE: everything below this 'continue' is
                            # intentionally unreachable (disabled).
                            # the following is an experimental way of
                            # compensating for different skeleton roots
                            # (disabled by default)

                            # can we find skeleton root of data in reference
                            # data?
                            for refskelroot_branch \
                                in self.toaster.refdata.get_global_iterator():
                                if not isinstance(refskelroot_branch,
                                                  NifFormat.NiAVObject):
                                    continue
                                if skelroot.name == refskelroot_branch.name:
                                    # yes! found!
                                    #self.toaster.msg(
                                    #    "found alternative in reference nif")
                                    branchtransform_extra = \
                                        refskelroot_branch.get_transform(refskelroot).get_inverse()
                                    break
                            else:
                                for skelroot_ref \
                                    in self.data.get_global_iterator():
                                    if not isinstance(skelroot_ref,
                                                      NifFormat.NiAVObject):
                                        continue
                                    if refskelroot.name == skelroot_ref.name:
                                        # yes! found!
                                        #self.toaster.msg(
                                        #    "found alternative in nif")
                                        branchtransform_extra = \
                                            skelroot_ref.get_transform(skelroot)
                                        break
                                else:
                                    self.toaster.msgblockbegin("""\
skipping: skeleton roots are not identical
and no alternative found""")
                                    self.toaster.msgblockend()
                                    continue

                        # calculate total transform matrix that would be applied
                        # to a vertex in the reference geometry in the position
                        # of the reference bone
                        reftransform = (
                            refbonedata.get_transform()
                            * refbonenode.get_transform(refskelroot)
                            * refskeldata.get_transform())
                        # calculate total transform matrix that would be applied
                        # to a vertex in this branch in the position of the
                        # reference bone
                        branchtransform = (
                            bonedata.get_transform()
                            * refbonenode.get_transform(refskelroot) # NOT a typo
                            * skeldata.get_transform()
                            * branchtransform_extra) # skelroot differences
                        # compare
                        if not self.are_matrices_equal(reftransform,
                                                       branchtransform):
                            #raise ValueError(
                            self.toaster.msg(
                                "transform mismatch\n%s\n!=\n%s\n"
                                % (reftransform, branchtransform))
                        self.toaster.msgblockend()
            # stop in this branch
            return False
        else:
            # keep iterating
            return True
class SpellCheckBhkBodyCenter(pyffi.spells.nif.NifSpell):
    """Recalculate the center of mass and inertia matrix,
    compare them to the originals, and report accordingly.
    """

    SPELLNAME = "check_bhkbodycenter"

    def datainspect(self):
        return self.inspectblocktype(NifFormat.bhkRigidBody)

    def branchinspect(self, branch):
        # recurse through scene graph and collision tree only
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.bhkNiCollisionObject,
                                   NifFormat.bhkRigidBody))

    def branchentry(self, branch):
        if not isinstance(branch, NifFormat.bhkRigidBody):
            # keep recursing
            return True
        else:
            # take copies of the original values before recalculating
            self.toaster.msg("getting rigid body mass, center, and inertia")
            mass = branch.mass
            center = branch.center.get_copy()
            inertia = branch.inertia.get_copy()

            self.toaster.msg("recalculating...")
            # keep the file's mass; only center and inertia are rederived
            branch.update_mass_center_inertia(mass=branch.mass)

            # mass check disabled: original files often store a mass
            # that differs from the recalculated one
            #self.toaster.msg("checking mass...")
            #if mass != branch.mass:
            #    #raise ValueError("center does not match; original %s, calculated %s"%(center, branch.center))
            #    self.toaster.logger.warn("warning: mass does not match; original %s, calculated %s"%(mass, branch.mass))
            #    # adapt calculated inertia matrix with observed mass
            #    if mass > 0.001:
            #        correction = mass / branch.mass
            #        for i in range(12):
            #            branch.inertia[i] *= correction
            #else:
            #    self.toaster.msg("perfect match!")

            self.toaster.msg("checking center...")
            report = {}
            if center != branch.center:
                #raise ValueError("center does not match; original %s, calculated %s"%(center, branch.center))
                self.toaster.logger.warn(
                    "center does not match; original %s, calculated %s"
                    % (center, branch.center))
                report["center"] = {
                    "orig": center.as_tuple(),
                    "calc": branch.center.as_tuple(),
                }

            self.toaster.msg("checking inertia...")
            # compare inertia matrices relative to their largest entry
            # (10% relative tolerance)
            scale = max(max(abs(x) for x in row) for row in inertia.as_list() + branch.inertia.as_list())
            if (max(max(abs(x - y)
                        for x, y in zip(row1, row2))
                    for row1, row2 in zip(inertia.as_list(), branch.inertia.as_list()))
                > 0.1 * scale):
                #raise ValueError("center does not match; original %s, calculated %s"%(center, branch.center))
                self.toaster.logger.warn(
                    "inertia does not match:\n\noriginal\n%s\n\ncalculated\n%s\n"
                    % (inertia, branch.inertia))
                report["inertia"] = {
                    "orig": inertia.as_tuple(),
                    "calc": branch.inertia.as_tuple(),
                }
            if report:
                self.append_report(report)

            # stop recursing
            return False
class SpellCheckCenterRadius(pyffi.spells.nif.NifSpell):
    """Recalculate the center and radius, compare them to the originals,
    and report mismatches.
    """

    # tentative results
    # -----------------
    # oblivion: ok
    # civ4: mostly ok (with very few exceptions: effects/magpie/flock.nif, units/!errorunit/bear.nif, maybe some more)
    # daoc: ok
    # morrowind: usually ok (quite some exceptions here)
    # zoo tycoon 2: mostly ok (except *_Adult_*.nif files)

    SPELLNAME = "check_centerradius"

    def datainspect(self):
        return self.inspectblocktype(NifFormat.NiGeometry)

    def branchinspect(self, branch):
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiGeometry,
                                   NifFormat.NiGeometryData))

    def branchentry(self, branch):
        if not isinstance(branch, NifFormat.NiGeometryData):
            # keep recursing
            return True
        else:
            report = {}
            # copy the stored bounding sphere before recalculating
            self.toaster.msg("getting bounding sphere")
            center = NifFormat.Vector3()
            center.x = branch.center.x
            center.y = branch.center.y
            center.z = branch.center.z
            radius = branch.radius

            self.toaster.msg("checking that all vertices are inside")
            # track the farthest vertex; maxr holds the SQUARED distance
            # during the loop and is square-rooted afterwards
            maxr = 0.0
            maxv = None
            for vert in branch.vertices:
                dist = vert - center
                if dist * dist > maxr:
                    maxr = dist * dist
                    maxv = vert
            maxr = maxr ** 0.5
            # allow 1% relative plus small absolute slack
            if maxr > 1.01 * radius + 0.01:
                #raise ValueError(
                self.toaster.logger.warn(
                    "not all vertices inside bounding sphere (vertex %s, error %s)"
                    % (maxv, abs(maxr - radius)))
                report["vertex_outside"] = maxv.as_tuple()

            self.toaster.msg("recalculating bounding sphere")
            branch.update_center_radius()

            self.toaster.msg("comparing old and new spheres")
            if center != branch.center:
                self.toaster.logger.warn(
                    "center does not match; original %s, calculated %s"
                    % (center, branch.center))
                report["center"] = {
                    "orig": center.as_tuple(),
                    "calc": branch.center.as_tuple(),
                }
            if abs(radius - branch.radius) > NifFormat.EPSILON:
                self.toaster.logger.warn(
                    "radius does not match; original %s, calculated %s"
                    % (radius, branch.radius))
                report["radius"] = {
                    "orig": radius,
                    "calc": branch.radius,
                }
            if report:
                self.append_report(report)

            # stop recursing
            return False
class SpellCheckSkinCenterRadius(pyffi.spells.nif.NifSpell):
    """Recalculate the skindata center and radius for each bone, compare them
    to the originals, and report mismatches.
    """

    SPELLNAME = "check_skincenterradius"

    def datainspect(self):
        return self.inspectblocktype(NifFormat.NiSkinData)

    def branchinspect(self, branch):
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiGeometry))

    def branchentry(self, branch):
        if not(isinstance(branch, NifFormat.NiGeometry) and branch.is_skin()):
            # keep recursing
            return True
        else:
            # copy per-bone bounding spheres before recalculating
            self.toaster.msg("getting skindata block bounding spheres")
            center = []
            radius = []
            for skindatablock in branch.skin_instance.data.bone_list:
                center.append(skindatablock.bounding_sphere_offset.get_copy())
                radius.append(skindatablock.bounding_sphere_radius)

            self.toaster.msg("recalculating bounding spheres")
            branch.update_skin_center_radius()

            self.toaster.msg("comparing old and new spheres")
            for i, skindatablock in enumerate(branch.skin_instance.data.bone_list):
                if center[i] != skindatablock.bounding_sphere_offset:
                    self.toaster.logger.error(
                        "%s center does not match; original %s, calculated %s"
                        % (branch.skin_instance.bones[i].name,
                           center[i], skindatablock.bounding_sphere_offset))
                if abs(radius[i] - skindatablock.bounding_sphere_radius) \
                    > NifFormat.EPSILON:
                    self.toaster.logger.error(
                        "%s radius does not match; original %s, calculated %s"
                        % (branch.skin_instance.bones[i].name,
                           radius[i], skindatablock.bounding_sphere_radius))

            # stop recursing
            return False
class SpellCheckConvexVerticesShape(pyffi.spells.nif.NifSpell):
    """This test checks whether each vertex is the intersection of at least
    three planes.
    """

    SPELLNAME = "check_convexverticesshape"

    def datainspect(self):
        return self.inspectblocktype(NifFormat.bhkConvexVerticesShape)

    def branchinspect(self, branch):
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.bhkNiCollisionObject,
                                   NifFormat.bhkRefObject))

    def branchentry(self, branch):
        if not isinstance(branch, NifFormat.bhkConvexVerticesShape):
            # keep recursing
            return True
        else:
            self.toaster.msg("checking vertices and planes")
            for v4 in branch.vertices:
                # drop the homogeneous w component of the vertex
                v = NifFormat.Vector3()
                v.x = v4.x
                v.y = v4.y
                v.z = v4.z
                # count planes (normal n, offset d) the vertex lies on,
                # i.e. where |v . n + d| is (almost) zero
                num_intersect = 0
                for n4 in branch.normals:
                    n = NifFormat.Vector3()
                    n.x = n4.x
                    n.y = n4.y
                    n.z = n4.z
                    d = n4.w
                    if abs(v * n + d) < 0.01:
                        num_intersect += 1
                # a vertex of a convex polytope should lie on >= 3 planes
                if num_intersect == 0:
                    self.toaster.logger.error(
                        "vertex %s does not intersect with any plane" % v)
                elif num_intersect == 1:
                    self.toaster.logger.warn(
                        "vertex %s only intersects with one plane" % v)
                elif num_intersect == 2:
                    self.toaster.logger.warn(
                        "vertex %s only intersects with two planes" % v)

            # stop recursing
            return False
class SpellCheckMopp(pyffi.spells.nif.NifSpell):
    """Parse and dump mopp trees, and check their validity:

    * do they have correct origin and scale?
    * do they refer to every triangle exactly once?
    * does the parser visit every byte exactly once?

    Mainly useful to check the heuristic parser and for debugging mopp codes.
    """

    SPELLNAME = "check_mopp"

    def datainspect(self):
        # cheap header check: skip files without mopp shapes
        return self.inspectblocktype(NifFormat.bhkMoppBvTreeShape)

    def branchinspect(self, branch):
        # recurse through scene graph and collision tree only
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.bhkNiCollisionObject,
                                   NifFormat.bhkRefObject))

    def branchentry(self, branch):
        if not isinstance(branch, NifFormat.bhkMoppBvTreeShape):
            # keep recursing
            return True

        # take copies of the original mopp bytes, origin, and scale so
        # they can be compared against the recalculated values
        mopp = [b for b in branch.mopp_data]
        o = NifFormat.Vector3()
        o.x = branch.origin.x
        o.y = branch.origin.y
        o.z = branch.origin.z
        scale = branch.scale

        self.toaster.msg("recalculating mopp origin and scale")
        branch.update_origin_scale()
        if branch.origin != o:
            self.toaster.logger.warn("origin mismatch")
            self.toaster.logger.warn("(was %s and is now %s)"
                                     % (o, branch.origin))
        if abs(branch.scale - scale) > 0.5:
            self.toaster.logger.warn("scale mismatch")
            self.toaster.logger.warn("(was %s and is now %s)"
                                     % (scale, branch.scale))

        self.toaster.msg("parsing mopp")
        # ids = indices of bytes processed, tris = triangle indices
        ids, tris = branch.parse_mopp(verbose=True)
        error = False

        # check that every triangle is referenced exactly once
        counts = [tris.count(i) for i in range(branch.shape.data.num_triangles)]
        missing = [i for i in range(branch.shape.data.num_triangles)
                   if counts[i] != 1]
        if missing:
            self.toaster.logger.error(
                "some triangles never visited, or visited more than once")
            self.toaster.logger.debug(
                "triangles index, times visited")
            for i in missing:
                # BUGFIX: logger.debug takes a single message string;
                # extra positional args would be %-interpolated into the
                # message and raise a formatting error.
                self.toaster.logger.debug("%i %i" % (i, counts[i]))
            error = True
        # BUGFIX: valid indices are 0 .. num_triangles - 1, so an index
        # equal to num_triangles is also out of range (was '>').
        wrong = [i for i in tris if i >= branch.shape.data.num_triangles]
        if wrong:
            self.toaster.logger.error("invalid triangle indices")
            self.toaster.logger.debug(wrong)
            error = True

        # check that the parser visited every mopp byte exactly once
        counts = [ids.count(i) for i in range(branch.mopp_data_size)]
        missing = [i for i in range(branch.mopp_data_size) if counts[i] != 1]
        if missing:
            self.toaster.logger.error(
                "some bytes never visited, or visited more than once")
            self.toaster.logger.debug(
                "byte index, times visited, value")
            for i in missing:
                # BUGFIX: same logger.debug misuse as above; emit one
                # pre-formatted message per offending byte.
                self.toaster.logger.debug(
                    "%i %i 0x%02X" % (i, counts[i], mopp[i]))
                self.toaster.logger.debug(
                    [mopp[k] for k in range(i, min(branch.mopp_data_size, i + 10))])
            error = True

        #if error:
        #    raise ValueError("mopp parsing failed")

        # stop recursing
        return False
class SpellCheckTangentSpace(pyffi.spells.nif.NifSpell):
    """Check and recalculate the tangent space, compare them to the originals,
    and report accordingly.
    """

    SPELLNAME = 'check_tangentspace'
    PRECISION = 0.3 #: Difference between values worth warning about.

    def datainspect(self):
        return self.inspectblocktype(NifFormat.NiTriBasedGeom)

    def branchinspect(self, branch):
        return isinstance(branch, NifFormat.NiAVObject)

    def branchentry(self, branch):
        if not isinstance(branch, NifFormat.NiTriBasedGeom):
            # keep recursing
            return True
        else:
            # get tangent space
            tangentspace = branch.get_tangent_space()
            if not tangentspace:
                # no tangent space present
                return False
            self.toaster.msg("checking tangent space")
            # sanity-check each (normal, tangent, binormal) triple:
            # all three unit length, and mutually orthogonal
            oldspace = [] # we will store the old tangent space here
            for i, (n, t, b) in enumerate(tangentspace):
                oldspace.append(n.as_list() + t.as_list() + b.as_list())
                if abs(n * n - 1) > NifFormat.EPSILON:
                    self.toaster.logger.warn(
                        'non-unit normal %s (norm %f) at vertex %i'
                        % (n, (n * n) ** 0.5, i))
                if abs(t * t - 1) > NifFormat.EPSILON:
                    self.toaster.logger.warn(
                        'non-unit tangent %s (norm %f) at vertex %i'
                        % (t, (t * t) ** 0.5, i))
                if abs(b * b - 1) > NifFormat.EPSILON:
                    self.toaster.logger.warn(
                        'non-unit binormal %s (norm %f) at vertex %i'
                        % (b, (b * b) ** 0.5, i))
                if abs(n * t) + abs(n * b) > NifFormat.EPSILON:
                    volume = n * t.crossproduct(b)
                    self.toaster.logger.warn(
                        'non-ortogonal tangent space at vertex %i' % i)
                    self.toaster.logger.warn(
                        'n * t = %s * %s = %f'%(n, t, n * t))
                    self.toaster.logger.warn(
                        'n * b = %s * %s = %f'%(n, b, n * b))
                    self.toaster.logger.warn(
                        't * b = %s * %s = %f'%(t, b, t * b))
                    self.toaster.logger.warn(
                        'volume = %f' % volume)
            # recalculate the tangent space
            branch.update_tangent_space()
            newspace = [] # we will store the recalculated tangent space here
            for i, (n, t, b) in enumerate(branch.get_tangent_space()):
                newspace.append(n.as_list() + t.as_list() + b.as_list())
            # check if old matches new
            for i, (old, new) in enumerate(zip(oldspace, newspace)):
                for oldvalue, newvalue in zip(old, new):
                    # allow fairly big error
                    if abs(oldvalue - newvalue) > self.PRECISION:
                        self.toaster.logger.warn(
                            'calculated tangent space differs from original '
                            'at vertex %i' % i)
                        # dump normal / tangent / binormal triples
                        self.toaster.logger.warn('old: %s' % old[0:3])
                        self.toaster.logger.warn('old: %s' % old[3:6])
                        self.toaster.logger.warn('old: %s' % old[6:9])
                        self.toaster.logger.warn('new: %s' % new[0:3])
                        self.toaster.logger.warn('new: %s' % new[3:6])
                        self.toaster.logger.warn('new: %s' % new[6:9])
                        break
            # don't recurse further
            return False
class SpellCheckTriStrip(pyffi.spells.nif.NifSpell):
    """Run the stripifier on all triangles from nif files. This spell is also
    useful for checking and profiling the stripifier and the
    stitcher/unstitcher (for instance it checks that it does not
    change the geometry).
    Reports at the end with average strip length (this is useful to compare
    various stripification algorithms over a large collection of geometries).
    """
    SPELLNAME = 'check_tristrip'
    @classmethod
    def toastentry(cls, toaster):
        # accumulate the length of every strip produced over the whole run
        toaster.striplengths = []
        return True
    @classmethod
    def toastexit(cls, toaster):
        # report the mean strip length over all processed geometries
        toaster.msg("average strip length = %.6f"
                    % (sum(toaster.striplengths)
                       / float(len(toaster.striplengths))))
    def datainspect(self):
        # only read files that actually contain tri-based geometry data
        return self.inspectblocktype(NifFormat.NiTriBasedGeomData)
    def branchinspect(self, branch):
        # recurse through the scene graph down to geometry data blocks
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiTriBasedGeomData))
    def branchentry(self, branch):
        """Stripify the branch's triangles and verify that stitching,
        unstitching, and restitching all reproduce the original geometry.
        """
        def report_strip_statistics(triangles, strips):
            """Print some statistics."""
            # handle this just in case
            if not strips:
                return
            # run check
            self.toaster.msg('checking strip triangles')
            pyffi.utils.tristrip._check_strips(triangles, strips)
            if len(strips) == 1:
                # stitched strip
                stitchedstrip = strips[0]
                self.toaster.msg("stitched strip length = %i"
                                 % len(stitchedstrip))
                unstitchedstrips = pyffi.utils.tristrip.unstitch_strip(
                    stitchedstrip)
                # stitches = extra indices the stitched form needed
                self.toaster.msg("num stitches = %i"
                                 % (len(stitchedstrip)
                                    - sum(len(strip)
                                          for strip in unstitchedstrips)))
                # run check
                self.toaster.msg('checking unstitched strip triangles')
                pyffi.utils.tristrip._check_strips(triangles, unstitchedstrips)
                # test stitching algorithm
                self.toaster.msg("restitching")
                restitchedstrip = pyffi.utils.tristrip.stitch_strips(
                    unstitchedstrips)
                self.toaster.msg("stitched strip length = %i"
                                 % len(restitchedstrip))
                self.toaster.msg("num stitches = %i"
                                 % (len(restitchedstrip)
                                    - sum(len(strip)
                                          for strip in unstitchedstrips)))
                # run check
                self.toaster.msg('checking restitched strip triangles')
                pyffi.utils.tristrip._check_strips(triangles, [restitchedstrip])
            else:
                unstitchedstrips = strips
                self.toaster.msg("num strips = %i"
                                 % len(unstitchedstrips))
                self.toaster.msg("average strip length = %.3f"
                                 % (sum((len(strip) for strip in unstitchedstrips), 0.0)
                                    / len(unstitchedstrips)))
        if not isinstance(branch, NifFormat.NiTriBasedGeomData):
            # keep recursing
            return True
        else:
            # get triangles
            self.toaster.msg('getting triangles')
            triangles = branch.get_triangles()
            # report original strip statistics
            if isinstance(branch, NifFormat.NiTriStripsData):
                report_strip_statistics(triangles, branch.get_strips())
            # recalculate strips
            self.toaster.msg('recalculating strips')
            try:
                strips = pyffi.utils.tristrip.stripify(
                    triangles, stitchstrips=False)
                report_strip_statistics(triangles, strips)
            except Exception:
                self.toaster.logger.error('failed to strip triangles')
                self.toaster.logger.error('%s' % triangles)
                raise
            # keep track of strip length
            self.toaster.striplengths += [len(strip) for strip in strips]
            self.toaster.msg('checking stitched strip triangles')
            stitchedstrip = pyffi.utils.tristrip.stitch_strips(strips)
            pyffi.utils.tristrip._check_strips(triangles, [stitchedstrip])
            self.toaster.msg('checking unstitched strip triangles')
            unstitchedstrips = pyffi.utils.tristrip.unstitch_strip(stitchedstrip)
            pyffi.utils.tristrip._check_strips(triangles, unstitchedstrips)
class SpellCheckVersion(pyffi.spells.nif.NifSpell):
    """Checks all versions used by the files (without reading the full files).
    """
    SPELLNAME = 'check_version'
    @classmethod
    def toastentry(cls, toaster):
        toaster.versions = {} # counts number of nifs with version
        toaster.user_versions = {} # tracks used user version's per version
        toaster.user_version_2s = {} # tracks used user version2's per version
        return True
    @classmethod
    def toastexit(cls, toaster):
        # summarize, per nif version, the file count and the user version /
        # user version 2 values that were encountered
        for version in toaster.versions:
            toaster.msgblockbegin("version 0x%08X" % version)
            toaster.msg("number of nifs: %s" % toaster.versions[version])
            toaster.msg("user version: %s" % toaster.user_versions[version])
            toaster.msg("user version2: %s" % toaster.user_version_2s[version])
            toaster.msgblockend()
    def datainspect(self):
        """Report header versions and update the toaster statistics.

        Returns False because everything this spell needs is already in
        the header; the file body is never parsed.
        """
        # some shortcuts
        version = self.data.version
        user_version = self.data.user_version
        user_version_2 = self.data.user_version_2
        # report
        self.toaster.msg("version 0x%08X" % version)
        self.toaster.msg("user version %i" % user_version)
        # BUGFIX: this line previously repeated the "user version" label
        # even though it prints the user_version_2 field
        self.toaster.msg("user version 2 %i" % user_version_2)
        # update stats
        if version not in self.toaster.versions:
            self.toaster.versions[version] = 0
            self.toaster.user_versions[version] = []
            self.toaster.user_version_2s[version] = []
        self.toaster.versions[version] += 1
        if user_version not in self.toaster.user_versions[version]:
            self.toaster.user_versions[version].append(user_version)
        if user_version_2 not in self.toaster.user_version_2s[version]:
            self.toaster.user_version_2s[version].append(user_version_2)
        return False
class SpellCheckMaterialEmissiveValue(pyffi.spells.nif.NifSpell):
    """Check (and warn) about potentially bad material emissive values."""
    SPELLNAME = "check_materialemissivevalue"
    def datainspect(self):
        # worth reading the file only if it has material property blocks
        return self.inspectblocktype(NifFormat.NiMaterialProperty)
    def dataentry(self):
        # per-file flag: once one suspicious material is reported, stop
        self.check_emissive_done = False
        return True
    def branchinspect(self, branch):
        # if we are done, don't recurse further
        if self.check_emissive_done:
            return False
        # only scene-graph nodes and material properties are of interest
        return isinstance(branch, (NifFormat.NiAVObject,
                                   NifFormat.NiMaterialProperty))
    def branchentry(self, branch):
        if not isinstance(branch, NifFormat.NiMaterialProperty):
            # not a material: keep recursing into children
            return True
        # Flag emissive channels above the usual range.  Some meshes
        # (glass, flame, gems, willothewisps...) legitimately use high
        # values, but for most others (i.e. ogres!) it is incorrect.
        emissive = branch.emissive_color
        brightest = max(emissive.r, emissive.g, emissive.b)
        if brightest > 0.5:
            self.toaster.logger.warn(
                "emissive value may be too high (highest value: %f)"
                % brightest)
            # one report per file is enough
            self.check_emissive_done = True
        # material properties have no children worth visiting
        return False
class SpellCheckTriangles(pyffi.spells.nif.NifSpell):
    """Base class for spells which need to check all triangles."""
    SPELLNAME = "check_triangles"
    def datainspect(self):
        # only read files that actually contain tri-based geometry
        return self.inspectblocktype(NifFormat.NiTriBasedGeom)
    @classmethod
    def toastentry(cls, toaster):
        # collect the triangle list of every geometry across all files
        toaster.geometries = []
        return True
    def branchinspect(self, branch):
        # triangles only live below the scene graph
        return isinstance(branch, NifFormat.NiAVObject)
    def branchentry(self, branch):
        if not isinstance(branch, NifFormat.NiTriBasedGeom):
            # keep descending until we reach a geometry node
            return True
        # record this geometry's triangles; nothing to recurse below it
        self.toaster.geometries.append(branch.data.get_triangles())
        return False
    @classmethod
    def toastexit(cls, toaster):
        toaster.msg("found {0} geometries".format(len(toaster.geometries)))
# Optional scientific-computing dependencies: the simulated-annealing
# spell below needs them; fall back to None so it can refuse to run.
try:
    import numpy
    import scipy.optimize
except ImportError:
    numpy = None
    scipy = None
class SpellCheckTrianglesATVR(SpellCheckTriangles):
    """Find optimal parameters for vertex cache algorithm by simulated
    annealing.
    """
    SPELLNAME = "check_triangles_atvr"
    # initial guess and box bounds for the four VertexScore parameters:
    # (CACHE_DECAY_POWER, LAST_TRI_SCORE,
    #  VALENCE_BOOST_SCALE, VALENCE_BOOST_POWER)
    INITIAL = [1.5, 0.75, 2.0, 0.5]
    LOWER = [0.01, -10.0, 0.1, 0.01]
    UPPER = [5.0, 1.0, 10.0, 5.0]
    @classmethod
    def toastentry(cls, toaster):
        # call base class method
        if not SpellCheckTriangles.toastentry(toaster):
            return False
        # check that we have numpy and scipy
        if (numpy is None) or (scipy is None):
            toaster.logger.error(
                # BUGFIX: was self.SPELLNAME, but there is no 'self' in a
                # classmethod -- that raised NameError instead of logging
                cls.SPELLNAME
                + " requires numpy and scipy (http://www.scipy.org/)")
            return False
        return True
    @classmethod
    def get_atvr(cls, toaster, *args):
        """Objective function: mean average-transform-to-vertex ratio over
        all collected geometries, under the vertex score parameters *args*.
        Returns a huge penalty when *args* falls outside [LOWER, UPPER].
        """
        # check bounds
        if any(value < lower or value > upper
               for (lower, value, upper) in zip(
                   cls.LOWER, args, cls.UPPER)):
            return 1e30 # infinity
        cache_decay_power, last_tri_score, valence_boost_scale, valence_boost_power = args
        vertex_score = pyffi.utils.vertex_cache.VertexScore()
        vertex_score.CACHE_DECAY_POWER = cache_decay_power
        vertex_score.LAST_TRI_SCORE = last_tri_score
        vertex_score.VALENCE_BOOST_SCALE = valence_boost_scale
        vertex_score.VALENCE_BOOST_POWER = valence_boost_power
        vertex_score.precalculate()
        print("{0:.3f} {1:.3f} {2:.3f} {3:.3f}".format(
            cache_decay_power, last_tri_score,
            valence_boost_scale, valence_boost_power))
        atvr = []
        for triangles in toaster.geometries:
            mesh = pyffi.utils.vertex_cache.Mesh(triangles, vertex_score)
            new_triangles = mesh.get_cache_optimized_triangles()
            atvr.append(
                pyffi.utils.vertex_cache.average_transform_to_vertex_ratio(
                    new_triangles, 32))
        print(sum(atvr) / len(atvr))
        return sum(atvr) / len(atvr)
    @classmethod
    def toastexit(cls, toaster):
        toaster.msg("found {0} geometries".format(len(toaster.geometries)))
        # NOTE(review): scipy.optimize.anneal was removed in SciPy 0.16;
        # on modern SciPy this needs porting (e.g. to basinhopping)
        result = scipy.optimize.anneal(
            lambda x: cls.get_atvr(toaster, *x),
            numpy.array(cls.INITIAL),
            full_output=True,
            lower=numpy.array(cls.LOWER),
            upper=numpy.array(cls.UPPER),
            #maxeval=10,
            #maxaccept=10,
            #maxiter=10,
            #dwell=10,
            #feps=0.1,
            )
        toaster.msg(str(result))
|
griest024/PokyrimTools
|
pyffi-develop/pyffi/spells/nif/check.py
|
Python
|
mit
| 41,559
|
[
"VisIt"
] |
e3f24d792cc2bcbee0dfe66f21bdd4d4329178027d24bdc65fd8884d33070a8b
|
import numpy as np
import datetime as dt
import matplotlib.pylab as plt
import netCDF4 as nc
import ezmovie_plots as ezp
import os
# ROMS specific packages
try:
import pyroms
except:
print 'pyroms is not installed, will not work with ROMS'
# MITgcm specific packages
try:
import xarray as xr
import MITgcmutils
except:
print 'xarray or MITgcmutils are not installed, will not work with MITgcm '
#------------------------------------------------------------------------------------
class EZmovie():
def __init__(self,diags,plotdir='./anim/'):
self.diags = diags
self.plotdir = plotdir
self.ncol=0
self.nrow=0
self.grd=None
return
def ROMS_movie(self,fileout,start_date,end_date,means='no',step=5):
if (means == 'no'):
total_days = (end_date -start_date).days
add_days = np.arange(0,total_days+step,step)
list_dates = [start_date + dt.timedelta(n) for n in add_days]
elif (means == 'monthly'):
# do something else
pass
elif (means == 'annual'):
# do something else
pass
else:
pass
for current_date in list_dates:
print 'working on ', current_date.isoformat()
self.write_one_frame(model='ROMS',current_date=current_date)
self.make_animated_gif(fileout)
return
def MITgcm_movie(self,fileout,start_step,end_step,means='no',delta_step=5):
if (means == 'no'):
list_steps = list(np.arange(start_step,end_step+delta_step,delta_step))
elif (means == 'monthly'):
# do something else
pass
elif (means == 'annual'):
# do something else
pass
else:
pass
for current_step in list_steps:
print 'working on step', current_step
self.write_one_frame(model='MITgcm',current_step=current_step)
self.make_animated_gif(fileout)
return
def write_one_frame(self,model='ROMS',current_date=None,current_step=None):
''' write one frame of plots '''
# arguments check
if model == 'ROMS' and current_data is None:
exit('for ROMS model, you must provide a date')
if model == 'MITgcm' and current_step is None:
exit('for MITgcm model, you must provide a timestep')
ccurrent_step = str(current_step).zfill(10)
# find number of subplots
for diag in self.diags:
self.ncol = max(self.ncol,diag['col'])
self.nrow = max(self.nrow,diag['row'])
# create figure
plt.figure(figsize=[8*self.ncol,8*self.nrow])
for diag in self.diags:
this_subplot = diag['col'] + (diag['row'] - 1) * self.ncol
ax = plt.subplot(self.nrow,self.ncol,this_subplot)
if model == 'ROMS':
coord1, coord2, data = self.read_data_roms_esm(diag,current_date)
elif model == 'MITgcm':
coord1, coord2, data = self.read_data_mitgcm(diag,current_step)
if diag.has_key('operation'):
if diag['operation'] == 'log10':
data = np.log10(data)
if diag['type'] == 'map':
ezp.plot_map(ax,diag,coord1,coord2,data,current_date,ccurrent_step)
elif diag['type'] == 'section':
ezp.plot_section(ax,diag,coord1,coord2,data,current_date,ccurrent_step)
if current_date is not None:
gifname = self.plotdir + 'frame_' + current_date.isoformat() + '.gif'
pngname = self.plotdir + 'frame_' + current_date.isoformat() + '.png'
if current_step is not None:
gifname = self.plotdir + 'frame_' + str(ccurrent_step) + '.gif'
pngname = self.plotdir + 'frame_' + str(ccurrent_step) + '.png'
plt.savefig(pngname,bbox_inches='tight')
plt.close()
os.system(' convert ' + pngname + ' ' + gifname)
os.system(' rm ' + pngname )
return None
def make_animated_gif(self,fileout):
os.system(' rm ' + self.plotdir + fileout)
os.system(' gifsicle -d 20 -l ' + self.plotdir + 'frame*.gif -o ' + self.plotdir + fileout)
os.system(' rm ' + self.plotdir + 'frame*gif')
return None
#------------------------- ROMS ----------------------------------
def read_data_roms_esm(self,diag,current_date):
''' read the data for each diag '''
ncfile = diag['run']['directory'] + str(current_date.year) + '/' + diag['run']['name'] + \
'_' + diag['filetype'] + '_' + current_date.isoformat() + '.nc'
tmp = self.readnc(ncfile,diag['variable'])
# load grid first time only
if self.grd is None:
self.grd = pyroms.grid.get_ROMS_grid(diag['run']['grid'])
# reload if different grid (allows multiple grid in one movie)
if self.grd.name != diag['run']['grid']:
self.grd = pyroms.grid.get_ROMS_grid(diag['run']['grid'])
if diag['type'] == 'map':
if diag.has_key('level') and diag.has_key('depth'):
exit('you can choose a level or a depth')
if diag.has_key('level'):
if len(tmp.shape) == 2:
# variable is 2d (ssh,...)
data = tmp
elif len(tmp.shape) == 3:
data = tmp[diag['level'],:]
coord1 = self.grd.hgrid.lon_rho
coord2 = self.grd.hgrid.lat_rho
if diag.has_key('depth'):
data, coord1, coord2 = pyroms.tools.zslice(tmp, diag['depth'], self.grd, \
Cpos='rho', vert=False, mode='linear')
elif diag['type'] == 'section':
if diag.has_key('jindex') and diag.has_key('iindex'):
exit('you can choose a section along i or j')
if diag.has_key('jindex'):
data, coord2, coord1, coord3 = pyroms.tools.jslice(tmp, diag['jindex'], self.grd)
if diag.has_key('iindex'):
data, coord2, coord3, coord1 = pyroms.tools.islice(tmp, diag['iindex'], self.grd)
pass
return coord1, coord2, data
def readnc(self,myfile,myvar,myframe=None):
''' read data from netcdf '''
fid = nc.Dataset(myfile,'r')
if myframe is None:
out = fid.variables[myvar][:].squeeze()
else:
out = fid.variables[myvar][myframe,:].squeeze()
fid.close()
return out
#------------------------- MITgcm ----------------------------------
def read_data_mitgcm(self,diag,current_step):
''' '''
tmp = MITgcmutils.mds.rdmds(diag['run']['directory'] + os.sep + diag['variable'],itrs=current_step)
if diag['type'] == 'map':
coord1 = MITgcmutils.mds.rdmds(diag['run']['directory'] + os.sep + 'XC')
coord2 = MITgcmutils.mds.rdmds(diag['run']['directory'] + os.sep + 'YC')
if diag.has_key('level'):
if len(tmp.shape) == 2:
# variable is 2d (Eta,...)
data = tmp
elif len(tmp.shape) == 3:
data = tmp[diag['level'],:]
else:
data = tmp
elif diag['type'] == 'section':
print 'not implemented yet'
pass
return coord1, coord2, data
|
raphaeldussin/EZmovie
|
EZmovie/lib_EZmovie.py
|
Python
|
gpl-3.0
| 6,438
|
[
"NetCDF"
] |
8774ea4f82edff492676caf6f510044bf5d3a9d5c1fe5e7d011adba1901ac411
|
__author__ = 'Michael Isik'
from variate import UniformVariate, GaussianVariate
class Filter(object):
    """Abstract base for population operators applied during the
    evolutionary process (mutation, selection, evaluation, ...).
    """
    def __init__(self):
        # nothing to initialise; subclasses may extend
        pass
    def apply(self, population):
        """Apply this operator to *population*; must be overridden."""
        raise NotImplementedError()
def isiter(obj):
    """Return True if *obj* is iterable, False otherwise."""
    try:
        iter(obj)
    except TypeError:
        # iter() rejects non-iterables with TypeError
        return False
    return True
class SimpleGenomeManipulation(Filter):
    """Abstract filter that rewrites every leaf value of a nested genome."""
    def __init__(self):
        Filter.__init__(self)
    def _manipulateGenome(self, genome, manfunc=None):
        """Recursively replace each non-iterable leaf of *genome* in place.

        :key genome: arbitrarily nested mutable containers whose leaf
            elements may be floats or empty containers.
            E.g. [ [1.] , [1. , 2. , 2 , [3. , 4.] ] , [] ]
        :key manfunc: callable mapping a leaf value to its replacement;
            defaults to self._manipulateValue().
        """
        assert isiter(genome)
        manipulate = self._manipulateValue if manfunc is None else manfunc
        for index, element in enumerate(genome):
            if isiter(element):
                # descend into nested containers
                self._manipulateGenome(element, manipulate)
            else:
                # leaf value: overwrite in place
                genome[index] = manipulate(element)
    def _manipulateValue(self, value):
        """Map a single leaf value to its replacement; must be overridden."""
        raise NotImplementedError()
class SimpleMutation(SimpleGenomeManipulation):
    """ A simple mutation filter, which uses a gaussian variate per default
        for mutation.
    """
    # NOTE: this docstring was originally placed *below* the attribute,
    # where it was a no-op string expression rather than the class
    # docstring; moving it here makes it the real __doc__.
    # shared default; each instance assigns its own variate in __init__
    mutationVariate = None
    def __init__(self):
        SimpleGenomeManipulation.__init__(self)
        self.mutationVariate = GaussianVariate()
        # standard deviation scale of the per-gene perturbation
        self.mutationVariate.alpha = 0.1
        self.verbosity = 0
    def apply(self, population):
        """ Apply the mutation to the population
            :key population: must implement the getIndividuals() method
        """
        for individual in population.getIndividuals():
            self._mutateIndividual(individual)
    def _mutateIndividual(self, individual):
        """ Mutate a single individual
            :key individual: must implement the getGenome() method
        """
        genome = individual.getGenome()
        self._manipulateGenome(genome)
    def _manipulateValue(self, value):
        """ Center the mutation variate on *value* and draw a new sample,
        which is returned as the mutated gene.
        """
        self.mutationVariate.x0 = value
        newval = self.mutationVariate.getSample()
        return newval
class Randomization(SimpleGenomeManipulation):
    """Randomize the genome of every individual of a population with
    samples drawn from a UniformVariate.
    """
    def __init__(self, minval=0., maxval=1.):
        SimpleGenomeManipulation.__init__(self)
        self._minval = minval
        self._maxval = maxval
    def apply(self, population):
        """Overwrite all genomes of *population* with uniform samples."""
        # fresh variate per application, spanning [minval, maxval]
        self._uniform_variate = UniformVariate(self._minval, self._maxval)
        for member in population.getIndividuals():
            self._manipulateGenome(member.getGenome())
    def _manipulateValue(self, value):
        """Ignore the old gene; return a fresh uniform sample."""
        return self._uniform_variate.getSample()
|
iut-ibk/Calimero
|
site-packages/pybrain/supervised/evolino/gfilter.py
|
Python
|
gpl-2.0
| 3,909
|
[
"Gaussian"
] |
a6b4fe21d0a3ae87209ac75adc8e44dfb354d4be0c0eb068a2f6f2ecae0b7ba3
|
from __future__ import division
import difflib
import collections
import traceback
import sys
import ast
import re
from StringIO import StringIO
import sympy
from sympy.core.relational import Relational
import sympy.parsing.sympy_tokenize as sympy_tokenize
from token import NAME
OTHER_SYMPY_FUNCTIONS = ('sqrt',)
Arguments = collections.namedtuple('Arguments', 'function args kwargs')
class Eval(object):
    """Evaluate Python source strings in a persistent namespace.

    The namespace survives across calls, so variables defined by one
    eval() call are visible to later ones.
    """
    def __init__(self, namespace=None):
        # BUGFIX: the original used a mutable default argument
        # (namespace={}), silently sharing one dict across every
        # instance constructed without an explicit namespace.
        self._namespace = {} if namespace is None else namespace
    def get(self, name):
        """Return the value bound to *name*, or None if unbound."""
        return self._namespace.get(name)
    def set(self, name, value):
        """Bind *name* to *value* in the namespace."""
        self._namespace[name] = value
    def eval_node(self, node):
        """Evaluate a single ast expression node in the namespace."""
        tree = ast.fix_missing_locations(ast.Expression(node))
        return eval(compile(tree, '<string>', 'eval'), self._namespace)
    def eval(self, x, use_none_for_exceptions=False, repr_expression=True):
        """Execute the (possibly multi-line) source string *x*.

        All lines but the last are exec'ed; when the last line parses as
        an expression it is eval'ed and its repr (preceded by anything
        printed) is returned.  On error the formatted traceback is
        returned instead (or None when *use_none_for_exceptions* is set).
        """
        globals = self._namespace
        try:
            x = x.strip()
            x = x.replace("\r", "")
            y = x.split('\n')
            if len(y) == 0:
                return ''
            # statements to exec vs the trailing candidate expression
            s = '\n'.join(y[:-1]) + '\n'
            t = y[-1]
            try:
                z = compile(t + '\n', '', 'eval')
            except SyntaxError:
                # last line is not an expression: exec it with the rest
                s += '\n' + t
                z = None
            try:
                # capture print output produced by the executed code
                old_stdout = sys.stdout
                sys.stdout = StringIO()
                eval(compile(s, '', 'exec', division.compiler_flag), globals, globals)
                if not z is None:
                    r = eval(z, globals)
                    if repr_expression:
                        r = repr(r)
                else:
                    r = ''
                if repr_expression:
                    sys.stdout.seek(0)
                    r = sys.stdout.read() + r
            finally:
                sys.stdout = old_stdout
            return r
        except:
            # deliberately broad: any failure is reported as a traceback
            # string to the caller rather than propagated
            if use_none_for_exceptions:
                return
            etype, value, tb = sys.exc_info()
            # If we decide in the future to remove the first frame fromt he
            # traceback (since it links to our code, so it could be confusing
            # to the user), it's easy to do:
            #tb = tb.tb_next
            s = "".join(traceback.format_exception(etype, value, tb))
            return s
class LatexVisitor(ast.NodeVisitor):
    """AST visitor that renders a parsed call expression as LaTeX,
    dispatching to per-function formatters registered via
    formats_function().
    """
    # functions rendered through their unevaluated sympy counterparts
    EXCEPTIONS = {'integrate': sympy.Integral, 'diff': sympy.Derivative}
    # registry: function name -> custom formatter callable
    formatters = {}
    @staticmethod
    def formats_function(name):
        """Decorator registering *f* as the formatter for calls to *name*."""
        def _formats_function(f):
            LatexVisitor.formatters[name] = f
            return f
        return _formats_function
    def format(self, name, node):
        """Run the registered formatter for *name*, or return None."""
        formatter = LatexVisitor.formatters.get(name)
        if not formatter:
            return None
        return formatter(node, self)
    def visit_Call(self, node):
        # NOTE(review): assumes self.evaluator was attached by the caller
        # (see latexify) before visiting.
        buffer = []
        fname = node.func.id
        # Only apply to lowercase names (i.e. functions, not classes)
        if fname in self.__class__.EXCEPTIONS:
            node.func.id = self.__class__.EXCEPTIONS[fname].__name__
            self.latex = sympy.latex(self.evaluator.eval_node(node))
        else:
            result = self.format(fname, node)
            if result:
                self.latex = result
            elif fname[0].islower() and fname not in OTHER_SYMPY_FUNCTIONS:
                # unknown lowercase function: render name upright, then
                # recurse into arguments
                buffer.append("\\mathrm{%s}" % fname.replace('_', '\\_'))
                buffer.append('(')
                latexes = []
                for arg in node.args:
                    if isinstance(arg, ast.Call) and getattr(arg.func, 'id', None) and arg.func.id[0].lower() == arg.func.id[0]:
                        latexes.append(self.visit_Call(arg))
                    else:
                        latexes.append(sympy.latex(self.evaluator.eval_node(arg)))
                buffer.append(', '.join(latexes))
                buffer.append(')')
                self.latex = ''.join(buffer)
            else:
                self.latex = sympy.latex(self.evaluator.eval_node(node))
        return self.latex
# Formatter for solve(expr[, symbols...]): "solve <expr>=0 for x, y".
@LatexVisitor.formats_function('solve')
def format_solve(node, visitor):
    expr = visitor.evaluator.eval_node(node.args[0])
    buffer = [r'\mathrm{solve}\;', sympy.latex(expr)]
    if not isinstance(expr, Relational):
        # bare expressions are implicitly equated to zero
        buffer.append('=0')
    if len(node.args) > 1:
        buffer.append(r'\;\mathrm{for}\;')
    for arg in node.args[1:]:
        buffer.append(sympy.latex(visitor.evaluator.eval_node(arg)))
        buffer.append(r',\, ')
    if len(node.args) > 1:
        # drop the trailing separator
        buffer.pop()
    return ''.join(buffer)
# Formatter for limit(expr, var, point[, dir]) via sympy.Limit.
@LatexVisitor.formats_function('limit')
def format_limit(node, visitor):
    if len(node.args) >= 3:
        return sympy.latex(
            sympy.Limit(*[visitor.evaluator.eval_node(arg) for arg in node.args]))
# Formatter for prime(n): "n-th prime number".
@LatexVisitor.formats_function('prime')
def format_prime(node, visitor):
    number = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    return ''.join([number,
                    r'^\mathrm{',
                    ordinal(int(number)),
                    r'}\; \mathrm{prime~number}'])
# Formatter for isprime(n): "Is n prime?".
@LatexVisitor.formats_function('isprime')
def format_isprime(node, visitor):
    number = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    return ''.join([r'\mathrm{Is~}', number, r'\mathrm{~prime?}'])
# Formatter for nextprime(n).
@LatexVisitor.formats_function('nextprime')
def format_nextprime(node, visitor):
    number = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    return r'\mathrm{Least~prime~greater~than~}' + number
# Formatter for factorint(n).
@LatexVisitor.formats_function('factorint')
def format_factorint(node, visitor):
    number = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    return r'\mathrm{Prime~factorization~of~}' + number
# Formatter for factor(expr).
@LatexVisitor.formats_function('factor')
def format_factor(node, visitor):
    expression = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    return r'\mathrm{Factorization~of~}' + expression
# Formatter for solve_poly_system(eqs, *vars).
# NOTE(review): this def reuses the name 'format_factorint' (defined
# above); registration is keyed by the decorator string, so behavior is
# unaffected, but the module-level name is shadowed.
@LatexVisitor.formats_function('solve_poly_system')
def format_factorint(node, visitor):
    equations = visitor.evaluator.eval_node(node.args[0])
    variables = tuple(map(visitor.evaluator.eval_node, node.args[1:]))
    if len(variables) == 1:
        variables = variables[0]
    return ''.join([r'\mathrm{Solve~} \begin{cases} ',
                    r'\\'.join(map(sympy.latex, equations)),
                    r'\end{cases} \mathrm{~for~}',
                    sympy.latex(variables)])
# Formatter for plot(expr) or plot(**kwargs).
@LatexVisitor.formats_function('plot')
def format_plot(node, visitor):
    if node.args:
        function = sympy.latex(visitor.evaluator.eval_node(node.args[0]))
    else:
        # keyword-only form: render the evaluated keyword dict
        keywords = {}
        for keyword in node.keywords:
            keywords[keyword.arg] = visitor.evaluator.eval_node(keyword.value)
        function = sympy.latex(keywords)
    return r'\mathrm{Plot~}' + function
# Formatter for rsolve(eq, fn[, initial_conditions]).
@LatexVisitor.formats_function('rsolve')
def format_rsolve(node, visitor):
    recurrence = sympy.latex(sympy.Eq(visitor.evaluator.eval_node(node.args[0]), 0))
    if len(node.args) == 3:
        # third argument: dict of initial conditions, one align row each
        conds = visitor.evaluator.eval_node(node.args[2])
        initconds = '\\\\\n'.join('&' + sympy.latex(sympy.Eq(eqn, val)) for eqn, val in conds.items())
        text = r'&\mathrm{Solve~the~recurrence~}' + recurrence + r'\\'
        condstext = r'&\mathrm{with~initial~conditions}\\'
        return r'\begin{align}' + text + condstext + initconds + r'\end{align}'
    else:
        return r'\mathrm{Solve~the~recurrence~}' + recurrence
# Wrapper template used by format_diophantine when free symbols exist.
diophantine_template = (r"\begin{{align}}&{}\\&\mathrm{{where~}}"
                        r"{}\mathrm{{~are~integers}}\end{{align}}")
# Formatter for diophantine(expr).
@LatexVisitor.formats_function('diophantine')
def format_diophantine(node, visitor):
    expression = visitor.evaluator.eval_node(node.args[0])
    symbols = None
    if isinstance(expression, sympy.Basic):
        symbols = expression.free_symbols
    equation = sympy.latex(sympy.Eq(expression, 0))
    result = r'\mathrm{Solve~the~diophantine~equation~}' + equation
    if symbols:
        result = diophantine_template.format(result, tuple(symbols))
    return result
# Formatter shared by summation(...) and product(...).
# NOTE(review): reuses the name 'format_diophantine' above; harmless for
# the registry but shadows the module-level name.
@LatexVisitor.formats_function('summation')
@LatexVisitor.formats_function('product')
def format_diophantine(node, visitor):
    if node.func.id == 'summation':
        klass = sympy.Sum
    else:
        klass = sympy.Product
    return sympy.latex(klass(*map(visitor.evaluator.eval_node, node.args)))
# Formatter for help(fn).
@LatexVisitor.formats_function('help')
def format_help(node, visitor):
    if node.args:
        function = visitor.evaluator.eval_node(node.args[0])
        return r'\mathrm{Show~documentation~for~}' + function.__name__
    return r'\mathrm{Show~documentation~(requires~1~argument)}'
class TopCallVisitor(ast.NodeVisitor):
    """Record the outermost call (or, failing that, the first bare name)
    of a parsed expression in self.call.
    """
    def __init__(self):
        super(TopCallVisitor, self).__init__()
        self.call = None
    def visit_Call(self, node):
        # remember the call; by not recursing we keep only the outermost
        self.call = node
    def visit_Name(self, node):
        # a bare name only counts if no call has been recorded yet
        if self.call is None:
            self.call = node
# From http://stackoverflow.com/a/739301/262727
def ordinal(n):
    """Return the English ordinal suffix ('st', 'nd', 'rd', 'th') for *n*."""
    # the teens (11..19 mod 100) always take 'th'
    if 10 <= n % 100 < 20:
        return 'th'
    return {1 : 'st', 2 : 'nd', 3 : 'rd'}.get(n % 10, "th")
# TODO: modularize all of this
def latexify(string, evaluator):
    """Parse *string* and return its LaTeX rendering via LatexVisitor."""
    a = LatexVisitor()
    # the visitor evaluates sub-nodes through this evaluator
    a.evaluator = evaluator
    a.visit(ast.parse(string))
    return a.latex
def topcall(string):
    """Return the name of the outermost function call in *string*,
    or None when there is no call (and no bare name) to report.
    """
    a = TopCallVisitor()
    a.visit(ast.parse(string))
    call = a.call
    # BUGFIX: the original unconditionally accessed a.call.func, which
    # raised AttributeError when the visitor recorded a bare Name (no
    # .func) or nothing at all (call is None, e.g. for "1+2").
    if isinstance(call, ast.Call):
        return getattr(call.func, 'id', None)
    if isinstance(call, ast.Name):
        return call.id
    return None
def arguments(string_or_node, evaluator):
    """Return an Arguments(function, args, kwargs) tuple for the top-level
    call (or bare name) of *string_or_node*, evaluating each argument with
    *evaluator*; returns None when nothing call-like is found.
    """
    node = None
    if not isinstance(string_or_node, ast.Call):
        # given source text: locate the outermost call / name first
        a = TopCallVisitor()
        a.visit(ast.parse(string_or_node))
        if hasattr(a, 'call'):
            node = a.call
    else:
        node = string_or_node
    if node:
        if isinstance(node, ast.Call):
            name = getattr(node.func, 'id', None) # when is it undefined?
            args, kwargs = None, None
            if node.args:
                args = list(map(evaluator.eval_node, node.args))
            kwargs = node.keywords
            if kwargs:
                # evaluate keyword values into a plain dict
                kwargs = {kwarg.arg: evaluator.eval_node(kwarg.value) for kwarg in kwargs}
            return Arguments(name, args, kwargs)
        elif isinstance(node, ast.Name):
            # bare name: no arguments at all
            return Arguments(node.id, [], {})
    return None
# Matches SymPy constructor wrappers such as Integer(2) or Symbol('x'),
# capturing the wrapped literal in group 2.
re_calls = re.compile(r'(Integer|Symbol|Float|Rational)\s*\([\'\"]?([a-zA-Z0-9\.]+)[\'\"]?\s*\)')
def re_calls_sub(match):
    """Return just the wrapped literal from a SymPy constructor call."""
    return match.group(2)
def removeSymPy(string):
    """Strip SymPy constructor wrappers from *string*, leaving the bare
    literals (e.g. "Integer(2)" -> "2").
    """
    try:
        return re_calls.sub(re_calls_sub, string)
    except IndexError:
        return string
from sympy.parsing.sympy_parser import (
AppliedFunction, implicit_multiplication, split_symbols,
function_exponentiation, implicit_application, OP, NAME,
_group_parentheses, _apply_functions, _flatten, _token_callable)
def _implicit_multiplication(tokens, local_dict, global_dict):
    """Token transformer: insert explicit '*' tokens wherever the input
    relied on implicit multiplication (e.g. ``)(``, ``x y``, ``2 pi``).

    Variant of sympy's own pass that additionally skips the insertion for
    single-character Symbol applications, allowing implicit creation of
    function symbols like ``f(x)``.
    """
    result = []
    # walk each token together with its successor
    for tok, nextTok in zip(tokens, tokens[1:]):
        result.append(tok)
        if (isinstance(tok, AppliedFunction) and
              isinstance(nextTok, AppliedFunction)):
            result.append((OP, '*'))
        elif (isinstance(tok, AppliedFunction) and
              nextTok[0] == OP and nextTok[1] == '('):
            # Applied function followed by an open parenthesis
            if (tok.function[1] == 'Symbol' and
                len(tok.args[1][1]) == 3):
                # Allow implicit function symbol creation
                # TODO XXX need some way to offer alternative parsing here -
                # sometimes we want this and sometimes not, hard to tell when
                # (making it context-sensitive based on input function best)
                continue
            result.append((OP, '*'))
        elif (tok[0] == OP and tok[1] == ')' and
              isinstance(nextTok, AppliedFunction)):
            # Close parenthesis followed by an applied function
            result.append((OP, '*'))
        elif (tok[0] == OP and tok[1] == ')' and
              nextTok[0] == NAME):
            # Close parenthesis followed by an implicitly applied function
            result.append((OP, '*'))
        elif (tok[0] == nextTok[0] == OP
              and tok[1] == ')' and nextTok[1] == '('):
            # Close parenthesis followed by an open parenthesis
            result.append((OP, '*'))
        elif (isinstance(tok, AppliedFunction) and nextTok[0] == NAME):
            # Applied function followed by implicitly applied function
            result.append((OP, '*'))
        elif (tok[0] == NAME and
              not _token_callable(tok, local_dict, global_dict) and
              nextTok[0] == OP and nextTok[1] == '('):
            # Constant followed by parenthesis
            result.append((OP, '*'))
        elif (tok[0] == NAME and
              not _token_callable(tok, local_dict, global_dict) and
              nextTok[0] == NAME and
              not _token_callable(nextTok, local_dict, global_dict)):
            # Constant followed by constant
            result.append((OP, '*'))
        elif (tok[0] == NAME and
              not _token_callable(tok, local_dict, global_dict) and
              (isinstance(nextTok, AppliedFunction) or nextTok[0] == NAME)):
            # Constant followed by (implicitly applied) function
            result.append((OP, '*'))
    if tokens:
        # the pairwise loop above never emits the final token
        result.append(tokens[-1])
    return result
def implicit_multiplication(result, local_dict, global_dict):
    """Makes the multiplication operator optional in most cases.
    Use this before :func:`implicit_application`, otherwise expressions like
    ``sin 2x`` will be parsed as ``x * sin(2)`` rather than ``sin(2*x)``.
    Example:
    >>> from sympy.parsing.sympy_parser import (parse_expr,
    ... standard_transformations, implicit_multiplication)
    >>> transformations = standard_transformations + (implicit_multiplication,)
    >>> parse_expr('3 x y', transformations=transformations)
    3*x*y
    """
    # NOTE(review): this deliberately shadows the implicit_multiplication
    # imported from sympy.parsing.sympy_parser, substituting the customized
    # _implicit_multiplication pass defined in this module.
    for step in (_group_parentheses(implicit_multiplication),
                 _apply_functions,
                 _implicit_multiplication):
        result = step(result, local_dict, global_dict)
    result = _flatten(result)
    return result
def custom_implicit_transformation(result, local_dict, global_dict):
    """Allows a slightly relaxed syntax.
    - Parentheses for single-argument method calls are optional.
    - Multiplication is implicit.
    - Symbol names can be split (i.e. spaces are not needed between
      symbols).
    - Functions can be exponentiated.
    Example:
    >>> from sympy.parsing.sympy_parser import (parse_expr,
    ... standard_transformations, implicit_multiplication_application)
    >>> parse_expr("10sin**2 x**2 + 3xyz + tan theta",
    ... transformations=(standard_transformations +
    ... (implicit_multiplication_application,)))
    3*x*y*z + 10*sin(x**2)**2 + tan(theta)
    """
    # order matters: symbol splitting must run before the implicit
    # multiplication/application passes
    for step in (split_symbols, implicit_multiplication,
                 implicit_application, function_exponentiation):
        result = step(result, local_dict, global_dict)
    return result
# Friendly aliases mapped onto the canonical sympy function names.
SYNONYMS = {
    u'derivative': 'diff',
    u'derive': 'diff',
    u'integral': 'integrate',
    u'antiderivative': 'integrate',
    u'factorize': 'factor',
    u'graph': 'plot',
    u'draw': 'plot'
}
def synonyms(tokens, local_dict, global_dict):
    """Make some names synonyms for others.
    This is done at the token level so that the "stringified" output that
    Gamma displays shows the correct function name. Must be applied before
    auto_symbol.
    """
    rewritten = []
    for token in tokens:
        kind, text = token[0], token[1]
        if kind == NAME and text in SYNONYMS:
            # swap the alias for its canonical name, keeping token type
            rewritten.append((NAME, SYNONYMS[text]))
        else:
            rewritten.append(token)
    return rewritten
def close_matches(s, global_dict):
    """
    Checks undefined names to see if they are close matches to a defined name.

    Returns the source string with each unknown multi-character name
    replaced by its closest known match, or None when nothing was replaced.
    """
    tokens = sympy_tokenize.generate_tokens(StringIO(s.strip()).readline)
    result = []
    has_result = False
    # known names: everything in scope plus the synonym aliases
    all_names = set(global_dict).union(SYNONYMS)
    # strip the token location info to avoid strange untokenize results
    tokens = [(tok[0], tok[1]) for tok in tokens]
    for token in tokens:
        if (token[0] == NAME and
                token[1] not in all_names and
                len(token[1]) > 1):
            matches = difflib.get_close_matches(token[1], all_names)
            # skip an exact self-match; take the next-best candidate
            if matches and matches[0] == token[1]:
                matches = matches[1:]
            if matches:
                result.append((NAME, matches[0]))
                has_result = True
                continue
        result.append(token)
    if has_result:
        return sympy_tokenize.untokenize(result).strip()
    return None
|
kaichogami/sympy_gamma
|
app/logic/utils.py
|
Python
|
bsd-3-clause
| 16,740
|
[
"VisIt"
] |
33fe24a13f4c861ea3c56327bb18f2579aa2ec00c28483a3c07ba33f2d3cb93e
|
from os import makedirs
from os.path import join, dirname, exists
from string import Template
from galaxy.util.bunch import Bunch
from galaxy.objectstore import build_object_store_from_config
from .test_utils import TempDirectoryTestCase
from .test_utils import skip
class MockDataset(object):
    """Minimal dataset stub exposing only the attributes the object store
    code interacts with in these tests."""
    def __init__(self, id):
        self.id = id
        # backend id; stays None until an object store claims the dataset
        self.object_store_id = None
class PulsarObjectStoreTest(TempDirectoryTestCase):
    """Integration test for the (defunct) Pulsar object store proxy.

    Starts a Pulsar test server backed by an on-disk object store, then
    drives a proxy "pulsar" object store through exists/empty/get_data/
    size/update_from_file/delete operations.
    """

    def __write(self, contents, name):
        """Write bytes ``contents`` to ``name`` under the temp directory,
        creating intermediate directories; returns the absolute path."""
        path = join(self.temp_directory, name)
        directory = dirname(path)
        if not exists(directory):
            makedirs(directory)
        # Use a context manager so the handle is closed deterministically
        # (the original leaked the open file object).
        with open(path, "wb") as fh:
            fh.write(contents)
        return path

    @skip("No longer testing defunct objectstore")
    def test_pulsar_objectstore(self):
        # Define real object store used by Pulsar server.
        object_store_config_file = join(self.temp_directory, "object_store_conf.xml")
        with open(object_store_config_file, "w") as configf:
            config_template = Template("""<?xml version="1.0"?>
<object_store type="disk">
    <files_dir path="${temp_directory}"/>
    <extra_dir type="temp" path="${temp_directory}"/>
    <extra_dir type="job_work" path="${temp_directory}"/>
</object_store>
""")
            config_contents = config_template.safe_substitute(temp_directory=self.temp_directory)
            configf.write(config_contents)
        app_conf = dict(
            object_store_config_file=object_store_config_file,
            private_token="12345",
        )
        from .test_utils import test_pulsar_server
        with test_pulsar_server(app_conf=app_conf) as server:
            url = server.application_url
            # Define a proxy Pulsar object store.
            proxy_object_store_config_file = join(self.temp_directory, "proxy_object_store_conf.xml")
            with open(proxy_object_store_config_file, "w") as configf:
                config_template = Template("""<?xml version="1.0"?>
<object_store type="pulsar" url="$url" private_token="12345" transport="urllib">
    <!-- private_token is optional - see Pulsar documentation for more information. -->
    <!-- transport is optional, set to curl to use libcurl instead of urllib for communication with Pulsar. -->
</object_store>
""")
                contents = config_template.safe_substitute(url=url)
                configf.write(contents)
            config = Bunch(object_store_config_file=proxy_object_store_config_file)
            object_store = build_object_store_from_config(config=config)
            # Test no dataset with id 1 exists.
            absent_dataset = MockDataset(1)
            assert not object_store.exists(absent_dataset)
            # Write empty dataset 2 in second backend, ensure it is empty and
            # exists.
            empty_dataset = MockDataset(2)
            self.__write(b"", "000/dataset_2.dat")
            assert object_store.exists(empty_dataset)
            assert object_store.empty(empty_dataset)
            # Write non-empty dataset in backend 1, test it is not empty & exists.
            hello_world_dataset = MockDataset(3)
            self.__write(b"Hello World!", "000/dataset_3.dat")
            assert object_store.exists(hello_world_dataset)
            assert not object_store.empty(hello_world_dataset)
            # Test get_data
            data = object_store.get_data(hello_world_dataset)
            assert data == "Hello World!"
            data = object_store.get_data(hello_world_dataset, start=1, count=6)
            assert data == "ello W"
            # Test Size
            # Test absent and empty datasets yield size of 0.
            assert object_store.size(absent_dataset) == 0
            assert object_store.size(empty_dataset) == 0
            # Elsewise
            assert object_store.size(hello_world_dataset) > 0  # Should this always be the number of bytes?
            # Test percent used (to some degree)
            percent_store_used = object_store.get_store_usage_percent()
            assert percent_store_used > 0.0
            assert percent_store_used < 100.0
            # Test update_from_file test
            output_dataset = MockDataset(4)
            output_real_path = join(self.temp_directory, "000", "dataset_4.dat")
            assert not exists(output_real_path)
            output_working_path = self.__write(b"NEW CONTENTS", "job_working_directory1/example_output")
            object_store.update_from_file(output_dataset, file_name=output_working_path, create=True)
            assert exists(output_real_path)
            # Test delete
            to_delete_dataset = MockDataset(5)
            to_delete_real_path = self.__write(b"content to be deleted!", "000/dataset_5.dat")
            assert object_store.exists(to_delete_dataset)
            assert object_store.delete(to_delete_dataset)
            assert not object_store.exists(to_delete_dataset)
            assert not exists(to_delete_real_path)
            # Test json content.
            complex_contents_dataset = MockDataset(6)
            complex_content = b'{"a":6}'
            self.__write(complex_content, "000/dataset_6.dat")
            assert object_store.exists(complex_contents_dataset)
            # BUG FIX: the original assigned the comparison result to `data`
            # and never asserted it, so the check was silently skipped.
            assert object_store.get_data(complex_contents_dataset) == complex_content
|
natefoo/pulsar
|
test/pulsar_objectstore_test.py
|
Python
|
apache-2.0
| 5,307
|
[
"Galaxy"
] |
0e5f90ad20f0358cfa775d95ac01810b5306ff9511e423ec706137acb238ecea
|
# vim:expandtab:smarttab
import logging
from pylons import g, Response, config, cache
import os
import os.path as path
import cjson
from amqplib import client_0_8 as amqp
import facebook
from facebook import FacebookError
from urllib2 import URLError
import df
from puid import lookup_fingerprint
from musicbrainz2.webservice import (
Query,
TrackFilter,
WebServiceError
)
from facebook.wsgi import facebook
from sqlalchemy.sql import or_
from masterapp.lib.base import *
from masterapp import model
from sqlalchemy.sql import and_
import thread
from mailer import mail
from masterapp.lib.fbaccess import fbaccess_noredirect
log = logging.getLogger(__name__)
class Response(object):
    """Plain-string status codes returned to the uploader client.

    BUG FIX: the original assigned ``upload = 'upload'`` twice (a redundant
    duplicate class attribute); the duplicate is removed. The visible
    attribute set and values are unchanged.
    """
    upload = 'upload'
    reauthenticate = 'reauthenticate'
    done = 'done'
    wait = 'wait'
    retry = 'retry'
class UploadController(BaseController):
    """Pylons controller handling song uploads from the desktop client.

    Flow: the client first POSTs tags (``tags``); if the song is unknown by
    tags and by PUID fingerprint, the client is told to upload the file
    (``file``). Accepted files are queued to an AMQP exchange for async
    processing. Python 2 code (``has_key``, ``except X, e`` syntax).
    """
    def __init__(self, *args):
        # NOTE(review): passes BaseController (not UploadController) to
        # super() and forwards `args` un-splatted — looks buggy; confirm.
        super(BaseController, self).__init__(args)
        self.apikey = config['pyfacebook.apikey']
        self.secret = config['pyfacebook.secret']
        # AMQP connection used to enqueue file-processing jobs.
        self._connection = amqp.Connection(
            host = "localhost:5672",
            userid = "guest",
            password = "guest",
            virtual_host = config['fileprocess.vhost'],
            insist = False)
        self._channel = self._connection.channel()
    def _check_tags(self, user, fdict):
        # Return True if a song matching (artist, album, title) already
        # exists; as a side effect, make `user` an owner of it if not already.
        song = None
        if fdict.has_key('title') and fdict.has_key('album') and \
                fdict.has_key('artist'):
            qry = model.Session.query(model.Song).join(
                model.Song.artist, model.Song.album).filter(
                model.Artist.name == fdict['artist']).filter(
                model.Album.title == fdict['album']).filter(
                model.Song.title == fdict['title'])
            song = qry.first()
        if not song:
            return False
        # Check to see if this user owns this songs
        owner = model.Session.query(model.SongOwner).filter(
            and_(model.SongOwner.songid==song.id,
                model.SongOwner.uid == user.id)
        ).first()
        if owner:
            # This request.params has already been uploaded by this fella
            log.debug('%s has already been uploaded by %s',
                fdict.get('title'), user.id)
            return True
        # Make a new owner
        user.add_song(song)
        log.debug('%s added to %s\'s files', fdict.get('title'), user.id)
        return True
    def _build_fdict(self, user, src=None):
        # Normalize tag parameters (from request.params by default) into the
        # dict shape consumed by the file-processing queue.
        if not src:
            src = request.params
        return dict(
            puid = src.get('puid'),
            artist = src.get('artist'),
            album = src.get('album'),
            title = src.get('title'),
            duration = src.get('duration'),
            bitrate = src.get('bitrate'),
            date = src.get('date'),
            tracknumber = src.get('tracknumber'),
            genre = src.get('genre'),
            fbid = user.fbid
        )
    def _check_puid(self, user):
        # Return True if the client-supplied PUID fingerprint is already in
        # our DB; in that case queue processing from tags alone (no upload).
        userpuid = request.params.get('puid')
        if not userpuid:
            log.debug("Puid was blank, upload the file")
            return False
        dbpuids = model.Session.query(model.Puid).filter(
            model.Puid.puid == userpuid
        ).all()
        if len(dbpuids) > 0:
            self._process(self._build_fdict(user))
            log.debug("We have the puid for %s in our db, don't need the song",
                request.params.get('title'))
            return True
        return False
    def _get_user(self, fbid):
        # Look up the User row by Facebook id; raises if absent (.one()).
        return model.Session.query(model.User).filter_by(fbid = fbid).one()
    def _get_fbid(self, request):
        # Resolve the Facebook uid for this session key, caching the result
        # for 120s so we don't hit the Facebook API on every request.
        session_key = request.params.get('session_key')
        if session_key == None:
            return None
        @fbaccess_noredirect
        def get_fbid():
            facebook.session_key = session_key
            fbid = facebook.users.getLoggedInUser()
            if type(fbid) == int:
                return str(fbid)
            return fbid
        sessionc = cache.get_cache('upload.sessions')
        return sessionc.get(session_key,
            expiretime = 120,
            createfunc = get_fbid
        )
    class PostException(Exception):
        """An exception that means the content-length did not match the actual
        amount of data read"""
        pass
    def read_postdata(self, dest_file=None):
        """Reads the postdata into the file object or throws it away
        otherwise"""
        chunk_size = 1024
        file_size = int(request.environ["CONTENT_LENGTH"])
        body = request.environ['wsgi.input']
        # Python 2 integer division: full chunks first, then the remainder.
        for i in range(0, file_size/chunk_size):
            data = body.read(chunk_size)
            if len(data) != chunk_size:
                raise self.PostException
            if dest_file != None:
                dest_file.write(data)
        data = body.read(file_size%chunk_size)
        if len(data) != file_size%chunk_size:
            raise self.PostException
        if dest_file != None:
            dest_file.write(data)
    def _process(self, file):
        # Enqueue a persistent (delivery_mode=2) JSON job for the
        # file-processing worker.
        message = amqp.Message(cjson.encode(file), delivery_mode=2)
        self._channel.basic_publish(
            message,
            exchange = config['app_conf']['fileprocess.exchange'],
            routing_key = "start_fileprocessing")
    def file(self, id):
        """POST /upload/file/id: This one uploads new songs for realsies"""
        # first get session key
        try:
            fbid = self._get_fbid(request)
            if fbid == None:
                return Response.reauthenticate
        except Exception, e:
            # Drain the POST body before replying so the client connection
            # isn't reset mid-upload.
            try:
                self.read_postdata()
            except self.PostException:
                pass
            if hasattr(e, 'code') and e.code == 102:
                return Response.reauthenticate
            else:
                return Response.retry
        # Refuse uploads when the upload partition is over 85% full.
        if config['app_conf']['check_df'] == 'true' and \
                df.check(config['app_conf']['upload_dir']) > 85:
            try:
                self.read_postdata()
            except self.PostException:
                pass
            return Response.wait
        dest_dir = path.join(config['app_conf']['upload_dir'], fbid)
        if not path.exists(dest_dir):
            os.mkdir(dest_dir)
        dest_path = os.path.join(dest_dir, id)
        if not os.path.exists(dest_path):
            # `file(...)` here resolves to the Python 2 builtin, not this
            # method (methods aren't in the function's name lookup path).
            dest_file = file(dest_path, 'wb')
            try:
                self.read_postdata(dest_file)
            except self.PostException, e:
                dest_file.close()
                os.remove(dest_path)
                return Response.retry
            dest_file.close()
            #finally, put the file in file_queue for processing
            fdict = {
                'fname': dest_path,
                'fbid': fbid,
                'usersha': id,
                'puid': request.params.get('puid')
            }
            self._process(fdict)
        else:
            try:
                self.read_postdata()
            except self.PostException, e:
                log.warn("A problem occurred with the post: %s", e)
        return Response.done
    def tags(self):
        # POST endpoint: decide from tags/PUID whether the client must
        # upload the file, or whether we already have the song.
        try:
            fbid = self._get_fbid(request)
            if fbid == None:
                return Response.reauthenticate
        except Exception, e:
            if hasattr(e, 'code') and e.code == 102:
                return Response.reauthenticate
            else:
                return Response.retry
        user = self._get_user(fbid)
        # Check for api version
        version = request.params.get('version')
        if not version=='1.0':
            abort(400, 'Version must be 1.0')
        if request.params.get('tags'):
            return self._mass_tags(user, request.params['tags'])
        # Check our database for tag match
        if self._check_tags(user, self._build_fdict(user)):
            return Response.done
        # Check our database for PUID
        if self._check_puid(user):
            return Response.done
        # We haven't seen the song, let's get the whole file
        return Response.upload
    def _mass_tags(self, user, str_tags):
        # Batch variant of tags(): JSON list in, JSON list of per-song
        # done/upload statuses out.
        tag_list = cjson.decode(str_tags)
        if type(tag_list) != list:
            abort(400, 'Invalid tags sent')
        response_list = []
        for tags in tag_list:
            if self._check_tags(user, self._build_fdict(user, tags)):
                response_list.append(Response.done)
            else:
                response_list.append(Response.upload)
        return cjson.encode(response_list)
    @fbaccess_noredirect
    def desktop_redirect(self):
        # After Facebook login, bounce the browser back to the locally
        # running desktop client with the session key.
        if facebook.check_session(request):
            url = 'http://localhost:26504/complete_login?session_key='+ \
                facebook.session_key
        else:
            abort(404)
        #session_key returns unicode, have to convert back to string
        redirect_to(str(url))
    def desktop_login(self):
        # Entry point for the desktop client's Facebook login flow.
        url = facebook.get_login_url(canvas=False,next='/desktop_redirect')
        redirect_to(str(url))
    def upload_ping(self):
        # Liveness check for the uploader; empty 200 response.
        return ''
    def error(self):
        # Receive a client-side stack trace (max 4 KiB) and email it to the
        # developers in a background thread. Returns '1' on success, '0' if
        # the body was empty.
        client_size = int(request.environ["CONTENT_LENGTH"])
        read_size = min(client_size, 4096)
        stack_trace = request.environ['wsgi.input'].read(read_size)
        if stack_trace == '':
            return '0'
        def sendmail():
            if config['use_gmail'] == 'yes':
                password = config['feedback_password']
            else:
                password = None
            mail(config['smtp_server'], config['smtp_port'],
                config['feedback_email'], password,
                'brian@harmonize.fm', 'Uploader exception', stack_trace)
        thread.start_new_thread(sendmail, ())
        return '1'
|
JustinTulloss/harmonize.fm
|
masterapp/masterapp/controllers/upload.py
|
Python
|
mit
| 9,890
|
[
"Brian"
] |
06fa2b598f85ea664d75809f5f897a8d4c68ae7b569847b0aec931724bab2d41
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Modifications Copyright 2017 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tries to translate a GYP file into a GN file.
Output from this script should be piped to ``gn format --stdin``. It most likely
needs to be manually fixed after that as well.
"""
import argparse
import ast
import collections
import itertools
import os
import os.path
import re
import sys
sys.path.append(
os.path.abspath(
os.path.join(__file__, os.pardir, os.pardir, os.pardir)))
import cobalt.tools.paths # pylint: disable=g-import-not-at-top
# A (name, value_rewrites) pair describing how a GYP variable is renamed in
# GN and, optionally, how its values are remapped (e.g. 0/1 -> false/true).
VariableRewrite = collections.namedtuple('VariableRewrite',
                                         ['name', 'value_rewrites'])
# The directory containing the input file, as a repository-absolute (//foo/bar)
# path
repo_abs_input_file_dir = ''
# GYP dependency string -> GN label substitutions; presumably populated from
# configuration elsewhere in this file — TODO confirm.
deps_substitutions = {}
# GYP variable name -> VariableRewrite; presumably populated elsewhere.
variable_rewrites = {}
class GNException(Exception):
  """Raised when a GYP construct cannot be translated to GN."""
  pass
class GYPCondToGNNodeVisitor(ast.NodeVisitor):
  """An AST NodeVisitor which translates GYP conditions to GN strings.

  Given a GYP condition as an AST with mode eval, outputs a string containing
  the GN equivalent of that condition. Simplifies conditions involving the
  variables OS and os_posix, and performs variable substitutions.

  Example:
  (Assume arm_neon is renamed to arm_use_neon and converted from 0/1 to
  true/false):
  >>> g = GYPCondToGNNodeVisitor()
  >>> g.visit(ast.parse('arm_neon and target_arch=="raspi-2"', mode='eval'))
  '(arm_use_neon && target_cpu == "raspi-2")'
  >>> g.visit(ast.parse('use_system_libjpeg and target_arch=="raspi-2"',
  ...                   mode='eval'))
  '(use_system_libjpeg && target_cpu == "raspi-2")'
  >>> g.visit(ast.parse('arm_neon == 1', mode='eval'))
  'arm_use_neon == true'
  >>> g.visit(ast.parse('1', mode='eval'))
  'true'
  >>> g.visit(ast.parse('0', mode='eval'))
  'false'
  >>> g.visit(ast.parse('arm_neon != 0 and target_arch != "raspi-2" and
  use_system_libjpeg or enable_doom_melon', mode='eval'))
  '((arm_use_neon != false && target_cpu != "raspi-2" && use_system_libjpeg) ||
  enable_doom_melon)'
  >>> g.visit(ast.parse('arm_neon != 0 and target_arch != "raspi-2" or
  use_system_libjpeg and enable_doom_melon', mode='eval'))
  '((arm_use_neon != false && target_cpu != "raspi-2") || (use_system_libjpeg &&
  enable_doom_melon))'
  """
  def visit_Expression(self, expr):  # pylint: disable=invalid-name
    # Root node of a mode='eval' parse; translate its body.
    return self.visit(expr.body)
  def visit_Num(self, num):  # pylint: disable=invalid-name
    # A number that doesn't occur inside a Compare is taken in boolean context
    return GYPValueToGNString(bool(num.n))
  def visit_Str(self, string):  # pylint: disable=invalid-name
    return GYPValueToGNString(string.s)
  def visit_Name(self, name):  # pylint: disable=invalid-name
    # Bare variable reference: apply a configured rename if one exists.
    if name.id in variable_rewrites:
      return variable_rewrites[name.id].name
    else:
      return name.id
  def visit_BoolOp(self, boolop):  # pylint: disable=invalid-name
    # and/or chains become parenthesized &&/|| chains. Python 2
    # (itertools.imap).
    glue = ' && ' if isinstance(boolop.op, ast.And) else ' || '
    return '(' + glue.join(itertools.imap(self.visit, boolop.values)) + ')'
  def visit_Compare(self, compare):  # pylint: disable=invalid-name
    # Only single ==/!= comparisons with a variable on the LHS are supported.
    if len(compare.ops) != 1:
      raise GNException("This script doesn't support multiple operators in "
                        'comparisons')
    if isinstance(compare.ops[0], ast.Eq):
      op = ' == '
    elif isinstance(compare.ops[0], ast.NotEq):
      op = ' != '
    else:
      raise GNException('Operator ' + str(compare.ops[0]) + ' not supported')
    if isinstance(compare.left, ast.Name):
      if isinstance(compare.comparators[0], ast.Name):  # var1 == var2
        left = self.visit_Name(compare.left)
        right = self.visit_Name(compare.comparators[0])
      elif isinstance(compare.comparators[0], ast.Num):  # var1 == 42
        left, right = TransformVariable(compare.left.id,
                                        compare.comparators[0].n)
      elif isinstance(compare.comparators[0], ast.Str):  # var1 == "some string"
        left, right = TransformVariable(compare.left.id,
                                        compare.comparators[0].s)
      else:
        raise GNException('Unknown RHS type ' + str(compare.comparators[0]))
    else:
      raise GNException('Non-variables on LHS of comparison are not supported')
    # Simplify boolean comparisons: `x == true` -> `x`; `x == false` -> `!(x)`.
    if right == 'true' and op == ' == ' or right == 'false' and op == ' != ':
      return left
    elif right == 'false' and op == ' == ' or right == 'true' and op == ' != ':
      return '!(' + left + ')'
    else:
      return left + op + right
  def generic_visit(self, node):
    # Any other node type (calls, subscripts, ...) is untranslatable.
    raise GNException("I don't know how to convert node " + str(node))
class OSComparisonRewriter(ast.NodeTransformer):
  """Folds comparisons on OS/os_posix/os_bsd into constant 1/0 Num nodes and
  simplifies the boolean expressions around them (the target here is always
  "starboard", so these comparisons have known answers)."""
  def visit_Compare(self, compare):  # pylint: disable=invalid-name
    """Substitute instances of comparisons involving the OS and os_posix
    variables with their known values in Compare nodes.

    Examples:
    ``OS == "starboard"`` -> ``True``
    ``OS != "starboard"`` -> ``False``
    ``OS == "linux"`` -> ``False``
    ``os_posix == 1`` -> ``False``
    ``os_bsd == 1`` -> ``False``
    """
    if len(compare.ops) != 1:
      raise GNException("This script doesn't support multiple operators in "
                        'comparisons')
    if not isinstance(compare.left, ast.Name):
      return compare
    elif compare.left.id == 'OS':
      if (isinstance(compare.comparators[0], ast.Str) and
          compare.comparators[0].s == 'starboard'):
        # OS == "starboard" -> True, OS != "starboard" -> False
        new_node = ast.Num(1 if isinstance(compare.ops[0], ast.Eq) else 0)
      else:
        # OS == "something else" -> False, OS != "something else" -> True
        new_node = ast.Num(0 if isinstance(compare.ops[0], ast.Eq) else 1)
      return ast.copy_location(new_node, compare)
    elif compare.left.id in {'os_posix', 'os_bsd'}:
      if (isinstance(compare.comparators[0], ast.Num) and
          compare.comparators[0].n == 0):
        # os_posix == 0 -> True, os_posix != 0 -> False
        # ditto for os_bsd
        new_node = ast.Num(1 if isinstance(compare.ops[0], ast.Eq) else 0)
      else:
        # os_posix == (not 0) -> False, os_posix != (not 0) -> True
        # ditto for os_bsd
        new_node = ast.Num(0 if isinstance(compare.ops[0], ast.Eq) else 1)
      return ast.copy_location(new_node, compare)
    else:
      return compare
  def visit_BoolOp(self, boolop):  # pylint: disable=invalid-name
    """Simplify BoolOp nodes by weeding out Falses and Trues resulting from
    the elimination of OS comparisons.
    """
    doing_and = isinstance(boolop.op, ast.And)
    new_values = map(self.visit, boolop.values)
    # None marks "short-circuited": the whole expression has a known value.
    new_values_filtered = []
    for v in new_values:
      if isinstance(v, ast.Num):
        # "x and False", or "y or True" - short circuit
        if (doing_and and v.n == 0) or (not doing_and and v.n == 1):
          new_values_filtered = None
          break
        # "x and True", or "y or False" - skip the redundant value
        elif (doing_and and v.n == 1) or (not doing_and and v.n == 0):
          pass
        else:
          new_values_filtered.append(v)
      else:
        new_values_filtered.append(v)
    if new_values_filtered is None:
      new_node = ast.Num(0 if doing_and else 1)
    elif len(new_values_filtered) == 0:
      new_node = ast.Num(1 if doing_and else 0)
    elif len(new_values_filtered) == 1:
      new_node = new_values_filtered[0]
    else:
      new_node = ast.BoolOp(boolop.op, new_values_filtered)
    return ast.copy_location(new_node, boolop)
def Warn(msg):
  # Print a yellow "Warning:" banner plus msg to stderr (Python 2 syntax).
  print >> sys.stderr, '\x1b[1;33mWarning:\x1b[0m', msg
def WarnAboutRemainingKeys(dic):
  """Emit a warning for every key left untranslated in *dic*."""
  for leftover in dic:
    Warn("I don't know what {} is".format(leftover))
def TransformVariable(var, value):
  """Apply any configured rename/value remap to a (variable, value) pair.

  Looks *var* up in the module-level variable_rewrites table; when found,
  the variable is renamed and, if a value map is configured, the value is
  remapped. Returns the (possibly new) name and the GN-stringified value.

  Example:
    ``('arm_neon', 1)`` -> ``('arm_use_neon', 'true')``
    ``('gl_type', 'none')`` -> ``('gl_type', 'none')``
  """
  rewrite = variable_rewrites.get(var)
  if rewrite is not None:
    var = rewrite.name
    if rewrite.value_rewrites is not None:
      value = rewrite.value_rewrites[value]
  return var, GYPValueToGNString(value)
def GYPValueToGNString(value, allow_dicts=True):
  """Returns a stringified GN equivalent of the Python value.

  allow_dicts indicates if this function will allow converting dictionaries
  to GN scopes. This is only possible at the top level, you can't nest a
  GN scope in a list, so this should be set to False for recursive calls.

  Python 2 only: relies on `unicode`, `basestring` and `ur'...'` literals.
  """
  if isinstance(value, str):
    if value.find('\n') >= 0:
      raise GNException("GN strings don't support newlines")
    # Escape characters
    ret = value.replace('\\', r'\\').replace('"', r'\"') \
        .replace('$', r'\$')
    # Convert variable substitutions
    ret = re.sub(r'[<>]\((\w+)\)', r'$\1', ret)
    return '"' + ret + '"'
  if isinstance(value, unicode):
    if value.find(u'\n') >= 0:
      raise GNException("GN strings don't support newlines")
    # Escape characters
    ret = value.replace(u'\\', ur'\\').replace(u'"', ur'\"') \
        .replace(u'$', ur'\$')
    # Convert variable substitutions
    ret = re.sub(ur'[<>]\((\w+)\)', ur'$\1', ret)
    return (u'"' + ret + u'"').encode('utf-8')
  if isinstance(value, bool):
    # Checked before int: in Python, bool is a subclass of int.
    if value:
      return 'true'
    return 'false'
  if isinstance(value, list):
    return '[ %s ]' % ', '.join(GYPValueToGNString(v) for v in value)
  if isinstance(value, dict):
    if not allow_dicts:
      raise GNException('Attempting to recursively print a dictionary.')
    result = ''
    for key in sorted(value):
      if not isinstance(key, basestring):
        raise GNException('Dictionary key is not a string.')
      result += '%s = %s\n' % (key, GYPValueToGNString(value[key], False))
    return result
  if isinstance(value, int):
    return str(value)
  raise GNException('Unsupported type when printing to GN.')
def GYPTargetToGNString(target_dict):
  """Given a target dict, output the GN equivalent.

  Maps the GYP target `type` to a GN rule name, then delegates the body to
  GYPTargetToGNString_Inner. Any config() blocks generated from
  dependent-settings sections are emitted before the target itself.
  """
  target_header_text = ''
  target_name = target_dict.pop('target_name')
  target_type = target_dict.pop('type')
  if target_type in {'executable', 'shared_library', 'static_library'}:
    if target_type == 'static_library':
      Warn('converting static library, check to see if it should be a '
           'source_set')
    target_header_text += '{}("{}") {{\n'.format(target_type, target_name)
  elif target_type == 'none':
    target_header_text += 'group("{}") {{\n'.format(target_name)
  elif target_type == '<(gtest_target_type)':
    target_header_text += 'test("{}") {{\n'.format(target_name)
  elif target_type == '<(final_executable_type)':
    target_header_text += 'final_executable("{}") {{\n'.format(target_name)
  elif target_type == '<(component)' or target_type == '<(library)':
    Warn('converting static library, check to see if it should be a '
         'source_set')
    target_header_text += 'static_library("{}") {{\n'.format(target_name)
  else:
    raise GNException("I don't understand target type {}".format(target_type))
  target_body_text, configs_text = \
      GYPTargetToGNString_Inner(target_dict, target_name)
  return configs_text + target_header_text + target_body_text + '}\n\n'
def ProcessIncludeExclude(dic, param):
  """Translate dic[param] and dic[param + '!'] lists to GN.

  The plain key becomes an `=` assignment, the bang key a `-=` removal;
  both keys are consumed (popped) from *dic*.

  Example input:
    {
      'sources': [ 'foo.cc', 'bar.cc', 'baz.cc' ],
      'sources!': [ 'bar.cc' ]
    }
  Example output:
    sources = [ 'foo.cc', 'bar.cc', 'baz.cc' ]
    sources -= [ 'bar.cc' ]
  """
  pieces = []
  for suffix, op in (('', '='), ('!', '-=')):
    key = param + suffix
    if key in dic:
      joined = ', '.join(GYPValueToGNString(s) for s in dic.pop(key))
      pieces.append('{} {} [ {} ]\n'.format(param, op, joined))
  pieces.append('\n')
  return ''.join(pieces)
def ProcessDependentSettings(dependent_settings_dict, target_dict, param,
                             renamed_param, config_name):
  """Translates direct_dependent_settings and all_dependent_settings blocks
  to their GN equivalents. This is done by creating a new GN config, putting
  the settings in that config, and adding the config to the target's
  public_configs/all_dependent_configs. Returns a tuple of
  (target_text, config_text).

  Also eliminates the translated settings from the target_dict so they aren't
  translated twice.

  Example input:
    {
      'target_name': 'abc',
      'direct_dependent_settings': {
        'defines': [ "FOO" ]
      }
    }
  Example target text output:
    public_configs = [ ":abc_direct_config" ]
  Example config text output:
    config("abc_direct_config") {
      defines = [ "FOO" ]
    }
  """
  ret_target = ''
  ret_config = 'config("{}") {{\n'.format(config_name)
  def FilterList(key):
    # Drop from the target any entries the dependent-settings block already
    # carries, so they aren't emitted twice (mutates target_dict).
    if key in target_dict and key in dependent_settings_dict:
      filtered_list = sorted(
          set(target_dict[key]) - set(dependent_settings_dict[key]))
      if filtered_list:
        target_dict[key] = filtered_list
      else:
        del target_dict[key]
  for inner_param in [
      'include_dirs', 'defines', 'asmflags', 'cflags', 'cflags_c', 'cflags_cc',
      'cflags_objc', 'cflags_objcc', 'ldflags'
  ]:
    FilterList(inner_param)
    FilterList(inner_param + '!')
    ret_config += ProcessIncludeExclude(dependent_settings_dict, inner_param)
  if 'variables' in dependent_settings_dict:
    Warn("variables block inside {}. You'll need to handle that manually."
         .format(param))
    del dependent_settings_dict['variables']
  if 'conditions' in dependent_settings_dict:
    for i, cond_block in enumerate(dependent_settings_dict['conditions']):
      cond_config_name = '{}_{}'.format(config_name, i)
      # The lambda closes over cond_config_name, but it is invoked
      # synchronously inside GYPConditionToGNString, so Python's
      # late-binding of closures is harmless here.
      t, c = GYPConditionToGNString(cond_block,
          lambda dsd: ProcessDependentSettings(dsd, target_dict, param,
                                               renamed_param,
                                               cond_config_name))
      ret_config += c
      ret_target += t
    del dependent_settings_dict['conditions']
  if 'target_conditions' in dependent_settings_dict:
    for i, cond_block in \
        enumerate(dependent_settings_dict['target_conditions']):
      cond_config_name = '{}_t{}'.format(config_name, i)
      t, c = GYPConditionToGNString(cond_block,
          lambda dsd: ProcessDependentSettings(dsd, target_dict, param,
                                               renamed_param,
                                               cond_config_name))
      ret_config += c
      ret_target += t
    del dependent_settings_dict['target_conditions']
  ret_config += '}\n\n'
  ret_target += '{} = [ ":{}" ]\n\n'.format(renamed_param, config_name)
  WarnAboutRemainingKeys(dependent_settings_dict)
  return ret_target, ret_config
def GYPTargetToGNString_Inner(target_dict, target_name):
  """Translates the body of a GYP target into a GN string.

  Returns (target_text, configs_text): target_text goes inside the target's
  GN block; configs_text holds any config() blocks generated from
  dependent-settings sections and must be emitted at file scope.
  Consumes (deletes) the keys it translates from target_dict.
  """
  configs_text = ''
  target_text = ''
  dependent_text = ''
  if 'variables' in target_dict:
    target_text += GYPVariablesToGNString(target_dict['variables'])
    del target_dict['variables']
  target_text += ProcessIncludeExclude(target_dict, 'sources')
  for param, renamed_param, config_name_elem in \
      [('direct_dependent_settings', 'public_configs', 'direct'),
       ('all_dependent_settings', 'all_dependent_configs', 'dependent')]:
    if param in target_dict:
      config_name = '{}_{}_config'.format(target_name, config_name_elem)
      # Append dependent_text to target_text after include_dirs/defines/etc.
      # BUG FIX: accumulate with += — the original reassigned dependent_text
      # each iteration, dropping the public_configs line whenever a target
      # had both direct_dependent_settings and all_dependent_settings.
      t, c = ProcessDependentSettings(
          target_dict[param], target_dict, param, renamed_param, config_name)
      dependent_text += t
      configs_text += c
      del target_dict[param]
  for param in [
      'include_dirs', 'defines', 'asmflags', 'cflags', 'cflags_c', 'cflags_cc',
      'cflags_objc', 'cflags_objcc', 'ldflags'
  ]:
    target_text += ProcessIncludeExclude(target_dict, param)
  target_text += dependent_text
  if 'export_dependent_settings' in target_dict:
    target_text += GYPDependenciesToGNString(
        'public_deps', target_dict['export_dependent_settings'])
    # Remove dependencies covered here from the ordinary dependencies list
    target_dict['dependencies'] = sorted(
        set(target_dict['dependencies']) -
        set(target_dict['export_dependent_settings']))
    if not target_dict['dependencies']:
      del target_dict['dependencies']
    del target_dict['export_dependent_settings']
  if 'dependencies' in target_dict:
    target_text += GYPDependenciesToGNString('deps',
                                             target_dict['dependencies'])
    del target_dict['dependencies']
  if 'conditions' in target_dict:
    for cond_block in target_dict['conditions']:
      t, c = GYPConditionToGNString(
          cond_block, lambda td: GYPTargetToGNString_Inner(td, target_name))
      configs_text += c
      target_text += t
    del target_dict['conditions']
  if 'target_conditions' in target_dict:
    for cond_block in target_dict['target_conditions']:
      t, c = GYPConditionToGNString(
          cond_block, lambda td: GYPTargetToGNString_Inner(td, target_name))
      configs_text += c
      target_text += t
    del target_dict['target_conditions']
  WarnAboutRemainingKeys(target_dict)
  return target_text, configs_text
def GYPDependenciesToGNString(param, dep_list):
  r"""Translates a GYP dependency into a GN dependency. Tries to intelligently
  perform target renames as according to GN style conventions.

  Examples:
  (Note that <(DEPTH) has already been translated into // by the time this
  function is called)
  >>> GYPDependenciesToGNString('deps', ['//cobalt/math/math.gyp:math'])
  'deps = [ "//cobalt/math" ]\n\n'
  >>> GYPDependenciesToGNString('public_deps',
  ...                           ['//testing/gtest.gyp:gtest'])
  'public_deps = [ "//testing/gtest" ]\n\n'
  >>> GYPDependenciesToGNString('deps', ['//third_party/icu/icu.gyp:icui18n'])
  'deps = [ "//third_party/icu:icui18n" ]\n\n'
  >>> GYPDependenciesToGNString('deps',
  ...     ['//cobalt/browser/browser_bindings_gen.gyp:generated_types'])
  'deps = [ "//cobalt/browser/browser_bindings_gen:generated_types" ]\n\n'
  >>> GYPDependenciesToGNString('deps', ['allocator'])
  'deps = [ ":allocator" ]\n\n'
  >>> # Suppose the input file is in //cobalt/foo/bar/
  >>> GYPDependenciesToGNString('deps', ['../baz.gyp:quux'])
  'deps = [ "//cobalt/foo/baz:quux" ]\n\n'
  """
  new_dep_list = []
  for dep in dep_list:
    if dep in deps_substitutions:
      new_dep_list.append(deps_substitutions[dep])
    else:
      # Patterns, tried in order:
      #   m1: .../foo/foo.gyp:foo -> .../foo   m2: .../foo.gyp:foo -> .../foo
      #   m3: .../foo/foo.gyp:bar -> .../foo:bar
      #       NOTE(review): the '.' before 'gyp' is unescaped in m3, so it
      #       matches any character — confirm whether that is intentional.
      #   m4: .../path.gyp:bar -> .../path:bar   m5: bare name -> :name
      m1 = re.match(r'(.*/)?(\w+)/\2\.gyp:\2$', dep)
      m2 = re.match(r'(.*/)?(\w+)\.gyp:\2$', dep)
      m3 = re.match(r'(.*/)?(\w+)/\2.gyp:(\w+)$', dep)
      m4 = re.match(r'(.*)\.gyp:(\w+)$', dep)
      m5 = re.match(r'\w+', dep)
      if m1:
        new_dep = '{}{}'.format(m1.group(1) or '', m1.group(2))
      elif m2:
        new_dep = '{}{}'.format(m2.group(1) or '', m2.group(2))
      elif m3:
        new_dep = '{}{}:{}'.format(m3.group(1) or '', m3.group(2), m3.group(3))
      elif m4:
        new_dep = '{}:{}'.format(m4.group(1), m4.group(2))
      elif m5:
        new_dep = ':' + dep
      else:
        Warn("I don't know how to translate dependency " + dep)
        continue
      if not (new_dep.startswith('//') or new_dep.startswith(':') or
              os.path.isabs(new_dep)):
        # Rebase new_dep to be repository-absolute
        new_dep = os.path.normpath(repo_abs_input_file_dir + new_dep)
      new_dep_list.append(new_dep)
  quoted_deps = ['"{}"'.format(d) for d in new_dep_list]
  return '{} = [ {} ]\n\n'.format(param, ', '.join(quoted_deps))
def GYPVariablesToGNString(varblock):
  """Translates a GYP variables block into its GN equivalent, performing
  variable substitutions as necessary. Python 2 (dict.viewitems)."""
  ret = ''
  if 'variables' in varblock:
    # Recursively flatten nested variables dicts.
    ret += GYPVariablesToGNString(varblock['variables'])
  for k, v in varblock.viewitems():
    if k in {'variables', 'conditions'}:
      continue
    # A trailing '%' is GYP's "default value" marker — strip it.
    if k.endswith('%'):
      k = k[:-1]
    # A trailing '!' marks a removal list -> GN '-=' assignment.
    if not k.endswith('!'):
      ret += '{} = {}\n'.format(*TransformVariable(k, v))
    else:
      ret += '{} -= {}\n'.format(*TransformVariable(k[:-1], v))
  if 'conditions' in varblock:
    for cond_block in varblock['conditions']:
      ret += GYPConditionToGNString(cond_block, GYPVariablesToGNString)[0]
  ret += '\n'
  return ret
def GYPConditionToGNString(cond_block, recursive_translate):
  """Translates a GYP condition block into a GN string. The recursive_translate
  param is a function that is called with the true subdict and the false
  subdict if applicable. The recursive_translate function returns either a
  single string that should go inside the if/else block, or a tuple of
  (target_text, config_text), in which the target_text goes inside the if/else
  block and the config_text goes outside.

  Returns a tuple (target_text, config_text), where config_text is the empty
  string if recursive_translate only returns one string.
  """
  # cond_block is GYP's [condition, true_dict, (optional) false_dict].
  cond = cond_block[0]
  iftrue = cond_block[1]
  iffalse = cond_block[2] if len(cond_block) == 3 else None
  # Fold known OS comparisons first, then translate to GN syntax.
  ast_cond = ast.parse(cond, mode='eval')
  ast_cond = OSComparisonRewriter().visit(ast_cond)
  translated_cond = GYPCondToGNNodeVisitor().visit(ast_cond)
  if translated_cond == 'false' and iffalse is None:
    # if (false) { ... } -> nothing
    # Special cased to avoid printing warnings from the unnecessary translation
    # of the true clause
    return '', ''
  translated_iftrue = recursive_translate(iftrue)
  if isinstance(translated_iftrue, tuple):
    iftrue_text, aux_iftrue_text = translated_iftrue
  else:
    iftrue_text, aux_iftrue_text = translated_iftrue, ''
  if translated_cond == 'true':  # Tautology - just return the body
    return iftrue_text, aux_iftrue_text
  elif iffalse is None:  # Non-tautology, non-contradiction, no else clause
    return ('if (' + translated_cond + ') {\n' + iftrue_text + '}\n\n',
            aux_iftrue_text)
  else:  # Non-tautology, else clause present
    translated_iffalse = recursive_translate(iffalse)
    if isinstance(translated_iffalse, tuple):
      iffalse_text, aux_iffalse_text = translated_iffalse
    else:
      iffalse_text, aux_iffalse_text = translated_iffalse, ''
    if translated_cond == 'false':  # if (false) { blah } else { ... } -> ...
      return iffalse_text, aux_iffalse_text
    else:
      return ('if (' + translated_cond + ') {\n' + iftrue_text + '} else {\n' +
              iffalse_text + '}\n\n', aux_iftrue_text + aux_iffalse_text)
def ToplevelGYPToGNString(toplevel_dict):
  """Translates a toplevel GYP dict to GN. This is the main function which is
  called to perform the GYP to GN translation.

  Consumes the keys it understands from toplevel_dict (so leftovers can be
  reported by WarnAboutRemainingKeys) and returns the generated GN text.
  """
  pieces = []
  if 'variables' in toplevel_dict:
    pieces.append(GYPVariablesToGNString(toplevel_dict.pop('variables')))
  for target in toplevel_dict.pop('targets', []):
    pieces.append(GYPTargetToGNString(target))
  # Both kinds of condition blocks translate the same way at the top level,
  # recursing back into this function for their bodies.
  for key in ('conditions', 'target_conditions'):
    for cond_block in toplevel_dict.pop(key, []):
      pieces.append(
          GYPConditionToGNString(cond_block, ToplevelGYPToGNString)[0])
  WarnAboutRemainingKeys(toplevel_dict)
  return ''.join(pieces)
def LoadPythonDictionary(path):
  """Reads *path* and evaluates its contents as a Python dict literal.

  Args:
    path: file containing a Python expression that evaluates to a dict
        (the standard GYP file format).
  Returns:
    The resulting dict.
  Raises:
    SyntaxError: re-raised with .filename pointing at *path*.
    Exception: for any other evaluation failure, wrapped with the path.
  """
  with open(path, 'r') as fin:
    file_string = fin.read()
  try:
    # SECURITY: this evals arbitrary file contents -- only run on trusted GYP
    # files. Disabling builtins limits, but does not eliminate, the risk.
    file_data = eval(file_string, {'__builtins__': None}, None)  # pylint: disable=eval-used
  except SyntaxError as e:
    # 'except X as e' (instead of the Python-2-only 'except X, e') keeps this
    # valid on Python 2.6+ and Python 3.
    e.filename = path  # point the traceback at the GYP file, not this script
    raise
  except Exception as e:
    raise Exception('Unexpected error while reading %s: %s' % (path, str(e)))
  assert isinstance(file_data, dict), '%s does not eval to a dictionary' % path
  return file_data
def ReplaceSubstrings(values, search_for, replace_with):
  """Recursively replaces substrings in a value.

  Replaces all substrings of the "search_for" with "replace_with" for all
  strings occurring in "values". This is done by recursively iterating into
  lists as well as the keys and values of dictionaries.

  Returns a new structure; the input is not mutated.
  """
  if isinstance(values, str):
    # NOTE(review): on Python 2, unicode strings are not `str` and fall
    # through unchanged -- confirm whether GYP input can contain unicode.
    return values.replace(search_for, replace_with)
  if isinstance(values, list):
    return [ReplaceSubstrings(v, search_for, replace_with) for v in values]
  if isinstance(values, dict):
    # For dictionaries, do the replacement on both keys and values.
    # .items() instead of the Python-2-only .viewitems() keeps this portable;
    # the iteration behavior is identical.
    return {ReplaceSubstrings(k, search_for, replace_with):
                ReplaceSubstrings(v, search_for, replace_with)
            for k, v in values.items()}
  # Assume everything else (ints, bools, None, ...) is unchanged.
  return values
def CalculatePaths(filename):
global repo_abs_input_file_dir
abs_input_file_dir = os.path.abspath(os.path.dirname(filename))
abs_repo_root = cobalt.tools.paths.REPOSITORY_ROOT + '/'
if not abs_input_file_dir.startswith(abs_repo_root):
raise ValueError('Input file is not in repository')
repo_abs_input_file_dir = '//' + abs_input_file_dir[len(abs_repo_root):] + '/'
def LoadDepsSubstitutions():
  """Populates the deps_substitutions global from deps_substitutions.txt.

  Each non-blank line holds 'old_dep new_dep'; '<(DEPTH)/' prefixes in the
  old name are rewritten to '//' before being used as the lookup key.
  """
  dirname = os.path.dirname(__file__)
  if dirname:
    dirname += '/'
  with open(dirname + 'deps_substitutions.txt', 'r') as f:
    for line in f:
      if not line.strip():
        continue  # skip blank lines
      dep, new_dep = line.split()
      deps_substitutions[dep.replace('<(DEPTH)/', '//')] = new_dep
def LoadVariableRewrites():
  """Populates the variable_rewrites global from variable_rewrites.dict.

  The config dict may carry three sections: 'rewrites' (full VariableRewrite
  specs), 'renames' (name-only changes), and 'booleans' (variables whose 0/1
  values should map to False/True).
  """
  dirname = os.path.dirname(__file__)
  if dirname:
    dirname += '/'
  config = LoadPythonDictionary(dirname + 'variable_rewrites.dict')

  for old_name, rewrite in config.get('rewrites', {}).items():
    variable_rewrites[old_name] = VariableRewrite(*rewrite)
  for old_name, new_name in config.get('renames', {}).items():
    variable_rewrites[old_name] = VariableRewrite(new_name, None)

  bool_mapping = {0: False, 1: True}
  for name in config.get('booleans', []):
    if name in variable_rewrites:
      # Layer the boolean value mapping onto an already-registered rewrite.
      variable_rewrites[name] = variable_rewrites[name]._replace(
          value_rewrites=bool_mapping)
    else:
      variable_rewrites[name] = VariableRewrite(name, bool_mapping)
def main():
parser = argparse.ArgumentParser(description='Convert a subset of GYP to GN.')
parser.add_argument(
'-r',
'--replace',
action='append',
help='Replaces substrings. If passed a=b, replaces all '
'substrs a with b.')
parser.add_argument('file', help='The GYP file to read.')
args = parser.parse_args()
CalculatePaths(args.file)
data = LoadPythonDictionary(args.file)
if args.replace:
# Do replacements for all specified patterns.
for replace in args.replace:
split = replace.split('=')
# Allow 'foo=' to replace with nothing.
if len(split) == 1:
split.append('')
assert len(split) == 2, "Replacement must be of the form 'key=value'."
data = ReplaceSubstrings(data, split[0], split[1])
# Also replace <(DEPTH)/ with //
data = ReplaceSubstrings(data, '<(DEPTH)/', '//')
LoadDepsSubstitutions()
LoadVariableRewrites()
print ToplevelGYPToGNString(data)
if __name__ == '__main__':
main()
|
youtube/cobalt
|
cobalt/tools/gyp_to_gn.py
|
Python
|
bsd-3-clause
| 28,277
|
[
"VisIt"
] |
80c2951e7b6ecdfe735e7b7116b8475e8f00fefafa41b55a3d3bd29bd6a9707e
|
import os, sys, colorsys
from os.path import join, getsize
import ConfigParser
import random
import gobject
import gtk
import pango
import gconf
import goocanvas, cairo
#import canvasItems
#import db_conf, gene, pham
from phamerator import db_conf, gene, pham, phamerator_manage_db
from phamerator_manage_db import *
def main(argv):
    """Stand-alone entry point: draws one phage genome map in a window.

    Reads the phage name from sys.argv[1] (argv is accepted but unused,
    mirroring the original signature) and blocks in gtk.main().
    """
    window = gtk.Window()
    window.set_default_size(640, 600)
    window.show()
    window.connect("delete_event", on_delete_event)
    scrolled_win = gtk.ScrolledWindow()
    scrolled_win.set_shadow_type(gtk.SHADOW_IN)
    scrolled_win.show()
    window.add(scrolled_win)
    # FIXME: need to specify username, password, server, and database
    c = db_conf.db_conf().get_cursor()
    PhageID = get_PhageID_from_name(c, sys.argv[1])
    length = get_length_of_genome(c, PhageID)
    canvas = goocanvas.Canvas()
    canvas.props.automatic_bounds = True
    canvas.background_color = 'white'
    canvas.show()
    scrolled_win.add(canvas)
    canvas.connect("item-created", on_item_created)
    # Create the canvas model. BUG FIX: the original assigned to self.root and
    # called self.set_root_item_model inside this module-level function, which
    # raised NameError ('self' is undefined here); use a local group and set
    # the model on the canvas instead.
    root = goocanvas.GroupModel()
    canvas.set_root_item_model(root)
    gtk.main()
class phameratorGroupModel(goocanvas.GroupModel):
    """GroupModel that records its cumulative translation in self.x/self.y."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: super() must be given this subclass, not the parent class.
        # super(goocanvas.GroupModel, self) starts the MRO lookup *above*
        # GroupModel and therefore skipped GroupModel's own __init__.
        super(phameratorGroupModel, self).__init__(*args, **kwargs)
        self.x = 0  # accumulated horizontal offset
        self.y = 0  # accumulated vertical offset

    def translate(self, x, y):
        """Translates the group and keeps a running total of the offset."""
        self.x = self.x + x
        self.y = self.y + y
        super(phameratorGroupModel, self).translate(x, y)
class CanvasController:
def __init__(self,c, mapWTree, interface):
print 'init CanvasController'
self.c = c
self.mapWTree = mapWTree
self.DNATextView = mapWTree.get_object('DNATextView')
self.DNATextView.set_wrap_mode(gtk.WRAP_CHAR)
self.ProteinTextView = mapWTree.get_object('ProteinTextView')
self.ProteinTextView.set_wrap_mode(gtk.WRAP_CHAR)
#self.assign_interface(interface)
self.DNATextBuffer = self.DNATextView.get_buffer()
self.ProteinTextBuffer = self.ProteinTextView.get_buffer()
self.client = gconf.client_get_default()
checkBox = self.mapWTree.get_object('show_phamily_names_checkBox')
checkBox.set_active(self.client.get_bool('/apps/phamerator/show_pham_names'))
checkBox = self.mapWTree.get_object('shorten_description_checkBox')
checkBox.set_active(self.client.get_bool('/apps/phamerator/shorten_description'))
checkBox = self.mapWTree.get_object('show_description_checkBox')
checkBox.set_active(self.client.get_bool('/apps/phamerator/show_description'))
checkBox = self.mapWTree.get_object('show_domains_checkBox')
checkBox.set_active(self.client.get_bool('/apps/phamerator/show_domains'))
checkBox = self.mapWTree.get_object('hover_highlights_pham_checkBox')
checkBox.set_active(self.client.get_bool('/apps/phamerator/hover_highlights_pham'))
checkBox = self.mapWTree.get_object('blastAlignmentCheckBox')
checkBox.set_active(self.client.get_bool('/apps/phamerator/show_alignment'))
checkBox = self.mapWTree.get_object('eValuesCheckBox')
checkBox.set_active(self.client.get_bool('/apps/phamerator/show_alignment_text'))
if self.client.get_bool('/apps/phamerator/show_alignment'):
checkBox.set_sensitive(True)
else:
checkBox.set_sensitive(False)
self.client.set_bool('/apps/phamerator/show_alignment_text', False)
gene_color = self.client.get_string('/apps/phamerator/gene_color')
try: self.mapWTree.get_object(gene_color).set_active(True)
except: self.mapWTree.get_object('color_by_phamily').set_active(True)
def assign_interface(self, interface):
self.interface = interface
interface.controller = self
def gene_selection_changed(self, selectedItems):
# get the nt and aa sequences and stick them in the boxes
GeneIDs = []
for s in selectedItems:
GeneIDs.append(s.get_model().get_data("GeneID"))
self.interface.show_gene_sequences(GeneIDs)
def scan_for_plugins(self):
'''look in the plugin directory (~/.phamerator/plugins) for plugins'''
pluginDir = os.environ['HOME'] + '.phamerator/plugins'
print 'scanning for plugins in %s' % pluginDir
for module in os.listdir(pluginDir):
if module in ('.', '..'): continue # ignore these in the directory listing
for file in os.listdir(module):
if file == '__init__.py':
plugin = canvasPlugin(module)
class canvasPlugin:
    '''this class is intended to be subclassed by plugins, who will use it to
    draw on the genome canvas'''

    def __init__(self, module):
        '''instances get a reference to the db cursor and canvas from the
        CanvasInterface'''
        self.c = None       # DB cursor, filled in by the CanvasInterface
        self.canvas = None  # how do I get this from phageManager App()?
        self.register(module)

    # BUG FIX: both methods below were defined without 'self', so the
    # self.register(module) call above raised TypeError on instantiation.
    def register(self, module):
        '''make the App() aware of this plugin'''
        pass
        # do something, but what?

    def unregister(self):
        '''make the App() unaware of this plugin'''
        pass
class geneModel(goocanvas.RectModel):
    """Canvas rectangle for a single gene box; repaints itself whenever the
    /apps/phamerator/gene_color gconf preference changes."""

    # Maps a gene_color scheme to the per-gene color stashed via set_data().
    _SCHEME_TO_DATA_KEY = {
        'color_by_phamily': 'phamily_color',
        'color_by_gc': 'gc_color',
        'color_by_abundance': 'abundance_color',
        'color_by_cluster_conservation': 'cluster_conservation_color',
    }

    def __init__(self, *args, **kwargs):
        super(geneModel, self).__init__(*args, **kwargs)
        self.set_data('width', kwargs['width'])
        self.client = gconf.client_get_default()
        self.client.add_dir('/apps/phamerator', gconf.CLIENT_PRELOAD_NONE)
        self.client.notify_add('/apps/phamerator/gene_color', self.change_color)
        if self.client.get_string('/apps/phamerator/gene_color') == 'no_color':
            self.props.fill_color = 'white'

    def change_color(self, client, *args, **kwargs):
        """gconf callback: re-read the color scheme and repaint this gene."""
        scheme = self.client.get_string('/apps/phamerator/gene_color')
        if scheme == 'no_color':
            # Writing the key back mirrors the original notify behaviour.
            self.client.set_string('/apps/phamerator/gene_color', scheme)
            self.props.fill_color = 'white'
        elif scheme in self._SCHEME_TO_DATA_KEY:
            self.client.set_string('/apps/phamerator/gene_color', scheme)
            self.props.fill_color = self.get_data(self._SCHEME_TO_DATA_KEY[scheme])
class domainModel(goocanvas.RectModel):
    """Canvas rectangle for one conserved-domain hit; shows/hides itself with
    the show_domains preference and recolors with the gene_color scheme."""

    # Schemes this item knows how to color itself for, and where the color
    # value was stashed via set_data().
    _SCHEME_TO_DATA_KEY = {
        'color_by_phamily': 'phamily_color',
        'color_by_cluster_conservation': 'cluster_conservation_color',
    }

    def __init__(self, *args, **kwargs):
        super(domainModel, self).__init__(*args, **kwargs)
        self.client = gconf.client_get_default()
        self.client.add_dir('/apps/phamerator', gconf.CLIENT_PRELOAD_NONE)
        self.client.notify_add('/apps/phamerator/show_domains', self.show_domains)
        self.show_domains(self.client)

    def show_domains(self, client, *args, **kwargs):
        """gconf callback: toggle visibility from the show_domains pref."""
        visible = self.client.get_bool('/apps/phamerator/show_domains')
        self.props.visibility = (goocanvas.ITEM_VISIBLE if visible
                                 else goocanvas.ITEM_INVISIBLE)

    def change_color(self, client, *args, **kwargs):
        """gconf callback: repaint from the stored per-scheme color."""
        scheme = self.client.get_string('/apps/phamerator/gene_color')
        if scheme == 'no_color':
            # Writing the key back mirrors the original notify behaviour.
            self.client.set_string('/apps/phamerator/gene_color', scheme)
            self.props.fill_color = 'white'
        elif scheme in self._SCHEME_TO_DATA_KEY:
            self.client.set_string('/apps/phamerator/gene_color', scheme)
            self.props.fill_color = self.get_data(self._SCHEME_TO_DATA_KEY[scheme])
class PhamNameLabel(goocanvas.TextModel):
    """Text label for a pham name; tracks the show_pham_names preference."""

    def __init__(self, *args, **kwargs):
        super(PhamNameLabel, self).__init__(*args, **kwargs)
        self.client = gconf.client_get_default()
        self.client.notify_add('/apps/phamerator/show_pham_names',
                               self.show_pham_names)
        visible = self.client.get_bool('/apps/phamerator/show_pham_names')
        self.props.visibility = (goocanvas.ITEM_VISIBLE if visible
                                 else goocanvas.ITEM_INVISIBLE)

    def show_pham_names(self, client, *args, **kwargs):
        """gconf callback: show or hide this label."""
        visible = self.client.get_bool('/apps/phamerator/show_pham_names')
        self.props.visibility = (goocanvas.ITEM_VISIBLE if visible
                                 else goocanvas.ITEM_INVISIBLE)
class GeneDescriptionLabel(goocanvas.TextModel):
    """Label for a gene's description; ellipsizes and hides itself according
    to the shorten_description / show_description gconf preferences."""

    def __init__(self, *args, **kwargs):
        super(GeneDescriptionLabel, self).__init__(*args, **kwargs)
        self.client = gconf.client_get_default()
        self.client.add_dir('/apps/phamerator', gconf.CLIENT_PRELOAD_NONE)
        self.client.notify_add('/apps/phamerator/shorten_description',
                               self.shorten_description)
        self.client.notify_add('/apps/phamerator/show_description',
                               self.show_description)
        self._apply_ellipsize()
        self.show_description(self.client)

    def _apply_ellipsize(self):
        # ELLIPSIZE_END truncates long descriptions with an ellipsis.
        if self.client.get_bool('/apps/phamerator/shorten_description'):
            self.props.ellipsize = pango.ELLIPSIZE_END
        else:
            self.props.ellipsize = pango.ELLIPSIZE_NONE

    def shorten_description(self, client, *args, **kwargs):
        """gconf callback for the shorten_description preference."""
        self._apply_ellipsize()

    def show_description(self, client, *args, **kwargs):
        """gconf callback: toggle visibility from show_description."""
        visible = self.client.get_bool('/apps/phamerator/show_description')
        self.props.visibility = (goocanvas.ITEM_VISIBLE if visible
                                 else goocanvas.ITEM_INVISIBLE)
class PolylineModelListener(goocanvas.PolylineModel):
    """Polyline (alignment polygon) that tracks the show_alignment pref."""

    def __init__(self, *args, **kwargs):
        super(PolylineModelListener, self).__init__(*args, **kwargs)
        self.client = gconf.client_get_default()
        self.client.add_dir('/apps/phamerator', gconf.CLIENT_PRELOAD_NONE)
        self.client.notify_add('/apps/phamerator/show_alignment',
                               self.show_alignment)
        self.show_alignment(self.client)

    def show_alignment(self, client, *args, **kwargs):
        """gconf callback: show or hide this alignment polygon."""
        visible = self.client.get_bool('/apps/phamerator/show_alignment')
        self.props.visibility = (goocanvas.ITEM_VISIBLE if visible
                                 else goocanvas.ITEM_INVISIBLE)
class BlastAlignmentLabel(goocanvas.TextModel):
    """E-value text label for an alignment; tracks show_alignment_text."""

    def __init__(self, *args, **kwargs):
        super(BlastAlignmentLabel, self).__init__(*args, **kwargs)
        self.set_data('type', 'blastAlignmentLabel')
        self.client = gconf.client_get_default()
        self.client.add_dir('/apps/phamerator', gconf.CLIENT_PRELOAD_NONE)
        self.client.notify_add('/apps/phamerator/show_alignment_text',
                               self.show_alignment_text)
        self.show_alignment_text(self.client)

    def show_alignment_text(self, client, *args, **kwargs):
        """gconf callback: show or hide the e-value text."""
        visible = self.client.get_bool('/apps/phamerator/show_alignment_text')
        self.props.visibility = (goocanvas.ITEM_VISIBLE if visible
                                 else goocanvas.ITEM_INVISIBLE)
class ColorConverter:
    """Maps gene/alignment statistics onto display colors.

    Returns either '#rrggbb' strings or packed ints, matching what the
    goocanvas fill_color / fill_color_rgba properties expect.
    """

    @staticmethod
    def _hsv_to_rgb255(h, s, v):
        """colorsys HSV -> (r, g, b) ints in 0-255 (truncating).

        BUG FIX: the original formatted raw floats with %x / %02x, which
        raises TypeError on Python 2.7+ -- components must be ints.
        """
        r, g, b = colorsys.hsv_to_rgb(h, s, v)
        return (int(r * 255), int(g * 255), int(b * 255))

    def e_value_to_color(self, e_value):
        """Maps a BLAST e-value to a color packed as an int (via 0x parsing)."""
        exp = abs(int(('%e' % e_value).split('e')[-1]))
        if exp == 0:  # just in case the e-value is actually zero
            hue = 1.0
        else:
            # e-values seem to round to zero once they get below 1e-200;
            # clamp so hsv(1,..) and hsv(0,..) don't collapse to the same RGB.
            hue = min(exp / 200.0, 0.75)
        rgb = self._hsv_to_rgb255(hue, 1.0, 1.0)
        return int('%#02x%02x%02x' % rgb, 16)

    def e_value_to_color_rgba(self, e_value):
        """Like e_value_to_color, with a fixed alpha of 50/255 appended."""
        exp = abs(int(('%e' % e_value).split('e')[-1]))
        if exp == 0:  # just in case the e-value is actually zero
            hue = 1.0
        else:
            hue = min(exp / 200.0, 0.75)
        r, g, b = self._hsv_to_rgb255(hue, 1.0, 1.0)
        return int('%#02x%02x%02x%02x' % (r, g, b, 50), 16)  # was 100

    def GC_to_color(self, GC):
        """Maps a %GC value onto a green scale ('#rrggbb')."""
        # Brightness ramps over the 52-67 %GC window.
        value = min(15, max(0, GC - 52.0)) / 15.0
        if GC > 65:
            saturation = 1.0
        elif GC < 55:
            saturation = 0.0
        else:
            saturation = 0.5
        rgb = self._hsv_to_rgb255(0.33, saturation, value)
        return '#%02x%02x%02x' % rgb

    def gene_abundance_to_color(self, abundance, largestPhamSize):
        """Grayscale by pham abundance; orphams come out white."""
        scaled = float(abundance) / float(largestPhamSize)
        rgb = self._hsv_to_rgb255(0.66, 0.0, 1 - min(1, 5 * scaled))
        return '#%02x%02x%02x' % rgb

    def gene_cluster_conservation_to_color(self, intra_conservation,
                                           inter_conservation):
        """Color for conservation inside/outside the phage's cluster.

        intra_conservation: 'some' or 'all' (gene presence within cluster).
        inter_conservation: None/'' (absent), 'some', or 'all' (outside it).
        """
        hsv_by_key = {
            ('some', None): (0.15, 1.0, 1.0),
            ('some', 'some'): (0.3, 1.0, 0.6),
            ('some', 'all'): (0.45, 1.0, 1.0),
            ('all', None): (0.0, 1.0, 1.0),
            ('all', 'some'): (0.75, 1.0, 1.0),
            ('all', 'all'): (0.9, 1.0, 1.0),
        }
        # '' and None both mean "not conserved outside the cluster".
        key = (intra_conservation, inter_conservation or None)
        # BUG FIX: unknown combinations used to leave rgb unbound (NameError);
        # default to white instead.
        h, s, v = hsv_by_key.get(key, (0.0, 0.0, 1.0))
        rgb = self._hsv_to_rgb255(h, s, v)
        return '#%02x%02x%02x' % rgb
class BlastAlignmentModel:
    """Draws one bl2seq hit as a shaded quadrilateral linking the query and
    subject genome tracks, plus a small e-value label."""

    def __init__(self, group, query, query_start, query_end, subject,
                 subject_start, subject_end, e_value, zoomFactor, height):
        """group: parent GroupModel; query: track index of the query genome;
        *_start/*_end: alignment coordinates in bp; zoomFactor: bp per pixel.
        """
        # Removed dead locals from the original (unused close_path and
        # stroke_pattern, and a fill_color='yellow' that was overwritten).
        stroke_color = "black"
        # Fill encodes the e-value (see ColorConverter); the alpha keeps the
        # genes underneath visible.
        fill_color = ColorConverter().e_value_to_color_rgba(e_value)
        top_left = dict()
        top_right = dict()
        bottom_left = dict()
        bottom_right = dict()
        line_width = 0  # 0.35
        # NOTE(review): 'query' acts as a 120px-per-track row index in the y
        # formulas below -- confirm against the caller.
        top_left['x'] = query_start / zoomFactor
        top_left['y'] = height - 6 * line_width + ((120 * query) + 50) - 0.5
        top_right['x'] = query_end / zoomFactor
        top_right['y'] = height - 6 * line_width + ((120 * query) + 50) - 0.5
        bottom_left['x'] = subject_start / zoomFactor
        bottom_left['y'] = height + 109 + ((120 * query) + 50)
        bottom_right['x'] = subject_end / zoomFactor
        bottom_right['y'] = height + 109 + ((120 * query) + 50)
        # Corner order matters: a closed path traced TL -> TR -> BR -> BL.
        points = goocanvas.Points([
            (top_left['x'], top_left['y']),
            (top_right['x'], top_right['y']),
            (bottom_right['x'], bottom_right['y']),
            (bottom_left['x'], bottom_left['y'])])
        self.polylineModel = PolylineModelListener(parent=group,
                                                   points=points,
                                                   close_path=True,
                                                   stroke_color=stroke_color,
                                                   fill_color_rgba=fill_color,
                                                   line_width=line_width)
        # Put the e-value label at the centroid of the quadrilateral.
        self.blastAlignmentLabel = BlastAlignmentLabel(
            parent=group, text='%s' % (e_value),
            x=(top_right['x'] + top_left['x'] + bottom_left['x'] + bottom_right['x']) / 4.0,
            y=(top_right['y'] + top_left['y'] + bottom_left['y'] + bottom_right['y']) / 4.0,
            anchor=gtk.ANCHOR_CENTER,
            font="Arial 8")
        self.polylineModel.set_data('type', 'blastAlignment')
        self.polylineModel.set_data('label', self.blastAlignmentLabel)
class BlastMatch:
    '''Holds one row of tabular bl2seq output as a name -> value mapping.'''

    # Column order of NCBI tabular (-D 1) BLAST output.
    _FIELD_NAMES = ('query', 'subject', 'percent_identity', 'alignment_length',
                    'mismatches', 'gap_openings', 'q_start', 'q_end',
                    's_start', 's_end', 'e_value', 'bit_score')

    def __init__(self, params):
        '''params: the 12 tab-separated fields of one result row.'''
        values = tuple(params)
        if len(values) != len(self._FIELD_NAMES):
            # Same failure mode (ValueError) as the original tuple unpacking.
            raise ValueError('expected %d fields, got %d'
                             % (len(self._FIELD_NAMES), len(values)))
        self.properties = dict(zip(self._FIELD_NAMES, values))
class BLAST2Seq:
    """Runs NCBI bl2seq (blastn) between two phage genomes.

    Writes both genomes as FASTA under /tmp, invokes bl2seq twice (tabular
    and human-readable output), and exposes hits at or below *threshold*
    via get_results().
    """

    def __init__(self, c, query, subject, threshold=1e-4):
        # c: open DB cursor; query/subject: PhageIDs; threshold: max e-value.
        print 'query: %s :: subject: %s' % (query, subject)
        self.c = c
        self.query = query
        self.subject = subject
        self.threshold = threshold
        # Fixed scratch paths -- NOTE(review): concurrent runs would clobber
        # each other's files; confirm only one instance runs at a time.
        self.query_file = os.path.join('/tmp', 'bl2seq.query')
        self.subject_file = os.path.join('/tmp', 'bl2seq.subject')
        self.results = os.path.join('/tmp', 'bl2seq.query_subject')
        self.textResults = os.path.join('/tmp', 'bl2seq.query_subject.txt')
        self.matches = []
        self.formatted_matches = []
        # Order matters: both FASTA files must exist before bl2seq runs.
        self.write_query()
        self.write_subject()
        self.bl2seq()

    def write_query(self):
        # Dump the query genome as FASTA for bl2seq.
        self.query_fasta = get_fasta_from_phage(self.c, self.query)
        out = open(self.query_file, 'w')
        out.write(self.query_fasta)
        out.close()

    def write_subject(self):
        # Dump the subject genome as FASTA for bl2seq.
        self.subject_fasta = get_fasta_from_phage(self.c, self.subject)
        out = open(self.subject_file, 'w')
        out.write(self.subject_fasta)
        out.close()

    def bl2seq(self):
        # The BLAST install location comes from ~/.phamerator/phamerator.conf.
        cfg = ConfigParser.RawConfigParser()
        cfg.read(os.path.join(os.environ['HOME'], '.phamerator', 'phamerator.conf'))
        BLAST_dir = cfg.get('Phamerator','BLAST_dir')
        # Two runs: -D 1 produces tabular output (parsed by get_results),
        # -D 0 a plain-text report. -F F disables low-complexity filtering.
        os.system('%s -p blastn -i %s -D 1 -F F -j %s -o %s' % (os.path.join(BLAST_dir, 'bin/' ,'bl2seq'), self.query_file, self.subject_file, self.results))
        os.system('%s -p blastn -i %s -D 0 -F F -j %s -o %s' % (os.path.join(BLAST_dir, 'bin/' ,'bl2seq'), self.query_file, self.subject_file, self.textResults))

    def get_results(self):
        """Parses the tabular output; returns a list of
        (q_start, q_end, s_start, s_end, e_value) tuples for every hit with
        an e-value at or below self.threshold."""
        results = open(self.results).readlines()
        for result in results:
            if not result.startswith('#'):  # skip bl2seq comment/header lines
                self.matches.append(BlastMatch(result.split('\t')))
        for match in self.matches:
            if float(match.properties['e_value']) <= self.threshold:
                self.formatted_matches.append((int(match.properties['q_start']), int(match.properties['q_end']),
                    int(match.properties['s_start']), int(match.properties['s_end']), float(match.properties['e_value'])),)
        return self.formatted_matches
class Phage:
    '''Thin wrapper pairing a DB cursor with one PhageID.'''

    def __init__(self, c, PhageID):
        self.c = c              # open DB cursor
        self.PhageID = PhageID  # primary key of this phage

    def _get_blast_matches(self, subject):
        '''Runs bl2seq of this phage against *subject*; caches and returns
        the filtered hit tuples.'''
        self.matches = BLAST2Seq(self.c, self.PhageID, subject).get_results()
        return self.matches
class CanvasInterface:
    def __init__(self, c, canvas, phages=None, length=None):
        """Binds a goocanvas.Canvas to the phamerator database.

        c: open DB cursor; canvas: the goocanvas.Canvas to draw into.
        phages/length are accepted here but the model is built later by
        create_canvas_model -- TODO confirm they are intentionally unused.
        """
        print 'init CanvasInterface'
        self.root = None  # GroupModel root, created by create_canvas_model
        self.c = c
        self.db = pham.db(c = self.c)
        self.canvas = canvas
        self.genes = []  # drawn gene models, appended by create_canvas_model
        self.canvas.connect('item-created', self.on_item_created)
        #self.root = self.create_canvas_model(phages, length)
        #self.canvas.set_root_item_model(self.root)
        #self.canvas.connect("button_press_event", self.on_canvas_button_press)
        print 'root: %s' % self.root
        # Display preferences for gene rectangles, keyed by selection state.
        self.prefs = { 'defaultRectColor' : 'black', 'activeRectColor' : 'blue',
            'selectedRectColor' : 'orange', 'activeRectLineWidth' : 1.0, 'selectedRectLineWidth' : 2.0, 'defaultRectLineWidth' : 1.0}
        self.selectedCanvasItems = []
        self.dragging = False
        self.highlighted_text = None
        self.client = gconf.client_get_default()
        self.client.add_dir('/apps/phamerator', gconf.CLIENT_PRELOAD_NONE)
        print 'registering /apps/phamerator/gene_color'
def create_canvas_model(self, phages=None, length=None):
c = self.c
self.phages = phages
#if isinstance(self.root, goocanvas.GroupModel):
# print 'removing old root group', '&' * 155
# self.root.remove()
self.root = goocanvas.GroupModel()
self.blastMatches = []
if not phages:
item = goocanvas.TextModel(text="Select a genome from the main window",
x=5, y=20,
anchor=gtk.ANCHOR_WEST,
font="Arial 12")
print 'adding a blank phageGroup'
self.root.add_child(item, -1)
return self.root
# calculate this here, not in the loop below
largestPhams, self.largestPhamSize = get_largest_pham_size(self.c)
for n, p in enumerate(phages):
PhageID = p['PhageID']
phageName = get_phage_name_from_PhageID(c, PhageID)
phageGroup = phameratorGroupModel(parent = self.root)
phageGroup.set_data('type', 'phageGroup')
phageGroup.set_data('name', phageName)
scaleVGroup = goocanvas.GroupModel( parent = phageGroup)
scaleVGroup.set_data('type', 'scaleVGroup')
geneGroup = goocanvas.GroupModel(parent = phageGroup)
geneGroup.set_data('type', 'geneGroup')
scaleHGroup = goocanvas.GroupModel(parent = phageGroup)
scaleHGroup.set_data('type', 'scaleHGroup')
phageGroup.set_data('scaleVGroup', scaleVGroup)
phageGroup.set_data('scaleHGroup', scaleHGroup)
phageGroup.set_data('geneGroup', geneGroup)
display_reversed = p['display_reversed']
print 'drawing phage:', PhageID, '-->', phageName
height = 12 # height of gene boxes
print n, PhageID
#vert = 120*n+50
vert = 0
phageGroup.translate(0,120*n+50)
print 'vert:', vert
spacer = 13 # FIXME
line_width = 0.5
scale_height = 10.0
item = goocanvas.TextModel(text=phageName,
# x=5, y=vert-30,
x=0, y=0,
anchor=gtk.ANCHOR_WEST,
font="Arial 12")
item.set_data('type', 'phageName')
item.translate(5, vert-30)
phageGroup.add_child(item, -1)
zoomFactor = 20.0
self.root.set_data('zoomFactor', zoomFactor)
genomeLength = get_length_of_genome(c, PhageID)
self.draw_scale(scaleHGroup, scaleVGroup, display_reversed=display_reversed, x=0, y=vert+(2*height)+(4*line_width), length=genomeLength, height=scale_height, line_width=line_width, zoomFactor=zoomFactor)
phage = Phage(c, PhageID)
GeneIDs = get_genes_from_PhageID(c, PhageID)
genes = []
for GeneID in GeneIDs:
phage = get_phage_name_from_GeneID(c, GeneID)
name = get_gene_name_from_GeneID(c, GeneID)
exp = re.compile('(PBI)*[1-9]+\d*[.]*\d*$', re.IGNORECASE)
try:
name = exp.search(name).group().strip()
#name = exp.split(name)[-1]
except: print 'EXCEPTION: %s, %s' % (phage, name)
start, stop, length, orientation = get_gene_start_stop_length_orientation_from_GeneID(c, GeneID)
if display_reversed:
if orientation == 'F': orientation = 'R'
elif orientation == 'R': orientation = 'F'
newstart = genomeLength - stop + 1
stop = genomeLength - start - 1
start = newstart
g = gene.gene(GeneID, name, start, stop, length, orientation)
pham = get_pham_from_GeneID(c, GeneID)
domains = get_domain_hits_from_GeneID(c, GeneID)
genes.append((g, pham, domains))
genes.sort()
up = 0
y = None
cc = ColorConverter()
for g, p, d in genes:
gc = get_gene_percent_GC(self.c, g.GeneID)
if gc != -1: gc_color = cc.GC_to_color(get_gene_percent_GC(self.c, g.GeneID))
else: gc_color = '#ff0000'
abundance = {}
abundance = get_number_of_pham_members(self.c, p)
abundance_color = cc.gene_abundance_to_color(abundance, self.largestPhamSize)
cluster = get_cluster_from_PhageID(self.c, get_PhageID_from_GeneID(self.c, g.GeneID))
if cluster:
cluster = cluster[0] # ignore subclusters for now...
intra_conservation = ""
inter_conservation = ""
if cluster:
clusterPhageIDs = set(get_PhageIDs_from_cluster(self.c, cluster))
else:
clusterPhageIDs = set([get_PhageID_from_GeneID(self.c, g.GeneID)])
phamPhageIDs = set(get_PhageID_members_of_pham(self.c, p))
allPhageIDs = set(get_PhageIDs(self.c))
print allPhageIDs, phamPhageIDs, clusterPhageIDs
if allPhageIDs == phamPhageIDs:
intra_conservation = 'all'
inter_conservation = 'all'
elif clusterPhageIDs == phamPhageIDs:
intra_conservation = 'all'
inter_conservation = None
elif clusterPhageIDs < phamPhageIDs:
intra_conservation = 'all'
inter_conservation = 'some'
elif phamPhageIDs < clusterPhageIDs:
intra_conservation = 'some'
inter_conservation = None
elif (allPhageIDs - clusterPhageIDs) == (phamPhageIDs - clusterPhageIDs):
intra_conservation = 'some'
inter_conservation = 'all'
else:
intra_conservation = 'some'
inter_conservation = 'some'
print "intra: %s inter: %s" % (intra_conservation, inter_conservation)
conservation_color = cc.gene_cluster_conservation_to_color(intra_conservation, inter_conservation)
if g.orientation == 'F':
#fillColor = 'green'
if up: y = vert
else: y = vert + height + 3*line_width
else:
#fillColor = 'red'
if up: y = vert + height*2 + scale_height + line_width*5
else: y = vert + height*2 + scale_height + line_width*5 + height
h=s=v=0
numberOfMembers = get_number_of_pham_members(self.db.c, p)
#print g.name, '->', numberOfMembers
if numberOfMembers > 1:
hsv = self.db.select('pham_color', 'color', name=p)
rgb = hsv[0][0]
else:
rgb = '#ffffff'
# if this gene wraps from the right end of the genome to the left end of the genome
if g.start > g.stop and g.orientation == 'F':
print 'wrap around gene: forward'
start = g.start
stop = genomeLength
length = stop - start
print 'name: %s, GeneID: %s, start: %s, stop: %s, length: %s' % (g.name, g.GeneID, start, stop, length)
f1 = gene.gene(g.GeneID, g.name, start, genomeLength, length, orientation)
gene_model = self.draw_gene(geneGroup, f1, p, y, zoomFactor, height, pham_color=rgb, gc_color=gc_color, abundance_color=abundance_color, cluster_conservation_color = conservation_color)
self.genes.append(geneModel)
start = 0
stop = g.stop
length = stop - start
print 'start: %s, stop: %s, length: %s' % (start, stop, length)
f2 = gene.gene(g.GeneID, g.name, start, g.stop, length, orientation)
self.draw_gene(geneGroup, f2, p, y, zoomFactor, height, pham_color=rgb, gc_color=gc_color, abundance_color=abundance_color, cluster_conservation_color=conservation_color, domains=d)
# self.draw_gene(self, t, y, zoomFactor, height):
# else if this gene wraps from the left end of the genome to the right end of the genome
elif g.start > g.stop and g.orientation == 'R':
print 'wrap around gene: reverse (but it is not being drawn)'
pass
# a typical gene
else:
gene_model = self.draw_gene(geneGroup, g, p, y, zoomFactor, height, pham_color=rgb, gc_color=gc_color, abundance_color=abundance_color, cluster_conservation_color=conservation_color, domains=d)
self.genes.append(gene_model)
while gtk.events_pending():
gtk.main_iteration(False)
if up: up = False
else: up = True
import ConfigParser
cfg = ConfigParser.RawConfigParser()
cfg.read(os.path.join(os.environ['HOME'], '.phamerator', 'phamerator.conf'))
if cfg.get('Phamerator','draw_blast_alignments') == 'True':
self.draw_blastn_alignments()
return self.root
    def draw_blastn_alignments(self):
        """Overlays bl2seq nucleotide alignments between adjacent genome
        tracks (each phage is BLASTed against the next one in self.phages)."""
        print 'draw_blastn_alignments()'
        group = phameratorGroupModel()
        group.props.stroke_color = 'black'
        group.props.line_width = 1
        phages = self.phages
        for n, p in enumerate(phages):
            #print 'drawing blast alignment'
            PhageID = p['PhageID']
            phageName = get_phage_name_from_PhageID(self.c, PhageID)
            g = self.root.get_n_children()
            # Find this phage's group among root's children so its horizontal
            # offset can be applied to the alignment coordinates.
            for i in range(0, g):
                child = self.root.get_child(i)
                if child.get_data('name') == get_phage_name_from_PhageID(self.c, PhageID):
                    phageGroup = child
                    query_offset = phageGroup.get_simple_transform()[0]
                    break
            # NOTE(review): if no child matched, phageGroup/query_offset are
            # unbound below -- confirm every phage always has a named group.
            scaleVGroup = phageGroup.get_data('scaleVGroup')
            display_reversed = p['display_reversed']
            vert = 0
            spacer = 13 # FIXME
            line_width = 0 # 0.35
            scale_height = 10.0
            zoomFactor = 20.0
            genomeLength = get_length_of_genome(self.c, PhageID)
            phage = Phage(self.c, PhageID)
            try:
                subject = self.phages[n+1]['PhageID']
            except:
                subject = None  # the last track has no neighbour below it
            if subject:
                g = self.root.get_n_children()
                # Same lookup for the subject phage's group/offset.
                for i in range(0, g):
                    child = self.root.get_child(i)
                    if child.get_data('name') == get_phage_name_from_PhageID(self.c, subject):
                        subjectPhageGroup = child
                        subject_offset = subjectPhageGroup.get_simple_transform()[0]
                        break
                for match in phage._get_blast_matches(subject=subject):
                    # match is (q_start, q_end, s_start, s_end, e_value).
                    if display_reversed:
                        # Mirror the query coordinates for reversed display.
                        blastMatch = BlastAlignmentModel(group=group,
                            query=n,
                            query_start=genomeLength-match[0],
                            query_end=genomeLength-match[1],
                            subject=None,
                            subject_start=match[2],
                            subject_end=match[3],
                            e_value=match[4],
                            zoomFactor=20.0,
                            height=vert+(0)+(4*line_width))
                    else:
                        blastMatch = BlastAlignmentModel(group=group,
                            query=n,
                            query_start=match[0]+query_offset*zoomFactor,
                            query_end=match[1]+query_offset*zoomFactor,
                            subject=None,
                            subject_start=match[2]+subject_offset*zoomFactor,
                            subject_end=match[3]+subject_offset*zoomFactor,
                            e_value=match[4],
                            zoomFactor=20.0,
                            #height=vert+(35)+(4*line_width))
                            height= 50 - spacer)
                    self.blastMatches.append(blastMatch)
        # Insert below the phage groups so genes stay on top.
        self.root.add_child(group, 0)
        # move this code to on_polyline_enter/leave
        #try:
        #  group.raise_(above=subjectPhageGroup)
        #except:
        #  pass
def create_stipple (self, color_name, stipple_data):
import cairo
color = gtk.gdk.color_parse (color_name)
stipple_data[2] = stipple_data[14] = color.red >> 8
stipple_data[1] = stipple_data[13] = color.green >> 8
stipple_data[0] = stipple_data[12] = color.blue >> 8
surface = cairo.ImageSurface.create_for_data (stipple_data, cairo.FORMAT_ARGB32, 2, 2, 8)
pattern = cairo.SurfacePattern(surface)
pattern.set_extend (cairo.EXTEND_REPEAT)
return pattern
def draw_gene(self, group, g, p, y, zoomFactor, height, pham_color, gc_color, abundance_color, cluster_conservation_color, domains=[]):
cc = ColorConverter()
color_by = self.client.get_string('/apps/phamerator/gene_color')
color_dict = {'color_by_abundance': abundance_color, 'color_by_cluster_conservation': cluster_conservation_color, 'color_by_phamily': pham_color, 'color_by_gc': gc_color, 'no_color': 'white'}
if not color_dict.has_key(color_by):
color_by = 'color_by_phamily'
self.controller.mapWTree.get_object(color_by).set_active(True)
self.client.set_string('/apps/phamerator/gene_color', 'color_by_phamily')
rectModel = geneModel(x=0, y=0, width=float(g.length/zoomFactor), height=height,
line_width=1.0,
radius_x=1.0,
radius_y=1.0,
fill_color=color_dict[color_by])
rectModel.set_data("name", g.name)
rectModel.set_data("GeneID", g.GeneID)
rectModel.set_data('type', 'gene')
rectModel.set_data("status", 'default')
rectModel.set_data('pham', p)
rectModel.set_data("phamily_color", pham_color)
rectModel.set_data("gc_color", gc_color)
rectModel.set_data("abundance_color", abundance_color)
rectModel.set_data("cluster_conservation_color", cluster_conservation_color)
group.add_child(rectModel, -1)
rectModel.translate(g.start/zoomFactor,y)
for n, domain in enumerate(domains):
domainGroupModel = goocanvas.GroupModel()
group.add_child(domainGroupModel, -1)
# domain coordinates are in amino acids, so multiply by 3 for DNA positions
if g.orientation == 'F':
start, end, e_value, description = domain['start'] * 3, domain['end'] * 3, domain['expect'], domain['description']
elif g.orientation == 'R':
end, start, e_value, description = domain['start'] * 3, domain['end'] * 3, domain['expect'], domain['description']
else:
print 'domain error!'
sys.exit()
domain_length = abs(start - end)
domain_height=(height-1.0)/float(len(domains))
domain_model = domainModel(x=0, y=n*domain_height+0.5, width=float(domain_length/zoomFactor), height=domain_height,
line_width=0.3,
radius_x=0.2,
radius_y=0.2,
fill_color_rgba = 0xFFDB1Cff) #cc.e_value_to_color(e_value))
domain_model.set_data('type', 'domainModel')
domain_model.set_data('domain', domain)
domainGroupModel.add_child(domain_model, -1)
if g.orientation == 'F':
domain_model.translate((g.start+start)/zoomFactor,y)
if g.orientation == 'R':
domain_model.translate(((g.start+g.length)-start)/zoomFactor,y)
domain_model.set_data('domainGroupModel', domainGroupModel)
item = goocanvas.TextModel(text=g.name,
#x=float((g.start+g.start+g.length)/2.0)/zoomFactor, y=y+height/2.0,
x=0, y=0,
anchor=gtk.ANCHOR_CENTER,
font="Arial 4")
item.set_data('type', 'gene')
item.set_data('text', g.name)
group.add_child(item, -1)
item.translate(float((g.start+g.start+g.length)/2.0)/zoomFactor, y+height/2.0)
if g.orientation == 'F':
py = y - height/2.0
else:
py = y + height*1.5
# pham label
if p:
if g.orientation == 'F':
phamAnchor = gtk.ANCHOR_WEST
else:
phamAnchor = gtk.ANCHOR_EAST
translation_length = len(get_translation_from_GeneID(self.db.c, g.GeneID))
if translation_length >= 120:
phamAnchor = gtk.ANCHOR_CENTER
numberOfMembers = get_number_of_pham_members(self.db.c, p)
px = float((g.start+g.start+g.length)/2.0)/zoomFactor
# item should be a TextModelListener
#item = goocanvas.TextModel(text='%s (%s)' % (p, numberOfMembers),
item = PhamNameLabel(text='%s (%s)' % (p, numberOfMembers),
# x=px, y=py,
x=0, y=0,
anchor=phamAnchor,
font="Arial 4")
item.set_data('type', 'pham')
item.set_data('text', p)
if translation_length < 120:
#print 'rotating label for pham', p, "(270, %s, %s)" % (px,py)
item.rotate(270,px, py)
else:
item.rotate(270,px, py)
#item.translate(0,0)
if translation_length >= 120:
#item.rotate(89,px, py)
item.rotate(90,px, py)
group.add_child(item, -1)
item.translate(px,py)
# if the gene has a description, show it
desc = get_description_from_GeneID(self.db.c, g.GeneID)
#print desc
if desc:
descModel = GeneDescriptionLabel(text=desc,
# x=float((g.start+g.start+g.length)/2.0)/zoomFactor,
x=0,
# y=y-40,
y=0,
anchor=gtk.ANCHOR_CENTER,
font='Arial 4',
ellipsize=pango.ELLIPSIZE_END,
width=g.length/zoomFactor)
descModel.set_data('type', 'desc')
group.add_child(descModel, -1)
descModel.translate(float((g.start+g.start+g.length)/2.0)/zoomFactor, y-40)
return rectModel
    def draw_scale(self, scaleHGroup, scaleVGroup, display_reversed, x, y, height, length, line_width=0.5, zoomFactor=1):
        '''draws the scale bar on a linear genome map

        scaleHGroup receives the two horizontal rails; scaleVGroup receives
        the tick marks and kb labels.  Ticks are drawn every 100 bp (small),
        500 bp (top-half) and 1000 bp (full height, labelled in kb).  When
        display_reversed is set, tick spacing is measured from the far end of
        the genome so the reversed map still ticks at round positions.
        '''
        c = self.c
        #print 'drawing scale at (%s,%s)' % (x,y)
        #print 'length:', length
        # Keep the bp length for modulo tests; scale the drawn length.
        trueLength = length
        length = length / float(zoomFactor)
        #pos = 0
        small = 100
        top = 500
        large = 1000
        topLine = (x, y)
        bottomLine = (x, y+height)
        # The two horizontal rails of the scale bar.
        for line in (topLine, bottomLine):
            #rect_model = goocanvas.RectModel(x=float(line[0]), y=line[1], width=length, height=line_width,
            rect_model = goocanvas.RectModel(x=0, y=0, width=length, height=line_width,
                                             line_width=line_width,
                                             radius_x=0,
                                             radius_y=0,
                                             fill_color="#000000")
            rect_model.set_data('type', 'scale')
            scaleHGroup.add_child(rect_model, -1)
            rect_model.translate(float(line[0]), line[1])
        basePositions = range(int(x), int(x+trueLength+1))
        if display_reversed: basePositions.reverse()
        for i in basePositions:
            if (not display_reversed and not i % large) or (display_reversed and not (trueLength-i) % large):
                # draw large scale bar
                #rect_model = goocanvas.RectModel(x=float(i)/zoomFactor, y=y, width=line_width, height=height-line_width,
                rect_model = goocanvas.RectModel(x=0, y=0, width=line_width, height=height-line_width,
                                                 line_width=line_width,
                                                 radius_x=0,
                                                 radius_y=0,
                                                 fill_color="#000000")
                #print i
                rect_model.set_data('type', 'scale')
                scaleVGroup.add_child(rect_model, -1)
                rect_model.translate(float(i)/zoomFactor, y)
                # kb label beside each large tick.
                if display_reversed:
                    label_model = goocanvas.TextModel(text=str(int(i/1000)),
                                                      #x=(trueLength-float(i)+500)/zoomFactor, y = (( y + (y+height))/2 + y)/2,
                                                      x=0, y=0,
                                                      anchor=gtk.ANCHOR_EAST,
                                                      font="Arial 3",
                                                      fill_color="#009900")
                    label_model.translate((trueLength-float(i)+500)/zoomFactor, (( y + (y+height))/2 + y)/2)
                    # NOTE(review): this reversed-orientation label is never
                    # added to scaleVGroup (add_child below is only reached in
                    # the else branch) — confirm whether reversed maps are
                    # meant to show kb labels.
                else:
                    label_model = goocanvas.TextModel(text=str(int(i/1000)),
                                                      # x=(float(i)+50)/zoomFactor, y = (( y + (y+height))/2 + y)/2,
                                                      x=0, y=0,
                                                      anchor=gtk.ANCHOR_WEST,
                                                      font="Arial 3",
                                                      fill_color="#009900")
                    # NOTE(review): 'type' is set twice; the 'marker' value is
                    # immediately overwritten by 'scale' — likely leftover.
                    label_model.set_data('type', 'marker')
                    label_model.set_data('text', str(int(i/1000)))
                    label_model.set_data('type', 'scale')
                    scaleVGroup.add_child(label_model, -1)
                    label_model.translate((float(i)+50)/zoomFactor, (( y + (y+height))/2 + y)/2)
            #elif not i % top:
            elif (not display_reversed and not i % top) or (display_reversed and not (trueLength - i) % top):
                # draw top scale bar
                # rect_model = goocanvas.RectModel(x=float(i)/zoomFactor, y=y, width=line_width, height=height/2,
                rect_model = goocanvas.RectModel(x=0, y=0, width=line_width, height=height/2,
                                                 line_width=line_width,
                                                 radius_x=0,
                                                 radius_y=0,
                                                 fill_color="#000000")
                rect_model.set_data('type', 'scale')
                scaleVGroup.add_child(rect_model, -1)
                rect_model.translate(float(i)/zoomFactor, y)
            #elif not i % small:
            elif (not display_reversed and not i % small) or (display_reversed and not (trueLength - i) % small):
                # draw small scale bar
                #rect_model = goocanvas.RectModel(x=float(i)/zoomFactor, y=y+(height-height/2), width=line_width, height=height/2,
                rect_model = goocanvas.RectModel(x=0, y=0, width=line_width, height=height/2,
                                                 line_width=line_width,
                                                 radius_x=0,
                                                 radius_y=0,
                                                 fill_color="#000000")
                rect_model.set_data('type', 'scale')
                scaleVGroup.add_child(rect_model, -1)
                rect_model.translate(float(i)/zoomFactor, y+(height-height/2))
## This is our handler for the "item-created" signal of the GooCanvasView.
    def on_item_created (self, canvas, item, model):
        """Handler for the GooCanvasView "item-created" signal.

        Wires the per-item-type event handlers onto each concrete canvas
        item as the view materialises it from its model, and caches the
        rendered bounds of domain labels on their models for tooltip layout.
        """
        #print "item created :: canvas: %s, item: %s, model: %s, " % (canvas, item, model)
        if isinstance(item, goocanvas.Text):
            item.connect("button-press-event", self.on_text_button_press)
            item.connect("button-release-event", self.on_text_button_release)
            item.connect("enter-notify-event", self.on_text_enter)
            item.connect("leave-notify-event", self.on_text_leave)
            item.connect("motion-notify-event", self.on_motion_notify)
            if item.get_model().get_data('type') == 'domainLabelModel':
                # Record the label's rendered bounds on the model so
                # on_rect_enter can size the tooltip box around it.
                b= item.get_bounds()
                item.get_model().set_data('x', b.x1)
                item.get_model().set_data('y', b.y1)
                item.get_model().set_data('width', b.x2 - b.x1)
                item.get_model().set_data('height', b.y2 - b.y1)
                print 'adding domain label...'
                item.get_model().set_data('domainLabel', item)
        if isinstance(item, goocanvas.Rect):
            item.connect("button-press-event", self.on_rect_button_press)
            item.connect("button-release-event", self.on_rect_button_release)
            item.connect("enter-notify-event", self.on_rect_enter)
            item.connect("leave-notify-event", self.on_rect_leave)
            item.connect("motion-notify-event", self.on_motion_notify)
        if isinstance(item, goocanvas.Polyline):
            item.connect("enter-notify-event", self.on_polyline_enter)
            item.connect("leave-notify-event", self.on_polyline_leave)
        if isinstance(item, goocanvas.Group):
            item.connect("button-press-event", self.on_group_button_press)
            item.connect("motion-notify-event", self.on_group_motion_notify)
            item.connect("button-release-event", self.on_group_button_release)
def on_polyline_button_press(self, view, target, event):
print 'polyline clicked'
view.lower(None)
    def on_group_button_press(self, view, target, event):
        """Start a rubber-band column selection on the canvas background.

        Only reacts to presses on the root group (presses on phage groups
        are handled elsewhere).  Any previous selection rectangle is removed
        and a new zero-width, full-height rectangle is started at the click's
        x position; on_group_motion_notify widens it while dragging.
        """
        #print 'group clicked'
        print view
        if view.get_model() != self.canvas.get_root_item_model():
            return True
        selectionRect = self.canvas.get_root_item_model().get_data('selectionRect')
        if selectionRect:
            selectionRect.remove()
        #if (event.state & gtk.gdk.BUTTON1_MASK and not event.state & gtk.gdk.CONTROL_MASK):
        #  return True
        bounds = self.canvas.get_bounds()
        # Translucent green rectangle spanning the canvas height.
        rectModel = goocanvas.RectModel(parent=self.canvas.get_root_item_model(),
                                        x=event.x,
                                        y=0,
                                        width=0,
                                        line_width=0.6,
                                        height=bounds[3]-bounds[1],
                                        #fill_color_rgba=0x0000ff44)
                                        fill_color_rgba=0x00ff0044)
        #rectModel.translate(event.x, 0)
        self.canvas.get_root_item_model().set_data('selectionRect', rectModel)
        # 'resizing' marks the drag as active for the motion handler.
        rectModel.set_data('resizing', True)
def on_group_motion_notify(self, view, target, event):
if view.get_model() != self.canvas.get_root_item_model():
return True
selectionRect = self.canvas.get_root_item_model().get_data('selectionRect')
if not selectionRect:
return True
if not selectionRect.get_data('resizing'):
return True
if event.x > selectionRect.props.x:
selectionRect.props.width = event.x - selectionRect.props.x
return True
def on_group_button_release(self, view, target, event):
if view.get_model() != self.canvas.get_root_item_model():
return True
selectionRect = self.canvas.get_root_item_model().get_data('selectionRect')
if selectionRect.props.x == event.x:
selectionRect.remove()
self.show_fasta_sequences(None)
return True
if not selectionRect:
return True
selectionRect.set_data('resizing', False)
fasta = self.get_fasta_from_selectionRect(selectionRect)
self.show_fasta_sequences(fasta)
return False
def on_polyline_enter(self, view, target, event):
itemModel = view.get_model()
if itemModel.get_data('type') == 'blastAlignment':
if self.client.get_bool('/apps/phamerator/show_alignment_text') == False:
itemModel.get_data('label').props.visibility = goocanvas.ITEM_VISIBLE
def on_polyline_leave(self, view, target, event):
itemModel = view.get_model()
if itemModel.get_data('type') == 'blastAlignment':
if self.client.get_bool('/apps/phamerator/show_alignment_text') == False:
itemModel.get_data('label').props.visibility = goocanvas.ITEM_INVISIBLE
def on_text_button_press(self, view, target, event):
'''display information about the selected text'''
print view.get_data('type')
if event.state & gtk.gdk.CONTROL_MASK:
if view.get_model().get_data('type') == 'phageName':
cursor = gtk.gdk.Cursor (gtk.gdk.SB_H_DOUBLE_ARROW)
for blastMatch in self.blastMatches:
blastMatch.blastAlignmentLabel.remove()
blastMatch.polylineModel.remove()
else:
if view.get_model().get_data('type') == 'pham':
print 'you clicked on pham', view.get_model().get_data('text')
cursor = gtk.gdk.Cursor (gtk.gdk.LEFT_PTR)
elif view.get_model().get_data('type') == 'phageName':
cursor = gtk.gdk.Cursor (gtk.gdk.SB_V_DOUBLE_ARROW)
for blastMatch in self.blastMatches:
blastMatch.blastAlignmentLabel.remove()
blastMatch.polylineModel.remove()
# get current position of phageGroups
print 'you clicked on phageName'
item = view
while item.get_model().get_data('type') != 'phageGroup':
item = item.get_parent()
print 'got phageGroup!'
else:
print view.get_model().props.text
self.canvas.pointer_grab (view, gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.BUTTON_RELEASE_MASK, cursor, event.time)
return False
    def on_text_button_release(self, view, target, event):
        """Finish a text drag: snap phage rows into place and redraw alignments.

        After a phage-name drag ends, every phage group is re-spaced to the
        fixed 120px row pitch in its current vertical order, clamped to
        non-negative x, self.phages is reordered to match the new display
        order, and the blast alignments are redrawn.
        """
        self.canvas.pointer_ungrab (view, event.time)
        # only redraw blastn alignments if user released click on phage name
        if view.get_model().get_data('type') != 'phageName':
            #self.draw_blastn_alignments()
            return
        # get current position of phageGroups
        n = self.canvas.get_root_item_model().get_n_children()
        # Map each phage group's current y position to the group itself.
        phages = {}
        for child in range(0, n):
            group = self.canvas.get_root_item_model().get_child(child)
            if group.get_data('type') == 'phageGroup':
                phages[group.y] = group
        # Sorting the y keys gives the top-to-bottom display order.
        keys = phages.keys()
        keys.sort()
        count = 0
        for key in keys:
            print phages[key].get_data('name'), phages[key].y
            # Snap each row to the fixed pitch: row i sits at y = 120*i + 50.
            phages[key].translate(0, (120*count+50)-key)
            x = phages[key].get_simple_transform()[0]
            if x < 0:
                # Don't let a horizontal drag push the map off-canvas.
                phages[key].translate(-x, 0)
            count = count + 1
        # Rebuild self.phages in the new top-to-bottom order.
        new_phages = []
        for key in keys:
            for d in self.phages:
                name = get_PhageID_from_name(self.c, phages[key].get_data('name'))
                print 'name: %s' % name
                if d['PhageID'] == name:
                    new_phages.append(d)
        self.phages = new_phages
        print self.phages
        self.draw_blastn_alignments()
        # for key in key, translate phages[key] if needed to put it back in the proper spacing.
def on_text_enter(self, view, target_view, event):
itemModel = view.get_model()
if itemModel.get_data('type') == 'desc':
try:
self.selected_text.remove()
backRect = itemModel.get_data('backRect')
backRect.remove()
except:
pass
#itemModel.props.ellipsize = pango.ELLIPSIZE_NONE
def on_text_leave(self, view, target_view, event):
itemModel = view.get_model()
if itemModel.get_data('type') == 'desc':
pass
#backRect = itemModel.get_data('backRect')
#backRect.props.visibility = False
#backRect.remove()
#itemModel.props.ellipsize = pango.ELLIPSIZE_END
def color_by_has_changed(self, client, *args, **kwargs):
print 'color_by_has_changed'
pass
    def on_motion_notify (self, item, target, event):
        '''called when mouse moves over canvas items, it handles the dragging of canvas items

        While button 1 is held on a phage name, the enclosing phage group is
        moved: vertically by default, horizontally when Ctrl is held.  The
        canvas widget is then resized so the dragged group stays reachable.
        '''
        if event.state & gtk.gdk.BUTTON1_MASK and item.get_model().get_data('type') == 'phageName':
            # Walk up from the label to the phage group that owns it.
            while item.get_model().get_data('type') != 'phageGroup':
                item = item.get_parent()
            if not (event.state & gtk.gdk.CONTROL_MASK):
                # Plain drag: vertical reordering.
                item.get_model().translate(0, event.y)
            elif (event.state & gtk.gdk.CONTROL_MASK):
                # Ctrl drag: horizontal shift.
                item.get_model().translate(event.x, 0)
            phageGroup = item
            #start_x = phageGroup.get_bounds().x1
            #new_x = event.x
            #new_y = event.y
            pBounds = phageGroup.get_bounds()
            rBounds = self.canvas.get_root_item().get_bounds()
            px1, py1, px2, py2 = pBounds.x1, pBounds.y1, pBounds.x2, pBounds.y2
            rx1, ry1, rx2, ry2 = rBounds.x1, rBounds.y1, rBounds.x2, rBounds.y2
            # Grow the canvas if the group was dragged past its right edge.
            upper = max(px2, rx2)
            #sw = self.controller.mapWTree.get_object('MapWindowScrolledWindow')
            #self.canvas.set_bounds(px1, py1, upper, py2)
            scale = self.canvas.get_scale()
            self.canvas.set_size_request(int((upper+10)*scale), int((ry2-ry1+20)*scale))
            #hAdj = sw.get_hadjustment()
            #hAdj.upper = rx2
            #print 'page_size: %s' % hAdj.page_size
            #print 'upper: %s' % upper
            #hAdj.changed()
            #self.dragging == False
        return False
    def on_rect_button_press (self, item, target_item, event):
        """Handle clicks on gene rectangles and scale bars.

        Right-click greys out every gene not in the clicked gene's pham.
        Left-click toggles selection: Ctrl adds/removes the gene from the
        current multi-selection, a plain click replaces the selection.  A
        press on a scale item just records its bounds and the drag origin.
        """
        r = item
        #print 'status before clicking: %s' % r.get_model().get_data('status')
        #print 'item: %s target_item: %s event: %s event.type: %s event.state: %s' % (item, target_item, event, event.type, event.state)
        if event.button == 3:
            # Right-click: highlight the clicked pham by dimming all others.
            for g in self.genes:
                g.change_color(self.client)
                print g.get_data('pham')
                if g.get_data('pham') != r.get_model().get_data('pham'):
                    g.props.fill_color = '#eeeeee'
        if item.get_model().get_data('type') == 'gene':
            print 'setting gene to active'
            # unselect this gene
            if r.get_model().get_data('status') == 'selected':
                if event.state & gtk.gdk.CONTROL_MASK:
                    # Ctrl-click a selected gene: remove just this one.
                    r.get_model().set_data('status', 'default')
                    r.get_model().props.stroke_color = self.prefs['activeRectColor']
                    r.get_model().props.line_width = self.prefs['activeRectLineWidth']
                    self.selectedCanvasItems.remove(r)
                else:
                    # Plain click a selected gene: clear the whole selection.
                    r.get_model().set_data('status', 'default')
                    r.get_model().props.stroke_color = self.prefs['activeRectColor']
                    r.get_model().props.line_width = self.prefs['activeRectLineWidth']
                    self.controller.DNATextBuffer.set_text('click on a gene')
                    self.controller.ProteinTextBuffer.set_text('click on a gene')
                    self.selectedCanvasItems = []
            elif r.get_model().get_data('status') == 'default':
                if event.state & gtk.gdk.CONTROL_MASK:
                    # control key pressed, so add this gene to current selection
                    r.get_model().set_data('status', 'selected')
                    r.get_model().props.stroke_color = self.prefs['selectedRectColor']
                    r.get_model().props.line_width = self.prefs['selectedRectLineWidth']
                    self.selectedCanvasItems.append(r)
                else:
                    # control key not pressed, so unselect everything and then select the clicked gene
                    print 'setting gene to selected'
                    # unselect the previously selected gene, if there is one
                    for s in self.selectedCanvasItems:
                        s.get_model().set_data('status', 'default')
                        s.get_model().props.stroke_color = self.prefs['defaultRectColor']
                        s.get_model().props.line_width = self.prefs['defaultRectLineWidth']
                    self.selectedCanvasItems = []
                    # select the gene that was just clicked
                    r.get_model().set_data('status', 'selected')
                    r.get_model().props.stroke_color = self.prefs['selectedRectColor']
                    r.get_model().props.line_width = self.prefs['selectedRectLineWidth']
                    self.selectedCanvasItems.append(r)
            # Notify the controller so the sequence panes update.
            self.controller.gene_selection_changed(self.selectedCanvasItems)
        elif item.get_model().get_data('type') == 'scale':
            group = item.get_parent()
            item.set_data('scaleBounds', group.get_bounds())
            print 'scale button press'
        # Remember the drag origin for motion handling.
        self.drag_x = event.x
        self.drag_y = event.y
        return False
def on_rect_button_release(self, item, target_item, event):
print 'rect button release'
self.canvas = item.get_canvas ()
self.dragging = False
#parent = item.get_parent()
#n_children = parent.get_n_children()
# return True
return False
    def get_fasta_from_selectionRect(self, selectionRect):
        """Return FASTA records for the genome slices under the selection.

        The selection rectangle's canvas coordinates are converted into base
        positions for each displayed phage (each phage group can be shifted
        horizontally, so the same rectangle covers different coordinates per
        genome).  Phages whose genome falls completely outside the selection
        are skipped.  Returns None when no zoomFactor has been recorded yet.
        """
        zoomFactor = self.root.get_data('zoomFactor')
        if not zoomFactor: return
        print 'zoomFactor: %s' % zoomFactor
        # Selection width in base pairs.
        w = int(selectionRect.props.width * zoomFactor)
        fasta = ""
        for n, phage in enumerate(self.phages):
            #print 'drawing blast alignment'
            PhageID = phage['PhageID']
            print phage
            phageName = get_phage_name_from_PhageID(self.c, PhageID)
            # Find this phage's canvas group to learn its horizontal offset.
            g = self.root.get_n_children()
            for i in range(0, g):
                child = self.root.get_child(i)
                if child.get_data('name') == get_phage_name_from_PhageID(self.c, PhageID):
                    phageGroup = child
            # Base position of the selection's left edge within this genome
            # (may be negative when the selection starts left of the map).
            x = int((selectionRect.props.x - phageGroup.get_simple_transform()[0]) * zoomFactor)
            if x+w <= 0 or x >= phage['length']:
                # Selection does not overlap this genome at all.
                continue
            start = max(x, 0)
            end = min(x+w, phage['length'])
            if start == x:
                fasta = fasta + '>' + get_phage_name_from_PhageID(self.c, phage['PhageID']) + \
                        '_(' + str(x+1) + '-' + str(end) + ')\n' + \
                        get_seq_from_PhageID(self.c, phage['PhageID'])[x:x+w] + '\n'
            else:
                # Selection starts left of the genome: clamp to position 0.
                fasta = fasta + '>' + get_phage_name_from_PhageID(self.c, phage['PhageID']) + \
                        '_(' + str(start+1) + '-' + str(end) + ')\n' + \
                        get_seq_from_PhageID(self.c, phage['PhageID'])[start:x+w] + '\n'
        return fasta
        #for phage in self.phages:
        #  fasta = fasta + '>' + get_phage_name_from_PhageID(self.c, phage['PhageID']) + \
        #          '_[' + str(x) + ', ' + str(x+w) + ']\n' + \
        #          get_seq_from_PhageID(self.c, phage['PhageID'])[x:x+w] + '\n'
        #return fasta
def show_gene_sequences(self, GeneIDs):
DNASeq = ''
proteinSeq = ''
for GeneID in GeneIDs:
DNASeq = DNASeq + '>' + GeneID + '\n' + get_seq_from_GeneID(self.c, GeneID) + '\n'
proteinSeq = proteinSeq + '>' + GeneID + '\n' + get_translation_from_GeneID(self.c, GeneID) + '\n'
controller = self.controller
controller.DNATextBuffer.set_text(DNASeq)
controller.ProteinTextBuffer.set_text(proteinSeq)
def remove_ellipse(self, ellipse):
ellipse.remove()
return True
def show_fasta_sequences(self, fasta):
controller = self.controller
if fasta:
controller.DNATextBuffer.set_text(fasta)
else:
controller.DNATextBuffer.set_text('')
controller.ProteinTextBuffer.set_text('')
    def on_rect_enter(self, item, target_item, event):
        """Hover handler for gene and domain rectangles.

        Hovering a gene (with the highlight preference on) dims genes outside
        its pham and fires an animated "laser" ellipse toward each pham
        member.  Hovering a domain rectangle creates a floating description
        label next to the phage group plus a tooltip box behind it.
        """
        if item.get_model().get_data('type') == 'scale':
            return
        r = item
        def add_tooltip(group, labelModel, r, x, y, width, height):
            # Draw a rounded, padded box behind the domain label.
            print 'adding tooltip...'
            padding = 5.0
            tooltipBox = goocanvas.RectModel(x=x-(padding/2), y=y-(padding/2), width=width+padding, height=height+padding,
                                             line_width=1,
                                             radius_x=2.5,
                                             radius_y=2.5,
                                             stroke_color = 'black',
                                             fill_color_rgba=0xD5FF8EDD)
            # Stored on the rect's model so on_rect_leave can remove it.
            r.get_model().set_data('tooltip', tooltipBox)
            #group.get_model().add_child(tooltipBox, -1)
            self.canvas.get_root_item_model().add_child(tooltipBox, -1)
            tooltipBox.lower(below=labelModel)
            print 'done!'
            return False
        highlight = self.client.get_bool('/apps/phamerator/hover_highlights_pham')
        if highlight and r.get_model().get_data('type') == 'gene' and self.dragging == False:
            for g in self.genes:
                # for each gene, if it's not in the hovered pham, set it's fill color to gray
                if g.get_data('pham') != r.get_model().get_data('pham'):
                    g.props.fill_color = '#eeeeee'
                else:
                    # zap it with a laser!
                    # Centre of the hovered gene in root coordinates (item
                    # transform plus its phage group's transform).
                    from_x = r.get_transform()[4] + r.get_model().get_parent().get_parent().get_transform()[4]
                    from_x = from_x + (r.get_model().props.width/2.0)
                    from_y = r.get_transform()[5] + r.get_model().get_parent().get_parent().get_transform()[5]
                    from_y = from_y + (r.get_model().props.height/2.0)
                    try:
                        phageGroupMatrix = g.get_parent().get_parent().get_transform()
                        # Animation target: offset from the ellipse's start
                        # point to the centre of the target gene.
                        to_x = (((g.get_transform()[4] + phageGroupMatrix[4] - from_x) * 2) + g.props.width) / 2.0
                        to_y = (((g.get_transform()[5] + phageGroupMatrix[5] - from_y) * 2) + g.props.height) / 2.0
                        ellipse_model = goocanvas.EllipseModel(parent = self.canvas.get_root_item_model(), center_x=from_x, center_y=from_y, radius_x=(g.props.width/2.0)+2, radius_y=(g.props.width/2.0)+2,
                                                               stroke_color='black', fill_color_rgba = 0xFABD0577,
                                                               line_width=0.75)
                        ellipse_model.animate(to_x, to_y, 1, 0, True, 2000, 40, goocanvas.ANIMATE_FREEZE)
                        # Clean the ellipse up after the animation has run.
                        gobject.timeout_add(15000, self.remove_ellipse, ellipse_model)
                    except:
                        # Non-gene entries in self.genes lack the geometry
                        # used above; just report and continue.
                        print 'g is a: %s' % g.get_data('type')
            #group = r.get_parent()
            r.get_model().props.stroke_color = self.prefs['activeRectColor']
            r.get_model().props.line_width = self.prefs['activeRectLineWidth']
            return True
        if r.get_model().get_data('type') == 'domainModel':
            domainGroupModel = item.get_model().get_data('domainGroupModel')
            domain = item.get_model().get_data('domain')
            # Domain coordinates are amino acids; x3 converts to bp.
            start, end, e_value, description = domain['start'] * 3, domain['end'] * 3, domain['expect'], domain['description']
            domainLabelModel = item.get_model().get_data('domainLabelModel')
            if domainLabelModel: domainLabelModel.remove()
            # Scale the label font and wrap width inversely with the canvas
            # zoom so the tooltip stays readable.
            size = "Arial %s" % str(round(10.0*(1/self.canvas.get_scale())))
            width = 400.0/self.canvas.get_scale()
            domainLabelModel = goocanvas.TextModel(text=domain['description'],
                                                   x=0, y=0,
                                                   width = width,
                                                   anchor=gtk.ANCHOR_NORTH_WEST,
                                                   font=size)
            item.get_model().set_data('domainLabelModel', domainLabelModel)
            domainLabelModel.set_data('type', 'domainLabelModel')
            self.canvas.get_root_item_model().add_child(domainLabelModel, -1)
            domainLabelModel.set_transform(item.get_model().get_transform())
            zoomFactor = self.root.get_data('zoomFactor')
            if not zoomFactor: zoomFactor = 20.0
            # Position the label just right of the domain, aligned with the
            # phage group.
            phageGroup = item.get_parent().get_parent().get_parent()
            domainLabelModel.translate((phageGroup.get_model().get_simple_transform()[0]+(abs(end-start)+200)/zoomFactor), phageGroup.get_model().get_simple_transform()[1])
            if domainLabelModel:
                #domainLabelModel.translate(0, item.get_model().get_parent().get_simple_transform()[1])
                # 'domainLabel' (the rendered item) is attached by
                # on_item_created once the view materialises the label.
                label = domainLabelModel.get_data('domainLabel')
                b = label.get_bounds()
                t = label.get_simple_transform()
                y = t[1]
                x = b.x1
                width = b.x2-b.x1
                height = b.y2-b.y1
                group = r.get_parent()
                add_tooltip(group, domainLabelModel, r, x, y, width, height)
        return True
    def on_rect_leave(self, item, target_item, event):
        """Hover-out handler: remove tooltips/labels and restore gene colours."""
        domainLabelModel = item.get_model().get_data('domainLabelModel')
        if domainLabelModel: domainLabelModel.remove()
        itemModel = item.get_model()
        r = item
        if r.get_data('tooltip'): r.get_data('tooltip').remove()
        if r.get_model().get_data('type') == 'gene':
            # Undo any pham-highlight dimming applied in on_rect_enter.
            for g in self.genes:
                g.change_color(self.client)
            # Restore the outline matching the gene's selection status.
            if r.get_model().get_data('status') == 'default':
                r.get_model().props.stroke_color = self.prefs['defaultRectColor']
                r.get_model().props.line_width = self.prefs['defaultRectLineWidth']
            elif r.get_model().get_data('status') == 'selected':
                r.get_model().props.stroke_color = self.prefs['selectedRectColor']
                r.get_model().props.line_width = self.prefs['selectedRectLineWidth']
        if r.get_model().get_data('type') == 'domainModel':
            #tt = r.get_model().get_data('tooltip')
            # NOTE(review): the label was already removed above and the data
            # pointer still references it — confirm this second remove() (and
            # the None case) cannot raise here.
            r.get_model().get_data('domainLabelModel').remove()
            tt = r.get_model().get_data('tooltip')
            if tt: tt.remove()
        return True
## This is our handler for the "delete-event" signal of the window, which
## is emitted when the 'x' close button is clicked. We just exit here. */
    def on_delete_event(self, window, event):
        """Handle the window's "delete-event" (the 'x' close button): exit."""
        raise SystemExit
# Script entry point: hand the command line to main().
if __name__ == "__main__":
    main(sys.argv)
|
byuphamerator/phamerator-dev
|
phamerator/cartographerold.py
|
Python
|
gpl-2.0
| 62,711
|
[
"BLAST"
] |
86c0275b65f7817f855389aa582adbd9b8fbe20b998368a4901f30aab6bbfa6a
|
import os
import unittest
from pymatgen.apps.battery.analyzer import BatteryAnalyzer
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
class BatteryAnalyzerTest(PymatgenTest):
    """Tests for pymatgen.apps.battery.analyzer.BatteryAnalyzer covering
    gravimetric/volumetric capacities and integer-oxidation ion removal."""

    def load_from_cif(self, filename, oxidations, working_ion="Li"):
        """Build a BatteryAnalyzer from a CIF test file, decorating the
        structure with the given per-element oxidation states."""
        s = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, filename))
        s.add_oxidation_state_by_element(oxidations)
        return BatteryAnalyzer(s, working_ion)

    def load_from_internal(self, name, oxidations, working_ion="Li"):
        """Build a BatteryAnalyzer from one of PymatgenTest's bundled structures."""
        s = self.get_structure(name).copy()
        s.add_oxidation_state_by_element(oxidations)
        return BatteryAnalyzer(s, working_ion)

    def setUp(self):
        # Fixtures span charged (FePO4, La2CoO4), discharged (LiFePO4,
        # NaFePO4, La2CoO4F) and mixed-occupancy cathodes, plus an F-ion case.
        self.lifepo4 = self.load_from_internal("LiFePO4", {"Li": 1, "Fe": 2, "P": 5, "O": -2})
        self.nafepo4 = self.load_from_internal("NaFePO4", {"Na": 1, "Fe": 2, "P": 5, "O": -2}, working_ion="Na")
        self.la2coo4f = self.load_from_internal("La2CoO4F", {"La": 3, "Co": 3, "O": -2, "F": -1}, working_ion="F")
        self.fepo4 = self.load_from_cif("FePO4a.cif", {"Fe": 3, "P": 5, "O": -2})
        self.la2coo4 = self.load_from_cif("La2CoO4.cif", {"La": 3, "Co": 2, "O": -2}, working_ion="F")
        self.lifemnpo4 = self.load_from_cif("Li4Fe3Mn1(PO4)4.cif", {"Li": 1, "Fe": 2, "Mn": 2, "P": 5, "O": -2})
        self.li8nicofe208 = self.load_from_cif(
            "Li8Fe2NiCoO8.cif", {"Li": 1, "Fe": 2, "Mn": 2, "Co": 2, "Ni": 2, "O": -2}
        )
        self.li3v2p3o12 = self.load_from_internal("Li3V2(PO4)3", {"Li": 1, "V": 3, "O": -2, "P": 5})
        self.mgnif6 = self.load_from_cif("MgNiF6.cif", {"Mg": 2, "Ni": 4, "F": -1}, working_ion="F")

    def test_oxid_check(self):
        """A structure without oxidation states must be rejected."""
        s = self.get_structure("LiFePO4")
        self.assertRaises(ValueError, BatteryAnalyzer, s, "Li")

    def test_capacitygrav_calculations(self):
        """Gravimetric capacities (mAh/g) for removal and insertion limits."""
        lifepo4_cap = 169.89053  # same as fepo4 cap
        nafepo4_cap = 154.20331
        la2coo4f_cap = 175.6564
        li3v2p3o12_cap_remove = 197.25339
        li3v2p3o12_cap_insert = 127.17129
        # Fully lithiated structures can only remove the working ion ...
        self.assertAlmostEqual(self.lifepo4.get_max_capgrav(), lifepo4_cap, 3)
        self.assertEqual(self.lifepo4.get_max_capgrav(remove=False), 0)
        self.assertAlmostEqual(self.lifepo4.get_max_capgrav(insert=False), lifepo4_cap, 3)
        self.assertAlmostEqual(self.nafepo4.get_max_capgrav(), nafepo4_cap, 3)
        self.assertEqual(self.nafepo4.get_max_capgrav(remove=False), 0)
        # ... while fully delithiated ones can only insert it.
        self.assertAlmostEqual(self.fepo4.get_max_capgrav(), lifepo4_cap, 3)
        self.assertEqual(self.fepo4.get_max_capgrav(insert=False), 0)
        self.assertAlmostEqual(self.la2coo4f.get_max_capgrav(), la2coo4f_cap, 3)
        self.assertAlmostEqual(self.la2coo4.get_max_capgrav(), la2coo4f_cap, 3)
        self.assertEqual(self.la2coo4.get_max_capgrav(insert=False), 0)
        # Li3V2(PO4)3 supports both directions with different limits.
        self.assertAlmostEqual(self.li3v2p3o12.get_max_capgrav(insert=False), li3v2p3o12_cap_remove, 3)
        self.assertAlmostEqual(self.li3v2p3o12.get_max_capgrav(remove=False), li3v2p3o12_cap_insert, 3)

    def test_capacityvol_calculations(self):
        """Volumetric capacities (mAh/cc), including an explicit volume override."""
        lifepo4_cap = 594.17518
        nafepo4_cap = 542.86104
        fepo4_cap = 624.82289  # this is different than lifepo4 cap if lifepo4 volume not known
        self.assertAlmostEqual(self.lifepo4.get_max_capvol(), lifepo4_cap, 3)
        self.assertEqual(self.lifepo4.get_max_capvol(remove=False), 0)
        self.assertAlmostEqual(self.lifepo4.get_max_capvol(insert=False), lifepo4_cap, 3)
        self.assertAlmostEqual(self.nafepo4.get_max_capvol(), nafepo4_cap, 3)
        self.assertEqual(self.nafepo4.get_max_capvol(remove=False), 0)
        self.assertAlmostEqual(self.nafepo4.get_max_capvol(insert=False), nafepo4_cap, 3)
        self.assertAlmostEqual(self.fepo4.get_max_capvol(), fepo4_cap, 3)
        self.assertAlmostEqual(self.fepo4.get_max_capvol(remove=False), fepo4_cap, 3)
        self.assertEqual(self.fepo4.get_max_capvol(insert=False), 0)
        # give the lifepo4 volume, should get lifepo4 capacity
        self.assertAlmostEqual(
            self.fepo4.get_max_capvol(volume=self.lifepo4.struc_oxid.volume),
            lifepo4_cap,
            3,
        )

    def test_ion_removal(self):
        """Integer numbers of working ions removable by oxidizing the redox metals."""
        self.assertEqual(self.lifemnpo4.get_removals_int_oxid(), {1.0, 2.0, 3.0, 4.0})
        self.assertEqual(
            self.li8nicofe208.get_removals_int_oxid(),
            {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0},
        )
        self.assertEqual(self.li3v2p3o12.get_removals_int_oxid(), {4.0, 6.0})
        self.assertEqual(self.mgnif6.get_removals_int_oxid(), {1.0, 2.0})
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
|
materialsproject/pymatgen
|
pymatgen/apps/battery/tests/test_analyzer.py
|
Python
|
mit
| 4,663
|
[
"pymatgen"
] |
7d98365e8d0511f7f00e9b76c7bfce10637194e2f8831bd41109338f79947946
|
from behave import *
use_step_matcher("parse")
@given('Exists a team created by "{username}"')
def step_impl(context, username):
    """Ensure each team in the step table exists, owned by *username*."""
    from django.contrib.auth.models import User
    from sportsBetting.models import Team
    owner = User.objects.get(username=username)
    for row in context.table:
        candidate = Team(created_by=owner)
        for column in row.headings:
            setattr(candidate, column, row[column])
        # Only persist teams whose name is not already taken.
        if not Team.objects.filter(name=candidate.name).exists():
            candidate.save()
@when('I add a new team')
def step_impl(context):
    """Fill in and submit the team-creation form once per table row."""
    for row in context.table:
        context.browser.visit(context.get_url('/teams/create/'))
        # Proceed only if we actually landed on the creation page.
        if context.browser.url != context.get_url('/teams/create/'):
            continue
        form = context.browser.find_by_tag('form').first
        for field in row.headings:
            context.browser.fill(str(field), str(row[field]))
        form.find_by_id('team_submit').first.click()
@when('I want to edit the team "{team_name}"')
def step_impl(context, team_name):
    """Navigate the browser to the edit page of the team named *team_name*."""
    from sportsBetting.models import Team
    # Fix: the original bound the local to ``id``, shadowing the builtin.
    team_id = Team.objects.get(name=team_name).id
    context.browser.visit(context.get_url('/teams/edit/' + str(team_id)))
@when('I edit the team')
def step_impl(context):
    """Submit the edit form with the table's values, then assert the redirect."""
    for row in context.table:
        # Only interact when the browser is on an edit page.
        if not context.browser.url.startswith(context.get_url('/teams/edit/')):
            continue
        form = context.browser.find_by_tag('form').first
        for field in row.headings:
            context.browser.fill(str(field), str(row[field]))
        form.find_by_id('team_submit').first.click()
    assert context.browser.url == context.get_url('/teams/list_teams/')
|
Marcelpv96/SITWprac2017
|
sportsBetting/features/steps/register_teams.py
|
Python
|
gpl-3.0
| 1,654
|
[
"VisIt"
] |
11aa18f210c15f1aeb8b0839e1277fea606c12c04079f52933c39b0b3a86752a
|
""" Module for converting various mesh formats."""
# Copyright (C) 2006 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Garth N. Wells (gmsh function)
# Modified by Alexander H. Jarosch (gmsh fix)
# Modified by Angelo Simone (Gmsh and Medit fix)
# Modified by Andy R. Terrel (gmsh fix and triangle function)
# Modified by Magnus Vikstrom (metis and scotch function)
# Modified by Bartosz Sawicki (diffpack function)
# Modified by Gideon Simpson (Exodus II function)
# Modified by Kent-Andre Mardal (Star-CD function)
# Modified by Nuno Lopes (fix for emc2 mesh format (medit version 0))
# Modified by Neilen Marais (add gmsh support for reading physical region)
# Modified by Evan Lezar (add support for reading gmsh physical regions on facets)
# Modified by Jan Blechta (add triangle support for marker on edges and attributes on triangles)
#
# Last changed: 2014-02-06
# NOTE: This module does not depend on (py)dolfin beeing installed.
# NOTE: If future additions need that please import dolfin in a try: except:
# NOTE: clause and tell the user to install dolfin if it is not installed.
from __future__ import print_function
import getopt
import sys
from instant import get_status_output
import re
import warnings
import os.path
import numpy
import six
from . import abaqus
from . import xml_writer
def format_from_suffix(suffix):
    "Return format for given suffix"
    # Table of known file suffixes and the converter format they map to.
    suffix_map = {
        "xml": "xml",
        "mesh": "mesh",
        "gmsh": "gmsh",
        "msh": "gmsh",
        "gra": "metis",
        "grf": "scotch",
        "grid": "diffpack",
        "inp": "abaqus",
        "ncdf": "NetCDF",
        "exo": "ExodusII",
        "e": "ExodusII",
        "vrt": "StarCD",
        "cel": "StarCD",
        "ele": "Triangle",
        "node": "Triangle",
    }
    try:
        return suffix_map[suffix]
    except KeyError:
        _error("Sorry, unknown suffix %s." % suffix)
def mesh2xml(ifilename, ofilename):
    """Convert between .mesh and .xml, parser implemented as a
    state machine:
       0 = read 'Dimension'
       1 = read dimension
       2 = read 'Vertices'
       3 = read number of vertices
       4 = read next vertex
       5 = read 'Triangles' or 'Tetrahedra'
       6 = read number of cells
       7 = read next cell
       8 = done
    """
    print("Converting from Medit format (.mesh) to DOLFIN XML format")
    # Open files
    ifile = open(ifilename, "r")
    ofile = open(ofilename, "w")
    # First pass: scan file for cell type (2D -> triangle, 3D -> tetrahedron)
    cell_type = None
    dim = 0
    while 1:
        # Read next line
        line = ifile.readline()
        if not line: break
        # Remove newline
        line = line.rstrip("\n\r")
        # Read dimension
        if line == "Dimension" or line == " Dimension":
            line = ifile.readline()
            num_dims = int(line)
            if num_dims == 2:
                cell_type = "triangle"
                dim = 2
            elif num_dims == 3:
                cell_type = "tetrahedron"
                dim = 3
            break
    # Check that we got the cell type
    if cell_type == None:
        _error("Unable to find cell type.")
    # Step to beginning of file for the real parsing pass
    ifile.seek(0)
    # Write header
    xml_writer.write_header_mesh(ofile, cell_type, dim)
    # Current state
    state = 0
    # Write data
    num_vertices_read = 0
    num_cells_read = 0
    while 1:
        # Read next line
        line = ifile.readline()
        if not line: break
        # Skip comments
        if line[0] == '#':
            continue
        # Remove newline
        line = line.rstrip("\n\r")
        if state == 0:
            if line == "Dimension" or line == " Dimension":
                state += 1
        elif state == 1:
            num_dims = int(line)
            state +=1
        elif state == 2:
            if line == "Vertices" or line == " Vertices":
                state += 1
        elif state == 3:
            num_vertices = int(line)
            xml_writer.write_header_vertices(ofile, num_vertices)
            state +=1
        elif state == 4:
            # Each vertex line carries a trailing field (tmp) — presumably a
            # Medit reference/marker value; it is ignored here. TODO confirm.
            if num_dims == 2:
                (x, y, tmp) = line.split()
                x = float(x)
                y = float(y)
                z = 0.0
            elif num_dims == 3:
                (x, y, z, tmp) = line.split()
                x = float(x)
                y = float(y)
                z = float(z)
            xml_writer.write_vertex(ofile, num_vertices_read, x, y, z)
            num_vertices_read +=1
            if num_vertices == num_vertices_read:
                xml_writer.write_footer_vertices(ofile)
                state += 1
        elif state == 5:
            if (line == "Triangles" or line == " Triangles") and num_dims == 2:
                state += 1
            if line == "Tetrahedra" and num_dims == 3:
                state += 1
        elif state == 6:
            num_cells = int(line)
            xml_writer.write_header_cells(ofile, num_cells)
            state +=1
        elif state == 7:
            # Medit vertex indices are 1-based; DOLFIN XML is 0-based.
            if num_dims == 2:
                (n0, n1, n2, tmp) = line.split()
                n0 = int(n0) - 1
                n1 = int(n1) - 1
                n2 = int(n2) - 1
                xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
            elif num_dims == 3:
                (n0, n1, n2, n3, tmp) = line.split()
                n0 = int(n0) - 1
                n1 = int(n1) - 1
                n2 = int(n2) - 1
                n3 = int(n3) - 1
                xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
            num_cells_read +=1
            if num_cells == num_cells_read:
                xml_writer.write_footer_cells(ofile)
                state += 1
        elif state == 8:
            break
    # Check that we got all data
    if state == 8:
        print("Conversion done")
    else:
        _error("Missing data, unable to convert")
    # Write footer
    xml_writer.write_footer_mesh(ofile)
    # Close files
    ifile.close()
    ofile.close()
def gmsh2xml(ifilename, handler):
    """Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
    parser implemented as a state machine:
       0 = read 'MeshFormat'
       1 = read mesh format data
       2 = read 'EndMeshFormat'
       3 = read 'Nodes'
       4 = read number of vertices
       5 = read vertices
       6 = read 'EndNodes'
       7 = read 'Elements'
       8 = read number of cells
       9 = read cells
       10 = done
    Afterwards, extract physical region numbers if they are defined in
    the mesh file as a mesh function.

    Fix: the facet-marker lookup below used ``except IndexError`` around a
    dict subscript; dict lookups raise KeyError, so the intended error
    message could never be produced.
    """
    print("Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format")
    # The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension
    gmsh_dim = {15: 0, 1: 1, 2: 2, 4: 3}
    cell_type_for_dim = {1: "interval", 2: "triangle", 3: "tetrahedron" }
    # the gmsh element types supported for conversion
    supported_gmsh_element_types = [1, 2, 4, 15]
    # Open files
    ifile = open(ifilename, "r")
    # Scan file for cell type
    cell_type = None
    highest_dim = 0
    line = ifile.readline()
    while line:
        # Remove newline
        line = line.rstrip("\n\r")
        # Read dimension
        if line.find("$Elements") == 0:
            line = ifile.readline()
            num_elements = int(line)
            if num_elements == 0:
                _error("No elements found in gmsh file.")
            line = ifile.readline()
            # Now iterate through elements to find largest dimension. Gmsh
            # format might include elements of lower dimensions in the element list.
            # We also need to count number of elements of correct dimensions.
            # Also determine which vertices are not used.
            dim_count = {0: 0, 1: 0, 2: 0, 3: 0}
            vertices_used_for_dim = {0: [], 1: [], 2: [], 3: []}
            # Array used to store gmsh tags for 1D (type 1/line), 2D (type 2/triangular) elements and 3D (type 4/tet) elements
            tags_for_dim = {0: [], 1: [], 2: [], 3: []}
            while line.find("$EndElements") == -1:
                element = line.split()
                elem_type = int(element[1])
                num_tags = int(element[2])
                if elem_type in supported_gmsh_element_types:
                    dim = gmsh_dim[elem_type]
                    if highest_dim < dim:
                        highest_dim = dim
                    node_num_list = [int(node) for node in element[3 + num_tags:]]
                    vertices_used_for_dim[dim].extend(node_num_list)
                    if num_tags > 0:
                        tags_for_dim[dim].append(tuple(int(tag) for tag in element[3:3+num_tags]))
                    dim_count[dim] += 1
                else:
                    #TODO: output a warning here. "gmsh element type %d not supported" % elem_type
                    pass
                line = ifile.readline()
        else:
            # Read next line
            line = ifile.readline()
    # Check that we got the cell type and set num_cells_counted
    if highest_dim == 0:
        _error("Unable to find cells of supported type.")
    num_cells_counted = dim_count[highest_dim]
    vertex_set = set(vertices_used_for_dim[highest_dim])
    vertices_used_for_dim[highest_dim] = None
    vertex_dict = {}
    for n,v in enumerate(vertex_set):
        vertex_dict[v] = n
    # Step to beginning of file
    ifile.seek(0)
    # Set mesh type
    handler.set_mesh_type(cell_type_for_dim[highest_dim], highest_dim)
    # Initialise node list (gmsh does not export all vertexes in order)
    nodelist = {}
    # Current state
    state = 0
    # Write data
    num_vertices_read = 0
    num_cells_read = 0
    # Only import the dolfin objects if facet markings exist
    process_facets = False
    if len(tags_for_dim[highest_dim-1]) > 0:
        # first construct the mesh
        try:
            from dolfin import MeshEditor, Mesh
        except ImportError:
            _error("DOLFIN must be installed to handle Gmsh boundary regions")
        mesh = Mesh()
        mesh_editor = MeshEditor ()
        mesh_editor.open( mesh, highest_dim, highest_dim )
        process_facets = True
    else:
        # TODO: Output a warning or an error here
        me = None
    while state != 10:
        # Read next line
        line = ifile.readline()
        if not line: break
        # Skip comments
        if line[0] == '#':
            continue
        # Remove newline
        line = line.rstrip("\n\r")
        if state == 0:
            if line == "$MeshFormat":
                state = 1
        elif state == 1:
            (version, file_type, data_size) = line.split()
            state = 2
        elif state == 2:
            if line == "$EndMeshFormat":
                state = 3
        elif state == 3:
            if line == "$Nodes":
                state = 4
        elif state == 4:
            num_vertices = len(vertex_dict)
            handler.start_vertices(num_vertices)
            if process_facets:
                mesh_editor.init_vertices_global(num_vertices, num_vertices)
            state = 5
        elif state == 5:
            (node_no, x, y, z) = line.split()
            node_no = int(node_no)
            x,y,z = [float(xx) for xx in (x,y,z)]
            # Only keep vertices referenced by cells of the highest dimension
            if node_no in vertex_dict:
                node_no = vertex_dict[node_no]
            else:
                continue
            nodelist[int(node_no)] = num_vertices_read
            handler.add_vertex(num_vertices_read, [x, y, z])
            if process_facets:
                if highest_dim == 1:
                    coords = numpy.array([x])
                elif highest_dim == 2:
                    coords = numpy.array([x, y])
                elif highest_dim == 3:
                    coords = numpy.array([x, y, z])
                mesh_editor.add_vertex(num_vertices_read, coords)
            num_vertices_read +=1
            if num_vertices == num_vertices_read:
                handler.end_vertices()
                state = 6
        elif state == 6:
            if line == "$EndNodes":
                state = 7
        elif state == 7:
            if line == "$Elements":
                state = 8
        elif state == 8:
            handler.start_cells(num_cells_counted)
            if process_facets:
                mesh_editor.init_cells_global(num_cells_counted, num_cells_counted)
            state = 9
        elif state == 9:
            element = line.split()
            elem_type = int(element[1])
            num_tags = int(element[2])
            if elem_type in supported_gmsh_element_types:
                dim = gmsh_dim[elem_type]
            else:
                dim = 0
            if dim == highest_dim:
                node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]
                for node in node_num_list:
                    if not node in nodelist:
                        _error("Vertex %d of %s %d not previously defined." %
                               (node, cell_type_for_dim[dim], num_cells_read))
                cell_nodes = [nodelist[n] for n in node_num_list]
                handler.add_cell(num_cells_read, cell_nodes)
                if process_facets:
                    cell_nodes = numpy.array([nodelist[n] for n in node_num_list], dtype=numpy.uintp)
                    mesh_editor.add_cell(num_cells_read, cell_nodes)
                num_cells_read +=1
            if num_cells_counted == num_cells_read:
                handler.end_cells()
                if process_facets:
                    mesh_editor.close()
                state = 10
        elif state == 10:
            break
    # Write mesh function based on the Physical Regions defined by
    # gmsh, but only if they are not all zero. All zero physical
    # regions indicate that no physical regions were defined.
    if highest_dim not in [1,2,3]:
        _error("Gmsh tags not supported for dimension %i. Probably a bug" % dim)
    tags = tags_for_dim[highest_dim]
    physical_regions = tuple(tag[0] for tag in tags)
    if not all(tag == 0 for tag in physical_regions):
        handler.start_meshfunction("physical_region", dim, num_cells_counted)
        for i, physical_region in enumerate(physical_regions):
            handler.add_entity_meshfunction(i, physical_region)
        handler.end_meshfunction()
    # Now process the facet markers
    tags = tags_for_dim[highest_dim-1]
    if (len(tags) > 0) and (mesh is not None):
        physical_regions = tuple(tag[0] for tag in tags)
        if not all(tag == 0 for tag in physical_regions):
            mesh.init(highest_dim-1,0)
            # Get the facet-node connectivity information (reshape as a row of node indices per facet)
            if highest_dim==1:
                # for 1d meshes the mesh topology returns the vertex to vertex map, which isn't what we want
                # as facets are vertices
                facets_as_nodes = numpy.array([[i] for i in range(mesh.num_facets())])
            else:
                facets_as_nodes = mesh.topology()(highest_dim-1,0)().reshape ( mesh.num_facets(), highest_dim )
            # Build the reverse map
            nodes_as_facets = {}
            for facet in range(mesh.num_facets()):
                nodes_as_facets[tuple(facets_as_nodes[facet,:])] = facet
            data = [int(0*k) for k in range(mesh.num_facets()) ]
            for i, physical_region in enumerate(physical_regions):
                nodes = [n-1 for n in vertices_used_for_dim[highest_dim-1][highest_dim*i:(highest_dim*i+highest_dim)]]
                nodes.sort()
                if physical_region != 0:
                    try:
                        index = nodes_as_facets[tuple(nodes)]
                        data[index] = physical_region
                    # Bug fix: nodes_as_facets is a dict, so a missing facet
                    # raises KeyError — IndexError could never be caught here.
                    except KeyError:
                        raise Exception ( "The facet (%d) was not found to mark: %s" % (i, nodes) )
            # Create and initialise the mesh function
            handler.start_meshfunction("facet_region", highest_dim-1, mesh.num_facets() )
            for index, physical_region in enumerate ( data ):
                handler.add_entity_meshfunction(index, physical_region)
            handler.end_meshfunction()
    # Check that we got all data
    if state == 10:
        print("Conversion done")
    else:
        _error("Missing data, unable to convert \n\ Did you use version 2.0 of the gmsh file format?")
    # Close files
    ifile.close()
def triangle2xml(ifilename, ofilename):
    """Convert between triangle format
    (http://www.cs.cmu.edu/~quake/triangle.html) and .xml. The
    given ifilename should be the prefix for the corresponding
    .node, and .ele files.

    Fix: the edge-marker lookup used ``except IndexError`` around dict
    subscripts; dict lookups raise KeyError, so the intended error
    message was never produced.
    """
    def get_next_line (fp):
        """Helper function for skipping comments and blank lines"""
        line = fp.readline()
        if line == '':
            _error("Hit end of file prematurely.")
        line = line.strip()
        if not (line.startswith('#') or line == ''):
            return line
        return get_next_line(fp)

    print("Converting from Triangle format {.node, .ele} to DOLFIN XML format")
    # Open files; accept either the bare prefix or a .node/.ele path
    for suffix in [".node", ".ele"]:
        if suffix in ifilename and ifilename[-len(suffix):] == suffix:
            ifilename = ifilename.replace(suffix, "")
    node_file = open(ifilename+".node", "r")
    ele_file = open(ifilename+".ele", "r")
    ofile = open(ofilename, "w")
    try:
        edge_file = open(ifilename+".edge", "r")
        print("Found .edge file")
    except IOError:
        edge_file = None
    # Read all the nodes
    nodes = {}
    num_nodes, dim, attr, bound = list(map(int, get_next_line(node_file).split()))
    while len(nodes) < num_nodes:
        node, x, y = get_next_line(node_file).split()[:3]
        nodes[int(node)] = (float(x), float(y))
    # Read all the triangles
    tris = {}
    tri_attrs = {}
    num_tris, n_per_tri, attrs = list(map(int, get_next_line(ele_file).split()))
    while len(tris) < num_tris:
        line = get_next_line(ele_file).split()
        tri, n1, n2, n3 = list(map(int, line[:4]))
        # vertices are ordered according to current UFC ordering scheme -
        # - may change in future!
        tris[tri] = tuple(sorted((n1, n2, n3)))
        tri_attrs[tri] = tuple(map(float, line[4:4+attrs]))
    # Read all the boundary markers from edges
    edge_markers_global = {}
    edge_markers_local = []
    got_negative_edge_markers = False
    if edge_file is not None:
        num_edges, num_edge_markers = list(map(int, get_next_line(edge_file).split()))
        if num_edge_markers == 1:
            while len(edge_markers_global) < num_edges:
                edge, v1, v2, marker = list(map(int, get_next_line(edge_file).split()))
                if marker < 0: got_negative_edge_markers = True
                edge_markers_global[tuple(sorted((v1, v2)))] = marker
            if got_negative_edge_markers:
                print("Some edge markers are negative! dolfin will increase "\
                      "them by probably 2**32 when loading xml. "\
                      "Consider using non-negative edge markers only.")
            # Map each global edge marker onto (triangle, local edge index)
            for tri, vertices in six.iteritems(tris):
                v0, v1, v2 = sorted((vertices[0:3]))
                try:
                    edge_markers_local.append((tri, 0, \
                                               edge_markers_global[(v1, v2)]))
                    edge_markers_local.append((tri, 1, \
                                               edge_markers_global[(v0, v2)]))
                    edge_markers_local.append((tri, 2, \
                                               edge_markers_global[(v0, v1)]))
                # Bug fix: dict lookups raise KeyError, not IndexError.
                except KeyError:
                    raise Exception("meshconvert.py: The facet was not found.")
        elif num_edge_markers == 0:
            print("...but no markers in it. Ignoring it")
        else:
            print("...but %d markers specified in it. It won't be processed."\
                  %num_edge_markers)
    # Write everything out
    xml_writer.write_header_mesh(ofile, "triangle", 2)
    xml_writer.write_header_vertices(ofile, num_nodes)
    # Triangle files may be 0- or 1-indexed; shift to 0-based output
    node_off = 0 if 0 in nodes else -1
    for node, node_t in six.iteritems(nodes):
        xml_writer.write_vertex(ofile, node+node_off, node_t[0], node_t[1], 0.0)
    xml_writer.write_footer_vertices(ofile)
    xml_writer.write_header_cells(ofile, num_tris)
    tri_off = 0 if 0 in tris else -1
    for tri, tri_t in six.iteritems(tris):
        xml_writer.write_cell_triangle(ofile, tri+tri_off, tri_t[0] + node_off,
                                       tri_t[1] + node_off, tri_t[2] + node_off)
    xml_writer.write_footer_cells(ofile)
    if len(edge_markers_local) > 0:
        xml_writer.write_header_domains(ofile)
        xml_writer.write_header_meshvaluecollection(ofile, \
                            "edge markers", 1, len(edge_markers_local), "uint")
        for tri, local_edge, marker in edge_markers_local:
            xml_writer.write_entity_meshvaluecollection(ofile, \
                            1, tri+tri_off, marker, local_edge)
        xml_writer.write_footer_meshvaluecollection(ofile)
        xml_writer.write_footer_domains(ofile)
    xml_writer.write_footer_mesh(ofile)
    # One companion XML file per triangle attribute column
    for i in range(attrs):
        afilename = ofilename.replace(".xml", ".attr"+str(i)+".xml")
        afile = open(afilename, "w")
        xml_writer.write_header_meshfunction2(afile)
        xml_writer.write_header_meshvaluecollection(afile, \
                            "triangle attribs "+str(i), 2, num_tris, "double")
        for tri, tri_a in six.iteritems(tri_attrs):
            xml_writer.write_entity_meshvaluecollection(afile, \
                            2, tri+tri_off, tri_a[i], 0)
        xml_writer.write_footer_meshvaluecollection(afile)
        xml_writer.write_footer_meshfunction(afile)
        print("triangle attributes from .ele file written to "+afilename)
        afile.close()
    # Close files
    node_file.close()
    ele_file.close()
    if edge_file is not None:
        edge_file.close()
    ofile.close()
def xml_old2xml(ifilename, ofilename):
    "Convert from old DOLFIN XML format to new."
    print("Converting from old (pre DOLFIN 0.6.2) to new DOLFIN XML format...")
    ifile = open(ifilename, "r")
    ofile = open(ofilename, "w")
    # First pass: detect the (single) cell type used in the file.
    cell_type = None
    dim = 0
    for line in ifile:
        if "<triangle" in line:
            cell_type, dim = "triangle", 2
            break
        if "<tetrahedron" in line:
            cell_type, dim = "tetrahedron", 3
            break
    # Second pass: rewrite each line into the new format.
    ifile.seek(0)
    for line in ifile:
        if "xmlns" in line:
            line = "<dolfin xmlns:dolfin=\"http://fenicsproject.org\">\n"
        if "<mesh>" in line:
            line = " <mesh celltype=\"%s\" dim=\"%d\">\n" % (cell_type, dim)
        # 2D meshes drop the redundant z coordinate
        if dim == 2 and " z=\"0.0\"" in line:
            line = line.replace(" z=\"0.0\"", "")
        # Attribute renames: name -> index, n0..n3 -> v0..v3
        for old, new in ((" name=", " index="), (" name =", " index="),
                         ("n0", "v0"), ("n1", "v1"),
                         ("n2", "v2"), ("n3", "v3")):
            if old in line:
                line = line.replace(old, new)
        ofile.write(line)
    ifile.close()
    ofile.close()
    print("Conversion done")
def metis_graph2graph_xml(ifilename, ofilename):
    """Convert from Metis graph format to DOLFIN Graph XML.

    A Metis graph file has a header line "<num_vertices> <num_edges>"
    followed by one line per vertex listing its neighbours. Metis stores
    each undirected edge once per endpoint, so the directed edge count
    written out is 2 * num_edges.
    """
    print("Converting from Metis graph format to DOLFIN Graph XML.")
    # Open files
    ifile = open(ifilename, "r")
    ofile = open(ofilename, "w")
    # Read number of vertices and edges
    line = ifile.readline()
    if not line:
        _error("Empty file")
    (num_vertices, num_edges) = line.split()
    xml_writer.write_header_graph(ofile, "directed")
    xml_writer.write_header_vertices(ofile, int(num_vertices))
    # First pass: write one vertex entry with its out-degree per line
    for i in range(int(num_vertices)):
        line = ifile.readline()
        edges = line.split()
        xml_writer.write_graph_vertex(ofile, i, len(edges))
    xml_writer.write_footer_vertices(ofile)
    xml_writer.write_header_edges(ofile, 2*int(num_edges))
    # Step to beginning of file and skip header info
    ifile.seek(0)
    ifile.readline()
    # Second pass: write the edges themselves.
    # (Bug fix: removed leftover debug statement 'print("vertex %g", i)',
    # which printed the tuple ('vertex %g', i) instead of a formatted index.)
    for i in range(int(num_vertices)):
        line = ifile.readline()
        edges = line.split()
        for e in edges:
            xml_writer.write_graph_edge(ofile, i, int(e))
    xml_writer.write_footer_edges(ofile)
    xml_writer.write_footer_graph(ofile)
    # Close files
    ifile.close()
    ofile.close()
def scotch_graph2graph_xml(ifilename, ofilename):
    "Convert from Scotch graph format to DOLFIN Graph XML."
    print("Converting from Scotch graph format to DOLFIN Graph XML.")
    # Open files
    ifile = open(ifilename, "r")
    ofile = open(ofilename, "w")
    # Skip graph file version number
    ifile.readline()
    # Read number of vertices and edges
    line = ifile.readline()
    if not line:
        _error("Empty file")
    (num_vertices, num_edges) = line.split()
    # Read start index and numeric flag
    # Start index is 0 or 1 (C/Fortran)
    # Numeric flag is 3 bits where bit 1 enables vertex labels
    # bit 2 enables edge weights and bit 3 enables vertex weights
    line = ifile.readline()
    (start_index, numeric_flag) = line.split()
    # Handling not implemented: only the plain "000" flag is supported
    if not numeric_flag == "000":
        _error("Handling of scotch vertex labels, edge- and vertex weights not implemented")
    xml_writer.write_header_graph(ofile, "undirected")
    xml_writer.write_header_vertices(ofile, int(num_vertices))
    # Read vertices and edges, first number gives number of edges from this vertex (not used)
    for i in range(int(num_vertices)):
        line = ifile.readline()
        edges = line.split()
        xml_writer.write_graph_vertex(ofile, i, len(edges)-1)
    xml_writer.write_footer_vertices(ofile)
    # Scotch graphs are undirected, so each edge is written once
    xml_writer.write_header_edges(ofile, int(num_edges))
    # Step to beginning of file and skip header info (3 lines)
    ifile.seek(0)
    ifile.readline()
    ifile.readline()
    ifile.readline()
    for i in range(int(num_vertices)):
        line = ifile.readline()
        edges = line.split()
        # The first entry on each line is the neighbour count; skip it
        for j in range(1, len(edges)):
            xml_writer.write_graph_edge(ofile, i, int(edges[j]))
    xml_writer.write_footer_edges(ofile)
    xml_writer.write_footer_graph(ofile)
    # Close files
    ifile.close();
    ofile.close();
def diffpack2xml(ifilename, ofilename):
    "Convert from Diffpack tetrahedral/triangle grid format to DOLFIN XML."
    # NOTE: the docstring above is printed at runtime, so it is kept short.
    print(diffpack2xml.__doc__)
    # Format strings for MeshFunction XML files
    # NOTE(review): these templates appear unused below (xml_writer performs
    # the actual writing) — presumably kept for reference; TODO confirm.
    meshfunction_header = """\
<?xml version="1.0" encoding="UTF-8"?>\n
<dolfin xmlns:dolfin="http://www.fenics.org/dolfin/">
  <mesh_function type="uint" dim="%d" size="%d">\n"""
    meshfunction_entity = "    <entity index=\"%d\" value=\"%d\"/>\n"
    meshfunction_footer = "  </mesh_function>\n</dolfin>"
    # Open files
    ifile = open(ifilename, "r")
    ofile = open(ofilename, "w")
    # Read and analyze header; a line starting with '#' ends the header
    while 1:
        line = ifile.readline()
        if not line:
            _error("Empty file")
        if line[0] == "#":
            break
        if re.search(r"Number of elements", line):
            num_cells = int(re.match(r".*\s(\d+).*", line).group(1))
        if re.search(r"Number of nodes", line):
            num_vertices = int(re.match(r".*\s(\d+).*", line).group(1))
        if re.search(r"Number of space dim.", line):
            num_dims = int(re.match(r".*\s(\d+).*", line).group(1))
    # Pick cell type and writer from the spatial dimension
    if num_dims == 3:
        xml_writer.write_header_mesh(ofile, "tetrahedron", 3)
        elem_type = "ElmT4n3D"
        write_cell_func = xml_writer.write_cell_tetrahedron
    else:
        xml_writer.write_header_mesh(ofile, "triangle", 2)
        elem_type = "ElmT3n2D"
        write_cell_func = xml_writer.write_cell_triangle
    xml_writer.write_header_vertices(ofile, num_vertices)
    # Read & write vertices and collect markers for vertices
    vertex_markers = []
    unique_vertex_markers = set()
    for i in range(num_vertices):
        line = ifile.readline()
        # Coordinates are in '(...)'; markers follow the closing ']'
        m = re.match(r"^.*\(\s*(.*)\s*\).*\](.*)$", line)
        x = list(map(float, re.split("[\s,]+", m.group(1))))
        xml_writer.write_vertex(ofile, i, *x)
        markers = list(map(int, m.group(2).split()))
        vertex_markers.append(markers)
        unique_vertex_markers.update(markers)
    xml_writer.write_footer_vertices(ofile)
    xml_writer.write_header_cells(ofile, num_cells)
    # Output unique vertex markers as individual VertexFunctions
    unique_vertex_markers.difference_update([0])
    for unique_marker in unique_vertex_markers:
        ofile_marker = open(ofilename.replace(".xml", "") + \
                            "_marker_" + str(unique_marker)+".xml", "w")
        xml_writer.write_header_meshfunction(ofile_marker, 0, num_vertices)
        for ind, markers in enumerate(vertex_markers):
            if unique_marker in markers:
                xml_writer.write_entity_meshfunction(ofile_marker, ind, unique_marker)
            else:
                xml_writer.write_entity_meshfunction(ofile_marker, ind, 0)
        xml_writer.write_footer_meshfunction(ofile_marker)
    # Ignore comment lines
    while 1:
        line = ifile.readline()
        if not line:
            _error("Empty file")
        if line[0] == "#":
            break
    # Read & write cells and collect cell and face markers
    cell_markers = []
    facet_markers = []
    facet_to_vert = [[1,2,3], [0,2,3], [0,1,3], [0,1,2]]
    vert_to_facet = facet_to_vert # The same!
    cell_ind = 0
    while cell_ind < num_cells:
        line = ifile.readline()
        v = line.split()
        if not v:
            continue
        if v[1] != elem_type:
            _error("Only tetrahedral (ElmT4n3D) and triangular (ElmT3n2D) elements are implemented.")
        # Store Cell markers
        cell_markers.append(int(v[2]))
        # Sort vertex indices (1-based in the file, shifted to 0-based here)
        cell_indices = sorted([int(x)-1 for x in v[3:]])
        write_cell_func(ofile, cell_ind, *cell_indices)
        if num_dims == 2:
            cell_ind += 1
            continue
        # Check Facet info (3D only): a facet is marked when all three of
        # its vertices share a common marker value
        process_facet = set(range(4))
        for local_vert_ind, global_vert_ind in enumerate(cell_indices):
            # If no marker is included for vertex skip corresponding facet
            if not vertex_markers[global_vert_ind]:
                process_facet.difference_update(facet_to_vert[local_vert_ind])
        # Process facets
        for local_facet in process_facet:
            # Start with markers from first vertex
            global_first_vertex = cell_indices[facet_to_vert[local_facet][0]]
            marker_intersection = set(vertex_markers[global_first_vertex])
            # Process the other vertices
            for local_vert in facet_to_vert[local_facet][1:]:
                marker_intersection.intersection_update(\
                    vertex_markers[cell_indices[local_vert]])
                if not marker_intersection:
                    break
            # If not break we have a marker on local_facet
            else:
                assert(len(marker_intersection)==1)
                facet_markers.append((cell_ind, local_facet, \
                                      marker_intersection.pop()))
        # Bump cell_ind
        cell_ind += 1
    xml_writer.write_footer_cells(ofile)
    xml_writer.write_header_domains(ofile)
    # Write facet markers if any
    if facet_markers:
        xml_writer.write_header_meshvaluecollection(ofile, "m", 2, \
                                                    len(facet_markers), "uint")
        for cell, local_facet, marker in facet_markers:
            xml_writer.write_entity_meshvaluecollection(ofile, 2, cell, \
                                                        marker, local_facet)
        xml_writer.write_footer_meshvaluecollection(ofile)
    xml_writer.write_header_meshvaluecollection(ofile, "m", num_dims, \
                                                len(cell_markers), "uint")
    for cell, marker in enumerate(cell_markers):
        xml_writer.write_entity_meshvaluecollection(ofile, num_dims, cell, \
                                                    marker)
    xml_writer.write_footer_meshvaluecollection(ofile)
    xml_writer.write_footer_domains(ofile)
    xml_writer.write_footer_mesh(ofile)
    # Close files
    ifile.close()
    ofile.close()
class ParseError(Exception):
    """Raised when an error is encountered in a source mesh file."""
class DataHandler(object):
    """ Baseclass for handlers of mesh data.
    The actual handling of mesh data encountered in the source file is
    delegated to a polymorfic object. Typically, the delegate will write the
    data to XML.
    @ivar _state: the state which the handler is in, one of State_*.
    @ivar _cell_type: cell type in mesh. One of CellType_*.
    @ivar _dim: mesh dimensions.
    """
    # Handler states; legal transitions are enforced by assertions below.
    State_Invalid, State_Init, State_Vertices, State_Cells, \
        State_MeshFunction, State_MeshValueCollection = list(range(6))
    # Supported cell types
    CellType_Tetrahedron, CellType_Triangle, CellType_Interval = list(range(3))

    def __init__(self):
        # Handlers start out invalid until set_mesh_type() is called.
        self._state = self.State_Invalid

    def set_mesh_type(self, cell_type, dim):
        """Record the cell type and dimension; must be the first call."""
        assert self._state == self.State_Invalid
        self._state = self.State_Init
        # Note: _cell_type stays unset for unrecognised cell_type strings.
        if cell_type == "tetrahedron":
            self._cell_type = self.CellType_Tetrahedron
        elif cell_type == "triangle":
            self._cell_type = self.CellType_Triangle
        elif cell_type == "interval":
            self._cell_type = self.CellType_Interval
        self._dim = dim

    def start_vertices(self, num_vertices):
        """Begin the vertex section."""
        assert self._state == self.State_Init
        self._state = self.State_Vertices

    def add_vertex(self, vertex, coords):
        """Handle one vertex (index and coordinate list)."""
        assert self._state == self.State_Vertices

    def end_vertices(self):
        """Finish the vertex section."""
        assert self._state == self.State_Vertices
        self._state = self.State_Init

    def start_cells(self, num_cells):
        """Begin the cell section."""
        assert self._state == self.State_Init
        self._state = self.State_Cells

    def add_cell(self, cell, nodes):
        """Handle one cell (index and its vertex indices)."""
        assert self._state == self.State_Cells

    def end_cells(self):
        """Finish the cell section."""
        assert self._state == self.State_Cells
        self._state = self.State_Init

    def start_domains(self):
        """Begin the domains section."""
        assert self._state == self.State_Init

    def end_domains(self):
        """Finish the domains section."""
        self._state = self.State_Init

    def start_meshfunction(self, name, dim, size):
        """Begin a mesh function of the given name, dimension and size."""
        assert self._state == self.State_Init
        self._state = self.State_MeshFunction

    def add_entity_meshfunction(self, index, value):
        """Handle one mesh function entry."""
        assert self._state == self.State_MeshFunction

    def end_meshfunction(self):
        """Finish the current mesh function."""
        assert self._state == self.State_MeshFunction
        self._state = self.State_Init

    def start_mesh_value_collection(self, name, dim, size, etype):
        """Begin a mesh value collection."""
        assert self._state == self.State_Init
        self._state = self.State_MeshValueCollection

    def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):
        """Handle one mesh value collection entry."""
        assert self._state == self.State_MeshValueCollection

    def end_mesh_value_collection(self):
        """Finish the current mesh value collection."""
        assert self._state == self.State_MeshValueCollection
        self._state = self.State_Init

    def warn(self, msg):
        """ Issue warning during parse.
        """
        warnings.warn(msg)

    def error(self, msg):
        """ Raise error during parse.
        This method is expected to raise ParseError.
        """
        raise ParseError(msg)

    def close(self):
        # Return to the invalid state; the handler may not be reused.
        self._state = self.State_Invalid
class XmlHandler(DataHandler):
""" Data handler class which writes to Dolfin XML.
"""
def __init__(self, ofilename):
DataHandler.__init__(self)
self._ofilename = ofilename
self.__ofile = file(ofilename, "wb")
self.__ofile_meshfunc = None
def ofile(self):
return self.__ofile
def set_mesh_type(self, cell_type, dim):
DataHandler.set_mesh_type(self, cell_type, dim)
xml_writer.write_header_mesh(self.__ofile, cell_type, dim)
def start_vertices(self, num_vertices):
DataHandler.start_vertices(self, num_vertices)
xml_writer.write_header_vertices(self.__ofile, num_vertices)
def add_vertex(self, vertex, coords):
DataHandler.add_vertex(self, vertex, coords)
xml_writer.write_vertex(self.__ofile, vertex, *coords)
def end_vertices(self):
DataHandler.end_vertices(self)
xml_writer.write_footer_vertices(self.__ofile)
def start_cells(self, num_cells):
DataHandler.start_cells(self, num_cells)
xml_writer.write_header_cells(self.__ofile, num_cells)
def add_cell(self, cell, nodes):
DataHandler.add_cell(self, cell, nodes)
if self._cell_type == self.CellType_Tetrahedron:
func = xml_writer.write_cell_tetrahedron
elif self._cell_type == self.CellType_Triangle:
func = xml_writer.write_cell_triangle
elif self._cell_type == self.CellType_Interval:
func = xml_writer.write_cell_interval
func(self.__ofile, cell, *nodes)
def end_cells(self):
DataHandler.end_cells(self)
xml_writer.write_footer_cells(self.__ofile)
def start_meshfunction(self, name, dim, size):
DataHandler.start_meshfunction(self, name, dim, size)
fname = os.path.splitext(self.__ofile.name)[0]
self.__ofile_meshfunc = file("%s_%s.xml" % (fname, name), "wb")
xml_writer.write_header_meshfunction(self.__ofile_meshfunc, dim, size)
def add_entity_meshfunction(self, index, value):
DataHandler.add_entity_meshfunction(self, index, value)
xml_writer.write_entity_meshfunction(self.__ofile_meshfunc, index, value)
def end_meshfunction(self):
DataHandler.end_meshfunction(self)
xml_writer.write_footer_meshfunction(self.__ofile_meshfunc)
self.__ofile_meshfunc.close()
self.__ofile_meshfunc = None
def start_domains(self):
    """Open the mesh domains section in the main output file."""
    # Base-class hook intentionally not called here (kept as in original).
    #DataHandler.start_domains(self)
    xml_writer.write_header_domains(self.__ofile)
def end_domains(self):
    """Close the mesh domains section in the main output file."""
    # Base-class hook intentionally not called here (kept as in original).
    #DataHandler.end_domains(self)
    xml_writer.write_footer_domains(self.__ofile)
def start_mesh_value_collection(self, name, dim, size, etype):
    """Open a mesh value collection named *name* of entity type *etype*."""
    DataHandler.start_mesh_value_collection(self, name, dim, size, etype)
    xml_writer.write_header_meshvaluecollection(self.__ofile, name, dim, size, etype)
def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):
    """Write one entry of the current mesh value collection.

    Note: local_entity is only forwarded to the writer, not to the
    base-class bookkeeping call.
    """
    DataHandler.add_entity_mesh_value_collection(self, dim, index, value)
    xml_writer.write_entity_meshvaluecollection(self.__ofile, dim, index, value, local_entity=local_entity)
def end_mesh_value_collection(self):
    """Close the current mesh value collection section."""
    DataHandler.end_mesh_value_collection(self)
    xml_writer.write_footer_meshvaluecollection(self.__ofile)
def close(self):
    """Finalize output: write the mesh footer and close all open files.

    Safe to call more than once; after the main file is closed the
    method returns immediately.
    """
    DataHandler.close(self)
    if self.__ofile.closed:
        return
    xml_writer.write_footer_mesh(self.__ofile)
    self.__ofile.close()
    if self.__ofile_meshfunc is not None:
        self.__ofile_meshfunc.close()
def netcdf2xml(ifilename, ofilename):
    """Convert from NetCDF text dump format (.ncdf) to DOLFIN XML.

    @param ifilename: Name of the NetCDF (ncdump text) input file.
    @param ofilename: Name of the DOLFIN XML output file.
    """
    print("Converting from NetCDF format (.ncdf) to DOLFIN XML format")

    # Open files
    ifile = open(ifilename, "r")
    ofile = open(ofilename, "w")

    cell_type = None
    dim = 0

    # Scan the header for dimension, number of nodes, number of elements.
    # Fix: regex patterns are raw strings now; '\s'/'\d' in plain strings
    # are invalid escape sequences (DeprecationWarning in Python 3).
    while 1:
        line = ifile.readline()
        if not line:
            _error("Empty file")
        if re.search(r"num_dim.*=", line):
            dim = int(re.match(r".*\s=\s(\d+)\s;", line).group(1))
        if re.search(r"num_nodes.*=", line):
            num_vertices = int(re.match(r".*\s=\s(\d+)\s;", line).group(1))
        if re.search(r"num_elem.*=", line):
            num_cells = int(re.match(r".*\s=\s(\d+)\s;", line).group(1))
        if re.search(r"connect1 =", line):
            break

    num_dims = dim

    # Set cell type from the spatial dimension
    if dim == 2:
        cell_type = "triangle"
    if dim == 3:
        cell_type = "tetrahedron"

    # Check that we got the cell type (identity test instead of '== None')
    if cell_type is None:
        _error("Unable to find cell type.")

    # Write header
    xml_writer.write_header_mesh(ofile, cell_type, dim)
    xml_writer.write_header_cells(ofile, num_cells)
    num_cells_read = 0

    # Read and write cells; connectivity is 1-based in NetCDF, 0-based in XML
    while 1:
        line = ifile.readline()
        if not line:
            break
        connect = re.split("[,;]", line)
        if num_dims == 2:
            n0 = int(connect[0]) - 1
            n1 = int(connect[1]) - 1
            n2 = int(connect[2]) - 1
            xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
        elif num_dims == 3:
            n0 = int(connect[0]) - 1
            n1 = int(connect[1]) - 1
            n2 = int(connect[2]) - 1
            n3 = int(connect[3]) - 1
            xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
        num_cells_read += 1
        if num_cells == num_cells_read:
            xml_writer.write_footer_cells(ofile)
            xml_writer.write_header_vertices(ofile, num_vertices)
            break

    coords = [[], [], []]
    coord = -1

    # Skip forward to the coordinate block
    while 1:
        line = ifile.readline()
        if not line:
            _error("Missing data")
        if re.search(r"coord =", line):
            break

    # Read vertices: coordinates come one axis at a time
    while 1:
        line = ifile.readline()
        if not line:
            break
        if re.search(r"\A\s\s\S+,", line):
            coord += 1
            print("Found x_" + str(coord) + " coordinates")
        coords[coord] += line.split()
        if re.search(r";", line):
            break

    # Write vertices (z defaults to 0 in 2D)
    for i in range(num_vertices):
        if num_dims == 2:
            x = float(re.split(",", coords[0].pop(0))[0])
            y = float(re.split(",", coords[1].pop(0))[0])
            z = 0
        if num_dims == 3:
            x = float(re.split(",", coords[0].pop(0))[0])
            y = float(re.split(",", coords[1].pop(0))[0])
            z = float(re.split(",", coords[2].pop(0))[0])
        xml_writer.write_vertex(ofile, i, x, y, z)

    # Write footer
    xml_writer.write_footer_vertices(ofile)
    xml_writer.write_footer_mesh(ofile)

    # Close files
    ifile.close()
    ofile.close()
def exodus2xml(ifilename, ofilename):
    "Convert from Exodus II format to DOLFIN XML."
    print("Converting from Exodus II format to NetCDF format")
    # Intermediate file keeps the input basename with a .ncdf suffix.
    name = ifilename.split(".")[0]
    netcdffilename = name + ".ncdf"
    # Shell out to ncdump to produce a NetCDF text dump, then reuse the
    # NetCDF converter on the result.
    status, output = get_status_output('ncdump ' + ifilename + ' > ' + netcdffilename)
    if status != 0:
        raise IOError("Something wrong while executing ncdump. Is ncdump "\
            "installed on the system?")
    netcdf2xml(netcdffilename, ofilename)
def _error(message):
"Write an error message"
for line in message.split("\n"):
print("*** %s" % line)
sys.exit(2)
def convert2xml(ifilename, ofilename, iformat=None):
    """ Convert a file to the DOLFIN XML format.

    @param ifilename: Name of input file.
    @param ofilename: Name of the XML output file.
    @param iformat: Format of input file; guessed from the suffix when None.
    """
    convert(ifilename, XmlHandler(ofilename), iformat=iformat)
def convert(ifilename, handler, iformat=None):
    """ Convert a file using a provided data handler.
    Note that handler.close is called when this function finishes.
    @param ifilename: Name of input file.
    @param handler: The data handler (instance of L{DataHandler}).
    @param iformat: Format of input file.
    """
    if iformat is None:
        iformat = format_from_suffix(os.path.splitext(ifilename)[1][1:])
    # XXX: Backwards-compat — most converters below still take an output
    # filename rather than a handler, so pull it off the handler if present.
    if hasattr(handler, "_ofilename"):
        ofilename = handler._ofilename
    # Choose conversion
    if iformat == "mesh":
        # Convert from mesh to xml format
        mesh2xml(ifilename, ofilename)
    elif iformat == "gmsh":
        # Convert from gmsh to xml format
        gmsh2xml(ifilename, handler)
    elif iformat == "Triangle":
        # Convert from Triangle to xml format
        triangle2xml(ifilename, ofilename)
    elif iformat == "xml-old":
        # Convert from old to new xml format
        xml_old2xml(ifilename, ofilename)
    elif iformat == "metis":
        # Convert from metis graph to dolfin graph xml format
        metis_graph2graph_xml(ifilename, ofilename)
    elif iformat == "scotch":
        # Convert from scotch graph to dolfin graph xml format
        scotch_graph2graph_xml(ifilename, ofilename)
    elif iformat == "diffpack":
        # Convert from Diffpack tetrahedral grid format to xml format
        diffpack2xml(ifilename, ofilename)
    elif iformat == "abaqus":
        # Convert from abaqus to xml format
        abaqus.convert(ifilename, handler)
    elif iformat == "NetCDF":
        # Convert from NetCDF generated from ExodusII format to xml format
        netcdf2xml(ifilename, ofilename)
    elif iformat == "ExodusII":
        # Convert from ExodusII format to xml format via NetCDF
        exodus2xml(ifilename, ofilename)
    elif iformat == "StarCD":
        # Convert from Star-CD tetrahedral grid format to xml format
        starcd2xml(ifilename, ofilename)
    else:
        _error("Sorry, cannot convert between %s and DOLFIN xml file formats." % iformat)
    # XXX: handler.close messes things for other input formats than abaqus or
    # gmsh, because those converters close their own output files directly.
    if iformat in ("abaqus", "gmsh"):
        handler.close()
def starcd2xml(ifilename, ofilename):
    "Convert from Star-CD tetrahedral grid format to DOLFIN XML."
    print(starcd2xml.__doc__)

    if not os.path.isfile(ifilename[:-3] + "vrt") or not os.path.isfile(ifilename[:-3] + "cel"):
        print("StarCD format requires one .vrt file and one .cel file")
        sys.exit(2)

    # open output file
    ofile = open(ofilename, "w")

    # Open file, the vertices are in a .vrt file
    ifile = open(ifilename[:-3] + "vrt", "r")

    # Bug fix: write_header_mesh lives in xml_writer — the original
    # unqualified call raised NameError at runtime.
    xml_writer.write_header_mesh(ofile, "tetrahedron", 3)

    # Read & write vertices
    # first, read all lines (need to sweep two times through the data)
    lines = ifile.readlines()
    ifile.close()  # fix: the .vrt handle was previously never closed

    # second, find the number of vertices
    counter = 0
    # nodenr_map is needed because starcd supports node numbering like
    # 1, 2, 4 (i.e. 3 is missing)
    nodenr_map = {}
    for line in lines:
        nodenr = int(line[0:15])
        nodenr_map[nodenr] = counter
        counter += 1
    num_vertices = counter

    # third, run over all vertices (fixed-width columns of 16 chars)
    xml_writer.write_header_vertices(ofile, num_vertices)
    for line in lines:
        nodenr = int(line[0:15])
        vertex0 = float(line[15:31])
        vertex1 = float(line[31:47])
        vertex2 = float(line[47:63])
        # values are already floats — no need to re-wrap them in float()
        xml_writer.write_vertex(ofile, nodenr_map[nodenr], vertex0, vertex1, vertex2)
    xml_writer.write_footer_vertices(ofile)

    # Open file, the cells are in a .cel file
    ifile = open(ifilename[:-3] + "cel", "r")

    # Read & write cells
    lines = ifile.readlines()
    ifile.close()

    # second, count the tetrahedral cells
    counter = 0
    for line in lines:
        l = [int(a) for a in line.split()]
        cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
        if node4 > 0:
            if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
                counter += 1
            else:
                print("The file does contain cells that are not tetraheders. The cell number is ", cellnr, " the line read was ", line)
        else:
            # triangles on the surface are silently skipped
            pass
    num_cells = counter

    # third, run over all cells and emit the tetrahedra
    xml_writer.write_header_cells(ofile, num_cells)
    counter = 0
    for line in lines:
        l = [int(a) for a in line.split()]
        cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
        if node4 > 0:
            if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
                xml_writer.write_cell_tetrahedron(ofile, counter, nodenr_map[node0], nodenr_map[node1], nodenr_map[node2], nodenr_map[node4])
                counter += 1
    xml_writer.write_footer_cells(ofile)
    xml_writer.write_footer_mesh(ofile)

    # Close files
    ofile.close()
|
MiroK/dolfin
|
site-packages/dolfin_utils/meshconvert/meshconvert.py
|
Python
|
gpl-3.0
| 50,288
|
[
"NetCDF"
] |
885f7b218095ff0c30e29d14b9999ba0289dc9481ce7abc69e18861d16365844
|
#!/usr/bin/env python
# encoding: utf-8
'''
Created by Brian Cherinka on 2016-03-28 23:30:14
Licensed under a 3-clause BSD license.
Revision History:
Initial Version: 2016-03-28 23:30:14 by Brian Cherinka
Last Modified On: 2016-03-28 23:30:14 by Brian
'''
from __future__ import print_function, division
from brain.db.modelGraph import ModelGraph
import inspect
__author__ = 'Brian Cherinka'
class MarvinDB(object):
    ''' Class designed to handle database related things with Marvin '''

    def __init__(self, dbtype=None, log=None, allowed_releases=None):
        # Requested database flavor; a falsy value disables db setup.
        self.dbtype = dbtype
        # Database handle, set by _setupDB() when the import succeeds.
        self.db = None
        self.log = log
        self.allowed_releases = allowed_releases
        # Accumulates connection error messages (see testDbConnection).
        self.error = []
        self.spaxelpropdict = None
        # Model-class modules, populated by _importModels().
        self.datadb = None
        self.dapdb = None
        self.sampledb = None
        self._init_the_db()

    def _init_the_db(self):
        ''' Initialize the db '''
        if self.dbtype:
            self._setupDB()
        if self.db:
            self._importModels()
            self._setSession()
            self.testDbConnection()
        self._setModelGraph()
        self.cache_bits = []
        if self.db:
            self._addCache()

    def _setupDB(self):
        ''' Try to import the database '''
        # time - 14.8 ms
        # A failed import leaves self.db as None; later steps check for it.
        try:
            from marvin.db.database import db
        except RuntimeError as e:
            self.log.debug('RuntimeError raised: Problem importing db: {0}'.format(e))
            self.db = None
        except ImportError as e:
            self.log.debug('ImportError raised: Problem importing db: {0}'.format(e))
            self.db = None
        else:
            self.db = db

    def _importModels(self):
        ''' Try to import the sql alchemy model classes '''
        # tested lazy imports - speeds init until they get called
        # import lazy_import
        # sampledb = lazy_import.lazy_module("marvin.db.models.SampleModelClasses")
        # time 1.6 seconds
        # Each model module is optional; a failed import is only logged.
        try:
            import marvin.db.models.SampleModelClasses as sampledb
        except Exception as e:
            self.log.debug('Exception raised: Problem importing mangadb SampleModelClasses: {0}'.format(e))
        else:
            self.sampledb = sampledb
        try:
            import marvin.db.models.DataModelClasses as datadb
        except Exception as e:
            self.log.debug('Exception raised: Problem importing mangadb DataModelClasses: {0}'.format(e))
        else:
            self.datadb = datadb
        try:
            import marvin.db.models.DapModelClasses as dapdb
        except Exception as e:
            self.log.debug('Exception raised: Problem importing mangadb DapModelClasses: {0}'.format(e))
        else:
            self.dapdb = dapdb
        self.spaxelpropdict = self._setSpaxelPropDict()

    def has_models(self):
        ''' check if the marvin db has all the models properly loaded '''
        isdata = self.datadb is not None
        isdap = self.dapdb is not None
        issample = self.sampledb is not None
        self.log.info('datadb? {0}'.format(isdata))
        self.log.info('dapdb? {0}'.format(isdap))
        self.log.info('sampledb? {0}'.format(issample))
        return all([isdata, isdap, issample])

    def _setSpaxelPropDict(self):
        ''' Set the SpaxelProp lookup dictionary '''
        # time - 38 us
        # Maps each allowed release to its DAP property table name.
        from marvin.utils.datamodel.dap import datamodel
        spdict = {}
        for release in self.allowed_releases:
            if release in datamodel:
                dm = datamodel[release]
                spdict.update({release: dm.property_table})
        return spdict

    def _getSpaxelProp(self):
        ''' Get the correct SpaxelProp class given an release '''
        # Returns a dict with 'full' and 'clean' class names for the
        # release currently stashed on self._release (set by
        # buildUberClassDict), or Nones when the release is unknown.
        inspdict = self._release in self.spaxelpropdict
        if inspdict:
            specific_spaxelprop = {'full': self.spaxelpropdict[self._release], 'clean':
                                   'Clean{0}'.format(self.spaxelpropdict[self._release])}
        else:
            specific_spaxelprop = {'full': None, 'clean': None}
        return specific_spaxelprop

    def _setSession(self):
        ''' Sets the database session '''
        self.session = self.db.Session() if self.db else None

    def testDbConnection(self):
        ''' Test the database connection to ensure it works. Sets a boolean variable isdbconnected '''
        # time - 4.7 ms
        if self.db and self.datadb:
            try:
                # Query used purely as a connectivity probe; result unused.
                tmp = self.session.query(self.datadb.PipelineVersion).first()
            except Exception as e:
                self.isdbconnected = False
                self.error.append('Error connecting to manga database: {0}'.format(str(e)))
            else:
                self.isdbconnected = True
        else:
            self.isdbconnected = False

    def forceDbOff(self):
        ''' Force the database to turn off '''
        self.db = None
        self.session = None
        self.isdbconnected = False
        self.datadb = None
        self.dapdb = None
        self.sampledb = None

    def forceDbOn(self, dbtype=None):
        ''' Force the database to turn on '''
        # NOTE(review): the dbtype parameter is currently ignored;
        # re-initialization uses the dbtype set at construction time.
        self._init_the_db()

    def generateClassDict(self, module=None, lower=None):
        ''' Generates a dictionary of the Model Classes, based on class name as key, to the object class.
        Selects only those classes in the module with attribute __tablename__
        lower = True makes class name key all lowercase
        '''
        if not module:
            module = self.datadb
        classdict = {}
        for model in inspect.getmembers(module, inspect.isclass):
            keyname = model[0].lower() if lower else model[0]
            if hasattr(model[1], '__tablename__'):
                # only include the spaxelprop table matching the MPL version
                if 'SpaxelProp' in keyname:
                    if keyname in self._getSpaxelProp().values():
                        classdict[keyname] = model[1]
                else:
                    classdict[keyname] = model[1]
        return classdict

    def buildUberClassDict(self, release=None):
        ''' Builds an uber class dictionary from all modelclasses '''
        self._release = release
        classdict = {}
        models = [self.datadb, self.sampledb, self.dapdb]
        for model in models:
            if model:
                modelclasses = self.generateClassDict(module=model)
                classdict.update(modelclasses)
        return classdict

    def _setModelGraph(self):
        ''' Initiates the ModelGraph using all available ModelClasses '''
        models = list(filter(None, [self.datadb, self.sampledb, self.dapdb]))
        if models:
            self.modelgraph = ModelGraph(models)
        else:
            self.modelgraph = None

    def _addCache(self):
        ''' Initialize dogpile caching for relationships
        Caching options. A set of three RelationshipCache options
        which can be applied to Query(), causing the "lazy load"
        of these attributes to be loaded from cache.
        '''
        if self.datadb:
            self.cache_bits.append(self.datadb.data_cache)
        if self.sampledb:
            self.cache_bits.append(self.sampledb.sample_cache)
        if self.dapdb:
            self.cache_bits.append(self.dapdb.dap_cache)
|
sdss/marvin
|
python/marvin/db/marvindb.py
|
Python
|
bsd-3-clause
| 7,361
|
[
"Brian"
] |
ce644f670c678293cb7ec09b9f336bab0d33bb40422527084a9385b311361890
|
"""Mission Pinball Framework Media Controller (mpf-mc) setup.py.
Notes:
This setup script is a modified/customized version of the Kivy setup.py script.
"""
import sys
import re
from copy import deepcopy
import os
from os.path import join, dirname, sep, exists, isdir
from os import walk, environ
from distutils.version import LooseVersion
from distutils.sysconfig import get_python_inc
from collections import OrderedDict
from time import sleep
from sysconfig import get_paths
from setuptools import setup, Extension
print('Using setuptools')
# fix error with py3's LooseVersion comparisons
def ver_equal(self, other):
    """Equality hook comparing a version object's parsed .version to *other*.

    Installed as LooseVersion.__eq__ below to avoid Python 3 comparison
    errors between LooseVersion instances and plain values.
    """
    return self.version == other
# Monkeypatch LooseVersion equality (see ver_equal above its definition).
LooseVersion.__eq__ = ver_equal

# Supported Cython version window (inclusive) used by the check further down.
MIN_CYTHON_STRING = '0.24'
MIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING)
MAX_CYTHON_STRING = '0.29.10'
MAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)
CYTHON_UNSUPPORTED = (
    # ref https://github.com/cython/cython/issues/1968
    '0.27', '0.27.2'
)
# pip-style requirement string covering the window above.
CYTHON_REQUIRES_STRING = (
    'cython>={min_version},<={max_version},{exclusion}'.format(
        min_version=MIN_CYTHON_STRING,
        max_version=MAX_CYTHON_STRING,
        exclusion=','.join('!=%s' % excl for excl in CYTHON_UNSUPPORTED),
    )
)
# File extensions allowed as package data.
PACKAGE_FILES_ALLOWED_EXT = ('py', 'yaml', 'png', 'md', 'zip', 'gif', 'jpg',
                             'mp4', 'm4v', 'so', 'pyd', 'dylib', 'wav', 'ogg',
                             'pxd', 'pyx', 'c', 'h', 'ttf', 'fnt', 'txt')
# True when building on Read the Docs, where compiled extensions are skipped.
on_rtd = os.environ.get('READTHEDOCS') == 'True'
def getoutput(cmd, env=None):
    """Run *cmd* in a shell and return its stdout as bytes.

    On a non-zero exit status a warning (plus any stderr output) is
    printed and an empty string is returned, mirroring the original
    contract.

    Args:
        cmd: Shell command line to execute.
        env: Optional environment mapping for the child process.
    """
    import subprocess
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env)
    # Bug fix: communicate() drains both pipes concurrently, avoiding the
    # deadlock that wait()-then-read() can hit when a pipe buffer fills.
    stdout_content, stderr_content = p.communicate()
    if p.returncode:  # if not returncode == 0
        print('WARNING: A problem occurred while running {0} (code {1})\n'
              .format(cmd, p.returncode))
        if stderr_content:
            print('{0}\n'.format(stderr_content))
        return ""
    return stdout_content
def pkgconfig(*packages, **kw):
    """Collect pkg-config build flags for *packages* into *kw* and return it.

    Recognised flag prefixes (-I, -L, -l) are mapped onto the standard
    Extension keyword lists; unknown tokens are ignored.
    """
    flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
    lenviron = None
    pconfig = join(sys.prefix, 'libs', 'pkgconfig')
    if isdir(pconfig):
        # Extend PKG_CONFIG_PATH with the interpreter's own pkgconfig dir.
        lenviron = environ.copy()
        lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(
            environ.get('PKG_CONFIG_PATH', ''), pconfig)
    cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))
    for token in getoutput(cmd, lenviron).split():
        target = flag_map.get(token[:2].decode('utf-8'))
        if target is None:
            continue
        kw.setdefault(target, []).append(token[2:].decode('utf-8'))
    return kw
# -----------------------------------------------------------------------------
# Determine on which platform we are

platform = sys.platform

# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)
if sys.platform == 'darwin':
    if sys.maxsize > 2 ** 32:
        osx_arch = 'x86_64'
    else:
        osx_arch = 'i386'

# Detect Python for android project (http://github.com/kivy/python-for-android)
ndkplatform = environ.get('NDKPLATFORM')
if ndkplatform is not None and environ.get('LIBLINK'):
    platform = 'android'
kivy_ios_root = environ.get('KIVYIOSROOT', None)
if kivy_ios_root is not None:
    platform = 'ios'
# Presence of these vendor files is used as the Raspberry Pi / Mali marker.
if exists('/opt/vc/include/bcm_host.h'):
    platform = 'rpi'
if exists('/usr/lib/arm-linux-gnueabihf/libMali.so'):
    platform = 'mali'

# -----------------------------------------------------------------------------
# Detect options
#
c_options = OrderedDict()
c_options['use_rpi'] = platform == 'rpi'
c_options['use_mali'] = platform == 'mali'
c_options['use_sdl2'] = True
c_options['use_gstreamer'] = True
c_options['use_avfoundation'] = platform == 'darwin'
c_options['use_osx_frameworks'] = platform == 'darwin'

# now check if environ is changing the default values
# (e.g. USE_SDL2=0 disables SDL2 support)
for key in list(c_options.keys()):
    ukey = key.upper()
    if ukey in environ:
        value = bool(int(environ[ukey]))
        print('Environ change {0} -> {1}'.format(key, value))
        c_options[key] = value
# -----------------------------------------------------------------------------
# Cython check
# Cython usage is optional (.c files are included to build without Cython)
#
# User-facing messages printed by the version check below.
cython_unsupported_append = '''
Please note that the following versions of Cython are not supported
at all: {}
'''.format(', '.join(map(str, CYTHON_UNSUPPORTED)))

cython_min = '''\
This version of Cython is not compatible with MPF-MC. Please upgrade to
at least version {0}, preferably the newest supported version {1}.
If your platform provides a Cython package, make sure you have upgraded
to the newest version. If the newest version available is still too low,
please remove it and install the newest supported Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
           cython_unsupported_append if CYTHON_UNSUPPORTED else '')

cython_max = '''\
This version of Cython is untested with MPF-MC. While this version may
work perfectly fine, it is possible that you may experience issues. If
you do have issues, please downgrade to a supported version. It is
best to use the newest supported version, {1}, but the minimum
supported version is {0}.
If your platform provides a Cython package, check if you can downgrade
to a supported version. Otherwise, uninstall the platform package and
install Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
           cython_unsupported_append if CYTHON_UNSUPPORTED else '')

cython_unsupported = '''\
This version of Cython suffers from known bugs and is unsupported.
Please install the newest supported version, {1}, if possible, but
the minimum supported version is {0}.
If your platform provides a Cython package, check if you can install
a supported version. Otherwise, uninstall the platform package and
install Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
           cython_unsupported_append)
have_cython = False
# Cython build is opt-in via the USE_CYTHON environment variable.
skip_cython = environ.get('USE_CYTHON', False) not in ['1', 'True', 'TRUE', 'true', 'Yes', 'YES', 'y', 'Y']
if skip_cython:
    print("\nSkipping Cython build (using .c files)")
else:
    try:
        # check for cython
        from Cython.Distutils import build_ext
        have_cython = True
        import Cython
        cy_version_str = Cython.__version__
        cy_ver = LooseVersion(cy_version_str)
        print('\nDetected Cython version {}'.format(cy_version_str))
        # Enforce the supported version window defined above.
        if cy_ver < MIN_CYTHON_VERSION:
            print(cython_min)
            raise ImportError('Incompatible Cython Version')
        if cy_ver in CYTHON_UNSUPPORTED:
            print(cython_unsupported)
            raise ImportError('Incompatible Cython Version')
        if cy_ver > MAX_CYTHON_VERSION:
            # Too-new Cython is only a warning, not an error.
            print(cython_max)
            sleep(1)
    except ImportError:
        print("\nCython is missing and the USE_CYTHON environment variable is set to True!\n\n")
        raise

if not have_cython:
    # Fall back to the plain distutils build_ext for the shipped .c files.
    from distutils.command.build_ext import build_ext  # noqa

# -----------------------------------------------------------------------------
# Setup classes

# the build path where kivy is being compiled
src_path = build_path = dirname(__file__)
class CustomBuildExt(build_ext):
    """build_ext that records the out-of-tree build path and links libm."""

    def finalize_options(self):
        retval = build_ext.finalize_options(self)
        global build_path  # noqa
        # Remember where extensions are actually built (unless in-place).
        if (self.build_lib is not None and exists(self.build_lib) and
                not self.inplace):
            build_path = self.build_lib
        return retval

    def build_extensions(self):
        c = self.compiler.compiler_type
        print('Detected compiler is {}'.format(c))
        if c != 'msvc':
            # gcc/clang toolchains need the math library linked explicitly.
            for e in self.extensions:
                e.extra_link_args += ['-lm']
        build_ext.build_extensions(self)
def _check_and_fix_sdl2_mixer(f_path_to_check):
    """Patch the smpeg2 framework inside SDL2_mixer if it references
    @executable_path, which breaks app execution (macOS only)."""
    # Between SDL_mixer 2.0.1 and 2.0.4, the included frameworks changed
    # smpeg2 have been replaced with mpg123, but there is no need to fix.
    smpeg2_path = ("{}/Versions/A/Frameworks/smpeg2.framework"
                   "/Versions/A/smpeg2").format(f_path_to_check)
    if not exists(smpeg2_path):
        return
    print("Check if SDL2_mixer smpeg2 have an @executable_path")
    rpath_from = ("@executable_path/../Frameworks/SDL2.framework"
                  "/Versions/A/SDL2")
    rpath_to = "@rpath/../../../../SDL2.framework/Versions/A/SDL2"
    # getoutput returns bytes; decode before the substring test.
    output = getoutput(("otool -L '{}'").format(smpeg2_path)).decode('utf-8')
    if "@executable_path" not in output:
        return
    print("WARNING: Your SDL2_mixer version is invalid")
    print("WARNING: The smpeg2 framework embedded in SDL2_mixer contains a")
    print("WARNING: reference to @executable_path that will fail the")
    print("WARNING: execution of your application.")
    print("WARNING: We are going to change:")
    print("WARNING: from: {}".format(rpath_from))
    print("WARNING: to: {}".format(rpath_to))
    getoutput("install_name_tool -change {} {} {}".format(
        rpath_from, rpath_to, smpeg2_path))
    # Re-check after patching (bytes comparison this time).
    output = getoutput(("otool -L '{}'").format(smpeg2_path))
    if b"@executable_path" not in output:
        print("WARNING: Change successfully applied!")
        print("WARNING: You'll never see this message again.")
    else:
        print("WARNING: Unable to apply the changes, sorry.")
gst_flags = {}
if platform == 'darwin':
    if c_options['use_osx_frameworks']:
        if osx_arch == "i386":
            print("Warning: building with frameworks fail on i386")
        else:
            print("OSX framework used, force to x86_64 only")
            environ["ARCHFLAGS"] = environ.get("ARCHFLAGS", "-arch x86_64")
            print("OSX ARCHFLAGS are: {}".format(environ["ARCHFLAGS"]))

# detect gstreamer, only on desktop
# works if we forced the options or in autodetection
if c_options['use_gstreamer'] in (None, True):
    gstreamer_valid = False
    if c_options['use_osx_frameworks'] and platform == 'darwin':
        # check the existence of frameworks
        f_path = '/Library/Frameworks/GStreamer.framework'
        if not exists(f_path):
            c_options['use_gstreamer'] = False
            print('GStreamer framework not found, fallback on pkg-config')
        else:
            print('GStreamer framework found')
            gstreamer_valid = True
            c_options['use_gstreamer'] = True
            gst_flags = {
                'extra_link_args': [
                    '-F/Library/Frameworks',
                    '-Xlinker', '-rpath',
                    '-Xlinker', '/Library/Frameworks',
                    '-Xlinker', '-headerpad',
                    '-Xlinker', '190',
                    '-framework', 'GStreamer'],
                'include_dirs': [join(f_path, 'Headers')]}
    elif platform == 'win32':
        # On Windows, try pkg-config first, then look for the gst.h header.
        gst_flags = pkgconfig('gstreamer-1.0')
        if 'libraries' in gst_flags:
            print('GStreamer found via pkg-config')
            gstreamer_valid = True
            c_options['use_gstreamer'] = True
        elif exists(join(get_paths()['include'], 'gst', 'gst.h')):
            print('GStreamer found via gst.h')
            gstreamer_valid = True
            c_options['use_gstreamer'] = True
            gst_flags = {'libraries': ['gstreamer-1.0', 'glib-2.0', 'gobject-2.0']}
    if not gstreamer_valid:
        # use pkg-config approach instead
        gst_flags = pkgconfig('gstreamer-1.0')
        if 'libraries' in gst_flags:
            print('GStreamer found via pkg-config')
            c_options['use_gstreamer'] = True
# detect SDL2
# works if we forced the options or in autodetection
sdl2_flags = {}
if c_options['use_sdl2'] in (None, True):
    sdl2_valid = False
    if c_options['use_osx_frameworks'] and platform == 'darwin':
        # check the existence of frameworks
        sdl2_valid = True
        sdl2_flags = {
            'extra_link_args': [
                '-F/Library/Frameworks',
                '-Xlinker', '-rpath',
                '-Xlinker', '/Library/Frameworks',
                '-Xlinker', '-headerpad',
                '-Xlinker', '190'],
            'include_dirs': [],
            'extra_compile_args': ['-F/Library/Frameworks']
        }
        # All three frameworks must be present for the build to proceed.
        for name in ('SDL2', 'SDL2_image', 'SDL2_mixer'):
            f_path = '/Library/Frameworks/{}.framework'.format(name)
            if not exists(f_path):
                print('Missing framework {}'.format(f_path))
                sdl2_valid = False
                continue
            sdl2_flags['extra_link_args'] += ['-framework', name]
            sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]
            print('Found sdl2 frameworks: {}'.format(f_path))
            if name == 'SDL2_mixer':
                _check_and_fix_sdl2_mixer(f_path)
        if not sdl2_valid:
            c_options['use_sdl2'] = False
            print('SDL2 frameworks not found, fallback on pkg-config')
        else:
            c_options['use_sdl2'] = True
            print('Activate SDL2 compilation')
    if not sdl2_valid and platform != "ios":
        # use pkg-config approach instead
        sdl2_flags = pkgconfig('sdl2', 'SDL2_image', 'SDL2_mixer')
        if 'libraries' in sdl2_flags:
            print('SDL2 found via pkg-config')
            c_options['use_sdl2'] = True
# -----------------------------------------------------------------------------
# declare flags
def get_modulename_from_file(filename_to_check):
    """Derive the dotted module name from a source-file path.

    Strips the file extension, drops every path component before the
    first 'mpfmc', and collapses a doubled leading 'mpfmc/mpfmc'.
    """
    normalized = filename_to_check.replace(sep, '/')
    stem = '.'.join(normalized.split('.')[:-1])
    parts = stem.split('/')
    # Discard leading components until the package root is reached.
    while parts[0] != 'mpfmc':
        del parts[0]
    if parts[1] == 'mpfmc':
        del parts[0]
    return '.'.join(parts)
def expand(root_path, *args):
    """Join *args* onto the 'mpfmc' package directory under *root_path*."""
    return join(root_path, 'mpfmc', *args)
class CythonExtension(Extension):
    """Extension subclass that carries Cython compiler directives."""

    def __init__(self, *args, **kwargs):
        Extension.__init__(self, *args, **kwargs)
        self.cython_directives = {
            'c_string_encoding': 'utf-8',
            'profile': 'USE_PROFILE' in environ,
            # Bug fix: environ values are strings, so the original
            # comparison "== 1" (an int) could never be true; compare
            # against the string '1' instead.
            'embedsignature': environ.get('USE_EMBEDSIGNATURE', '0') == '1',
            'language_level': 3,
            'unraisable_tracebacks': True}
        # XXX with pip, setuptools is imported before distutils, and change
        # our pyx to c, then, cythonize doesn't happen. So force again our
        # sources
        self.sources = args[1]
def merge(d1, *args):
    """Return a deep copy of *d1* with every dict in *args* folded in.

    Keys present on both sides have their (list) values concatenated;
    new keys are deep-copied in. None of the inputs is mutated.
    """
    merged = deepcopy(d1)
    for extra in args:
        for key, value in extra.items():
            value = deepcopy(value)
            if key in merged:
                merged[key].extend(value)
            else:
                merged[key] = value
    return merged
def determine_base_flags():
    """Assemble the baseline compiler/linker flags for the current platform."""
    flags = {
        'libraries': [],
        'include_dirs': [join(src_path, 'kivy', 'include')],
        'library_dirs': [],
        'extra_link_args': [],
        'extra_compile_args': []}
    if platform.startswith('freebsd'):
        flags['include_dirs'] += [join(
            environ.get('LOCALBASE', '/usr/local'), 'include')]
        flags['library_dirs'] += [join(
            environ.get('LOCALBASE', '/usr/local'), 'lib')]
    elif platform == 'darwin':
        v = os.uname()
        # NOTE(review): lexicographic compare of the Darwin kernel release
        # string — '9.0.0' sorts above '13.0.0'; verify on older macOS.
        if v[2] >= '13.0.0':
            # use xcode-select to search on the right Xcode path
            # XXX use the best SDK available instead of a specific one
            import platform as _platform
            xcode_dev = getoutput('xcode-select -p').splitlines()[0]
            sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])
            print('Xcode detected at {}, and using OS X{} sdk'.format(
                xcode_dev, sdk_mac_ver))
            sysroot = join(
                xcode_dev.decode('utf-8'),
                'Platforms/MacOSX.platform/Developer/SDKs',
                'MacOSX{}.sdk'.format(sdk_mac_ver),
                'System/Library/Frameworks')
        else:
            sysroot = ('/System/Library/Frameworks/'
                       'ApplicationServices.framework/Frameworks')
        flags['extra_compile_args'] += ['-F%s' % sysroot]
        flags['extra_link_args'] += ['-F%s' % sysroot]
    elif platform == 'win32':
        flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]
        flags['library_dirs'] += [join(sys.prefix, "libs")]
    return flags
def determine_sdl2():
    """Work out include/lib flags for SDL2, or {} when SDL2 is unusable."""
    flags = {}
    if not c_options['use_sdl2']:
        return flags
    sdl2_path = environ.get('KIVY_SDL2_PATH', None)
    if sdl2_flags and not sdl2_path and platform == 'darwin':
        return sdl2_flags
    # no pkgconfig info, or we want to use a specific sdl2 path, so perform
    # manual configuration
    flags['libraries'] = ['SDL2', 'SDL2_image', 'SDL2_mixer']
    split_chr = ';' if platform == 'win32' else ':'
    sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []
    if not sdl2_paths:
        sdl_inc = join(sys.prefix, 'include', 'SDL2')
        if isdir(sdl_inc):
            sdl2_paths = [sdl_inc]
        sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])
    flags['include_dirs'] = sdl2_paths
    flags['extra_link_args'] = []
    flags['extra_compile_args'] = []
    flags['library_dirs'] = (
        sdl2_paths if sdl2_paths else
        ['/usr/local/lib/'])
    if sdl2_flags:
        flags = merge(flags, sdl2_flags)
    # ensure headers for all the SDL2 and sub libraries are available
    libs_to_check = ['SDL', 'SDL_mixer', 'SDL_image']
    can_compile = True
    for lib in libs_to_check:
        found = False
        for d in flags['include_dirs']:
            header_file = join(d, '{}.h'.format(lib))
            if exists(header_file):
                found = True
                print('SDL2: found {} header at {}'.format(lib, header_file))
                break
        if not found:
            print('SDL2: missing sub library {}'.format(lib))
            can_compile = False
    if not can_compile:
        # Disable SDL2 entirely when any sub-library header is missing.
        c_options['use_sdl2'] = False
        return {}
    return flags
base_flags = determine_base_flags()

# -----------------------------------------------------------------------------
# sources to compile
# Maps each .pyx relative path to its per-extension flag overrides.
sources = {
    'core/audio/sound_file.pyx': {
        'depends': ['core/audio/sdl2_helper.h', 'core/audio/gstreamer_helper.h']},
    'core/audio/track.pyx': {
        'depends': ['core/audio/sdl2_helper.h', 'core/audio/gstreamer_helper.h']},
    'core/audio/track_standard.pyx': {
        'depends': ['core/audio/sdl2_helper.h', 'core/audio/gstreamer_helper.h']},
    'core/audio/track_sound_loop.pyx': {
        'depends': ['core/audio/sdl2_helper.h', 'core/audio/gstreamer_helper.h']},
    'core/audio/audio_interface.pyx': {
        'depends': ['core/audio/sdl2_helper.h', 'core/audio/gstreamer_helper.h']},
    'core/audio/playlist_controller.pyx': {},
    'uix/bitmap_font/bitmap_font.pyx': {'depends': ['core/audio/sdl2.pxi', ]}
}

if c_options["use_sdl2"] and not on_rtd:
    sdl2_flags = determine_sdl2()
else:
    sdl2_flags = {}

# Fold the platform/gst/sdl2 flags into every extension's flag dict.
if sdl2_flags:
    for source_file, depends in sources.items():
        sources[source_file] = merge(
            base_flags, gst_flags, sdl2_flags, depends)
# -----------------------------------------------------------------------------
# extension modules
def get_extensions_from_sources(sources_to_search):
    """Build a CythonExtension for each entry of *sources_to_search*.

    Keys are ``.pyx`` paths relative to the package root; values are dicts of
    per-file build flags.  Returns the list of extension objects.
    """
    extensions = []
    for source, source_flags in sources_to_search.items():
        source = expand(src_path, source)
        dep_files = [expand(src_path, dep)
                     for dep in source_flags.pop('depends', [])]
        extra_c_files = [expand(src_path, dep)
                         for dep in source_flags.pop('c_depends', [])]
        if not have_cython:
            # No Cython available: build from the pre-generated C file.
            source = '%s.c' % source[:-4]
        # Dependencies that are themselves compilable become extra sources.
        compiled_deps = [dep for dep in dep_files
                         if dep.rsplit('.', 1)[-1] in ('c', 'cpp', 'm')]
        # Keep only truthy flag values, but always record the dependencies.
        cleaned_flags = {'depends': dep_files}
        cleaned_flags.update((key, value)
                             for key, value in source_flags.items() if value)
        extensions.append(CythonExtension(
            get_modulename_from_file(source),
            [source] + compiled_deps + extra_c_files,
            **cleaned_flags))
    return extensions
# Build the extension list.  On Read The Docs only the pure-Python sources
# are needed, so no extensions are compiled there.
# NOTE: a stray debug `print(sources)` was removed here -- it spammed the
# full flags dict into every setup.py invocation's output.
if not on_rtd:
    ext_modules = get_extensions_from_sources(sources)
else:
    ext_modules = []
# -----------------------------------------------------------------------------
# Get the version number of mpf-mc and the required version of MPF by reading
# the file directly. We can't import it because that would import mpf and
# break the setup. Details here:
# http://stackoverflow.com/questions/458550/standard-way-to-embed-version
# -into-python-package
version_file = "mpfmc/_version.py"
# Use a context manager so the handle is closed deterministically; the
# original `open(...).read()` leaked the file object until GC.
with open(version_file, "rt") as _vf:
    version_file_content = _vf.read()

# Matches the `__version__ = '...'` line in mpfmc/_version.py.
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(version_re, version_file_content, re.M)
if mo:
    mc_version = mo.group(1)
else:
    raise RuntimeError(
        "Unable to find version string in %s." % (version_file,))

# This section pulls the MPF required version from the mpf-mc version file so
# we can write that as a requirement below
mpf_version_re = r"^__mpf_version_required__ = ['\"]([^'\"]*)['\"]"
mo = re.search(mpf_version_re, version_file_content, re.M)
if mo:
    mpf_version = mo.group(1)
else:
    raise RuntimeError("Unable to find MPF version string in %s." % (
        version_file,))
# Runtime dependencies.  Versions are pinned; when bumping any of the
# kivy-deps pins below, update appveyor.yaml to match (see note inline).
install_requires = ['ruamel.yaml==0.15.100',  # better YAML library
                    'mpf>={}'.format(mpf_version),
                    'kivy==1.11.1',
                    'psutil==5.7.0',
                    'Pygments==2.3.1',  # YAML syntax formatting for the iMC
                    'pypiwin32==223;platform_system=="Windows"',
                    # also update those in appveyor.yaml if you change versions
                    'kivy-deps.sdl2==0.1.23;platform_system=="Windows"',
                    'kivy-deps.sdl2-dev==0.1.23;platform_system=="Windows"',
                    'kivy-deps.glew==0.1.12;platform_system=="Windows"',
                    'kivy-deps.glew-dev==0.1.12;platform_system=="Windows"',
                    'kivy-deps.gstreamer==0.1.18;platform_system=="Windows"',
                    'kivy-deps.gstreamer-dev==0.1.18;platform_system=="Windows"',
                    'ffpyplayer==4.3.1'
                    ]
# If we're running on Read The Docs, then we just need to copy the files
# (since mpf-docs uses the test YAML files in the doc build), and we don't
# need to actually install mpf-mc, so override the installation requirements:
if on_rtd:
    install_requires = []
# -----------------------------------------------------------------------------
# automatically detect package files
# Collect every data file under mpfmc/ whose extension is listed in
# PACKAGE_FILES_ALLOWED_EXT, as POSIX-style paths relative to the package
# directory (what setuptools' package_data expects).
package_files = dict(mpfmc=list())
for root, subFolders, files in walk('mpfmc'):
    for fn in files:
        ext = fn.split('.')[-1].lower()
        if ext not in PACKAGE_FILES_ALLOWED_EXT:
            continue
        filename = join(root, fn)
        # Drop the leading 'mpfmc' path component and normalize to '/'.
        # (An unused `directory = dirname(filename)` local was removed.)
        package_files['mpfmc'].append('/'.join(filename.split(os.sep)[1:]))
# -----------------------------------------------------------------------------
# setup !
setup(
name='mpf-mc',
version=mc_version,
author='The Mission Pinball Framework Team',
author_email='brian@missionpinball.org',
url='http://missionpinball.org',
license='MIT',
description='Mission Pinball Framework Media Controller',
long_description='''Graphics, video, and audio engine for the
Mission Pinball Framework.
The Mission Pinball Framework Media Controller (MPF-MC) is a component
of the Mission Pinball Framework (MPF) that controls graphics and
sound, including dot matrix displays (DMDs), LCD displays, and color
RGB LED displays.
(The MPF media controller architecture is modular, so you can use this
MPF-MC package or another one.)
The MPF-MC is built on Kivy and leverages SDL2, OpenGL, and
GPU-accelerated hardware.
MPF is a work-in-progress that is not yet complete, though we're
actively developing it and checking in several commits a week. It's
MIT licensed, actively developed by fun people, and supported by a
vibrant pinball-loving community.''',
keywords='pinball',
ext_modules=ext_modules,
cmdclass={'build_ext': CustomBuildExt},
packages=['mpfmc'],
package_dir={'mpfmc': 'mpfmc'},
package_data=package_files,
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Topic :: Artistic Software',
'Topic :: Games/Entertainment :: Arcade'
],
install_requires=install_requires,
tests_require=[],
entry_points='''
[mpf.config_player]
sound_player=mpfmc.config_players.plugins.sound_player:register_with_mpf
sound_loop_player=mpfmc.config_players.plugins.sound_loop_player:register_with_mpf
playlist_player=mpfmc.config_players.plugins.playlist_player:register_with_mpf
widget_player=mpfmc.config_players.plugins.widget_player:register_with_mpf
slide_player=mpfmc.config_players.plugins.slide_player:register_with_mpf
track_player=mpfmc.config_players.plugins.track_player:register_with_mpf
display_light_player=mpfmc.config_players.plugins.display_light_player:register_with_mpf
[mpf.command]
mc=mpfmc.commands.mc:get_command
imc=mpfmc.commands.imc:get_command
''',
setup_requires=[CYTHON_REQUIRES_STRING] if not skip_cython else [])
|
missionpinball/mpf_mc
|
setup.py
|
Python
|
mit
| 26,075
|
[
"Brian"
] |
7473ce4315ea7d9de3730b8389299274cf882f103af9dfccff92fe7c4bc54887
|
"""K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.sparsefuncs import mean_variance_axis0
from ..utils import check_arrays
from ..utils import check_random_state
from ..utils import atleast2d_or_csr
from ..utils import as_float_array
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, n_local_trials=None, random_state=None,
            x_squared_norms=None):
    """Init n_clusters seeds according to k-means++
    Parameters
    -----------
    X: array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).
    n_clusters: integer
        The number of seeds to choose
    n_local_trials: integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    x_squared_norms: array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it at
        hands already to avoid it being recomputed here. Default: None
    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007
    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    n_samples, n_features = X.shape
    random_state = check_random_state(random_state)
    centers = np.empty((n_clusters, n_features))
    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))
    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if sp.issparse(X):
        centers[0] = X[center_id].toarray()
    else:
        centers[0] = X[center_id]
    # Initialize list of closest distances and calculate current potential
    # closest_dist_sq[j] = squared distance from sample j to its nearest
    # already-chosen center; current_pot is the total potential (inertia).
    if x_squared_norms is None:
        x_squared_norms = _squared_norms(X)
    closest_dist_sq = euclidean_distances(
        centers[0], X, Y_norm_squared=x_squared_norms, squared=True)
    current_pot = closest_dist_sq.sum()
    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        # (inverse-CDF sampling over the cumulative squared distances).
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
        # Decide which candidate is the best
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            # Compute potential when including center candidate
            new_dist_sq = np.minimum(closest_dist_sq,
                                     distance_to_candidates[trial])
            new_pot = new_dist_sq.sum()
            # Store result if it is the best local trial so far
            if (best_candidate is None) or (new_pot < best_pot):
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq
        # Permanently add best center candidate found in local tries
        if sp.issparse(X):
            centers[c] = X[best_candidate].toarray()
        else:
            centers[c] = X[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq
    return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis0(X)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances=True,
            n_init=10, max_iter=300, verbose=False,
            tol=1e-4, random_state=None, copy_x=True, n_jobs=1):
    """K-means clustering algorithm.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    copy_x : boolean, optional
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debuging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    """
    random_state = check_random_state(random_state)
    # NOTE: a dead `best_inertia = np.infty` assignment was removed here --
    # best_inertia is unconditionally re-initialized below before first use,
    # and np.infty was removed in NumPy 2.0.
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)
    # subtract of mean of x for more accurate distance computations
    if not sp.issparse(X) or hasattr(init, '__array__'):
        X_mean = X.mean(axis=0)
        if not sp.issparse(X):
            if copy_x:
                X = X.copy()
            X -= X_mean
        if hasattr(init, '__array__'):
            init = np.asarray(init).copy()
            init -= X_mean
            # Explicit centers make multiple inits pointless: every run
            # would start from the same place.
            if not n_init == 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in the k-means instead of %d'
                    % n_init, RuntimeWarning, stacklevel=2)
                n_init = 1
    # precompute squared norms of data points
    x_squared_norms = _squared_norms(X)
    best_labels, best_inertia, best_centers = None, None, None
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers = _kmeans_single(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                precompute_distances=precompute_distances, tol=tol,
                x_squared_norms=x_squared_norms, random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
                                    init=init, verbose=verbose, tol=tol,
                                    precompute_distances=precompute_distances,
                                    x_squared_norms=x_squared_norms,
                                    # Change seed to ensure variety
                                    random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
    if not sp.issparse(X):
        if not copy_x:
            # Restore the caller's (uncentered) data in place.
            X += X_mean
        best_centers += X_mean
    return best_centers, best_labels, best_inertia
def _kmeans_single(X, n_clusters, max_iter=300, init='k-means++',
                   verbose=False, x_squared_norms=None, random_state=None,
                   tol=1e-4, precompute_distances=True):
    """A single run of k-means, assumes preparation completed prior.
    Parameters
    ----------
    X: array-like of floats, shape (n_samples, n_features)
        The observations to cluster.
    k: int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter: int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    tol: float, optional
        The relative increment in the results before declaring convergence.
    verbose: boolean, optional
        Verbosity mode
    x_squared_norms: array, optional
        Precomputed x_squared_norms. Calculated if not given.
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    Returns
    -------
    centroid: float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label: integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia: float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    """
    random_state = check_random_state(random_state)
    if x_squared_norms is None:
        x_squared_norms = _squared_norms(X)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print('Initialization complete')
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()
        # labels assignement is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means is also called the M-step of EM
        # (the cython helpers recompute each center as the mean of its
        # assigned samples; `distances` is used for empty-cluster handling).
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)
        if verbose:
            print('Iteration %i, inertia %s' % (i, inertia))
        # Track the lowest-inertia assignment seen so far; inertia is not
        # guaranteed to decrease monotonically across iterations here.
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # Converged once the total squared movement of the centers falls
        # below the dataset-scaled tolerance.
        if np.sum((centers_old - centers) ** 2) < tol:
            if verbose:
                print('Converged to similar centers at iteration', i)
            break
    return best_labels, best_inertia, best_centers
def _squared_norms(X):
"""Compute the squared euclidean norms of the rows of X"""
if sp.issparse(X):
return _k_means.csr_row_norm_l2(X, squared=True)
else:
# TODO: implement a cython version to avoid the memory copy of the
# input data
return (X ** 2).sum(axis=1)
def _labels_inertia_precompute_dense(X, x_squared_norms, centers):
    """Assign labels and compute inertia using a full distance matrix.

    Computes the whole (k, n_samples) matrix of squared distances at once:
    faster than the streaming cython path, at the cost of memory.

    Parameters
    ----------
    X: array, shape (n_samples, n_features)
        The samples to assign.
    x_squared_norms: array, shape (n_samples,)
        Precomputed squared norms of the rows of X.
    centers: array, shape (k, n_features)
        The cluster centers.

    Returns
    -------
    labels: int32 array, shape (n_samples,)
        Index of the closest center for each sample.
    inertia: float
        Sum of squared distances of each sample to its closest center.
    """
    n_samples = X.shape[0]
    k = centers.shape[0]
    distances = euclidean_distances(centers, X, x_squared_norms,
                                    squared=True)
    labels = np.empty(n_samples, dtype=np.int32)
    labels.fill(-1)
    mindist = np.empty(n_samples)
    # BUGFIX: np.inf instead of np.infty (the latter was removed in NumPy 2.0).
    mindist.fill(np.inf)
    # Row-by-row argmin: keeps only one running minimum vector in memory.
    for center_id in range(k):
        dist = distances[center_id]
        labels[dist < mindist] = center_id
        mindist = np.minimum(dist, mindist)
    inertia = mindist.sum()
    return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
                    precompute_distances=True, distances=None):
    """E step of the K-means EM algorithm
    Compute the labels and the inertia of the given samples and centers
    Parameters
    ----------
    X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The input samples to assign to the labels.
    x_squared_norms: array, shape (n_samples,)
        Precomputed squared euclidean norm of each data point, to speed up
        computations.
    centers: float64 array, shape (k, n_features)
        The cluster centers.
    distances: float64 array, shape (n_samples,)
        Distances for each sample to its closest center.
    Returns
    -------
    labels: int array of shape(n)
        The resulting assignment
    inertia: float
        The value of the inertia criterion with the assignment
    """
    n_samples = X.shape[0]
    # set the default value of centers to -1 to be able to detect any anomaly
    # easily
    labels = - np.ones(n_samples, np.int32)
    if distances is None:
        # Zero-length placeholder: signals the cython helpers not to record
        # per-sample distances.
        distances = np.zeros(shape=(0,), dtype=np.float64)
    if sp.issparse(X):
        # `labels` (and `distances`, when non-empty) are filled in place by
        # the cython helper; only the inertia is returned.
        inertia = _k_means._assign_labels_csr(
            X, x_squared_norms, centers, labels, distances=distances)
    else:
        if precompute_distances:
            return _labels_inertia_precompute_dense(X, x_squared_norms,
                                                    centers)
        inertia = _k_means._assign_labels_array(
            X, x_squared_norms, centers, labels, distances=distances)
    return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                    init_size=None):
    """Compute the initial centroids
    Parameters
    ----------
    X: array, shape (n_samples, n_features)
    k: int
        number of centroids
    init: {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    x_squared_norms: array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it at
        hands already to avoid it being recomputed here. Default: None
    init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accurracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.
    Returns
    -------
    centers: array, shape(k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]
    if init_size is not None and init_size < n_samples:
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        # BUGFIX: random_integers() was deprecated and later removed from
        # NumPy.  randint(0, n) draws from [0, n) -- the same values (and
        # the same RNG stream) as random_integers(0, n - 1).
        init_indices = random_state.randint(0, n_samples, init_size)
        X = X[init_indices]
        x_squared_norms = x_squared_norms[init_indices]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))
    if init == 'k-means++':
        centers = _k_init(X, k, random_state=random_state,
                          x_squared_norms=x_squared_norms)
    elif init == 'random':
        seeds = random_state.permutation(n_samples)[:k]
        centers = X[seeds]
    elif hasattr(init, '__array__'):
        centers = init
    elif callable(init):
        centers = init(X, k, random_state=random_state)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))
    if sp.issparse(centers):
        centers = centers.toarray()
    if len(centers) != k:
        raise ValueError('The shape of the inital centers (%s) '
                         'does not match the number of clusters %i'
                         % (centers.shape, k))
    return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
    """K-Means clustering
    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int
        Maximum number of iterations of the k-means algorithm for a
        single run.
    n_init: int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    precompute_distances : boolean
        Precompute distances (faster but takes more memory).
    tol: float, optional default: 1e-4
        Relative tolerance w.r.t. inertia to declare convergence
    n_jobs: int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debuging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    Attributes
    ----------
    `cluster_centers_`: array, [n_clusters, n_features]
        Coordinates of cluster centers
    `labels_`:
        Labels of each point
    `inertia_`: float
        The value of the inertia criterion associated with the chosen
        partition.
    Notes
    ------
    The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), were n is the number of
    samples and T is the number of iteration.
    The worst case complexity is given by O(n^(k+2/p)) with
    n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
    'How slow is the k-means method?' SoCG2006)
    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it falls in local minima. That's why
    it can be useful to restart it several times.
    See also
    --------
    MiniBatchKMeans:
        Alternative online implementation that does incremental updates
        of the centers positions using mini-batches.
        For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster to than the default batch implementation.
    """

    def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
                 tol=1e-4, precompute_distances=True,
                 verbose=0, random_state=None, copy_x=True, n_jobs=1, k=None):
        # An explicit array of centers fixes both the number of clusters and
        # the initialization.
        if hasattr(init, '__array__'):
            n_clusters = init.shape[0]
            init = np.asanyarray(init, dtype=np.float64)
        self.n_clusters = n_clusters
        self.k = k  # legacy alias; kept only for backward compatibility
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs

    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = atleast2d_or_csr(X, dtype=np.float64)
        if X.shape[0] < self.n_clusters:
            raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
                X.shape[0], self.n_clusters))
        return X

    def _check_test_data(self, X):
        """Validate data passed to transform/predict/score after fitting."""
        X = atleast2d_or_csr(X)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError("Incorrect number of features. "
                             "Got %d features, expected %d" % (
                                 n_features, expected_n_features))
        # BUGFIX: compare str values with `!=`, not `is` -- identity of
        # string literals is a CPython implementation detail (and raises a
        # SyntaxWarning on modern interpreters).
        if X.dtype.kind != 'f':
            warnings.warn("Got data type %s, converted to float "
                          "to avoid overflows" % X.dtype,
                          RuntimeWarning, stacklevel=2)
            # BUGFIX: np.float (alias of builtin float, removed in
            # NumPy 1.24) -> np.float64, which is the same dtype.
            X = X.astype(np.float64)
        return X

    def _check_fitted(self):
        """Raise if fit() has not been called yet."""
        if not hasattr(self, "cluster_centers_"):
            raise AttributeError("Model has not been trained yet.")

    def fit(self, X, y=None):
        """Compute k-means clustering.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        """
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        self.cluster_centers_, self.labels_, self.inertia_ = k_means(
            X, n_clusters=self.n_clusters, init=self.init, n_init=self.n_init,
            max_iter=self.max_iter, verbose=self.verbose,
            precompute_distances=self.precompute_distances,
            tol=self.tol, random_state=random_state, copy_x=self.copy_x,
            n_jobs=self.n_jobs)
        return self

    def fit_predict(self, X):
        """Compute cluster centers and predict cluster index for each sample.
        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_

    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.
        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        X = self._check_fit_data(X)
        return self.fit(X)._transform(X)

    def transform(self, X, y=None):
        """Transform X to a cluster-distance space
        In the new space, each dimension is the distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.
        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        self._check_fitted()
        X = self._check_test_data(X)
        return self._transform(X)

    def _transform(self, X):
        """guts of transform method; no input validation"""
        return euclidean_distances(X, self.cluster_centers_)

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        Y : array, shape [n_samples,]
            Index of the closest center each sample belongs to.
        """
        self._check_fitted()
        X = self._check_test_data(X)
        x_squared_norms = _squared_norms(X)
        return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]

    def score(self, X):
        """Opposite of the value of X on the K-means objective.
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.
        Returns
        -------
        score: float
            Opposite of the value of X on the K-means objective.
        """
        self._check_fitted()
        X = self._check_test_data(X)
        x_squared_norms = _squared_norms(X)
        return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
                     old_center_buffer, compute_squared_diff,
                     distances=None, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm.

    Parameters
    ----------
    X: array, shape (n_samples, n_features)
        The original data array.
    x_squared_norms: array, shape (n_samples,)
        Squared euclidean norm of each data point.
    centers: array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE
    counts: array, shape (k,)
        The vector in which we keep track of the numbers of elements in a
        cluster. This array is MODIFIED IN PLACE
    distances: array, dtype float64, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    random_reassign: boolean, optional
        If True, centers with very low counts are
        randomly-reassigned to observations in dense areas.
    reassignment_ratio: float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    verbose: bool, optional
        Controls the verbosity

    Returns
    -------
    (inertia, squared_diff) : tuple of floats
        inertia is the sum of distances of the batch samples to their
        closest center; squared_diff is the squared movement of the
        dense centers (0.0 when compute_squared_diff is false; for the
        sparse path the second value comes from the cython helper).
    """
    # Perform label assignement to nearest centers
    nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low counts
        to_reassign = np.logical_or(
            (counts <= 1), counts <= reassignment_ratio * counts.max())
        number_of_reassignments = to_reassign.sum()
        if number_of_reassignments:
            # Pick new clusters amongst observations with a probability
            # proportional to their closeness to their center
            distance_to_centers = np.asarray(centers[nearest_center] - X)
            distance_to_centers **= 2
            distance_to_centers = distance_to_centers.sum(axis=1)
            # Flip the ordering of the distances so that FAR samples get
            # the LARGEST weight in the sampling below
            distance_to_centers -= distance_to_centers.max()
            distance_to_centers *= -1
            # Weighted sampling via inverse-CDF on the cumulative sums
            rand_vals = random_state.rand(number_of_reassignments)
            rand_vals *= distance_to_centers.sum()
            new_centers = np.searchsorted(distance_to_centers.cumsum(),
                                          rand_vals)
            new_centers = X[new_centers]
            if verbose:
                n_reassigns = to_reassign.sum()
                if n_reassigns:
                    print("[_mini_batch_step] Reassigning %i cluster centers."
                          % n_reassigns)
            # Densify only when the destination array is dense
            if sp.issparse(new_centers) and not sp.issparse(centers):
                new_centers = new_centers.toarray()
            centers[to_reassign] = new_centers
    # implementation for the sparse CSR reprensation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)
    # dense variant in mostly numpy (not as memory efficient though)
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()
        if count > 0:
            if compute_squared_diff:
                # snapshot the old center so we can measure its movement
                old_center_buffer[:] = centers[center_idx]
            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]
            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)
            # update the count statistics for this center
            counts[center_idx] += count
            # inplace rescale to compute mean of all points (old and new)
            centers[center_idx] /= counts[center_idx]
            # update the squared diff if necessary
            if compute_squared_diff:
                squared_diff += np.sum(
                    (centers[center_idx] - old_center_buffer) ** 2)
    return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulte the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
'mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers postion (using EWA smoothing)
if tol > 0.0 and ewa_diff < tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if (ewa_inertia_min is None or ewa_inertia < ewa_inertia_min):
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across sucessive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
    """Mini-Batch K-Means clustering

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.
    max_no_improvement : int, optional
        Control early stopping based on the consecutive number of mini
        batches that does not yield an improvement on the smoothed inertia.
        To disable convergence detection based on inertia, set
        max_no_improvement to None.
    tol : float, optional
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized of the mean center
        squared position changes. This early stopping heuristics is
        closer to the one used for the batch variant of the algorithms
        but induces a slight computational and memory overhead over the
        inertia heuristic.
        To disable convergence detection based on normalized center
        change, set tol to 0.0 (default).
    batch_size: int, optional, default: 100
        Size of the mini batches.
    init_size: int, optional, default: 3 * batch_size
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accurracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    compute_labels: boolean
        Compute label assignements and inertia for the complete dataset
        once the minibatch optimization has converged in fit.
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    reassignment_ratio: float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.

    Attributes
    ----------
    `cluster_centers_`: array, [n_clusters, n_features]
        Coordinates of cluster centers
    `labels_`:
        Labels of each point (if compute_labels is set to True).
    `inertia_`: float
        The value of the inertia criterion associated with the chosen
        partition (if compute_labels is set to True). The inertia is
        defined as the sum of square distances of samples to their nearest
        neighbor.

    Notes
    -----
    See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
    """

    def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                 batch_size=100, verbose=0, compute_labels=True,
                 random_state=None, tol=0.0, max_no_improvement=10,
                 init_size=None, n_init=3, k=None,
                 reassignment_ratio=0.01):
        # Shared hyper-parameters are handled by the KMeans base class.
        super(MiniBatchKMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            verbose=verbose, random_state=random_state, tol=tol, n_init=n_init,
            k=k)
        # Mini-batch specific hyper-parameters.
        self.max_no_improvement = max_no_improvement
        self.batch_size = batch_size
        self.compute_labels = compute_labels
        self.init_size = init_size
        self.reassignment_ratio = reassignment_ratio

    def fit(self, X, y=None):
        """Compute the centroids on X by chunking it into mini-batches.

        Parameters
        ----------
        X: array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster
        """
        # y is ignored; kept only for estimator API compatibility.
        random_state = check_random_state(self.random_state)
        X = check_arrays(X, sparse_format="csr", copy=False,
                         check_ccontiguous=True, dtype=np.float64)[0]
        n_samples, n_features = X.shape
        if n_samples < self.n_clusters:
            raise ValueError("Number of samples smaller than number "
                             "of clusters.")
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=np.float64)
        x_squared_norms = _squared_norms(X)
        if self.tol > 0.0:
            tol = _tolerance(X, self.tol)
            # using tol-based early stopping needs the allocation of a
            # dedicated before which can be expensive for high dim data:
            # hence we allocate it outside of the main loop
            old_center_buffer = np.zeros(n_features, np.double)
        else:
            tol = 0.0
            # no need for the center buffer if tol-based early stopping is
            # disabled
            old_center_buffer = np.zeros(0, np.double)
        distances = np.zeros(self.batch_size, dtype=np.float64)
        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_iter = int(self.max_iter * n_batches)
        # Cap the init subset size to the data size.
        init_size = self.init_size
        if init_size is None:
            init_size = 3 * self.batch_size
        if init_size > n_samples:
            init_size = n_samples
        self.init_size_ = init_size
        # Fixed validation subset shared by all inits (random_integers
        # samples inclusively in [0, n_samples - 1]).
        validation_indices = random_state.random_integers(
            0, n_samples - 1, init_size)
        X_valid = X[validation_indices]
        x_squared_norms_valid = x_squared_norms[validation_indices]
        # perform several inits with random sub-sets
        best_inertia = None
        for init_idx in range(self.n_init):
            if self.verbose:
                print("Init %d/%d with method: %s"
                      % (init_idx + 1, self.n_init, self.init))
            counts = np.zeros(self.n_clusters, dtype=np.int32)
            # TODO: once the `k_means` function works with sparse input we
            # should refactor the following init to use it instead.
            # Initialize the centers using only a fraction of the data as we
            # expect n_samples to be very large when using MiniBatchKMeans
            cluster_centers = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=random_state,
                x_squared_norms=x_squared_norms,
                init_size=init_size)
            # Compute the label assignement on the init dataset
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X_valid, x_squared_norms[validation_indices],
                cluster_centers, counts, old_center_buffer, False,
                distances=distances, verbose=self.verbose)
            # Keep only the best cluster centers across independent inits on
            # the common validation set
            _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                         cluster_centers)
            if self.verbose:
                print("Inertia for init %d/%d: %f"
                      % (init_idx + 1, self.n_init, inertia))
            if best_inertia is None or inertia < best_inertia:
                self.cluster_centers_ = cluster_centers
                self.counts_ = counts
                best_inertia = inertia
        # Empty context to be used inplace by the convergence check routine
        convergence_context = {}
        # Perform the iterative optimization until the final convergence
        # criterion
        for iteration_idx in range(n_iter):
            # Sample a minibatch from the full dataset
            minibatch_indices = random_state.random_integers(
                0, n_samples - 1, self.batch_size)
            # Perform the actual update step on the minibatch data
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X[minibatch_indices], x_squared_norms[minibatch_indices],
                self.cluster_centers_, self.counts_,
                old_center_buffer, tol > 0.0, distances=distances,
                # Here we randomly choose whether to perform
                # random reassignment: the choice is done as a function
                # of the iteration index, and the minimum number of
                # counts, in order to force this reassignment to happen
                # every once in a while
                random_reassign=((iteration_idx + 1)
                                 % (10 + self.counts_.min()) == 0),
                random_state=random_state,
                reassignment_ratio=self.reassignment_ratio,
                verbose=self.verbose)
            # Monitor convergence and do early stopping if necessary
            if _mini_batch_convergence(
                    self, iteration_idx, n_iter, tol, n_samples,
                    centers_squared_diff, batch_inertia, convergence_context,
                    verbose=self.verbose):
                break
        if self.compute_labels:
            if self.verbose:
                print('Computing label assignements and total inertia')
            self.labels_, self.inertia_ = _labels_inertia(
                X, x_squared_norms, self.cluster_centers_)
        return self

    def partial_fit(self, X, y=None):
        """Update k means estimate on a single mini-batch X.

        Parameters
        ----------
        X: array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster.
        """
        X = check_arrays(X, sparse_format="csr", copy=False)[0]
        n_samples, n_features = X.shape
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=np.float64)
        # An empty batch is a no-op.
        if n_samples == 0:
            return self
        x_squared_norms = _squared_norms(X)
        self.random_state_ = check_random_state(self.random_state)
        if (not hasattr(self, 'counts_')
                or not hasattr(self, 'cluster_centers_')):
            # this is the first call partial_fit on this object:
            # initialize the cluster centers
            self.cluster_centers_ = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=self.random_state_,
                x_squared_norms=x_squared_norms, init_size=self.init_size)
            self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
            random_reassign = False
        else:
            # The lower the minimum count is, the more we do random
            # reassignment, however, we don't want to do random
            # reassignment too often, to allow for building up counts
            random_reassign = self.random_state_.randint(
                10 * (1 + self.counts_.min())) == 0
        _mini_batch_step(X, x_squared_norms, self.cluster_centers_,
                         self.counts_, np.zeros(0, np.double), 0,
                         random_reassign=random_reassign,
                         random_state=self.random_state_,
                         reassignment_ratio=self.reassignment_ratio,
                         verbose=self.verbose)
        if self.compute_labels:
            self.labels_, self.inertia_ = _labels_inertia(
                X, x_squared_norms, self.cluster_centers_)
        return self
|
maxlikely/scikit-learn
|
sklearn/cluster/k_means_.py
|
Python
|
bsd-3-clause
| 48,669
|
[
"Gaussian"
] |
43e031a59b2e1765bc2564a8e002103e5c619c97e47de8187e0fe9646e258f52
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 23:05:11 2015
@author: boland
"""
class Config:
    """
    Container for all configuration required to run the processing suite.

    Every option is read from a ConfigParser-format file at construction
    time and exposed as an UPPERCASE attribute, grouped by section:
    paths, processing switches, map parameters, cross-correlation /
    spectra parameters, FTAN parameters and tomographic-inversion
    parameters.
    """
    def __init__(self, config_file):
        # initialise config file parser object and read the file
        self.config = ConfigParser.ConfigParser()
        self.config.read(config_file)
        # initialise ALL configuration variables on definition of class!
        # -----
        # paths
        # -----
        self.FOLDER = self.config.get('paths', 'FOLDER')
        #TIMELINE_DB = config.get('paths', 'TIMELINE_DB')
        #RESPONSE_DB = config.get('paths', 'RESPONSE_DB')
        # "DEFAULT" anchors all I/O directories at the current working
        # directory; anything else is used as the root folder itself.
        # (The original two branches differed only in the root, so the
        # directory layout is now built once from a single root.)
        if self.FOLDER == "DEFAULT":
            #fold = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
            fold = os.getcwd()
        else:
            fold = self.FOLDER
        # input dirs
        self.MSEED_DIR = "{}/INPUT/DATA".format(fold)
        self.STATIONXML_DIR = "{}/INPUT/XML".format(fold)
        self.DATALESS_DIR = "{}/INPUT/DATALESS".format(fold)
        self.DATABASE_DIR = "{}/INPUT/DATABASES".format(fold)
        # output dirs
        self.CROSSCORR_DIR = "{}/OUTPUT/CROSS".format(fold)
        self.FTAN_DIR = "{}/OUTPUT/FTAN".format(fold)
        self.TOMO_DIR = "{}/OUTPUT/TOMO".format(fold)
        self.DEPTHMODELS_DIR = "{}/OUTPUT/DEPTH".format(fold)
        # dir of the Computer Programs in Seismology (can be None)
        self.COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR = \
            self.config.get('paths', 'COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR')
        #===========
        # processing
        #===========
        # Individual preprocessing switches; each config option must be
        # True or False, any other value raises a ValueError.
        self.TDD = self.config.getboolean('processing', 'TDD')
        # BUG FIX: RESP_REMOVE previously read the 'EVENT_REMOVE' option
        # (copy-paste error); it now reads its own 'RESP_REMOVE' option.
        self.RESP_REMOVE = self.config.getboolean('processing', 'RESP_REMOVE')
        self.EVENT_REMOVE = self.config.getboolean('processing', 'EVENT_REMOVE')
        self.HIGHAMP_REMOVE = self.config.getboolean('processing', 'HIGHAMP_REMOVE')
        self.RESP_CHECK = self.config.getboolean('processing', 'RESP_CHECK')
        self.BANDPASS = self.config.getboolean('processing', 'BANDPASS')
        self.DOWNSAMPLE = self.config.getboolean('processing', 'DOWNSAMPLE')
        self.COMPLETENESS = self.config.getboolean('processing', 'COMPLETENESS')
        # NOTE: the option name 'TIME_NOMALISATION' (sic) is kept as-is
        # for compatibility with existing configuration files.
        self.TIME_NOMALISATION = self.config.getboolean('processing', 'TIME_NOMALISATION')
        self.SPEC_WHITENING = self.config.getboolean('processing', 'SPEC_WHITENING')
        # ---------------
        # maps parameters
        # ---------------
        # paths to shapefiles (coasts, tectonic provinces and labels)
        self.COAST_SHP = self.config.get('maps', 'COAST_SHP')
        self.TECTO_SHP = self.config.get('maps', 'TECTO_SHP')
        self.TECTO_LABELS = self.config.get('maps', 'TECTO_LABELS')
        # colors of tectonic provinces
        self.TECTO_COLORS = json.loads(self.config.get('maps', 'TECTO_COLORS'))
        # bounding boxes
        self.BBOX_LARGE = json.loads(self.config.get('maps', 'BBOX_LARGE'))
        self.BBOX_SMALL = json.loads(self.config.get('maps', 'BBOX_SMALL'))
        # --------------------------------------
        # cross-correlation / spectra parameters
        # --------------------------------------
        # use dataless files or stationXML files to remove instrument response?
        self.USE_DATALESSPAZ = self.config.getboolean('cross-correlation',
                                                      'USE_DATALESSPAZ')
        self.USE_STATIONXML = self.config.getboolean('cross-correlation',
                                                     'USE_STATIONXML')
        # subset of stations to cross-correlate
        self.CROSSCORR_STATIONS_SUBSET = \
            self.config.get('cross-correlation', 'CROSSCORR_STATIONS_SUBSET')
        self.CROSSCORR_STATIONS_SUBSET = json.loads(self.CROSSCORR_STATIONS_SUBSET)
        # locations to skip
        self.CROSSCORR_SKIPLOCS = json.loads(self.config.get('cross-correlation',
                                                             'CROSSCORR_SKIPLOCS'))
        # first and last day, minimum data fill per day.  Days are parsed
        # into full datetime objects (not dates) so that the interval can
        # be a datetime rather than just a date.
        self.FIRSTDAY = self.config.get('cross-correlation', 'FIRSTDAY')
        self.FIRSTDAY = dt.datetime.strptime(self.FIRSTDAY, '%d/%m/%Y')
        self.LASTDAY = self.config.get('cross-correlation', 'LASTDAY')
        self.LASTDAY = dt.datetime.strptime(self.LASTDAY, '%d/%m/%Y')
        self.MINFILL = self.config.getfloat('cross-correlation', 'MINFILL')
        # band-pass parameters (frequencies derived from the periods)
        self.PERIODMIN = self.config.getfloat('cross-correlation', 'PERIODMIN')
        self.PERIODMAX = self.config.getfloat('cross-correlation', 'PERIODMAX')
        self.FREQMIN = 1.0 / self.PERIODMAX
        self.FREQMAX = 1.0 / self.PERIODMIN
        self.CORNERS = self.config.getint('cross-correlation', 'CORNERS')
        self.ZEROPHASE = self.config.getboolean('cross-correlation', 'ZEROPHASE')
        # resample period (to decimate traces, after band-pass)
        self.PERIOD_RESAMPLE = self.config.getfloat('cross-correlation',
                                                    'PERIOD_RESAMPLE')
        # Time-normalization parameters:
        self.ONEBIT_NORM = self.config.getboolean('cross-correlation', 'ONEBIT_NORM')
        # earthquakes period bands
        self.PERIODMIN_EARTHQUAKE = self.config.getfloat('cross-correlation',
                                                         'PERIODMIN_EARTHQUAKE')
        self.PERIODMAX_EARTHQUAKE = self.config.getfloat('cross-correlation',
                                                         'PERIODMAX_EARTHQUAKE')
        self.FREQMIN_EARTHQUAKE = 1.0 / self.PERIODMAX_EARTHQUAKE
        self.FREQMAX_EARTHQUAKE = 1.0 / self.PERIODMIN_EARTHQUAKE
        # time window (s) to smooth data in earthquake band
        # and calculate time-norm weights
        self.WINDOW_TIME = 0.5 * self.PERIODMAX_EARTHQUAKE
        # frequency window (Hz) to smooth ampl spectrum
        # and calculate spect whitening weights
        self.WINDOW_FREQ = self.config.getfloat('cross-correlation', 'WINDOW_FREQ')
        # cross-correlation time interval length (minutes), e.g. 45 min
        # intervals for superior SNR
        self.XCORR_INTERVAL = self.config.getfloat('cross-correlation',
                                                   'XCORR_INTERVAL')
        # Max time window (s) for cross-correlation.  When the option is
        # missing or malformed, fall back to a value derived from the
        # interval length via shift().  (Narrowed from a bare `except:`
        # so KeyboardInterrupt/SystemExit are no longer swallowed.)
        try:
            self.CROSSCORR_TMAX = self.config.getfloat('cross-correlation',
                                                       'CROSSCORR_TMAX')
        except Exception:
            self.CROSSCORR_TMAX = shift(self.XCORR_INTERVAL * 60.0)
        # ---------------
        # FTAN parameters
        # ---------------
        # default period bands, used to:
        # - plot cross-correlation by period bands, in plot_FTAN(), plot_by_period_bands()
        # - plot spectral SNR, in plot_spectral_SNR()
        # - estimate min spectral SNR, in FTANs()
        self.PERIOD_BANDS = json.loads(self.config.get('FTAN', 'PERIOD_BANDS'))
        # default parameters to define the signal and noise windows used to
        # estimate the SNR:
        # - the signal window is defined according to a min and a max velocity as:
        #   dist/vmax < t < dist/vmin
        # - the noise window has a fixed size and starts after a fixed trailing
        #   time from the end of the signal window
        self.SIGNAL_WINDOW_VMIN = self.config.getfloat('FTAN',
                                                       'SIGNAL_WINDOW_VMIN')
        self.SIGNAL_WINDOW_VMAX = self.config.getfloat('FTAN',
                                                       'SIGNAL_WINDOW_VMAX')
        self.SIGNAL2NOISE_TRAIL = self.config.getfloat('FTAN',
                                                       'SIGNAL2NOISE_TRAIL')
        self.NOISE_WINDOW_SIZE = self.config.getfloat('FTAN',
                                                      'NOISE_WINDOW_SIZE')
        # smoothing parameter of FTAN analysis
        self.FTAN_ALPHA = self.config.getfloat('FTAN', 'FTAN_ALPHA')
        # periods and velocities of FTAN analysis, given as JSON
        # [start, stop, step] triples expanded through np.arange
        self.RAWFTAN_PERIODS_STARTSTOPSTEP = self.config.get('FTAN',
                                                             'RAWFTAN_PERIODS_STARTSTOPSTEP')
        self.RAWFTAN_PERIODS_STARTSTOPSTEP = \
            json.loads(self.RAWFTAN_PERIODS_STARTSTOPSTEP)
        self.RAWFTAN_PERIODS = np.arange(*self.RAWFTAN_PERIODS_STARTSTOPSTEP)
        self.CLEANFTAN_PERIODS_STARTSTOPSTEP = \
            self.config.get('FTAN', 'CLEANFTAN_PERIODS_STARTSTOPSTEP')
        self.CLEANFTAN_PERIODS_STARTSTOPSTEP = \
            json.loads(self.CLEANFTAN_PERIODS_STARTSTOPSTEP)
        self.CLEANFTAN_PERIODS = np.arange(*self.CLEANFTAN_PERIODS_STARTSTOPSTEP)
        self.FTAN_VELOCITIES_STARTSTOPSTEP = \
            self.config.get('FTAN', 'FTAN_VELOCITIES_STARTSTOPSTEP')
        self.FTAN_VELOCITIES_STARTSTOPSTEP = \
            json.loads(self.FTAN_VELOCITIES_STARTSTOPSTEP)
        self.FTAN_VELOCITIES = np.arange(*self.FTAN_VELOCITIES_STARTSTOPSTEP)
        self.FTAN_VELOCITIES_STEP = self.FTAN_VELOCITIES_STARTSTOPSTEP[2]
        # relative strength of the smoothing term in the penalty function that
        # the dispersion curve seeks to minimize
        self.STRENGTH_SMOOTHING = self.config.getfloat('FTAN',
                                                       'STRENGTH_SMOOTHING')
        # replace nominal frequency (i.e., center frequency of Gaussian filters)
        # with instantaneous frequency (i.e., dphi/dt(t=arrival time) with phi the
        # phase of the filtered analytic signal), in the FTAN and dispersion curves?
        # See Bensen et al. (2007) for technical details.
        self.USE_INSTANTANEOUS_FREQ = self.config.getboolean('FTAN',
                                                             'USE_INSTANTANEOUS_FREQ')
        # if the instantaneous frequency (or period) is used, we need to discard bad
        # values from instantaneous periods. So:
        # - instantaneous periods whose relative difference with respect to
        #   nominal period is greater than ``MAX_RELDIFF_INST_NOMINAL_PERIOD``
        #   are discarded,
        # - instantaneous periods lower than ``MIN_INST_PERIOD`` are discarded,
        # - instantaneous periods whose relative difference with respect to the
        #   running median is greater than ``MAX_RELDIFF_INST_MEDIAN_PERIOD`` are
        #   discarded; the running median is calculated over
        #   ``HALFWINDOW_MEDIAN_PERIOD`` points to the right and to the left
        #   of each period.
        self.MAX_RELDIFF_INST_NOMINAL_PERIOD = \
            self.config.getfloat('FTAN', 'MAX_RELDIFF_INST_NOMINAL_PERIOD')
        self.MIN_INST_PERIOD = self.config.getfloat('FTAN', 'MIN_INST_PERIOD')
        self.HALFWINDOW_MEDIAN_PERIOD = \
            self.config.getint('FTAN', 'HALFWINDOW_MEDIAN_PERIOD')
        self.MAX_RELDIFF_INST_MEDIAN_PERIOD = \
            self.config.getfloat('FTAN', 'MAX_RELDIFF_INST_MEDIAN_PERIOD')
        # --------------------------------
        # Tomographic inversion parameters
        # --------------------------------
        # Default parameters related to the velocity selection criteria
        # min spectral SNR to retain velocity
        self.MINSPECTSNR = self.config.getfloat('tomography', 'MINSPECTSNR')
        # min spectral SNR to retain velocity if no std dev
        self.MINSPECTSNR_NOSDEV = self.config.getfloat('tomography', 'MINSPECTSNR_NOSDEV')
        # max sdt dev (km/s) to retain velocity
        self.MAXSDEV = self.config.getfloat('tomography', 'MAXSDEV')
        # min nb of trimesters to estimate std dev
        self.MINNBTRIMESTER = self.config.getint('tomography', 'MINNBTRIMESTER')
        # max period = *MAXPERIOD_FACTOR* * pair distance
        self.MAXPERIOD_FACTOR = self.config.getfloat('tomography', 'MAXPERIOD_FACTOR')
        # Default internode spacing of grid
        self.LONSTEP = self.config.getfloat('tomography', 'LONSTEP')
        self.LATSTEP = self.config.getfloat('tomography', 'LATSTEP')
        # Default correlation length of the smoothing kernel:
        # S(r,r') = exp[-|r-r'|**2 / (2 * correlation_length**2)]
        self.CORRELATION_LENGTH = self.config.getfloat('tomography',
                                                       'CORRELATION_LENGTH')
        # Default strength of the spatial smoothing term (alpha) and the
        # weighted norm penalization term (beta) in the penalty function
        self.ALPHA = self.config.getfloat('tomography', 'ALPHA')
        self.BETA = self.config.getfloat('tomography', 'BETA')
        # Default parameter in the damping factor of the norm penalization term,
        # such that the norm is weighted by exp(- lambda_*path_density)
        # With a value of 0.15, penalization becomes strong when path density < ~20
        # With a value of 0.30, penalization becomes strong when path density < ~10
        self.LAMBDA = self.config.getfloat('tomography', 'LAMBDA')
|
boland1992/SeisSuite
|
seissuite/misc/config_class.py
|
Python
|
gpl-3.0
| 14,443
|
[
"Gaussian"
] |
fc6c63f394f163913b5c91b5edbb01ea4f38bd2b9cca2d7bc6d8c3164e8111da
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
Generic Taskmaster module for the SCons build engine.
This module contains the primary interface(s) between a wrapping user
interface and the SCons build engine. There are two key classes here:
Taskmaster
This is the main engine for walking the dependency graph and
calling things to decide what does or doesn't need to be built.
Task
This is the base class for allowing a wrapping interface to
decide what does or doesn't actually need to be done. The
intention is for a wrapping interface to subclass this as
appropriate for different types of behavior it may need.
The canonical example is the SCons native Python interface,
which has Task subclasses that handle its specific behavior,
like printing "`foo' is up to date" when a top-level target
doesn't need to be built, and handling the -c option by removing
targets as its "build" action. There is also a separate subclass
for suppressing this output when the -q option is used.
The Taskmaster instantiates a Task object for each (set of)
target(s) that it decides need to be evaluated and/or built.
"""
__revision__ = "src/engine/SCons/Taskmaster.py 3842 2008/12/20 22:59:52 scons"
from itertools import chain
import operator
import string
import sys
import traceback
import SCons.Errors
import SCons.Node
# Short module-level aliases for the SCons.Node state constants so the
# Taskmaster loop can reference them without the SCons.Node prefix.
StateString = SCons.Node.StateString
NODE_NO_STATE = SCons.Node.no_state
NODE_PENDING = SCons.Node.pending
NODE_EXECUTING = SCons.Node.executing
NODE_UP_TO_DATE = SCons.Node.up_to_date
NODE_EXECUTED = SCons.Node.executed
NODE_FAILED = SCons.Node.failed

# A subsystem for recording stats about how different Nodes are handled by
# the main Taskmaster loop.  There's no external control here (no need for
# a --debug= option); enable it by changing the value of CollectStats.
CollectStats = None
class Stats:
    """
    Per-Node statistics holder for the Taskmaster.

    When statistics collection is enabled (see ``CollectStats``), each
    Node processed by the Taskmaster gets one of these attached, and
    the Taskmaster records its decision each time it processes the Node.
    (Ideally, that's just once per Node.)
    """
    def __init__(self):
        """
        Create a Stats object with every disposition counter at zero.
        """
        # One counter per possible Taskmaster disposition of a Node.
        for counter in ('considered', 'already_handled', 'problem',
                        'child_failed', 'not_built', 'side_effects',
                        'build'):
            setattr(self, counter, 0)
# Nodes that have a Stats object attached (populated only when
# CollectStats is enabled); dump_stats() reports them sorted by name.
StatsNodes = []

# One fixed-width column per Stats counter, in declaration order;
# rendered with a Stats instance's __dict__ as the mapping.
fmt = "%(considered)3d "\
      "%(already_handled)3d " \
      "%(problem)3d " \
      "%(child_failed)3d " \
      "%(not_built)3d " \
      "%(side_effects)3d " \
      "%(build)3d "
def dump_stats():
    """Print one formatted stats line per recorded Node, sorted by name.

    Each line is the Node's Stats counters rendered through ``fmt``,
    followed by the Node's string representation.
    """
    # key=str is equivalent to the old cmp(str(a), str(b)) comparator
    # and also works on Python 3, where list.sort() lost the cmp arg.
    StatsNodes.sort(key=str)
    for n in StatsNodes:
        # Parenthesized print works as both Py2 statement and Py3 call;
        # the old form `print (fmt % ...) + str(n)` breaks on Python 3.
        print(fmt % n.stats.__dict__ + str(n))
class Task:
"""
Default SCons build engine task.
This controls the interaction of the actual building of node
and the rest of the engine.
This is expected to handle all of the normally-customizable
aspects of controlling a build, so any given application
*should* be able to do what it wants by sub-classing this
class and overriding methods as appropriate. If an application
needs to customze something by sub-classing Taskmaster (or
some other build engine class), we should first try to migrate
that functionality into this class.
Note that it's generally a good idea for sub-classes to call
these methods explicitly to update state, etc., rather than
roll their own interaction with Taskmaster from scratch.
"""
def __init__(self, tm, targets, top, node):
    """Bind this task to its Taskmaster and to the node(s) it builds."""
    # Remember who created us and what we are building.
    self.tm = tm
    self.node = node
    self.top = top
    self.targets = targets
    # Start with no recorded exception state.
    self.exc_clear()
def trace_message(self, method, node, description='node'):
    """Return one formatted line for the Taskmaster trace output."""
    # Left-pad the "method:" label to 20 columns, then append the
    # description and the Taskmaster's rendering of the node.
    label = method + ':'
    return '%-20s %s %s\n' % (label, description,
                              self.tm.trace_node(node))
def display(self, message):
    """
    Hook to allow the calling interface to display a message.

    This hook gets called as part of preparing a task for execution
    (that is, a Node to be built). As part of figuring out what Node
    should be built next, the actual target list may be altered,
    along with a message describing the alteration. The calling
    interface can subclass Task and provide a concrete implementation
    of this method to see those messages.
    """
    # Intentionally a no-op in the base class.
    pass
def prepare(self):
"""
Called just before the task is executed.
This is mainly intended to give the target Nodes a chance to
unlink underlying files and make all necessary directories before
the Action is actually called to build the targets.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.prepare()', self.node))
# Now that it's the appropriate time, give the TaskMaster a
# chance to raise any exceptions it encountered while preparing
# this task.
self.exception_raise()
if self.tm.message:
self.display(self.tm.message)
self.tm.message = None
# Let the targets take care of any necessary preparations.
# This includes verifying that all of the necessary sources
# and dependencies exist, removing the target file(s), etc.
#
# As of April 2008, the get_executor().prepare() method makes
# sure that all of the aggregate sources necessary to build this
# Task's target(s) exist in one up-front check. The individual
# target t.prepare() methods check that each target's explicit
# or implicit dependencies exists, and also initialize the
# .sconsign info.
self.targets[0].get_executor().prepare()
for t in self.targets:
t.prepare()
for s in t.side_effects:
s.prepare()
def get_target(self):
"""Fetch the target being built or updated by this task.
"""
return self.node
def needs_execute(self):
"""
Called to determine whether the task's execute() method should
be run.
This method allows one to skip the somethat costly execution
of the execute() method in a seperate thread. For example,
that would be unnecessary for up-to-date targets.
"""
return True
def execute(self):
"""
Called to execute the task.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
prepare(), executed() or failed().
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.execute()', self.node))
try:
everything_was_cached = 1
for t in self.targets:
if not t.retrieve_from_cache():
everything_was_cached = 0
break
if not everything_was_cached:
self.targets[0].build()
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
except SCons.Errors.UserError:
raise
except SCons.Errors.BuildError:
raise
except Exception, e:
buildError = SCons.Errors.convert_to_BuildError(e)
buildError.node = self.targets[0]
buildError.exc_info = sys.exc_info()
raise buildError
def executed_without_callbacks(self):
"""
Called when the task has been successfully executed
and the Taskmaster instance doesn't want to call
the Node's callback methods.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_without_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED)
def executed_with_callbacks(self):
"""
Called when the task has been successfully executed and
the Taskmaster instance wants to call the Node's callback
methods.
This may have been a do-nothing operation (to preserve build
order), so we must check the node's state before deciding whether
it was "built", in which case we call the appropriate Node method.
In any event, we always call "visited()", which will handle any
post-visit actions that must take place regardless of whether
or not the target was an actual built target or a source Node.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_with_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED)
t.built()
t.visited()
executed = executed_with_callbacks
def failed(self):
"""
Default action when a task fails: stop the build.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
self.fail_stop()
def fail_stop(self):
"""
Explicit stop-the-build failure.
This sets failure status on the target nodes and all of
their dependent parent nodes.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.failed_stop()', self.node))
# Invoke will_not_build() to clean-up the pending children
# list.
self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
# Tell the taskmaster to not start any new tasks
self.tm.stop()
# We're stopping because of a build failure, but give the
# calling Task class a chance to postprocess() the top-level
# target under which the build failure occurred.
self.targets = [self.tm.current_top]
self.top = 1
def fail_continue(self):
"""
Explicit continue-the-build failure.
This sets failure status on the target nodes and all of
their dependent parent nodes.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.failed_continue()', self.node))
self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
def make_ready_all(self):
"""
Marks all targets in a task ready for execution.
This is used when the interface needs every target Node to be
visited--the canonical example being the "scons -c" option.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
self.out_of_date = self.targets[:]
for t in self.targets:
t.disambiguate().set_state(NODE_EXECUTING)
for s in t.side_effects:
s.set_state(NODE_EXECUTING)
def make_ready_current(self):
"""
Marks all targets in a task ready for execution if any target
is not current.
This is the default behavior for building only what's necessary.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.make_ready_current()',
self.node))
self.out_of_date = []
needs_executing = False
for t in self.targets:
try:
t.disambiguate().make_ready()
is_up_to_date = not t.has_builder() or \
(not t.always_build and t.is_up_to_date())
except EnvironmentError, e:
raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
if not is_up_to_date:
self.out_of_date.append(t)
needs_executing = True
if needs_executing:
for t in self.targets:
t.set_state(NODE_EXECUTING)
for s in t.side_effects:
s.set_state(NODE_EXECUTING)
else:
for t in self.targets:
# We must invoke visited() to ensure that the node
# information has been computed before allowing the
# parent nodes to execute. (That could occur in a
# parallel build...)
t.visited()
t.set_state(NODE_UP_TO_DATE)
make_ready = make_ready_current
def postprocess(self):
"""
Post-processes a task after it's been executed.
This examines all the targets just built (or not, we don't care
if the build was successful, or even if there was no build
because everything was up-to-date) to see if they have any
waiting parent Nodes, or Nodes waiting on a common side effect,
that can be put back on the candidates list.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.postprocess()', self.node))
# We may have built multiple targets, some of which may have
# common parents waiting for this build. Count up how many
# targets each parent was waiting for so we can subtract the
# values later, and so we *don't* put waiting side-effect Nodes
# back on the candidates list if the Node is also a waiting
# parent.
targets = set(self.targets)
pending_children = self.tm.pending_children
parents = {}
for t in targets:
# A node can only be in the pending_children set if it has
# some waiting_parents.
if t.waiting_parents:
if T: T.write(self.trace_message('Task.postprocess()',
t,
'removing'))
pending_children.discard(t)
for p in t.waiting_parents:
parents[p] = parents.get(p, 0) + 1
for t in targets:
for s in t.side_effects:
if s.get_state() == NODE_EXECUTING:
s.set_state(NODE_NO_STATE)
for p in s.waiting_parents:
parents[p] = parents.get(p, 0) + 1
for p in s.waiting_s_e:
if p.ref_count == 0:
self.tm.candidates.append(p)
for p, subtract in parents.items():
p.ref_count = p.ref_count - subtract
if T: T.write(self.trace_message('Task.postprocess()',
p,
'adjusted parent ref count'))
if p.ref_count == 0:
self.tm.candidates.append(p)
for t in targets:
t.postprocess()
# Exception handling subsystem.
#
# Exceptions that occur while walking the DAG or examining Nodes
# must be raised, but must be raised at an appropriate time and in
# a controlled manner so we can, if necessary, recover gracefully,
# possibly write out signature information for Nodes we've updated,
# etc. This is done by having the Taskmaster tell us about the
# exception, and letting
def exc_info(self):
"""
Returns info about a recorded exception.
"""
return self.exception
def exc_clear(self):
"""
Clears any recorded exception.
This also changes the "exception_raise" attribute to point
to the appropriate do-nothing method.
"""
self.exception = (None, None, None)
self.exception_raise = self._no_exception_to_raise
def exception_set(self, exception=None):
"""
Records an exception to be raised at the appropriate time.
This also changes the "exception_raise" attribute to point
to the method that will, in fact
"""
if not exception:
exception = sys.exc_info()
self.exception = exception
self.exception_raise = self._exception_raise
def _no_exception_to_raise(self):
pass
def _exception_raise(self):
"""
Raises a pending exception that was recorded while getting a
Task ready for execution.
"""
exc = self.exc_info()[:]
try:
exc_type, exc_value, exc_traceback = exc
except ValueError:
exc_type, exc_value = exc
exc_traceback = None
raise exc_type, exc_value, exc_traceback
def find_cycle(stack, visited):
    # Depth-first search along waiting_parents links, looking for a path
    # that leads back to the node the search started from (stack[0]).
    # Returns the stack (whose last element repeats stack[0]) when a
    # cycle is found, or None otherwise.  `visited` prunes nodes whose
    # parent chains have already been fully explored.
    top = stack[-1]
    if top in visited:
        return None
    visited.add(top)
    for parent in top.waiting_parents:
        stack.append(parent)
        if stack[0] == stack[-1]:
            return stack
        if find_cycle(stack, visited):
            return stack
        stack.pop()
    return None
class Taskmaster:
    """
    The Taskmaster for walking the dependency DAG.
    """
    def __init__(self, targets=[], tasker=Task, order=None, trace=None):
        # NOTE(review): the mutable default `targets=[]` is only safe
        # because this method never mutates it (it copies via [:]);
        # consider `targets=None` if this is ever changed.
        self.original_top = targets
        self.top_targets_left = targets[:]
        self.top_targets_left.reverse()
        self.candidates = []
        self.tasker = tasker
        if not order:
            order = lambda l: l
        self.order = order
        self.message = None
        self.trace = trace
        self.next_candidate = self.find_next_candidate
        self.pending_children = set()
    def find_next_candidate(self):
        """
        Returns the next candidate Node for (potential) evaluation.
        The candidate list (really a stack) initially consists of all of
        the top-level (command line) targets provided when the Taskmaster
        was initialized. While we walk the DAG, visiting Nodes, all the
        children that haven't finished processing get pushed on to the
        candidate list. Each child can then be popped and examined in
        turn for whether *their* children are all up-to-date, in which
        case a Task will be created for their actual evaluation and
        potential building.
        Here is where we also allow candidate Nodes to alter the list of
        Nodes that should be examined. This is used, for example, when
        invoking SCons in a source directory. A source directory Node can
        return its corresponding build directory Node, essentially saying,
        "Hey, you really need to build this thing over here instead."
        """
        try:
            return self.candidates.pop()
        except IndexError:
            pass
        try:
            node = self.top_targets_left.pop()
        except IndexError:
            return None
        self.current_top = node
        alt, message = node.alter_targets()
        if alt:
            # The node redirected us elsewhere; keep the original node
            # on the stack so it is still processed after the
            # alternatives.
            self.message = message
            self.candidates.append(node)
            self.candidates.extend(self.order(alt))
            node = self.candidates.pop()
        return node
    def no_next_candidate(self):
        """
        Stops Taskmaster processing by not returning a next candidate.
        Note that we have to clean-up the Taskmaster candidate list
        because the cycle detection depends on the fact all nodes have
        been processed somehow.
        """
        while self.candidates:
            candidates = self.candidates
            self.candidates = []
            self.will_not_build(candidates)
        return None
    def _validate_pending_children(self):
        """
        Validate the content of the pending_children set. Assert if an
        internal error is found.
        This function is used strictly for debugging the taskmaster by
        checking that no invariants are violated. It is not used in
        normal operation.
        The pending_children set is used to detect cycles in the
        dependency graph. We call a "pending child" a child that is
        found in the "pending" state when checking the dependencies of
        its parent node.
        A pending child can occur when the Taskmaster completes a loop
        through a cycle. For example, lets imagine a graph made of
        three node (A, B and C) making a cycle. The evaluation starts
        at node A. The taskmaster first consider whether node A's
        child B is up-to-date. Then, recursively, node B needs to
        check whether node C is up-to-date. This leaves us with a
        dependency graph looking like:
        Next candidate \
                        \
        Node A (Pending) --> Node B(Pending) --> Node C (NoState)
                ^                                     |
                |                                     |
                +-------------------------------------+
        Now, when the Taskmaster examines the Node C's child Node A,
        it finds that Node A is in the "pending" state. Therefore,
        Node A is a pending child of node C.
        Pending children indicate that the Taskmaster has potentially
        loop back through a cycle. We say potentially because it could
        also occur when a DAG is evaluated in parallel. For example,
        consider the following graph:
        Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
                |                                     ^
                |                                     |
                +----------> Node D (NoState) --------+
                                  /
                 Next candidate  /
        The Taskmaster first evaluates the nodes A, B, and C and
        starts building some children of node C. Assuming, that the
        maximum parallel level has not been reached, the Taskmaster
        will examine Node D. It will find that Node C is a pending
        child of Node D.
        In summary, evaluating a graph with a cycle will always
        involve a pending child at one point. A pending child might
        indicate either a cycle or a diamond-shaped DAG. Only a
        fraction of the nodes ends-up being a "pending child" of
        another node. This keeps the pending_children set small in
        practice.
        We can differentiate between the two cases if we wait until
        the end of the build. At this point, all the pending children
        nodes due to a diamond-shaped DAG will have been properly
        built (or will have failed to build). But, the pending
        children involved in a cycle will still be in the pending
        state.
        The taskmaster removes nodes from the pending_children set as
        soon as a pending_children node moves out of the pending
        state. This also helps to keep the pending_children set small.
        """
        for n in self.pending_children:
            assert n.state in (NODE_PENDING, NODE_EXECUTING), \
                (str(n), StateString[n.state])
            assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
            for p in n.waiting_parents:
                assert p.ref_count > 0, (str(n), str(p), p.ref_count)
    def trace_message(self, message):
        # Prefix a message for the Taskmaster trace stream.
        return 'Taskmaster: %s\n' % message
    def trace_node(self, node):
        # One-line summary of a node: state, ref count, repr of its name.
        return '<%-10s %-3s %s>' % (StateString[node.get_state()],
                                    node.ref_count,
                                    repr(str(node)))
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.
        This is *the* main guts of the DAG walk. We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies. A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building. A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.
        Note that this method does not do any signature calculation or
        up-to-date check itself. All of that is handled by the Task
        class. This is purely concerned with the dependency graph walk.
        """
        self.ready_exc = None
        T = self.trace
        if T: T.write('\n' + self.trace_message('Looking for a node to evaluate'))
        while 1:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + '\n')
                return None
            node = node.disambiguate()
            state = node.get_state()
            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node
            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None
            if T: T.write(self.trace_message('    Considering node %s and its children:' % self.trace_node(node)))
            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message('       already handled (executed)'))
                continue
            try:
                children = node.children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something). Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('        exception %s while scanning children.\n' % e))
                return node
            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False
            for child in chain(children,node.prerequisites):
                childstate = child.get_state()
                if T: T.write(self.trace_message('       ' + self.trace_node(child)))
                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True
                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)
            # These nodes have not even been visited yet. Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))
            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets. One first descends though the one of the
            # target, the next time occurs through the other target.
            #
            # Note that we can only have failed_children if the
            # --keep-going flag was used, because without it the build
            # will stop before diving in the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                node.set_state(NODE_FAILED)
                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue
            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1
                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message('     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))
                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending
                continue
            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in node.side_effects:
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True
            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue
            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message('Evaluating %s\n' %
                          self.trace_node(node)))
            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node
            return node
        return None
    def next_task(self):
        """
        Returns the next task to be executed.
        This simply asks for the next Node to be evaluated, and then wraps
        it in the specific Task subclass with which we were initialized.
        """
        node = self._find_next_ready_node()
        if node is None:
            return None
        tlist = node.get_executor().targets
        task = self.tasker(self, tlist, node in self.original_top, node)
        try:
            task.make_ready()
        except:
            # We had a problem just trying to get this task ready (like
            # a child couldn't be linked in to a VariantDir when deciding
            # whether this node is current). Arrange to raise the
            # exception when the Task is "executed."
            self.ready_exc = sys.exc_info()
        if self.ready_exc:
            task.exception_set(self.ready_exc)
        self.ready_exc = None
        return task
    def will_not_build(self, nodes, node_func=lambda n: None):
        """
        Perform clean-up about nodes that will never be built. Invokes
        a user defined function on all of these nodes (including all
        of their parents).
        """
        T = self.trace
        pending_children = self.pending_children
        to_visit = set(nodes)
        pending_children = pending_children - to_visit
        if T:
            for n in nodes:
                T.write(self.trace_message('       removing node %s from the pending children set\n' %
                        self.trace_node(n)))
        try:
            while 1:
                try:
                    node = to_visit.pop()
                except AttributeError:
                    # Python 1.5.2
                    if len(to_visit):
                        node = to_visit[0]
                        to_visit.remove(node)
                    else:
                        break
                node_func(node)
                # Prune recursion by flushing the waiting children
                # list immediately.
                parents = node.waiting_parents
                node.waiting_parents = set()
                to_visit = to_visit | parents
                pending_children = pending_children - parents
                for p in parents:
                    p.ref_count = p.ref_count - 1
                    if T: T.write(self.trace_message('       removing parent %s from the pending children set\n' %
                                  self.trace_node(p)))
        except KeyError:
            # The container to_visit has been emptied.
            pass
        # We have the stick back the pending_children list into the
        # task master because the python 1.5.2 compatibility does not
        # allow us to use in-place updates
        self.pending_children = pending_children
    def stop(self):
        """
        Stops the current build completely.
        """
        # Swapping in no_next_candidate makes next_task() return None
        # from here on.
        self.next_candidate = self.no_next_candidate
    def cleanup(self):
        """
        Check for dependency cycles.
        """
        if not self.pending_children:
            return
        # TODO(1.5)
        #nclist = [ (n, find_cycle([n], set())) for n in self.pending_children ]
        nclist = map(lambda n: (n, find_cycle([n], set())), self.pending_children)
        # TODO(1.5)
        #genuine_cycles = [
        #    node for node, cycle in nclist
        #    if cycle or node.get_state() != NODE_EXECUTED
        #]
        genuine_cycles = filter(lambda t: t[1] or t[0].get_state() != NODE_EXECUTED, nclist)
        if not genuine_cycles:
            # All of the "cycles" found were single nodes in EXECUTED state,
            # which is to say, they really weren't cycles. Just return.
            return
        desc = 'Found dependency cycle(s):\n'
        for node, cycle in nclist:
            if cycle:
                desc = desc + "  " + string.join(map(str, cycle), " -> ") + "\n"
            else:
                desc = desc + \
                    "  Internal Error: no cycle found for node %s (%s) in state %s\n" % \
                    (node, repr(node), StateString[node.get_state()])
        raise SCons.Errors.UserError, desc
|
carlos-lopez-garces/mapnik-trunk
|
scons/scons-local-1.2.0/SCons/Taskmaster.py
|
Python
|
lgpl-2.1
| 37,728
|
[
"VisIt"
] |
eda8b586da371b164797cfa0ce73ee5e92cafee44a184dbc61b808e133afb678
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for transforming Tensors."""
import string
from typing import Optional, Text, Union
import numpy as np
import tensorflow as tf
def flatten_dims(tensor: tf.Tensor,
                 first_dim: Optional[int] = 0,
                 last_dim: Optional[int] = -1,
                 name: Optional[Text] = None) -> tf.Tensor:
  """Collapses the span of dimensions [first_dim, last_dim] into a single one.

  Args:
    tensor: [..., first_dim_size, ...middle_dims..., last_dim_size, ...] shaped
      Tensor.
    first_dim: First dimension of the span (inclusive); may be negative. Must
      resolve to a valid index for the rank of `tensor`. Default is 0.
    last_dim: Last dimension of the span (inclusive); may be negative. Must
      resolve to a valid index for the rank of `tensor`. Default is -1.
    name: A name for the operation (optional).

  Returns:
    Tensor of shape [..., flattened_dim_size, ...] where
    flattened_dim_size is the product of the sizes in the flattened span.
  """
  with tf.name_scope(name or 'flatten_dims'):
    tensor = tf.convert_to_tensor(tensor)
    rank = tensor.shape.rank
    if rank is None:
      raise ValueError('Static rank of `tensor` must be known.')
    # Resolve negative indices and validate bounds.
    if first_dim < 0:  # pytype: disable=unsupported-operands
      first_dim += rank
    if not 0 <= first_dim < rank:  # pytype: disable=unsupported-operands
      raise ValueError('`first_dim` out of bounds for `tensor` rank.')
    if last_dim < 0:  # pytype: disable=unsupported-operands
      last_dim += rank
    if not 0 <= last_dim < rank:  # pytype: disable=unsupported-operands
      raise ValueError('`last_dim` out of bounds for `tensor` rank.')
    if first_dim > last_dim:  # pytype: disable=unsupported-operands
      raise ValueError('`first_dim` must not be larger than `last_dim`.')
    # Use the static merged size when every span dimension is known;
    # otherwise fall back to -1 and let tf.reshape infer it.
    span_sizes = tensor.shape[first_dim:(last_dim + 1)].as_list()
    if None in span_sizes:
      merged_size = -1
    else:
      merged_size = 1
      for span_size in span_sizes:
        merged_size *= span_size
    dynamic_shape = tf.shape(tensor)
    new_shape = tf.concat([
        dynamic_shape[:first_dim], [merged_size],
        dynamic_shape[(last_dim + 1):]
    ], 0)
    return tf.reshape(tensor, new_shape)
def gather_by_one_hot(params: tf.Tensor,
                      indices: tf.Tensor,
                      name: Optional[Text] = None) -> tf.Tensor:
  """Gathers rows of `params` via one-hot matrix multiplication.

  TPU-friendly alternative to `tf.gather` along the first axis of `params`.
  The cost is the materialized one-hot tensor of
  `lookup_size * indices.shape.num_elements()` elements and a matching
  `lookup_size` factor in time complexity.

  Args:
    params: <float32>[lookup_size, ...] Tensor of rank >= 1 to gather values
      from.
    indices: <int>[...] Tensor of ids to index into `params`. Any ids outside
      the range [0, lookup_size) will translate to 0 values in the output.
    name: A name for the operation (optional).

  Returns:
    [indices.shape, params.shape[1:]] Tensor.
  """
  with tf.name_scope(name or 'gather_by_one_hot'):
    params = tf.convert_to_tensor(params)
    indices = tf.convert_to_tensor(indices)
    num_rows = tf.shape(params)[0]
    # Flatten the ids so one matmul handles every lookup; out-of-range ids
    # yield all-zero one-hot rows and therefore zero outputs.
    flat_ids = tf.reshape(indices, [-1])
    selector = tf.one_hot(flat_ids, num_rows, dtype=params.dtype)
    gathered = einsum_wrap_ellipses('ij,j...->i...', selector, params)
    final_shape = tf.concat([tf.shape(indices), tf.shape(params)[1:]], 0)
    return tf.reshape(gathered, final_shape)
def batch_gather_by_one_hot(params: tf.Tensor,
                            indices: tf.Tensor,
                            batch_dims: Optional[int] = None,
                            name: Optional[Text] = None) -> tf.Tensor:
  """Batched gather implemented with tf.one_hot multiplication.

  The first `batch_dims` dimensions of `params` and `indices` must have
  compatible shapes. TPU-friendly like `gather_by_one_hot`, with the same
  one-hot materialization and time costs.

  Args:
    params: <float32>[...some_batch_dims, lookup_size, ...] Tensor of values to
      gather from.
    indices: <int>[...some_batch_dims, ...index_dims...] Tensor of ids to index
      into `params`. Any values outside the range [0, lookup_size) will
      translate to 0 values in the output.
    batch_dims: Number of batched dimensions. Must be positive. Defaults to
      len(indices.shape) - 1.
    name: A name for the operation (optional).

  Returns:
    [indices.shape, params.shape[(batch_dims+1):]] Tensor.
  """
  # `batch_dims` matches the `tf.gather` argument name, but it is a single
  # integer count of leading batch dimensions, so rebind it to a clearer name.
  num_batch_dims = batch_dims
  del batch_dims
  with tf.name_scope(name or 'batch_gather_by_one_hot'):
    params = tf.convert_to_tensor(params)
    indices = tf.convert_to_tensor(indices)
    if num_batch_dims is None:
      num_batch_dims = len(indices.shape) - 1
    if num_batch_dims <= 0:
      raise ValueError('`num_batch_dims` must be positive.')
    if len(params.shape) <= num_batch_dims:
      raise ValueError('`params` has too few dimensions.')
    if len(indices.shape) < num_batch_dims:
      raise ValueError('`indices` has too few dimensions.')
    if not params.shape[:num_batch_dims].is_compatible_with(
        indices.shape[:num_batch_dims]):
      raise ValueError('`params` and `indices` must have compatible batch '
                       'dimensions.')
    lookup_size = tf.shape(params)[num_batch_dims]
    # Collapse all index dims into one so a single batched matmul suffices.
    flat_indices = tf.reshape(
        indices, tf.concat([tf.shape(indices)[:num_batch_dims], [-1]], 0))
    one_hot_matrices = tf.one_hot(flat_indices, lookup_size, dtype=params.dtype)
    # Collapse all `params` dims after "lookup_size" (expanding a final
    # dimension if there are none) so the matmul is rank-3-style batched.
    flat_params = tf.reshape(
        params, tf.concat([tf.shape(params)[:(num_batch_dims + 1)], [-1]], 0))
    flat_result = tf.matmul(one_hot_matrices, flat_params)
    result_shape = tf.concat(
        [tf.shape(indices),
         tf.shape(params)[(num_batch_dims + 1):]], 0)
    return tf.reshape(flat_result, result_shape)
def pad_to_multiple(tensor: tf.Tensor,
                    factor: Union[int, tf.Tensor],
                    axis: int,
                    mode: Optional[Text] = 'CONSTANT',
                    constant_values=0,
                    name: Optional[Text] = None) -> tf.Tensor:
  """Pads `tensor` at the end of `axis` up to the next multiple of `factor`.

  Padding is appended only (never prepended). When the axis length is
  already a multiple of `factor`, the result is effectively unchanged.

  Args:
    tensor: A Tensor with rank >= 1 to pad.
    factor: Positive integer factor to pad for. If a Tensor, must be a scalar
      int.
    axis: A valid axis in `tensor` to pad.
    mode: The padding mode to use according to `tf.pad`. Defaults to 'CONSTANT'.
    constant_values: For 'CONSTANT' mode, the scalar pad value to use within
      `tf.pad`. Defaults to 0. Must be same type as `tensor`.
    name: A name for the operation (optional).

  Returns:
    The padded Tensor result.
  """
  with tf.name_scope(name or 'pad_to_multiple'):
    tensor = tf.convert_to_tensor(tensor)
    if isinstance(factor, int) and factor < 1:
      raise ValueError('`factor` must be positive.')
    rank = tensor.shape.rank
    if rank is None:
      raise ValueError('Static rank of `tensor` must be known.')
    if axis < 0:
      axis += rank
    if not 0 <= axis < rank:
      raise ValueError('`axis` out of bounds for `tensor` rank.')
    # Amount needed to round the axis length up to the next multiple.
    axis_len = get_shape_list(tensor)[axis]
    pad_amount = -axis_len % factor
    # Build a [2, rank] paddings matrix that is zero everywhere except the
    # "after" slot of `axis` (the -1 one-hot row never fires, producing the
    # all-zero "before" row).
    paddings = pad_amount * tf.one_hot([-1, axis], rank, axis=0, dtype=tf.int32)
    return tf.pad(
        tensor=tensor,
        paddings=paddings,
        mode=mode,
        constant_values=constant_values)
def split_into_blocks(tensor: tf.Tensor,
                      block_len: int,
                      axis: int,
                      pad_value=0,
                      name: Optional[Text] = None) -> tf.Tensor:
  """Splits a tensor into fixed-length blocks along the given `axis`.

  If the axis length is not a multiple of `block_len`, the axis is first
  padded at the end via `pad_to_multiple` using `pad_value`.

  Args:
    tensor: Tensor of shape [..., axis_len, ...].
    block_len: Positive integer length of each block.
    axis: A valid axis in `tensor` to split along.
    pad_value: The scalar pad value to use. Defaults to 0. Must be same type
      as `tensor`.
    name: A name for the operation (optional).

  Returns:
    Tensor of shape [..., num_blocks, block_len, ...], where
    num_blocks = ceiling(axis_len / block_len).

  Raises:
    ValueError: If `block_len` is not positive, the static rank of `tensor`
      is unknown, or `axis` is out of bounds.
  """
  with tf.name_scope(name or 'split_into_blocks'):
    tensor = tf.convert_to_tensor(tensor)
    if block_len < 1:
      raise ValueError('`block_len` must be positive.')
    rank = tensor.shape.rank
    if rank is None:
      raise ValueError('Static rank of `tensor` must be known.')
    if axis < 0:
      axis += rank
    if not 0 <= axis < rank:
      raise ValueError('`axis` out of bounds for `tensor` rank.')
    padded = pad_to_multiple(
        tensor, factor=block_len, axis=axis, constant_values=pad_value)
    num_blocks = get_shape_list(padded)[axis] // block_len
    # Replace the single `axis` dimension with [num_blocks, block_len].
    blocked_shape = tf.concat([
        tf.shape(tensor)[:axis],
        [num_blocks, block_len],
        tf.shape(tensor)[(axis + 1):],
    ], 0)
    return tf.reshape(padded, blocked_shape)
def concat_3_blocks(blocked_seq: tf.Tensor,
                    name: Optional[Text] = None) -> tf.Tensor:
  """Concatenates each block with its left and right neighbor blocks.

  This is meant to be called on a blocked sequence as returned by
  `split_into_blocks` for example. Each block is augmented with the block
  before and the block after it, so that every token of the original block
  can see all tokens up to `block_len` positions away. The first and last
  blocks are flanked by zero blocks on their missing sides.

  Args:
    blocked_seq: [batch_size, num_blocks, block_len, ...] shaped Tensor.
    name: A name for the operation (optional).

  Returns:
    A Tensor of shape [batch_size, num_blocks, 3*block_len, ...].
  """
  with tf.name_scope(name or 'concat_3_blocks'):
    blocked_seq = tf.convert_to_tensor(blocked_seq)
    num_blocks = tf.shape(blocked_seq)[1]
    # Pad one zero block before the first and one after the last block.
    paddings = tf.one_hot([1, 1],
                          blocked_seq.shape.rank,
                          axis=0,
                          dtype=tf.int32)
    # [batch_size, num_blocks + 2, block_len, ...]
    padded = tf.pad(blocked_seq, paddings)
    # Three windows over the padded sequence, shifted by 0, 1 and 2 blocks;
    # concatenating them along the block_len axis yields the triple blocks.
    windows = [
        padded[:, offset:(offset + num_blocks), ...] for offset in range(3)
    ]
    return tf.concat(windows, 2)
def shift_elements_right(tensor: tf.Tensor,
                         axis: int = -1,
                         amount: int = 1,
                         pad_value=0,
                         name: Optional[Text] = None) -> tf.Tensor:
  """Shifts elements right (towards higher indices) along the given `axis`.

  For amount=1 this turns
    [5, 4, 3, 2, 1]
  into
    [0, 5, 4, 3, 2]
  Positions vacated by the shift are filled with `pad_value`.

  Args:
    tensor: Tensor with rank at least 1.
    axis: A valid axis in `tensor` to shift elements along.
    amount: Integer number of positions to shift element. Use negative numbers
      to shift left instead of right.
    pad_value: The scalar pad value to use. Defaults to 0. Must be the same
      type as `tensor`.
    name: A name for the operation (optional).

  Returns:
    Shifted tensor with the same shape as the input `tensor`.

  Raises:
    ValueError: If the static rank of `tensor` is unknown or `axis` is out
      of bounds.
  """
  with tf.name_scope(name or 'shift_elements_right'):
    tensor = tf.convert_to_tensor(tensor)
    rank = tensor.shape.rank
    if rank is None:
      raise ValueError('Static rank of `tensor` must be known.')
    if axis < 0:
      axis += rank
    if not 0 <= axis < rank:
      raise ValueError('`axis` out of bounds for `tensor` rank.')
    if amount == 0:
      return tensor
    shift = abs(amount)
    # Pad `shift` positions on the side elements move away from: before the
    # axis for a right shift, after it for a left shift.
    pad_slots = [axis, -1] if amount > 0 else [-1, axis]
    paddings = shift * tf.one_hot(pad_slots, rank, axis=0, dtype=tf.int32)
    # [..., axis_len + shift, ...]
    padded = tf.pad(tensor, paddings, constant_values=pad_value)
    if amount > 0:
      begin = tf.zeros([rank], dtype=tf.int32)
    else:
      begin = shift * tf.one_hot(axis, rank, dtype=tf.int32)
    # Slice back to the original shape, discarding the overflowed elements.
    return tf.slice(padded, begin=begin, size=tf.shape(tensor))
def skew_elements_right(tensor: tf.Tensor,
                        axis: int,
                        pad_value=0,
                        name: Optional[Text] = None) -> tf.Tensor:
  """Skews successive elements right along the given `axis`.

  This changes an input like
  [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]
  ]
  into the following:
  [
    [1, 2, 3, 0, 0],
    [0, 4, 5, 6, 0],
    [0, 0, 7, 8, 9]
  ]

  Args:
    tensor: Tensor of shape [..., num_rows, axis_len, ...].
    axis: A valid axis in `tensor` to skew along. It must not be the first
      axis in `tensor`.
    pad_value: The scalar pad value to use. Defaults to 0. Must be the same
      type as `tensor`.
    name: A name for the operation (optional).

  Returns:
    Tensor of shape [..., num_rows, axis_len + num_rows - 1, ...].

  Raises:
    ValueError: If the static rank of `tensor` is unknown or `axis` is
      invalid.
  """
  with tf.name_scope(name or 'skew_elements_right'):
    tensor = tf.convert_to_tensor(tensor)
    rank = tensor.shape.rank
    # Validate rank and axis *before* reading static sizes. Previously the
    # sizes were read first, so an unknown rank or bad axis surfaced as an
    # opaque shape/indexing error instead of the intended ValueError.
    if rank is None:
      raise ValueError('Static rank of `tensor` must be known.')
    if axis < 0:
      axis += rank
    if axis <= 0 or axis >= rank:
      raise ValueError('`axis` out of bounds for `tensor` rank.')
    num_rows = get_shape_list(tensor)[axis - 1]
    axis_len = get_shape_list(tensor)[axis]
    output_len = axis_len + num_rows - 1
    paddings = num_rows * tf.one_hot([-1, axis], rank, axis=0, dtype=tf.int32)
    # [..., num_rows, axis_len + num_rows, ...]
    padded_tensor = tf.pad(tensor, paddings, constant_values=pad_value)
    # [..., num_rows * (axis_len + num_rows), ...]
    flat_tensor = flatten_dims(padded_tensor, first_dim=axis - 1, last_dim=axis)
    padded_tensor2 = pad_to_multiple(
        flat_tensor,
        factor=output_len,
        axis=axis - 1,
        constant_values=pad_value)
    # [..., num_rows + 1, output_len, ...]
    new_shape = tf.concat([
        tf.shape(tensor)[:(axis - 1)], [num_rows + 1, output_len],
        tf.shape(tensor)[(axis + 1):]
    ], 0)
    reshaped_tensor = tf.reshape(padded_tensor2, new_shape)
    # Drop the extra (all-padding) row: [..., num_rows, output_len, ...]
    output_shape = new_shape - tf.one_hot(axis - 1, depth=rank, dtype=tf.int32)
    return tf.slice(
        reshaped_tensor, begin=tf.zeros_like(output_shape), size=output_shape)
def unskew_elements_right(tensor: tf.Tensor,
                          axis: int,
                          name: Optional[Text] = None) -> tf.Tensor:
  """Unskews elements that were "skewed right" along the given `axis`.

  This operation is the inverse of `skew_elements_right`. It changes an input
  like
  [
    [1, 2, 3, 0, 0],
    [0, 4, 5, 6, 0],
    [0, 0, 7, 8, 9]
  ]
  into the following:
  [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]
  ]

  Args:
    tensor: Tensor of shape [..., num_rows, axis_len, ...], where `axis_len`
      must be at least `num_rows`.
    axis: A valid axis in `tensor` to unskew along. It must not be the first
      axis in `tensor`.
    name: A name for the operation (optional).

  Returns:
    Tensor of shape [..., num_rows, axis_len - num_rows + 1, ...]

  Raises:
    ValueError: If the static rank of `tensor`, `num_rows` or `axis_len` is
      unknown, `axis` is invalid, or `axis_len` < `num_rows`.
  """
  with tf.name_scope(name or 'unskew_elements_right'):
    tensor = tf.convert_to_tensor(tensor)
    rank = tensor.shape.rank
    # Validate rank and axis *before* calling shape.as_list(); otherwise an
    # unknown rank raises an opaque TensorShape error instead of the
    # intended ValueError below.
    if rank is None:
      raise ValueError('Static rank of `tensor` must be known.')
    if axis < 0:
      axis += rank
    if axis <= 0 or axis >= rank:
      raise ValueError('`axis` out of bounds for `tensor` rank.')
    num_rows = tensor.shape.as_list()[axis - 1]
    axis_len = tensor.shape.as_list()[axis]
    if num_rows is None:
      raise ValueError('Static size `num_rows` must be known.')
    if axis_len is None:
      raise ValueError('Static size `axis_len` must be known.')
    if axis_len < num_rows:
      raise ValueError('`axis_len` ({}) is less than `num_rows` ({}).'.format(
          axis_len, num_rows))
    output_len = axis_len - num_rows + 1
    # [..., num_rows * axis_len, ...]
    flat_tensor = flatten_dims(tensor, first_dim=axis - 1, last_dim=axis)
    padded_tensor = pad_to_multiple(
        flat_tensor, factor=axis_len + 1, axis=axis - 1)
    # [..., num_rows, axis_len + 1, ...]
    unskewing_shape = tf.concat([
        tf.shape(tensor)[:axis], [axis_len + 1],
        tf.shape(tensor)[(axis + 1):]
    ], 0)
    reshaped_tensor = tf.reshape(padded_tensor, unskewing_shape)
    # [..., num_rows, output_len, ...]
    output_shape = tf.concat(
        [tf.shape(tensor)[:axis], [output_len],
         tf.shape(tensor)[(axis + 1):]], 0)
    return tf.slice(
        reshaped_tensor, begin=tf.zeros_like(output_shape), size=output_shape)
def einsum_wrap_ellipses(equation: Text, *inputs, **kwargs) -> tf.Tensor:
"""Wrapper over `tf.einsum` that rewrites ellipses for efficiency.
`tf.einsum` equations with ellipses (e.g. '...ab,abc->...ac') seem to
result in slow backward pass computations on TPU. As an optimization, this
wrapper replaces the ellipses with explicit letters unused by the equation
(e.g. 'ABab,abc->ABac' in the previous example) before calling `tf.einsum`.
This wrapper is more restrictive than `tf.einsum` in that it requires all
input tensor ranks to be statically known, and '...' must represent the same
number of dimensions in each input. It will not broadcast a mismatch in
number of dimensions automatically.
The result should be the same as calling `tf.einsum` directly, and equations
without ellipses are passed to `tf.einsum` untouched.
Args:
equation: a `str` describing the contraction, in the same format as
`tf.einsum`.
*inputs: the inputs to contract (each one a `Tensor`), whose shapes should
be consistent with `equation`.
**kwargs: Additional keyword arguments to pass to `tf.einsum`.
Returns:
The result of calling `tf.einsum`.
Raises:
ValueError: if required input tensor ranks aren't statically known or usage
of '...' implies different numbers of dimensions for different inputs.
"""
if '...' not in equation:
return tf.einsum(equation, *inputs, **kwargs)
if '->' in equation:
equation_inputs, equation_output = equation.split('->')
else:
equation_inputs = equation
equation_output = None
equation_inputs = equation_inputs.split(',')
# Determine how many dimensions to replace '...' with.
num_dims = None
for i, input_str in enumerate(equation_inputs):
if '...' not in input_str:
continue
rank = inputs[i].shape.rank
if not isinstance(rank, int):
raise ValueError(
'Unknown static rank for input tensor at position {}.'.format(i))
input_str = input_str.replace('...', '')
input_str = input_str.replace(' ', '')
if num_dims is None:
num_dims = rank - len(input_str)
if num_dims < 0:
raise ValueError(
'Not enough dimensions for input tensor at position {}.'.format(i))
elif num_dims != rank - len(input_str):
raise ValueError(
'Mismatch for implied ellipses number of dimensions: {} vs. {}.'
.format(num_dims, rank - len(input_str)))
# Find letters to use in place of '...'
used_letters = set() if equation_output is None else set(equation_output)
for input_str in equation_inputs:
used_letters.update(input_str)
unused_letters = sorted(set(string.ascii_letters) - used_letters)
ellipsis_letters = ''.join(unused_letters[:num_dims])
new_equation_inputs = []
for input_str in equation_inputs:
new_equation_inputs.append(input_str.replace('...', ellipsis_letters))
new_equation = ','.join(new_equation_inputs)
if equation_output is not None:
new_equation += '->{}'.format(
equation_output.replace('...', ellipsis_letters))
return tf.einsum(new_equation, *inputs, **kwargs)
# The following functions are from the original BERT code:
def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, and exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  if name is None:
    # Tensor.name is not supported in Eager mode, so fall back to a label.
    name = 'get_shape_list' if tf.executing_eagerly() else tensor.name

  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

  static_shape = tensor.shape.as_list()
  dynamic_indexes = [
      index for index, dim in enumerate(static_shape) if dim is None
  ]
  if not dynamic_indexes:
    # Fully static shape: plain Python ints only.
    return static_shape

  # Fill in each unknown dimension with its dynamic (runtime) size.
  dynamic_shape = tf.shape(tensor)
  for index in dynamic_indexes:
    static_shape[index] = dynamic_shape[index]
  return static_shape
def assert_rank(tensor, expected_rank, name=None):
  """Raises an exception if the tensor rank is not of the expected rank.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer or list of integers, expected rank.
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the expected shape doesn't match the actual shape.
  """
  if name is None:
    name = tensor.name

  # Normalize the expectation into a set of acceptable ranks.
  if isinstance(expected_rank, int):
    allowed_ranks = {expected_rank}
  else:
    allowed_ranks = set(expected_rank)

  actual_rank = tensor.shape.ndims
  if actual_rank in allowed_ranks:
    return

  scope_name = tf.get_variable_scope().name
  raise ValueError(
      'For the tensor `%s` in scope `%s`, the actual rank '
      '`%d` (shape = %s) is not equal to the expected rank `%s`' %
      (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def gelu(x):
  """Gaussian Error Linear Unit.

  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    `x` with the GELU activation applied.
  """
  # tanh-based approximation of the Gaussian CDF from the paper.
  inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
  gaussian_cdf = 0.5 * (1.0 + tf.tanh(inner))
  return x * gaussian_cdf
def get_activation(activation_string):
  """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.

  Args:
    activation_string: String name of the activation function.

  Returns:
    A Python function corresponding to the activation function. If
    `activation_string` is None, empty, or "linear", this will return None.
    If `activation_string` is not a string, it will return
    `activation_string`.

  Raises:
    ValueError: The `activation_string` does not correspond to a known
      activation.
  """
  # Anything that isn't a string is assumed to already be an activation
  # function, so it is passed through untouched.
  if not isinstance(activation_string, str):
    return activation_string
  if not activation_string:
    return None

  act = activation_string.lower()
  if act == 'linear':
    return None
  if act == 'relu':
    return tf.nn.relu
  if act == 'gelu':
    return gelu
  if act == 'tanh':
    return tf.tanh
  raise ValueError('Unsupported activation: %s' % act)
|
google-research/google-research
|
etcmodel/tensor_utils.py
|
Python
|
apache-2.0
| 25,205
|
[
"Gaussian"
] |
3967d9f2530d715b3fbe4e22962f8b49a7e1e7d5df8d2d82aec06b1229be8f3b
|
#######Angle thresholds (degrees) for the different sectors:
#######Right[0-1], Up-Right[1-2], Up[2-3], Up-Left[3-4], Down-Right[0-5],
#######Down[5-6], Down-Left[6-7], Left[else]
Angles_values=[-20.0, 30.0,60.0,110.0,150.0,-70.0,-120.0,-160.0]
#######Frame_id used when publishing movement commands
#Frame_id_mov = 'odom'
Frame_id_mov = 'world'
#######Frame_id used for the visualization marker
#Frame_id = "vicon" #For Vicon Room
Frame_id_world = "world" #For Stereo Odometry and simulation
#######Topic for the command (PoseStamped type)
#Topic_command="/pegasus/command/pose" #Pegasus Drone
Topic_command="/firefly/command/pose" #Firefly(Simulation) Drone
#######Topic for the Odometry (Odometry msgs)
#Topic_Odometry="/pegasus/vrpn_client/estimated_odometry" #Vicon Room
#Topic_Odometry="/stereo_odometer/odometry" #Stereo Odometry
Topic_Odometry="/firefly/odometry_sensor1/odometry" #Simulation
#######Workspace limits (meters) for the movement of the drone
x_min = -99.9 # 0.5 for the Vicon room
y_min = -99.9 # -2.0 for the Vicon room
z_min = 0.0
x_max = 99.9 # 2.5 for the Vicon room
y_max = 99.9 # 0.0 for the Vicon room
z_max = 99.9
#######Number of camera frames skipped between processed frames
Skip_f = 3
#######Scales used for the skeleton detector
scale=[0.7]
#######Scale factor for resizing images before processing
Res_scale = 0.8
#######Topic for the frontal camera (compressed image)
#Topic_camera = "/camera/color/image_raw/compressed" #RealSense
Topic_camera = "/firefly/camera/camera_frontal/image_raw/compressed" #Simulation
#######Number of images to process per recognition step
N_Images= 2
#######Per-axis thresholds (meters) to accept a reached position
x_thres = 0.1
y_thres = 0.1
z_thres = 0.1
####Orientation quaternion [x, y, z, w] applied to the Odometry transform
#orientation_list2 = [0,0,1.0,0] # Pegasus
orientation_list2 = [0,0,0,1.0] # Simulation
#######RViz configuration file to load
#Rviz_File = "Visual_Real.rviz"
Rviz_File = "Visual_Sim.rviz"
#######Function for transforming the output of the Visual Recogn to the Drone
def Transform_Dir(x,y,z):
    """Map the visual-recognition output axes onto the drone's axes.

    Returns the reordered triple (z, -y, x); the y axis is negated for the
    simulation frame.
    """
    return z, -y, x
|
pazagra/Drone_Rviz_Visualization_Control
|
Config.py
|
Python
|
gpl-3.0
| 1,884
|
[
"Firefly"
] |
ad07aa808bcd223870adc392742c158e3006a6eb29134ba75f802d006d6be997
|
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
import mox
from oslo.config import cfg
from oslo import messaging
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova.objects import fixed_ip as fixed_ip_obj
from nova.objects import network as network_obj
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import quota
from nova import test
from nova.tests import fake_instance
from nova.tests import fake_ldap
from nova.tests import fake_network
from nova.tests import matchers
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_network
from nova import utils
# Module-level shortcuts.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Host name and instance UUID shared by all the fixtures below.
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
# Alias for building fake DB instance dicts in the tests.
fake_inst = fake_instance.fake_db_instance
# Two fixture networks (192.168.0.0/24 and 192.168.1.0/24) used as the DB
# rows returned by the mocked network queries throughout these tests.
networks = [{'id': 0,
             'uuid': FAKEUUID,
             'label': 'test0',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.0.0/24',
             'cidr_v6': '2001:db8::/64',
             'gateway_v6': '2001:db8::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa0',
             'bridge_interface': 'fake_fa0',
             'gateway': '192.168.0.1',
             'broadcast': '192.168.0.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': HOST,
             'project_id': 'fake_project',
             'vpn_public_address': '192.168.0.2',
             'vpn_public_port': '22',
             'vpn_private_address': '10.0.0.2'},
            {'id': 1,
             'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
             'label': 'test1',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.1.0/24',
             'cidr_v6': '2001:db9::/64',
             'gateway_v6': '2001:db9::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa1',
             'bridge_interface': 'fake_fa1',
             'gateway': '192.168.1.1',
             'broadcast': '192.168.1.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': HOST,
             'project_id': 'fake_project',
             'vpn_public_address': '192.168.1.2',
             'vpn_public_port': '22',
             'vpn_private_address': '10.0.0.2'}]
# One unallocated fixed IP per fixture network above.
fixed_ips = [{'id': 0,
              'network_id': 0,
              'address': '192.168.0.100',
              'instance_uuid': 0,
              'allocated': False,
              'virtual_interface_id': 0,
              'floating_ips': []},
             {'id': 0,
              'network_id': 1,
              'address': '192.168.1.100',
              'instance_uuid': 0,
              'allocated': False,
              'virtual_interface_id': 0,
              'floating_ips': []}]
# Minimal flavor fixture (only the rxtx cap matters to these tests).
flavor = {'id': 0,
          'rxtx_cap': 3}
# Unassociated floating IP fixture in the default 'nova' pool.
floating_ip_fields = {'id': 0,
                      'address': '192.168.10.100',
                      'pool': 'nova',
                      'interface': 'eth0',
                      'fixed_ip_id': 0,
                      'project_id': None,
                      'auto_assigned': False}
# Three virtual interface fixtures, one per network id 0-2, returned by the
# mocked virtual_interface_get_* DB calls.
vifs = [{'id': 0,
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': 0,
         'address': 'DE:AD:BE:EF:00:00',
         'uuid': '00000000-0000-0000-0000-0000000000000000',
         'network_id': 0,
         'instance_uuid': 0},
        {'id': 1,
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': 0,
         'address': 'DE:AD:BE:EF:00:01',
         'uuid': '00000000-0000-0000-0000-0000000000000001',
         'network_id': 1,
         'instance_uuid': 0},
        {'id': 2,
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': 0,
         'address': 'DE:AD:BE:EF:00:02',
         'uuid': '00000000-0000-0000-0000-0000000000000002',
         'network_id': 2,
         'instance_uuid': 0}]
class FlatNetworkTestCase(test.TestCase):
    def setUp(self):
        """Build a FlatManager wired to the real DB API for each test."""
        super(FlatNetworkTestCase, self).setUp()
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.network = network_manager.FlatManager(host=HOST)
        self.network.instance_dns_domain = ''
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)
    def test_get_instance_nw_info(self):
        """Check every field of the fake network info model against the
        expected per-network values (indices are 1-based via nid)."""
        fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
        # Zero networks yields an empty (falsy) nw_info.
        nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
        self.assertFalse(nw_info)
        nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
        for i, vif in enumerate(nw_info):
            nid = i + 1
            # Expected values, all derived from the network index.
            check = {'bridge': 'fake_br%d' % nid,
                     'cidr': '192.168.%s.0/24' % nid,
                     'cidr_v6': '2001:db8:0:%x::/64' % nid,
                     'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
                     'multi_host': False,
                     'injected': False,
                     'bridge_interface': None,
                     'vlan': None,
                     'broadcast': '192.168.%d.255' % nid,
                     'dhcp_server': '192.168.1.1',
                     'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
                     'gateway': '192.168.%d.1' % nid,
                     'gateway_v6': '2001:db8:0:1::1',
                     'label': 'test%d' % nid,
                     'mac': 'DE:AD:BE:EF:00:%02x' % nid,
                     'rxtx_cap': 30,
                     'vif_type': net_model.VIF_TYPE_BRIDGE,
                     'vif_devname': None,
                     'vif_uuid':
                         '00000000-0000-0000-0000-00000000000000%02d' % nid,
                     'ovs_interfaceid': None,
                     'qbh_params': None,
                     'qbg_params': None,
                     'should_create_vlan': False,
                     'should_create_bridge': False,
                     'ip': '192.168.%d.%03d' % (nid, nid + 99),
                     'ip_v6': '2001:db8:0:1::%x' % nid,
                     'netmask': '255.255.255.0',
                     'netmask_v6': 64,
                     'physical_network': None,
                     }
            # Flatten the model objects into a plain dict for comparison.
            network = vif['network']
            net_v4 = vif['network']['subnets'][0]
            net_v6 = vif['network']['subnets'][1]
            vif_dict = dict(bridge=network['bridge'],
                            cidr=net_v4['cidr'],
                            cidr_v6=net_v6['cidr'],
                            id=vif['id'],
                            multi_host=network.get_meta('multi_host', False),
                            injected=network.get_meta('injected', False),
                            bridge_interface=
                                network.get_meta('bridge_interface'),
                            vlan=network.get_meta('vlan'),
                            broadcast=str(net_v4.as_netaddr().broadcast),
                            dhcp_server=network.get_meta('dhcp_server',
                                net_v4['gateway']['address']),
                            dns=[ip['address'] for ip in net_v4['dns']],
                            gateway=net_v4['gateway']['address'],
                            gateway_v6=net_v6['gateway']['address'],
                            label=network['label'],
                            mac=vif['address'],
                            rxtx_cap=vif.get_meta('rxtx_cap'),
                            vif_type=vif['type'],
                            vif_devname=vif.get('devname'),
                            vif_uuid=vif['id'],
                            ovs_interfaceid=vif.get('ovs_interfaceid'),
                            qbh_params=vif.get('qbh_params'),
                            qbg_params=vif.get('qbg_params'),
                            should_create_vlan=
                                network.get_meta('should_create_vlan', False),
                            should_create_bridge=
                                network.get_meta('should_create_bridge',
                                                 False),
                            ip=net_v4['ips'][i]['address'],
                            ip_v6=net_v6['ips'][i]['address'],
                            netmask=str(net_v4.as_netaddr().netmask),
                            netmask_v6=net_v6.as_netaddr()._prefixlen,
                            physical_network=
                                network.get_meta('physical_network', None))
            self.assertThat(vif_dict, matchers.DictMatches(check))
    def test_validate_networks(self):
        """validate_networks succeeds when the requested fixed IPs exist
        and are unassociated (mox expectations are strictly ordered)."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                                        [dict(test_network.fake_network, **net)
                                         for net in networks])
        # First lookup: the 192.168.1.100 address, free for use.
        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
        ip['network'] = dict(test_network.fake_network,
                             **networks[1])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)
        # Second lookup: the 192.168.0.100 address, also free.
        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
        ip['network'] = dict(test_network.fake_network,
                             **networks[0])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_reserved(self):
        """Creating a /24 network reserves 3 IPs in the database."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        nets = self.network.create_networks(context_admin, 'fake',
                                            '192.168.0.0/24', False, 1,
                                            256, None, None, None, None, None)
        self.assertEqual(1, len(nets))
        network = nets[0]
        self.assertEqual(3, db.network_count_reserved_ips(context_admin,
                                                          network['id']))
    def test_validate_networks_none_requested_networks(self):
        """A None requested_networks list validates without any DB access."""
        self.network.validate_networks(self.context, None)
    def test_validate_networks_empty_requested_networks(self):
        """An empty requested_networks list validates without any DB access."""
        requested_networks = []
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_networks_invalid_fixed_ip(self):
        """Malformed fixed IP addresses raise FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        # Addresses with five octets are not valid IPv4.
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100.1'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100.1')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                                        [dict(test_network.fake_network, **net)
                                         for net in networks])
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)
    def test_validate_networks_empty_fixed_ip(self):
        """Empty-string fixed IP addresses raise FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               ''),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                                        [dict(test_network.fake_network, **net)
                                         for net in networks])
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)
    def test_validate_networks_none_fixed_ip(self):
        """A None fixed IP is allowed: validation passes without error."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               None),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               None)]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                                        [dict(test_network.fake_network, **net)
                                         for net in networks])
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_add_fixed_ip_instance_using_id_without_vpn(self):
        """add_fixed_ip_to_instance works when the network is identified by
        its integer id (exercises the db.network_get path)."""
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                              'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
        # Expectations are recorded in the exact call order of the manager.
        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        quota.QUOTAS.reserve(mox.IgnoreArg(),
                             fixed_ips=mox.IgnoreArg()).AndReturn(None)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       project_only=mox.IgnoreArg()
                       ).AndReturn(dict(test_network.fake_network,
                                        **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['id'])
    def test_add_fixed_ip_instance_using_uuid_without_vpn(self):
        """add_fixed_ip_to_instance works when the network is identified by
        its UUID (exercises the db.network_get_by_uuid path)."""
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get_by_uuid')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                              'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
        # Expectations are recorded in the exact call order of the manager.
        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        quota.QUOTAS.reserve(mox.IgnoreArg(),
                             fixed_ips=mox.IgnoreArg()).AndReturn(None)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        db.network_get_by_uuid(mox.IgnoreArg(),
                               mox.IgnoreArg()
                               ).AndReturn(dict(test_network.fake_network,
                                                **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['uuid'])
    def test_mini_dns_driver(self):
        """Exercise create/delete/modify and both lookup directions of the
        instance DNS driver, plus rejection of an unknown record type."""
        zone1 = "example.org"
        zone2 = "example.com"
        driver = self.network.instance_dns_manager
        driver.create_entry("hostone", "10.0.0.1", "A", zone1)
        driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
        driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
        driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
        driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
        # After the delete and the two address changes, 10.0.0.1 should map
        # to exactly hostthree and hostfour.
        driver.delete_entry("hostone", zone1)
        driver.modify_address("hostfour", "10.0.0.1", zone1)
        driver.modify_address("hostthree", "10.0.0.1", zone1)
        names = driver.get_entries_by_address("10.0.0.1", zone1)
        self.assertEqual(len(names), 2)
        self.assertIn('hostthree', names)
        self.assertIn('hostfour', names)
        names = driver.get_entries_by_address("10.0.0.5", zone2)
        self.assertEqual(len(names), 1)
        self.assertIn('hostfive', names)
        addresses = driver.get_entries_by_name("hosttwo", zone1)
        self.assertEqual(len(addresses), 1)
        self.assertIn('10.0.0.2', addresses)
        # Unknown record types are rejected.
        self.assertRaises(exception.InvalidInput,
                          driver.create_entry,
                          "hostname",
                          "10.10.10.10",
                          "invalidtype",
                          zone1)
    def test_mini_dns_driver_with_mixed_case(self):
        """Entries created with mixed-case names can still be found and
        deleted via address lookup."""
        zone1 = "example.org"
        driver = self.network.instance_dns_manager
        driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
        addresses = driver.get_entries_by_address("10.0.0.10", zone1)
        self.assertEqual(len(addresses), 1)
        for n in addresses:
            driver.delete_entry(n, zone1)
        addresses = driver.get_entries_by_address("10.0.0.10", zone1)
        self.assertEqual(len(addresses), 0)
def test_instance_dns(self):
    """Allocating a fixed IP must register instance DNS entries for both
    the instance display name and its uuid.
    """
    # Neutralize the security-group refresh side effect for this test.
    self.stubs.Set(self.network,
        '_do_trigger_security_group_members_refresh_for_instance',
        lambda *a, **kw: None)
    fixedip = dict(test_fixed_ip.fake_fixed_ip,
                   address='192.168.0.101')
    self.mox.StubOutWithMock(db, 'network_get_by_uuid')
    self.mox.StubOutWithMock(db, 'network_update')
    self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
    self.mox.StubOutWithMock(db,
        'virtual_interface_get_by_instance_and_network')
    self.mox.StubOutWithMock(db, 'fixed_ip_update')
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
    self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
    # Record the expected db call sequence (mox verifies order on replay).
    db.fixed_ip_associate_pool(mox.IgnoreArg(),
                               mox.IgnoreArg(),
                               instance_uuid=mox.IgnoreArg(),
                               host=None
                               ).AndReturn(fixedip)
    db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
    db.fixed_ip_update(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       mox.IgnoreArg())
    quota.QUOTAS.reserve(mox.IgnoreArg(),
                         fixed_ips=mox.IgnoreArg()).AndReturn(None)
    db.instance_get_by_uuid(self.context,
                            mox.IgnoreArg(), use_slave=False,
                            columns_to_join=['info_cache',
                                             'security_groups']
                            ).AndReturn(fake_inst(display_name=HOST,
                                                  uuid=FAKEUUID))
    db.network_get_by_uuid(mox.IgnoreArg(),
                           mox.IgnoreArg()
                           ).AndReturn(dict(test_network.fake_network,
                                            **networks[0]))
    db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                      mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                          networks[0]['uuid'])
    instance_manager = self.network.instance_dns_manager
    # Display name resolves to the newly allocated address...
    addresses = instance_manager.get_entries_by_name(HOST,
                                    self.network.instance_dns_domain)
    self.assertEqual(len(addresses), 1)
    self.assertEqual(addresses[0], fixedip['address'])
    # ...and so does the instance uuid.
    addresses = instance_manager.get_entries_by_name(FAKEUUID,
                                    self.network.instance_dns_domain)
    self.assertEqual(len(addresses), 1)
    self.assertEqual(addresses[0], fixedip['address'])
def test_allocate_floating_ip(self):
    # This manager implements the floating-ip calls as no-ops.
    result = self.network.allocate_floating_ip(self.context, 1, None)
    self.assertIsNone(result)

def test_deallocate_floating_ip(self):
    result = self.network.deallocate_floating_ip(self.context, 1, None)
    self.assertIsNone(result)

def test_associate_floating_ip(self):
    result = self.network.associate_floating_ip(self.context, None, None)
    self.assertIsNone(result)

def test_disassociate_floating_ip(self):
    result = self.network.disassociate_floating_ip(self.context,
                                                   None, None)
    self.assertIsNone(result)
def test_get_networks_by_uuids_ordering(self):
    """_get_networks_by_uuids must return results in the requested uuid
    order, not in database order.
    """
    self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
    # Request 'bbbb...' first even though it sorts after 'aaaa...'.
    requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                          'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
    db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg()).AndReturn(
                                    [dict(test_network.fake_network, **net)
                                     for net in networks])
    self.mox.ReplayAll()
    res = self.network._get_networks_by_uuids(self.context,
                                              requested_networks)
    # networks[1] carries the 'bbbb...' uuid, so it must come first.
    self.assertEqual(res[0]['id'], 1)
    self.assertEqual(res[1]['id'], 0)
class VlanNetworkTestCase(test.TestCase):
    """Tests for the VLAN network manager."""

    def setUp(self):
        super(VlanNetworkTestCase, self).setUp()
        # Seed sample networks and wire the manager to the real db API.
        self.useFixture(test.SampleNetworks())
        self.network = network_manager.VlanManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)
        self.context_admin = context.RequestContext('testuser', 'testproject',
                                                    is_admin=True)

    def test_quota_driver_type(self):
        # The VLAN manager must use the no-op quota driver.
        self.assertEqual(quota.NoopQuotaDriver,
                         type(self.network.quotas._driver))
def test_vpn_allocate_fixed_ip(self):
    """With vpn=True the manager must go through
    fixed_ip_associate(..., reserved=True) rather than the generic pool.
    """
    self.mox.StubOutWithMock(db, 'fixed_ip_associate')
    self.mox.StubOutWithMock(db, 'fixed_ip_update')
    self.mox.StubOutWithMock(db,
        'virtual_interface_get_by_instance_and_network')
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    fixed = dict(test_fixed_ip.fake_fixed_ip,
                 address='192.168.0.1')
    # reserved=True is the key expectation of this test.
    db.fixed_ip_associate(mox.IgnoreArg(),
                          mox.IgnoreArg(),
                          mox.IgnoreArg(),
                          network_id=mox.IgnoreArg(),
                          reserved=True).AndReturn(fixed)
    db.fixed_ip_update(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       mox.IgnoreArg())
    db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
    db.instance_get_by_uuid(mox.IgnoreArg(),
                            mox.IgnoreArg(), use_slave=False,
                            columns_to_join=['info_cache',
                                             'security_groups']
                            ).AndReturn(fake_inst(display_name=HOST,
                                                  uuid=FAKEUUID))
    self.mox.ReplayAll()
    network = network_obj.Network._from_db_object(
        self.context, network_obj.Network(),
        dict(test_network.fake_network, **networks[0]))
    network.vpn_private_address = '192.168.0.2'
    self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
                                   vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
    """A vpn fixed-ip request against a network without an id must fail."""
    net = dict(networks[0],
               vpn_private_address='192.168.0.2',
               id=None)
    instance = db.instance_create(self.context, {})
    self.assertRaises(exception.FixedIpNotFoundForNetwork,
                      self.network.allocate_fixed_ip,
                      self.context_admin,
                      instance['uuid'],
                      net,
                      vpn=True)
def test_allocate_fixed_ip(self):
    """A normal (non-vpn) allocation pulls an address from the pool."""
    # Neutralize the security-group refresh side effect.
    self.stubs.Set(self.network,
        '_do_trigger_security_group_members_refresh_for_instance',
        lambda *a, **kw: None)
    self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
    self.mox.StubOutWithMock(db, 'fixed_ip_update')
    self.mox.StubOutWithMock(db,
        'virtual_interface_get_by_instance_and_network')
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    fixed = dict(test_fixed_ip.fake_fixed_ip,
                 address='192.168.0.1')
    # Pool association (not the reserved vpn path) is the expectation.
    db.fixed_ip_associate_pool(mox.IgnoreArg(),
                               mox.IgnoreArg(),
                               instance_uuid=mox.IgnoreArg(),
                               host=None).AndReturn(fixed)
    db.fixed_ip_update(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       mox.IgnoreArg())
    db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
    db.instance_get_by_uuid(mox.IgnoreArg(),
                            mox.IgnoreArg(), use_slave=False,
                            columns_to_join=['info_cache',
                                             'security_groups']
                            ).AndReturn(fake_inst(display_name=HOST,
                                                  uuid=FAKEUUID))
    self.mox.ReplayAll()
    network = network_obj.Network._from_db_object(
        self.context, network_obj.Network(),
        dict(test_network.fake_network, **networks[0]))
    network.vpn_private_address = '192.168.0.2'
    self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
def test_create_networks_too_big(self):
    """Requesting more VLAN networks than there are VLAN ids must fail."""
    create = self.network.create_networks
    self.assertRaises(ValueError, create, None,
                      num_networks=4094, vlan_start=1)

def test_create_networks_too_many(self):
    """100 networks of size 100 cannot fit into a single /24."""
    create = self.network.create_networks
    self.assertRaises(ValueError, create, None,
                      num_networks=100, vlan_start=1,
                      cidr='192.168.0.1/24', network_size=100)

def test_duplicate_vlan_raises(self):
    # VLAN 100 is already used and we force the network to be created
    # in that vlan (vlan=100).
    create = self.network.create_networks
    self.assertRaises(exception.DuplicateVlan, create,
                      self.context_admin, label="fake", num_networks=1,
                      vlan=100, cidr='192.168.0.1/24', network_size=100)
def test_vlan_start(self):
    # VLAN 100 and 101 are used, so this network should land on 102.
    nets = self.network.create_networks(
        self.context_admin, label="fake", num_networks=1,
        vlan_start=100, cidr='192.168.3.1/24',
        network_size=100)
    self.assertEqual(102, nets[0]["vlan"])

def test_vlan_start_multiple(self):
    # VLAN 100 and 101 are used, so two networks should land on 102
    # and 103.
    nets = self.network.create_networks(
        self.context_admin, label="fake", num_networks=2,
        vlan_start=100, cidr='192.168.3.1/24',
        network_size=100)
    self.assertEqual([102, 103], [net["vlan"] for net in nets])

def test_vlan_start_used(self):
    # VLAN 100 and 101 are used even though vlan_start is 99, so the
    # allocator still ends up at 102.
    nets = self.network.create_networks(
        self.context_admin, label="fake", num_networks=1,
        vlan_start=99, cidr='192.168.3.1/24',
        network_size=100)
    self.assertEqual(102, nets[0]["vlan"])
@mock.patch('nova.db.network_get')
def test_validate_networks(self, net_get):
    """Happy path: both requested (uuid, fixed ip) pairs resolve to
    known networks with unassociated fixed ips, so no exception.
    """
    def network_get(_context, network_id, project_only='allow_none'):
        return dict(test_network.fake_network, **networks[network_id])
    net_get.side_effect = network_get
    self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
    self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
    requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                           '192.168.1.100'),
                          ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                           '192.168.0.100')]
    db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg()).AndReturn(
                                    [dict(test_network.fake_network, **net)
                                     for net in networks])
    # Each requested address comes back unassociated (instance_uuid=None)
    # on its matching network.
    db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
                     network_id=networks[1]['id'],
                     network=dict(test_network.fake_network,
                                  **networks[1]),
                     instance_uuid=None)
    db.fixed_ip_get_by_address(mox.IgnoreArg(),
                               mox.IgnoreArg(),
                               columns_to_join=mox.IgnoreArg()
                               ).AndReturn(db_fixed1)
    db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
                     network_id=networks[0]['id'],
                     network=dict(test_network.fake_network,
                                  **networks[0]),
                     instance_uuid=None)
    db.fixed_ip_get_by_address(mox.IgnoreArg(),
                               mox.IgnoreArg(),
                               columns_to_join=mox.IgnoreArg()
                               ).AndReturn(db_fixed2)
    self.mox.ReplayAll()
    self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
    """A None request list validates without error."""
    self.network.validate_networks(self.context, None)

def test_validate_networks_empty_requested_networks(self):
    """An empty request list validates without touching the db."""
    self.mox.ReplayAll()
    self.network.validate_networks(self.context, [])
def test_validate_networks_invalid_fixed_ip(self):
    """Malformed fixed ip strings (five octets) must raise
    FixedIpInvalid.
    """
    self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
    requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                           '192.168.1.100.1'),
                          ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                           '192.168.0.100.1')]
    db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg()).AndReturn(
                                    [dict(test_network.fake_network, **net)
                                     for net in networks])
    self.mox.ReplayAll()
    self.assertRaises(exception.FixedIpInvalid,
                      self.network.validate_networks, self.context,
                      requested_networks)

def test_validate_networks_empty_fixed_ip(self):
    """An empty-string fixed ip must raise FixedIpInvalid."""
    self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
    requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
                          ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
    db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg()).AndReturn(
                                    [dict(test_network.fake_network, **net)
                                     for net in networks])
    self.mox.ReplayAll()
    self.assertRaises(exception.FixedIpInvalid,
                      self.network.validate_networks,
                      self.context, requested_networks)

def test_validate_networks_none_fixed_ip(self):
    """A None fixed ip is accepted (no exception raised)."""
    self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
    requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
                          ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
    db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg()).AndReturn(
                                    [dict(test_network.fake_network, **net)
                                     for net in networks])
    self.mox.ReplayAll()
    self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
    """Non-admins may only touch floating ips of their own project;
    admins may touch any.
    """
    user_ctxt = context.RequestContext('testuser', 'testproject',
                                       is_admin=False)
    check = self.network._floating_ip_owned_by_project
    # raises because floating_ip project_id is None
    self.assertRaises(exception.NotAuthorized,
                      check,
                      user_ctxt,
                      {'address': '10.0.0.1',
                       'project_id': None})
    # raises because floating_ip project_id is not equal to ctxt project_id
    self.assertRaises(exception.NotAuthorized,
                      check,
                      user_ctxt,
                      {'address': '10.0.0.1',
                       'project_id': user_ctxt.project_id + '1'})
    # does not raise (floating ip is owned by ctxt project)
    check(user_ctxt, {'address': '10.0.0.1',
                      'project_id': user_ctxt.project_id})
    admin_ctxt = context.RequestContext(None, None,
                                        is_admin=True)
    # does not raise (ctxt is admin)
    check(admin_ctxt, {'address': '10.0.0.1',
                       'project_id': None})
    # does not raise (ctxt is admin)
    check(admin_ctxt, {'address': '10.0.0.1',
                       'project_id': 'testproject'})
def test_allocate_floating_ip(self):
    """Allocation succeeds when the db returns an address owned by the
    requesting project.
    """
    ctxt = context.RequestContext('testuser', 'testproject',
                                  is_admin=False)
    self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
                   lambda *args, **kwargs: {'address': '10.0.0.1',
                                            'project_id': ctxt.project_id})
    self.network.allocate_floating_ip(ctxt, ctxt.project_id)
def test_deallocate_floating_ip(self):
    """Deallocation must refuse while the floating ip is still bound to
    a fixed ip, and succeed once it is unbound.
    """
    ctxt = context.RequestContext('testuser', 'testproject',
                                  is_admin=False)

    # Generic no-op stub.
    def fake1(*args, **kwargs):
        pass

    # Floating ip still associated with a fixed ip.
    def fake2(*args, **kwargs):
        return {'address': '10.0.0.1', 'fixed_ip_id': 1}

    # Floating ip with no fixed ip association.
    def fake3(*args, **kwargs):
        return {'address': '10.0.0.1', 'fixed_ip_id': None,
                'project_id': ctxt.project_id}

    self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
    self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
    # this time should raise because floating ip is associated to fixed_ip
    self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
    self.assertRaises(exception.FloatingIpAssociated,
                      self.network.deallocate_floating_ip,
                      ctxt,
                      mox.IgnoreArg())
    # this time should not raise
    self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
    self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
def test_associate_floating_ip(self):
    """Walk associate_floating_ip through its branches: missing
    interface, already-associated ip, and remote vs. local fixed-ip
    hosts (stub order matters — each stage re-stubs the db calls).
    """
    ctxt = context.RequestContext('testuser', 'testproject',
                                  is_admin=False)

    def fake1(*args, **kwargs):
        return {'address': '10.0.0.1', 'network': 'fakenet'}

    # floating ip that's already associated
    def fake2(*args, **kwargs):
        return {'address': '10.0.0.1',
                'pool': 'nova',
                'interface': 'eth0',
                'fixed_ip_id': 1}

    # floating ip that isn't associated
    def fake3(*args, **kwargs):
        return {'address': '10.0.0.1',
                'pool': 'nova',
                'interface': 'eth0',
                'fixed_ip_id': None}

    # fixed ip with remote host
    def fake4(*args, **kwargs):
        return {'address': '10.0.0.1',
                'pool': 'nova',
                'instance_uuid': FAKEUUID,
                'interface': 'eth0',
                'network_id': 'blah'}

    def fake4_network(*args, **kwargs):
        return {'multi_host': False, 'host': 'jibberjabber'}

    # fixed ip with local host
    def fake5(*args, **kwargs):
        return {'address': '10.0.0.1',
                'pool': 'nova',
                'instance_uuid': FAKEUUID,
                'interface': 'eth0',
                'network_id': 'blahblah'}

    def fake5_network(*args, **kwargs):
        return {'multi_host': False, 'host': 'testhost'}

    # RPC call path marker (remote association).
    def fake6(ctxt, method, **kwargs):
        self.local = False

    # Local association marker.
    def fake7(*args, **kwargs):
        self.local = True

    # Simulates the driver failing on a missing device.
    def fake8(*args, **kwargs):
        raise processutils.ProcessExecutionError('',
                'Cannot find device "em0"\n')

    def fake9(*args, **kwargs):
        raise test.TestingException()

    # raises because interface doesn't exist
    self.stubs.Set(self.network.db,
                   'floating_ip_fixed_ip_associate',
                   fake1)
    self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
    self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
    self.assertRaises(exception.NoFloatingIpInterface,
                      self.network._associate_floating_ip,
                      ctxt,
                      mox.IgnoreArg(),
                      mox.IgnoreArg(),
                      mox.IgnoreArg(),
                      mox.IgnoreArg())
    self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
    # raises because floating_ip is already associated to a fixed_ip
    self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
    self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)

    def fake_fixed_ip_get(context, fixed_ip_id):
        return {'address': 'old', 'instance_uuid': 'fake_uuid'}
    self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
    # doesn't raise because we exit early if the address is the same
    self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), 'old')
    # raises because we call disassociate which is mocked
    self.assertRaises(test.TestingException,
                      self.network.associate_floating_ip,
                      ctxt,
                      mox.IgnoreArg(),
                      'new')
    self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
    # does not raise and makes call remotely
    self.local = True
    self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
    self.stubs.Set(self.network.db, 'network_get', fake4_network)
    self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
                   lambda **kw: self.network.network_rpcapi.client)
    self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
    self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                       mox.IgnoreArg())
    self.assertFalse(self.local)
    # does not raise and makes call locally
    self.local = False
    self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
    self.stubs.Set(self.network.db, 'network_get', fake5_network)
    self.stubs.Set(self.network, '_associate_floating_ip', fake7)
    self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                       mox.IgnoreArg())
    self.assertTrue(self.local)
def test_add_floating_ip_nat_before_bind(self):
    """The l3 driver must install the NAT forward rule before binding
    the floating ip to the interface.
    """
    # Tried to verify order with documented mox record/verify
    # functionality, but it doesn't seem to work since I can't make it
    # fail. I'm using stubs and a flag for now, but if this mox feature
    # can be made to work, it would be a better way to test this.
    #
    # self.mox.StubOutWithMock(self.network.driver,
    #                          'ensure_floating_forward')
    # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
    #
    # self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
    #                                             mox.IgnoreArg(),
    #                                             mox.IgnoreArg(),
    #                                             mox.IgnoreArg())
    # self.network.driver.bind_floating_ip(mox.IgnoreArg(),
    #                                      mox.IgnoreArg())
    # self.mox.ReplayAll()
    nat_called = [False]

    def fake_nat(*args, **kwargs):
        nat_called[0] = True

    # bind must observe that NAT already happened.
    def fake_bind(*args, **kwargs):
        self.assertTrue(nat_called[0])

    self.stubs.Set(self.network.driver,
                   'ensure_floating_forward',
                   fake_nat)
    self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
    self.network.l3driver.add_floating_ip('fakefloat',
                                          'fakefixed',
                                          'fakeiface',
                                          'fakenet')
def _test_floating_ip_init_host(self, public_interface, expected_arg):
    """Helper: run init_host_floating_ips with the given
    public_interface flag and verify l3driver.add_floating_ip receives
    `expected_arg` as the interface.
    """
    def get_all_by_host(_context, _host):
        # Only the entry with fixed_ip_id=1 resolves to a fixed ip; the
        # others exercise the FixedIpNotFound path below.
        return [{'interface': 'foo',
                 'address': 'foo'},
                {'interface': 'fakeiface',
                 'address': 'fakefloat',
                 'fixed_ip_id': 1},
                {'interface': 'bar',
                 'address': 'bar',
                 'fixed_ip_id': 2}]
    self.stubs.Set(self.network.db, 'floating_ip_get_all_by_host',
                   get_all_by_host)

    def fixed_ip_get(_context, fixed_ip_id, get_network):
        if fixed_ip_id == 1:
            return {'address': 'fakefixed', 'network': 'fakenet'}
        raise exception.FixedIpNotFound(id=fixed_ip_id)
    self.stubs.Set(self.network.db, 'fixed_ip_get', fixed_ip_get)

    self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
    self.flags(public_interface=public_interface)
    # Record the single expected driver call; mox verifies it below.
    self.network.l3driver.add_floating_ip('fakefloat',
                                          'fakefixed',
                                          expected_arg,
                                          'fakenet')
    self.mox.ReplayAll()
    self.network.init_host_floating_ips()
    self.mox.UnsetStubs()
    self.mox.VerifyAll()
def test_floating_ip_init_host_without_public_interface(self):
    # Without a public_interface flag, the per-ip interface is expected.
    self._test_floating_ip_init_host(expected_arg='fakeiface',
                                     public_interface=False)

def test_floating_ip_init_host_with_public_interface(self):
    # A configured public_interface is expected instead.
    self._test_floating_ip_init_host(expected_arg='fooiface',
                                     public_interface='fooiface')
def test_disassociate_floating_ip(self):
    """Walk disassociate_floating_ip through its branches: unassociated
    ip, remote vs. local fixed-ip host, and auto-assigned ip (stub
    order matters — each stage re-stubs the db calls).
    """
    ctxt = context.RequestContext('testuser', 'testproject',
                                  is_admin=False)

    def fake1(*args, **kwargs):
        pass

    # floating ip that isn't associated
    def fake2(*args, **kwargs):
        return {'address': '10.0.0.1',
                'pool': 'nova',
                'interface': 'eth0',
                'fixed_ip_id': None}

    # floating ip that is associated
    def fake3(*args, **kwargs):
        return {'address': '10.0.0.1',
                'pool': 'nova',
                'interface': 'eth0',
                'fixed_ip_id': 1,
                'project_id': ctxt.project_id}

    # fixed ip with remote host
    def fake4(*args, **kwargs):
        return {'address': '10.0.0.1',
                'pool': 'nova',
                'instance_uuid': FAKEUUID,
                'interface': 'eth0',
                'network_id': 'blah'}

    def fake4_network(*args, **kwargs):
        return {'multi_host': False,
                'host': 'jibberjabber'}

    # fixed ip with local host
    def fake5(*args, **kwargs):
        return {'address': '10.0.0.1',
                'pool': 'nova',
                'instance_uuid': FAKEUUID,
                'interface': 'eth0',
                'network_id': 'blahblah'}

    def fake5_network(*args, **kwargs):
        return {'multi_host': False, 'host': 'testhost'}

    # RPC call path marker (remote disassociation).
    def fake6(ctxt, method, **kwargs):
        self.local = False

    # Local disassociation marker.
    def fake7(*args, **kwargs):
        self.local = True

    # auto_assigned floating ip
    def fake8(*args, **kwargs):
        return {'address': '10.0.0.1',
                'pool': 'nova',
                'interface': 'eth0',
                'fixed_ip_id': 1,
                'auto_assigned': True,
                'project_id': ctxt.project_id}

    self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
    # raises because floating_ip is not associated to a fixed_ip
    self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
    self.assertRaises(exception.FloatingIpNotAssociated,
                      self.network.disassociate_floating_ip,
                      ctxt,
                      mox.IgnoreArg())
    self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
    # does not raise and makes call remotely
    self.local = True
    self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
    self.stubs.Set(self.network.db, 'network_get', fake4_network)
    self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
                   lambda **kw: self.network.network_rpcapi.client)
    self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
    self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
    self.assertFalse(self.local)
    # does not raise and makes call locally
    self.local = False
    self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
    self.stubs.Set(self.network.db, 'network_get', fake5_network)
    self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
    self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
    self.assertTrue(self.local)
    # raises because auto_assigned floating IP cannot be disassociated
    self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
    self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
                      self.network.disassociate_floating_ip,
                      ctxt,
                      mox.IgnoreArg())
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
    """add_fixed_ip_to_instance by network id (not uuid) allocates from
    the pool and refreshes the instance nw info.
    """
    # Neutralize the security-group refresh side effect.
    self.stubs.Set(self.network,
        '_do_trigger_security_group_members_refresh_for_instance',
        lambda *a, **kw: None)
    self.mox.StubOutWithMock(db, 'network_get')
    self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
    self.mox.StubOutWithMock(db,
        'virtual_interface_get_by_instance_and_network')
    self.mox.StubOutWithMock(db, 'fixed_ip_update')
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
    db.fixed_ip_update(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       mox.IgnoreArg())
    db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
    fixed = dict(test_fixed_ip.fake_fixed_ip,
                 address='192.168.0.101')
    db.fixed_ip_associate_pool(mox.IgnoreArg(),
                               mox.IgnoreArg(),
                               instance_uuid=mox.IgnoreArg(),
                               host=None).AndReturn(fixed)
    # Lookup happens via network_get because the test passes an id.
    db.network_get(mox.IgnoreArg(),
                   mox.IgnoreArg(),
                   project_only=mox.IgnoreArg()
                   ).AndReturn(dict(test_network.fake_network,
                                    **networks[0]))
    db.instance_get_by_uuid(mox.IgnoreArg(),
                            mox.IgnoreArg(), use_slave=False,
                            columns_to_join=['info_cache',
                                             'security_groups']
                            ).AndReturn(fake_inst(display_name=HOST,
                                                  uuid=FAKEUUID))
    self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                      mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                          networks[0]['id'])
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
def test_ip_association_and_allocation_of_other_project(self, net_get,
                                                        fixed_get):
    """Makes sure that we cannot deallocate or disassociate
    a public ip of other project.
    """
    net_get.return_value = dict(networks[1])
    context1 = context.RequestContext('user', 'project1')
    context2 = context.RequestContext('user', 'project2')
    # Floating ip owned by project1.
    float_ip = db.floating_ip_create(context1.elevated(),
                                     {'address': '1.2.3.4',
                                      'project_id': context1.project_id})
    float_addr = float_ip['address']
    instance = db.instance_create(context1,
                                  {'project_id': 'project1'})
    fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
                                          1, instance['uuid']).address
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  address=fix_addr,
                                  instance_uuid=instance.uuid,
                                  network=dict(test_network.fake_network,
                                               **networks[1]))
    # Associate the IP with non-admin user context
    self.assertRaises(exception.NotAuthorized,
                      self.network.associate_floating_ip,
                      context2,
                      float_addr,
                      fix_addr)
    # Deallocate address from other project
    self.assertRaises(exception.NotAuthorized,
                      self.network.deallocate_floating_ip,
                      context2,
                      float_addr)
    # Now Associates the address to the actual project
    self.network.associate_floating_ip(context1, float_addr, fix_addr)
    # Now try dis-associating from other project
    self.assertRaises(exception.NotAuthorized,
                      self.network.disassociate_floating_ip,
                      context2,
                      float_addr)
    # Clean up the ip addresses
    self.network.disassociate_floating_ip(context1, float_addr)
    self.network.deallocate_floating_ip(context1, float_addr)
    self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
    db.floating_ip_destroy(context1.elevated(), float_addr)
    db.fixed_ip_disassociate(context1.elevated(), fix_addr)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
    """Verify that release is called properly.

    Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
    """
    net_get.return_value = dict(test_network.fake_network,
                                **networks[1])

    def vif_get(_context, _vif_id):
        return vifs[0]
    self.stubs.Set(db, 'virtual_interface_get', vif_get)
    context1 = context.RequestContext('user', 'project1')
    instance = db.instance_create(context1,
                                  {'project_id': 'project1'})
    elevated = context1.elevated()
    fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  address=fix_addr.address,
                                  instance_uuid=instance.uuid,
                                  allocated=True,
                                  virtual_interface_id=3,
                                  network=dict(test_network.fake_network,
                                               **networks[1]))
    # With force_dhcp_release on, the dhcp lease must be released with
    # the vif's mac (vifs[0] — see the vif_get stub above).
    self.flags(force_dhcp_release=True)
    self.mox.StubOutWithMock(linux_net, 'release_dhcp')
    linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
                           'DE:AD:BE:EF:00:00')
    self.mox.ReplayAll()
    self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
    # The fixed ip record must be marked unallocated and detached.
    fixed_update.assert_called_once_with(context1, fix_addr.address,
                                         {'allocated': False,
                                          'virtual_interface_id': None})
def test_deallocate_fixed_deleted(self):
    """Verify doesn't deallocate deleted fixed_ip from deleted network.

    A soft-deleted fixed ip is recreated on a newly created network;
    deallocating with read_deleted='yes' is expected to tear down the
    new network (the stub below raises when that happens).
    """
    def teardown_network_on_host(_context, network):
        # Presumably network id 0 is the newly created network here —
        # raising lets assertRaises below detect the teardown.
        if network['id'] == 0:
            raise test.TestingException()
    self.stubs.Set(self.network, '_teardown_network_on_host',
                   teardown_network_on_host)
    context1 = context.RequestContext('user', 'project1')
    elevated = context1.elevated()
    instance = db.instance_create(context1,
                                  {'project_id': 'project1'})
    network = db.network_create_safe(elevated, networks[0])
    _fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
    fix_addr = _fix_addr.address
    # Soft-delete the fixed ip, then recreate the same address on the
    # new network (needs read_deleted='yes' to see the deleted row).
    db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
    elevated.read_deleted = 'yes'
    delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
    values = {'address': fix_addr,
              'network_id': network.id,
              'instance_uuid': delfixed['instance_uuid']}
    db.fixed_ip_create(elevated, values)
    # NOTE(review): the original code set read_deleted to 'no' here and
    # immediately back to 'yes'; the dead 'no' assignment was removed.
    elevated.read_deleted = 'yes'
    deallocate = self.network.deallocate_fixed_ip
    self.assertRaises(test.TestingException, deallocate, context1,
                      fix_addr, 'fake')
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
def test_deallocate_fixed_no_vif(self, net_get, fixed_get):
    """Verify that deallocate doesn't raise when no vif is returned.

    Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
    """
    net_get.return_value = dict(test_network.fake_network,
                                **networks[1])

    # The vif lookup returns nothing — the dhcp-release path must cope.
    def vif_get(_context, _vif_id):
        return None
    self.stubs.Set(db, 'virtual_interface_get', vif_get)
    context1 = context.RequestContext('user', 'project1')
    instance = db.instance_create(context1,
                                  {'project_id': 'project1'})
    elevated = context1.elevated()
    fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  allocated=True,
                                  virtual_interface_id=3,
                                  instance_uuid=instance.uuid,
                                  network=dict(test_network.fake_network,
                                               **networks[1]))
    self.flags(force_dhcp_release=True)
    # Must complete without raising despite the missing vif.
    self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
    # Verify IP is not deallocated if the security group refresh fails.
    net_get.return_value = dict(test_network.fake_network,
                                **networks[1])
    context1 = context.RequestContext('user', 'project1')
    instance = db.instance_create(context1,
                                  {'project_id': 'project1'})
    elevated = context1.elevated()
    fix_addr = fixed_ip_obj.FixedIP.associate_pool(elevated, 1,
                                                   instance['uuid'])

    # Make the security group refresh blow up mid-deallocation.
    def fake_refresh(instance_uuid):
        raise test.TestingException()
    self.stubs.Set(self.network,
        '_do_trigger_security_group_members_refresh_for_instance',
        fake_refresh)
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  address=fix_addr.address,
                                  allocated=True,
                                  virtual_interface_id=3,
                                  instance_uuid=instance.uuid,
                                  network=dict(test_network.fake_network,
                                               **networks[1]))
    self.assertRaises(test.TestingException,
                      self.network.deallocate_fixed_ip,
                      context1, str(fix_addr.address), 'fake')
    # The deallocating update must not have been issued.
    self.assertFalse(fixed_update.called)
def test_get_networks_by_uuids_ordering(self):
    """_get_networks_by_uuids must return results in the requested uuid
    order, not in database order.
    """
    self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
    # Request 'bbbb...' first even though it sorts after 'aaaa...'.
    requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                          'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
    db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg()).AndReturn(
                                    [dict(test_network.fake_network, **net)
                                     for net in networks])
    self.mox.ReplayAll()
    res = self.network._get_networks_by_uuids(self.context,
                                              requested_networks)
    # networks[1] carries the 'bbbb...' uuid, so it must come first.
    self.assertEqual(res[0]['id'], 1)
    self.assertEqual(res[1]['id'], 0)
class _TestDomainObject(object):
    """Minimal stand-in for a dns-domain record: exposes every keyword
    argument as an attribute.
    """

    def __init__(self, **kwargs):
        # iteritems() is Python-2-only; items() behaves identically here
        # and keeps the helper importable under Python 3 as well.
        for key, value in kwargs.items():
            setattr(self, key, value)
class FakeNetwork(object):
    """Fake network record addressable both as attributes and via
    dict-style subscription; `vlan` defaults to None.
    """

    def __init__(self, **kwargs):
        self.vlan = None
        # iteritems() is Python-2-only; items() behaves identically here
        # and keeps the helper importable under Python 3 as well.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __getitem__(self, item):
        # Subscription simply forwards to attribute access.
        return getattr(self, item)
class CommonNetworkTestCase(test.TestCase):
    """Tests for behaviour shared by the network manager base class."""

    def setUp(self):
        super(CommonNetworkTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        # Pin the ipv6 backend and reset it so each test starts clean.
        self.flags(ipv6_backend='rfc2462')
        ipv6.reset_backend()
def test_validate_instance_zone_for_dns_domain(self):
    """An instance in the same availability zone as its dns domain
    validates successfully.
    """
    dns_domain = 'example.com'
    zone = 'test_az'
    registry = {dns_domain: _TestDomainObject(domain=dns_domain,
                                              availability_zone=zone)}
    self.stubs.Set(db, 'dnsdomain_get',
                   lambda context, instance_domain:
                       registry.get(instance_domain))
    instance = {'uuid': FAKEUUID,
                'availability_zone': zone}
    manager = network_manager.NetworkManager()
    self.assertTrue(manager._validate_instance_zone_for_dns_domain(
        self.context, instance))

def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None):
    """Stub fixed-ip creation hook; allocates nothing."""
    return None
def test_get_instance_nw_info_client_exceptions(self):
    """InstanceNotFound from the db layer must surface as a
    messaging.ExpectedException from get_instance_nw_info.
    """
    manager = network_manager.NetworkManager()
    self.mox.StubOutWithMock(manager.db,
                             'virtual_interface_get_by_instance')
    # Record the db call raising InstanceNotFound.
    manager.db.virtual_interface_get_by_instance(
        self.context, FAKEUUID,
        use_slave=False).AndRaise(exception.InstanceNotFound(
            instance_id=FAKEUUID))
    self.mox.ReplayAll()
    self.assertRaises(messaging.ExpectedException,
                      manager.get_instance_nw_info,
                      self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
@mock.patch('nova.db.instance_get')
def test_deallocate_for_instance_passes_host_info(self, instance_get):
    """deallocate_for_instance must forward the host argument to each
    per-address deallocate_fixed_ip call.
    """
    manager = fake_network.FakeNetworkManager()
    # NOTE: this local `db` shadows the module-level db for the rest of
    # the test — it is the fake manager's db, not the real API.
    db = manager.db
    instance_get.return_value = fake_inst(uuid='ignoreduuid')
    db.virtual_interface_delete_by_instance = lambda _x, _y: None
    ctx = context.RequestContext('igonre', 'igonre')
    db.fixed_ip_get_by_instance = lambda x, y: [dict(address='1.2.3.4',
                                                     network_id='ignoredid')]
    manager.deallocate_for_instance(
        ctx, instance_id='ignore', host='somehost')
    # The fake manager records (context, address, host) per call.
    self.assertEqual([
        (ctx, '1.2.3.4', 'somehost')
    ], manager.deallocate_fixed_ip_calls)
def test_remove_fixed_ip_from_instance(self):
    """Removing a fixed ip deallocates exactly that address."""
    manager = fake_network.FakeNetworkManager()
    manager.remove_fixed_ip_from_instance(self.context, 99,
                                          HOST,
                                          '10.0.0.1')
    self.assertEqual('10.0.0.1', manager.deallocate_called)

def test_remove_fixed_ip_from_instance_bad_input(self):
    """An address the instance does not own must raise."""
    manager = fake_network.FakeNetworkManager()
    remove = manager.remove_fixed_ip_from_instance
    self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
                      remove, self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
    """A single /24 request yields exactly one network with that cidr."""
    manager = fake_network.FakeNetworkManager()
    nets = manager.create_networks(self.context.elevated(), 'fake',
                                   '192.168.0.0/24',
                                   False, 1, 256, None, None, None,
                                   None, None)
    self.assertEqual(1, len(nets))
    cidrs = [str(net['cidr']) for net in nets]
    self.assertIn('192.168.0.0/24', cidrs)
def test_validate_cidrs_split_exact_in_half(self):
    """Requesting two 128-address networks splits the /24 into two /25s."""
    manager = fake_network.FakeNetworkManager()
    nets = manager.create_networks(self.context.elevated(), 'fake',
                                   '192.168.0.0/24',
                                   False, 2, 128, None, None, None,
                                   None, None)
    self.assertEqual(2, len(nets))
    cidrs = [str(net['cidr']) for net in nets]
    self.assertIn('192.168.0.0/25', cidrs)
    self.assertIn('192.168.0.128/25', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
    """Subnet allocation skips an in-use /24 in the middle of the range."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.2.0/24')]
    nets = manager.create_networks(self.context.elevated(), 'fake',
                                   '192.168.0.0/16',
                                   False, 4, 256, None, None, None,
                                   None, None)
    self.assertEqual(4, len(nets))
    cidrs = [str(net['cidr']) for net in nets]
    # 192.168.2.0/24 is taken, so allocation jumps over it.
    exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                 '192.168.4.0/24']
    for exp_cidr in exp_cidrs:
        self.assertIn(exp_cidr, cidrs)
    self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
    """A request overlapping an existing smaller cidr raises CidrConflict."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.2.9/25')]
    # CidrConflict: requested cidr (192.168.2.0/24) conflicts with
    # existing smaller cidr
    args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
            1, 256, None, None, None, None, None)
    self.assertRaises(exception.CidrConflict,
                      manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
    """Splitting a /16 skips the /24 that contains an in-use /25."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.2.0/25')]
    nets = manager.create_networks(self.context.elevated(), 'fake',
                                   '192.168.0.0/16',
                                   False, 4, 256, None, None, None, None,
                                   None)
    self.assertEqual(4, len(nets))
    cidrs = [str(net['cidr']) for net in nets]
    exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                 '192.168.4.0/24']
    for exp_cidr in exp_cidrs:
        self.assertIn(exp_cidr, cidrs)
    self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
    """Splitting a /24 into /27s skips the /27 containing an in-use /29."""
    manager = fake_network.FakeNetworkManager()
    # Fix: removed a leftover expectation-less
    # self.mox.StubOutWithMock(manager.db, 'network_get_all') from the
    # mox->mock conversion -- network_get_all is already patched by the
    # decorator above, so the mox stub was dead code.
    get_all.return_value = [dict(test_network.fake_network, id=1,
                                 cidr='192.168.2.9/29')]
    nets = manager.create_networks(self.context.elevated(), 'fake',
                                   '192.168.2.0/24',
                                   False, 3, 32, None, None, None, None,
                                   None)
    self.assertEqual(3, len(nets))
    cidrs = [str(net['cidr']) for net in nets]
    # The first /27 (192.168.2.0/27) contains the in-use /29 and is skipped.
    exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
    for exp_cidr in exp_cidrs:
        self.assertIn(exp_cidr, cidrs)
    self.assertNotIn('192.168.2.0/27', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_all_in_use(self, get_all):
    """When existing subnets leave too few free, CidrConflict is raised."""
    manager = fake_network.FakeNetworkManager()
    in_use = [dict(test_network.fake_network, **values) for values in
              [{'id': 1, 'cidr': '192.168.2.9/29'},
               {'id': 2, 'cidr': '192.168.2.64/26'},
               {'id': 3, 'cidr': '192.168.2.128/26'}]]
    get_all.return_value = in_use
    args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
            3, 64, None, None, None, None, None)
    # CidrConflict: Not enough subnets avail to satisfy requested num_
    # networks - some subnets in requested range already
    # in use
    self.assertRaises(exception.CidrConflict,
                      manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
    """Requesting more addresses than the cidr holds raises ValueError."""
    manager = fake_network.FakeNetworkManager()
    args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
            None, None, None)
    # ValueError: network_size * num_networks exceeds cidr size
    self.assertRaises(ValueError, manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_already_used(self, get_all):
    """Requesting a cidr identical to an existing one raises CidrConflict."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 cidr='192.168.0.0/24')]
    # CidrConflict: cidr already in use
    args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
            1, 256, None, None, None, None, None)
    self.assertRaises(exception.CidrConflict,
                      manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
    """Asking for more subnets than the cidr can hold raises ValueError."""
    manager = fake_network.FakeNetworkManager()
    args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
            None, None, None)
    # ValueError: Not enough subnets avail to satisfy requested
    # num_networks
    self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
    """Two /24s are carved from the start of a /16 range."""
    manager = fake_network.FakeNetworkManager()
    nets = manager.create_networks(self.context.elevated(), 'fake',
                                   '192.168.0.0/16',
                                   False, 2, 256, None, None, None, None,
                                   None)
    returned_cidrs = [str(net['cidr']) for net in nets]
    self.assertIn('192.168.0.0/24', returned_cidrs)
    self.assertIn('192.168.1.0/24', returned_cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_conflict_existing_supernet(self, get_all):
    """A request inside an existing larger cidr raises CidrConflict."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.0.0/8')]
    args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
            1, 256, None, None, None, None, None)
    # CidrConflict: requested cidr (192.168.0.0/24) conflicts
    # with existing supernet
    self.assertRaises(exception.CidrConflict,
                      manager.create_networks, *args)
def test_create_networks(self):
    """create_networks succeeds for a single v4+v6 network."""
    cidr = '192.168.0.0/24'
    manager = fake_network.FakeNetworkManager()
    # Skip real fixed-IP creation; only network creation is under test.
    self.stubs.Set(manager, '_create_fixed_ips',
                   self.fake_create_fixed_ips)
    args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
            'fd00::/48', None, None, None, None, None]
    self.assertTrue(manager.create_networks(*args))
@mock.patch('nova.db.network_get_all')
def test_create_networks_cidr_already_used(self, get_all):
    """Creating a network over an existing cidr raises CidrConflict."""
    manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.0.0/24')]
    args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
            'fd00::/48', None, None, None, None, None]
    self.assertRaises(exception.CidrConflict,
                      manager.create_networks, *args)
def test_create_networks_many(self):
    """create_networks succeeds when splitting a /16 into 10 networks."""
    cidr = '192.168.0.0/16'
    manager = fake_network.FakeNetworkManager()
    self.stubs.Set(manager, '_create_fixed_ips',
                   self.fake_create_fixed_ips)
    args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
            'fd00::/48', None, None, None, None, None]
    self.assertTrue(manager.create_networks(*args))
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ip_regex(self, network_get):
    """The 'ip' filter matches IPv4 addresses as a regular expression."""
    manager = fake_network.FakeNetworkManager(self.stubs)
    _vifs = manager.db.virtual_interface_get_all(None)
    fake_context = context.RequestContext('user', 'project')
    network_get.return_value = dict(test_network.fake_network,
                                    **manager.db.network_get(None, 1))

    # Greedy: get everything
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '.*'})
    self.assertEqual(len(res), len(_vifs))

    # Doesn't exist
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '10.0.0.1'})
    self.assertFalse(res)

    # Get instance 1
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '172.16.0.2'})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])

    # Get instance 2
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '173.16.0.2'})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])

    # Get instance 0 and 1
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '172.16.0.*'})
    self.assertTrue(res)
    self.assertEqual(len(res), 2)
    self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
    self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])

    # Get instance 1 and 2 ('.' in the pattern matches 172.x and 173.x)
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '17..16.0.2'})
    self.assertTrue(res)
    self.assertEqual(len(res), 2)
    self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
    self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ipv6_regex(self, network_get):
    """The 'ip6' filter matches IPv6 addresses as a regular expression."""
    manager = fake_network.FakeNetworkManager(self.stubs)
    _vifs = manager.db.virtual_interface_get_all(None)
    fake_context = context.RequestContext('user', 'project')

    def _network_get(context, network_id, **args):
        # Wrap the fake db's network record in a full fake-network dict.
        return dict(test_network.fake_network,
                    **manager.db.network_get(context, network_id))

    network_get.side_effect = _network_get

    # Greedy: get everything
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '.*'})
    self.assertEqual(len(res), len(_vifs))

    # Doesn't exist
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '.*1034.*'})
    self.assertFalse(res)

    # Get instance 1
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '2001:.*2'})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])

    # Get instance 2
    ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': ip6})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])

    # Get instance 0 and 1
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '.*ef0[1,2]'})
    self.assertTrue(res)
    self.assertEqual(len(res), 2)
    self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
    self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])

    # Get instance 1 and 2
    ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': ip6})
    self.assertTrue(res)
    self.assertEqual(len(res), 2)
    self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
    self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ip(self, network_get):
    """The 'fixed_ip' filter matches exact addresses only (no regex)."""
    manager = fake_network.FakeNetworkManager(self.stubs)
    _vifs = manager.db.virtual_interface_get_all(None)
    fake_context = context.RequestContext('user', 'project')
    network_get.return_value = dict(test_network.fake_network,
                                    **manager.db.network_get(None, 1))

    # No regex for you!
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': '.*'})
    self.assertFalse(res)

    # Doesn't exist
    ip = '10.0.0.1'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': ip})
    self.assertFalse(res)

    # Get instance 1
    ip = '172.16.0.2'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': ip})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])

    # Get instance 2
    ip = '173.16.0.2'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': ip})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network(self, get):
    """get_network returns the network matching the requested UUID."""
    manager = fake_network.FakeNetworkManager()
    fake_context = context.RequestContext('user', 'project')
    get.return_value = dict(test_network.fake_network, **networks[0])
    uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    network = manager.get_network(fake_context, uuid)
    self.assertEqual(network['uuid'], uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network_not_found(self, get):
    """A db NetworkNotFoundForUUID surfaces as NetworkNotFound."""
    manager = fake_network.FakeNetworkManager()
    fake_context = context.RequestContext('user', 'project')
    get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
    uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    self.assertRaises(exception.NetworkNotFound,
                      manager.get_network, fake_context, uuid)
@mock.patch('nova.db.network_get_all')
def test_get_all_networks(self, get_all):
    """get_all_networks returns every network, in db order."""
    manager = fake_network.FakeNetworkManager()
    fake_context = context.RequestContext('user', 'project')
    get_all.return_value = [dict(test_network.fake_network, **net)
                            for net in networks]
    output = manager.get_all_networks(fake_context)
    # Fix: the original asserted len(networks) == 2, which checks the
    # module-level fixture rather than the method's return value; assert
    # on the output instead.
    self.assertEqual(2, len(output))
    self.assertEqual(output[0]['uuid'],
                     'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
    self.assertEqual(output[1]['uuid'],
                     'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
@mock.patch('nova.db.network_get_by_uuid')
@mock.patch('nova.db.network_disassociate')
def test_disassociate_network(self, disassociate, get):
    """disassociate_network completes for an existing network uuid."""
    manager = fake_network.FakeNetworkManager()
    disassociate.return_value = True
    fake_context = context.RequestContext('user', 'project')
    get.return_value = dict(test_network.fake_network,
                            **networks[0])
    uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    manager.disassociate_network(fake_context, uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_disassociate_network_not_found(self, get):
    """Disassociating an unknown network uuid raises NetworkNotFound."""
    manager = fake_network.FakeNetworkManager()
    fake_context = context.RequestContext('user', 'project')
    get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
    uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    self.assertRaises(exception.NetworkNotFound,
                      manager.disassociate_network, fake_context, uuid)
def _test_init_host_dynamic_fixed_range(self, net_manager):
    """Shared body: verify NAT iptables rules are derived per-network.

    With CONF.fixed_range unset, init_host must generate SNAT/POSTROUTING
    rules for each network returned by the db lookup, and
    _setup_network_on_host must add rules for a network added later.
    """
    self.flags(fake_network=True,
               routing_source_ip='172.16.0.1',
               metadata_host='172.16.0.1',
               public_interface='eth1',
               dmz_cidr=['10.0.3.0/24'])
    binary_name = linux_net.get_binary_name()

    # Stub out calls we don't want to really run, mock the db
    self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
    self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
                   lambda *args: None)
    self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
                   lambda *args: None)
    self.mox.StubOutWithMock(db, 'network_get_all_by_host')
    fake_networks = [dict(test_network.fake_network, **n)
                     for n in networks]
    db.network_get_all_by_host(mox.IgnoreArg(),
                               mox.IgnoreArg()
                               ).MultipleTimes().AndReturn(fake_networks)
    self.mox.ReplayAll()

    net_manager.init_host()

    # Get the iptables rules that got created
    current_lines = []
    new_lines = linux_net.iptables_manager._modify_rules(current_lines,
                                linux_net.iptables_manager.ipv4['nat'],
                                table_name='nat')
    # One SNAT rule plus three POSTROUTING accepts per known network.
    expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                      '-j SNAT --to-source %s -o %s'
                      % (binary_name, networks[0]['cidr'],
                         CONF.routing_source_ip,
                         CONF.public_interface),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                      % (binary_name, networks[0]['cidr'],
                         CONF.metadata_host),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                      % (binary_name, networks[0]['cidr'],
                         CONF.dmz_cidr[0]),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
                      '--ctstate DNAT -j ACCEPT' % (binary_name,
                                                    networks[0]['cidr'],
                                                    networks[0]['cidr']),
                      '[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                      '-j SNAT --to-source %s -o %s'
                      % (binary_name, networks[1]['cidr'],
                         CONF.routing_source_ip,
                         CONF.public_interface),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                      % (binary_name, networks[1]['cidr'],
                         CONF.metadata_host),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                      % (binary_name, networks[1]['cidr'],
                         CONF.dmz_cidr[0]),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
                      '--ctstate DNAT -j ACCEPT' % (binary_name,
                                                    networks[1]['cidr'],
                                                    networks[1]['cidr'])]

    # Compare the expected rules against the actual ones
    for line in expected_lines:
        self.assertIn(line, new_lines)

    # Add an additional network and ensure the rules get configured
    new_network = {'id': 2,
                   'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
                   'label': 'test2',
                   'injected': False,
                   'multi_host': False,
                   'cidr': '192.168.2.0/24',
                   'cidr_v6': '2001:dba::/64',
                   'gateway_v6': '2001:dba::1',
                   'netmask_v6': '64',
                   'netmask': '255.255.255.0',
                   'bridge': 'fa1',
                   'bridge_interface': 'fake_fa1',
                   'gateway': '192.168.2.1',
                   'broadcast': '192.168.2.255',
                   'dns1': '192.168.2.1',
                   'dns2': '192.168.2.2',
                   'vlan': None,
                   'host': HOST,
                   'project_id': 'fake_project',
                   'vpn_public_address': '192.168.2.2',
                   'vpn_public_port': '22',
                   'vpn_private_address': '10.0.0.2'}
    new_network_obj = network_obj.Network._from_db_object(
        self.context, network_obj.Network(),
        dict(test_network.fake_network, **new_network))

    ctxt = context.get_admin_context()
    net_manager._setup_network_on_host(ctxt, new_network_obj)

    # Get the new iptables rules that got created from adding a new network
    current_lines = []
    new_lines = linux_net.iptables_manager._modify_rules(current_lines,
                                linux_net.iptables_manager.ipv4['nat'],
                                table_name='nat')

    # Add the new expected rules to the old ones
    expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                       '-j SNAT --to-source %s -o %s'
                       % (binary_name, new_network['cidr'],
                          CONF.routing_source_ip,
                          CONF.public_interface),
                       '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                       % (binary_name, new_network['cidr'],
                          CONF.metadata_host),
                       '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                       % (binary_name, new_network['cidr'],
                          CONF.dmz_cidr[0]),
                       '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
                       '! --ctstate DNAT -j ACCEPT' % (binary_name,
                                                       new_network['cidr'],
                                                       new_network['cidr'])]

    # Compare the expected rules (with new network) against the actual ones
    for line in expected_lines:
        self.assertIn(line, new_lines)
def test_flatdhcpmanager_dynamic_fixed_range(self):
    """Test FlatDHCPManager NAT rules for fixed_range."""
    # Set the network manager
    self.network = network_manager.FlatDHCPManager(host=HOST)
    self.network.db = db

    # Test new behavior:
    #     CONF.fixed_range is not set, defaults to None
    #     Determine networks to NAT based on lookup
    self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
    """Test VlanManager NAT rules for fixed_range."""
    # Set the network manager
    self.network = network_manager.VlanManager(host=HOST)
    self.network.db = db

    # Test new behavior:
    #     CONF.fixed_range is not set, defaults to None
    #     Determine networks to NAT based on lookup
    self._test_init_host_dynamic_fixed_range(self.network)
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
        network_manager.NetworkManager):
    """Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.TestCase):
    """Tests nova.network.manager.RPCAllocateFixedIP."""
    def setUp(self):
        super(RPCAllocateTestCase, self).setUp()
        self.rpc_fixed = TestRPCFixedManager()
        self.context = context.RequestContext('fake', 'fake')

    def test_rpc_allocate(self):
        """Test to verify bug 855030 doesn't resurface.

        Makes sure _rpc_allocate_fixed_ip returns a value so the call
        returns properly and the greenpool completes.
        """
        address = '10.10.10.10'

        def fake_allocate(*args, **kwargs):
            return address

        def fake_network_get(*args, **kwargs):
            return test_network.fake_network

        self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
        self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
        rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
                                                     'fake_instance',
                                                     'fake_network')
        self.assertEqual(rval, address)
class TestFloatingIPManager(floating_ips.FloatingIP,
        network_manager.NetworkManager):
    """Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
    """End-to-end allocation tests against running compute/network services."""

    def setUp(self):
        super(AllocateTestCase, self).setUp()
        # Real services backed by SampleNetworks fixture data.
        self.useFixture(test.SampleNetworks())
        self.conductor = self.start_service(
            'conductor', manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.network = self.start_service('network')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)

    def test_allocate_for_instance(self):
        """Allocation yields one valid IPv4 fixed IP, then deallocates."""
        address = "10.10.10.10"
        self.flags(auto_assign_floating_ip=True)

        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.context.project_id
        nw_info = self.network.allocate_for_instance(self.context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=project_id, macs=None)
        self.assertEqual(1, len(nw_info))
        fixed_ip = nw_info.fixed_ips()[0]['address']
        self.assertTrue(utils.is_valid_ipv4(fixed_ip))
        self.network.deallocate_for_instance(self.context,
                instance_id=inst['id'], fixed_ips=fixed_ip,
                host=self.network.host, project_id=project_id)

    def test_allocate_for_instance_with_mac(self):
        """An explicitly supplied MAC is assigned to the created VIF."""
        available_macs = set(['ca:fe:de:ad:be:ef'])
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.context.project_id
        nw_info = self.network.allocate_for_instance(self.context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=project_id, macs=available_macs)
        assigned_macs = [vif['address'] for vif in nw_info]
        self.assertEqual(1, len(assigned_macs))
        self.assertEqual(available_macs.pop(), assigned_macs[0])
        self.network.deallocate_for_instance(self.context,
                                             instance_id=inst['id'],
                                             host=self.network.host,
                                             project_id=project_id)

    def test_allocate_for_instance_not_enough_macs(self):
        """An empty MAC pool makes VIF creation fail explicitly."""
        available_macs = set()
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.context.project_id
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self.network.allocate_for_instance, self.context,
                          instance_id=inst['id'], instance_uuid=inst['uuid'],
                          host=inst['host'], vpn=None, rxtx_factor=3,
                          project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
    """Tests nova.network.manager.FloatingIP."""
    def setUp(self):
        super(FloatingIPTestCase, self).setUp()
        # Log into a per-test temp dir so the tests don't pollute the host.
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.network = TestFloatingIPManager()
        self.network.db = db
        self.project_id = 'testproject'
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)
def test_disassociate_floating_ip_multi_host_calls(self):
    """On a multi-host network, disassociation is delegated over RPC to
    the host that owns the instance."""
    floating_ip = {
        'fixed_ip_id': 12
    }

    fixed_ip = {
        'network_id': None,
        'instance_uuid': 'instance-uuid'
    }

    network = {
        'multi_host': True
    }

    instance = {
        'host': 'some-other-host'
    }

    ctxt = context.RequestContext('testuser', 'testproject',
                                  is_admin=False)

    self.stubs.Set(self.network.db,
                   'floating_ip_get_by_address',
                   lambda _x, _y: floating_ip)

    self.stubs.Set(self.network,
                   '_floating_ip_owned_by_project',
                   lambda _x, _y: True)

    self.stubs.Set(self.network.db,
                   'fixed_ip_get',
                   lambda _x, _y: fixed_ip)

    self.stubs.Set(self.network.db,
                   'network_get',
                   lambda _x, _y: network)

    self.stubs.Set(self.network.db,
                   'instance_get_by_uuid',
                   lambda _x, _y: instance)

    self.stubs.Set(self.network.db,
                   'service_get_by_host_and_topic',
                   lambda _x, _y, _z: 'service')

    self.stubs.Set(self.network.servicegroup_api,
                   'service_is_up',
                   lambda _x: True)

    # Expect exactly one RPC to the instance's host.
    self.mox.StubOutWithMock(
        self.network.network_rpcapi, '_disassociate_floating_ip')

    self.network.network_rpcapi._disassociate_floating_ip(
        ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
    self.mox.ReplayAll()

    self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
def test_associate_floating_ip_multi_host_calls(self):
    """On a multi-host network, association is delegated over RPC to the
    host that owns the instance."""
    floating_ip = {
        'fixed_ip_id': None
    }

    fixed_ip = {
        'network_id': None,
        'instance_uuid': 'instance-uuid'
    }

    network = {
        'multi_host': True
    }

    instance = {
        'host': 'some-other-host'
    }

    ctxt = context.RequestContext('testuser', 'testproject',
                                  is_admin=False)

    self.stubs.Set(self.network.db,
                   'floating_ip_get_by_address',
                   lambda _x, _y: floating_ip)

    self.stubs.Set(self.network,
                   '_floating_ip_owned_by_project',
                   lambda _x, _y: True)

    self.stubs.Set(self.network.db,
                   'fixed_ip_get_by_address',
                   lambda _x, _y: fixed_ip)

    self.stubs.Set(self.network.db,
                   'network_get',
                   lambda _x, _y: network)

    self.stubs.Set(self.network.db,
                   'instance_get_by_uuid',
                   lambda _x, _y: instance)

    # Expect exactly one RPC to the instance's host.
    self.mox.StubOutWithMock(
        self.network.network_rpcapi, '_associate_floating_ip')

    self.network.network_rpcapi._associate_floating_ip(
        ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
        'instance-uuid')
    self.mox.ReplayAll()

    self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
def test_double_deallocation(self):
    """Deallocating twice must be harmless for address-less instances."""
    instance_ref = db.instance_create(self.context,
            {"project_id": self.project_id})

    # Run it twice to make it fault if it does not handle
    # instances without fixed networks
    # If this fails in either, it does not handle having no addresses
    self.network.deallocate_for_instance(self.context,
            instance_id=instance_ref['id'])
    self.network.deallocate_for_instance(self.context,
            instance_id=instance_ref['id'])
def test_deallocation_deleted_instance(self):
    """Deallocation succeeds even when the instance is already deleted."""
    self.stubs.Set(self.network, '_teardown_network_on_host',
                   lambda *args, **kwargs: None)
    instance = db.instance_create(self.context, {
        'project_id': self.project_id, 'deleted': True})
    network = db.network_create_safe(self.context.elevated(), {
        'project_id': self.project_id,
        'host': CONF.host,
        'label': 'foo'})
    fixed = db.fixed_ip_create(self.context, {'allocated': True,
        'instance_uuid': instance['uuid'], 'address': '10.1.1.1',
        'network_id': network['id']})
    db.floating_ip_create(self.context, {
        'address': '10.10.10.10', 'instance_uuid': instance['uuid'],
        'fixed_ip_id': fixed['id'],
        'project_id': self.project_id})
    self.network.deallocate_for_instance(self.context,
            instance_id=instance['uuid'])
def test_deallocation_duplicate_floating_ip(self):
    """A soft-deleted duplicate floating IP must not break deallocation."""
    self.stubs.Set(self.network, '_teardown_network_on_host',
                   lambda *args, **kwargs: None)
    instance = db.instance_create(self.context, {
        'project_id': self.project_id})
    network = db.network_create_safe(self.context.elevated(), {
        'project_id': self.project_id,
        'host': CONF.host,
        'label': 'foo'})
    fixed = db.fixed_ip_create(self.context, {'allocated': True,
        'instance_uuid': instance['uuid'], 'address': '10.1.1.1',
        'network_id': network['id']})
    # Same address twice: first entry is deleted, second is live.
    db.floating_ip_create(self.context, {
        'address': '10.10.10.10',
        'deleted': True})
    db.floating_ip_create(self.context, {
        'address': '10.10.10.10', 'instance_uuid': instance['uuid'],
        'fixed_ip_id': fixed['id'],
        'project_id': self.project_id})
    self.network.deallocate_for_instance(self.context,
            instance_id=instance['uuid'])
def test_migrate_instance_start(self):
    """migrate_instance_start removes only non-stale floating IPs."""
    called = {'count': 0}

    def fake_floating_ip_get_by_address(context, address):
        return {'address': address,
                'fixed_ip_id': 0}

    def fake_is_stale_floating_ip_address(context, floating_ip):
        # Only '172.24.4.23' is treated as stale, so 2 of 3 get removed.
        return floating_ip['address'] == '172.24.4.23'

    def fake_fixed_ip_get(context, fixed_ip_id, get_network):
        return {'instance_uuid': 'fake_uuid',
                'address': '10.0.0.2',
                'network': 'fakenet'}

    def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
                                network):
        called['count'] += 1

    def fake_clean_conntrack(fixed_ip):
        if not fixed_ip == "10.0.0.2":
            raise exception.FixedIpInvalid(address=fixed_ip)

    def fake_floating_ip_update(context, address, args):
        pass

    self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
                   fake_floating_ip_get_by_address)
    self.stubs.Set(self.network, '_is_stale_floating_ip_address',
                   fake_is_stale_floating_ip_address)
    self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
    self.stubs.Set(self.network.db, 'floating_ip_update',
                   fake_floating_ip_update)
    self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
                   fake_remove_floating_ip)
    self.stubs.Set(self.network.l3driver, 'clean_conntrack',
                   fake_clean_conntrack)
    self.mox.ReplayAll()
    addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
    self.network.migrate_instance_start(self.context,
                                        instance_uuid=FAKEUUID,
                                        floating_addresses=addresses,
                                        rxtx_factor=3,
                                        project_id=self.project_id,
                                        source='fake_source',
                                        dest='fake_dest')
    self.assertEqual(called['count'], 2)
def test_migrate_instance_finish(self):
    """migrate_instance_finish re-adds only non-stale floating IPs."""
    called = {'count': 0}

    def fake_floating_ip_get_by_address(context, address):
        return {'address': address,
                'fixed_ip_id': 0}

    def fake_is_stale_floating_ip_address(context, floating_ip):
        # Only '172.24.4.23' is treated as stale, so 2 of 3 get re-added.
        return floating_ip['address'] == '172.24.4.23'

    def fake_fixed_ip_get(context, fixed_ip_id, get_network):
        return {'instance_uuid': 'fake_uuid',
                'address': '10.0.0.2',
                'network': 'fakenet'}

    def fake_add_floating_ip(floating_addr, fixed_addr, interface,
                             network):
        called['count'] += 1

    def fake_floating_ip_update(context, address, args):
        pass

    self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
                   fake_floating_ip_get_by_address)
    self.stubs.Set(self.network, '_is_stale_floating_ip_address',
                   fake_is_stale_floating_ip_address)
    self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
    self.stubs.Set(self.network.db, 'floating_ip_update',
                   fake_floating_ip_update)
    self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                   fake_add_floating_ip)
    self.mox.ReplayAll()
    addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
    self.network.migrate_instance_finish(self.context,
                                         instance_uuid=FAKEUUID,
                                         floating_addresses=addresses,
                                         host='fake_dest',
                                         rxtx_factor=3,
                                         project_id=self.project_id,
                                         source='fake_source')
    self.assertEqual(called['count'], 2)
def test_floating_dns_create_conflict(self):
    """Adding the same DNS entry twice raises FloatingIpDNSExists."""
    zone = "example.org"
    address1 = "10.10.10.11"
    name1 = "foo"

    self.network.add_dns_entry(self.context, address1, name1, "A", zone)

    self.assertRaises(exception.FloatingIpDNSExists,
                      self.network.add_dns_entry, self.context,
                      address1, name1, "A", zone)
def test_floating_create_and_get(self):
    """DNS entries can be looked up both by address and by name."""
    zone = "example.org"
    address1 = "10.10.10.11"
    name1 = "foo"
    name2 = "bar"
    entries = self.network.get_dns_entries_by_address(self.context,
                                                      address1, zone)
    self.assertFalse(entries)

    self.network.add_dns_entry(self.context, address1, name1, "A", zone)
    self.network.add_dns_entry(self.context, address1, name2, "A", zone)
    entries = self.network.get_dns_entries_by_address(self.context,
                                                      address1, zone)
    self.assertEqual(len(entries), 2)
    self.assertEqual(entries[0], name1)
    self.assertEqual(entries[1], name2)

    entries = self.network.get_dns_entries_by_name(self.context,
                                                   name1, zone)
    self.assertEqual(len(entries), 1)
    self.assertEqual(entries[0], address1)
def test_floating_dns_delete(self):
    """Deleting one DNS name leaves the other; re-delete raises NotFound."""
    zone = "example.org"
    address1 = "10.10.10.11"
    name1 = "foo"
    name2 = "bar"

    self.network.add_dns_entry(self.context, address1, name1, "A", zone)
    self.network.add_dns_entry(self.context, address1, name2, "A", zone)
    self.network.delete_dns_entry(self.context, name1, zone)

    entries = self.network.get_dns_entries_by_address(self.context,
                                                      address1, zone)
    self.assertEqual(len(entries), 1)
    self.assertEqual(entries[0], name2)

    self.assertRaises(exception.NotFound,
                      self.network.delete_dns_entry, self.context,
                      name1, zone)
def test_floating_dns_domains_public(self):
    """Public DNS domains require admin; deleting a domain removes its
    entries."""
    zone1 = "testzone"
    domain1 = "example.org"
    domain2 = "example.com"
    address1 = '10.10.10.10'
    entryname = 'testentry'

    context_admin = context.RequestContext('testuser', 'testproject',
                                           is_admin=True)

    # Non-admin creation is rejected.
    self.assertRaises(exception.AdminRequired,
                      self.network.create_public_dns_domain, self.context,
                      domain1, zone1)
    self.network.create_public_dns_domain(context_admin, domain1,
                                          'testproject')
    self.network.create_public_dns_domain(context_admin, domain2,
                                          'fakeproject')

    domains = self.network.get_dns_domains(self.context)
    self.assertEqual(len(domains), 2)
    self.assertEqual(domains[0]['domain'], domain1)
    self.assertEqual(domains[1]['domain'], domain2)
    self.assertEqual(domains[0]['project'], 'testproject')
    self.assertEqual(domains[1]['project'], 'fakeproject')

    self.network.add_dns_entry(self.context, address1, entryname,
                               'A', domain1)
    entries = self.network.get_dns_entries_by_name(self.context,
                                                   entryname, domain1)
    self.assertEqual(len(entries), 1)
    self.assertEqual(entries[0], address1)

    # Non-admin deletion is rejected too.
    self.assertRaises(exception.AdminRequired,
                      self.network.delete_dns_domain, self.context,
                      domain1)
    self.network.delete_dns_domain(context_admin, domain1)
    self.network.delete_dns_domain(context_admin, domain2)

    # Verify that deleting the domain deleted the associated entry
    entries = self.network.get_dns_entries_by_name(self.context,
                                                   entryname, domain1)
    self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEqual(len(entries), 2)
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
    def test_mac_conflicts(self):
        """MAC address generation retries after a collision.

        Seeds the DB with a VIF owning ``aa:aa:aa:aa:aa:aa``, makes the MAC
        generator hand out that colliding address first, and checks that
        ``_add_virtual_interface`` retries until the unique MAC is used.
        """
        # Make sure MAC collisions are retried.
        self.flags(create_unique_mac_address_attempts=3)
        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
        # Popped right-to-left by fake_gen_mac: the colliding MAC (last
        # element) is handed out first, then the unique one.
        macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
        # Create a VIF with aa:aa:aa:aa:aa:aa
        crash_test_dummy_vif = {
            'address': macs[1],
            'instance_uuid': 'fake_uuid',
            'network_id': 123,
            'uuid': 'fake_uuid',
        }
        self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
        # Hand out a collision first, then a legit MAC
        def fake_gen_mac():
            return macs.pop()
        self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
        # SQLite doesn't seem to honor the uniqueness constraint on the
        # address column, so fake the collision-avoidance here
        def fake_vif_save(vif):
            if vif.address == crash_test_dummy_vif['address']:
                raise db_exc.DBError("If you're smart, you'll retry!")
            # NOTE(russellb) The VirtualInterface object requires an ID to be
            # set, and we expect it to get set automatically when we do the
            # save.
            vif.id = 1
        self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
        # Attempt to add another and make sure that both MACs are consumed
        # by the retry loop
        self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
        self.assertEqual(macs, [])
    def test_deallocate_client_exceptions(self):
        """deallocate_floating_ip wraps FloatingIpNotFoundForAddress in
        messaging.ExpectedException for the RPC client.
        """
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        # mox record/replay: the db lookup is programmed to raise.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
                exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.deallocate_floating_ip,
                          self.context, '1.2.3.4')
    def test_associate_client_exceptions(self):
        """associate_floating_ip wraps FloatingIpNotFoundForAddress in
        messaging.ExpectedException for the RPC client.
        """
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        # mox record/replay: the db lookup is programmed to raise.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
                exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.associate_floating_ip,
                          self.context, '1.2.3.4', '10.0.0.1')
    def test_disassociate_client_exceptions(self):
        """disassociate_floating_ip wraps FloatingIpNotFoundForAddress in
        messaging.ExpectedException for the RPC client.
        """
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        # mox record/replay: the db lookup is programmed to raise.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
                exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.disassociate_floating_ip,
                          self.context, '1.2.3.4')
    def test_get_floating_ip_client_exceptions(self):
        """get_floating_ip wraps FloatingIpNotFound in
        messaging.ExpectedException for the RPC client.
        """
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        # mox record/replay: the db lookup by id is programmed to raise.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
        self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
            exception.FloatingIpNotFound(id='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.get_floating_ip,
                          self.context, 'fake-id')
def _test_associate_floating_ip_failure(self, stdout, expected_exception):
def _fake_catchall(*args, **kwargs):
return {'id': 'fake', 'network': 'fake'}
def _fake_add_floating_ip(*args, **kwargs):
raise processutils.ProcessExecutionError(stdout)
self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
_fake_catchall)
self.stubs.Set(self.network.db, 'floating_ip_disassociate',
_fake_catchall)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
_fake_add_floating_ip)
self.assertRaises(expected_exception,
self.network._associate_floating_ip, self.context,
'', '', '', '')
def test_associate_floating_ip_failure(self):
self._test_associate_floating_ip_failure(None,
processutils.ProcessExecutionError)
def test_associate_floating_ip_failure_interface_not_found(self):
self._test_associate_floating_ip_failure('Cannot find device',
exception.NoFloatingIpInterface)
class InstanceDNSTestCase(test.TestCase):
    """Tests nova.network.manager instance DNS."""

    def setUp(self):
        super(InstanceDNSTestCase, self).setUp()
        # Redirect logging into a throwaway directory for this test.
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.network = TestFloatingIPManager()
        self.network.db = db
        self.project_id = 'testproject'
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)

    def test_dns_domains_private(self):
        """Private DNS domain create/delete is admin-only and the created
        domain carries its availability zone.
        """
        az = 'testzone'
        private_domain = 'example.org'
        admin_ctxt = context.RequestContext('testuser', 'testproject',
                                            is_admin=True)
        # Non-admin contexts may neither create nor delete private domains.
        self.assertRaises(exception.AdminRequired,
                          self.network.create_private_dns_domain, self.context,
                          private_domain, az)
        self.network.create_private_dns_domain(admin_ctxt, private_domain, az)
        domains = self.network.get_dns_domains(self.context)
        self.assertEqual(len(domains), 1)
        self.assertEqual(domains[0]['domain'], private_domain)
        self.assertEqual(domains[0]['availability_zone'], az)
        self.assertRaises(exception.AdminRequired,
                          self.network.delete_dns_domain, self.context,
                          private_domain)
        self.network.delete_dns_domain(admin_ctxt, private_domain)
# Module-level DNS domain names shared by LdapDNSTestCase's setUp/tearDown
# and its test methods.
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
    """Tests nova.network.ldapdns.LdapDNS against the fake ldap module."""

    def setUp(self):
        super(LdapDNSTestCase, self).setUp()

        # Swap the real ldap module for the in-memory fake for the test run.
        self.useFixture(test.ReplaceModule('ldap', fake_ldap))
        dns_class = 'nova.network.ldapdns.LdapDNS'
        self.driver = importutils.import_object(dns_class)

        # Seed the fake directory with the base entry the driver expects.
        attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                 'domain', 'dcobject', 'top'],
                 'associateddomain': ['root'],
                 'dc': ['root']}
        self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
        self.driver.create_domain(domain1)
        self.driver.create_domain(domain2)

    def tearDown(self):
        self.driver.delete_domain(domain1)
        self.driver.delete_domain(domain2)
        super(LdapDNSTestCase, self).tearDown()

    def test_ldap_dns_domains(self):
        """Both domains created in setUp are reported by get_domains."""
        domains = self.driver.get_domains()
        self.assertEqual(len(domains), 2)
        self.assertIn(domain1, domains)
        self.assertIn(domain2, domains)

    def test_ldap_dns_create_conflict(self):
        """Creating the same entry twice raises FloatingIpDNSExists."""
        address1 = "10.10.10.11"
        name1 = "foo"

        self.driver.create_entry(name1, address1, "A", domain1)

        self.assertRaises(exception.FloatingIpDNSExists,
                          self.driver.create_entry,
                          name1, address1, "A", domain1)

    def test_ldap_dns_create_and_get(self):
        """Entries are retrievable both by address and by name."""
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertFalse(entries)

        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEqual(len(entries), 2)
        self.assertEqual(entries[0], name1)
        self.assertEqual(entries[1], name2)

        entries = self.driver.get_entries_by_name(name1, domain1)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], address1)

    def test_ldap_dns_delete(self):
        """Deleting one of two entries leaves the other; deleting it again
        raises NotFound.
        """
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"

        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEqual(len(entries), 2)

        self.driver.delete_entry(name1, domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        # Pass entries lazily instead of pre-formatting with '%' so the
        # interpolation only happens when debug logging is enabled
        # (OpenStack hacking rule H904).
        LOG.debug("entries: %s", entries)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], name2)

        self.assertRaises(exception.NotFound,
                          self.driver.delete_entry,
                          name1, domain1)
|
OpenAcademy-OpenStack/nova-scheduler
|
nova/tests/network/test_manager.py
|
Python
|
apache-2.0
| 124,380
|
[
"FEFF"
] |
7df82921a58f7ea4ad7379a0767c3d34b21c618adcb64def96b92d1324ed7c46
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.