# Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
# Modifications to create the HMMLearn module: Gael Varoquaux
"""
The :mod:`hmmlearn.hmm` module implements hidden Markov models.
"""
import string
import cPickle
import numpy as np
import multiprocessing as mp
from numpy.random import multivariate_normal, normal
from sklearn.utils import check_random_state
from sklearn.utils.extmath import logsumexp
from sklearn.base import BaseEstimator
from sklearn.mixture import (
GMM, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from sklearn import cluster
from scipy.stats import (poisson, expon)
from copy import deepcopy
from .utils.fixes import (log_multivariate_normal_density,
log_poisson_pmf, log_exponential_density)
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'PoissonHMM',
'ExponentialHMM',
'MultinomialExponentialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
def batches(l, n):
""" Yield successive n-sized batches from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
def unwrap_self_estep(arg, **kwarg):
# module-level wrappers so the instance methods can be pickled by
# multiprocessing.Pool
return _BaseHMM._do_estep(*arg, **kwarg)
def unwrap_self_score(arg, **kwarg):
return _BaseHMM._score(*arg, **kwarg)
def merge_sum(x, y):
"""Return the element-wise sum of two dicts of arrays (or lists of
arrays) that share the same keys."""
D = {}
for k in x.keys():
if isinstance(x[k], list):
z = []
for i in xrange(len(x[k])):
z.append(x[k][i] + y[k][i])
D[k] = z
else:
D[k] = x[k] + y[k]
return D
def reduce_merge_sum(L):
return reduce(lambda x, y: merge_sum(x, y), L)
def log_normalize(A, axis=None):
arr = np.rollaxis(A, axis)
vmax = arr.max(axis=axis)
return normalize(np.exp((arr.T - vmax).T))
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
WARNING: Modifies the input array in place.
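Examples
--------
A minimal doctest-style sketch (the small ``EPS`` offset added by this
function makes the results approximate):

>>> import numpy as np
>>> p = normalize(np.array([1.0, 3.0]))      # p is approximately [0.25, 0.75]
>>> T = normalize(np.ones((2, 2)), axis=1)   # each row of T sums to 1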
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
def randomize(A, axis=None):
randomizer = np.random.rand(*A.shape) / 10.
Arand = A + randomizer
return normalize(Arand, axis)
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose == 1``, output is printed once in a while (when iteration
mod verbose_mod is zero); if greater than 1, output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
self.verbose_fmt = '{iter:>10d} {lpr:>16.4f} {improvement:>16.4f}'
self.verbose_mod = 1
def init(self):
header_fields = ['Iter', 'Log Likelihood', 'Log Improvement']
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
def update(self, i, lpr, improvement):
"""Update reporter with new iteration. """
# only report every verbose_mod iterations
if (i + 1) % self.verbose_mod == 0:
print(self.verbose_fmt.format(iter=i + 1,
lpr=lpr,
improvement=improvement))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Attributes
----------
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_states`, `n_states`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape (`n_states`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emission parameters. Defaults to all
parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters, verbose=0,
n_jobs=1, batch_size=1, memory_safe=False):
self.n_states = n_states
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
if startprob_prior is None:
startprob_prior = np.ones(n_states)
self.startprob_prior = startprob_prior
self.transmat_ = transmat
if transmat_prior is None:
transmat_prior = np.ones((n_states,
n_states))
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
self.verbose = verbose
self.n_jobs = n_jobs
self.batch_size = batch_size
self.memory_safe = memory_safe
def eval(self, X):
"""Alias for :meth:`score_samples`."""
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : list of array_like, each of shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : list of array_like, shape (n, n_states)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find the most likely state sequence corresponding to ``obs``
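Examples
--------
A minimal sketch, assuming ``model`` is an already-fitted HMM and
``seq`` is a single observation sequence (hypothetical names):

>>> logprob, posteriors = model.score_samples([seq])  # doctest: +SKIP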
"""
logprob = 0
posteriors = []
for seq in obs:
seq = np.asarray(seq)
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors.append(np.exp(gamma.T - logsumexp(gamma, axis=1)).T)
posteriors[-1] += np.finfo(np.float32).eps
posteriors[-1] /= np.sum(posteriors[-1], axis=1).reshape((-1, 1))
logprob += lpr
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : list of array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
decode : Find the most likely state sequence corresponding to ``obs``
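Examples
--------
A minimal sketch, assuming ``model`` is an already-fitted HMM and
``train_obs`` is a list of observation sequences (hypothetical names):

>>> total_logprob = model.score(train_obs)  # doctest: +SKIP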
"""
n_batches = (len(obs) // self.batch_size) + \
(1 if len(obs) % self.batch_size else 0)
if self.n_jobs == 1:
logprob = 0
for obs_batch in batches(obs, self.batch_size):
logprob += self._score(obs_batch)
else:
pool = mp.Pool(processes=self.n_jobs)
results = pool.map(unwrap_self_score,
zip([self] * n_batches,
batches(obs, self.batch_size)))
pool.terminate()
logprob = sum(results)
return logprob
def aic(self, obs):
"""Computes the Aikaike Information Criterion of the model and
set of observations.
Parameters
----------
obs : list of arrays
List of observation sequences.
Returns
-------
aic_score : float
The Akaike Information Criterion.
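Examples
--------
A sketch of order selection, assuming ``train_obs`` is a hypothetical
list of observation sequences:

>>> models = [GaussianHMM(n_states=k).fit(train_obs) for k in (2, 3, 4)]  # doctest: +SKIP
>>> best = min(models, key=lambda m: m.aic(train_obs))  # doctest: +SKIP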
"""
logprob = self.score(obs)
n_pars = self._n_free_parameters()
aic_score = 2 * n_pars - 2 * logprob
return aic_score
def bic(self, obs):
"""Computes the Aikaike Information Criterion of the model and
set of observations.
Parameters
----------
obs : list of arrays
List of observation sequences.
Returns
-------
bic_score : float
The Bayesian Information Criterion.
"""
logprob = self.score(obs)
n_pars = self._n_free_parameters()
n_data = sum([len(seq) for seq in obs])
# Laplace-approximation form of BIC, including the
# -n_pars * log(2 * pi) correction term.
bic_score = n_pars * (np.log(n_data) - np.log(2 * np.pi)) - 2 * logprob
return bic_score
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : list of array_like, each of shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
viterbi_logprobs : array_like, shape (n,)
Log probability of the maximum likelihood path through the HMM.
state_sequences : list of array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
viterbi_logprobs = np.zeros(len(obs))
state_sequences = []
for n, seq in enumerate(obs):
seq = np.asarray(seq)
framelogprob = self._compute_log_likelihood(seq)
viterbi_logprobs[n], state_sequence = self._do_viterbi_pass(framelogprob)
state_sequences.append(state_sequence)
return viterbi_logprobs, state_sequences
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : list of array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
map_logprobs : array_like, shape (n,)
Log probability of the maximum likelihood path through the HMM
state_sequences : list of array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
map_logprobs = np.zeros(len(obs))
state_sequences = []
_, posteriors = self.score_samples(obs)
for n, post in enumerate(posteriors):
state_sequences.append(np.argmax(post, axis=1))
map_logprobs[n] = np.max(post, axis=1).sum()
return map_logprobs, state_sequences
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : list of array_like, each of shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprobs : array_like, shape (n,)
Log probability of the maximum likelihood path through the HMM
state_sequences : list of array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
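Examples
--------
A minimal sketch, assuming ``model`` is an already-fitted HMM and
``seq`` is a single observation sequence (hypothetical names):

>>> logprobs, state_seqs = model.decode([seq], algorithm="viterbi")  # doctest: +SKIP
>>> logprobs2, state_seqs2 = model.decode([seq], algorithm="map")  # doctest: +SKIP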
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprobs, state_sequences = decoder[algorithm](obs)
return logprobs, state_sequences
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : list of array_like, each of shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequences : list of array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequences = self.decode(obs, algorithm)
return state_sequences
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : list of array_like, each of shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
posteriors : list of array-like, shape (n, n_states)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n_seq=1, n_min=10, n_max=20, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_seq : int
Number of observation sequences to generate.
n_min : int
Minimum number of observations for a sequence.
n_max : int
Maximum number of observations for a sequence.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
obs : list of array_like, length `n_seq`
List of sampled observation sequences.
states : list of array_like, length `n_seq`
List of the corresponding hidden-state sequences.
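Examples
--------
A minimal sketch, assuming ``model`` already has valid parameters
(either set directly or learned with ``fit``):

>>> obs, states = model.sample(n_seq=3, n_min=5, n_max=8, random_state=0)  # doctest: +SKIP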
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
obs = []
states = []
for _ in range(n_seq):
# draw a scalar sequence length from the seeded generator
n = random_state.randint(n_min, n_max)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
state_seq = [currstate]
obs_seq = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
state_seq.append(currstate)
obs_seq.append(self._generate_sample_from_state(
currstate, random_state=random_state))
obs.append(deepcopy(np.array(obs_seq)))
states.append(deepcopy(np.array(state_seq, dtype=int)))
return obs, states
def fit(self, obs, warm_start=False):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data,
or strengthening the appropriate subclass-specific regularization
parameter.
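Examples
--------
A minimal sketch with hypothetical, randomly generated sequences:

>>> import numpy as np
>>> train_obs = [np.random.randn(20, 1) for _ in range(5)]
>>> model = GaussianHMM(n_states=2, n_iter=5).fit(train_obs)  # doctest: +SKIP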
"""
if self.memory_safe and (not isinstance(obs[0], str)):
raise ValueError("Filepath locations must be provided as "
"observations to be memory safe.")
n_batches = (len(obs) // self.batch_size) + \
(1 if len(obs) % self.batch_size else 0)
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
if not warm_start:
self._init(obs, self.init_params)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init()
logprob = []
for i in range(self.n_iter):
# Expectation step
if self.n_jobs == 1:
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for obs_batch in batches(obs, self.batch_size):
seq_stats, lpr = self._do_estep(obs_batch)
stats = merge_sum(stats, seq_stats)
curr_logprob += lpr
else:
pool = mp.Pool(processes=self.n_jobs)
results = pool.map(unwrap_self_estep,
zip([self] * n_batches,
batches(obs, self.batch_size)))
pool.terminate()
stats = reduce_merge_sum([x[0] for x in results])
curr_logprob = sum([x[1] for x in results])
logprob.append(curr_logprob)
if i > 0:
improvement = logprob[-1] - logprob[-2]
else:
improvement = np.inf
if self.verbose:
verbose_reporter.update(i, curr_logprob, improvement)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_states, self.n_states)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
startprob = normalize(startprob)
if len(startprob) != self.n_states:
raise ValueError('startprob must have length n_states')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_states,
(self.n_states, self.n_states))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
transmat = normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_states, self.n_states)):
raise ValueError('transmat must have shape '
'(n_states, n_states)')
if not np.allclose(np.sum(transmat, axis=1), 1.0):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_states = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_states, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_states = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_states))
_hmmc._forward(n_observations, n_states, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_states = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_states))
_hmmc._backward(n_observations, n_states, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_ = np.random.dirichlet(self.startprob_prior)
if 't' in params:
self.transmat_ = np.vstack([np.random.dirichlet(
self.transmat_prior[i])
for i in xrange(self.n_states)])
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_states),
'trans': np.zeros((self.n_states, self.n_states))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_states = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations > 1:
lneta = np.zeros((n_observations - 1,
n_states,
n_states))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_states, fwdlattice,
self._log_transmat, bwdlattice,
framelogprob, lnP, lneta)
stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700))
def _do_estep(self, obs_batch):
if self.memory_safe:
local_obs = reduce(lambda x, y: x + y,
[cPickle.load(open(filename, 'rb'))
for filename in obs_batch],
[])
else:
local_obs = obs_batch
local_stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in local_obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
curr_logprob += lpr
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
self._accumulate_sufficient_statistics(local_stats, seq, framelogprob,
posteriors, fwdlattice,
bwdlattice, self.params)
if self.memory_safe:
local_obs = None
return local_stats, curr_logprob
def _score(self, obs_batch):
if self.memory_safe:
local_obs = reduce(lambda x, y: x + y,
[cPickle.load(open(filename, 'rb'))
for filename in obs_batch],
[])
else:
local_obs = obs_batch
logprob = 0
for seq in local_obs:
seq = np.asarray(seq)
framelogprob = self._compute_log_likelihood(seq)
lpr, _ = self._do_forward_pass(framelogprob)
logprob += lpr
return logprob
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if 's' in params:
self.startprob_ = normalize(np.maximum(stats['start'], 1e-20))
if 't' in params:
self.transmat_ = normalize(np.maximum(stats['trans'], 1e-20), 1)
def _n_free_parameters(self):
pass
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
Parameters
----------
n_states : int
Number of states.
covariance_type : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
means : array, shape (`n_states`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_states`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_states`, `n_features`) if 'diag',
(`n_states`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars. Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import GaussianHMM
>>> GaussianHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
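A slightly fuller sketch (hypothetical data; results depend on the
random initialization):

>>> import numpy as np
>>> train_obs = [np.random.randn(30, 2) for _ in range(4)]
>>> model = GaussianHMM(n_states=2).fit(train_obs)  # doctest: +SKIP
>>> states = model.predict(train_obs)  # doctest: +SKIP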
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_states=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_var=1.0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters,
verbose=0,
n_jobs=1,
batch_size=1,
memory_safe=False):
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params, verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
self._covariance_type = covariance_type
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_var = means_var
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if (hasattr(self, 'n_features')
and means.shape != (self.n_states, self.n_features)):
raise ValueError('means must have shape '
'(n_states, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_states
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_states)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if self.memory_safe:
concat_obs = np.vstack(cPickle.load(open(obs[0], 'rb')))
else:
concat_obs = np.vstack(obs)
if (hasattr(self, 'n_features')
and self.n_features != concat_obs.shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (concat_obs.shape[1],
self.n_features))
self.n_features = concat_obs.shape[1]
if 'm' in params:
clu = cluster.KMeans(n_clusters=self.n_states).fit(
concat_obs)
self._means_ = np.array([multivariate_normal(
mean,
np.eye(self.n_features) * self.means_var)
for mean in clu.cluster_centers_])
if 'c' in params:
cv = np.cov(concat_obs.T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_states)
self._covars_[self._covars_ == 0] = 1e-5
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_states)
stats['obs'] = np.zeros((self.n_states, self.n_features))
stats['obs**2'] = np.zeros((self.n_states, self.n_features))
if self._covariance_type in ('tied', 'full'):
stats['obs*obs.T'] = np.zeros((self.n_states, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in range(self.n_states):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
def _do_mstep(self, stats, params):
super(GaussianHMM, self)._do_mstep(stats, params)
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
denom = stats['post'][:, np.newaxis]
if 'm' in params:
self._means_ = stats['obs'] / stats['post'][:, np.newaxis]
if 'c' in params:
covars_prior = self.covars_prior
covars_weight = self.covars_weight
if covars_prior is None:
covars_weight = 0
covars_prior = 0
if self._covariance_type in ('spherical', 'diag'):
cv_num = ((self._means_) ** 2
+ stats['obs**2']
- 2 * self._means_ * stats['obs']
+ self._means_ ** 2 * denom)
cv_den = max(covars_weight - 1, 0) + denom
self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den,
1e-5)
if self._covariance_type == 'spherical':
self._covars_ = np.tile(
self._covars_.mean(1)[:, np.newaxis],
(1, self._covars_.shape[1]))
elif self._covariance_type in ('tied', 'full'):
cvnum = np.empty((self.n_states, self.n_features,
self.n_features))
for c in range(self.n_states):
obsmean = np.outer(stats['obs'][c], self._means_[c])
cvnum[c] = (np.outer(self._means_[c],
self._means_[c])
+ stats['obs*obs.T'][c]
- obsmean - obsmean.T
+ np.outer(self._means_[c], self._means_[c])
* stats['post'][c])
cvweight = max(covars_weight - self.n_features, 0)
if self._covariance_type == 'tied':
self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
(cvweight + stats['post'].sum()))
elif self._covariance_type == 'full':
self._covars_ = ((covars_prior + cvnum) /
(cvweight + stats['post'][:, None, None]))
def _n_free_parameters(self):
# (n_states - 1) free startprob parameters plus n_states * (n_states - 1)
# free transmat parameters factor as (n_states - 1) * (n_states + 1)
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states * self.n_features
if self._covariance_type == 'spherical':
n_pars += self.n_states
elif self._covariance_type == 'tied':
n_pars += ((self.n_features + 1) * self.n_features) / 2
elif self._covariance_type == 'diag':
n_pars += self.n_states * self.n_features
elif self._covariance_type == 'full':
n_pars += self.n_states * ((self.n_features + 1) * self.n_features) / 2
return n_pars
def fit(self, obs, warm_start=False):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. the covariance parameter on one or
more components becoming too small). You can fix this by getting
more training data, or increasing covars_prior.
"""
return super(GaussianHMM, self).fit(obs, warm_start)
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
Attributes
----------
n_states : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
emissionprob : array, shape (`n_states`, `n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and 'e' for emissionprob.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and 'e' for emissionprob.
Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
emissionprob_prior=None, algorithm="viterbi",
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters, init_params=string.ascii_letters,
verbose=0, n_jobs=1, batch_size=1, memory_safe=False):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
self.emissionprob_prior = emissionprob_prior
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_states, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_states, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
emissionprob = normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
if self.memory_safe:
symbols = symbols.union(set(np.concatenate(
cPickle.load(open(o, 'rb')))))
else:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
if self.emissionprob_prior is None:
self.emissionprob_prior = np.ones((self.n_states,
self.n_symbols))
emissionprob = np.vstack([np.random.dirichlet(
self.emissionprob_prior[i])
for i in xrange(self.n_states)])
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_states, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
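Examples
--------
>>> import numpy as np
>>> MultinomialHMM(n_states=2)._check_input_symbols([np.array([0, 1, 2])])
True
>>> MultinomialHMM(n_states=2)._check_input_symbols([np.array([0, 0, 3, 5, 10])])
False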
"""
if self.memory_safe:
symbols = []
for o in obs:
symbols += cPickle.load(open(o, 'rb'))
symbols = np.concatenate(symbols)
else:
symbols = np.concatenate(obs)
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative integer
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
# input is discontinuous
return False
return True
def _n_free_parameters(self):
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states * (self.n_symbols - 1)
return n_pars
def fit(self, obs, warm_start=False, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
"""
err_msg = ("Input must be a list of non-negative integer arrays where "
"in all, every element must be continuous, but %s was "
"given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return super(MultinomialHMM, self).fit(obs, warm_start, **kwargs)
class PoissonHMM(_BaseHMM):
"""Hidden Markov Model with Poisson (discrete) emissions
Attributes
----------
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
rates : array, shape (`n_states`,)
Poisson rate parameters for each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and 'r' for rates.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and 'r' for rates.
Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import PoissonHMM
>>> PoissonHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
PoissonHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
rates_var=1.0, algorithm="viterbi",
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters, verbose=0,
n_jobs=1, batch_size=1, memory_safe=False):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
self.rates_var = rates_var
def _get_rates(self):
"""Emission rate for each state."""
return self._rates
def _set_rates(self, rates):
rates = np.asarray(rates)
self._rates = rates.copy()
rates_ = property(_get_rates, _set_rates)
def _compute_log_likelihood(self, obs):
return log_poisson_pmf(obs, self._rates)
def _generate_sample_from_state(self, state, random_state=None):
random_state = check_random_state(random_state)
return poisson.rvs(self._rates[state], random_state=random_state)
def _init(self, obs, params='str'):
super(PoissonHMM, self)._init(obs, params=params)
if self.memory_safe:
concat_obs = np.concatenate(cPickle.load(open(obs[0], 'rb')))
else:
concat_obs = np.concatenate(obs)
if 'r' in params:
clu = cluster.KMeans(n_clusters=self.n_states).fit(
np.atleast_2d(concat_obs).T)
rates = normal(0, self.rates_var, self.n_states) + \
clu.cluster_centers_.T[0]
self._rates = np.maximum(0.1, rates)
def _initialize_sufficient_statistics(self):
stats = super(PoissonHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_states)
stats['obs'] = np.zeros((self.n_states,))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(PoissonHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'r' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
def _do_mstep(self, stats, params):
super(PoissonHMM, self)._do_mstep(stats, params)
if 'r' in params:
self._rates = stats['obs'] / stats['post']
def _check_input_symbols(self, obs):
"""check if input can be used for PoissonHMM. Input must be a list
of non-negative integers.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, -1, 3, 5, 10] not
"""
if self.memory_safe:
for o in obs:
symbols = np.concatenate(cPickle.load(open(o, 'rb')))
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative integer
return False
else:
symbols = np.concatenate(obs)
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative integer
return False
return True
def _n_free_parameters(self):
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states
return n_pars
def fit(self, obs, warm_start=False):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting. You can fix this by getting more
training data.
"""
err_msg = ("Input must be a list of non-negative integer arrays, \
but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return super(PoissonHMM, self).fit(obs, warm_start)
class ExponentialHMM(_BaseHMM):
"""Hidden Markov Model with Exponential (continuous) emissions
Attributes
----------
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
rates : array, shape (`n_states`,)
Exponential rate parameters for each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and 'r' for rates.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and 'r' for rates.
Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import ExponentialHMM
>>> ExponentialHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ExponentialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
rates_var=1.0, algorithm="viterbi",
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters, verbose=0,
n_jobs=1, batch_size=1, memory_safe=False):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
self.rates_var = rates_var
def _get_rates(self):
"""Emission rate for each state."""
return self._rates
def _set_rates(self, rates):
rates = np.asarray(rates)
self._rates = rates.copy()
rates_ = property(_get_rates, _set_rates)
def _compute_log_likelihood(self, obs):
return log_exponential_density(obs, self._rates)
def _generate_sample_from_state(self, state, random_state=None):
random_state = check_random_state(random_state)
return expon.rvs(scale=1. / self._rates[state], random_state=random_state)
def _init(self, obs, params='str'):
super(ExponentialHMM, self)._init(obs, params=params)
if self.memory_safe:
concat_obs = np.concatenate(cPickle.load(open(obs[0], 'rb')))
else:
concat_obs = np.concatenate(obs)
if 'r' in params:
clu = cluster.KMeans(n_clusters=self.n_states).fit(
np.atleast_2d(concat_obs).T)
rates = normal(0, self.rates_var, self.n_states) + \
1. / clu.cluster_centers_.T[0]
self._rates = np.maximum(0.1, rates)
def _initialize_sufficient_statistics(self):
stats = super(ExponentialHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_states)
stats['obs'] = np.zeros((self.n_states,))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(ExponentialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'r' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
def _do_mstep(self, stats, params):
super(ExponentialHMM, self)._do_mstep(stats, params)
if 'r' in params:
self._rates = stats['post'] / stats['obs']
def _check_input_symbols(self, obs):
"""check if input can be used for ExponentialHMM. Input must be a list
of non-negative reals.
e.g. x = [0., 0.5, 2.3] is OK and y = [0.0, -1.0, 3.3, 5.4, 10.9] not
"""
if self.memory_safe:
for o in obs:
symbols = np.concatenate(cPickle.load(open(o, 'rb')))
if symbols.dtype.kind not in ('f', 'i'):
# values must be numeric (float or int)
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative value
return False
else:
symbols = np.concatenate(obs)
if symbols.dtype.kind not in ('f', 'i'):
# values must be numeric (float or int)
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative value
return False
return True
def _n_free_parameters(self):
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states
return n_pars
def fit(self, obs, warm_start=False):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting. You can fix this by getting more
training data.
"""
err_msg = ("Input must be a list of non-negative real arrays, \
but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return super(ExponentialHMM, self).fit(obs, warm_start)
class MultinomialExponentialHMM(_BaseHMM):
"""Hidden Markov Model with joint multinomial and exponential emissions
Attributes
----------
n_states : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_states`,)
Initial state occupation distribution.
emissionprob : array, shape (`n_states`, `n_symbols`)
Probability of emitting a given symbol when in each state.
rates : array, shape (`n_states`,)
Exponential rate parameters for each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'e' for emissionprob, and 'r' for rates.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'e' for emissionprob, and 'r' for rates.
Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import MultinomialExponentialHMM
>>> MultinomialExponentialHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialExponentialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
emissionprob_prior=None, rates_var=1.0, algorithm="viterbi",
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters, init_params=string.ascii_letters,
verbose=0, n_jobs=1, batch_size=1, memory_safe=False):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
self.emissionprob_prior = emissionprob_prior
self.rates_var = rates_var
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_states, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_states, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
emissionprob = normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _get_rates(self):
"""Emission rate for each state."""
return self._rates
def _set_rates(self, rates):
rates = np.asarray(rates)
self._rates = rates.copy()
rates_ = property(_get_rates, _set_rates)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, map(int, obs[:, 0])].T + \
log_exponential_density(obs[:, 1], self._rates)
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
expon_obs = expon.rvs(scale=1. / self._rates[state],
random_state=random_state)
return symbol, expon_obs
def _init(self, obs, params='ster'):
super(MultinomialExponentialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
if self.memory_safe:
symbols = symbols.union(set(np.concatenate(
cPickle.load(open(o, 'rb')))[:, 0]))
else:
symbols = symbols.union(set(o[:, 0]))
self.n_symbols = len(symbols)
if self.emissionprob_prior is None:
self.emissionprob_prior = np.ones((self.n_states,
self.n_symbols))
emissionprob = np.vstack([np.random.dirichlet(
self.emissionprob_prior[i])
for i in xrange(self.n_states)])
self.emissionprob_ = emissionprob
if self.memory_safe:
concat_obs = np.concatenate(cPickle.load(open(obs[0], 'rb')))[:, 1]
else:
concat_obs = np.concatenate(obs)[:, 1]
if 'r' in params:
clu = cluster.KMeans(n_clusters=self.n_states).fit(
np.atleast_2d(concat_obs).T)
rates = normal(0, self.rates_var, self.n_states) + \
1. / clu.cluster_centers_.T[0]
self._rates = np.maximum(0.1, rates)
def _initialize_sufficient_statistics(self):
stats = super(MultinomialExponentialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_states, self.n_symbols))
stats['post'] = np.zeros(self.n_states)
stats['expon_obs'] = np.zeros((self.n_states,))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialExponentialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs[:, 0]):
stats['obs'][:, int(symbol)] += posteriors[t]
if 'r' in params:
stats['post'] += posteriors.sum(axis=0)
stats['expon_obs'] += np.dot(posteriors.T, obs[:, 1])
def _do_mstep(self, stats, params):
super(MultinomialExponentialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
if 'r' in params:
self._rates = stats['post'] / stats['expon_obs']
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
"""
if self.memory_safe:
symbols = []
for o in obs:
symbols += cPickle.load(open(o, 'rb'))
symbols = np.concatenate(symbols)[:, 0]
else:
symbols = np.concatenate(obs)[:, 0]
        if symbols.dtype.kind not in ('i', 'f'):
            # input symbols must be numeric
            return False
        if len(symbols) == 1:
            # input too short
            return False
        if np.any(symbols < 0):
            # input contains a negative integer
            return False
        symbols.sort()
        if np.any(np.diff(symbols) > 1):
            # input is discontinuous
            return False
if self.memory_safe:
for o in obs:
symbols = np.concatenate(cPickle.load(open(o, 'r')))[:, 1]
                if symbols.dtype.kind not in ('f', 'i'):
                    # dwell times must be numeric
                    return False
                if len(symbols) == 1:
                    # input too short
                    return False
                if np.any(symbols < 0):
                    # input contains a negative value
                    return False
else:
symbols = np.concatenate(obs)[:, 1]
            if symbols.dtype.kind not in ('f', 'i'):
                # dwell times must be numeric
                return False
            if len(symbols) == 1:
                # input too short
                return False
            if np.any(symbols < 0):
                # input contains a negative value
                return False
return True
def _n_free_parameters(self):
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states * (self.n_symbols - 1)
n_pars += self.n_states
return n_pars
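    # Worked example (illustrative): for n_states=2 and n_symbols=3 the count
    # is (2-1)*(2+1) = 3 start/transition parameters, 2*(3-1) = 4 free
    # emission probabilities, and 2 exponential rates, i.e. 9 in total.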
def fit(self, obs, warm_start=False, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
"""
err_msg = ("Input must be a list of non-negative integer arrays where "
"in all, every element must be continuous, but %s was "
"given.")
cleaned_obs = [np.array(seq) for seq in obs]
if not self._check_input_symbols(cleaned_obs):
raise ValueError(err_msg % obs)
return super(MultinomialExponentialHMM, self).fit(cleaned_obs, warm_start, **kwargs)
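# A minimal usage sketch for MultinomialExponentialHMM (illustrative only;
# it assumes observation sequences whose first column holds integer symbols
# and whose second column holds positive dwell times):
#
#     >>> import numpy as np
#     >>> obs = [np.column_stack([np.random.randint(0, 3, size=200),
#     ...                         np.random.exponential(1.0, size=200)])]
#     >>> model = MultinomialExponentialHMM(n_states=2)
#     >>> model = model.fit(obs)                       # doctest: +SKIP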
class GMMHMM(_BaseHMM):
"""Hidden Markov Model with Gaussin mixture emissions
Attributes
----------
init_params : string, optional
Controls which parameters are initialized prior to training. Can
contain any combination of 's' for startprob, 't' for transmat, 'm'
for means, 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
        contain any combination of 's' for startprob, 't' for transmat, 'm'
        for means, 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_states`,)
Initial state occupation distribution.
gmms : array of GMM objects, length `n_states`
GMM emission distributions for each state.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
var : float, default: 1.0
Variance parameter to randomize the initialization of the GMM objects.
The larger var, the greater the randomization.
Examples
--------
>>> from hmmlearn.hmm import GMMHMM
>>> GMMHMM(n_states=2, n_mix=10, covariance_type='diag')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
GMMHMM(algorithm='viterbi', covariance_type='diag',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, n_mix=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", gmms=None, covariance_type='diag',
covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters,
verbose=0, means_var=1.0,
n_jobs=1, batch_size=1, memory_safe=False):
"""Create a hidden Markov model with GMM emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose,
n_jobs=n_jobs,
batch_size=batch_size,
memory_safe=memory_safe)
        # XXX: Hotfix for n_mix, which is incompatible with the scikit's
        # BaseEstimator API
self.n_mix = n_mix
self._covariance_type = covariance_type
self.covars_prior = covars_prior
self.gmms = gmms
if gmms is None:
gmms = []
for x in range(self.n_states):
if covariance_type is None:
g = GMM(n_mix)
else:
g = GMM(n_mix, covariance_type=covariance_type)
gmms.append(g)
self.gmms_ = gmms
self.means_var = means_var
# Read-only properties.
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _compute_log_likelihood(self, obs):
return np.array([g.score(obs) for g in self.gmms_]).T
def _generate_sample_from_state(self, state, random_state=None):
return self.gmms_[state].sample(1, random_state=random_state).flatten()
def _init(self, obs, params='stwmc'):
super(GMMHMM, self)._init(obs, params=params)
if self.memory_safe:
concat_obs = np.concatenate(cPickle.load(open(obs[0], 'r')), 0)
else:
concat_obs = np.concatenate(obs, 0)
n_features = concat_obs.shape[1]
for g in self.gmms_:
g.set_params(init_params=params, n_iter=0)
g.fit(concat_obs)
means = np.array([multivariate_normal(
mean,
np.eye(n_features) * self.means_var)
for mean in g.means_])
g.means_ = means
def _initialize_sufficient_statistics(self):
stats = super(GMMHMM, self)._initialize_sufficient_statistics()
stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GMMHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
for state, g in enumerate(self.gmms_):
_, tmp_gmm_posteriors = g.score_samples(obs)
lgmm_posteriors = np.log(tmp_gmm_posteriors
+ np.finfo(np.float).eps) + \
np.log(posteriors[:, state][:, np.newaxis]
+ np.finfo(np.float).eps)
gmm_posteriors = np.exp(lgmm_posteriors)
tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
n_features = g.means_.shape[1]
tmp_gmm._set_covars(
distribute_covar_matrix_to_match_covariance_type(
np.eye(n_features), g.covariance_type,
g.n_components))
norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
            if np.any(np.isnan(tmp_gmm.covars_)):
                raise ValueError("NaN encountered in GMM covariances during "
                                 "the M-step")
stats['norm'][state] += norm
if 'm' in params:
stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
if 'c' in params:
if tmp_gmm.covariance_type == 'tied':
stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
else:
cvnorm = np.copy(norm)
shape = np.ones(tmp_gmm.covars_.ndim)
shape[0] = np.shape(tmp_gmm.covars_)[0]
cvnorm.shape = shape
stats['covars'][state] += tmp_gmm.covars_ * cvnorm
def _do_mstep(self, stats, params):
super(GMMHMM, self)._do_mstep(stats, params)
# All that is left to do is to apply covars_prior to the
# parameters updated in _accumulate_sufficient_statistics.
for state, g in enumerate(self.gmms_):
n_features = g.means_.shape[1]
norm = stats['norm'][state]
if 'w' in params:
g.weights_ = normalize(norm)
if 'm' in params:
g.means_ = stats['means'][state] / norm[:, np.newaxis]
if 'c' in params:
if g.covariance_type == 'tied':
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * np.eye(n_features))
/ norm.sum())
else:
cvnorm = np.copy(norm)
shape = np.ones(g.covars_.ndim)
shape[0] = np.shape(g.covars_)[0]
cvnorm.shape = shape
if (g.covariance_type in ['spherical', 'diag']):
g.covars_ = (stats['covars'][state] +
self.covars_prior) / cvnorm
elif g.covariance_type == 'full':
eye = np.eye(n_features)
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * eye[np.newaxis])
/ cvnorm)
def _n_free_parameters(self):
n_pars = (self.n_states - 1) * (self.n_states + 1)
for g in self.gmms_:
n_components = g.means_.shape[0]
n_features = g.means_.shape[1]
n_pars += n_components - 1
n_pars += n_components * n_features
if g.covariance_type == 'spherical':
n_pars += n_components
elif g.covariance_type == 'tied':
n_pars += ((n_features + 1) * n_features) / 2
elif g.covariance_type == 'diag':
n_pars += n_components * n_features
elif g.covariance_type == 'full':
n_pars += n_components * ((n_features + 1) * n_features) / 2
return n_pars
|
mvictor212/hmmlearn
|
hmmlearn/hmm.py
|
Python
|
bsd-3-clause
| 86,425
|
[
"Gaussian"
] |
19a4450581a99821191d53a76876daacd8743215246d11e2a20ba9cf38c1316a
|
# Copyright 2013-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import Dependency, ExternalDependency, DependencyException, DependencyMethods, NotFoundDependency
from .cmake import CMakeDependency
from .dub import DubDependency
from .framework import ExtraFrameworkDependency
from .pkgconfig import PkgConfigDependency
from ..mesonlib import listify, MachineChoice, PerMachine
from .. import mlog
import functools
import typing as T
if T.TYPE_CHECKING:
from ..environment import Environment
from .factory import DependencyFactory, WrappedFactoryFunc, DependencyGenerator
# These must be defined in this file to avoid cyclical references.
packages: T.Dict[
str,
T.Union[T.Type[ExternalDependency], 'DependencyFactory', 'WrappedFactoryFunc']
] = {}
_packages_accept_language: T.Set[str] = set()
if T.TYPE_CHECKING:
TV_DepIDEntry = T.Union[str, bool, int, T.Tuple[str, ...]]
TV_DepID = T.Tuple[T.Tuple[str, TV_DepIDEntry], ...]
def get_dep_identifier(name: str, kwargs: T.Dict[str, T.Any]) -> 'TV_DepID':
identifier: 'TV_DepID' = (('name', name), )
from ..interpreter import permitted_dependency_kwargs
assert len(permitted_dependency_kwargs) == 19, \
'Extra kwargs have been added to dependency(), please review if it makes sense to handle it here'
for key, value in kwargs.items():
# 'version' is irrelevant for caching; the caller must check version matches
# 'native' is handled above with `for_machine`
# 'required' is irrelevant for caching; the caller handles it separately
        # 'fallback' and 'allow_fallback' are not part of the cache because,
# once a dependency has been found through a fallback, it should
# be used for the rest of the Meson run.
# 'default_options' is only used in fallback case
# 'not_found_message' has no impact on the dependency lookup
# 'include_type' is handled after the dependency lookup
if key in ('version', 'native', 'required', 'fallback', 'allow_fallback', 'default_options',
'not_found_message', 'include_type'):
continue
# All keyword arguments are strings, ints, or lists (or lists of lists)
if isinstance(value, list):
value = frozenset(listify(value))
for i in value:
assert isinstance(i, str)
else:
assert isinstance(value, (str, bool, int))
        identifier += ((key, value),)
return identifier
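# For illustration (hypothetical call): get_dep_identifier('zlib',
# {'static': True}) yields the hashable tuple
# (('name', 'zlib'), ('static', True)), which serves as a dependency-cache key.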
display_name_map = {
'boost': 'Boost',
'cuda': 'CUDA',
'dub': 'DUB',
'gmock': 'GMock',
'gtest': 'GTest',
'hdf5': 'HDF5',
'llvm': 'LLVM',
'mpi': 'MPI',
'netcdf': 'NetCDF',
'openmp': 'OpenMP',
'wxwidgets': 'WxWidgets',
}
def find_external_dependency(name: str, env: 'Environment', kwargs: T.Dict[str, object]) -> T.Union['ExternalDependency', NotFoundDependency]:
assert(name)
required = kwargs.get('required', True)
if not isinstance(required, bool):
raise DependencyException('Keyword "required" must be a boolean.')
if not isinstance(kwargs.get('method', ''), str):
raise DependencyException('Keyword "method" must be a string.')
lname = name.lower()
if lname not in _packages_accept_language and 'language' in kwargs:
raise DependencyException(f'{name} dependency does not accept "language" keyword argument')
if not isinstance(kwargs.get('version', ''), (str, list)):
raise DependencyException('Keyword "Version" must be string or list.')
# display the dependency name with correct casing
display_name = display_name_map.get(lname, lname)
for_machine = MachineChoice.BUILD if kwargs.get('native', False) else MachineChoice.HOST
type_text = PerMachine('Build-time', 'Run-time')[for_machine] + ' dependency'
# build a list of dependency methods to try
candidates = _build_external_dependency_list(name, env, for_machine, kwargs)
pkg_exc: T.List[DependencyException] = []
pkgdep: T.List[ExternalDependency] = []
details = ''
for c in candidates:
# try this dependency method
try:
d = c()
d._check_version()
pkgdep.append(d)
except DependencyException as e:
pkg_exc.append(e)
mlog.debug(str(e))
else:
pkg_exc.append(None)
details = d.log_details()
if details:
details = '(' + details + ') '
if 'language' in kwargs:
details += 'for ' + d.language + ' '
# if the dependency was found
if d.found():
info: mlog.TV_LoggableList = []
if d.version:
info.append(mlog.normal_cyan(d.version))
log_info = d.log_info()
if log_info:
info.append('(' + log_info + ')')
mlog.log(type_text, mlog.bold(display_name), details + 'found:', mlog.green('YES'), *info)
return d
# otherwise, the dependency could not be found
tried_methods = [d.log_tried() for d in pkgdep if d.log_tried()]
if tried_methods:
tried = '{}'.format(mlog.format_list(tried_methods))
else:
tried = ''
mlog.log(type_text, mlog.bold(display_name), details + 'found:', mlog.red('NO'),
f'(tried {tried})' if tried else '')
if required:
# if an exception occurred with the first detection method, re-raise it
# (on the grounds that it came from the preferred dependency detection
# method)
if pkg_exc and pkg_exc[0]:
raise pkg_exc[0]
# we have a list of failed ExternalDependency objects, so we can report
# the methods we tried to find the dependency
raise DependencyException('Dependency "%s" not found' % (name) +
(', tried %s' % (tried) if tried else ''))
return NotFoundDependency(env)
def _build_external_dependency_list(name: str, env: 'Environment', for_machine: MachineChoice,
kwargs: T.Dict[str, T.Any]) -> T.List['DependencyGenerator']:
# First check if the method is valid
if 'method' in kwargs and kwargs['method'] not in [e.value for e in DependencyMethods]:
raise DependencyException('method {!r} is invalid'.format(kwargs['method']))
# Is there a specific dependency detector for this dependency?
lname = name.lower()
if lname in packages:
# Create the list of dependency object constructors using a factory
# class method, if one exists, otherwise the list just consists of the
# constructor
if isinstance(packages[lname], type):
entry1 = T.cast(T.Type[ExternalDependency], packages[lname]) # mypy doesn't understand isinstance(..., type)
if issubclass(entry1, ExternalDependency):
# TODO: somehow make mypy understand that entry1(env, kwargs) is OK...
func: T.Callable[[], 'ExternalDependency'] = lambda: entry1(env, kwargs) # type: ignore
dep = [func]
else:
entry2 = T.cast(T.Union['DependencyFactory', 'WrappedFactoryFunc'], packages[lname])
dep = entry2(env, for_machine, kwargs)
return dep
candidates: T.List['DependencyGenerator'] = []
# If it's explicitly requested, use the dub detection method (only)
if 'dub' == kwargs.get('method', ''):
candidates.append(functools.partial(DubDependency, name, env, kwargs))
return candidates
# If it's explicitly requested, use the pkgconfig detection method (only)
if 'pkg-config' == kwargs.get('method', ''):
candidates.append(functools.partial(PkgConfigDependency, name, env, kwargs))
return candidates
# If it's explicitly requested, use the CMake detection method (only)
if 'cmake' == kwargs.get('method', ''):
candidates.append(functools.partial(CMakeDependency, name, env, kwargs))
return candidates
# If it's explicitly requested, use the Extraframework detection method (only)
if 'extraframework' == kwargs.get('method', ''):
# On OSX, also try framework dependency detector
if env.machines[for_machine].is_darwin():
candidates.append(functools.partial(ExtraFrameworkDependency, name, env, kwargs))
return candidates
# Otherwise, just use the pkgconfig and cmake dependency detector
if 'auto' == kwargs.get('method', 'auto'):
candidates.append(functools.partial(PkgConfigDependency, name, env, kwargs))
# On OSX, also try framework dependency detector
if env.machines[for_machine].is_darwin():
candidates.append(functools.partial(ExtraFrameworkDependency, name, env, kwargs))
# Only use CMake as a last resort, since it might not work 100% (see #6113)
candidates.append(functools.partial(CMakeDependency, name, env, kwargs))
return candidates
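# Illustrative walk-through: with method='pkg-config' the function above
# returns a single-candidate list, roughly
# [functools.partial(PkgConfigDependency, name, env, kwargs)]; with the
# default method='auto' it tries pkg-config first, then (on macOS) the
# extra-framework detector, and CMake only as a last resort.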
|
jpakkane/meson
|
mesonbuild/dependencies/detect.py
|
Python
|
apache-2.0
| 9,556
|
[
"NetCDF"
] |
e5d422289eb4812f326d7abfd143c321aaf2c1206e74a5339751b811aaed5d25
|
"""
Django settings for octopus project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e*&z$a0kxeyqh_6l3&4+399)cf)&u9bycqx6!j3doyd217-3dr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
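# For a production deployment one would typically flip these settings, e.g.
# (illustrative values):
#   DEBUG = False
#   ALLOWED_HOSTS = ['octopus.example.com']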
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'engines',
'images',
'containers',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'octopus.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR,'templates').replace('\\', '/'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'octopus.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
|
gregorianzhang/octopus
|
octopus/settings.py
|
Python
|
apache-2.0
| 2,882
|
[
"Octopus"
] |
046ac1f1f3d658fe0012fd5849070e6523bb1c26187fbec0a4d2410cf498133c
|
#!/usr/bin/env ipython
from pylab import *
import numpy as np
from scipy.io.netcdf import netcdf_file
import console_colors as ccl
import os
MCwant = '2'
nbefore = 2
nafter = 4
WangFlag = '90.0' #'NaN'
fgap = 0.2
#v_lo = 550.0 #550.0 #450.0 #100.0
#v_hi = 3000.0 #3000.0 #550.0 #450.0
prexShift = 'wShiftCorr'
varname = 'Bmc' # for the figure title
#+++++++++++++++ filters
FILTER = {}
FILTER['vsw_filter'] = False #True #False
FILTER['B_filter'] = True #False
FILTER['filter_dR.icme'] = False
LO, HI = 0.0, 11.0 #11.0, 15.0 #15.0, 300.0
# NOTE: these directories should already exist
dir_suffix = '/_test_Bmc_' #'/_test_Vmc_'
DIR_FIGS = '../../plots/MCflag%s/%s' % (MCwant, prexShift) + dir_suffix
DIR_ASCII = '../../ascii/MCflag%s/%s' % (MCwant, prexShift) + dir_suffix
#os.system('mkdir -p %s %s' % (DIR_FIGS, DIR_ASCII)) # create them if they don't exist!
print ccl.On + " ---> reading data from: " + DIR_ASCII + ccl.W
#FNAMEs = 'MCflag%s_%dbefore.%dafter_Wang%s_fgap%1.1f_vlo.%3.1f.vhi.%4.1f' % (MCwant, nbefore, nafter, WangFlag, fgap, v_lo, v_hi)
FNAMEs = 'MCflag%s_%dbefore.%dafter_fgap%1.1f' % (MCwant, nbefore, nafter, fgap)
FNAMEs += '_Wang%s' % (WangFlag)
if FILTER['vsw_filter']: FNAMEs += '_vlo.%03.1f.vhi.%04.1f' % (LO, HI)
if FILTER['B_filter']: FNAMEs += '_Blo.%2.2f.Bhi.%2.2f' % (LO, HI)
if FILTER['filter_dR.icme']: FNAMEs += '_dRlo.%2.2f.dRhi.%2.2f' % (LO, HI)
# _stuff_MCflag2_2before.4after_fgap0.2_Wang90.0_
#FNAME_FIGS = '%s/_hist_%s' % (DIR_FIGS, FNAMEs)
fname_inp = '%s/_stuff_%s.nc' % (DIR_ASCII, FNAMEs)
f_in = netcdf_file(fname_inp, 'r')
Pcc = f_in.variables['Pcc'].data
dt_sh_Pcc = f_in.variables['dt_sheath_Pcc'].data
Vsh = f_in.variables['V'].data
id_Pcc = set(f_in.variables['IDs_Pcc'].data)
id_Vsh = set(f_in.variables['IDs_V'].data)
ids = id_Pcc.intersection(id_Vsh)
#---------------------------------- compute the surface densities
nbins = 10
n = len(ids)
var_sh = np.zeros(n)
var_mc = np.zeros(n)
var_co = np.zeros(n)
#var = Pcc*dt_sh*Vsh
#for id, i in zip(ids, range(n)):
i=0
for ID_Pcc, i_Pcc in zip(id_Pcc, range(len(id_Pcc))):
for ID_Vsh, i_Vsh in zip(id_Vsh, range(len(id_Vsh))):
ok = (ID_Pcc==ID_Vsh) and (ID_Vsh in ids)
if ok:
var_sh[i] = Pcc[i_Pcc]*dt_sh_Pcc[i_Pcc]*Vsh[i_Vsh]
i+=1
var_sh *= 86400.*1e5 # days -> seconds and km -> cm, so var_sh is in [1/cm^2]
#---------------------------------- begin: figure
XRANGE = [0., 1.4e14]
fig = figure(1, figsize=(6,4))
ax = fig.add_subplot(111)
h, x = np.histogram(var_sh, bins=nbins, range=XRANGE, normed=True)
x = .5*(x[:-1] + x[1:])
dx = x[1]-x[0]
h *= dx*100.
LABEL = 'N: %d' % n
TIT1 = 'normalized histogram (area=100%)'
TIT2 = '%4.1f km/s < %s < %4.1f km/s' % (LO, varname, HI)
TITLE = TIT1+'\n'+TIT2
ax.plot(x, h, 'o-', label=LABEL)
ax.legend()
ax.grid()
ax.set_title(TITLE)
ax.set_xlabel('surface density at sheath $\sigma_{sh}$ [$1/cm^2$]')
ax.set_ylabel('[%]')
ax.set_xlim(XRANGE)
ax.set_ylim(0., 40.)
# generate the figure
fname_fig = '%s/_hist_.sh_%s.png' % (DIR_FIGS, FNAMEs)
savefig(fname_fig, format='png', dpi=135, bbox_inches='tight')
close(fig)
print ccl.Rn + " -------> generated: %s" % fname_fig + ccl.W
#---------------------------------- end: figure
#---------------------------------- begin: ascii
fname_txt = '%s/_hist_.sh_%s.txt' % (DIR_ASCII, FNAMEs)
data_out = np.array([x, h]).T
np.savetxt(fname_txt, data_out, fmt='%5.3g')
print ccl.Rn + " -------> generated: %s" % fname_txt + ccl.W
#---------------------------------- end: ascii
#
del Pcc, dt_sh_Pcc, Vsh # to avoid the RuntimeWarning from netcdf_file()
#pause(3)
f_in.close()
##
|
jimsrc/seatos
|
sheaths/src/surf.dens_for.paper/src/h_group_Bmc.py
|
Python
|
mit
| 3,805
|
[
"NetCDF"
] |
3a26bdd015a835de49101f44bf009ea6a4333547735e80f626d7027b39fcb3f7
|
import numpy as np
from scipy.optimize import fsolve
from astropy.utils import isiterable
def stellarmass_from_halomass(log_Mhalo, z=0):
""" Stellar mass from Halo Mass from Moster+2013
https://doi.org/10.1093/mnras/sts261
Args:
log_Mhalo (float): log_10 halo mass
in solar mass units.
z (float, optional): halo redshift.
Assumed to be 0 by default.
Returns:
log_mstar (float): log_10 galaxy stellar mass
in solar mass units.
"""
# Define model parameters from Table 1
# of the paper.
N10 = 0.0351
N11 = -0.0247
beta10 = 1.376
beta11 = -0.826
gamma10 = 0.608
gamma11 = 0.329
M10 = 11.59
M11 = 1.195
# Get redshift dependent parameters
# from equations 11-14.
z_factor = z / (1 + z)
N = N10 + N11 * z_factor
beta = beta10 + beta11 * z_factor
gamma = gamma10 + gamma11 * z_factor
logM1 = M10 + M11 * z_factor
M1 = 10 ** logM1
M_halo = 10 ** log_Mhalo
    # Moster+2013 eq. 2 in log form:
    # m_star / M_halo = 2 N [ (M/M1)**-beta + (M/M1)**gamma ]**-1
    log_mstar = log_Mhalo + np.log10(2 * N) - np.log10((M_halo / M1) ** -beta + (M_halo / M1) ** gamma)
# Done
return log_mstar
def halomass_from_stellarmass(log_mstar, z=0):
""" Halo mass from Stellar mass (Moster+2013).
Inverts the function `stellarmass_from_halomass`
numerically.
Args:
log_mstar (float or numpy.ndarray): log_10 stellar mass
in solar mass units.
z (float, optional): galaxy redshift
Returns:
log_Mhalo (float): log_10 halo mass
in solar mass units.
"""
try:
log_mstar * z
except ValueError:
raise TypeError(
"log_mstar and z can't be broadcast together for root finding. Use numpy arrays of same length or scalar values.")
f = lambda x: stellarmass_from_halomass(x, z=z) - log_mstar
guess = 2 + log_mstar
    if isiterable(log_mstar):
        return fsolve(f, guess)
    else:
        # fsolve always returns an array; unwrap it for scalar input
        return fsolve(f, guess)[0]
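# A quick round-trip sketch (illustrative; exact output depends on the
# Moster+2013 parameters above):
#
#     >>> log_mstar = stellarmass_from_halomass(12.0, z=0)
#     >>> halomass_from_stellarmass(log_mstar, z=0)   # doctest: +SKIP
#     12.0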
|
FRBs/FRB
|
frb/halos/utils.py
|
Python
|
bsd-3-clause
| 1,986
|
[
"Galaxy"
] |
d130a8fea954015b8d4ba26c652fee4eb14dd98f1af00fcc1cc21643e649ea56
|
# Nucleotide analysis from VCF files
# (c) 2017 Ali Rassolie
# Illumina sequencing
class annuc:
def __init__(self, **kwargs):
# Imports
# Kwarg handling
self.spec_input = kwargs["dictinput"]
self.hass_input = kwargs["hass"]
self.output = "{}_" #kwargs["output"]
self.searchtype = kwargs["searchtype"]
self.taildata = kwargs["taildata"]
self.tailsample = kwargs["tailsample"]
self.amount = kwargs["amount"]
self.header = kwargs["header"]
# where to start from.
self.pos_in_iter = 147
self.endstart = list()
def clean(self):
with open("malbac_4.freebayes.bwa.vcf", "rb") as file_in:
with open("cleaned_hassle.vcf", "wb") as file_out:
for i, line in enumerate(file_in):
if i < 50000:
file_out.write(line)
else:
break
with open("malbac_4_vcfoutput", "rb") as file_in:
with open("cleaned_sample", "wb") as file_out:
for i, line in enumerate(file_in):
if i < 10000:
file_out.write(line)
else:
break
def specific_input(self):
with open(self.spec_input, "r") as spec_file:
for spec_line in spec_file:
temp_info = spec_line.split("\t")
yield str(temp_info[0]), str(temp_info[1].replace("\n",""))
def hass(self):
with open(self.hass_input, "r") as hass_file:
for i, line in enumerate(hass_file):
if i > self.pos_in_iter:
line = line.split("\t")
temp = [ line[0],line[1], line[3], line[4] ]
                    if self.chr_ in temp and self.pos in temp:
self.pos_in_iter = i
yield temp
else:
pass
else:
pass
def chunk_search(self):
import re, itertools
with open(self.hass_input, "r") as file:
search_list = list(itertools.islice(file, self.slicesize))
while True:
# print("wut")
if not search_list:
break
search_for = re.compile("{}\\t{}\\t.*".format(self.chr_, self.pos))
finding = list(filter(search_for.match, search_list))
# print(finding)
if finding:
finding_list = finding[0].split("\t")
result = [finding_list[0],finding_list[1],finding_list[3],finding_list[4]]
# print(result)
yield result
# print("2")
elif not finding:
# print("no finding")
search_list = list(itertools.islice(file, self.slicesize))
else:
print("We have an issue")
def filter(self, slicesize):
import time
self.slicesize = slicesize
# for pos_of_file in range(self.amount):
# self.output = "{}_{}_results.vcf".format(self.header, pos_of_file)
# self.hass_input = "{}_{}_{}".format(self.header, pos_of_file, self.taildata)
# self.spec_input = "{}_{}_{}".format(self.header, pos_of_file, self.tailsample)
# print(self.hass_input, self.spec_input)
with open(self.output, "w") as file:
spec_gen = self.specific_input()
if self.searchtype == "hassle":
gen = self.hass()
elif self.searchtype == "chunk":
gen = self.chunk_search()
else:
print("Please enter searchtype:")
try:
text = ""
count = 0
start = time.time()
start2 = time.time()
for self.chr_, self.pos in spec_gen:
append_this = next(gen)
# print(append_this)
text = text + "{}\t{}\t{}\t{}\n".format(*append_this)
if count == 100:
end2 = time.time()
print(end2-start2)
start2 = time.time()
count = 0
count += 1
file.write(text)
end = time.time()
self.endstart = end - start
            except:
                # re-raise the original exception instead of masking it
                raise
def timed_completion(self):
print("Tot time: {}".format(self.endstart))
def plot_results(self, infile=None):
import matplotlib.pyplot as plt
import numpy as np
counts=dict()
for element in self.basepair_gen(infile=infile):
if element in counts:
counts[element] += 1
else:
counts[element] = 1
ordered_count = iter(counts)
with open("count_results.vcf", "a") as file:
pass
def basepair_gen(self, infile=None):
with open(infile, "r") as file:
for line in file:
line = line.replace("\n", "").split("\t")
yield "{}{}".format(line[2],line[3])
# def hassle_get(self):
# pass
# def spec_get(self):
# pass
# def hassle_redef(self):
# pass
# def spec_input(self):
# pass
# hassle = property(hassle_get, hassle_redef)
# spec_input = property(spec_get, spec_redef)
|
HypoChloremic/annuc
|
annuc_v2.py
|
Python
|
mit
| 4,410
|
[
"BWA"
] |
38be610dea149d157172123199b0caad6efc8254f437166cfa1324cd2441ec16
|
# $Id$
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Sereina Riniker, Aug 2013
from rdkit import Chem
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit.Chem import rdMolDescriptors as rdMD
from rdkit.Chem import rdmolops
from rdkit.Chem import Draw
from rdkit.six import iteritems
import numpy
import math
import copy
from matplotlib import cm
def GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity):
"""
Calculates the atomic weights for the probe molecule
based on a fingerprint function and a metric.
Parameters:
refMol -- the reference molecule
probeMol -- the probe molecule
fpFunction -- the fingerprint function
metric -- the similarity metric
Note:
If fpFunction needs additional parameters, use a lambda construct
"""
if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo')
if hasattr(refMol, '_fpInfo'): delattr(refMol, '_fpInfo')
refFP = fpFunction(refMol, -1)
probeFP = fpFunction(probeMol, -1)
baseSimilarity = metric(refFP, probeFP)
# loop over atoms
weights = []
for atomId in range(probeMol.GetNumAtoms()):
newFP = fpFunction(probeMol, atomId)
newSimilarity = metric(refFP, newFP)
weights.append(baseSimilarity - newSimilarity)
if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo')
if hasattr(refMol, '_fpInfo'): delattr(refMol, '_fpInfo')
return weights
def GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction):
"""
Calculates the atomic weights for the probe molecule based on
a fingerprint function and the prediction function of a ML model.
Parameters:
probeMol -- the probe molecule
fpFunction -- the fingerprint function
predictionFunction -- the prediction function of the ML model
"""
if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo')
probeFP = fpFunction(probeMol, -1)
baseProba = predictionFunction(probeFP)
# loop over atoms
weights = []
for atomId in range(probeMol.GetNumAtoms()):
newFP = fpFunction(probeMol, atomId)
newProba = predictionFunction(newFP)
weights.append(baseProba - newProba)
if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo')
return weights
def GetStandardizedWeights(weights):
"""
Normalizes the weights,
such that the absolute maximum weight equals 1.0.
Parameters:
weights -- the list with the atomic weights
"""
tmp = [math.fabs(w) for w in weights]
currentMax = max(tmp)
if currentMax > 0:
return [w/currentMax for w in weights], currentMax
else:
return weights, currentMax
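# Worked example (illustrative): GetStandardizedWeights([0.2, -0.4]) returns
# ([0.5, -1.0], 0.4), i.e. each weight divided by the absolute maximum 0.4.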
def GetSimilarityMapFromWeights(mol, weights, colorMap=cm.PiYG, scale=-1, size=(250, 250), sigma=None, #@UndefinedVariable #pylint: disable=E1101
coordScale=1.5, step=0.01, colors='k', contourLines=10, alpha=0.5, **kwargs):
"""
Generates the similarity map for a molecule given the atomic weights.
Parameters:
mol -- the molecule of interest
colorMap -- the matplotlib color map scheme
scale -- the scaling: scale < 0 -> the absolute maximum weight is used as maximum scale
scale = double -> this is the maximum scale
size -- the size of the figure
sigma -- the sigma for the Gaussians
coordScale -- scaling factor for the coordinates
step -- the step for calcAtomGaussian
colors -- color of the contour lines
contourLines -- if integer number N: N contour lines are drawn
if list(numbers): contour lines at these numbers are drawn
alpha -- the alpha blending value for the contour lines
kwargs -- additional arguments for drawing
"""
if mol.GetNumAtoms() < 2: raise ValueError("too few atoms")
fig = Draw.MolToMPL(mol, coordScale=coordScale, size=size, **kwargs)
if sigma is None:
if mol.GetNumBonds() > 0:
bond = mol.GetBondWithIdx(0)
idx1 = bond.GetBeginAtomIdx()
idx2 = bond.GetEndAtomIdx()
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[idx1][i]-mol._atomPs[idx2][i])**2 for i in range(2)]))
else:
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[0][i]-mol._atomPs[1][i])**2 for i in range(2)]))
sigma = round(sigma, 2)
x, y, z = Draw.calcAtomGaussians(mol, sigma, weights=weights, step=step)
# scaling
if scale <= 0.0: maxScale = max(math.fabs(numpy.min(z)), math.fabs(numpy.max(z)))
else: maxScale = scale
# coloring
fig.axes[0].imshow(z, cmap=colorMap, interpolation='bilinear', origin='lower', extent=(0,1,0,1), vmin=-maxScale, vmax=maxScale)
# contour lines
# only draw them when at least one weight is not zero
if len([w for w in weights if w != 0.0]):
fig.axes[0].contour(x, y, z, contourLines, colors=colors, alpha=alpha, **kwargs)
return fig
def GetSimilarityMapForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity, **kwargs):
"""
Generates the similarity map for a given reference and probe molecule,
fingerprint function and similarity metric.
Parameters:
refMol -- the reference molecule
probeMol -- the probe molecule
fpFunction -- the fingerprint function
metric -- the similarity metric.
kwargs -- additional arguments for drawing
"""
weights = GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric)
weights, maxWeight = GetStandardizedWeights(weights)
fig = GetSimilarityMapFromWeights(probeMol, weights, **kwargs)
return fig, maxWeight
def GetSimilarityMapForModel(probeMol, fpFunction, predictionFunction, **kwargs):
"""
Generates the similarity map for a given ML model and probe molecule,
and fingerprint function.
Parameters:
probeMol -- the probe molecule
fpFunction -- the fingerprint function
predictionFunction -- the prediction function of the ML model
kwargs -- additional arguments for drawing
"""
weights = GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction)
weights, maxWeight = GetStandardizedWeights(weights)
fig = GetSimilarityMapFromWeights(probeMol, weights, **kwargs)
return fig, maxWeight
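# A minimal usage sketch (assumes a working RDKit installation; the SMILES
# strings are arbitrary examples):
#
#     >>> from rdkit import Chem
#     >>> refMol = Chem.MolFromSmiles('c1ccccc1O')
#     >>> probeMol = Chem.MolFromSmiles('c1ccccc1N')
#     >>> fig, maxWeight = GetSimilarityMapForFingerprint(
#     ...     refMol, probeMol,
#     ...     lambda m, i: GetMorganFingerprint(m, atomId=i))  # doctest: +SKIP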
apDict = {}
apDict['normal'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetAtomPairFingerprint(m, minLength=minl, maxLength=maxl, ignoreAtoms=ia, **kwargs)
apDict['hashed'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetHashedAtomPairFingerprint(m, nBits=bits, minLength=minl, maxLength=maxl, ignoreAtoms=ia, **kwargs)
apDict['bv'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetHashedAtomPairFingerprintAsBitVect(m, nBits=bits, minLength=minl, maxLength=maxl, nBitsPerEntry=bpe, ignoreAtoms=ia, **kwargs)
# usage: lambda m,i: GetAPFingerprint(m, i, fpType, nBits, minLength, maxLength, nBitsPerEntry)
def GetAPFingerprint(mol, atomId=-1, fpType='normal', nBits=2048, minLength=1, maxLength=30, nBitsPerEntry=4, **kwargs):
"""
    Calculates the atom pairs fingerprint with the pairs of atomId removed.
Parameters:
mol -- the molecule of interest
atomId -- the atom to remove the pairs for (if -1, no pair is removed)
fpType -- the type of AP fingerprint ('normal', 'hashed', 'bv')
nBits -- the size of the bit vector (only for fpType='bv')
    minLength -- the minimum path length for an atom pair
    maxLength -- the maximum path length for an atom pair
nBitsPerEntry -- the number of bits available for each pair
"""
if fpType not in ['normal', 'hashed', 'bv']: raise ValueError("Unknown Atom pairs fingerprint type")
if atomId < 0:
return apDict[fpType](mol, nBits, minLength, maxLength, nBitsPerEntry, 0, **kwargs)
if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms")
return apDict[fpType](mol, nBits, minLength, maxLength, nBitsPerEntry, [atomId], **kwargs)
ttDict = {}
ttDict['normal'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetTopologicalTorsionFingerprint(m, targetSize=ts, ignoreAtoms=ia, **kwargs)
ttDict['hashed'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetHashedTopologicalTorsionFingerprint(m, nBits=bits, targetSize=ts, ignoreAtoms=ia, **kwargs)
ttDict['bv'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect(m, nBits=bits, targetSize=ts, nBitsPerEntry=bpe, ignoreAtoms=ia, **kwargs)
# usage: lambda m,i: GetTTFingerprint(m, i, fpType, nBits, targetSize)
def GetTTFingerprint(mol, atomId=-1, fpType='normal', nBits=2048, targetSize=4, nBitsPerEntry=4, **kwargs):
"""
    Calculates the topological torsion fingerprint with the torsions of atomId removed.
Parameters:
mol -- the molecule of interest
atomId -- the atom to remove the torsions for (if -1, no torsion is removed)
fpType -- the type of TT fingerprint ('normal', 'hashed', 'bv')
nBits -- the size of the bit vector (only for fpType='bv')
    targetSize -- the target torsion size (number of atoms in a torsion)
nBitsPerEntry -- the number of bits available for each torsion
any additional keyword arguments will be passed to the fingerprinting function.
"""
if fpType not in ['normal', 'hashed', 'bv']: raise ValueError("Unknown Topological torsion fingerprint type")
if atomId < 0:
return ttDict[fpType](mol, nBits, targetSize, nBitsPerEntry, 0, **kwargs)
if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms")
return ttDict[fpType](mol, nBits, targetSize, nBitsPerEntry, [atomId], **kwargs)
# usage: lambda m,i: GetMorganFingerprint(m, i, radius, fpType, nBits, useFeatures)
def GetMorganFingerprint(mol, atomId=-1, radius=2, fpType='bv', nBits=2048, useFeatures=False, **kwargs):
"""
Calculates the Morgan fingerprint with the environments of atomId removed.
Parameters:
mol -- the molecule of interest
radius -- the maximum radius
fpType -- the type of Morgan fingerprint: 'count' or 'bv'
    atomId -- the atom to remove the environments for (if -1, no environment is removed)
nBits -- the size of the bit vector (only for fpType = 'bv')
useFeatures -- if false: ConnectivityMorgan, if true: FeatureMorgan
any additional keyword arguments will be passed to the fingerprinting function.
"""
if fpType not in ['bv', 'count']: raise ValueError("Unknown Morgan fingerprint type")
if not hasattr(mol, '_fpInfo'):
info = {}
# get the fingerprint
if fpType == 'bv': molFp = rdMD.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits,
useFeatures=useFeatures, bitInfo=info,
**kwargs)
else: molFp = rdMD.GetMorganFingerprint(mol, radius, useFeatures=useFeatures, bitInfo=info,
**kwargs)
# construct the bit map
if fpType == 'bv': bitmap = [DataStructs.ExplicitBitVect(nBits) for x in range(mol.GetNumAtoms())]
else: bitmap = [[] for x in range(mol.GetNumAtoms())]
for bit, es in iteritems(info):
for at1, rad in es:
if rad == 0: # for radius 0
if fpType == 'bv': bitmap[at1][bit] = 1
else: bitmap[at1].append(bit)
else: # for radii > 0
env = Chem.FindAtomEnvironmentOfRadiusN(mol, rad, at1)
amap = {}
submol = Chem.PathToSubmol(mol, env, atomMap=amap)
for at2 in amap.keys():
if fpType == 'bv': bitmap[at2][bit] = 1
else: bitmap[at2].append(bit)
mol._fpInfo = (molFp, bitmap)
if atomId < 0:
return mol._fpInfo[0]
else: # remove the bits of atomId
if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms")
if len(mol._fpInfo) != 2: raise ValueError("_fpInfo not set")
if fpType == 'bv':
molFp = mol._fpInfo[0] ^ mol._fpInfo[1][atomId] # xor
else: # count
molFp = copy.deepcopy(mol._fpInfo[0])
# delete the bits with atomId
for bit in mol._fpInfo[1][atomId]:
molFp[bit] -= 1
return molFp
# usage: lambda m,i: GetRDKFingerprint(m, i, fpType, nBits, minPath, maxPath, nBitsPerHash)
def GetRDKFingerprint(mol, atomId=-1, fpType='bv', nBits=2048, minPath=1, maxPath=5, nBitsPerHash=2, **kwargs):
"""
Calculates the RDKit fingerprint with the paths of atomId removed.
Parameters:
mol -- the molecule of interest
atomId -- the atom to remove the paths for (if -1, no path is removed)
fpType -- the type of RDKit fingerprint: 'bv'
nBits -- the size of the bit vector
minPath -- minimum path length
maxPath -- maximum path length
    nBitsPerHash -- number of bits to set per path
"""
if fpType not in ['bv', '']: raise ValueError("Unknown RDKit fingerprint type")
fpType = 'bv'
if not hasattr(mol, '_fpInfo'):
info = [] # list with bits for each atom
# get the fingerprint
molFp = Chem.RDKFingerprint(mol, fpSize=nBits, minPath=minPath, maxPath=maxPath, nBitsPerHash=nBitsPerHash, atomBits=info, **kwargs)
mol._fpInfo = (molFp, info)
if atomId < 0:
return mol._fpInfo[0]
else: # remove the bits of atomId
if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms")
if len(mol._fpInfo) != 2: raise ValueError("_fpInfo not set")
molFp = copy.deepcopy(mol._fpInfo[0])
molFp.UnSetBitsFromList(mol._fpInfo[1][atomId])
return molFp
|
adalke/rdkit
|
rdkit/Chem/Draw/SimilarityMaps.py
|
Python
|
bsd-3-clause
| 14,928
|
[
"RDKit"
] |
e501b16fb34fa4a607cc68a45927629541e69824af160ade61ced5a31e0a8485
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RStatmod(RPackage):
"""A collection of algorithms and functions to aid statistical
modeling. Includes growth curve comparisons, limiting dilution
analysis (aka ELDA), mixed linear models, heteroscedastic
regression, inverse-Gaussian probability calculations, Gauss
quadrature and a secure convergence algorithm for nonlinear
models. Includes advanced generalized linear model functions
that implement secure convergence, dispersion modeling and
Tweedie power-law families."""
homepage = "https://cran.r-project.org/package=statmod"
url = "https://cran.rstudio.com/src/contrib/statmod_1.4.30.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/statmod"
version('1.4.30', '34e60132ce3df38208f9dc0db0479151')
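    # With this recipe on the repository path, the package would typically be
    # installed via the Spack CLI, e.g.: spack install r-statmod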
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-statmod/package.py
|
Python
|
lgpl-2.1
| 2,037
|
[
"Gaussian"
] |
affc16ba8eba9395bc9c09802209a659873b1e1e8a44dc1f6e9198923ca2a789
|
import logging, tempfile
from time import time, gmtime, strftime
from DIRAC.Core.Utilities import Time
from dirac.lib.base import *
from dirac.lib.diset import getRPCClient
from dirac.lib.credentials import authorizeAction
from DIRAC import gConfig, gLogger
from DIRAC.Core.Utilities.DictCache import DictCache
import dirac.lib.credentials as credentials
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
log = logging.getLogger(__name__)
R_NUMBER = 25
P_NUMBER = 0
globalSort = []
GENERALLIST = ["Site","Service","Resource"]
STORELIST = ["StorageRead","StorageWrite","StorageCheck","StorageRemove"]
MODELIST = GENERALLIST + STORELIST
class SitegatewayController(BaseController):
################################################################################
def display(self):
lhcbGroup = credentials.getSelectedGroup()
if lhcbGroup == "visitor":
return render("/login.mako")
c.select = self.__getSelectionData()
gLogger.info("SELECTION RESULTS:",c.select)
return render("jobs/SiteGateway.mako")
################################################################################
@jsonify
def submit(self):
pagestart = time()
RPC = getRPCClient( "ResourceStatus/ResourceStatus" )
client = ResourceStatusClient( serviceIn = RPC )
if not request.params.has_key("mode") or not len(request.params["mode"]) > 0:
gLogger.error("The parameter 'mode' is absent")
return {"success":"false","error":"The parameter 'mode' is absent"}
mode = request.params["mode"]
gLogger.verbose("Requested mode is %s" % mode)
if not mode in MODELIST:
gLogger.error("Parameter 'mode': %s is wrong. Should be one of the list %s" % (mode, MODELIST) )
return {"success":"false","error":"Parameter 'mode' is wrong"}
if mode in STORELIST:
mode = 'StorageElement'
gLogger.verbose("Selected mode is %s" % mode)
req = self.__request()
gLogger.info("getMonitoredsStatusWeb(%s,%s,%s,%s)" % (mode,req,P_NUMBER,R_NUMBER))
result = client.getMonitoredsStatusWeb(mode,req,P_NUMBER,R_NUMBER)
gLogger.debug("Call result: %s" % result )
if not result["OK"]:
error = result["Message"]
gLogger.error( error )
return {"success":"false","error":error}
result = result["Value"]
if not result.has_key("TotalRecords") or not result["TotalRecords"] > 0:
return {"success":"false","error":"There were no data matching your selection"}
if not result.has_key("ParameterNames") or not result.has_key("Records"):
return {"success":"false","error":"Data structure is corrupted"}
if not len(result["ParameterNames"]) > 0:
return {"success":"false","error":"ParameterNames field is undefined"}
if not len(result["Records"]) > 0:
return {"success":"false","error":"There are no data to display"}
c.result = []
records = result["Records"]
head = result["ParameterNames"]
headLength = len(head)
countryCode = self.__countries()
for i in records:
tmp = {}
for j in range(0,headLength):
tmp[head[j]] = i[j]
if mode == "Resource":
if countryCode.has_key(i[4]):
tmp["FullCountry"] = countryCode[i[4]]
else:
tmp["Country"] = "Unknown"
tmp["FullCountry"] = "Unknown"
else:
if countryCode.has_key(i[3]):
tmp["FullCountry"] = countryCode[i[3]]
else:
tmp["Country"] = "Unknown"
tmp["FullCountry"] = "Unknown"
c.result.append(tmp)
total = result["TotalRecords"]
if result.has_key("Extras"):
extra = result["Extras"]
c.result = {"success":"true","result":c.result,"total":total,"extra":extra}
else:
c.result = {"success":"true","result":c.result,"total":total}
return c.result
###############################################################################
def __request(self):
req = {}
global R_NUMBER
global P_NUMBER
global globalSort
globalSort = []
R_NUMBER = 25
if request.params.has_key("limit") and len(request.params["limit"]) > 0:
R_NUMBER = int(request.params["limit"])
P_NUMBER = 0
if request.params.has_key("start") and len(request.params["start"]) > 0:
P_NUMBER = int(request.params["start"])
if request.params.has_key("getSiteHistory") and len(request.params["getSiteHistory"]) > 0:
req["ExpandSiteHistory"] = str(request.params["getSiteHistory"])
elif request.params.has_key("getServiceHistory") and len(request.params["getServiceHistory"]) > 0:
req["ExpandServiceHistory"] = str(request.params["getServiceHistory"])
elif request.params.has_key("getResourceHistory") and len(request.params["getResourceHistory"]) > 0:
req["ExpandResourceHistory"] = str(request.params["getResourceHistory"])
elif request.params.has_key("getStorageHistory") and len(request.params["getStorageHistory"]) > 0:
req["ExpandStorageElementHistory"] = str(request.params["getStorageHistory"])
else:
result = gConfig.getOption("/Website/ListSeparator")
if result["OK"]:
separator = result["Value"]
else:
separator = ":::"
if not request.params.has_key("mode") or not len(request.params["mode"]) > 0:
return req
mode = request.params["mode"]
if not mode in MODELIST:
return req
selectors = [
"SiteName",
"SiteType",
"Status",
"ResourceType",
"ResourceName",
"ServiceType",
"ServiceName",
"StorageSiteName",
"StorageElementName"
]
gLogger.info("params: ",request.params)
for i in selectors:
if request.params.has_key(i) and len(request.params[i]) > 0:
if str(request.params[i]) != "All":
req[i] = request.params[i].split(separator)
if "All" in req[i]:
req[i].remove("All")
if i == "StorageSiteName":
req["SiteName"] = req[i]
del req[i]
if mode in STORELIST:
status = mode[7:]
req['StatusType'] = status
gLogger.info("Request:",req)
return req
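  # Illustrative example (hypothetical request): parameters
  # mode=StorageRead&SiteName=LCG.CERN.ch would yield
  # req = {'SiteName': ['LCG.CERN.ch'], 'StatusType': 'Read'}.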
################################################################################
def __getSelectionData(self):
callback = {}
lhcbGroup = credentials.getSelectedGroup()
lhcbUser = str(credentials.getUsername())
RPC = getRPCClient( "ResourceStatus/ResourceStatus" )
client = ResourceStatusClient( serviceIn = RPC )
if len(request.params) > 0:
tmp = {}
for i in request.params:
tmp[i] = str(request.params[i])
callback["extra"] = tmp
####
result = client.getSitePresent( meta = { 'columns' : 'SiteName' } )
if result["OK"]:
sites = result["Value"]
try:
sites = list(sites)
except Exception,x:
gLogger.error("Exception during convertion to a list: %s" % str(x))
sites = [] # Will return error on length check
tier1 = gConfig.getValue("/Website/PreferredSites",[]) # Always return a list
if len(sites)>0:
tier1.reverse()
tier1 = [[x] for x in tier1]
sites = [x for x in sites if x not in tier1] # Removes sites which are in tier1 list
for i in tier1:
sites.insert(0,i)
sites.insert(0,["All"])
else:
sites = [["Nothing to display"]]
else:
gLogger.error("client.getSitePresent(meta ={'columns':'SiteName'}) return error: %s" % result["Message"])
sites = [["Error happened on service side"]]
callback["SiteName"] = sites
####
result = client.getValidSiteTypes()
stat = []
if result["OK"]:
value = result["Value"]
try:
value = list(value)
except Exception,x:
gLogger.error("Exception during convertion to a list: %s" % str(x))
value = [] # Will return error on length check
if len(value)>0:
stat.append(["All"])
for i in value:
stat.append([str(i)])
else:
stat = [["Nothing to display"]]
else:
gLogger.error("client.getValidSiteTypes() return error: %s" % result["Message"])
stat = [["Error happened on service side"]]
callback["SiteType"] = stat
####
stat = []
result = client.getValidStatuses()
if result["OK"]:
value = result["Value"]
try:
value = list(value)
except Exception,x:
gLogger.error("Exception during convertion to a list: %s" % str(x))
value = [] # Will return error on length check
if len(value)>0:
stat.append(["All"])
for i in value:
i = i.replace(",",";")
stat.append([str(i)])
else:
stat = [["Nothing to display"]]
else:
gLogger.error("client.getValidStatuses() return error: %s" % result["Message"])
stat = [["Error happened on service side"]]
callback["Status"] = stat
####
app = []
result = client.getValidResourceTypes()
if result["OK"]:
value = result["Value"]
try:
value = list(value)
except Exception,x:
gLogger.error("Exception during convertion to a list: %s" % str(x))
value = [] # Will return error on length check
if len(value)>0:
app.append(["All"])
for i in value:
i = i.replace(",",";")
app.append([str(i)])
else:
app = [["Nothing to display"]]
else:
gLogger.error("client.getValidResourceTypes() return error: %s" % result["Message"])
app = [["Error happened on service side"]]
callback["ResourceType"] = app
####
result = client.getResourcePresent( meta = { 'columns' : 'ResourceName' } )
if result["OK"]:
value = result["Value"]
try:
value = list(value)
except Exception,x:
gLogger.error("Exception during convertion to a list: %s" % str(x))
value = [] # Will return error on length check
if len(value)>0:
value.insert(0,["All"])
gLogger.info("Deb: %s \n" % value)
else:
value = [["Nothing to display"]]
else:
gLogger.error("client.getResourcePresent( meta = { 'columns' : 'ResourceName' } ) return error: %s" % result["Message"])
value = [["Error happened on service side"]]
callback["ResourceName"] = value
####
stat = []
result = client.getValidServiceTypes()
if result["OK"]:
value = result["Value"]
try:
value = list(value)
except Exception,x:
gLogger.error("Exception during convertion to a list: %s" % str(x))
value = [] # Will return error on length check
if len(value)>0:
stat.append(["All"])
for i in value:
i = i.replace(",",";")
stat.append([str(i)])
else:
stat = [["Nothing to display"]]
else:
gLogger.error("client.getValidServiceTypes() return error: %s" % result["Message"])
stat = [["Error happened on service side"]]
callback["ServiceType"] = stat
####
result = client.getServicePresent( meta = { 'columns' : 'ServiceName' } )
if result["OK"]:
value = result["Value"]
try:
value = list(value)
except Exception,x:
gLogger.error("Exception during convertion to a list: %s" % str(x))
value = [] # Will return error on length check
if len(value)>0:
value.insert(0,["All"])
else:
value = [["Nothing to display"]]
else:
gLogger.error("client.getServicePresent( meta = { 'columns' : 'ServiceName' } ) return error: %s" % result["Message"])
value = [["Error happened on service side"]]
callback["ServiceName"] = value
####
value = [["Nothing to display"]]
result = client.getStorageElementPresent( meta = { "columns" : "StorageElementName" }, statusType = "Read" )
if result["OK"]:
value = result["Value"]
if len(value)>0:
value.insert(0,["All"])
else:
gLogger.error("client.getStorageElementPresent(meta={'columns':'StorageElementName'},statusType='Read') return error: %s" % result)
value = [["Error happened on service side"]]
callback["StorageElementName"] = value
####
sesites = [["Nothing to display"]]
result = client.getSESitesList()
if result["OK"]:
if len(result["Value"])>0:
sesites = [[x] for x in result["Value"]]
tier1 = gConfig.getValue("/Website/PreferredSites",[]) # Always return a list
tier1.reverse()
tier1 = [[x] for x in tier1]
sesites = [x for x in sesites if x not in tier1] # Removes sites which are in tier1 list
for i in tier1:
sesites.insert(0,i)
sesites.insert(0,["All"])
else:
gLogger.error("client.getSESitesList() return error: %s" % result["Message"])
sesites = [["Error happened on service side"]]
callback["StorageSiteName"] = sesites
return callback
################################################################################
def __reverseCountry(self):
result = self.__countries()
name = {}
for code, country in result.items():
name[country] = code
return name
################################################################################
def __countries(self):
countries = {
"af": "Afghanistan",
"al": "Albania",
"dz": "Algeria",
"as": "American Samoa",
"ad": "Andorra",
"ao": "Angola",
"ai": "Anguilla",
"aq": "Antarctica",
"ag": "Antigua and Barbuda",
"ar": "Argentina",
"am": "Armenia",
"aw": "Aruba",
"au": "Australia",
"at": "Austria",
"az": "Azerbaijan",
"bs": "Bahamas",
"bh": "Bahrain",
"bd": "Bangladesh",
"bb": "Barbados",
"by": "Belarus",
"be": "Belgium",
"bz": "Belize",
"bj": "Benin",
"bm": "Bermuda",
"bt": "Bhutan",
"bo": "Bolivia",
"ba": "Bosnia and Herzegowina",
"bw": "Botswana",
"bv": "Bouvet Island",
"br": "Brazil",
"io": "British Indian Ocean Territory",
"bn": "Brunei Darussalam",
"bg": "Bulgaria",
"bf": "Burkina Faso",
"bi": "Burundi",
"kh": "Cambodia",
"cm": "Cameroon",
"ca": "Canada",
"cv": "Cape Verde",
"ky": "Cayman Islands",
"cf": "Central African Republic",
"td": "Chad",
"cl": "Chile",
"cn": "China",
"cx": "Christmas Island",
"cc": "Cocos Islands",
"co": "Colombia",
"km": "Comoros",
"cg": "Congo",
"cd": "Congo",
"ck": "Cook Islands",
"cr": "Costa Rica",
"ci": "Cote D'Ivoire",
"hr": "Croatia",
"cu": "Cuba",
"cy": "Cyprus",
"cz": "Czech Republic",
"dk": "Denmark",
"dj": "Djibouti",
"dm": "Dominica",
"do": "Dominican Republic",
"tp": "East Timor",
"ec": "Ecuador",
"eg": "Egypt",
"sv": "El Salvador",
"gq": "Equatorial Guinea",
"er": "Eritrea",
"ee": "Estonia",
"et": "Ethiopia",
"fk": "Falkland Islands",
"fo": "Faroe Islands",
"fj": "Fiji",
"fi": "Finland",
"fr": "France",
"fx": "France, metropolitan",
"gf": "French Guiana",
"pf": "French Polynesia",
"tf": "French Southern Territories",
"ga": "Gabon",
"gm": "Gambia",
"ge": "Georgia",
"de": "Germany",
"gh": "Ghana",
"gi": "Gibraltar",
"gr": "Greece",
"gl": "Greenland",
"gd": "Grenada",
"gp": "Guadeloupe",
"gu": "Guam",
"gt": "Guatemala",
"gn": "Guinea",
"gw": "Guinea-Bissau",
"gy": "Guyana",
"ht": "Haiti",
"hm": "Heard and Mc Donald Islands",
"va": "Vatican City",
"hn": "Honduras",
"hk": "Hong Kong",
"hu": "Hungary",
"is": "Iceland",
"in": "India",
"id": "Indonesia",
"ir": "Iran",
"iq": "Iraq",
"ie": "Ireland",
"il": "Israel",
"it": "Italy",
"jm": "Jamaica",
"jp": "Japan",
"jo": "Jordan",
"kz": "Kazakhstan",
"ke": "Kenya",
"ki": "Kiribati",
"kp": "Korea",
"kr": "Korea",
"kw": "Kuwait",
"kg": "Kyrgyzstan",
"la": "Lao",
"lv": "Latvia",
"lb": "Lebanon",
"ls": "Lesotho",
"lr": "Liberia",
"ly": "Libyan",
"li": "Liechtenstein",
"lt": "Lithuania",
"lu": "Luxembourg",
"mo": "Macau",
"mk": "Macedonia",
"mg": "Madagascar",
"mw": "Malawi",
"my": "Malaysia",
"mv": "Maldives",
"ml": "Mali",
"mt": "Malta",
"mh": "Marshall Islands",
"mq": "Martinique",
"mr": "Mauritania",
"mu": "Mauritius",
"yt": "Mayotte",
"mx": "Mexico",
"fm": "Micronesia",
"md": "Moldova",
"mc": "Monaco",
"mn": "Mongolia",
"ms": "Montserrat",
"ma": "Morocco",
"mz": "Mozambique",
"mm": "Myanmar",
"na": "Namibia",
"nr": "Nauru",
"np": "Nepal",
"nl": "Netherlands",
"an": "Netherlands Antilles",
"nc": "New Caledonia",
"nz": "New Zealand",
"ni": "Nicaragua",
"ne": "Niger",
"ng": "Nigeria",
"nu": "Niue",
"nf": "Norfolk Island",
"mp": "Northern Mariana Islands",
"no": "Norway",
"om": "Oman",
"pk": "Pakistan",
"pw": "Palau",
"pa": "Panama",
"pg": "Papua New Guinea",
"py": "Paraguay",
"pe": "Peru",
"ph": "Philippines",
"pn": "Pitcairn",
"pl": "Poland",
"pt": "Portugal",
"pr": "Puerto Rico",
"qa": "Qatar",
"re": "Reunion",
"ro": "Romania",
"ru": "Russia",
"rw": "Rwanda",
"kn": "Saint Kitts and Nevis",
"lc": "Saint Lucia",
"vc": "Saint Vincent and the Grenadines",
"ws": "Samoa",
"sm": "San Marino",
"st": "Sao Tome and Principe",
"sa": "Saudi Arabia",
"sn": "Senegal",
"sc": "Seychelles",
"sl": "Sierra Leone",
"sg": "Singapore",
"sk": "Slovakia",
"si": "Slovenia",
"sb": "Solomon Islands",
"so": "Somalia",
"za": "South Africa",
"gs": "South Georgia and the South Sandwich Islands",
"es": "Spain",
"lk": "Sri Lanka",
"sh": "St. Helena",
"pm": "St. Pierre and Miquelon",
"sd": "Sudan",
"sr": "Suriname",
"sj": "Svalbard and Jan Mayen Islands",
"sz": "Swaziland",
"se": "Sweden",
"ch": "Switzerland",
"sy": "Syrian Arab Republic",
"tw": "Taiwan",
"tj": "Tajikistan",
"tz": "Tanzania",
"th": "Thailand",
"tg": "Togo",
"tk": "Tokelau",
"to": "Tonga",
"tt": "Trinidad and Tobago",
"tn": "Tunisia",
"tr": "Turkey",
"tm": "Turkmenistan",
"tc": "Turks and Caicos Islands",
"tv": "Tuvalu",
"ug": "Uganda",
"ua": "Ukraine",
"ae": "United Arab Emirates",
"gb": "United Kingdom",
"uk": "United Kingdom",
"us": "United States",
"um": "United States Minor Outlying Islands",
"uy": "Uruguay",
"uz": "Uzbekistan",
"vu": "Vanuatu",
"ve": "Venezuela",
"vn": "Viet Nam",
"vg": "Virgin Islands (British)",
"vi": "Virgin Islands (U.S.)",
"wf": "Wallis and Futuna Islands",
"eh": "Western Sahara",
"ye": "Yemen",
"yu": "Yugoslavia",
"zm": "Zambia",
"zw": "Zimbabwe",
"su": "Soviet Union"
}
return countries
|
DIRACGrid/DIRACWeb
|
dirac/controllers/jobs/SiteGateway.py
|
Python
|
gpl-3.0
| 19,138
|
[
"DIRAC"
] |
60e11f6acde89255ca0808b40d8da0d0f2c02240ca120282fafd15099cac7d5b
|
'''plotChromVariantFrequency.py
Usage:
plotChromVariantFrequency.py <mincov> <outfile> <snpfiles>...
'''
import collections
import os
import re
import pysam
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from general_python import docopt
import numpy as np
# Extract and process arguments
args = docopt.docopt(__doc__,version = 'v1')
if not args['<outfile>'].endswith('.png'):
raise TypeError('Outfile must have .png suffix')
def parseInputFiles(infiles, minCov=1):
    ''' Function to extract variant frequencies from input text files.
    Args:
        infiles (list)- Paths to input text files. Each file must have a
            header with columns named 'Cov' and 'Freq' giving the SNP
            coverage and SNP frequency, respectively.
        minCov (int)- Minimum coverage required for a SNP to be counted.
    Returns:
        freqDict - A collections.OrderedDict keyed by input file name. Each
            value is a sorted list of the variant frequencies that pass the
            coverage filter.
    '''
# Check arguments
if not isinstance(minCov, int):
raise TypeError('minCov must be an integer')
if not minCov > 0:
raise ValueError('minCov must be >=1')
# Create ordered dictionary to store data
freqDict = collections.OrderedDict()
for infile in infiles:
freqList = []
with open(infile) as filein:
# Extract index of key columns from header
header = filein.next().strip().split('\t')
covIndex = header.index('Cov')
freqIndex = header.index('Freq')
# Extract SNP location and frequency
            for line in filein:
                splitline = line.strip().split('\t')
                try:
                    cov = float(splitline[covIndex])
                    freq = float(splitline[freqIndex])
                except (IndexError, ValueError):
                    # Skip truncated or malformed lines
                    continue
                if cov < minCov:
                    continue
                freqList.append(freq)
        # Sort frequencies and store them for this input file
        freqList.sort()
        freqDict[infile] = freqList
# Sort and return data
return(freqDict)
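# A minimal usage sketch (file names hypothetical): given tab-delimited SNP
# files with 'Cov' and 'Freq' header columns, parseInputFiles returns an
# OrderedDict mapping each input file to its sorted frequency list, e.g.
#
#   freqs = parseInputFiles(['sampleA.snps.txt'], minCov=10)
#   freqs['sampleA.snps.txt'][:3]  # e.g. [0.05, 0.12, 0.31]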
def createFrequencyPlots(freqDict, outFile, bins=20, range=(0,1)):
    ''' Function to create histograms of SNP frequencies, one per sample.
    Args:
        freqDict - A collections.OrderedDict where the key is the file name
            and the value is an ordered list of SNP frequencies.
        outFile - The name of the output file in which to save the plot.
        bins (int)- Number of histogram bins.
        range (tuple)- Lower and upper bounds of the histogram.
    '''
# Extract maximum counts:
maxCount = 0
for freq in freqDict.values():
counts, edges = np.histogram(freq, bins=bins, range=range)
maxCount = max(max(counts), maxCount)
# Loop through frequencies and create plot
plt.figure(1, figsize=[6, 4 * len(freqDict)])
for count, sample in enumerate(freqDict):
# Create subplot
plt.subplot(len(freqDict), 1, count + 1)
plt.hist(freqDict[sample], bins=edges)
plt.title(sample)
# Format y-axis
plt.ylabel('Count')
plt.ylim([0, maxCount])
# Format x axis
plt.xlim([0, 1])
plt.xlabel('Frequency')
plt.xticks([0, 0.25, 0.5, 0.75, 1])
plt.tight_layout()
# Save file
plt.savefig(outFile)
if __name__ == "__main__":
sampleFreq = parseInputFiles(args['<snpfiles>'], int(args['<mincov>']))
for sample in sampleFreq:
print('{}\t{}'.format(sample, len(sampleFreq[sample])))
createFrequencyPlots(sampleFreq, args['<outfile>'])
|
adam-rabinowitz/ngs_python
|
scripts/Variants/plotVariantFrequency.py
|
Python
|
gpl-2.0
| 3,550
|
[
"pysam"
] |
1b257488f0b93da7fd8428f27b455f8b754f083823b53e59f2417e0bdbcefeb0
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import imp
import json
import os
import shlex
import zipfile
import re
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.plugins.loader import module_utils_loader
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
from ansible.utils.display import Display
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
_ANSIBALLZ_WRAPPER = True # For the test-module script to tell this is an ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _ansiballz_main():
%(rlimit)s
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read (realpath calls abspath)
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import imp
import shutil
import tempfile
import zipfile
if sys.version_info < (3,):
bytes = str
MOD_DESC = ('.py', 'U', imp.PY_SOURCE)
PY3 = False
else:
unicode = str
MOD_DESC = ('.py', 'r', imp.PY_SOURCE)
PY3 = True
ZIPDATA = """%(zipdata)s"""
# Note: temp_path isn't needed once we switch to zipimport
def invoke_module(modlib_path, temp_path, json_params):
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(modlib_path, mode='a')
# py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
# Note: Remove the following section when we switch to zipimport
# Write the module to disk for imp.load_module
module = os.path.join(temp_path, '__main__.py')
with open(module, 'wb') as f:
f.write(z.read('__main__.py'))
f.close()
# End pre-zipimport section
z.close()
# Put the zipped up module_utils we got from the controller first in the python path so that we
# can monkeypatch the right basic
sys.path.insert(0, modlib_path)
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
with open(module, 'rb') as mod:
imp.load_module('__main__', mod, module, MOD_DESC)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, '__main__.py')
if command == 'excommunicate':
print('The excommunicate debug command is deprecated and will be removed in 2.11. Use execute instead.')
command = 'execute'
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# Set pythonpath to the debug dir
sys.path.insert(0, basedir)
# read in the args file which the user may have modified
with open(args_path, 'rb') as f:
json_params = f.read()
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
# Run the module! By importing it as '__main__', it thinks it is executing as a script
import imp
with open(script_path, 'r') as f:
importer = imp.load_module('__main__', f, script_path, ('.py', 'r', imp.PY_SOURCE))
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
# Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
# (this helps ansible-test produce coverage stats)
temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
# Note: temp_path isn't needed once we switch to zipimport
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
# tempdir creation probably failed
pass
sys.exit(exitcode)
if __name__ == '__main__':
_ansiballz_main()
'''
ANSIBALLZ_COVERAGE_TEMPLATE = '''
# Access to the working directory is required by coverage.
# Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
try:
os.getcwd()
except OSError:
os.chdir('/')
os.environ['COVERAGE_FILE'] = '%(coverage_output)s'
import atexit
import coverage
cov = coverage.Coverage(config_file='%(coverage_config)s')
def atexit_coverage():
cov.stop()
cov.save()
atexit.register(atexit_coverage)
cov.start()
'''
ANSIBALLZ_RLIMIT_TEMPLATE = '''
import resource
existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# adjust soft limit subject to existing hard limit
requested_soft = min(existing_hard, %(rlimit_nofile)d)
if requested_soft != existing_soft:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
except ValueError:
# some platforms (eg macOS) lie about their hard limit
pass
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
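# For example, _strip_comments(u'# note\nx = 1\n\ny = 2') returns
# u'x = 1\ny = 2' -- comment-only and blank lines are dropped, and code
# lines keep their original indentation.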
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
# ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
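# A toy rendering sketch (the values below are obviously fake; real values
# are assembled in _find_module_utils further down). The wrapper uses
# old-style %-formatting, which is why literal percent signs inside it are
# doubled:
#
#   wrapper = ACTIVE_ANSIBALLZ_TEMPLATE % dict(
#       shebang=u'#!/usr/bin/python', coding=ENCODING_STRING, rlimit=u'',
#       coverage=u'', zipdata=u'<base64 payload>', ansible_module=u'ping',
#       params=u'{}', year=2019, month=1, day=1, hour=0, minute=0, second=0)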
class ModuleDepFinder(ast.NodeVisitor):
# Caveats:
# This code currently does not handle:
# * relative imports from py2.6+ from . import urls
IMPORT_PREFIX_SIZE = len('ansible.module_utils.')
def __init__(self, *args, **kwargs):
"""
Walk the ast tree for the python module.
Save submodule[.submoduleN][.identifier] into self.submodules
self.submodules will end up with tuples like:
- ('basic',)
- ('urls', 'fetch_url')
- ('database', 'postgres')
- ('database', 'postgres', 'quote')
It's up to calling code to determine whether the final element of the
dotted strings are module names or something else (function, class, or
variable names)
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
def visit_Import(self, node):
# import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')):
py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
py_mod = tuple(py_mod.split('.'))
self.submodules.add(py_mod)
self.generic_visit(node)
def visit_ImportFrom(self, node):
        # Special case: six is handled separately because of its
        # unusual import machinery
if node.names[0].name == '_six':
self.submodules.add(('_six',))
elif node.module.startswith('ansible.module_utils'):
where_from = node.module[self.IMPORT_PREFIX_SIZE:]
if where_from:
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
py_mod = tuple(where_from.split('.'))
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
else:
# from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
for alias in node.names:
self.submodules.add((alias.name,))
self.generic_visit(node)
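# A minimal sketch of driving ModuleDepFinder (this mirrors recursive_finder
# below); the source string is illustrative only:
#
#   finder = ModuleDepFinder()
#   finder.visit(ast.parse(b'from ansible.module_utils.basic import AnsibleModule'))
#   finder.submodules  # set([('basic', 'AnsibleModule')])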
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, templar, args=tuple()):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip()
if interpreter_config not in task_vars:
return (None, interpreter)
interpreter = templar.template(task_vars[interpreter_config].strip())
shebang = u'#!' + interpreter
if args:
shebang = shebang + u' ' + u' '.join(args)
return (shebang, interpreter)
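# A hedged example of the override mechanism (interpreter path hypothetical):
# with task_vars = {u'ansible_python_interpreter': u'/opt/py27/bin/python'},
# _get_shebang(u'/usr/bin/python', task_vars, templar) returns
# (u'#!/opt/py27/bin/python', u'/opt/py27/bin/python'); without that variable
# it returns (None, u'/usr/bin/python') so the caller can keep the shebang
# already present in the module file.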
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
    the module and its module_utils files need.
"""
# Parse the module and find the imports of ansible.module_utils
try:
tree = ast.parse(data)
except (SyntaxError, IndentationError) as e:
raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))
finder = ModuleDepFinder()
finder.visit(tree)
#
    # Determine which of the imports we've found are modules (vs class,
    # function, or variable names) within packages
#
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
module_utils_paths.append(_MODULE_UTILS_PATH)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
if py_module_name[0] == 'six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('six', module_utils_paths)
py_module_name = ('six',)
idx = 0
elif py_module_name[0] == '_six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('_six', [os.path.join(p, 'six') for p in module_utils_paths])
py_module_name = ('six', '_six')
idx = 0
else:
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
module_info = imp.find_module(py_module_name[-idx],
[os.path.join(p, *py_module_name[:-idx]) for p in module_utils_paths])
break
except ImportError:
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % (name,)]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
# Found a byte compiled file rather than source. We cannot send byte
# compiled over the wire as the python version might be different.
# imp.find_module seems to prefer to return source packages so we just
# error out if imp.find_module returns byte compiled files (This is
# fragile as it depends on undocumented imp.find_module behaviour)
if module_info[2][2] not in (imp.PY_SOURCE, imp.PKG_DIRECTORY):
msg = ['Could not find python source for imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info[2][2] == imp.PKG_DIRECTORY:
# Read the __init__.py instead of the module file as this is
# a python package
normalized_name = py_module_name + ('__init__',)
if normalized_name not in py_module_names:
                    normalized_path = os.path.join(module_info[1], '__init__.py')
normalized_data = _slurp(normalized_path)
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
else:
normalized_name = py_module_name
if normalized_name not in py_module_names:
normalized_path = module_info[1]
normalized_data = module_info[0].read()
module_info[0].close()
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
# Make sure that all the packages that this module is a part of
# are also added
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
pkg_dir_info = imp.find_module(py_pkg_name[-1],
[os.path.join(p, *py_pkg_name[:-1]) for p in module_utils_paths])
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
# FIXME: Currently the AnsiBallZ wrapper monkeypatches module args into a global
# variable in basic.py. If a module doesn't import basic.py, then the AnsiBallZ wrapper will
    # traceback when it tries to monkeypatch. So, for now, we have to unconditionally include
# basic.py.
#
# In the future we need to change the wrapper to monkeypatch the args into a global variable in
# their own, separate python module. That way we won't require basic.py. Modules which don't
# want basic.py can import that instead. AnsibleModule will need to change to import the vars
# from the separate python module and mirror the args into its global variable for backwards
# compatibility.
if ('basic',) not in py_module_names:
pkg_dir_info = imp.find_module('basic', module_utils_paths)
normalized_modules.add(('basic',))
py_module_cache[('basic',)] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
# End of AnsiballZ hack
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(os.path.join("ansible/module_utils",
py_module_file_name), py_module_cache[py_module_name][0])
display.vvvvv("Using module_utils file %s" % py_module_cache[py_module_name][1])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
recursive_finder(py_module_file, py_module_cache[py_module_file][0], py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
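# A minimal sketch of how recursive_finder is seeded (this mirrors the
# ansiballz assembly in _find_module_utils below): start from the module's
# own source, an empty name set, and a cache primed with a builtin
# __init__ placeholder.
#
#   py_module_names = set()
#   py_module_cache = {('__init__',): (b'', '[builtin]')}
#   recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)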
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
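# _is_binary uses a common text-vs-binary sniffing heuristic: delete every
# byte that usually appears in text from the first 1k of the module and
# report binary if anything survives, e.g.
#
#   _is_binary(b'#!/usr/bin/python\n')  # False
#   _is_binary(b'\x7fELF\x02\x01\x01')  # True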
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
become_method, become_user, become_password, become_flags, environment):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
        # Do REPLACER before the 'from ansible.module_utils' check because we
        # need to make sure we substitute the basic import for REPLACER
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif b'from ansible.module_utils.' in b_module_data:
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return b_module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
if module_substyle == 'python':
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
try:
python_repred_params = repr(json.dumps(params))
except TypeError as e:
raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e))
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
with open(cached_module_filename, 'rb') as module_data:
zipdata = module_data.read()
else:
if module_name in action_write_locks.action_write_locks:
display.debug('ANSIBALLZ: Using lock for %s' % module_name)
lock = action_write_locks.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
lock = action_write_locks.action_write_locks[None]
display.debug('ANSIBALLZ: Acquiring lock')
with lock:
display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# Note: If we need to import from release.py first,
# remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
zf.writestr('ansible/__init__.py',
b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' +
to_bytes(__version__) + b'"\n__author__="' +
to_bytes(__author__) + b'"\n')
zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
zf.writestr('__main__.py', b_module_data)
py_module_cache = {('__init__',): (b'', '[builtin]')}
recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.makedirs(lookup_path)
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ANSIBALLZ: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if zipdata is None:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
with open(cached_module_filename, 'rb') as f:
zipdata = f.read()
except IOError:
raise AnsibleError('A different worker process failed to create module file. '
'Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar)
if shebang is None:
shebang = u'#!/usr/bin/python'
# Enclose the parts of the interpreter in quotes because we're
# substituting it into the template as a Python string
interpreter_parts = interpreter.split(u' ')
interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))
# FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source
rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)
if not isinstance(rlimit_nofile, int):
rlimit_nofile = int(templar.template(rlimit_nofile))
if rlimit_nofile:
rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict(
rlimit_nofile=rlimit_nofile,
)
else:
rlimit = ''
coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
if coverage_config:
# Enable code coverage analysis of the module.
# This feature is for internal testing and may change without notice.
coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
coverage_config=coverage_config,
coverage_output=os.environ['_ANSIBLE_COVERAGE_OUTPUT']
)
else:
coverage = ''
now = datetime.datetime.utcnow()
output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
params=python_repred_params,
shebang=shebang,
interpreter=interpreter,
coding=ENCODING_STRING,
year=now.year,
month=now.month,
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
coverage=coverage,
rlimit=rlimit,
)))
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
shebang = u'#!powershell'
# create the common exec wrapper payload and set that as the module_data
# bytes
b_module_data = ps_manifest._create_powershell_wrapper(
b_module_data, module_args, environment, async_timeout, become,
become_method, become_user, become_password, become_flags,
module_substyle
)
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
# ansiballz) If we remove them from jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
return (b_module_data, module_style, shebang)
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
become_method=None, become_user=None, become_password=None, become_flags=None, environment=None):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
task_vars = {} if task_vars is None else task_vars
environment = {} if environment is None else environment
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
async_timeout=async_timeout, become=become, become_method=become_method,
become_user=become_user, become_password=become_password, become_flags=become_flags,
environment=environment)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
elif shebang is None:
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split on python-2.6 needs bytes. On python-3.x it needs text
args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
# _get_shebang() takes text strings
args = [to_text(a, errors='surrogate_or_strict') for a in args]
interpreter = args[0]
b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0],
errors='surrogate_or_strict', nonstring='passthru')
if b_new_shebang:
b_lines[0] = b_shebang = b_new_shebang
if os.path.basename(interpreter).startswith(u'python'):
b_lines.insert(1, b_ENCODING_STRING)
shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict')
else:
# No shebang, assume a binary module?
pass
b_module_data = b"\n".join(b_lines)
return (b_module_data, module_style, shebang)
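# A hedged sketch of the calling convention (the path and templar setup are
# illustrative; the real caller is ActionBase in the action plugin layer):
#
#   (b_data, style, shebang) = modify_module(
#       'ping', '/path/to/modules/ping.py', {'data': 'pong'}, templar)
#   # style == 'new' for ansiballz python modules; b_data is the wrapped payload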
|
brandond/ansible
|
lib/ansible/executor/module_common.py
|
Python
|
gpl-3.0
| 42,417
|
[
"VisIt"
] |
2717c6e8dec60d4b5e2abcae29cf985dc75f565f276fcf39ee3623043cb46f53
|
"""
Tests on the models
"""
from octopus.modules.es.testindex import ESTestCase
from service import models
from service.tests import fixtures
from octopus.lib import dataobj, dates
import time
class TestModels(ESTestCase):
def setUp(self):
super(TestModels, self).setUp()
def tearDown(self):
super(TestModels, self).tearDown()
def test_01_account(self):
# first load some accounts into the system, some with and some without sword support
acc1 = models.Account()
acc1.add_sword_credentials("acc1", "pass1", "http://sword/1")
acc1.save()
acc2 = models.Account()
acc2.add_sword_credentials("acc2", "pass2", "http://sword/2")
acc2.save()
acc3 = models.Account()
acc3.save()
acc4 = models.Account()
acc4.save(blocking=True)
time.sleep(2)
accs = models.Account.with_sword_activated()
assert len(accs) == 2
for acc in accs:
assert acc.sword_collection in ["http://sword/1", "http://sword/2"]
def test_02_repository_status(self):
# make a blank one
rs = models.RepositoryStatus()
# test all its methods
dataobj.test_dataobj(rs, fixtures.SwordFactory.repository_status_do_test())
# make a new one around some existing data
rs = models.RepositoryStatus(fixtures.SwordFactory.repository_status())
# try recording a failure which increments the counter
lt = rs.last_tried
rs.record_failure(24)
assert rs.last_tried != lt
assert rs.retries == 15
assert rs.status == "problem"
# now try incrementing the counter past the limit
rs.record_failure(15)
assert rs.last_tried is None
assert rs.retries == 0
assert rs.status == "failing"
# now record a new failure and check that we can't retry straight away
rs.record_failure(10)
assert not rs.can_retry(100)
# now check that once the delay is over we can retry
time.sleep(2)
assert rs.can_retry(1)
# try deleting the last_tried date directly
assert rs.last_tried is not None
del rs.last_tried
assert rs.last_tried is None
# try deactivating and activating the status
rs.activate()
assert rs.status == "succeeding"
assert rs.retries == 0
rs.deactivate()
assert rs.status == "failing"
def test_03_deposit_record(self):
# make a blank one
dr = models.DepositRecord()
# test all its methods
dataobj.test_dataobj(dr, fixtures.SwordFactory.deposit_record_do_test())
# make a new one around some existing data
dr = models.DepositRecord(fixtures.SwordFactory.deposit_record())
# check the was_successful calculations
# when the metadata fails, that is a certain failure irrespective of the other values
dr.metadata_status = "failed"
dr.content_status = "deposited"
dr.completed_status = "deposited"
assert not dr.was_successful()
        # across-the-board success
dr.metadata_status = "deposited"
dr.content_status = "deposited"
dr.completed_status = "deposited"
assert dr.was_successful()
# failed at the complete stage
dr.metadata_status = "deposited"
dr.content_status = "deposited"
dr.completed_status = "failed"
assert not dr.was_successful()
# failed at the content stage
dr.metadata_status = "deposited"
dr.content_status = "failed"
dr.completed_status = "deposited"
assert not dr.was_successful()
# successful metadata-only deposit
dr.metadata_status = "deposited"
dr.content_status = "none"
dr.completed_status = "none"
assert dr.was_successful()
def test_04_deposit_record_pull(self):
dd = dates.now()
# create a deposit record with some properties we can check
dr = models.DepositRecord()
dr.notification = "123456"
dr.repository = "abcdef"
dr.metadata_status = "deposited"
dr.content_status = "deposited"
dr.completed_status = "failed"
dr.deposit_date = dd
dr.save(blocking=True)
# first check an empty response
r = models.DepositRecord.pull_by_ids("adfsadf", "kasdfasf")
assert r is None
# now check we can retrieve the real thing
r = models.DepositRecord.pull_by_ids("123456", "abcdef")
assert r.notification == "123456"
assert r.repository == "abcdef"
assert r.metadata_status == "deposited"
assert r.content_status == "deposited"
assert r.completed_status == "failed"
assert r.deposit_date == dd
|
JiscPER/jper-sword-out
|
service/tests/unit/test_models.py
|
Python
|
apache-2.0
| 4,823
|
[
"Octopus"
] |
2f55e53640fb8cd32a18e7996fce42bb312fedb6ed1ad3c79f41d91b5ab23163
|
import base64
import json
import re
import string
import sys
import unicodedata
import time
import BaseHTTPServer
import SocketServer
from urlparse import urlparse
from rauth import OAuth1Service, OAuth2Service
rdio_client_id=''
rdio_client_secret=''
spotify_client_id=''
spotify_client_secret=''
page_size = 50
redirect_port = 8123
def normalize_text(data):
    # Lowercase, strip accents (NFKD + ASCII), drop bracketed qualifiers such
    # as "(Remastered)" or "[Live]", and cut anything after a trailing "- ".
    text = unicodedata.normalize('NFKD', data.lower()).encode('ASCII', 'ignore')
    text = re.sub(r'[\(\[][^)]*[\)\]]', '', text)
    text = re.sub(r'- .*$', '', text)
    return text.strip()
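# Example (hedged; the exact result depends on the input text):
#   normalize_text(u'Song Title (Remastered) - Live')  # 'song title'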
def get_sessions():
redirect_uri = 'http://localhost:%d/' % redirect_port
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(s):
Handler.code = urlparse(s.path).query.split('=')[1]
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write('<h3>Code received successfully</h3>')
s.wfile.write('<p>Please look back at the terminal.</p>')
httpd = SocketServer.TCPServer(("", redirect_port), Handler)
def wait_for_code():
httpd.handle_request()
sys.stdout.flush()
return Handler.code
rdio = OAuth2Service(
name='rdio',
client_id=rdio_client_id,
client_secret=rdio_client_secret,
authorize_url='https://www.rdio.com/oauth2/authorize',
access_token_url='https://services.rdio.com/oauth2/token',
base_url='https://services.rdio.com/api/1/',)
params={'response_type': 'code', 'redirect_uri': redirect_uri}
rdio_authorize_url = rdio.get_authorize_url(**params)
print 'Visit this URL in your browser: ' + rdio_authorize_url
rdio_pin = wait_for_code()
rdio_session = rdio.get_auth_session(method='POST',
data={'code': rdio_pin,
'grant_type': 'authorization_code',
'redirect_uri': redirect_uri,},
headers={'Authorization': 'Basic ' + base64.b64encode(rdio_client_id + ":" + rdio_client_secret)},
decoder=json.loads)
spotify = OAuth2Service(
name='spotify',
client_id=spotify_client_id,
client_secret=spotify_client_secret,
authorize_url='https://accounts.spotify.com/authorize/',
access_token_url='https://accounts.spotify.com/api/token',
base_url='https://api.spotify.com',)
params={'scope':'user-library-modify user-library-read playlist-read-private playlist-modify-public user-follow-modify', \
'response_type': 'code', 'redirect_uri': redirect_uri}
spotify_authorize_url = spotify.get_authorize_url(**params)
print 'Visit this URL in your browser: ' + spotify_authorize_url
spotify_pin = wait_for_code()
spotify_session = spotify.get_auth_session(method='POST',
data={'code': spotify_pin,
'grant_type': 'authorization_code',
'redirect_uri': redirect_uri,},
headers={'Authorization': 'Basic ' + base64.b64encode(spotify_client_id + ":" + spotify_client_secret)},
decoder=json.loads)
spotify_refresh_token = spotify_session.access_token_response.json()['refresh_token']
    def retry_if_possible(response):
        # 429: rate limited; honour the Retry-After header before retrying
        if response.status_code == 429 and response.headers.get('retry-after'):
            time.sleep(float(response.headers['retry-after']))
            return True
        # 401: access token expired; refresh it and retry
        if response.status_code == 401:
            refresh = spotify.get_raw_access_token(data={'refresh_token': spotify_refresh_token,
                                                         'grant_type': 'refresh_token'})
            spotify_session.access_token = refresh.json()['access_token']
            return True
        # 5xx: transient server error; retry
        if response.status_code / 100 == 5:
            return True
        return False
def spotify_get(url, **kwargs):
while True:
response = spotify_session.orig_get(url, **kwargs)
if retry_if_possible(response):
continue
return response
spotify_session.orig_get = spotify_session.get
spotify_session.get = spotify_get
def spotify_put(url, data = None, **kwargs):
while True:
response = spotify_session.orig_put(url, data, **kwargs)
if retry_if_possible(response):
continue
return response
spotify_session.orig_put = spotify_session.put
spotify_session.put = spotify_put
def spotify_post(url, data = None, json = None, **kwargs):
while True:
response = spotify_session.orig_post(url, data, json, **kwargs)
if retry_if_possible(response):
continue
return response
spotify_session.orig_post = spotify_session.post
spotify_session.post = spotify_post
httpd.server_close()
return rdio_session, spotify_session
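# Typical driver (hedged sketch; fill in the rdio_/spotify_ client id and
# secret globals above before running):
#
#   rdio_session, spotify_session = get_sessions()
#   sync_followed_artists(rdio_session, spotify_session)
#   sync_collection_albums(rdio_session, spotify_session)
#   sync_collection(rdio_session, spotify_session)
#   sync_playlists(rdio_session, spotify_session)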
def search(track_to_match, spotify_session, album_ids, matched_tracks, unmatched_tracks, match_album=False):
search_term = normalize_text(track_to_match['artist'] + ' ' + track_to_match['name'])
search_results = spotify_session.get('/v1/search', params={'q': search_term, 'type': 'track', 'limit': 50})
matched_track = None
if search_results.status_code != 200:
print search_results
print search_results.text
print search_results.json()
unmatched_tracks.append(search_term)
return matched_track, album_ids, matched_tracks, unmatched_tracks
if search_results.json()['tracks']['items'] and normalize_text(search_results.json()['tracks']['items'][0]['artists'][0]['name']) in normalize_text(track_to_match['artist']):
matched_track = search_results.json()['tracks']['items'][0]
else:
search_results = spotify_session.get('/v1/search', params={'q': normalize_text(track_to_match['name']), 'type': 'track', 'limit': 50})
for search_result in search_results.json()['tracks']['items']:
if (#"US" in search_result['album']['available_markets']
#and
(normalize_text(track_to_match['album']) in normalize_text(search_result['album']['name'])
or match_album==False)
and normalize_text(track_to_match['name']) in normalize_text(search_result['name'])
and normalize_text(track_to_match['artist']) in normalize_text(search_result['artists'][0]['name'])
#and search_result['explicit'] == track_to_match['isExplicit']
):
matched_track = search_result
# try to group songs using same spotify album
if (normalize_text(track_to_match['artist']) not in album_ids
or album_ids[normalize_text(track_to_match['artist'])] == search_result['album']['id']):
album_ids[normalize_text(track_to_match['artist'])] = search_result['album']['id']
break
if matched_track:
matched_tracks.append(search_term)
else:
unmatched_tracks.append(search_term)
return matched_track, album_ids, matched_tracks, unmatched_tracks
def sync_followed_artists(rdio_session, spotify_session):
print 'Syncing followed artists'
artists = rdio_session.post('', data={'method': 'getArtistsInCollection', 'count': page_size}, verify=True)
if artists.status_code != 200:
print artists.json()
return
matched_artists = []
unmatched_artists = []
    search_loop = 1
keep_processing = True
while keep_processing:
if len(artists.json()['result']) < page_size:
keep_processing = False
for artist in artists.json()['result']:
sys.stdout.write('.')
sys.stdout.flush()
matched_artist = None
search_results = spotify_session.get('/v1/search', params={'q': normalize_text(artist['name']), 'type': 'artist', 'limit': 50})
try:
for search_result in search_results.json()['artists']['items']:
if (normalize_text(artist['name']) == normalize_text(search_result['name'])):
matched_artist = search_result
break
            except Exception, e:
                # Malformed search response; log it and treat this artist as unmatched
                print 'Error parsing artist search results: %s' % e
if matched_artist:
spotify_session.put('/v1/me/following', params={'ids': matched_artist['id'], 'type': 'artist'})
matched_artists.append(artist['name'])
else:
unmatched_artists.append(artist['name'])
        retries = 1
        while retries < 10:
            # Fetch the next page with an explicit offset rather than an
            # ever-growing count, so processed artists are not refetched
            artists = rdio_session.post('', data={'method': 'getArtistsInCollection', 'count': page_size, 'start': page_size * search_loop}, verify=True)
            if artists.status_code == 200:
                break
            retries = retries + 1
        search_loop = search_loop + 1
print ''
print 'Matched artists: '
print '\n'.join(matched_artists)
print ''
print 'Unmatched artists: '
print '\n'.join(unmatched_artists)
def sync_collection_albums(rdio_session, spotify_session):
print 'Syncing collection albums'
albums = rdio_session.post('', data={'method': 'getAlbumsInCollection', 'count': page_size}, verify=True)
if albums.status_code != 200:
print albums.json()
return
matched_albums = []
unmatched_albums = []
    search_loop = 1
keep_processing = True
while keep_processing:
if len(albums.json()['result']) < page_size:
keep_processing = False
for album in albums.json()['result']:
sys.stdout.write('.')
sys.stdout.flush()
matched_album = None
search_results = spotify_session.get('/v1/search', params={'q': normalize_text(album['artist'] + ' ' + album['name']), 'type': 'album', 'limit': 50})
if search_results.json()['albums']['items']:
matched_album = search_results.json()['albums']['items'][0]
for search_result in search_results.json()['albums']['items']:
if ('US' in search_result['available_markets']
and normalize_text(album['name']) in normalize_text(search_result['name'])
and search_result['album_type'] == 'album'):
matched_album = search_result
break
if matched_album:
album_tracks = spotify_session.get('v1/albums/%s/tracks' % matched_album['id'])
if album_tracks.status_code != 200:
unmatched_albums.append(album['artist'] + ' ' + album['name'])
print album_tracks.json()
continue
track_ids = []
for album_track in album_tracks.json()['items']:
track_ids.append(album_track['id'])
if len(track_ids) > 0:
spotify_session.put('/v1/me/tracks?ids=%s' % ','.join(track_ids))
matched_albums.append(album['artist'] + ' ' + album['name'])
else:
unmatched_albums.append(album['artist'] + ' ' + album['name'])
        retries = 1
        while retries < 10:
            # Paginate with an explicit offset, mirroring sync_collection
            albums = rdio_session.post('', data={'method': 'getAlbumsInCollection', 'count': page_size, 'start': page_size * search_loop}, verify=True)
            if albums.status_code == 200:
                break
            retries = retries + 1
        search_loop = search_loop + 1
print ''
print 'Matched albums: '
print '\n'.join(matched_albums)
print ''
print 'Unmatched albums: '
print '\n'.join(unmatched_albums)
def sync_collection(rdio_session, spotify_session):
print 'Syncing collection'
tracks = rdio_session.post('', data={'method': 'getTracksInCollection', 'count': page_size}, verify=True)
if tracks.status_code != 200:
print tracks.json()
return
matched_tracks = []
unmatched_tracks = []
search_loop = 2
album_ids = {}
keep_processing = True
while keep_processing:
if len(tracks.json()['result']) < page_size:
keep_processing = False
for track in tracks.json()['result']:
matched_track, album_ids, matched_tracks, unmatched_tracks = search(track, spotify_session, album_ids, matched_tracks, unmatched_tracks, True)
if matched_track:
spotify_session.put('/v1/me/tracks', params={'ids': matched_track['id']})
sys.stdout.write('.')
sys.stdout.flush()
retries = 1
while retries < 10:
# search_loop starts at 2, so the second fetch begins at offset page_size.
tracks = rdio_session.post('', data={'method': 'getTracksInCollection', 'count': page_size, 'start': page_size * (search_loop - 1)}, verify=True)
if tracks.status_code == 200:
break
retries = retries + 1
search_loop = search_loop + 1
print ''
print 'Matched tracks: '
print '\n'.join(matched_tracks)
print ''
print 'Unmatched tracks: '
print '\n'.join(unmatched_tracks)
def sync_playlists(rdio_session, spotify_session):
print 'Syncing playlists'
rdio_playlists = rdio_session.post('', data={'method': 'getPlaylists', 'extras': 'tracks'}, verify=True)
if rdio_playlists.status_code != 200:
print rdio_playlists.json()
return
rdio_playlists = rdio_playlists.json()
if 'result' not in rdio_playlists or ('owned' not in rdio_playlists['result'] and 'subscribed' not in rdio_playlists['result']):
print 'No owned or subscribed playlists'
else:
spotify_id = spotify_session.get('/v1/me').json()['id']
spotify_playlists = spotify_session.get('/v1/users/%s/playlists' % spotify_id)
if spotify_playlists.status_code != 200:
print spotify_playlists.json()
return
rdio_playlists_to_process = []
if 'owned' in rdio_playlists['result']:
rdio_playlists_to_process = rdio_playlists_to_process + rdio_playlists['result']['owned']
if 'subscribed' in rdio_playlists['result']:
rdio_playlists_to_process = rdio_playlists_to_process + rdio_playlists['result']['subscribed']
spotify_playlists = spotify_playlists.json()
for rdio_playlist in rdio_playlists_to_process:
existing_spotify_playlist = None
for spotify_playlist in spotify_playlists['items']:
if spotify_playlist['name'] == rdio_playlist['name']:
existing_spotify_playlist = spotify_playlist
break
if not existing_spotify_playlist:
# set existing_spotify_playlist to a new playlist
existing_spotify_playlist = spotify_session.post('/v1/users/%s/playlists' % spotify_id,
json={'name': rdio_playlist['name']})
if existing_spotify_playlist.status_code > 201:
print existing_spotify_playlist.json()
return
existing_spotify_playlist = existing_spotify_playlist.json()
matched_tracks = []
unmatched_tracks = []
album_ids = {}
track_uris = []
did_first_hundred = False
last_track = rdio_playlist['tracks'][-1]
for rdio_track in rdio_playlist['tracks']:
matched_track, album_ids, matched_tracks, unmatched_tracks = search(rdio_track, spotify_session, album_ids, matched_tracks, unmatched_tracks)
if matched_track:
# Playlists can't easily be updated in place, so replace the contents with the first 100 tracks, then keep appending the rest one at a time.
if not did_first_hundred:
track_uris.append(matched_track['uri'])
else:
spotify_session.post('/v1/users/%s/playlists/%s/tracks' % (spotify_id, existing_spotify_playlist['id']),
params={'uris': [matched_track['uri']]})
if not did_first_hundred:
if len(track_uris) == 100 or last_track['key'] == rdio_track['key']:
did_first_hundred = True
spotify_session.put('/v1/users/%s/playlists/%s/tracks' % (spotify_id, existing_spotify_playlist['id']),
json={'uris': track_uris})
sys.stdout.write('.')
sys.stdout.flush()
print ''
print 'Matched tracks: '
print '\n'.join(matched_tracks)
print ''
print 'Unmatched tracks: '
print '\n'.join(unmatched_tracks)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-t', action='store_const', dest='tracks',
const=True, help='Sync collection by tracks')
parser.add_argument('-a', action='store_const', dest='albums',
const=True, help='Sync collection by full albums')
parser.add_argument('-p', action='store_const', dest='playlists',
const=True, help='Sync owned and subscribed playlists')
parser.add_argument('-f', action='store_const', dest='followed',
const=True, help='Sync followed artists')
results = parser.parse_args()
if not results.tracks and not results.albums and \
not results.playlists and not results.followed:
parser.print_help()
sys.exit(1)
else:
rdio_session, spotify_session = get_sessions()
if results.tracks:
sync_collection(rdio_session, spotify_session)
if results.albums:
sync_collection_albums(rdio_session, spotify_session)
if results.playlists:
sync_playlists(rdio_session, spotify_session)
if results.followed:
sync_followed_artists(rdio_session, spotify_session)
|
streeter/Rdio2Spotify
|
main.py
|
Python
|
mit
| 18,693
|
[
"VisIt"
] |
4a2cb927cb652c84135f8ef97b383e3ff7db1df58615275c5f014f3e77d5082c
|
# encoding: utf-8
import os
import argparse
import sys
import re
import pytest
from txtemplates import server_templates
import txtemplates
def test_get_parser_error(capsys):
parser = server_templates.get_parser()
with pytest.raises(SystemExit):
parser.parse_args([])
_, err = capsys.readouterr()
assert re.search('error: too few arguments', err)
@pytest.mark.parametrize('argstr, expected', [
('module', {'name': 'module', 'module': 'txtemplates'}),
('module package', {'name': 'module', 'module': 'package'}),
('module package -C directory -f',
{'directory': 'directory', 'force_overwrite': True})
])
def test_get_parser(argstr, expected):
parser = server_templates.get_parser()
args = parser.parse_args(argstr.split(' '))
for (k, v) in expected.items():
assert hasattr(args, k)
assert getattr(args, k) == v
def test_get_target_module():
directory = os.path.dirname(os.path.dirname(txtemplates.__file__))
args = argparse.Namespace(module='txtemplates', directory=directory)
module = server_templates.get_target_module(args)
assert module == txtemplates
args = argparse.Namespace(module='txtemplates', directory='/tmp')
module = server_templates.get_target_module(args)
assert module == txtemplates
@pytest.fixture(scope="function")
def testpackage(tmpdir):
p = tmpdir.mkdir("testpackage").join("__init__.py")
p.write("")
args = argparse.Namespace(module='testpackage', directory=str(tmpdir))
package = server_templates.get_target_module(args)
return tmpdir, package
def test_dirs(testpackage):
tempdir, package = testpackage
basedir = str(tempdir)
parser = server_templates.get_parser()
args = parser.parse_args('module testpackage'.split(' '))
dirs = server_templates.Dirs(args, package)
assert dirs.module == os.path.join(basedir, 'testpackage', 'module')
assert dirs.twistedplugin == os.path.join(
basedir, 'testpackage', 'twisted', 'plugins')
assert dirs.testbase == os.path.join(basedir, 'tests')
assert dirs.test == os.path.join(basedir, 'tests', 'module')
def test_run(testpackage, monkeypatch, capsys):
tempdir, package = testpackage
monkeypatch.setattr(
sys, "argv",
"main.py testmodule testpackage -C {}"
.format(str(tempdir)).split(" "))
server_templates.main()
files = [str(f)[len(str(tempdir)):] for f in tempdir.visit()
if not str(f).endswith('.pyc') and '__pycache__' not in str(f)]
assert len(files) == 21
assert '/testpackage/testmodule/backend/__init__.py' in files
assert '/tests/testmodule/test_testmodule_backend.py' in files
# second run should skip all files
p = tempdir.join('testpackage').join('__init__.py')
text = "# This should not be overwritten"
p.write(text)
server_templates.main()
out, _ = capsys.readouterr()
assert re.search(text, p.read())
assert re.search('exists: Skipped', out)
# another run with overwrite flag turned on, should overwrite the existing
# files.
monkeypatch.setattr(
sys, "argv",
"main.py testmodule testpackage -C {} -f"
.format(str(tempdir)).split(" "))
server_templates.main()
p = tempdir.join('testpackage').join('__init__.py')
out, _ = capsys.readouterr()
assert re.search(text, p.read())
assert not re.search('exists: Skipped', out)
# vim:set ft=python sw=4 et spell spelllang=en:
|
mdrohmann/txtemplates
|
tests/server_templates/test_server_templates.py
|
Python
|
bsd-3-clause
| 3,487
|
[
"VisIt"
] |
fd9ced3a2ee12e707af6c562ca4fdc30006f33959c066a306820738104afb8f2
|
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/xor-problem.py
# Solving the exclusive-or problem
################################################################################
# Please, for more information on this demo, see the tutorial documentation.
# First, we import the needed modules
from numpy import *
import peach as p
# The network to solve the exclusive-or problem needs a hidden layer: two input
# neurons, two hidden neurons and one output neuron. The neurons should be
# biased and the activation function should be sigmoidal. The learning rule is
# backpropagation.
nn = p.FeedForward((2, 2, 1), p.TanH, p.BackPropagation(0.2), True)
# This is the training set. A training set is a list of tuples with two elements
# each. The first element is the input vector, the second element is the output
# vector. In this case, the output is just a number.
train_set = [ ( array(( -1., -1.)), -1. ),
( array(( -1., 1.)), 1. ),
( array(( 1., -1.)), 1. ),
( array(( 1., 1.)), -1. ) ]
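# Note: inputs and targets use the -1/+1 encoding, rather than 0/1, so that
# the targets match the output range of the TanH activation chosen above.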
# This shows the training set to the network.
nn.train(train_set)
# Testing the results:
print nn[0].weights
print nn[1].weights
for x, _ in train_set:
print x, " => ", nn(x)
|
anki1909/peach
|
tutorial/neural-networks/xor-problem.py
|
Python
|
lgpl-2.1
| 1,332
|
[
"NEURON"
] |
dfbcdd81da8c95e7259d95b2a11aa13beaabe42055c6cf4ed06d5b19d2eeb223
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
import threading
import sys
import os
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import (QTextEdit, QVBoxLayout, QLabel, QGridLayout, QHBoxLayout,
QRadioButton, QCheckBox, QLineEdit)
from electrum.gui.qt.util import (read_QIcon, WindowModalDialog, WaitingDialog, OkButton,
CancelButton, Buttons, icon_path, WWLabel, CloseButton)
from electrum.gui.qt.qrcodewidget import QRCodeWidget
from electrum.gui.qt.amountedit import AmountEdit
from electrum.gui.qt.main_window import StatusBarButton
from electrum.gui.qt.installwizard import InstallWizard
from electrum.i18n import _
from electrum.plugin import hook
from electrum.util import is_valid_email
from electrum.logging import Logger
from electrum.base_wizard import GoBack
from .trustedcoin import TrustedCoinPlugin, server
class TOS(QTextEdit):
tos_signal = pyqtSignal()
error_signal = pyqtSignal(object)
class HandlerTwoFactor(QObject, Logger):
def __init__(self, plugin, window):
QObject.__init__(self)
self.plugin = plugin
self.window = window
Logger.__init__(self)
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.plugin.wallet_class):
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].get_tx_derivations(tx):
self.logger.info("twofactor: xpub3 not needed")
return
window = self.window.top_level_window()
auth_code = self.plugin.auth_dialog(window)
WaitingDialog(parent=window,
message=_('Waiting for TrustedCoin server to sign transaction...'),
task=lambda: wallet.on_otp(tx, auth_code),
on_success=lambda *args: on_success(tx),
on_error=on_failure)
class Plugin(TrustedCoinPlugin):
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
@hook
def on_new_window(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
wallet.handler_2fa = HandlerTwoFactor(self, window)
if wallet.can_sign_without_server():
msg = ' '.join([
_('This wallet was restored from seed, and it contains two master private keys.'),
_('Therefore, two-factor authentication is disabled.')
])
action = lambda: window.show_message(msg)
else:
action = partial(self.settings_dialog, window)
button = StatusBarButton(read_QIcon("trustedcoin-status.png"),
_("TrustedCoin"), action)
window.statusBar().addPermanentWidget(button)
self.start_request_thread(window.wallet)
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int = True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
msg = _('If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.')
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
wallet.handler_2fa.prompt_user_for_otp(wallet, tx, on_success, on_failure)
def waiting_dialog_for_billing_info(self, window, *, on_finished=None):
def task():
return self.request_billing_info(window.wallet, suppress_connection_error=False)
def on_error(exc_info):
e = exc_info[1]
window.show_error("{header}\n{exc}\n\n{tor}"
.format(header=_('Error getting TrustedCoin account info.'),
exc=repr(e),
tor=_('If you keep experiencing network problems, try using a Tor proxy.')))
return WaitingDialog(parent=window,
message=_('Requesting account info from TrustedCoin server...'),
task=task,
on_success=on_finished,
on_error=on_error)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
self.waiting_dialog_for_billing_info(window)
return True
return False
def settings_dialog(self, window):
self.waiting_dialog_for_billing_info(window,
on_finished=partial(self.show_settings_dialog, window))
def show_settings_dialog(self, window, success):
if not success:
window.show_message(_('Server not reachable.'))
return
wallet = window.wallet
d = WindowModalDialog(window, _("TrustedCoin Information"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
hbox = QHBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(icon_path("trustedcoin-status.png")))
msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
+ _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
label = QLabel(msg)
label.setOpenExternalLinks(True)
hbox.addStretch(10)
hbox.addWidget(logo)
hbox.addStretch(10)
hbox.addWidget(label)
hbox.addStretch(10)
vbox.addLayout(hbox)
vbox.addStretch(10)
msg = _('TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>'
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
vbox.addStretch(10)
grid = QGridLayout()
vbox.addLayout(grid)
price_per_tx = wallet.price_per_tx
n_prepay = wallet.num_prepay(self.config)
i = 0
for k, v in sorted(price_per_tx.items()):
if k == 1:
continue
grid.addWidget(QLabel("Pay every %d transactions:"%k), i, 0)
grid.addWidget(QLabel(window.format_amount(v/k) + ' ' + window.base_unit() + "/tx"), i, 1)
b = QRadioButton()
b.setChecked(k == n_prepay)
b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
grid.addWidget(b, i, 2)
i += 1
n = wallet.billing_info.get('tx_remaining', 0)
grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def go_online_dialog(self, wizard: InstallWizard):
msg = [
_("Your wallet file is: {}.").format(os.path.abspath(wizard.path)),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "{}" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum.").format(_('Cancel')),
_('If you are online, click on "{}" to continue.').format(_('Next'))
]
msg = '\n\n'.join(msg)
wizard.reset_stack()
try:
wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('accept_terms_of_use'))
except GoBack:
# user clicked 'Cancel' and decided to move wallet file manually
wizard.create_storage(wizard.path)
raise
def accept_terms_of_use(self, window):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Terms of Service")))
tos_e = TOS()
tos_e.setReadOnly(True)
vbox.addWidget(tos_e)
tos_received = False
vbox.addWidget(QLabel(_("Please enter your e-mail address")))
email_e = QLineEdit()
vbox.addWidget(email_e)
next_button = window.next_button
prior_button_text = next_button.text()
next_button.setText(_('Accept'))
def request_TOS():
try:
tos = server.get_terms_of_service()
except Exception as e:
self.logger.exception('Could not retrieve Terms of Service')
tos_e.error_signal.emit(_('Could not retrieve Terms of Service:')
+ '\n' + repr(e))
return
self.TOS = tos
tos_e.tos_signal.emit()
def on_result():
tos_e.setText(self.TOS)
nonlocal tos_received
tos_received = True
set_enabled()
def on_error(msg):
window.show_error(str(msg))
window.terminate()
def set_enabled():
next_button.setEnabled(tos_received and is_valid_email(email_e.text()))
tos_e.tos_signal.connect(on_result)
tos_e.error_signal.connect(on_error)
t = threading.Thread(target=request_TOS)
t.daemon = True
t.start()
email_e.textChanged.connect(set_enabled)
email_e.setFocus(True)
window.exec_layout(vbox, next_enabled=False)
next_button.setText(prior_button_text)
email = str(email_e.text())
self.create_remote_key(email, window)
def request_otp_dialog(self, window, short_id, otp_secret, xpub3):
vbox = QVBoxLayout()
if otp_secret is not None:
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
l.setWordWrap(True)
vbox.addWidget(l)
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel(
"This wallet is already registered with TrustedCoin. "
"To finalize wallet creation, please enter your Google Authenticator Code. "
)
label.setWordWrap(True)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(WWLabel(msg))
pw = AmountEdit(None, is_int = True)
pw.setFocus(True)
pw.setMaximumWidth(50)
hbox.addWidget(pw)
vbox.addLayout(hbox)
cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
vbox.addWidget(cb_lost)
cb_lost.setVisible(otp_secret is None)
def set_enabled():
b = True if cb_lost.isChecked() else len(pw.text()) == 6
window.next_button.setEnabled(b)
pw.textChanged.connect(set_enabled)
cb_lost.toggled.connect(set_enabled)
window.exec_layout(vbox, next_enabled=False, raise_on_cancel=False)
self.check_otp(window, short_id, otp_secret, xpub3, pw.get_amount(), cb_lost.isChecked())
|
neocogent/electrum
|
electrum/plugins/trustedcoin/qt.py
|
Python
|
mit
| 13,106
|
[
"VisIt"
] |
f2e53cc16162d43d4fe1378acbac7fc1d8d16a79a35fe7799f1a2136c998647c
|
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
# Django settings for galaxy project.
"""
Production configuration file.
The following environment variables are supported:
* GALAXY_SECRET_KEY
* GALAXY_ALLOWED_HOSTS
* GALAXY_EMAIL_HOST
* GALAXY_DB_URL
* GALAXY_EMAIL_PORT
* GALAXY_EMAIL_USER
* GALAXY_EMAIL_PASSWORD
* GALAXY_ELASTICSEARCH_HOST
* GALAXY_ELASTICSEARCH_PORT
* GALAXY_MEMCACHE_HOST
* GALAXY_MEMCACHE_PORT
* GALAXY_RABBITMQ_HOST
* GALAXY_RABBITMQ_PORT
* GALAXY_RABBITMQ_USER
* GALAXY_RABBITMQ_PASSWORD
* GALAXY_ADMIN_PATH
"""
import os
import dj_database_url
from . import include_settings
from .default import * # noqa
def _read_secret_key(settings_dir='/etc/galaxy'):
"""
Reads secret key from environment variable, otherwise from SECRET_KEY
file in settings directory.
If the secret key cannot be read, the function returns None, which
causes a Django configuration exception.
:param settings_dir: Settings directory, default: '/etc/galaxy'.
:return: Secret key string, if available, None otherwise.
"""
try:
return os.environ['GALAXY_SECRET_KEY']
except KeyError:
pass
try:
with open(os.path.join(settings_dir, 'SECRET_KEY')) as fp:
return fp.read().strip()
except IOError:
return None
# =========================================================
# Django Core Settings
# =========================================================
DEBUG = False
ALLOWED_HOSTS = os.environ.get('GALAXY_ALLOWED_HOSTS', '*').split(',')
# Database
# ---------------------------------------------------------
# Define GALAXY_DB_URL=postgres://USER:PASSWORD@HOST:PORT/NAME
DATABASES = {'default': dj_database_url.config(env='GALAXY_DB_URL', conn_max_age=None)}
# Cache
# ---------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '{0}:{1}'.format(
os.environ.get('GALAXY_MEMCACHE_HOST', ''),
os.environ.get('GALAXY_MEMCACHE_PORT', 11211)),
},
'download_count': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'main_download_count_cache',
'TIMEOUT': None,
'OPTIONS': {
'MAX_ENTRIES': 100000,
'CULL_FREQUENCY': 0
}
}
}
# Static files
# ---------------------------------------------------------
STATIC_ROOT = '/var/lib/galaxy/public/static'
# Security
# ---------------------------------------------------------
SECRET_KEY = _read_secret_key()
# Email settings
# ---------------------------------------------------------
# FIXME(cutwater): Review parameters usage
EMAIL_HOST = os.environ.get('GALAXY_EMAIL_HOST', '')
EMAIL_PORT = int(os.environ.get('GALAXY_EMAIL_PORT', 587))
EMAIL_HOST_USER = os.environ.get('GALAXY_EMAIL_USER', '')
EMAIL_HOST_PASSWORD = os.environ.get('GALAXY_EMAIL_PASSWORD', '')
EMAIL_USE_TLS = True
# =========================================================
# Third Party Apps Settings
# =========================================================
# Elasticsearch settings
# ---------------------------------------------------------
ELASTICSEARCH = {
'default': {
'hosts': [
'{0}:{1}'.format(
os.environ.get('GALAXY_ELASTICSEARCH_HOST'),
os.environ.get('GALAXY_ELASTICSEARCH_PORT', 9200))
],
},
}
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'galaxy.main.elasticsearch_backend'
'.ElasticsearchSearchEngine',
'URL': [
'http://{0}:{1}'.format(
os.environ.get('GALAXY_ELASTICSEARCH_HOST'),
os.environ.get('GALAXY_ELASTICSEARCH_PORT', 9200))
],
'INDEX_NAME': 'haystack',
},
}
# Celery settings
# ---------------------------------------------------------
# TODO(cutwater): Replace with BROKER_URL connection string parameter
BROKER_URL = 'amqp://{user}:{password}@{host}:{port}/{vhost}'.format(
user=os.environ.get('GALAXY_RABBITMQ_USER', 'galaxy'),
password=os.environ.get('GALAXY_RABBITMQ_PASSWORD', ''),
host=os.environ.get('GALAXY_RABBITMQ_HOST', ''),
port=os.environ.get('GALAXY_RABBITMQ_PORT', 5672),
vhost=os.environ.get('GALAXY_RABBITMQ_VHOST', 'galaxy'),
)
# =========================================================
# Galaxy Settings
# =========================================================
SITE_ENV = 'PROD'
SITE_NAME = os.environ.get('GALAXY_SITE_NAME', 'localhost')
# FIXME(cutwater): Remove WAIT_FOR logic from django application
WAIT_FOR = [
{
'host': DATABASES['default']['HOST'],
'port': DATABASES['default']['PORT'],
},
{
'host': os.environ.get('GALAXY_RABBITMQ_HOST', ''),
'port': int(os.environ.get('GALAXY_RABBITMQ_PORT', 5672))
},
{
'host': os.environ.get('GALAXY_MEMCACHE_HOST', ''),
'port': int(os.environ.get('GALAXY_MEMCACHE_PORT', 11211))
},
{
'host': os.environ.get('GALAXY_ELASTICSEARCH_HOST', ''),
'port': int(os.environ.get('GALAXY_ELASTICSEARCH_PORT', 9200)),
}
]
ADMIN_URL_PATTERN = r'^%s/' % os.environ.get('GALAXY_ADMIN_PATH', 'admin')
# =========================================================
# System Settings
# =========================================================
include_settings('/etc/galaxy/settings.py', scope=globals(), optional=True)
|
chouseknecht/galaxy
|
galaxy/settings/production.py
|
Python
|
apache-2.0
| 6,084
|
[
"Galaxy"
] |
5cecc693530d5ccf5f5b75affe9c9f41a4666908e2dec9ecdda777524b92287b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Running the tests:
# $ python3 -m unittest discover --start-directory ./tests/
# Checking the coverage of the tests:
# $ coverage run --include=./*.py --omit=tests/* -m unittest discover && rm -rf ../html_dev/coverage && coverage html --directory=../html_dev/coverage --title="Code test coverage for vallenato.fr"
import unittest
import sys
import os
from unittest.mock import patch
from unittest.mock import MagicMock
from unittest.mock import call
sys.path.append('.')
target = __import__("vallenato_fr")
youtube = __import__("youtube")
class TestYtGetAuthenticatedService(unittest.TestCase):
@patch("youtube.build")
@patch("youtube.run_flow")
@patch("youtube.Storage")
@patch("youtube.flow_from_clientsecrets")
def test_yt_get_authenticated_service(self, yt_ffc, yt_S, yt_rf, yt_b):
args = target.parse_args(['--website'])
yt = youtube.yt_get_authenticated_service(args)
# local_file_path = "/home/emilien/devel/vallenato.fr/bin"
local_file_path = os.getcwd()
expected_yt_ffc = [call('client_secret.json', message='\nWARNING: Please configure OAuth 2.0\n\nTo make this sample run you will need to populate the client_secrets.json file\nfound at:\n %s/client_secret.json\nwith information from the APIs Console\nhttps://console.developers.google.com\n\nFor more information about the client_secrets.json file format, please visit:\nhttps://developers.google.com/api-client-library/python/guide/aaa_client_secrets\n' % local_file_path, scope=['https://www.googleapis.com/auth/youtube.readonly'])]
self.assertTrue(expected_yt_ffc in yt_ffc.mock_calls)
self.assertTrue(call('vallenato.fr-oauth2.json') in yt_S.mock_calls)
self.assertEqual([call(yt_ffc(), yt_S(), args)], yt_rf.mock_calls)
expected_yt_b = [call('youtube', 'v3', cache_discovery=False, credentials=yt_rf())]
self.assertTrue(expected_yt_b in yt_b.mock_calls)
class TestYtGetMyUploadsList(unittest.TestCase):
def test_yt_get_my_uploads_list(self):
uploads_playlist_id = youtube.yt_get_my_uploads_list(None)
self.assertEqual(uploads_playlist_id, "UU_8R235jg1ld6MCMOzz2khQ")
# class TestYtListMyUploadedVideos(unittest.TestCase):
# def test_yt_list_my_uploaded_videos(self):
# yt = MagicMock()
# uploaded_videos = youtube.yt_list_my_uploaded_videos("UU_8R235jg1ld6MCMOzz2khQ", yt)
if __name__ == '__main__':
unittest.main()
|
e2jk/vallenato.fr
|
bin/tests/test_youtube.py
|
Python
|
agpl-3.0
| 2,489
|
[
"VisIt"
] |
5a0bdce9f7447e51786432a2d39bcea0b298ec97a8c80355e353cbc12305d0db
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffypdnn(RPackage):
"""The package contains functions to perform the PDNN method
described by Li Zhang et al."""
homepage = "https://www.bioconductor.org/packages/affypdnn/"
url = "https://git.bioconductor.org/packages/affypdnn"
version('1.50.0', git='https://git.bioconductor.org/packages/affypdnn', commit='97ff68e9f51f31333c0330435ea23b212b3ed18a')
depends_on('r@3.4.0:3.4.9', when='@1.50.0')
depends_on('r-affy', type=('build', 'run'))
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-affypdnn/package.py
|
Python
|
lgpl-2.1
| 1,740
|
[
"Bioconductor"
] |
9881aa7eb524afec694c186c7a03f9d65b554ba6c2b4f9d433aa84219e13e081
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
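# Illustrative usage (a sketch, not part of the original file):
#
#   @memoize
#   def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)
#
# Repeated calls with the same immutable arguments are served from the cache
# instead of being recomputed.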
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
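# For example (following the parsing rules above):
#   ParseQualifiedTarget('chrome/chrome.gyp:browser#host')
#     -> ['chrome/chrome.gyp', 'browser', 'host']
#   ParseQualifiedTarget('browser') -> [None, 'browser', None]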
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# If |follow_symlink_path| is true (default) and |path| is a symlink, then
# this method returns a path to the real file represented by |path|. If it is
# false, this method returns a path to the symlink. If |path| is not a
# symlink, this option has no effect.
# Convert to normalized (and therefore absolute paths).
if follow_path_symlink:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
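# For instance (a sketch; assuming no symlinks are involved and both paths
# are taken relative to the current directory):
#   RelativePath('foo/bar/baz.c', 'foo') -> 'bar/baz.c'
#   RelativePath('foo', 'foo/bar') -> '..'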
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
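# Illustrative behavior (derived from the quoting and escaping rules above):
#   EncodePOSIXShellArgument('simple')     -> simple
#   EncodePOSIXShellArgument('two words')  -> "two words"
#   EncodePOSIXShellArgument('say "hi"')   -> "say \"hi\""
#   EncodePOSIXShellList(['echo', '$HOME is here']) -> echo "$HOME is here"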
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
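# Small example (a sketch): with
#   target_dicts = {'a': {'dependencies': ['b']}, 'b': {'dependencies': []}}
# DeepDependencyTargets(target_dicts, ['a']) returns ['b'].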
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError as e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(0o077)
os.umask(umask)
os.chmod(self.tmp_path, 0o666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
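# Typical usage (a sketch):
#   out = WriteOnDiff('build.ninja')
#   out.write(contents)
#   out.close()  # replaces build.ninja only if the contents actually changed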
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0o755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
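# e.g. uniquer([1, 2, 1, 3]) -> [1, 2, 3], preserving first-seen order.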
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
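# Illustrative usage (a sketch):
#   s = OrderedSet(['b', 'a', 'b'])
#   list(s) -> ['b', 'a']  # insertion order kept, duplicates dropped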
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
return re.findall(r'\$\(([^)]*)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
|
dreamllq/node
|
tools/gyp/pylib/gyp/common.py
|
Python
|
apache-2.0
| 20,003
|
[
"VisIt"
] |
ee3bc8926856c9a8e4e408923a4656c831225c519b5001d1471f558b72220166
|
import numpy as np
def affine_forward(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension D = d_1 * ... * d_k, and
then transform it to an output vector of dimension M.
Inputs:
- x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
- w: A numpy array of weights, of shape (D, M)
- b: A numpy array of biases, of shape (M,)
Returns a tuple of:
- out: output, of shape (N, M)
- cache: (x, w, b)
"""
out = None
#############################################################################
# Implement the affine forward pass. Store the result in out. You #
# will need to reshape the input into rows. #
#############################################################################
row_dim = x.shape[0]
col_dim = np.prod(x.shape[1:])
x_reshape = x.reshape(row_dim, col_dim)
out = np.dot(x_reshape, w) + b
cache = (x, w, b)
return out, cache
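# Shape sketch (illustrative): x of shape (2, 3, 4) is flattened to (2, 12);
# with w of shape (12, 5) and b of shape (5,), out then has shape (2, 5).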
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (N, M)
- cache: Tuple of:
- x: Input data, of shape (N, d_1, ... d_k)
- w: Weights, of shape (D, M)
- b: Biases, of shape (M,)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
- dw: Gradient with respect to w, of shape (D, M)
- db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
dx, dw, db = None, None, None
#############################################################################
# Implement the affine backward pass. #
#############################################################################
row_dim = x.shape[0]
col_dim = np.prod(x.shape[1:])
x_reshape = x.reshape(row_dim, col_dim)
dw = x_reshape.T.dot(dout)
dx = dout.dot(w.T).reshape(x.shape)
db = np.sum(dout, axis=0)
return dx, dw, db
def relu_forward(x):
"""
Computes the forward pass for a layer of rectified linear units (ReLUs).
Input:
- x: Inputs, of any shape
Returns a tuple of:
- out: Output, of the same shape as x
- cache: x
"""
out = None
#############################################################################
# Implement the ReLU forward pass. #
#############################################################################
out = np.maximum(0, x)
cache = x
return out, cache
def relu_backward(dout, cache):
"""
Computes the backward pass for a layer of rectified linear units (ReLUs).
Input:
- dout: Upstream derivatives, of any shape
- cache: Input x, of same shape as dout
Returns:
- dx: Gradient with respect to x
"""
dx, x = None, cache
#############################################################################
# Implement the ReLU backward pass. #
#############################################################################
# Gradient flows only through inputs that were positive in the forward pass.
dx = dout * (x > 0)
return dx
def batchnorm_forward(x, gamma, beta, bn_param):
"""
Forward pass for batch normalization.
During training the sample mean and (uncorrected) sample variance are
computed from minibatch statistics and used to normalize the incoming data.
During training we also keep an exponentially decaying running mean of the mean
and variance of each feature, and these averages are used to normalize data
at test-time.
At each timestep we update the running averages for mean and variance using
an exponential decay based on the momentum parameter:
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
Note that the batch normalization paper suggests a different test-time
behavior: they compute sample mean and variance for each feature using a
large number of training images rather than using a running average. For
this implementation we have chosen to use running averages instead since
they do not require an additional estimation step; the torch7 implementation
of batch normalization also uses running averages.
Input:
- x: Data of shape (N, D)
- gamma: Scale parameter of shape (D,)
- beta: Shift paremeter of shape (D,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: of shape (N, D)
- cache: A tuple of values needed in the backward pass
"""
mode = bn_param['mode']
eps = bn_param.get('eps', 1e-5)
momentum = bn_param.get('momentum', 0.9)
N, D = x.shape
running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
out, cache = None, None
if mode == 'train':
#############################################################################
# Implement the training-time forward pass for batch normalization. #
# Use minibatch statistics to compute the mean and variance, use these #
# statistics to normalize the incoming data, and scale and shift the #
# normalized data using gamma and beta. #
# #
# You should store the output in the variable out. Any intermediates that #
# you need for the backward pass should be stored in the cache variable. #
# #
# You should also use your computed sample mean and variance together with #
# the momentum variable to update the running mean and running variance, #
# storing your result in the running_mean and running_var variables. #
#############################################################################
sample_mean = np.mean(x, axis=0)
sample_var = np.var(x, axis=0)
x_hat = (x - sample_mean) / np.sqrt(sample_var + eps)
out = x_hat * gamma + beta
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
cache = {}
cache['sample_mean'] = sample_mean
cache['sample_var'] = sample_var
cache['x_hat'] = x_hat
cache['x'] = x
cache['gamma'] = gamma
cache['beta'] = beta
cache['eps'] = eps
elif mode == 'test':
#############################################################################
# Implement the test-time forward pass for batch normalization. Use #
# the running mean and variance to normalize the incoming data, then scale #
# and shift the normalized data using gamma and beta. Store the result in #
# the out variable. #
#############################################################################
x_hat = (x - running_mean) / np.sqrt(running_var + eps)
out = x_hat * gamma + beta
else:
raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
# Store the updated running means back into bn_param
bn_param['running_mean'] = running_mean
bn_param['running_var'] = running_var
return out, cache
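# Quick numeric check of the running-average update (illustrative): with
# momentum = 0.9, running_mean = 1.0 and sample_mean = 3.0, the updated
# running_mean is 0.9 * 1.0 + 0.1 * 3.0 = 1.2.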
def batchnorm_backward(dout, cache):
"""
Backward pass for batch normalization.
For this implementation, you should write out a computation graph for
batch normalization on paper and propagate gradients backward through
intermediate nodes.
Inputs:
- dout: Upstream derivatives, of shape (N, D)
- cache: Variable of intermediates from batchnorm_forward.
Returns a tuple of:
- dx: Gradient with respect to inputs x, of shape (N, D)
- dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
- dbeta: Gradient with respect to shift parameter beta, of shape (D,)
"""
dx, dgamma, dbeta = None, None, None
#############################################################################
# Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
#############################################################################
m = dout.shape[0]
x_centered = cache['x'] - cache['sample_mean']
inv_std = 1.0 / np.sqrt(cache['sample_var'] + cache['eps'])
dx_hat = dout * cache['gamma']
dsample_var = np.sum(dx_hat * x_centered * (-0.5) * inv_std**3, axis=0)
dsample_mean = np.sum(dx_hat * -inv_std, axis=0) + dsample_var * np.sum(-2 * x_centered, axis=0) / m
dx = dx_hat * inv_std + dsample_var * (2 * x_centered / m) + dsample_mean / m
dbeta = np.sum(dout, axis=0)
dgamma = np.sum(dout * cache['x_hat'], axis=0)
return dx, dgamma, dbeta
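# --- Illustrative sanity check (added for exposition; not part of the
# --- original assignment file). The analytic gradient above can be verified
# against a centred-difference numerical gradient of the forward pass; the
# relative error should be tiny (around 1e-8 or smaller).
def _demo_batchnorm_gradcheck(h=1e-5):
    np.random.seed(1)
    x = np.random.randn(4, 3)
    gamma, beta = np.random.randn(3), np.random.randn(3)
    dout = np.random.randn(4, 3)
    _, cache = batchnorm_forward(x, gamma, beta, {'mode': 'train'})
    dx, _, _ = batchnorm_backward(dout, cache)
    dx_num = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        old = x[ix]
        x[ix] = old + h
        pos, _ = batchnorm_forward(x, gamma, beta, {'mode': 'train'})
        x[ix] = old - h
        neg, _ = batchnorm_forward(x, gamma, beta, {'mode': 'train'})
        x[ix] = old
        dx_num[ix] = np.sum((pos - neg) * dout) / (2 * h)
        it.iternext()
    return np.max(np.abs(dx - dx_num) / (np.abs(dx) + np.abs(dx_num) + 1e-12))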
def batchnorm_backward_alt(dout, cache):
"""
Alternative backward pass for batch normalization.
For this implementation you should work out the derivatives for the batch
normalizaton backward pass on paper and simplify as much as possible. You
should be able to derive a simple expression for the backward pass.
Note: This implementation should expect to receive the same cache variable
as batchnorm_backward, but might not use all of the values in the cache.
Inputs / outputs: Same as batchnorm_backward
"""
dx, dgamma, dbeta = None, None, None
#############################################################################
# Implement the backward pass for batch normalization. Store the            #
# results in the dx, dgamma, and dbeta variables.                           #
#                                                                           #
# After computing the gradient with respect to the centered inputs, you     #
# should be able to compute gradients with respect to the inputs in a       #
# single statement; our implementation fits on a single 80-character line.  #
#############################################################################
# Collapsing the computation graph used in batchnorm_backward gives the
# standard simplified expression:
#   dx = inv_std/N * (N*dx_hat - sum(dx_hat) - x_hat * sum(dx_hat * x_hat))
N = dout.shape[0]
x_hat, gamma = cache['x_hat'], cache['gamma']
inv_std = 1.0 / np.sqrt(cache['sample_var'] + cache['eps'])
dbeta = np.sum(dout, axis=0)
dgamma = np.sum(dout * x_hat, axis=0)
dx_hat = dout * gamma
dx = inv_std / N * (N * dx_hat - np.sum(dx_hat, axis=0) - x_hat * np.sum(dx_hat * x_hat, axis=0))
return dx, dgamma, dbeta
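# --- Illustrative sanity check (added for exposition; not part of the
# --- original assignment file). The simplified expression should match the
# computation-graph version above to numerical precision.
def _demo_batchnorm_backward_equivalence():
    np.random.seed(2)
    x = np.random.randn(16, 5)
    dout = np.random.randn(16, 5)
    _, cache = batchnorm_forward(x, np.random.randn(5), np.random.randn(5),
                                 {'mode': 'train'})
    dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
    dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
    return (np.allclose(dx1, dx2), np.allclose(dgamma1, dgamma2),
            np.allclose(dbeta1, dbeta2))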
def dropout_forward(x, dropout_param):
"""
Performs the forward pass for (inverted) dropout.
Inputs:
- x: Input data, of any shape
- dropout_param: A dictionary with the following keys:
- p: Dropout parameter. We keep each neuron output with probability p.
- mode: 'test' or 'train'. If the mode is train, then perform dropout;
if the mode is test, then just return the input.
- seed: Seed for the random number generator. Passing seed makes this
function deterministic, which is needed for gradient checking but not in
real networks.
Outputs:
- out: Array of the same shape as x.
- cache: A tuple (dropout_param, mask). In training mode, mask is the dropout
mask that was used to multiply the input; in test mode, mask is None.
"""
p, mode = dropout_param['p'], dropout_param['mode']
if 'seed' in dropout_param:
np.random.seed(dropout_param['seed'])
mask = None
out = None
if mode == 'train':
###########################################################################
# Implement the training phase forward pass for inverted dropout. #
# Store the dropout mask in the mask variable. #
###########################################################################
mask = (np.random.rand(*x.shape) < p) / p
out = x * mask
elif mode == 'test':
###########################################################################
# Implement the test phase forward pass for inverted dropout. #
###########################################################################
out = x
cache = (dropout_param, mask)
out = out.astype(x.dtype, copy=False)
return out, cache
def dropout_backward(dout, cache):
"""
Perform the backward pass for (inverted) dropout.
Inputs:
- dout: Upstream derivatives, of any shape
- cache: (dropout_param, mask) from dropout_forward.
"""
dropout_param, mask = cache
mode = dropout_param['mode']
dx = None
if mode == 'train':
###########################################################################
# Implement the training phase backward pass for inverted dropout. #
###########################################################################
dx = dout * mask
elif mode == 'test':
dx = dout
return dx
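# --- Illustrative usage sketch (added for exposition; not part of the
# --- original assignment file). Because the mask is divided by the keep
# probability p at training time (inverted dropout), the expected activation
# matches test time, where the input passes through unchanged.
def _demo_dropout():
    np.random.seed(3)
    x = 10.0 + np.random.randn(500, 500)
    out_train, _ = dropout_forward(x, {'mode': 'train', 'p': 0.8})
    out_test, _ = dropout_forward(x, {'mode': 'test', 'p': 0.8})
    return out_train.mean(), out_test.mean()  # both close to 10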
def conv_forward_naive(x, w, b, conv_param):
"""
A naive implementation of the forward pass for a convolutional layer.
The input consists of N data points, each with C channels, height H and width
W. We convolve each input with F different filters, where each filter spans
all C channels and has height HH and width WW.
Input:
- x: Input data of shape (N, C, H, W)
- w: Filter weights of shape (F, C, HH, WW)
- b: Biases, of shape (F,)
- conv_param: A dictionary with the following keys:
- 'stride': The number of pixels between adjacent receptive fields in the
horizontal and vertical directions.
- 'pad': The number of pixels that will be used to zero-pad the input.
Returns a tuple of:
- out: Output data, of shape (N, F, H', W') where H' and W' are given by
H' = 1 + (H + 2 * pad - HH) / stride
W' = 1 + (W + 2 * pad - WW) / stride
- cache: (x, w, b, conv_param)
"""
out = None
#############################################################################
# Implement the convolutional forward pass. #
# Hint: you can use the function np.pad for padding. #
#############################################################################
N, C, H, W = x.shape
F, CC, HH, WW = w.shape
assert C == CC
H_out = 1 + (H + 2 * conv_param['pad'] - HH) / conv_param['stride']
W_out = 1 + (W + 2 * conv_param['pad'] - WW) / conv_param['stride']
out = np.zeros((N, F, H_out, W_out))
# padding
pad = conv_param['pad']
x_with_pad = np.pad(x, ((0,0),(0,0),(pad,pad),(pad,pad)), 'constant', constant_values=0)
_, _, H, W = x_with_pad.shape
# convolving
stride = conv_param['stride']
for i in range(0, N):
x_data = x_with_pad[i]
xx, yy = -1, -1
for j in range(0, H-HH+1, stride):
yy += 1
for k in range(0, W-WW+1, stride):
xx += 1
x_rf = x_data[:, j:j+HH, k:k+WW]
for l in range(0, F):
conv_value = np.sum(x_rf * w[l]) + b[l]
out[i, l, yy, xx] = conv_value
xx = -1
cache = (x, w, b, conv_param)
return out, cache
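# --- Illustrative shape check (added for exposition; not part of the
# --- original assignment file). The output spatial size follows
# H' = 1 + (H + 2*pad - HH) / stride with floor division. For a 32x32 input
# with 7x7 filters, stride 2 and pad 3: H' = 1 + (32 + 6 - 7) // 2 = 16.
def _demo_conv_shape():
    x = np.random.randn(2, 3, 32, 32)
    w = np.random.randn(4, 3, 7, 7)
    b = np.zeros(4)
    out, _ = conv_forward_naive(x, w, b, {'stride': 2, 'pad': 3})
    return out.shape  # (2, 4, 16, 16)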
def conv_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a convolutional layer.
Inputs:
- dout: Upstream derivatives.
- cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive
Returns a tuple of:
- dx: Gradient with respect to x
- dw: Gradient with respect to w
- db: Gradient with respect to b
"""
dx, dw, db = None, None, None
#############################################################################
# Implement the convolutional backward pass. #
# Inspired by #
# https://github.com/cthorey/CS231/blob/master/assignment2/cs231n/layers.py #
#############################################################################
x, w, b, conv_param = cache
N, C, H, W = x.shape
F, C, HH, WW = w.shape
pad = conv_param['pad']
stride = conv_param['stride']
x_with_pad = np.pad(x, ((0,0),(0,0),(pad,pad),(pad,pad)), 'constant', constant_values=0)
N, F, Hdout, Wdout = dout.shape
H_out = 1 + (H + 2 * conv_param['pad'] - HH) / conv_param['stride']
W_out = 1 + (W + 2 * conv_param['pad'] - WW) / conv_param['stride']
db = np.zeros((b.shape))
for i in range(0, F):
db[i] = np.sum(dout[:, i, :, :])
dw = np.zeros((F, C, HH, WW))
for i in range(0, F):
for j in range(0, C):
for k in range(0, HH):
for l in range(0, WW):
dw[i, j, k, l] = np.sum(dout[:, i, :, :] * x_with_pad[:, j, k:k + Hdout * stride:stride, l:l + Wdout * stride:stride])
dx = np.zeros((N, C, H, W))
for nprime in range(N):
for i in range(H):
for j in range(W):
for f in range(F):
for k in range(Hdout):
for l in range(Wdout):
mask1 = np.zeros_like(w[f, :, :, :])
mask2 = np.zeros_like(w[f, :, :, :])
if (i + pad - k * stride) < HH and (i + pad - k * stride) >= 0:
mask1[:, i + pad - k * stride, :] = 1.0
if (j + pad - l * stride) < WW and (j + pad - l * stride) >= 0:
mask2[:, :, j + pad - l * stride] = 1.0
w_masked = np.sum(w[f, :, :, :] * mask1 * mask2, axis=(1, 2))
dx[nprime, :, i, j] += dout[nprime, f, k, l] * w_masked
return dx, dw, db
def max_pool_forward_naive(x, pool_param):
"""
A naive implementation of the forward pass for a max pooling layer.
Inputs:
- x: Input data, of shape (N, C, H, W)
- pool_param: dictionary with the following keys:
- 'pool_height': The height of each pooling region
- 'pool_width': The width of each pooling region
- 'stride': The distance between adjacent pooling regions
Returns a tuple of:
- out: Output data
- cache: (x, pool_param)
"""
out = None
#############################################################################
# Implement the max pooling forward pass #
#############################################################################
N, C, H, W = x.shape
pool_height = pool_param['pool_height']
pool_width = pool_param['pool_width']
stride = pool_param['stride']
H_out = 1 + (H - pool_height) / stride
W_out = 1 + (W - pool_width) / stride
out = np.zeros((N, C, H_out, W_out))
for i in range(0, N):
x_data = x[i]
xx, yy = -1, -1
for j in range(0, H-pool_height+1, stride):
yy += 1
for k in range(0, W-pool_width+1, stride):
xx += 1
x_rf = x_data[:, j:j+pool_height, k:k+pool_width]
for l in range(0, C):
out[i, l, yy, xx] = np.max(x_rf[l])
xx = -1
cache = (x, pool_param)
return out, cache
def max_pool_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a max pooling layer.
Inputs:
- dout: Upstream derivatives
- cache: A tuple of (x, pool_param) as in the forward pass.
Returns:
- dx: Gradient with respect to x
"""
dx = None
#############################################################################
# Implement the max pooling backward pass #
#############################################################################
x, pool_param = cache
N, C, H, W = x.shape
pool_height = pool_param['pool_height']
pool_width = pool_param['pool_width']
stride = pool_param['stride']
dx = np.zeros((N, C, H, W))
H_out = 1 + (H - pool_height) / stride
W_out = 1 + (W - pool_width) / stride
for i in range(0, N):
x_data = x[i]
xx, yy = -1, -1
for j in range(0, H-pool_height+1, stride):
yy += 1
for k in range(0, W-pool_width+1, stride):
xx += 1
x_rf = x_data[:, j:j+pool_height, k:k+pool_width]
for l in range(0, C):
x_pool = x_rf[l]
mask = x_pool == np.max(x_pool)
dx[i, l, j:j+pool_height, k:k+pool_width] += dout[i, l, yy, xx] * mask
xx = -1
return dx
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
"""
Computes the forward pass for spatial batch normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
- gamma: Scale parameter, of shape (C,)
- beta: Shift parameter, of shape (C,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance. momentum=0 means that
old information is discarded completely at every time step, while
momentum=1 means that new information is never incorporated. The
default of momentum=0.9 should work well in most situations.
- running_mean: Array of shape (C,) giving running mean of features
- running_var: Array of shape (C,) giving running variance of features
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
out, cache = None, None
#############################################################################
# Implement the forward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should #
# be very short; ours is less than five lines. #
#############################################################################
N, C, H, W = x.shape
x_reshaped = x.transpose(0,2,3,1).reshape(N*H*W, C)
out_tmp, cache = batchnorm_forward(x_reshaped, gamma, beta, bn_param)
out = out_tmp.reshape(N, H, W, C).transpose(0, 3, 1, 2)
return out, cache
def spatial_batchnorm_backward(dout, cache):
"""
Computes the backward pass for spatial batch normalization.
Inputs:
- dout: Upstream derivatives, of shape (N, C, H, W)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient with respect to inputs, of shape (N, C, H, W)
- dgamma: Gradient with respect to scale parameter, of shape (C,)
- dbeta: Gradient with respect to shift parameter, of shape (C,)
"""
dx, dgamma, dbeta = None, None, None
#############################################################################
# Implement the backward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should #
# be very short; ours is less than five lines. #
#############################################################################
N, C, H, W = dout.shape
dout_reshaped = dout.transpose(0,2,3,1).reshape(N*H*W, C)
dx_tmp, dgamma, dbeta = batchnorm_backward(dout_reshaped, cache)
dx = dx_tmp.reshape(N, H, W, C).transpose(0, 3, 1, 2)
return dx, dgamma, dbeta
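# --- Illustrative shape check (added for exposition; not part of the
# --- original assignment file). Spatial batchnorm treats every
# (sample, row, col) position as a separate example and normalizes per
# channel, so per-channel statistics of the output are close to zero mean
# and unit variance in train mode.
def _demo_spatial_batchnorm():
    np.random.seed(4)
    x = 4.0 + 3.0 * np.random.randn(8, 3, 16, 16)
    out, _ = spatial_batchnorm_forward(x, np.ones(3), np.zeros(3),
                                       {'mode': 'train'})
    return out.mean(axis=(0, 2, 3)), out.var(axis=(0, 2, 3))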
def svm_loss(x, y):
"""
Computes the loss and gradient for multiclass SVM classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
N = x.shape[0]
correct_class_scores = x[np.arange(N), y]
margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
margins[np.arange(N), y] = 0
loss = np.sum(margins) / N
num_pos = np.sum(margins > 0, axis=1)
dx = np.zeros_like(x)
dx[margins > 0] = 1
dx[np.arange(N), y] -= num_pos
dx /= N
return loss, dx
def softmax_loss(x, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
probs = np.exp(x - np.max(x, axis=1, keepdims=True))
probs /= np.sum(probs, axis=1, keepdims=True)
N = x.shape[0]
loss = -np.sum(np.log(probs[np.arange(N), y])) / N
dx = probs.copy()
dx[np.arange(N), y] -= 1
dx /= N
return loss, dx
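# --- Illustrative sanity check (added for exposition; not part of the
# --- original assignment file). For near-uniform random scores over C
# classes the softmax loss is about log(C), and the SVM loss is about C - 1
# since every margin sits near 1.
def _demo_losses():
    np.random.seed(5)
    N, C = 100, 10
    x = 0.001 * np.random.randn(N, C)
    y = np.random.randint(C, size=N)
    svm_l, svm_dx = svm_loss(x, y)
    softmax_l, softmax_dx = softmax_loss(x, y)
    return svm_l, softmax_l  # roughly 9.0 and log(10) ~ 2.3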
|
arasdar/DL
|
stanford-cs231n-master/assignment2/layers.py
|
Python
|
unlicense
| 24,727
|
[
"NEURON"
] |
21848bb54fb7390e5b848502fa1e66f8c248383560e5752e88acfd3dd0a51752
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Compute dynamic properties."""
import logging
from typing import Any, Dict
import numpy as np
import pandas
from scipy.stats import spearmanr
from ..math_helper import (displacement_periodic, quaternion_rotation,
rotate_vectors)
from ..molecules import Molecule, Trimer
np.seterr(divide='raise', invalid='raise', over='raise')
logger = logging.getLogger(__name__)
class dynamics(object):
"""Compute dynamic properties of a simulation."""
dyn_dtype = np.float32
def __init__(self,
timestep: int,
box: np.ndarray,
position: np.ndarray,
orientation: np.ndarray=None,
molecule: Molecule=Trimer(),
spearman: bool=False,
) -> None:
"""Initialise a dynamics instance.
Args:
timestep (int): The timestep on which the configuration was taken.
position (py:class:`numpy.ndarray`): The positions of the molecules
with shape ``(nmols, 3)``. Even if the simulation is only 2D,
all 3 dimensions of the position need to be passed.
orientation (py:class:`numpy.ndarray`): The orientations of all the
molecules as a quaternion in the form ``(w, x, y, z)``. If no
orientation is supplied then no rotational quantities are
calculated.
"""
self.timestep = timestep
self.box = box[:3]
self.position = position.astype(self.dyn_dtype)
self.num_particles = position.shape[0]
self.orientation = None
if orientation is not None:
    self.orientation = orientation.astype(self.dyn_dtype)
self.mol_vector = molecule.positions
self.spearman = spearman
def computeMSD(self, position: np.ndarray) -> float:
"""Compute the mean squared displacement."""
result = translationalDisplacement(self.box, self.position, position)
return mean_squared_displacement(result)
def computeMFD(self, position: np.ndarray) -> float:
"""Compute the fourth power of displacement."""
result = translationalDisplacement(self.box, self.position, position)
return mean_fourth_displacement(result)
def computeAlpha(self, position: np.ndarray) -> float:
r"""Compute the non-gaussian parameter alpha.
.. math::
\alpha = \frac{\langle \Delta r^4\rangle}
{2\langle \Delta r^2 \rangle^2} -1
"""
result = translationalDisplacement(self.box, self.position, position)
return alpha_non_gaussian(result)
def computeTimeDelta(self, timestep: int) -> int:
"""Time difference between keyframe and timestep."""
return timestep - self.timestep
def computeRotation(self, orientation: np.ndarray) -> float:
"""Compute the rotation of the moleule."""
result = rotationalDisplacement(self.orientation, orientation)
return mean_rotation(result)
def get_rotations(self, orientation: np.ndarray) -> np.ndarray:
"""Get all the rotations."""
result = rotationalDisplacement(self.orientation, orientation)
return result
def get_displacements(self, position: np.ndarray) -> np.ndarray:
"""Get all the displacements."""
result = translationalDisplacement(self.box, self.position, position)
return mean_displacement(result)
def computeStructRelax(self, position: np.ndarray,
orientation: np.ndarray,
threshold: float=0.3
) -> float:
particle_displacement = translationalDisplacement(
self.box,
molecule2particles(self.position, self.orientation, self.mol_vector),
molecule2particles(position, orientation, self.mol_vector)
)
return structural_relax(particle_displacement, threshold)
def computeAll(self,
timestep: int,
position: np.ndarray,
orientation: np.ndarray=None,
) -> Dict[str, Any]:
"""Compute all dynamics quantities of interest."""
delta_displacement = translationalDisplacement(self.box, self.position, position)
dynamic_quantities = {
'time': self.computeTimeDelta(timestep),
'mean_displacement': mean_displacement(delta_displacement),
'msd': mean_squared_displacement(delta_displacement),
'mfd': mean_fourth_displacement(delta_displacement),
'alpha': alpha_non_gaussian(delta_displacement),
'com_struct': structural_relax(delta_displacement, dist=0.4),
}
if self.orientation is not None:
delta_rotation = rotationalDisplacement(self.orientation, orientation)
logger.debug('Max rotation: %f', delta_rotation.max())
dynamic_quantities.update({
'mean_rotation': mean_rotation(delta_rotation),
'rot1': rotational_relax1(delta_rotation),
'rot2': rotational_relax2(delta_rotation),
'gamma': gamma(delta_displacement, delta_rotation),
'overlap': mobile_overlap(delta_displacement, delta_rotation),
'struct': self.computeStructRelax(position, orientation, threshold=0.3),
})
return dynamic_quantities
def get_molid(self):
"""Molecule ids of each of the values."""
return np.arange(self.num_particles)
class molecularRelaxation(object):
"""Compute the relaxation of each molecule."""
def __init__(self, num_elements: int, threshold: float) -> None:
self.num_elements = num_elements
self.threshold = threshold
self._max_value = 2**32 - 1
self._status = np.full(self.num_elements, self._max_value, dtype=int)
def add(self, timediff: int, distance: np.ndarray) -> None:
assert distance.shape == self._status.shape
with np.errstate(invalid='ignore'):
moved = np.greater(distance, self.threshold)
moveable = np.greater(self._status, timediff)
self._status[np.logical_and(moved, moveable)] = timediff
def get_status(self):
return self._status
class lastMolecularRelaxation(molecularRelaxation):
_is_irreversible = 3
def __init__(self,
num_elements: int,
threshold: float,
irreversibility: float=1.
) -> None:
super().__init__(num_elements, threshold)
self._state = np.zeros(self.num_elements, dtype=np.uint8)
self._irreversibility = irreversibility
def add(self, timediff: int, distance: np.ndarray) -> None:
assert distance.shape == self._status.shape
with np.errstate(invalid='ignore'):
state = np.greater(distance, self.threshold).astype(np.uint8)
state[np.logical_or(self._state == self._is_irreversible,
np.greater(distance, self._irreversibility)
)] = self._is_irreversible
self._status[
np.logical_and(state == 1, self._state == 0)
] = timediff
self._state = state
def get_status(self):
status = np.copy(self._status)
status[self._state != self._is_irreversible] = self._max_value
return status
class structRelaxations(molecularRelaxation):
"""Compute the average structural relaxation for a molecule."""
def __init__(self, num_elements: int, threshold: float, molecule: Molecule) -> None:
self.molecule = molecule
super().__init__(num_elements*self.molecule.num_particles, threshold)
def get_status(self):
return self._status.reshape((-1, self.molecule.num_particles)).mean(axis=1)
class relaxations(object):
def __init__(self, timestep: int,
box: np.ndarray,
position: np.ndarray,
orientation: np.ndarray,
molecule: Molecule=None) -> None:
self.init_time = timestep
self.box = box
num_elements = position.shape[0]
self.init_position = position
self.init_orientation = orientation
self.mol_relax = {
'tau_D1': molecularRelaxation(num_elements, threshold=1.),
'tau_D03': molecularRelaxation(num_elements, threshold=0.3),
'tau_D04': molecularRelaxation(num_elements, threshold=0.4),
'tau_DL04': lastMolecularRelaxation(num_elements, threshold=0.4),
'tau_T2': molecularRelaxation(num_elements, threshold=np.pi/2),
'tau_T4': molecularRelaxation(num_elements, threshold=np.pi/4),
}
self.mol_vector = None
if molecule:
self.mol_vector = molecule.positions.astype(np.float32)
self.mol_relax['tau_S03'] = structRelaxations(
num_elements,
threshold=0.3,
molecule=molecule,
)
def get_timediff(self, timestep: int):
return timestep - self.init_time
def add(self, timestep: int,
position: np.ndarray,
orientation: np.ndarray,
) -> None:
displacement = translationalDisplacement(self.box, self.init_position, position)
rotation = rotationalDisplacement(self.init_orientation, orientation)
if self.mol_vector is not None:
particle_displacement = translationalDisplacement(
self.box,
molecule2particles(self.init_position, self.init_orientation, self.mol_vector),
molecule2particles(position, orientation, self.mol_vector)
)
for key, func in self.mol_relax.items():
if 'D' in key:
func.add(self.get_timediff(timestep), displacement)
elif 'S' in key:
func.add(self.get_timediff(timestep), particle_displacement)
else:
func.add(self.get_timediff(timestep), rotation)
def summary(self) -> pandas.DataFrame:
return pandas.DataFrame({key: func.get_status() for key, func in self.mol_relax.items()})
def molecule2particles(position: np.ndarray,
orientation: np.ndarray,
mol_vector: np.ndarray
) -> np.ndarray:
return (rotate_vectors(orientation, mol_vector.astype(np.float32)) +
np.repeat(position, mol_vector.shape[0], axis=0))
def mean_squared_displacement(displacement: np.ndarray) -> float:
"""Mean value of the squared displacment.
Args:
displacement (class:`numpy.ndarray`): vector of squared
displacements.
Returns:
float: Mean value
"""
return np.square(displacement).mean()
def mean_fourth_displacement(displacement: np.ndarray) -> float:
"""Mean value of the fourth power of displacment.
Args:
displacement (class:`numpy.ndarray`): vector of squared
displacements.
Returns:
float: Mean value of the fourth power
"""
return np.power(displacement, 4).mean()
def mean_displacement(displacement: np.ndarray) -> float:
"""Mean value of the displacment.
Args:
displacement (class:`numpy.ndarray`): vector of squared
displacements.
Returns:
float: Mean value of the displacement
"""
return displacement.mean()
def mean_rotation(rotation: np.ndarray) -> float:
"""Mean of the rotational displacement.
Args:
rotation (class:`numpy.ndarray`): Vector of rotations
Returns:
float: Mean value of the rotation
"""
return rotation.mean()
def alpha_non_gaussian(displacement: np.ndarray) -> float:
r"""Compute the non-gaussian parameter :math:`\alpha`.
The non-gaussian parameter is given as
.. math::
\alpha = \frac{\langle \Delta r^4\rangle}
{2\langle \Delta r^2 \rangle^2} -1
Return:
float: The non-gaussian parameter :math:`\alpha`
"""
try:
return (np.power(displacement, 4).mean() /
(2 * np.square(np.square(displacement).mean()))) - 1
except FloatingPointError:
return 0
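def _demo_alpha_non_gaussian(num_samples: int = 100000) -> float:
    r"""Illustrative sketch, not part of the original module.

    For displacements drawn from a 2D Gaussian,
    :math:`\langle \Delta r^4 \rangle = 2 \langle \Delta r^2 \rangle^2`,
    so the non-gaussian parameter defined above should be close to zero.
    """
    xy = np.random.normal(size=(num_samples, 2))
    displacement = np.linalg.norm(xy, axis=1)
    return alpha_non_gaussian(displacement)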
def structural_relax(displacement: np.ndarray,
dist: float=0.3) -> float:
r"""Compute the structural relaxation.
The structural relaxation is given as the proportion of
particles which have moved less than `dist` from their
initial positions.
Args:
displacement: displacements
dist (float): The distance cutoff for considering relaxation.
(default: 0.3)
Return:
float: The structural relaxation of the configuration
"""
return np.mean(displacement < dist)
def gamma(displacement: np.ndarray,
rotation: np.ndarray) -> float:
r"""Calculate the second order coupling of translations and rotations.
.. math::
\gamma &= \frac{\langle(\Delta r \Delta\theta)^2 \rangle -
\langle\Delta r^2\rangle\langle\Delta \theta^2\rangle
}{\langle\Delta r^2\rangle\langle\Delta\theta^2\rangle}
Return:
float: The squared coupling of translations and rotations
:math:`\gamma`
"""
rot2 = np.square(rotation)
disp2 = np.square(displacement)
disp2m_rot2m = disp2.mean() * rot2.mean()
try:
return ((disp2 * rot2).mean() - disp2m_rot2m) / disp2m_rot2m
except FloatingPointError:
with np.errstate(invalid='ignore'):
res = ((disp2 * rot2).mean() - disp2m_rot2m) / disp2m_rot2m
np.nan_to_num(res, copy=False)
return res
def rotational_relax1(rotation: np.ndarray) -> float:
r"""Compute the first-order rotational relaxation function.
.. math::
C_1(t) = \langle \hat\vec e(0) \cdot
\hat \vec e(t) \rangle
Return:
float: The rotational relaxation
"""
return np.mean(np.cos(rotation))
def rotational_relax2(rotation: np.ndarray) -> float:
r"""Compute the second rotational relaxation function.
.. math::
C_2(t) = \langle 2[\hat\vec e(0) \cdot \
\hat \vec e(t)]^2 - 1 \rangle
Return:
float: The rotational relaxation
"""
return np.mean(2 * np.square(np.cos(rotation)) - 1)
def mobile_overlap(displacement: np.ndarray,
rotation: np.ndarray,
fraction: float=0.1) -> float:
"""Find the overlap of the most mobile translators and rotators.
This finds the proportion of molecules which reside in the top ``fraction``
of both the rotational and translational motion.
"""
num_elements = int(len(displacement) * fraction)
# np.argsort will sort from smallest to largest, we are interested in the
# largest elements so we will take from the end of the array.
trans_order = np.argsort(displacement)[-num_elements:]
rot_order = np.argsort(np.abs(rotation))[-num_elements:]
return len(np.intersect1d(trans_order, rot_order)) / num_elements
def spearman_rank(displacement: np.ndarray,
rotation: np.ndarray,
fraction: float=1.) -> float:
"""Compute the Spearman Rank coefficient for fast molecules.
This takes the molecules with the fastest ``fraction`` of the translations or
rotations and uses this subset to compute the Spearman rank coefficient.
"""
num_elements = int(len(displacement) * fraction)
# np.argsort will sort from smallest to largest, we are interested in the
# largest elements so we will take from the end of the array.
trans_order = np.argsort(displacement)[:-num_elements-1:-1]
rot_order = np.argsort(np.abs(rotation))[:-num_elements-1:-1]
rho, _ = spearmanr(trans_order, rot_order)
return rho
def rotationalDisplacement(initial: np.ndarray,
final: np.ndarray,
) -> np.ndarray:
r"""Compute the rotational displacement.
Args:
initial (py:class:`numpy.ndarray`): Initial orientation.
final (py:class:`numpy.ndarray`): final orientation.
result (py:class:`numpy.ndarray`): array in which to store result
The rotational displacement is computed using a slightly modified formula
from [@Huynh2009]_, specifically the formula for :math:`\phi_3`. Since we
are interested in angles of the range :math:`[0, 2\pi]`, the result of
:math:`\phi_3` is multiplied by 2, which is shown by Huynh to be equal
to :math:`\phi_6`.
This implementation was chosen for speed and accuracy, being tested against
a number of other possibilities. Another notable formulation was by [Jim
Belk] on Stack Exchange, however that equation was both slower to compute
and more prone to unusual behaviour.
.. [@Huynh2009]: 1. Huynh, D. Q. Metrics for 3D rotations: Comparison and
analysis. J. Math. Imaging Vis. 35, 155–164 (2009).
.. [Jim Belk]: https://math.stackexchange.com/questions/90081/quaternion-distance
"""
result = np.empty(final.shape[0], dtype=final.dtype)
quaternion_rotation(initial, final, result)
return result
def translationalDisplacement(box: np.ndarray,
initial: np.ndarray,
final: np.ndarray,
) -> np.ndarray:
"""Optimised function for computing the displacement.
This computes the displacement using the shortest path from the original
position to the final position, i.e. the minimum image convention. This is
a reasonable assumption to make since molecules move only a small fraction
of the box length between the frames being compared.
This assumes there is no more than a single periodic image between the two
positions, which breaks down slightly when the frame size changes. I am
assuming this effect is negligible so it is not accounted for here.
"""
result = np.empty(final.shape[0], dtype=final.dtype)
displacement_periodic(box.astype(np.float32), initial, final, result)
return result
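def _reference_displacement(box: np.ndarray,
                            initial: np.ndarray,
                            final: np.ndarray) -> np.ndarray:
    r"""Pure-NumPy sketch of the minimum image displacement.

    Illustrative only, not part of the original module: the compiled
    ``displacement_periodic`` helper is assumed to compute this quantity,
    wrapping each coordinate difference back into ``[-box/2, box/2]``
    before taking the norm.
    """
    delta = final - initial
    delta -= box * np.round(delta / box)
    return np.linalg.norm(delta, axis=1)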
|
malramsay64/MD-Molecules-Hoomd
|
statdyn/analysis/dynamics.py
|
Python
|
mit
| 18,128
|
[
"Gaussian"
] |
77f0b49ff3a0260e1cdd1abc789f0e00d86bd81e9db711794b71c09bed72bf85
|
import os
import sys
import functools
import collections
import scandir
PY2 = sys.version_info.major == 2
PY3 = sys.version_info.major == 3
if PY3:
basestring = str
unicode = str
if PY2:
unicode = unicode
basestring = basestring
def fnwalk(path, fn, shallow=False):
"""
Walk the directory tree top-down and yield files or directories for which
the predicate returns a truthy value.
This generator function takes a ``path`` from which to begin the traversal,
and a ``fn`` object that selects the paths to be returned. It calls
``os.listdir()`` recursively until either a full path is flagged by ``fn``
function as valid (by returning a truthy value) or ``os.listdir()`` fails
with ``OSError``.
This function has been added specifically to deal with large and deep
directory trees, and it's therefore not advisable to convert the return
values to lists and similar memory-intensive objects.
The ``shallow`` flag is used to terminate further recursion on match. If
``shallow`` is ``False``, recursion continues even after a path is matched.
For example, given a path ``/foo/bar/bar``, and a matcher that matches
``bar``, with ``shallow`` flag set to ``True``, only ``/foo/bar`` is
matched. Otherwise, both ``/foo/bar`` and ``/foo/bar/bar`` are matched.
"""
if hasattr(path, 'path') and fn(path):
yield path
if shallow:
return
try:
entries = scandir.scandir(path.path)
except AttributeError:
entries = scandir.scandir(path)
except OSError:
return
for entry in entries:
if entry.is_dir():
for child in fnwalk(entry, fn, shallow):
yield child
else:
if fn(entry):
yield entry
def common_ancestor(*paths):
path_components = [p.split(os.path.sep) for p in paths]
min_path_length = min(len(p) for p in path_components)
common_path = []
for i in range(min_path_length):
common_component = set(p[i] for p in path_components)
if len(common_component) != 1:
break
common_path.append(common_component.pop())
return os.path.sep.join(common_path)
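def _demo_common_ancestor():
    """Illustrative sketch, not part of the original module.

    Shared leading path components are joined back together, so on a POSIX
    system ``common_ancestor('/srv/data/a/b', '/srv/data/c')`` returns
    ``'/srv/data'``.
    """
    return common_ancestor('/srv/data/a/b', '/srv/data/c')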
def validate_path(base_path, path):
path = path.lstrip(os.sep)
full_path = os.path.abspath(os.path.join(base_path, path))
return full_path.startswith(base_path), full_path
def lru_cache(maxsize=100):
'''Least-recently-used cache decorator.
Arguments to the cached function must be hashable.
Cache performance statistics stored in f.hits and f.misses.
http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
'''
def decorating_function(user_function):
# order: least recent to most recent
cache = collections.OrderedDict()
@functools.wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += tuple(sorted(kwds.items()))
try:
result = cache.pop(key)
wrapper.hits += 1
except KeyError:
result = user_function(*args, **kwds)
wrapper.misses += 1
if len(cache) >= maxsize:
cache.popitem(last=False)  # purge least recently used cache entry
cache[key] = result # record recent use of this key
return result
wrapper.hits = wrapper.misses = 0
return wrapper
return decorating_function
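def _demo_lru_cache():
    """Illustrative sketch, not part of the original module.

    Repeated calls with the same hashable arguments are served from the
    cache, which is visible through the hits/misses counters maintained
    on the wrapper.
    """
    @lru_cache(maxsize=2)
    def square(x):
        return x * x
    square(2)
    square(2)  # served from the cache
    square(3)
    square(4)  # evicts the least recently used entry (for 2)
    return square.hits, square.misses  # (1, 3)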
def to_unicode(v, encoding='utf8'):
"""
Convert a value to Unicode string (or just string in Py3). This function
can be used to ensure string is a unicode string. This may be useful when
input can be of different types (but meant to be used when input can be
either bytestring or Unicode string), and desired output is always Unicode
string.
The ``encoding`` argument is used to specify the encoding for bytestrings.
"""
if isinstance(v, unicode):
return v
try:
return v.decode(encoding)
except (AttributeError, UnicodeEncodeError):
return unicode(v)
def to_bytes(v, encoding='utf8'):
"""
Convert a value to bytestring (or just string in Py2). This function is
useful when desired output is always a bytestring, and input can be any
type (although it is intended to be used with strings and bytestrings).
The ``encoding`` argument is used to specify the encoding of the resulting
bytestring.
"""
if isinstance(v, bytes):
return v
try:
return v.encode(encoding, errors='ignore')
except AttributeError:
return unicode(v).encode(encoding)
|
Outernet-Project/fsal
|
fsal/utils.py
|
Python
|
gpl-3.0
| 4,673
|
[
"VisIt"
] |
0501e44d24e22755076cd3b23ac27e5072bd3c542728b476c1615e2ad25c8949
|
#!/usr/bin/env python
# Copyright (C) 2013,2014 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script appends the sample list of features to the file
# myconfig-sample.h.
#
from __future__ import print_function
import time
import string
import fileinput
import inspect
import sys
import os
# find featuredefs.py
moduledir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.append(os.path.join(moduledir, '..', 'src'))
import featuredefs
if len(sys.argv) != 2:
print("Usage: {} DEFFILE".format(sys.argv[0]), file=sys.stderr)
exit(2)
deffilename = sys.argv[1]
#print "Reading definitions from " + deffilename + "..."
defs = featuredefs.defs(deffilename)
#print "Done."
#print "Writing " + hfilename + "..."
featuresdone = set()
for line in fileinput.input(deffilename):
line = line.strip()
# Handle empty and comment lines
if not line:
print()
continue
elif line.startswith('#'):
continue
elif line.startswith('//') or line.startswith('/*'):
print(line)
continue
# Tokenify line
feature = line.split(None, 1)[0]
if feature in defs.features and feature not in featuresdone:
print('//#define %s' % feature)
featuresdone.add(feature)
|
mkuron/espresso
|
src/config/gen_sampleconfig.py
|
Python
|
gpl-3.0
| 1,891
|
[
"ESPResSo"
] |
b3c0eed0fe79541e1b9a7e92945d7505284e1acf7b6dd1755bf621667f0961f5
|
#!/usr/bin/env
"""
NARRuv_quiver_plot.py
"""
#System Stack
import datetime, sys
#Science Stack
from netCDF4 import MFDataset, num2date
import numpy as np
# Visual Stack
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.dates import MonthLocator, DateFormatter
import matplotlib.ticker as ticker
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2016, 6, 20)
__modified__ = datetime.datetime(2016, 6, 20)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'quiver plot'
"""-------------------------- Initialization params -----------------------------------------"""
### some mpl specif settings for fonts and plot style
mpl.rcParams['svg.fonttype'] = 'none'
plt.style.use('bmh')
"""--------------------------------netcdf Routines---------------------------------------"""
def get_global_atts(nchandle):
g_atts = {}
att_names = nchandle.ncattrs()
for name in att_names:
g_atts[name] = nchandle.getncattr(name)
return g_atts
def get_vars(nchandle):
return nchandle.variables
def get_var_atts(nchandle, var_name):
return nchandle.variables[var_name]
def ncreadfile_dic(nchandle, params):
data = {}
for j, v in enumerate(params):
if v in nchandle.variables.keys(): #check for nc variable
data[v] = nchandle.variables[v][:]
else: #if parameter doesn't exist fill the array with zeros
data[v] = None
return (data)
"""--------------------------------time Routines---------------------------------------"""
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
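def _demo_date2pydate():
    # Illustrative sketch, not part of the original script: EPIC day 2440000
    # with zero milliseconds maps back to the EPIC reference date 1968-05-23.
    ref = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
    return date2pydate(2440000, 0) == ref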
"""--------------------------------main Routines---------------------------------------"""
####The following is designed to plot U/V vectors from NARR as a stick plot
### Read CF style netcdf files (one time word) from multiple files
ncfiles = '/Volumes/WDC_internal/Users/bell/in_and_outbox/2016/stabeno/june/ArcticTransport/site2winds/'
nchandle = MFDataset(ncfiles+'*cf.nc',aggdim="time")
vars_dic = get_vars(nchandle)
data1 = ncreadfile_dic(nchandle,vars_dic.keys())
nchandle.close()
### some data manipulation or massaging for plots
##
## generate daily averages from hourly data and label it 12Z of the day averaged
subset_t, ucomp, vcomp = [], [], []
total_date_range = np.arange(data1["time"].min(),data1["time"].max(),1)
for count in total_date_range:
tind = (np.where(data1["time"] % count < 1))
ucomp = np.hstack((ucomp, np.mean(data1['WU_422'][tind,0,0,0])))
vcomp = np.hstack((vcomp, np.mean(data1['WV_423'][tind,0,0,0])))
subset_t = np.hstack((subset_t, count+0.5))
xdate = num2date(subset_t, "days since 1800-01-01")
xdate = [x.toordinal() for x in xdate]
#exchange 1e35 for 0
ucomp[ucomp == 1e35] = 0
vcomp[vcomp == 1e35] = 0
magnitude = np.sqrt(ucomp**2 + vcomp**2)
### Quiver / Stick plot
# Plot quiver
fig = plt.figure(1)
for i in range(1,6):
ax1 = fig.add_subplot(5,1,i)
# 1D Quiver plot
q = ax1.quiver(xdate,0,ucomp,vcomp,color='r',units='y',scale_units='y',
scale = 1,headlength=2,headaxislength=2,width=0.1,alpha=.95)
qk = plt.quiverkey(q,0.2, 0.05, 5,r'$5 \frac{m}{s}$',labelpos='W',
fontproperties={'weight': 'bold'})
ax1.set_ylim(vcomp.min(), vcomp.max())
ax1.set_ylabel("(m/s)")
ax1.set_xlim(datetime.datetime(2010+i-1, 8, 01).toordinal(),datetime.datetime(2011+i-1, 10, 31).toordinal())
ax1.xaxis.set_major_locator(MonthLocator())
ax1.xaxis.set_minor_locator(MonthLocator(bymonth=range(1,13), bymonthday=15))
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_formatter(DateFormatter('%b %y'))
ax1.tick_params(axis='both', which='minor', labelsize=12)
t = fig.suptitle('NARR Daily Averaged Wind', fontsize=12)
t.set_y(0.03)
fig.autofmt_xdate()
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]*1.50) )
plt.savefig('images/NARR_C2_winds_stickplot.png', bbox_inches='tight', dpi = (300))
plt.close()
#plt.show()
|
shaunwbell/FOCI_Analysis
|
general_plotting_routines/cftime_uv_quiver_plot.py
|
Python
|
mit
| 4,936
|
[
"NetCDF"
] |
95cbd9d3f32602f799e1b99c3fb8ed6dea5ef9e40324743cfb800ebe3eaae57e
|
# -*- coding: utf-8 -*-
from django.conf import settings
GATEWAY_HOST = "www.moneybookers.com"
GATEWAY_URI = "/app/payment.pl"
TEST_GATEWAY_URI = "/app/test_payment.pl"
GATEWAY = "https://%s%s" % (GATEWAY_HOST, GATEWAY_URI)
TEST_GATEWAY = "https://%s%s" % (GATEWAY_HOST, TEST_GATEWAY_URI)
MERCHANT_ID = settings.MONEYBOOKERS_MERCHANT_ID
SECRET_WORD = settings.MONEYBOOKERS_SECRET_WORD
PAY_TO_EMAIL = settings.MONEYBOOKERS_PAY_TO_EMAIL
STATUS_URL = settings.MONEYBOOKERS_STATUS_URL
CURRENCY_CODE = settings.MONEYBOOKERS_CURRENCY_CODE
# Not required in settings
STATUS_URL2 = getattr(settings, "MONEYBOOKERS_STATUS_URL2", None) # Example: "mailto: merchant2@merchant.com"
RECEPIENT_DESCRIPTION = getattr(settings, "MONEYBOOKERS_RECEPIENT_DESCRIPTION", None)
RETURN_URL = getattr(settings, "MONEYBOOKERS_RETURN_URL", None)
RETURN_URL_TEXT = getattr(settings, "MONEYBOOKERS_RETURN_URL_TEXT", None)
CANCEL_URL = getattr(settings, "MONEYBOOKERS_CANCEL_URL", None)
LOGO_URL = getattr(settings, "MONEYBOOKERS_LOGO_URL", None)
BUTTON_IMAGE_URL = getattr(settings, "MONEYBOOKERS_BUTTON_IMAGE_URL", None)
USER_AGENT = getattr(settings, "MONEYBOOKERS_USER_AGENT", "Mozilla/5.0 (compatible; Django-Moneybookers/1.0)")
TRANSACTION_STATUS = (
("-3", 'Chargeback'), # This status could be received only if your account is configured to receive chargebacks. If this is the case, whenever a chargeback is received by Moneybookers, a -3 status will be posted on the status_url for the reversed transaction
("-2", 'Failed'), # This status is sent when the customer tries to pay via Credit Card or Direct Debit but our provider declines the transaction. If you do not accept Credit Card or Direct Debit payments via Moneybookers then you will never receive the failed status.
("-1", 'Cancelled'), # Pending transactions can either be cancelled manually by the sender in their online account history or they will auto-cancel after 14 days if still pending.
("0", 'Pending'), # This status is sent when the customers pays via the pending bank transfer option. Such transactions will auto-process IF the bank transfer is received by Moneybookers. We strongly recommend that you do NOT process the order/transaction in your system upon receipt of a pending status from Moneybookers.
("2", 'Processed'), # This status is sent when the transaction is processed and the funds have been received on your Moneybookers account.
)
ISO3166_A3 = (
('AFG', 'Afghanistan'),
('ALA', 'Åland Islands'),
('ALB', 'Albania'),
('DZA', 'Algeria'),
('ASM', 'American Samoa'),
('AND', 'Andorra'),
('AGO', 'Angola'),
('AIA', 'Anguilla'),
('ATA', 'Antarctica'),
('ATG', 'Antigua and Barbuda'),
('ARG', 'Argentina'),
('ARM', 'Armenia'),
('ABW', 'Aruba'),
('AUS', 'Australia'),
('AUT', 'Austria'),
('AZE', 'Azerbaijan'),
('BHS', 'Bahamas, The'),
('BHR', 'Bahrain'),
('BGD', 'Bangladesh'),
('BRB', 'Barbados'),
('BLR', 'Belarus'),
('BEL', 'Belgium'),
('BLZ', 'Belize'),
('BEN', 'Benin'),
('BMU', 'Bermuda'),
('BTN', 'Bhutan'),
('BOL', 'Bolivia'),
('BIH', 'Bosnia and Herzegovina'),
('BWA', 'Botswana'),
('BVT', 'Bouvet Island'),
('BRA', 'Brazil'),
('IOT', 'British Indian Ocean Territory'),
('VGB', 'British Virgin Islands'),
('BRN', 'Brunei'),
('BGR', 'Bulgaria'),
('BFA', 'Burkina Faso'),
('MMR', 'Burma'),
('BDI', 'Burundi'),
('KHM', 'Cambodia'),
('CMR', 'Cameroon'),
('CAN', 'Canada'),
('CPV', 'Cape Verde'),
('CYM', 'Cayman Islands'),
('CAF', 'Central African Republic'),
('TCD', 'Chad'),
('CHL', 'Chile'),
('CHN', 'China'),
('CXR', 'Christmas Island'),
('CCK', 'Cocos (Keeling) Islands'),
('COL', 'Colombia'),
('COM', 'Comoros'),
('COD', 'Congo, Democratic Republic of the'),
('COG', 'Congo, Republic of the'),
('COK', 'Cook Islands'),
('CRI', 'Costa Rica'),
('CIV', "Cote d'Ivoire"),
('HRV', 'Croatia'),
('CUB', 'Cuba'),
('CYP', 'Cyprus'),
('CZE', 'Czech Republic'),
('DNK', 'Denmark'),
('DJI', 'Djibouti'),
('DMA', 'Dominica'),
('DOM', 'Dominican Republic'),
('ECU', 'Ecuador'),
('EGY', 'Egypt'),
('SLV', 'El Salvador'),
('GNQ', 'Equatorial Guinea'),
('ERI', 'Eritrea'),
('EST', 'Estonia'),
('ETH', 'Ethiopia'),
('FLK', 'Falkland Islands (Islas Malvinas)'),
('FRO', 'Faroe Islands'),
('FJI', 'Fiji'),
('FIN', 'Finland'),
('FRA', 'France'),
('GUF', 'French Guiana'),
('PYF', 'French Polynesia'),
('ATF', 'French Southern and Antarctic Lands'),
('GAB', 'Gabon'),
('GMB', 'Gambia, The'),
('PSE', 'Gaza Strip'),
('GEO', 'Georgia'),
('DEU', 'Germany'),
('GHA', 'Ghana'),
('GIB', 'Gibraltar'),
('GRC', 'Greece'),
('GRL', 'Greenland'),
('GRD', 'Grenada'),
('GLP', 'Guadeloupe'),
('GUM', 'Guam'),
('GTM', 'Guatemala'),
('GGY', 'Guernsey'),
('GIN', 'Guinea'),
('GNB', 'Guinea-Bissau'),
('GUY', 'Guyana'),
('HTI', 'Haiti'),
('HMD', 'Heard Island and McDonald Islands'),
('VAT', 'Holy See (Vatican City)'),
('HND', 'Honduras'),
('HKG', 'Hong Kong'),
('HUN', 'Hungary'),
('ISL', 'Iceland'),
('IND', 'India'),
('IDN', 'Indonesia'),
('IRN', 'Iran'),
('IRQ', 'Iraq'),
('IRL', 'Ireland'),
('IMN', 'Isle of Man'),
('ISR', 'Israel'),
('ITA', 'Italy'),
('JAM', 'Jamaica'),
('JPN', 'Japan'),
('JEY', 'Jersey'),
('JOR', 'Jordan'),
('KAZ', 'Kazakhstan'),
('KEN', 'Kenya'),
('KIR', 'Kiribati'),
('PRK', 'Korea, North'),
('KOR', 'Korea, South'),
('KWT', 'Kuwait'),
('KGZ', 'Kyrgyzstan'),
('LAO', 'Laos'),
('LVA', 'Latvia'),
('LBN', 'Lebanon'),
('LSO', 'Lesotho'),
('LBR', 'Liberia'),
('LBY', 'Libya'),
('LIE', 'Liechtenstein'),
('LTU', 'Lithuania'),
('LUX', 'Luxembourg'),
('MAC', 'Macau'),
('MKD', 'Macedonia'),
('MDG', 'Madagascar'),
('MWI', 'Malawi'),
('MYS', 'Malaysia'),
('MDV', 'Maldives'),
('MLI', 'Mali'),
('MLT', 'Malta'),
('MHL', 'Marshall Islands'),
('MTQ', 'Martinique'),
('MRT', 'Mauritania'),
('MUS', 'Mauritius'),
('MYT', 'Mayotte'),
('MEX', 'Mexico'),
('FSM', 'Micronesia, Federated States of'),
('MDA', 'Moldova'),
('MCO', 'Monaco'),
('MNG', 'Mongolia'),
('MNE', 'Montenegro'),
('MSR', 'Montserrat'),
('MAR', 'Morocco'),
('MOZ', 'Mozambique'),
('NAM', 'Namibia'),
('NRU', 'Nauru'),
('NPL', 'Nepal'),
('NLD', 'Netherlands'),
('ANT', 'Netherlands Antilles'),
('NCL', 'New Caledonia'),
('NZL', 'New Zealand'),
('NIC', 'Nicaragua'),
('NER', 'Niger'),
('NGA', 'Nigeria'),
('NIU', 'Niue'),
('NFK', 'Norfolk Island'),
('MNP', 'Northern Mariana Islands'),
('NOR', 'Norway'),
('OMN', 'Oman'),
('PAK', 'Pakistan'),
('PLW', 'Palau'),
('PAN', 'Panama'),
('PNG', 'Papua New Guinea'),
('PRY', 'Paraguay'),
('PER', 'Peru'),
('PHL', 'Philippines'),
('PCN', 'Pitcairn Islands'),
('POL', 'Poland'),
('PRT', 'Portugal'),
('PRI', 'Puerto Rico'),
('QAT', 'Qatar'),
('REU', 'Reunion'),
('ROU', 'Romania'),
('RUS', 'Russia'),
('RWA', 'Rwanda'),
('BLM', 'Saint Barthelemy'),
('SHN', 'Saint Helena'),
('KNA', 'Saint Kitts and Nevis'),
('LCA', 'Saint Lucia'),
('MAF', 'Saint Martin'),
('SPM', 'Saint Pierre and Miquelon'),
('VCT', 'Saint Vincent and the Grenadines'),
('WSM', 'Samoa'),
('SMR', 'San Marino'),
('STP', 'Sao Tome and Principe'),
('SAU', 'Saudi Arabia'),
('SEN', 'Senegal'),
('SRB', 'Serbia'),
('SYC', 'Seychelles'),
('SLE', 'Sierra Leone'),
('SGP', 'Singapore'),
('SVK', 'Slovakia'),
('SVN', 'Slovenia'),
('SLB', 'Solomon Islands'),
('SOM', 'Somalia'),
('ZAF', 'South Africa'),
('SGS', 'South Georgia and the South Sandwich Islands'),
('ESP', 'Spain'),
('LKA', 'Sri Lanka'),
('SDN', 'Sudan'),
('SUR', 'Suriname'),
('SJM', 'Svalbard'),
('SWZ', 'Swaziland'),
('SWE', 'Sweden'),
('CHE', 'Switzerland'),
('SYR', 'Syria'),
('TWN', 'Taiwan'),
('TJK', 'Tajikistan'),
('TZA', 'Tanzania'),
('THA', 'Thailand'),
('TLS', 'Timor-Leste'),
('TGO', 'Togo'),
('TKL', 'Tokelau'),
('TON', 'Tonga'),
('TTO', 'Trinidad and Tobago'),
('TUN', 'Tunisia'),
('TUR', 'Turkey'),
('TKM', 'Turkmenistan'),
('TCA', 'Turks and Caicos Islands'),
('TUV', 'Tuvalu'),
('UGA', 'Uganda'),
('UKR', 'Ukraine'),
('ARE', 'United Arab Emirates'),
('GBR', 'United Kingdom'),
('USA', 'United States'),
('UMI', 'United States Minor Outlying Islands'),
('URY', 'Uruguay'),
('UZB', 'Uzbekistan'),
('VUT', 'Vanuatu'),
('VEN', 'Venezuela'),
('VNM', 'Vietnam'),
('VIR', 'Virgin Islands'),
('WLF', 'Wallis and Futuna'),
('PSE', 'West Bank'),
('ESH', 'Western Sahara'),
('YEM', 'Yemen'),
('ZMB', 'Zambia'),
('ZWE', 'Zimbabwe'),
)
LANGUAGE_CODE = (
('EN', 'EN'),
('DE', 'DE'),
('ES', 'ES'),
('FR', 'FR'),
('IT', 'IT'),
('PL', 'PL'),
('GR', 'GR'),
('RO', 'RO'),
('RU', 'RU'),
('TR', 'TR'),
('CN', 'CN'),
('CZ', 'CZ'),
('NL', 'NL'),
('DA', 'DA'),
('SV', 'SV'),
('FI', 'FI'),
)
CUSTOMER_TITLE = (
('Mr', 'Mr'),
('Mrs', 'Mrs'),
('Miss', 'Miss'),
)
ISO4217 = (
('EUR', 'Euro'),
('TWD', 'Taiwan Dollar'),
('USD', 'U.S. Dollar'),
('THB', 'Thailand Baht'),
('GBP', 'British Pound'),
('CZK', 'Czech Koruna'),
('HKD', 'Hong Kong Dollar'),
('HUF', 'Hungarian Forint'),
('SGD', 'Singapore Dollar'),
('SKK', 'Slovakian Koruna'),
('JPY', 'Japanese Yen'),
('EEK', 'Estonian Kroon'),
('CAD', 'Canadian Dollar'),
('BGN', 'Bulgarian Leva'),
('AUD', 'Australian Dollar'),
('PLN', 'Polish Zloty'),
('CHF', 'Swiss Franc'),
('ISK', 'Iceland Krona'),
('DKK', 'Danish Krone'),
('INR', 'Indian Rupee'),
('SEK', 'Swedish Krona'),
('LVL', 'Latvian Lat'),
('NOK', 'Norwegian Krone'),
('KRW', 'South-Korean Won'),
('ILS', 'Israeli Shekel'),
('ZAR', 'South-African Rand'),
('MYR', 'Malaysian Ringgit'),
('RON', 'Romanian Leu New'),
('NZD', 'New Zealand Dollar'),
('HRK', 'Croatian Kuna'),
('TRY', 'New Turkish Lira'),
('LTL', 'Lithuanian Litas'),
('AED', 'Utd. Arab Emir. Dirham'),
('JOD', 'Jordanian Dinar'),
('MAD', 'Moroccan Dirham'),
('OMR', 'Omani Rial'),
('QAR', 'Qatari Rial'),
('RSD', 'Serbian dinar'),
('SAR', 'Saudi Riyal'),
('TND', 'Tunisian Dinar'),
)
FAILED_REASON_CODES = (
('01', '01 - Referred'),
('02', '02 - Invalid Merchant Number'),
('03', '03 - Pick-up card'),
('04', '04 - Authorisation Declined'),
('05', '05 - Other Error'),
('06', '06 - CVV is mandatory, but not set or invalid'),
('07', '07 - Approved authorisation, honour with identification'),
('08', '08 - Delayed Processing'),
('09', '09 - Invalid Transaction'),
('10', '10 - Invalid Currency'),
('11', '11 - Invalid Amount/Available Limit Exceeded/Amount too high'),
('12', '12 - Invalid credit card or bank account'),
('13', '13 - Invalid Card Issuer'),
('14', '14 - Annulation by client'),
('15', '15 - Duplicate transaction'),
('16', '16 - Acquirer Error'),
('17', '17 - Reversal not processed, matching authorisation not found'),
('18', '18 - File Transfer not available/unsuccessful'),
('19', '19 - Reference number error'),
('20', '20 - Access Denied'),
('21', '21 - File Transfer failed'),
('22', '22 - Format Error'),
('23', '23 - Unknown Acquirer'),
('24', '24 - Card expired'),
('25', '25 - Fraud Suspicion'),
('26', '26 - Security code expired'),
('27', '27 - Requested function not available'),
('28', '28 - Lost/Stolen card'),
('29', '29 - Stolen card, Pick up'),
('30', '30 - Duplicate Authorisation'),
('31', '31 - Limit Exceeded'),
('32', '32 - Invalid Security Code'),
('33', '33 - Unknown or Invalid Card/Bank account'),
('34', '34 - Illegal Transaction'),
('35', '35 - Transaction Not Permitted'),
('36', '36 - Card blocked in local blacklist'),
('37', '37 - Restricted card/bank account'),
('38', '38 - Security Rules Violation'),
('39', '39 - The transaction amount of the referencing transaction is higher than the transaction amount of the original transaction'),
('40', '40 - Transaction frequency limit exceeded, override is possible'),
('41', '41 - Incorrect usage count in the Authorisation System exceeded'),
('42', '42 - Card blocked'),
('43', '43 - Rejected by Credit Card Issuer'),
('44', '44 - Card Issuing Bank or Network is not available'),
('45', '45 - The card type is not processed by the authorisation centre / Authorisation System has determined incorrect Routing'),
('47', '47 - Processing temporarily not possible'),
('48', '48 - Security Breach'),
('49', '49 - Date / time not plausible, trace-no. not increasing'),
('50', '50 - Error in PAC encryption detected'),
('51', '51 - System Error'),
('52', '52 - MB Denied - potential fraud'),
('53', '53 - Mobile verification failed'),
('54', '54 - Failed due to internal security restrictions'),
('55', '55 - Communication or verification problem'),
('56', '56 - 3D verification failed'),
('57', '57 - AVS check failed'),
('58', '58 - Invalid bank code'),
('59', '59 - Invalid account code'),
('60', '60 - Card not authorised'),
('61', '61 - No credit worthiness'),
('62', '62 - Communication error'),
('63', '63 - Transaction not allowed for cardholder'),
('64', '64 - Invalid Data in Request'),
('65', '65 - Blocked bank code'),
('66', '66 - CVV2/CVC2 Failure'),
('99', '99 - General error'),
)
GATEWAY_PAYMENT_CODES = (
('', 'Moneybookers Wallet'), # ALL
('ACC', 'All Card Types'), # ALL
('VSA', 'Visa'), # ALL
('MSC', 'MasterCard'), # ALL
('VSD', 'Visa Delta/Debit'), # United Kingdom
('VSE', 'Visa Electron'), # ALL
('MAE', 'Maestro'), # United Kingdom, Spain & Austria
('SLO', 'Solo'), # United Kingdom
('AMX', 'American Express'), # ALL
('DIN', 'Diners'), # ALL
('JCB', 'JCB'), # ALL
('LSR', 'Laser'), # Rep. of Ireland
('GCB', 'Carte Bleue'), # France
('DNK', 'Dankort'), # Denmark
('PSP', 'PostePay'), # Italy
('CSI', 'CartaSi'), # Italy
('OBT', 'Online Bank Transfer'), # Germany, United Kingdom, Denmark, Finland, Sweden, Poland, Estonia, Latvia, Lithuania
('GIR', 'Giropay'), # Germany
('DID', 'Direct Debit / ELV'), # Germany
('SFT', 'Sofortueberweisung'), # Germany, Austria, Belgium, Netherlands, Switzerland & United Kingdom
('ENT', 'eNETS'), # Singapore
('EBT', 'Nordea Solo'), # Sweden
('SO2', 'Nordea Solo'), # Finland
('IDL', 'iDEAL'), # Netherlands
('NPY', 'EPS (Netpay)'), # Austria
('PLI', 'POLi'), # Australia
('PWY', 'All Polish Banks'), # Poland
('PWY5', 'ING Bank Śląski'), # Poland
('PWY6', 'PKO BP (PKO Inteligo)'), # Poland
('PWY7', 'Multibank (Multitransfer)'), # Poland
('PWY14', 'Lukas Bank'), # Poland
('PWY15', 'Bank BPH'), # Poland
('PWY17', 'InvestBank'), # Poland
('PWY18', 'PeKaO S.A.'), # Poland
('PWY19', 'Citibank handlowy'), # Poland
('PWY20', 'Bank Zachodni WBK (Przelew24)'), # Poland
('PWY21', 'BGŻ'), # Poland
('PWY22', 'Millenium'), # Poland
('PWY25', 'mBank (mTransfer)'), # Poland
('PWY26', 'Płacę z Inteligo'), # Poland
('PWY28', 'Bank Ochrony Środowiska'), # Poland
('PWY32', 'Nordea'), # Poland
('PWY33', 'Fortis Bank'), # Poland
('PWY36', 'Deutsche Bank PBC S.A.'), # Poland
('EPY', 'ePay.bg'), # Bulgaria
)
URL_TARGET = (
(1, "_top"), # default
(2, "_parent"),
(3, "_self"),
(4, "_blank")
)
|
mikery/django-moneybookers
|
moneybookers/conf.py
|
Python
|
bsd-2-clause
| 14,814
|
[
"BWA"
] |
bf437c042d5dbf23114bf0cff4b9de08b486310961afb9814e43b7843f325410
|
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state, deprecated
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
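def _demo_row_norms():
    # Illustrative sketch, not part of the original module: row_norms agrees
    # with the naive dense computation while avoiding the X.shape-sized
    # temporary that (X * X).sum(axis=1) would allocate.
    X = np.arange(12, dtype=np.float64).reshape(4, 3)
    return np.allclose(row_norms(X), np.sqrt((X * X).sum(axis=1)))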
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non-positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
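def _demo_fast_logdet():
    # Illustrative sketch, not part of the original module: for a symmetric
    # positive definite matrix the result matches np.log(np.linalg.det(A)),
    # while a non-positive determinant yields -inf instead of an error from
    # np.log.
    rng = np.random.RandomState(0)
    B = rng.randn(5, 5)
    A = np.dot(B, B.T) + 5 * np.eye(5)  # symmetric positive definite
    return np.allclose(fast_logdet(A), np.log(np.linalg.det(A)))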
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while ensuring Fortran contiguity.
        This helps avoid the extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
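    Examples
    --------
    A hedged doctest-style sketch (added here; not in the original
    docstring). With an identity sparse matrix the product is just b:
    >>> import numpy as np
    >>> from scipy import sparse
    >>> a = sparse.csr_matrix(np.eye(3))
    >>> b = np.arange(9.).reshape(3, 3)
    >>> np.allclose(safe_sparse_dot(a, b, dense_output=True), b)
    True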
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
        An (A.shape[0] x size) orthonormal matrix, the range of which
        approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of R
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0], since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
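    Examples
    --------
    A hedged doctest-style sketch (added here; not part of the original
    docstring). On an exactly rank-3 matrix, the randomized and exact SVD
    agree on the leading singular values:
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> M = np.dot(rng.randn(50, 3), rng.randn(3, 40))  # rank-3 matrix
    >>> U, s, V = randomized_svd(M, n_components=3, random_state=0)
    >>> np.allclose(s, np.linalg.svd(M, compute_uv=False)[:3])
    True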
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
    # Use the max to normalize, as with the log this is what accumulates
    # the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
        Eigenvalues smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
    B : array, shape (N, N)
        The pseudo-inverse of matrix a.
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
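    Examples
    --------
    A hedged doctest-style sketch (added here; not in the original
    docstring): flipping signs leaves the product u.dot(v) unchanged.
    >>> import numpy as np
    >>> u = np.array([[-1., 0.], [0., 1.]])
    >>> v = np.array([[-1., 0.], [0., 1.]])
    >>> u2, v2 = svd_flip(u.copy(), v.copy())
    >>> np.allclose(np.dot(u, v), np.dot(u2, v2))
    True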
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
@deprecated('to be removed in 0.17; use scipy.special.expit or log_logistic')
def logistic_sigmoid(X, log=False, out=None):
"""Logistic function, ``1 / (1 + e ** (-x))``, or its log."""
from .fixes import expit
fn = log_logistic if log else expit
return fn(X, out)
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
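    Examples
    --------
    A hedged sketch (added here; not in the original docstring): a naive
    np.log(expit(-1000.0)) underflows to -inf, while the split formulation
    returns the exact value:
    >>> import numpy as np
    >>> float(log_logistic(np.array([[-1000.0]])))
    -1000.0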
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician,
    Vol. 37, No. 3, pp. 242-247
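    Examples
    --------
    A hedged doctest-style sketch (added here; not in the original
    docstring): two incremental updates reproduce the plain mean/variance
    of the stacked data.
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X1, X2 = rng.randn(20, 3), rng.randn(30, 3)
    >>> mean, var, count = _batch_mean_variance_update(
    ...     X2, X1.mean(axis=0), X1.var(axis=0), X1.shape[0])
    >>> X = np.vstack([X1, X2])
    >>> np.allclose(mean, X.mean(axis=0)) and np.allclose(var, X.var(axis=0))
    True
    >>> count
    50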
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
|
ashhher3/scikit-learn
|
sklearn/utils/extmath.py
|
Python
|
bsd-3-clause
| 20,800
|
[
"Gaussian"
] |
ca307aabe8b8079b78003816c9b5c5d1567c86343f6a89687f1ea35898e66187
|
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
import numpy as np
from scipy import special
from mayavi import mlab
##############################################################################
# Function to calculate the magnetic field generated by a current loop
def base_vectors(n):
""" Returns 3 orthognal base vectors, the first one colinear to n.
Parameters
-----------
n: ndarray, shape (3, )
A vector giving direction of the basis
Returns
-----------
n: ndarray, shape (3, )
The first vector of the basis
l: ndarray, shape (3, )
The second vector of the basis
    m: ndarray, shape (3, )
        The third vector of the basis
"""
    # normalize n (divide by the Euclidean norm, not the squared norm)
    n = n / np.sqrt((n ** 2).sum(axis=-1))
# choose two vectors perpendicular to n
    # choice is arbitrary since the coil is symmetric about n
if np.abs(n[0]) == 1:
l = np.r_[n[2], 0, -n[0]]
else:
l = np.r_[0, n[2], -n[1]]
    l = l / np.sqrt((l ** 2).sum(axis=-1))
m = np.cross(n, l)
return n, l, m
def magnetic_field(r, n, r0, R):
"""
Returns the magnetic field from an arbitrary current loop calculated from
    eqns (1) and (2) in Phys Rev A Vol. 35, No. 4, pp. 1535-1546; 1987.
Arguments
----------
n: ndarray, shape (3, )
The normal vector to the plane of the loop at the center,
current is oriented by the right-hand-rule.
r: ndarray, shape (m, 3)
A position vector where the magnetic field is evaluated:
        [x1 y1 z1 ; x2 y2 z2 ; ... ]
r is in units of d
r0: ndarray, shape (3, )
The location of the center of the loop in units of d: [x y z]
R: float
The radius of the current loop
Returns
--------
B: ndarray, shape (m, 3)
a vector for the B field at each position specified in r
        in units of (mu I) / (2 pi d)
for I in amps and d in meters and mu = 4 pi * 10^-7 we get Tesla
"""
### Translate the coordinates in the coil's frame
n, l, m = base_vectors(n)
# transformation matrix coil frame to lab frame
trans = np.vstack((l, m, n))
    # transformation matrix from lab frame to coil frame
inv_trans = np.linalg.inv(trans)
# point location from center of coil
r = r - r0
# transform vector to coil frame
r = np.dot(r, inv_trans)
#### calculate field
# express the coordinates in polar form
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
rho = np.sqrt(x ** 2 + y ** 2)
    theta = np.arctan2(y, x)  # polar angle; arctan2 handles x = 0 or y = 0
E = special.ellipe((4 * R * rho) / ((R + rho) ** 2 + z ** 2))
K = special.ellipk((4 * R * rho) / ((R + rho) ** 2 + z ** 2))
Bz = 1 / np.sqrt((R + rho) ** 2 + z ** 2) * (
K
+ E * (R ** 2 - rho ** 2 - z ** 2) / ((R - rho) ** 2 + z ** 2)
)
Brho = z / (rho * np.sqrt((R + rho) ** 2 + z ** 2)) * (
-K
+ E * (R ** 2 + rho ** 2 + z ** 2) / ((R - rho) ** 2 + z ** 2)
)
    # On the axis of the coil we get a division by zero here. This returns a
    # NaN, where the field is actually zero :
Brho[np.isnan(Brho)] = 0
Brho[np.isinf(Brho)] = 0
Bz[np.isnan(Bz)] = 0
Bz[np.isinf(Bz)] = 0
B = np.c_[np.cos(theta) * Brho, np.sin(theta) * Brho, Bz]
# Rotate the field back in the lab's frame
B = np.dot(B, trans)
return B
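# Hedged sanity check (added; not in the original example). On the coil axis
# (rho = 0) eqns (1) and (2) reduce to the textbook on-axis loop field,
# Bz = pi * R**2 / (R**2 + z**2)**1.5 in the (mu I) / (2 pi d) units used
# here; the NaN/inf cleanup inside magnetic_field covers the rho = 0 case.
_z = np.array([0.0, 0.05, 0.1])
_r = np.c_[np.zeros_like(_z), np.zeros_like(_z), _z]
_B = magnetic_field(_r, np.r_[0., 0., 1.], np.r_[0., 0., 0.], 0.1)
assert np.allclose(_B[:, 2], np.pi * 0.1 ** 2 / (0.1 ** 2 + _z ** 2) ** 1.5)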
def display_coil(n, r0, R, half=False):
"""
    Display a coil in the 3D view.
If half is True, display only one half of the coil.
"""
n, l, m = base_vectors(n)
theta = np.linspace(0, (2 - half) * np.pi, 30)
theta = theta[..., np.newaxis]
coil = np.atleast_1d(R) * (np.sin(theta) * l + np.cos(theta) * m)
coil += r0
coil_x = coil[:, 0]
coil_y = coil[:, 1]
coil_z = coil[:, 2]
mlab.plot3d(coil_x, coil_y, coil_z,
tube_radius=0.01,
name='Coil %i' % display_coil.num,
color=(0, 0, 0))
display_coil.num += 1
return coil_x, coil_y, coil_z
display_coil.num = 0  # counter used by display_coil to name each coil
from NurgushBinData import NurgushBinData
data = NurgushBinData("/Users/artem/workspace/inasan/nurgushmpi/bin/data/a200.dat")
# ##############################################################################
# # The grid of points on which we want to evaluate the field
# X, Y, Z = np.mgrid[-0.15:0.15:31j, -0.15:0.15:31j, -0.15:0.15:31j]
X, Y, Z = (data['x'], data['y'], data['z'])
# # Avoid rounding issues :
f = 1e4 # this gives the precision we are interested in :
X = np.round(X * f) / f
Y = np.round(Y * f) / f
Z = np.round(Z * f) / f
##############################################################################
# The coil positions
# # The center of the coil
# r0 = np.r_[0, 0, 0.1]
# # The normal to the coils
# n = np.r_[0, 0, 1]
# # The radius
# R = 0.1
# Add the mirror image of this coils relatively to the xy plane :
# r0 = np.vstack((r0, -r0 ))
# R = np.r_[R, R]
# n = np.vstack((n, n)) # Helmholtz like configuration
##############################################################################
# Calculate field
# First initialize a container matrix for the field vector :
# B = np.empty_like(r)
# # Then loop through the different coils and sum the fields :
# for this_n, this_r0, this_R in zip(n, r0, R):
# this_n = np.array(this_n)
# this_r0 = np.array(this_r0)
# this_R = np.array(this_R)
# B += magnetic_field(r, this_n, this_r0, this_R)
#
#
Bx = data['bx']
By = data['by']
Bz = data['bz']
Hx = data['hx']
Hy = data['hy']
Hz = data['hz']
# Bx.shape = X.shape
# By.shape = Y.shape
# Bz.shape = Z.shape
#
Bnorm = np.sqrt(Bx ** 2 + By ** 2 + Bz ** 2)
Hnorm = np.sqrt(Hx ** 2 + Hy ** 2 + Hz ** 2)
##############################################################################
# Visualization
# We threshold the data ourselves, as the threshold filter produces a
# data structure that is inefficient with IsoSurface
Bmax = Bnorm.max()
#
# Bx[Bnorm > Bmax] = 0
# By[Bnorm > Bmax] = 0
# Bz[Bnorm > Bmax] = 0
# Bnorm[Bnorm > Bmax] = Bmax
mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5), size=(480, 480))
mlab.clf()
#
# # for this_n, this_r0, this_R in zip(n, r0, R):
# # display_coil(this_n, this_r0, this_R)
#
# from NurgushBinData import NurgushBinData
# data = NurgushBinData("/Users/artem/workspace/inasan/nurgushmpi/bin/data/a200.dat")
#
# print data['bx'].min()
# print data['bx'].max()
#
# Build one pipeline per field; the original line fed the H components into a
# single 'B field' source and left fieldH (used below) undefined.
field = mlab.pipeline.vector_field(Bx, By, Bz, scalars=Bnorm, name='B field')
fieldH = mlab.pipeline.vector_field(Hx, Hy, Hz, scalars=Hnorm, name='H field')
vectors = mlab.pipeline.vectors(field, scale_factor=(X[1, 0, 0] - X[0, 0, 0]))
vectorsH = mlab.pipeline.vectors(fieldH, scale_factor=(X[1, 0, 0] - X[0, 0, 0]))
# # Mask random points, to have a lighter visualization.
vectors.glyph.mask_input_points = True
vectors.glyph.mask_points.on_ratio = 6
vectorsH.glyph.mask_input_points = True
vectorsH.glyph.mask_points.on_ratio = 6
#
vcp = mlab.pipeline.vector_cut_plane(field)
vcpH = mlab.pipeline.vector_cut_plane(fieldH)
vcp.glyph.glyph.scale_factor = 5 * (X[1, 0, 0] - X[0, 0, 0])
vcpH.glyph.glyph.scale_factor = 5 * (X[1, 0, 0] - X[0, 0, 0])
# # For prettier picture:
vcp.implicit_plane.widget.enabled = False
vcpH.implicit_plane.widget.enabled = False
#
iso = mlab.pipeline.iso_surface(field, contours=2, opacity=0.6, colormap='YlOrRd')
isoH = mlab.pipeline.iso_surface(fieldH, contours=2, opacity=0.6, colormap='YlOrRd')
#
# # A trick to make transparency look better: cull the front face
iso.actor.property.frontface_culling = True
isoH.actor.property.frontface_culling = True
#
# mlab.view(39, 74, 0.59, [.008, .0007, -.005])
mlab.show()
|
arakcheev/python-data-plotter
|
magnetic_example.py
|
Python
|
mit
| 7,716
|
[
"Mayavi"
] |
52622903629e2736fbbffe93c5f85276b72c7deb93aaf179a65db5b39ab0b1a6
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Variational dropout layers
Based on:
[1] Kingma, Diederik P., Tim Salimans, and Max Welling.
"Variational dropout and the local reparameterization trick." NIPS (2015).
[2] Molchanov, Dmitry, Arsenii Ashukha, and Dmitry Vetrov.
"Variational Dropout Sparsifies Deep Neural Networks." ICML (2017).
"""
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import init
from torch.nn.modules.utils import _pair as pair
from torch.nn.parameter import Parameter
from nupic.research.frameworks.pytorch.modules import MaskedConv2d
class VDropCentralData(nn.Identity):
"""
Stores data for a set of variational dropout (VDrop) modules in large
central tensors. The VDrop modules access the data using views. This makes
    it possible to operate on all of the data at once (rather than, e.g., 53
times with resnet50).
Usage:
1. Instantiate
2. Pass into multiple constructed VDropLinear and VDropConv2d modules
3. Call finalize
Before calling forward on the model, call "compute_forward_data".
After calling forward on the model, call "clear_forward_data".
The parameters are stored in terms of z_mu and z_var rather than w_mu and
w_var to support group variational dropout (e.g. to allow for pruning entire
channels.)
This module inherits from nn.Identity instead of nn.Module so that it can be
used in nn.Sequential models. It simply passes its input to the next layer
unchanged.
"""
def __init__(self, z_logvar_init=-10):
super().__init__()
self.z_chunk_sizes = []
self.z_logvar_init = z_logvar_init
self.z_logvar_min = min(z_logvar_init, -10)
self.z_logvar_max = 10.
self.epsilon = 1e-8
self.data_views = {}
self.modules = []
# Populated during register(), deleted during finalize()
self.all_z_mu = []
self.all_z_logvar = []
self.all_num_weights = []
# Populated during finalize()
self.z_mu = None
self.z_logvar = None
self.z_num_weights = None
self.threshold = 3
def extra_repr(self):
s = f"z_logvar_init={self.z_logvar_init}"
return s
def __getitem__(self, key):
return self.data_views[key]
def register(self, module, z_mu, z_logvar, num_weights_per_z=1):
self.all_z_mu.append(z_mu.flatten())
self.all_z_logvar.append(z_logvar.flatten())
self.all_num_weights.append(num_weights_per_z)
self.modules.append(module)
data_index = len(self.z_chunk_sizes)
self.z_chunk_sizes.append(z_mu.numel())
return data_index
def finalize(self):
self.z_mu = Parameter(torch.cat(self.all_z_mu))
self.z_logvar = Parameter(torch.cat(self.all_z_logvar))
self.z_num_weights = torch.tensor(
self.all_num_weights, dtype=torch.float
).repeat_interleave(torch.tensor(self.z_chunk_sizes))
del self.all_z_mu
del self.all_z_logvar
del self.all_num_weights
def to(self, *args, **kwargs):
ret = super().to(*args, **kwargs)
self.z_num_weights = self.z_num_weights.to(*args, **kwargs)
return ret
def compute_forward_data(self):
if self.training:
self.data_views["z_mu"] = self.z_mu.split(self.z_chunk_sizes)
self.data_views["z_var"] = self.z_logvar.exp().split(
self.z_chunk_sizes)
else:
self.data_views["z_mu"] = (
self.z_mu
* (self.compute_z_logalpha() < self.threshold).float()
).split(self.z_chunk_sizes)
def clear_forward_data(self):
self.data_views.clear()
def compute_z_logalpha(self):
return self.z_logvar - (self.z_mu.square() + self.epsilon).log()
def regularization(self):
return (vdrop_regularization(self.compute_z_logalpha())
* self.z_num_weights).sum()
def constrain_parameters(self):
self.z_logvar.data.clamp_(min=self.z_logvar_min,
max=self.z_logvar_max)
class MaskedVDropCentralData(VDropCentralData):
def __init__(self, restore_precision_on_prune=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.restore_precision_on_prune = restore_precision_on_prune
# Populated during register(), deleted during finalize()
self.all_z_mask = []
# Populated during finalize()
self.register_buffer("z_mask", None)
# Sentinel value to distinguish pruned weights from those that actually
# reached z_logvar_max. (This has no effect on the algorithm, it's just
# sometimes useful during analysis.)
self.pruned_logvar_sentinel = self.z_logvar_max - 0.00058
def register(self, module, z_mu, z_logvar, z_mask=None,
num_weights_per_z=1):
data_index = super().register(module, z_mu, z_logvar, num_weights_per_z)
if z_mask is None:
z_mask = torch.ones(z_mu.numel(), dtype=torch.float16)
else:
z_mask = z_mask.half()
self.all_z_mask.append(z_mask)
return data_index
def finalize(self):
super().finalize()
self.z_mask = torch.cat(self.all_z_mask)
del self.all_z_mask
def compute_forward_data(self):
if self.training:
self.data_views["z_mu"] = (self.z_mu
* self.z_mask).split(self.z_chunk_sizes)
self.data_views["z_var"] = (self.z_logvar.exp()
* self.z_mask).split(self.z_chunk_sizes)
else:
z_mu = self.z_mu * self.z_mask * (
(self.compute_z_logalpha() < self.threshold).float()
)
self.data_views["z_mu"] = z_mu.split(self.z_chunk_sizes)
def regularization(self):
return ((vdrop_regularization(self.compute_z_logalpha())
* self.z_mask)
* self.z_num_weights).sum()
def masked_parameters(self):
"""
Get information needed to zero momentum in the optimizer.
"""
yield self.z_mu, self.z_mask
yield self.z_logvar, self.z_mask
class VDropLinear(nn.Module):
def __init__(self, in_features, out_features, central_data, bias=True):
super().__init__()
self.in_features = in_features
self.out_features = out_features
# Store in a list to avoid having it registered as a module, otherwise
# it will appear multiple times in the state dict.
self.central_data = [central_data]
w_mu = torch.Tensor(self.out_features, self.in_features)
w_logvar = torch.Tensor(self.out_features, self.in_features)
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.bias = None
w_logvar.data.fill_(central_data.z_logvar_init)
# Standard nn.Linear initialization.
init.kaiming_uniform_(w_mu, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
self.data_index = central_data.register(self, w_mu, w_logvar)
self.tensor_constructor = (torch.FloatTensor
if not torch.cuda.is_available()
else torch.cuda.FloatTensor)
def extra_repr(self):
s = f"{self.in_features}, {self.out_features}, "
if self.bias is None:
s += ", bias=False"
return s
def get_w_mu(self):
return self.central_data[0]["z_mu"][self.data_index].view(
self.out_features, self.in_features)
def get_w_var(self):
return self.central_data[0]["z_var"][self.data_index].view(
self.out_features, self.in_features)
def forward(self, x):
if self.training:
return vdrop_linear_forward(x, self.get_w_mu, self.get_w_var,
self.bias, self.tensor_constructor)
else:
return F.linear(x, self.get_w_mu(), self.bias)
class VDropConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, central_data,
stride=1, padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = pair(kernel_size)
self.stride = pair(stride)
self.padding = pair(padding)
self.dilation = pair(dilation)
self.groups = groups
# Store in a list to avoid having it registered as a module, otherwise
# it will appear multiple times in the state dict.
self.central_data = [central_data]
w_mu = torch.Tensor(out_channels,
in_channels // groups,
*self.kernel_size)
w_logvar = torch.Tensor(out_channels,
in_channels // groups,
*self.kernel_size)
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.bias = None
w_logvar.data.fill_(central_data.z_logvar_init)
# Standard nn.Conv2d initialization.
init.kaiming_uniform_(w_mu, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
self.data_index = central_data.register(self, w_mu, w_logvar)
self.tensor_constructor = (torch.FloatTensor
if not torch.cuda.is_available()
else torch.cuda.FloatTensor)
def extra_repr(self):
s = (f"{self.in_channels}, {self.out_channels}, "
f"kernel_size={self.kernel_size}, stride={self.stride}")
if self.padding != (0,) * len(self.padding):
s += f", padding={self.padding}"
if self.dilation != (1,) * len(self.dilation):
s += f", dilation={self.dilation}"
if self.groups != 1:
s += f", groups={self.groups}"
if self.bias is None:
s += ", bias=False"
return s
    def get_w_mu(self):
        # The view must match the (out, in // groups, *kernel) shape that
        # was registered with the central data store above.
        return self.central_data[0]["z_mu"][self.data_index].view(
            self.out_channels, self.in_channels // self.groups,
            *self.kernel_size)
    def get_w_var(self):
        return self.central_data[0]["z_var"][self.data_index].view(
            self.out_channels, self.in_channels // self.groups,
            *self.kernel_size)
def forward(self, x):
if self.training:
return vdrop_conv_forward(x, self.get_w_mu, self.get_w_var,
self.bias, self.stride, self.padding,
self.dilation, self.groups,
self.tensor_constructor)
else:
return F.conv2d(x, self.get_w_mu(), self.bias, self.stride,
self.padding, self.dilation, self.groups)
class VDropLinear2(nn.Module):
"""
A self-contained VDropLinear (doesn't use the VDropCentralData)
"""
def __init__(self, in_features, out_features, bias=True, w_logvar_init=-10):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.w_logvar_min = min(w_logvar_init, -10)
self.w_logvar_max = 10.
self.pruned_logvar_sentinel = self.w_logvar_max - 0.00058
self.epsilon = 1e-8
self.w_mu = Parameter(torch.Tensor(self.out_features, self.in_features))
self.w_logvar = Parameter(torch.Tensor(self.out_features, self.in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.bias = None
self.w_logvar.data.fill_(w_logvar_init)
# Standard nn.Linear initialization.
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
self.tensor_constructor = (torch.FloatTensor
if not torch.cuda.is_available()
else torch.cuda.FloatTensor)
def extra_repr(self):
s = f"{self.in_features}, {self.out_features}, "
if self.bias is None:
s += ", bias=False"
return s
def get_w_mu(self):
return self.w_mu
def get_w_var(self):
return self.w_logvar.exp()
def forward(self, x):
if self.training:
return vdrop_linear_forward(x, self.get_w_mu, self.get_w_var,
self.bias, self.tensor_constructor)
else:
return F.linear(x, self.get_w_mu(), self.bias)
def compute_w_logalpha(self):
return self.w_logvar - (self.w_mu.square() + self.epsilon).log()
def regularization(self):
return vdrop_regularization(self.compute_w_logalpha()).sum()
def constrain_parameters(self):
self.w_logvar.data.clamp_(min=self.w_logvar_min,
max=self.w_logvar_max)
class MaskedVDropConv2d(nn.Module):
"""
A self-contained masked Conv2d (doesn't use the VDropCentralData)
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, mask=None,
w_logvar_init=-10):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = pair(kernel_size)
self.stride = pair(stride)
self.padding = pair(padding)
self.dilation = pair(dilation)
self.groups = groups
self.w_logvar_min = min(w_logvar_init, -10)
self.w_logvar_max = 10.
self.pruned_logvar_sentinel = self.w_logvar_max - 0.00058
self.epsilon = 1e-8
self.w_mu = Parameter(torch.Tensor(out_channels,
in_channels // groups,
*self.kernel_size))
self.w_logvar = Parameter(torch.Tensor(out_channels,
in_channels // groups,
*self.kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.bias = None
self.w_logvar.data.fill_(w_logvar_init)
self.register_buffer("w_mask", torch.HalfTensor(out_channels,
in_channels // groups,
*self.kernel_size))
# Standard nn.Conv2d initialization.
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
if mask is not None:
self.w_mask[:] = mask
self.w_mu.data *= self.w_mask
self.w_logvar.data[self.w_mask == 0.0] = self.pruned_logvar_sentinel
else:
self.w_mask.fill_(1.0)
# Standard nn.Conv2d initialization.
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
self.tensor_constructor = (torch.FloatTensor
if not torch.cuda.is_available()
else torch.cuda.FloatTensor)
def extra_repr(self):
s = (f"{self.in_channels}, {self.out_channels}, "
f"kernel_size={self.kernel_size}, stride={self.stride}")
if self.padding != (0,) * len(self.padding):
s += f", padding={self.padding}"
if self.dilation != (1,) * len(self.dilation):
s += f", dilation={self.dilation}"
if self.groups != 1:
s += f", groups={self.groups}"
if self.bias is None:
s += ", bias=False"
return s
def get_w_mu(self):
return self.w_mu * self.w_mask
def get_w_var(self):
return self.w_logvar.exp() * self.w_mask
def forward(self, x):
if self.training:
return vdrop_conv_forward(x, self.get_w_mu, self.get_w_var,
self.bias, self.stride, self.padding,
self.dilation, self.groups,
self.tensor_constructor)
else:
return F.conv2d(x, self.get_w_mu(), self.bias, self.stride,
self.padding, self.dilation, self.groups)
def compute_w_logalpha(self):
return self.w_logvar - (self.w_mu.square() + self.epsilon).log()
def regularization(self):
return (vdrop_regularization(self.compute_w_logalpha())
* self.w_mask).sum()
def constrain_parameters(self):
self.w_logvar.data.clamp_(min=self.w_logvar_min,
max=self.w_logvar_max)
class FixedAlphaVDropLinear(nn.Linear):
def __init__(self, in_features, out_features, alpha, bias=True):
super().__init__(in_features, out_features, bias)
self.alpha = alpha
self.tensor_constructor = (torch.FloatTensor
if not torch.cuda.is_available()
else torch.cuda.FloatTensor)
def extra_repr(self):
s = super().extra_repr()
s += (f", alpha={self.alpha}")
return s
def forward(self, x):
if self.training:
return vdrop_linear_forward(
x,
lambda: self.weight,
lambda: self.alpha * self.weight.square(),
self.bias,
self.tensor_constructor)
else:
return super().forward(x)
class FixedAlphaVDropConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, alpha,
stride=1, padding=0, dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
self.alpha = alpha
self.tensor_constructor = (torch.FloatTensor
if not torch.cuda.is_available()
else torch.cuda.FloatTensor)
def extra_repr(self):
s = super().extra_repr()
s += (f", alpha={self.alpha}")
return s
def forward(self, x):
if self.training:
return vdrop_conv_forward(
x,
lambda: self.weight,
lambda: self.alpha * self.weight.square(),
self.bias, self.stride, self.padding, self.dilation, self.groups,
self.tensor_constructor)
else:
return super().forward(x)
class FixedVDropConv2d(MaskedConv2d):
"""
This is designed to be used with snapshots generated by other classes, e.g.
VDropConv2d.
"""
def __init__(self, in_channels, out_channels, kernel_size, alpha,
stride=1, padding=0, dilation=1, groups=1, bias=True,
mask_mode="channel_to_channel"):
"""
@param alpha (float)
Defined as w_var / w_mu**2. Weights are multiplied with noise sampled
from distribution N(1,alpha).
"""
super().__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias, mask_mode=mask_mode)
self.alpha = alpha
self.tensor_constructor = (torch.FloatTensor
if not torch.cuda.is_available()
else torch.cuda.FloatTensor)
self.epsilon = 1e-8
def extra_repr(self):
return f"alpha={self.alpha}"
def forward(self, x):
if self.training:
return vdrop_conv_forward(
x,
lambda: self.weight * self.weight_mask,
lambda: self.alpha * (self.weight.square() * self.weight_mask),
self.bias, self.stride, self.padding, self.dilation, self.groups,
self.tensor_constructor, self.epsilon)
else:
return F.conv2d(
x, self.weight, self.bias, self.stride, self.padding,
self.dilation, self.groups
)
def vdrop_linear_forward(x, get_w_mu, get_w_var, bias, tensor_constructor,
epsilon=1e-8):
"""
Rather than sampling weights from gaussian distribution N(w_mu, w_var), use
the "local reparameterization trick", using w_mu and w_var to compute y_mu
and y_var, the distribution of the downstream unit's activation, and sample
that distribution. (As described in [1], this enables us to simulate
sampling different weights for every item in the batch, while still getting
the hardware performance benefits of batching.)
This computes y_mu + y_sigma*noise, carefully reusing buffers to avoid
unnecessary memory usage. It takes in functions get_w_mu and get_w_var
rather than taking in actual tensors so that those tensors won't need to use
memory any longer than necessary.
    @param get_w_mu (function)
Returns each weight's mean.
@param get_w_var (function)
Returns each weight's variance.
"""
# Compute y_var
y = F.linear(x.square(), get_w_var())
# If any values are 0, we'll divide by zero on the backward pass.
# Note that clamping y rather than y.data would use much more memory.
y.data.clamp_(epsilon)
# Compute y_stdev
y = y.sqrt_()
# Convert to the additive noise.
# (Can't do in-place update after sqrt_.)
y = y * tensor_constructor(y.size()).normal_()
# Add y_mu
y += F.linear(x, get_w_mu(), bias)
return y
def vdrop_conv_forward(x, get_w_mu, get_w_var, bias, stride, padding, dilation,
groups, tensor_constructor, epsilon=1e-8):
"""
Rather than sampling weights from gaussian distribution N(w_mu, w_var), use
the "local reparameterization trick", using w_mu and w_var to compute y_mu
and y_var, the distribution of the downstream unit's activation, and sample
that distribution. (As described in [1], this enables us to simulate
sampling different weights for every item in the batch, while still getting
the hardware performance benefits of batching.)
This computes y_mu + y_sigma*noise, carefully reusing buffers to avoid
unnecessary memory usage. It takes in functions get_w_mu and get_w_var
rather than taking in actual tensors so that those tensors won't need to use
memory any longer than necessary.
    @param get_w_mu (function)
Returns each weight's mean.
@param get_w_var (function)
Returns each weight's variance.
"""
# Compute y_var.
y = F.conv2d(
x.square(), get_w_var(), None, stride, padding, dilation, groups
)
# This handles two possible issues:
# - It's possible some values are negative, which will lead to NaN
# on the forward pass.
# https://github.com/pytorch/pytorch/issues/30934
# - If any values are 0, we'll get NaN on the backward pass.
# Note that clamping y rather than y.data would use much more memory.
y.data.clamp_(epsilon)
# Compute y_stdev
y = y.sqrt_()
# Convert to the additive noise.
# (Can't do in-place update after sqrt_.)
y = y * tensor_constructor(y.size()).normal_()
# Add y_mu.
y += F.conv2d(x, get_w_mu(), bias, stride, padding,
dilation, groups)
return y
def vdrop_regularization(logalpha):
"""
alpha is defined as w_var / w_mu**2
@param logalpha (Tensor)
"""
k1, k2, k3 = 0.63576, 1.8732, 1.48695
return -(k1 * torch.sigmoid(k2 + k3 * logalpha)
- 0.5 * F.softplus(-logalpha) - k1)
K1 = 0.63576
K2 = 1.8732
K3 = 1.48695
class GaussianLogUniformKLDivergence(torch.autograd.Function):
"""
Like vdrop_regularization, but designed to have no autograd memory overhead.
It receives mu and logvar, matching the way these parameters are already
stored. This function could be made faster by caching intermediate values,
but it is often run on every weight in the network, so these cached tensors
would often be very large.
"""
@staticmethod
def forward(ctx, mu, logvar, epsilon=1e-8):
ctx.save_for_backward(mu, logvar)
ctx.epsilon = epsilon
logalpha = logvar - (mu.square() + epsilon).log()
return (-K1 * torch.sigmoid(K2 + K3 * logalpha)
+ 0.5 * F.softplus(-logalpha) + K1)
@staticmethod
def backward(ctx, grad_output):
mu, logvar = ctx.saved_tensors
epsilon = ctx.epsilon
mu2_plus_epsilon = mu.square() + epsilon
logalpha = logvar - mu2_plus_epsilon.log()
sig = torch.sigmoid(K2 + K3 * logalpha)
grad_logalpha = grad_output * (-K1 * K3 * sig * (1 - sig)
- 0.5 * torch.sigmoid(-logalpha))
grad_mu = grad_logalpha * (-2 * mu / mu2_plus_epsilon)
grad_logvar = grad_logalpha
return grad_mu, grad_logvar, None
__all__ = [
"VDropCentralData",
"MaskedVDropCentralData",
"VDropLinear",
"VDropConv2d",
"VDropLinear2",
"MaskedVDropConv2d",
"FixedVDropConv2d",
"FixedAlphaVDropConv2d",
"FixedAlphaVDropLinear",
]
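if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the original
    # module). It follows the protocol from the VDropCentralData docstring:
    # instantiate the central store, pass it into the VDrop layers, call
    # finalize, then wrap each forward pass with compute_forward_data /
    # clear_forward_data.
    central_data = VDropCentralData()
    model = nn.Sequential(
        VDropLinear(16, 32, central_data),
        nn.ReLU(),
        VDropLinear(32, 4, central_data),
        central_data,  # nn.Identity subclass: passes activations through
    )
    central_data.finalize()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    central_data.to(device)  # the custom .to() also moves z_num_weights
    x = torch.randn(8, 16, device=device)
    central_data.compute_forward_data()
    y = model(x)
    # ELBO-style objective: a task loss plus the approximate KL term.
    loss = y.square().mean() + 1e-4 * central_data.regularization()
    loss.backward()
    central_data.clear_forward_data()
    central_data.constrain_parameters()
    print(f"output shape: {tuple(y.shape)}, loss: {float(loss):.4f}")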
|
numenta/nupic.research
|
packages/backprop_structure/src/nupic/research/frameworks/backprop_structure/modules/vdrop_layers.py
|
Python
|
agpl-3.0
| 26,779
|
[
"Gaussian"
] |
a0db6a02e14aad3da0459f55c35938a0b7a49704cbc5b273781649587604bdad
|
#! /usr/bin/env python
import sys
import numpy as np
import ase
from ase.io import read, write
from ase.optimize import FIRE
from ase.md import Langevin
from ase.units import mol, fs, kB
from atomistica.logger import MDLogger
from liquid_tools import *
###
# For coordination counting
#densities = [ 2.0, 2.3, 2.5, 2.7, 2.9, 3.1, 3.3, 3.5 ]
# Default parameters
T1 = 10000
T2 = 5000
T3 = 300
nat = 4001
g2_cutoff = 5.0
coord_cutoff = 1.85
nbins = 100
time = 1e4
fmax = 10.0
import getopt
optlist, args = getopt.getopt(sys.argv[1:], '',
[ 'T1=', 'T2=', 'T3=', 'nat=', 'g2_cutoff=',
'coord_cutoff=', 'nbins=', 'time=' ])
assert len(args) == 0
for key, value in optlist:
if key == '--T1':
T1 = float(value)
elif key == '--T2':
T2 = float(value)
elif key == '--T3':
T3 = float(value)
elif key == '--nat':
nat = int(value)
elif key == '--g2_cutoff':
g2_cutoff = float(value)
elif key == '--coord_cutoff':
coord_cutoff = float(value)
elif key == '--nbins':
nbins = int(value)
elif key == '--time':
time = float(value)
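# Hedged example invocation (added comment; not in the original script):
#   python quench.py --T1=8000 --T2=4000 --nat=2000 --time=5e3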
###
print '# T1 = ', T1
print '# T2 = ', T2
print '# T3 = ', T3
print '# nat = ', nat
print '# g2_cutoff = ', g2_cutoff
print '# coord_cutoff = ', coord_cutoff
print '# nbins = ', nbins
print '# time = ', time
###
sys.path += [ '.' ]
from calcs import el, dt, quick_calc, calc, densities
els = [(el, nat)]
###
f = open('hyb.out', 'w')
for density in densities:
print 'Running for density {0}...'.format(density)
if isinstance(density, str):
a = read(density)
else:
a = random_solid(els, density)
# Relax with the unscreened potential
print 'Relax with quick potential...'
a.calc = quick_calc
FIRE(a).run(fmax=fmax, steps=10000)
# Relax with the screened potential
print 'Relax with proper potential...'
a.calc = calc
FIRE(a).run(fmax=fmax, steps=10000)
# Langevin quench to T1
print 'Langevin quench to {0}...'.format(T1)
Langevin(a, dt*fs, T1*kB, 1.0/(500*fs),
logfile='-', loginterval=int(100/dt)).run(int(time/dt))
# Langevin quench to T2
print 'Langevin quench to {0}...'.format(T2)
Langevin(a, dt*fs, T2*kB, 1.0/(500*fs),
logfile='-', loginterval=int(100/dt)).run(int(time/dt))
# Langevin quench to T3
print 'Langevin quench to {0}...'.format(T3)
dyn = Langevin(a, dt*fs, T3*kB, 1.0/(500*fs), logfile='-',
loginterval=int(100/dt))
dyn.run(int(time/dt))
# Collect pair distribution function
print 'Collect pair distribution function...'
p = PairAndAngleDistribution(a.get_calculator(), g2_cutoff, coord_cutoff,
npairbins=nbins, nanglebins=nbins)
dyn.attach(p, interval=int(100/dt))
dyn.run(int(time/dt))
print 'Writing files...'
# Write snapshot
write('rho_{0}.traj'.format(density), a)
# Write pair distribution function
r = (np.arange(nbins)+0.5)*g2_cutoff/nbins
hist = p.get_pair_hist()
variance = p.get_pair_variance()
np.savetxt('g2_{0}.out'.format(density), np.transpose([r, hist, variance]))
# Write angle distribution function
    r = (np.arange(nbins)+0.5)*np.pi/nbins  # np.pi: avoid relying on a star import
hist = p.get_angle_hist()
variance = p.get_angle_variance()
np.savetxt('angle_{0}.out'.format(density), np.transpose([r, hist, variance]))
# Count coordination numbers
    print 'Calculating coordination numbers...'
c = np.zeros(12, dtype=int)
for i in range(len(a)):
c[calc.nl.coordination(calc.particles, i, coord_cutoff)] += 1
assert np.sum(c) == len(a)
if isinstance(density, str):
density = np.sum(a.get_masses())/a.get_volume() * 1e24/mol
np.savetxt(f, [ np.append([density], np.array(c, dtype=float)/len(a)) ])
f.flush()
f.close()
|
Atomistica/atomistica
|
examples/ASE/quench.py
|
Python
|
gpl-2.0
| 4,125
|
[
"ASE"
] |
9a717881f33d2126d999a31864986ec00b752146c0574243104e05992430bd9c
|
"""
tests for the Quicklook QA class and functions. It also includes tests of low-level functions in desispec.qa.qalib.
"""
import unittest
import shutil
import tempfile
import numpy as np
import os
from desispec.qa import qalib
from desispec.qa import qa_quicklook as QA
from pkg_resources import resource_filename
import desispec.sky
from desispec.preproc import parse_sec_keyword
from specter.psf import load_psf
import astropy.io.fits as fits
from desispec.quicklook import qllogger
import desispec.io
import desispec.image
from desitarget.targetmask import desi_mask
qlog=qllogger.QLLogger("QuickLook",0)
log=qlog.getlog()
def xy2hdr(xyslice):
'''
convert 2D slice into IRAF style [a:b,c:d] hdr value
    e.g. xy2hdr(np.s_[0:10, 5:20]) -> '[6:20,1:10]'
'''
yy, xx = xyslice
value = '[{}:{},{}:{}]'.format(xx.start+1, xx.stop, yy.start+1, yy.stop)
return value
#- 2D gaussian function to model sky peaks
def gaussian2D(x,y,amp,xmu,ymu,xsigma,ysigma):
x,y = np.meshgrid(x,y)
gauss = amp*np.exp(-(x-xmu)**2/(2*xsigma**2)-(y-ymu)**2/(2*ysigma**2))
return gauss
class TestQL_QA(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Create test filenames in a unique temporary directory
"""
cls.testDir = tempfile.mkdtemp()
cls.rawfile = os.path.join(cls.testDir, 'test-raw-abcde.fits')
cls.pixfile = os.path.join(cls.testDir, 'test-pix-abcde.fits')
cls.xwfile = os.path.join(cls.testDir, 'test-xw-abcde.fits')
cls.framefile = os.path.join(cls.testDir, 'test-frame-abcde.fits')
cls.fibermapfile = os.path.join(cls.testDir, 'test-fibermap-abcde.fits')
cls.skyfile = os.path.join(cls.testDir, 'test-sky-abcde.fits')
cls.qafile = os.path.join(cls.testDir, 'test_qa.yaml')
cls.qajson = os.path.join(cls.testDir, 'test_qa.json')
cls.qafig = os.path.join(cls.testDir, 'test_qa.png')
@classmethod
def tearDownClass(cls):
"""Cleanup temporary directory
"""
shutil.rmtree(cls.testDir)
def tearDown(self):
self.rawimage.close()
for filename in [self.framefile, self.rawfile, self.pixfile, self.xwfile, self.fibermapfile, self.skyfile, self.qafile, self.qajson, self.qafig]:
if os.path.exists(filename):
os.remove(filename)
#- Create some test data
def setUp(self):
#- use specter psf for this test
self.psffile=resource_filename('specter', 'test/t/psf-monospot.fits')
#self.psffile=os.environ['DESIMODEL']+'/data/specpsf/psf-b.fits'
self.config={"kwargs":{
"refKey":None,
"param":{},
"qso_resid":None
}}
#- rawimage
hdr = dict()
hdr['CAMERA'] = 'z1'
hdr['DATE-OBS'] = '2018-09-23T08:17:03.988'
hdr['PROGRAM'] = 'dark'
hdr['EXPTIME'] = 100
#- Dimensions per amp
ny = self.ny = 500
nx = self.nx = 400
noverscan = nover = 50
hdr['BIASSECA'] = xy2hdr(np.s_[0:ny, nx:nx+nover])
hdr['DATASECA'] = xy2hdr(np.s_[0:ny, 0:nx])
hdr['CCDSECA'] = xy2hdr(np.s_[0:ny, 0:nx])
hdr['BIASSECB'] = xy2hdr(np.s_[0:ny, nx+nover:nx+2*nover])
hdr['DATASECB'] = xy2hdr(np.s_[0:ny, nx+2*nover:nx+2*nover+nx])
hdr['CCDSECB'] = xy2hdr(np.s_[0:ny, nx:nx+nx])
hdr['BIASSECC'] = xy2hdr(np.s_[ny:ny+ny, nx:nx+nover])
hdr['DATASECC'] = xy2hdr(np.s_[ny:ny+ny, 0:nx])
hdr['CCDSECC'] = xy2hdr(np.s_[ny:ny+ny, 0:nx])
hdr['BIASSECD'] = xy2hdr(np.s_[ny:ny+ny, nx+nover:nx+2*nover])
hdr['DATASECD'] = xy2hdr(np.s_[ny:ny+ny, nx+2*nover:nx+2*nover+nx])
hdr['CCDSECD'] = xy2hdr(np.s_[ny:ny+ny, nx:nx+nx])
hdr['NIGHT'] = '20180923'
hdr['EXPID'] = 1
hdr['PROGRAM'] = 'dark'
hdr['FLAVOR'] = 'science'
hdr['EXPTIME'] = 100.0
rawimage = np.zeros((2*ny, 2*nx+2*noverscan))
offset = {'A':100.0, 'B':100.5, 'C':50.3, 'D':200.4}
gain = {'A':1.0, 'B':1.5, 'C':0.8, 'D':1.2}
rdnoise = {'A':2.0, 'B':2.2, 'C':2.4, 'D':2.6}
obsrdn = {'A':3.4, 'B':3.3, 'C':3.6, 'D':3.3}
quad = {
'A': np.s_[0:ny, 0:nx], 'B': np.s_[0:ny, nx:nx+nx],
'C': np.s_[ny:ny+ny, 0:nx], 'D': np.s_[ny:ny+ny, nx:nx+nx],
}
for amp in ('A', 'B', 'C', 'D'):
hdr['GAIN'+amp] = gain[amp]
hdr['RDNOISE'+amp] = rdnoise[amp]
hdr['OBSRDN'+amp] = obsrdn[amp]
xy = parse_sec_keyword(hdr['BIASSEC'+amp])
shape = [xy[0].stop-xy[0].start, xy[1].stop-xy[1].start]
rawimage[xy] += offset[amp]
rawimage[xy] += np.random.normal(scale=rdnoise[amp], size=shape)/gain[amp]
xy = parse_sec_keyword(hdr['DATASEC'+amp])
shape = [xy[0].stop-xy[0].start, xy[1].stop-xy[1].start]
rawimage[xy] += offset[amp]
rawimage[xy] += np.random.normal(scale=rdnoise[amp], size=shape)/gain[amp]
#- set CCD parameters
self.ccdsec1=hdr["CCDSECA"]
self.ccdsec2=hdr["CCDSECB"]
self.ccdsec3=hdr["CCDSECC"]
self.ccdsec4=hdr["CCDSECD"]
#- raw data are integers, not floats
rawimg = rawimage.astype(np.int32)
self.expid=hdr["EXPID"]
self.camera=hdr["CAMERA"]
#- Confirm that all regions were correctly offset
assert not np.any(rawimage == 0.0)
#- write to the rawfile and read it in QA test
hdr['DOSVER'] = 'SIM'
hdr['FEEVER'] = 'SIM'
hdr['DETECTOR'] = 'SIM'
desispec.io.write_raw(self.rawfile,rawimg,hdr,camera=self.camera)
self.rawimage=fits.open(self.rawfile)
#- read psf, should use specter.PSF.load_psf instead of desispec.PSF(), otherwise need to create a psfboot somewhere.
self.psf = load_psf(self.psffile)
#- make the test pixfile, fibermap file
img_pix = rawimg
img_ivar = np.ones_like(img_pix) / 3.0**2
img_mask = np.zeros(img_pix.shape, dtype=np.uint32)
img_mask[200] = 1
self.image = desispec.image.Image(img_pix, img_ivar, img_mask, camera='z1',meta=hdr)
desispec.io.write_image(self.pixfile, self.image)
#- Create a fibermap with purposefully overlapping targeting bits
n = 30
self.fibermap = desispec.io.empty_fibermap(n)
self.fibermap['OBJTYPE'][:] = 'TGT'
self.fibermap['DESI_TARGET'][::2] |= desi_mask.ELG
self.fibermap['DESI_TARGET'][::5] |= desi_mask.QSO
self.fibermap['DESI_TARGET'][::7] |= desi_mask.LRG
#- add some arbitrary fluxes
for key in ['FLUX_G', 'FLUX_R', 'FLUX_Z', 'FLUX_W1', 'FLUX_W2']:
self.fibermap[key] = 10**((22.5 - np.random.uniform(18, 21, size=n))/2.5)
#- Make some standards; these still have OBJTYPE = 'TGT'
ii = [6,18,29]
self.fibermap['DESI_TARGET'][ii] = desi_mask.STD_FAINT
#- set some targets to SKY
ii = self.skyfibers = [5,10,21]
self.fibermap['OBJTYPE'][ii] = 'SKY'
self.fibermap['DESI_TARGET'][ii] = desi_mask.SKY
for key in ['FLUX_G', 'FLUX_R', 'FLUX_Z', 'FLUX_W1', 'FLUX_W2']:
self.fibermap[key][ii] = np.random.normal(scale=100, size=len(ii))
desispec.io.write_fibermap(self.fibermapfile, self.fibermap)
#- make a test frame file
self.night=hdr['NIGHT']
self.nspec = nspec = 30
wave=np.arange(7600.0,9800.0,1.0) #- z channel
nwave = self.nwave = len(wave)
flux=np.random.uniform(size=(nspec,nwave))+100.
ivar=np.ones_like(flux)
resolution_data=np.ones((nspec,13,nwave))
self.frame=desispec.frame.Frame(wave,flux,ivar,resolution_data=resolution_data,fibermap=self.fibermap)
self.frame.meta = hdr
self.frame.meta['WAVESTEP']=0.5
desispec.io.write_frame(self.framefile, self.frame)
#- make a skymodel
sky=np.ones_like(self.frame.flux)*0.5
skyivar=np.ones_like(sky)
self.mask=np.zeros(sky.shape,dtype=np.uint32)
self.skymodel=desispec.sky.SkyModel(wave,sky,skyivar,self.mask)
self.skyfile=desispec.io.write_sky(self.skyfile,self.skymodel)
#- Make a dummy boundary map for wavelength-flux in pixel space
self.map2pix={}
self.map2pix["LEFT_MAX_FIBER"] = 14
self.map2pix["RIGHT_MIN_FIBER"] = 17
self.map2pix["BOTTOM_MAX_WAVE_INDEX"] = 900
self.map2pix["TOP_MIN_WAVE_INDEX"] = 1100
#- test some qa utility functions:
def test_ampregion(self):
pixboundary=qalib.ampregion(self.image)
self.assertEqual(pixboundary[0][1],slice(0,self.nx,None))
self.assertEqual(pixboundary[3][0],slice(self.ny,self.ny+self.ny,None))
def test_fiducialregion(self):
leftmax,rightmin,bottommax,topmin=qalib.fiducialregion(self.frame,self.psf)
self.assertEqual(leftmax,self.nspec-1) #- as only 30 spectra defined
self.assertLess(bottommax,topmin)
def test_getrms(self):
img_rms=qalib.getrms(self.image.pix)
self.assertEqual(img_rms,np.std(self.image.pix))
def test_countpix(self):
pix=self.image.pix
counts1=qalib.countpix(pix,nsig=3) #- counts above 3 sigma
counts2=qalib.countpix(pix,nsig=4) #- counts above 4 sigma
self.assertLess(counts2,counts1)
# RS: remove this test because this QA isn't used
# def test_sky_resid(self):
# import copy
# param = dict(
# PCHI_RESID=0.05,PER_RESID=95.,BIN_SZ=0.1)
# qadict=qalib.sky_resid(param,self.frame,self.skymodel,quick_look=True)
# kk=np.where(self.frame.fibermap['OBJTYPE']=='SKY')[0]
# self.assertEqual(qadict['NSKY_FIB'],len(kk))
#
# #- run with different sky flux
# skym1=desispec.sky.SkyModel(self.frame.wave,self.skymodel.flux,self.skymodel.ivar,self.mask)
# skym2=desispec.sky.SkyModel(self.frame.wave,self.skymodel.flux*0.5,self.skymodel.ivar,self.mask)
# frame1=copy.deepcopy(self.frame)
# frame2=copy.deepcopy(self.frame)
# desispec.sky.subtract_sky(frame1,skym1)
# desispec.sky.subtract_sky(frame2,skym2)
#
# qa1=qalib.sky_resid(param,frame1,skym1)
# qa2=qalib.sky_resid(param,frame2,skym2)
# self.assertLess(qa1['RESID'],qa2['RESID']) #- residuals must be smaller for case 1
def testSignalVsNoise(self):
import copy
params=None
#- first get the sky subtracted frame
#- copy frame not to override
thisframe=copy.deepcopy(self.frame)
desispec.sky.subtract_sky(thisframe,self.skymodel)
qadict=qalib.SignalVsNoise(thisframe,params)
#- make sure all the S/N is positive
        self.assertTrue(np.all(qadict['MEDIAN_SNR'] > 0))
#- Reduce sky
skym1=desispec.sky.SkyModel(self.frame.wave,self.skymodel.flux,self.skymodel.ivar,self.mask)
skym2=desispec.sky.SkyModel(self.frame.wave,self.skymodel.flux*0.5,self.skymodel.ivar,self.mask)
frame1=copy.deepcopy(self.frame)
frame2=copy.deepcopy(self.frame)
desispec.sky.subtract_sky(frame1,skym1)
desispec.sky.subtract_sky(frame2,skym2)
qa1=qalib.SignalVsNoise(frame1,params)
qa2=qalib.SignalVsNoise(frame2,params)
self.assertTrue(np.all(qa2['MEDIAN_SNR'] > qa1['MEDIAN_SNR']))
#- test for tracer not present
nullfibermap=desispec.io.empty_fibermap(10)
qa=qalib.SignalVsNoise(self.frame,params)
self.assertEqual(len(qa['MEDIAN_SNR']),30)
#- Test each individual QA:
def testBiasOverscan(self):
return #- test disabled; body below is currently skipped
qa=QA.Bias_From_Overscan('bias',self.config) #- initialize with fake config and name
inp=self.rawimage
qargs={}
qargs["RESULTKEY"] = 'BIAS_AMP'
qargs["PSFFile"]=self.psf
qargs["camera"]=self.camera
qargs["expid"]=self.expid
qargs["amps"]=True
qargs["qafile"]=self.qafile
qargs["qafig"]=self.qafig
qargs["paname"]="abc"
qargs["singleqa"]=None
res1=qa(inp,**qargs)
self.assertEqual(len(res1['METRICS']['BIAS_AMP']),4)
def testGetRMS(self):
return #- test disabled; body below is currently skipped
qa=QA.Get_RMS('rms',self.config)
inp=self.image
qargs={}
qargs["RESULTKEY"] = 'NOISE_AMP'
qargs["PSFFile"]=self.psf
qargs["camera"]=self.camera
qargs["expid"]=self.expid
qargs["amps"]=True
qargs["paname"]="abc"
qargs["qafile"]=self.qafile
qargs["qafig"]=self.qafig
qargs["singleqa"]=None
qargs["param"]={'PERCENTILES': [68.2,95.4,99.7], 'NOISE_AMP_NORMAL_RANGE': [-1.0, 1.0], 'NOISE_AMP_WARN_RANGE': [-2.0, 2.0]}
resl=qa(inp,**qargs)
self.assertTrue("yaml" in qargs["qafile"])
self.assertTrue("png" in qargs["qafig"])
self.assertTrue(len(resl['METRICS']['NOISE_AMP'])==4)
self.assertTrue(np.all(resl['METRICS']['NOISE_AMP'] > 0))
def testCalcXWSigma(self):
return #- test disabled; body below is currently skipped
#- Create another pix file for xwsigma test
xw_hdr = dict()
xw_hdr['CAMERA'] = self.camera
xw_hdr['NIGHT'] = self.night
xw_hdr['EXPID'] = self.expid
xw_hdr['PROGRAM'] = 'dark'
xw_hdr['FLAVOR'] = 'science'
xw_ny = 2000
xw_nx = 2000
xw_rawimage = np.zeros((2*xw_ny,2*xw_nx))
xw_img_pix = xw_rawimage.astype(np.int32)
xw_img_ivar = np.ones_like(xw_img_pix)/3.0**2
xw_img_mask = np.zeros(xw_img_pix.shape,dtype=np.uint32)
#- manually insert gaussian sky peaks
x = np.arange(7)
y = np.arange(7)
a = 10000.
xmu = np.mean(x)
ymu = np.mean(y)
xsigma = 1.0
ysigma = 1.0
peak_counts = np.rint(gaussian2D(x,y,a,xmu,ymu,xsigma,ysigma))
peak_counts = peak_counts.astype(np.int32)
zpeaks = np.array([8401.5,8432.4,8467.5,9479.4])
fibers = np.arange(30)
for i in range(len(zpeaks)):
pix = np.rint(self.psf.xy(fibers,zpeaks[i]))
for j in range(len(fibers)):
for k in range(len(peak_counts)):
ypix = int(pix[0][j]-3+k)
xpix_start =int(pix[1][j]-3)
xpix_stop = int(pix[1][j]+4)
xw_img_pix[ypix][xpix_start:xpix_stop] = peak_counts[k]
#- transpose pixel values to correct place in image
xw_img_pix=np.ndarray.transpose(xw_img_pix)
#- write the test pixfile, fibermap file
xwimage = desispec.image.Image(xw_img_pix, xw_img_ivar, xw_img_mask, camera='z1',meta=xw_hdr)
desispec.io.write_image(self.xwfile, xwimage)
qa=QA.Calc_XWSigma('xwsigma',self.config)
inp=xwimage
qargs={}
qargs["RESULTKEY"] = 'XWSIGMA'
qargs["Flavor"]='science'
qargs["PSFFile"]=self.psffile
qargs["FiberMap"]=self.fibermap
qargs["camera"]=self.camera
qargs["expid"]=self.expid
qargs["amps"]=False
qargs["paname"]="abc"
qargs["qafile"]=self.qafile
qargs["qafig"]=self.qafig
qargs["singleqa"]=None
qargs["param"]={'B_PEAKS': [3914.4, 5199.3, 5578.9],'R_PEAKS': [6301.9, 6365.4, 7318.2, 7342.8, 7371.3],'Z_PEAKS': [8401.5, 8432.4, 8467.5, 9479.4],'PIXEL_RANGE': 7,'XWSIGMA_NORMAL_RANGE': [-2.0, 2.0],'XWSIGMA_WARN_RANGE': [-4.0, 4.0]}
resl=qa(inp,**qargs)
self.assertTrue(len(resl["METRICS"]["XWSIGMA"].ravel())==2)
self.assertTrue("yaml" in qargs["qafile"])
self.assertTrue("png" in qargs["qafig"])
self.assertTrue(len(resl['METRICS']['XWSIGMA'])==4)
self.assertTrue(np.all(resl['METRICS']['XWSIGMA'] > 0))
def testCountPixels(self):
return #- test disabled; body below is currently skipped
qa=QA.Count_Pixels('countpix',self.config)
inp=self.image
qargs={}
qargs["RESULTKEY"] = 'LITFRAC_AMP'
qargs["PSFFile"]=self.psf
qargs["camera"]=self.camera
qargs["expid"]=self.expid
qargs["amps"]=False
qargs["paname"]="abc"
qargs["singleqa"]=None
qargs["param"]={'CUTPIX': 5, 'LITFRAC_NORMAL_RANGE': [-0.1, 0.1], 'LITFRAC_WARN_RANGE': [-0.2, 0.2]}
resl=qa(inp,**qargs)
#- test if amp QAs exist
qargs["amps"] = True
resl2=qa(inp,**qargs)
self.assertTrue(len(resl2['METRICS']['LITFRAC_AMP'])==4)
def testCountSpectralBins(self):
return #- test disabled; body below is currently skipped
qa=QA.CountSpectralBins('countbins',self.config)
inp=self.frame
qargs={}
qargs["RESULTKEY"] = 'NGOODFIB'
qargs["PSFFile"]=self.psf
qargs["FiberMap"]=self.fibermap
qargs["camera"]=self.camera
qargs["expid"]=self.expid
qargs["amps"]=True
qargs["paname"]="abc"
qargs["qafile"]=self.qafile
qargs["qafig"]=None
qargs["singleqa"]=None
qargs["param"]={'CUTBINS': 5, 'N_KNOWN_BROKEN_FIBERS': 0, 'NGOODFIB_NORMAL_RANGE': [-5, 5], 'NGOODFIB_WARN_RANGE': [-10, 10]}
resl=qa(inp,**qargs)
self.assertTrue(resl["METRICS"]["GOOD_FIBERS"].shape[0]==inp.nspec)
self.assertTrue((resl["METRICS"]["NGOODFIB"])<=inp.nspec)
def testSkyCont(self):
return #- test disabled; body below is currently skipped
qa=QA.Sky_Continuum('skycont',self.config)
inp=self.frame
qargs={}
qargs["RESULTKEY"] = 'SKYCONT'
qargs["FiberMap"]=self.fibermap
qargs["camera"]=self.camera
qargs["expid"]=self.expid
qargs["paname"]="abc"
qargs["singleqa"]=None
qargs["param"]={'B_CONT': ["4000, 4500", "5250, 5550"],'R_CONT': ["5950, 6200", "6990, 7230"],'Z_CONT': ["8120, 8270", "9110, 9280"]}
resl=qa(inp,**qargs)
self.assertTrue(resl["METRICS"]["SKYFIBERID"]==self.skyfibers) #- as defined in the fibermap
self.assertTrue(resl["METRICS"]["SKYCONT"]>0)
def testSkyPeaks(self):
return #- test disabled; body below is currently skipped
qa=QA.Sky_Peaks('skypeaks',self.config)
inp=self.frame
qargs={}
qargs["RESULTKEY"] = 'PEAKCOUNT'
qargs["FiberMap"]=self.fibermap
qargs["camera"]=self.camera
qargs["expid"]=self.expid
qargs["paname"]="abc"
qargs["dict_countbins"]=self.map2pix
qargs["singleqa"]=None
qargs["param"]={'B_PEAKS': [3914.4, 5199.3, 5201.8],'R_PEAKS': [6301.9, 6365.4, 7318.2, 7342.8, 7371.3],'Z_PEAKS': [8401.5, 8432.4, 8467.5, 9479.4, 9505.6, 9521.8],'PEAKCOUNT_NORMAL_RANGE': [-1.0, 1.0],'PEAKCOUNT_WARN_RANGE': [-2.0, 2.0]}
resl=qa(inp,**qargs)
#self.assertTrue(np.all(resl['METRICS']['PEAKCOUNT_RMS_AMP'])>=0.)
self.assertTrue(resl['METRICS']['PEAKCOUNT_NOISE']>0)
def testIntegrateSpec(self):
return #- test disabled; body below is currently skipped
qa=QA.Integrate_Spec('integ',self.config)
inp=self.frame
qargs={}
qargs["RESULTKEY"] = 'DELTAMAG_TGT'
qargs["PSFFile"]=self.psf
qargs["FiberMap"]=self.fibermap
qargs["camera"]=self.camera
qargs["expid"]=self.expid
qargs["paname"]="abc"
qargs["dict_countbins"]=self.map2pix
qargs["singleqa"]=None
qargs["param"]={'DELTAMAG_TGT_NORMAL_RANGE': [-2., 2.0], 'DELTAMAG_TGT_WARN_RANGE': [-4., 4.]}
resl=qa(inp,**qargs)
self.assertTrue(len(resl["METRICS"]["STD_FIBERID"])>0)
# RS: We are not using this QA anymore, so we don't need this test
# def testSkyResidual(self):
# qa=QA.Sky_Residual('skyresid',self.config)
# inp=self.frame
# sky=self.skymodel
# qargs={}
# qargs["PSFFile"]=self.psf
# qargs["FiberMap"]=self.fibermap
# qargs["camera"]=self.camera
# qargs["expid"]=self.expid
# qargs["paname"]="abc"
# qargs["dict_countbins"]=self.map2pix
# qargs["singleqa"]=None
# qargs["param"]={"BIN_SZ":0.2, "PCHI_RESID":0.05, "PER_RESID":95., "SKYRESID_NORMAL_RANGE":[-5.0, 5.0], "SKYRESID_WARN_RANGE":[-10.0, 10.0]}
#
# resl=qa(inp,sky,**qargs)
#
# #self.assertTrue(resl["METRICS"]["NREJ"]==self.skymodel.nrej)
# #self.assertTrue(len(resl["METRICS"]["MED_RESID_WAVE"]) == self.nwave)
# #self.assertTrue(len(resl["METRICS"]["MED_RESID_FIBER"]) == 5) #- 5 sky fibers in the input
# #self.assertTrue(resl["PARAMS"]["BIN_SZ"] == 0.1)
# ##- test with different parameter set:
# #resl2=qa(inp,sky,**qargs)
# #self.assertTrue(len(resl["METRICS"]["DEVS_1D"])>len(resl2["METRICS"]["DEVS_1D"])) #- larger histogram bin size than default 0.1
def testCalculateSNR(self):
return #- test disabled; body below is currently skipped
qa=QA.Calculate_SNR('snr',self.config)
inp=self.frame
qargs={}
qargs["RESULTKEY"] = 'FIDSNR'
qargs["PSFFile"]=self.psf
qargs["FiberMap"]=self.fibermap
qargs["camera"]=self.camera
qargs["expid"]=self.expid
qargs["paname"]="abc"
qargs["qafile"]=self.qafile #- no LRG by construction.
qargs["dict_countbins"]=self.map2pix
qargs["singleqa"]=None
qargs["param"]={'RESIDUAL_CUT': 0.2, 'SIGMA_CUT': 2.0, 'FIDSNR_TGT_NORMAL_RANGE': [-11., 11.], 'FIDSNR_TGT_WARN_RANGE': [-12., 12.], 'FIDMAG': 22.}
resl=qa(inp,**qargs)
self.assertTrue("yaml" in qargs["qafile"])
self.assertTrue(len(resl["METRICS"]["MEDIAN_SNR"])==self.nspec) #- one S/N entry per spectrum
def test_suite():
"""Allows testing of only this module with the command::
python setup.py test -m <modulename>
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main()
|
desihub/desispec
|
py/desispec/test/test_ql_qa.py
|
Python
|
bsd-3-clause
| 21,707
|
[
"Gaussian"
] |
03aa510a0110c45a4ed62588477d30f506e463319d987813457afd108b72d6d8
|
from __future__ import print_function, unicode_literals
from builtins import zip
from builtins import range
import sys
import re
import warnings
import itertools
import string
import math
import numpy as np
import Bio.PDB as bpdb
from collections import defaultdict
import forgi.utilities.debug as fud
import forgi.threedee.utilities.vector as ftuv
from forgi.threedee.utilities.modified_res import to_4_letter_alphabeth
import forgi.graph.residue as fgr
from logging_exceptions import log_to_exception
import logging
log = logging.getLogger(__name__)
class AtomName(str):
"""
Like a string, but "C1'" and "C1*" compare equal
"""
def __eq__(self, other):
if self.endswith("*"):
self = self[:-1] + "'"
if other.endswith("*"):
other = other[:-1] + "'"
return str(self) == str(other)
def __hash__(self):
if self.endswith("*"):
self = self[:-1] + "'"
return hash(str(self))
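# Illustrative sketch (not part of the original module): because __eq__ and
# __hash__ both canonicalize a trailing "*" to "'", the Rosetta-style and
# PDB-style spellings of a sugar atom collapse to a single key:
#
#     >>> AtomName("C1*") == AtomName("C1'")
#     True
#     >>> len({AtomName("C1*"), AtomName("C1'")})
#     1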
backbone_atoms = list(map(AtomName, ['P', "O5'", "C5'", "C4'", "C3'", "O3'"]))
ring_atoms = list(map(AtomName, ["C4'", "C3'", "C2'", "C1'", "O4'"]))
nonsidechain_atoms = backbone_atoms + ring_atoms
chi_torsion_atoms = dict()
chi_torsion_atoms['A'] = chi_torsion_atoms['G'] = list(
map(AtomName, ["O4'", "C1'", "N9", "C4"]))
chi_torsion_atoms['C'] = chi_torsion_atoms['U'] = list(
map(AtomName, ["O4'", "C1'", "N1", "C2"]))
side_chain_atoms = dict()
side_chain_atoms['U'] = list(
map(AtomName, ['N1', 'C2', 'O2', 'N3', 'C4', 'O4', 'C5', 'C6']))
side_chain_atoms['C'] = list(
map(AtomName, ['N1', 'C2', 'O2', 'N3', 'C4', 'N4', 'C5', 'C6']))
side_chain_atoms['A'] = list(
map(AtomName, ['N1', 'C2', 'N3', 'C4', 'C5', 'C6', 'N6', 'N7', 'C8', 'N9']))
side_chain_atoms['G'] = list(
map(AtomName, ['N1', 'C2', 'N2', 'N3', 'C4', 'C5', 'C6', 'O6', 'N7', 'C8', 'N9']))
all_side_chains = set(
side_chain_atoms['U'] + side_chain_atoms['C'] + side_chain_atoms['A'] + side_chain_atoms['G'])
all_rna_atoms = set(nonsidechain_atoms) | all_side_chains
RNA_RESIDUES = ["A", "U", "G", "C", 'rA', 'rC', 'rG', 'rU', 'DU']
interactions = [(AtomName(a), AtomName(b)) for a, b in map(sorted,
[('P', "O5'"),
('P', 'OP1'),
('P', 'O1P'),
('P', 'OP2'),
('P', 'O2P'),
("C2'", "O2'"),
("O5'", "C5'"),
("C5'", "C4'"),
("C4'", "O4'"),
("C4'", "C3'"),
("O4'", "C1'"),
("C3'", "C2'"),
("C3'", "O3'"),
("C2'", "C1'"),
("C1'", "N1"),
('N1', 'C2'),
('N1', 'C6'),
('C6', 'C5'),
('C5', 'C4'),
('C4', 'O4'),
('C4', 'N4'),
('C4', 'N3'),
('N3', 'C2'),
('C2', 'O2'),
('C2', 'N2'),
("C1'", "N9"),
('N9', 'C8'),
('N9', 'C4'),
('C8', 'N7'),
('N7', 'C5'),
('C6', 'O6'),
('C6', 'N6')])]
def rename_chains_for_pdb(chains):
"""
:param chains: A dict chain_id:chain
"""
used_chainids = set(chains.keys())
def get_available_chainid():
for c in string.ascii_uppercase:
if c not in used_chainids:
used_chainids.add(c)
return c
raise ValueError("Too many chains. Cannot convert to old PDB format.")
for chain in chains.values():
if len(chain.id)>1:
chain.id = get_available_chainid()
return {c.id: c for c in chains.values()}
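# Hedged usage sketch (the chain objects here are hypothetical): any chain
# whose id is longer than one character gets the next free uppercase letter,
# as required by the single-character chain field of the legacy PDB format:
#
#     >>> # chains = {"A": chain_a, "XY": chain_xy}
#     >>> # rename_chains_for_pdb(chains)   ->   {"A": chain_a, "B": chain_xy}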
def trim_chain_between(chain, start_res, end_res):
'''
Remove all nucleotides between start_res and end_res, inclusive.
The chain is modified in place so there is no return value.
'''
to_detach = []
for res in chain:
if start_res <= res.id[1] <= end_res:
to_detach += [res]
for res in to_detach:
chain.detach_child(res.id)
def extract_subchains_from_seq_ids(all_chains, seq_ids):
'''
Extract a portion of one or more pdb chains.
Creates a list of new chains which contain only
the specified residues copied from the original chain.
The chain ids are not modified.
:param all_chains: A dictionary {chainid:chains}.
:param seq_ids: An iterable of complete RESIDS.
:returns: A dictionary chain-id:Bio.PDB.Chain.Chain objects
'''
new_chains = {}
assert isinstance(all_chains, dict)
for r in seq_ids:
if r.chain in new_chains:
chain = new_chains[r.chain]
else:
chain = new_chains[r.chain] = bpdb.Chain.Chain(r.chain)
try:
chain.add(all_chains[r.chain][r.resid].copy())
except KeyError:
log.info(list(sorted(all_chains[r.chain].child_dict.keys())))
raise
return new_chains
def is_covalent(contact):
'''
Determine if a particular contact is covalent.
This does not look at the geometric distance but only at the atom names.
:param contact: A pair of two Atom objects
:return: `True` if they are covalently bonded
`False` otherwise
'''
r1 = contact[0].parent
r2 = contact[1].parent
r1a = (r1, contact[0])
r2a = (r2, contact[1])
if contact[0].name.find('H') >= 0 or contact[1].name.find('H') >= 0:
return True
((r1, c1), (r2, c2)) = sorted((r1a, r2a), key=lambda x: x[0].id[1])
if r1.id == r2.id:
if tuple(sorted((c1.name, c2.name))) in interactions:
return True
if r2.id[1] - r1.id[1] == 1:
# neighboring residues
if AtomName(c1.name) == "O3'" and c2.name == 'P':
return True
return False
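# Minimal sketch of the decision logic above, assuming two Bio.PDB atoms:
# any pair involving a hydrogen is treated as covalent; within one residue
# the sorted name pair must occur in the `interactions` table; across two
# neighboring residues only the O3'-P backbone link counts.
#
#     >>> # is_covalent((res5["O3'"], res6["P"]))   ->   True
#     >>> # is_covalent((res5["N1"],  res6["P"]))   ->   False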
def num_noncovalent_clashes(chain):
'''
Check if a chain has non-covalent clashes. Non-covalent clashes are found
when two atoms that aren't covalently linked are within 1.9 A of each
other (the search radius used below).
:param chain: The chain to evaluate
:return: The number of non-covalent clashes.
'''
all_atoms = bpdb.Selection.unfold_entities(chain, 'A')
ns = bpdb.NeighborSearch(all_atoms)
contacts = ns.search_all(1.9)
return len([c for c in contacts if not is_covalent(c)])
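# Hedged usage example (the file name is hypothetical): count steric clashes
# in the largest RNA chain of a structure. The neighbor search above examines
# atom pairs up to 1.9 A apart and discards the covalently bonded ones.
#
#     >>> # chain, _, _ = get_biggest_chain("my_structure.pdb")
#     >>> # num_noncovalent_clashes(chain)   ->   e.g. 0 for a clean model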
def noncovalent_distances(chain, cutoff=0.3):
'''
Print out the distances between all non-covalently bonded atoms
which are closer than cutoff to each other.
:param chain: The Bio.PDB chain.
:param cutoff: The maximum distance
'''
all_atoms = bpdb.Selection.unfold_entities(chain, 'A')
ns = bpdb.NeighborSearch(all_atoms)
contacts = ns.search_all(cutoff)
return [ftuv.magnitude(c[1] - c[0]) for c in contacts if not is_covalent(c)]
def pdb_rmsd(c1, c2, sidechains=False, superimpose=True ):
'''
Calculate the all-atom rmsd between two RNA chains.
:param c1: A Bio.PDB.Chain
:param c2: Another Bio.PDB.Chain
:return: A tuple (number of atoms used, rmsd, rotation/translation pair,
per-residue deviations) describing the fit between the two chains.
'''
c1_list = [cr for cr in c1.get_list() if cr.resname.strip()
in RNA_RESIDUES]
c2_list = [cr for cr in c2.get_list() if cr.resname.strip()
in RNA_RESIDUES]
return residuelist_rmsd(c1_list, c2_list, sidechains, superimpose )
def residuelist_rmsd(c1_list, c2_list, sidechains=False, superimpose=True):
import forgi.threedee.model.similarity as ftms
if len(c1_list) != len(c2_list):
log.error("c1_list (len %s): %s", c1_list)
log.error("c2_list (len %s): %s", c2_list)
raise Exception(
"Chains of different length. (Maybe an RNA-DNA hybrid?)")
#c1_list.sort(key=lambda x: x.id[1])
#c2_list.sort(key=lambda x: x.id[1])
to_residues = []
crds1 = []
crds2 = []
all_atoms1 = []
all_atoms2 = []
for r1, r2 in zip(c1_list, c2_list):
if sidechains:
anames = nonsidechain_atoms + \
side_chain_atoms[r1.resname.strip()]
else:
anames = nonsidechain_atoms
#anames = a_5_names + a_3_names
for a in anames:
try:
at1 = r1[a]
at2 = r2[a]
except KeyError: # atom missing in one of the residues
continue
else:
all_atoms1.append(at1)
all_atoms2.append(at2)
crds1.append(at1.coord)
crds2.append(at2.coord)
to_residues.append(r1)
diff_vecs = ftms._pointwise_deviation(crds1, crds2)
dev_per_res = defaultdict(list)
for i, res in enumerate(to_residues):
dev_per_res[res].append(diff_vecs[i])
if superimpose:
sup = bpdb.Superimposer()
sup.set_atoms(all_atoms1, all_atoms2)
return (len(all_atoms1), sup.rms, sup.rotran, dev_per_res)
else:
return (len(all_atoms1), ftuv._vector_set_rmsd(crds1, crds2), None, dev_per_res)
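# Usage sketch (the chains are hypothetical): both rmsd helpers return a
# 4-tuple rather than a bare number - (n_atoms, rmsd, rotran,
# per_residue_deviations) - and rotran is None when superimpose=False:
#
#     >>> # n_atoms, rms, rotran, devs = pdb_rmsd(chain1, chain2)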
def get_first_chain(filename):
'''
Load a PDB file using the Bio.PDB module and return the first chain.
:param filename: The path to the pdb file
'''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
s = bpdb.PDBParser(PERMISSIVE=False).get_structure('t', filename)
return list(s.get_chains())[0]
def pdb_file_rmsd(fn1, fn2):
'''
Calculate the RMSD of all the atoms in two pdb structures.
:param fn1: The first filename.
:param fn2: The second filename.
:return: The rmsd between the two structures.
'''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
s1 = bpdb.PDBParser().get_structure('t', fn1)
s2 = bpdb.PDBParser().get_structure('t', fn2)
c1, _, _ = get_biggest_chain(fn1)
c2, _, _ = get_biggest_chain(fn2)
rmsd = pdb_rmsd(c1, c2)
return rmsd
def renumber_chain(chain, resids=None):
'''
Renumber all the residues in this chain so that they start at 1 and end at
len(chain)
:param chain: A Bio.PDB.Chain object
:return: The same chain, but with renamed nucleotides
'''
counter = 1
if resids is None:
resids = [(' ', i + 1, ' ') for i in range(len(chain))]
new_child_dict = dict()
new_child_list = []
for res, r_new in zip(chain, resids):
res.id = r_new
new_child_dict[res.id] = res
new_child_list.append(res)
chain.child_dict = new_child_dict
chain.child_list = new_child_list
return chain
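# Sketch of the default renumbering (the chain is hypothetical): without an
# explicit resids list, residues are renamed to (' ', 1, ' '), (' ', 2, ' '),
# ... in the chain's current order, and the chain's child bookkeeping is
# rebuilt to match:
#
#     >>> # renumber_chain(chain)
#     >>> # [r.id for r in chain]   ->   [(' ', 1, ' '), (' ', 2, ' '), ...]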
def output_chain(chain, filename, fr=None, to=None):
'''
Dump a chain to an output file. Remove the hydrogen atoms.
:param chain: The Bio.PDB.Chain to dump.
:param filename: The place to dump it.
'''
class HSelect(bpdb.Select):
def accept_atom(self, atom):
if atom.name.find('H') >= 0:
return False
else:
return True
m = bpdb.Model.Model(' ')
s = bpdb.Structure.Structure(' ')
m.add(chain)
s.add(m)
io = bpdb.PDBIO()
io.set_structure(s)
io.save(filename, HSelect())
def output_multiple_chains(chains, filename, file_type="pdb"):
'''
Dump multiple chains to an output file. Remove the hydrogen atoms.
:param chains: An iterable of Bio.PDB.Chain to dump.
:param filename: The place to dump it.
'''
class HSelect(bpdb.Select):
def accept_atom(self, atom):
if atom.name.find('H') >= 0:
return False
else:
return True
m = bpdb.Model.Model(0)
s = bpdb.Structure.Structure('stru')
for chain in chains:
log.debug("Adding chain %s with %s residues", chain.id, len(chain))
m.add(chain)
if file_type=="pdb" and len(chain.id)!=1:
raise ValueError("Cannot save chain with name %s (not a single character) "
"in PDB format. Use cif format instead!")
s.add(m)
if file_type == "pdb":
io = bpdb.PDBIO()
else:
io = bpdb.MMCIFIO()
io.set_structure(s)
try:
io.save(filename, HSelect())
except Exception as e:
with log_to_exception(log, e):
log.error("Could not output PDB with chains and residues:")
for chain in s[0]:
log.error("%s: %s", chain.id, [r.id for r in chain])
raise
def get_particular_chain(in_filename, chain_id, parser=None):
'''
Load a PDB file and return a particular chain.
:param in_filename: The name of the pdb file.
:param chain_id: The id of the chain.
:return: A Bio.PDB.Chain object containing that particular chain.
'''
chains, mr, ir = get_all_chains(in_filename, parser)
chain, = [c for c in chains if c.id == chain_id]
return chain, mr, ir
def get_biggest_chain(in_filename, parser=None):
'''
Load the PDB file located at filename, select the longest
chain and return it.
:param in_filename: The location of the original file.
:return: A Bio.PDB chain structure corresponding to the longest
chain in the structure stored in in_filename
'''
chains, mr, ir = get_all_chains(in_filename, parser)
biggest = 0
biggest_len = 0
for i in range(len(chains)):
c = chains[i]
# Only count RNA residues
num_residues = 0
for res in c:
if (res.resname.strip() == 'A' or
res.resname.strip() == 'C' or
res.resname.strip() == 'G' or
res.resname.strip() == 'U'):
num_residues += 1
if num_residues > biggest_len:
biggest = i
biggest_len = num_residues
# sys.exit(1)
orig_chain = chains[biggest]
return orig_chain, mr, ir
def _extract_symmetrymatrices_from_cif_dict(cif_dict):
"""
Code originally by G. Entzian, modified
Extract matrices for symmetry operations from the cif-dict to
convert the asymmetric unit to the biological unit.
:returns: A tuple matrices, vectors.
matrices is a dictionary {operation_id: rot_matrix}
vectors a dictionary {operation_id: vector}
"""
symmetry_ids = cif_dict["_pdbx_struct_oper_list.id"]
matrices = defaultdict(lambda: [[0.0 for x in range(3)] for y in range(3)])
vectors = defaultdict(lambda: [0.0 for x in range(3)])
for i in range(1,4):
v_key = '_pdbx_struct_oper_list.vector['+str(i)+']'
value_vectors = cif_dict[v_key]
if not isinstance(value_vectors, list):
value_vectors = [value_vectors]
for k, v in enumerate(value_vectors):
vectors[symmetry_ids[k]][i-1] = float(v)
for j in range(1,4):
key = '_pdbx_struct_oper_list.matrix['+str(i)+']['+str(j)+']'
value_per_matrix = cif_dict[key]
if not isinstance(value_per_matrix, list):
value_per_matrix = [value_per_matrix]
for k, v in enumerate(value_per_matrix):
matrices[symmetry_ids[k]][i-1][j-1] = float(v)
return matrices, vectors
def _convert_cif_operation_id_expression(expression):
"""
By Gregor Entzian
"""
tmp_expr = expression
op_id_list = []
match = re.findall(r"\([\w\-\,\d]+\)", tmp_expr)#("(\d+)\-(\d+)", id)
if match:
for m in match:
match_s = re.search("\((\d+)\-(\d+)\)", m)
if match_s:
f = match_s.group(1)
t = match_s.group(2)
f = int(f)
t = int(t)
for n in range(f,t+1):
op_id_list.append(str(n))
tmp_expr = tmp_expr.replace(m,",")
continue
else:
#split ',' in case of more entries. In case of one entry use the same code.
l = m[1:-1] # remove parentheses
parts = l.split(',')
for p in parts:
op_id_list.append(str(p))
tmp_expr = tmp_expr.replace(m,",")
continue
if tmp_expr is not None and tmp_expr != "":
tmp_expr = tmp_expr.replace("(","").replace(")","")
parts = tmp_expr.split(',')
for p in parts:
if ',' not in p and p != '':
match_s = re.search("(\d+)\-(\d+)", p)
if match_s:
f = match_s.group(1)
t = match_s.group(2)
f = int(f)
t = int(t)
for n in range(f,t+1):
op_id_list.append(str(n))
continue
op_id_list.append(str(p))
return tuple(op_id_list)
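# Worked examples for the operation-id expression parser above (traced by
# hand from the regexes; shown as illustration, not as doctests run here):
#
#     >>> _convert_cif_operation_id_expression("(1-3)")
#     ('1', '2', '3')
#     >>> _convert_cif_operation_id_expression("1,2")
#     ('1', '2')
#     >>> _convert_cif_operation_id_expression("(1-2)(5)")
#     ('1', '2', '5')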
def _extract_assembly_gen(cif_dict):
"""
Extracts the information, how assemblies are generated out
of the chains and symmetry operations, from the cifdict.
:returns: A dictionary {assembly_id: [(chainid, operationids)...]}
Where operation_ids is a list
"""
assembly_ids = cif_dict['_pdbx_struct_assembly_gen.assembly_id'] #1
operation_ids = cif_dict['_pdbx_struct_assembly_gen.oper_expression'] #1,2
chain_id_lists = cif_dict['_pdbx_struct_assembly_gen.asym_id_list'] #A,B
if not isinstance(assembly_ids, list):
assembly_ids=[assembly_ids]
if not isinstance(operation_ids, list):
operation_ids=[operation_ids]
if not isinstance(chain_id_lists, list):
chain_id_lists=[chain_id_lists]
assembly_components = defaultdict(lambda:defaultdict(list)) # assembly: operation-lists: chains
for i, aid in enumerate(assembly_ids):
op_ids=_convert_cif_operation_id_expression(operation_ids[i])
chain_ids=chain_id_lists[i].split(',')
assembly_components[aid][op_ids].extend(chain_ids)
return assembly_components
def _get_new_chainid(chain_id, op_id, taken_ids):
new_id= chain_id+"op"+str(op_id)
while new_id in taken_ids:
new_id+="a"
return new_id
def _extract_asym_auth_id_map(cif_dict):
label_ids = cif_dict['_atom_site.label_asym_id']
auth_ids = cif_dict['_atom_site.auth_asym_id']
l2a=defaultdict(lambda: None)
a2l=defaultdict(list)
for i, lid in enumerate(label_ids):
if lid in l2a:
if l2a[lid]!=auth_ids[i]:
log.error("lid %s, authid %s but before %s", lid, auth_ids[i], l2a[lid])
raise ValueError("Inconsistent cif.")
else:
l2a[lid]=auth_ids[i]
a2l[auth_ids[i]].append(lid)
log.debug(l2a)
return l2a, a2l
def _get_assemblies(chains, cifdict, assembly_nr=None):
assemblies = _extract_assembly_gen(cifdict)
id2chainid, chainid2ids = _extract_asym_auth_id_map(cifdict)
operation_mat, operation_vec = _extract_symmetrymatrices_from_cif_dict(cifdict)
if assembly_nr is None:
assembly_nr = list(sorted(assemblies.keys()))[0]
assembly_gen = assemblies[assembly_nr]
old_chains = { c.id:c for c in chains}
new_chains = {}
log.debug("Original chains are %s. Now performing symmetry "
"for assembly %s", old_chains, assembly_nr)
log.debug("AG: %s",assembly_gen)
for operations, labelids in assembly_gen.items():
chainids = set(id2chainid[lid] for lid in labelids)
lids_back = set(x for cid in chainids for x in chainid2ids[cid])
if lids_back!=set(labelids):
log.error("%s, %s, extra: %s", lids_back, labelids, lids_back-set(labelids))
raise ValueError("Operation on part of an author designated assymetric unit "
"(auth_asym_id) nt supported.")
for chain_id in chainids:
if chain_id not in old_chains:
log.debug ("Skipping chain %s: not RNA? chains: %s", chain_id, old_chains.keys())
continue
for op_id in operations:
log.debug("Applying op %s to %s", op_id, chain_id)
chain = old_chains[chain_id].copy()
newid = _get_new_chainid(chain_id, op_id,
set(old_chains.keys())| set(new_chains.keys()))
log.debug("Setting id %s", newid)
chain.id = newid
chain.transform(operation_mat[op_id], operation_vec[op_id])
new_chains[chain.id]=chain
log.debug("new_chains: %s", new_chains)
chains = list(new_chains.values())
return chains
def get_all_chains(in_filename, parser=None, no_annotation=False, assembly_nr=None):
'''
Load the PDB file located at filename, read all chains and return them.
:param in_filename: The location of the original file.
:param assembly_nr: Which assembly to return. Default: The first.
:return: a tuple chains, missing_residues
* chains: A list of Bio.PDB chain structures corresponding to all
RNA structures stored in in_filename
* missing_residues: A list of dictionaries, describing the missing residues.
* interacting residues: A list of residues
'''
if parser is None:
if in_filename.endswith(".pdb"):
parser = bpdb.PDBParser()
elif in_filename.endswith(".cif"):
parser = bpdb.MMCIFParser()
else: # Cannot determine filetype by extension. Try to read the first line.
with open(in_filename) as pdbfile:
line = pdbfile.readline(20)
# According to
# page 10 of ftp://ftp.wwpdb.org/pub/pdb/doc/format_descriptions/Format_v33_A4.pdf
# a HEADER entry is mandatory, but files (e.g. those written by Biopython) sometimes start directly with ATOM
if line.startswith("HEADER") or line.startswith("ATOM"):
parser = bpdb.PDBParser()
else:
parser = bpdb.MMCIFParser()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
s = parser.get_structure('temp', in_filename)
if len(s) > 1:
warnings.warn("Multiple models in file. Using only the first model")
# Let's detach all H2O, to speed up processing.
for chain in s[0]:
log.debug("Before detaching water from %s: chain has %s residues", chain.id, len(chain))
for r in chain.child_list[:]: # We need a copy here, because we are modifying it during iteration
if r.resname.strip() == "HOH":
chain.detach_child(r.id)
log.debug("After detaching water from %s: chain has %s residues", chain.id, len(s[0][chain.id]))
# Rename residues from other programs
for chain in s[0]:
for r in chain:
# rename rosetta-generated structures
if r.resname == ' rA':
r.resname = ' A'
elif r.resname == ' rC':
r.resname = ' C'
elif r.resname == ' rG':
r.resname = ' G'
elif r.resname == ' rU':
r.resname = ' U'
# rename iFoldRNA-generated structures
if r.resname == 'ADE':
r.resname = ' A'
elif r.resname == 'CYT':
r.resname = ' C'
elif r.resname == 'GUA':
r.resname = ' G'
elif r.resname == 'URI':
r.resname = ' U'
# Now search for protein interactions.
if not no_annotation:
interacting_residues = enumerate_interactions_kdtree(s[0])
else:
interacting_residues = set()
# The chains containing RNA
chains = list(chain for chain in s[0] if contains_rna(chain))
try:
log.debug("PDB header %s", parser.header)
mr = parser.header["missing_residues"]
if assembly_nr is not None:
warnings.warn("Getting an assembly is not supported for the old PDB format.")
except AttributeError: # A mmCIF parser
cifdict = parser._mmcif_dict # We read a private attribute here, because parsing the mmcif dictionary a second time would cause a performance penalty.
# Generate an assembly
try:
operation_mat, operation_vec = _extract_symmetrymatrices_from_cif_dict(cifdict)
except KeyError:
pass
else:
if False: # Still experimental and not working correctly.
chains = _get_assemblies(chains, cifdict)
mr = []
try:
mask = np.array(
cifdict["_pdbx_poly_seq_scheme.pdb_mon_id"], dtype=str) == "?"
int_seq_ids = np.array(
cifdict["_pdbx_poly_seq_scheme.pdb_seq_num"], dtype=int)[mask]
cs = np.array(
cifdict["_pdbx_poly_seq_scheme.pdb_strand_id"], dtype=str)[mask]
insertions = np.array(
cifdict["_pdbx_poly_seq_scheme.pdb_ins_code"], dtype=str)[mask]
insertions[insertions == "."] = " "
symbol = np.array(
cifdict["_pdbx_poly_seq_scheme.mon_id"], dtype=str)[mask]
except KeyError:
pass
else:
if not no_annotation:
for i, sseq in enumerate(int_seq_ids):
mr.append({
"model": None,
"res_name": symbol[i],
"chain": cs[i],
"ssseq": sseq,
"insertion": insertions[i]
})
except KeyError:
mr = []
with open(in_filename) as f:
for wholeline in f:
if wholeline.startswith("REMARK 465"):
line = wholeline[10:].strip()
mr_info = _parse_remark_465(line)
if mr_info is not None:
mr.append(mr_info)
else:
continue
else:
if mr:
log.info("This PDB has missing residues")
elif not no_annotation:
log.info("This PDB has no missing residues")
'''for res1, res2 in itertools.combinations(s[0].get_residues(), 2):
rna_res=None
other_res=None
if res1.resname.strip() in RNA_RESIDUES:
rna_res=res1
else:
other_res=res1
if res2.resname.strip() in RNA_RESIDUES:
rna_res=res2
else:
other_res=res2
if rna_res is None or other_res is None:
continue
if other_res.resname.strip()=="HOH":
continue
if residues_interact(rna_res, other_res):
log.error("%s and %s interact", rna_res, other_res)
interacting_residues.add(rna_res)'''
log.debug("LOADING DONE: chains %s, mr %s, ir: %s",
chains, mr, interacting_residues)
return chains, mr, interacting_residues
def _parse_remark_465(line):
"""Parse missing residue remarks.
Returns a dictionary describing the missing residue.
The specification for REMARK 465 at
http://www.wwpdb.org/documentation/file-format-content/format33/remarks2.html#REMARK%20465
only gives templates, but does not say they have to be followed.
So we assume that not all pdb-files with a REMARK 465 can be understood.
Returns a dictionary with the following keys:
"model", "res_name", "chain", "ssseq", "insertion"
"""
if line:
# Note that line has been stripped.
assert line[0] != " " and line[-1] not in "\n ", "line has to be stripped"
pattern = (r"""
(\d+\s[\sA-Z][\sA-Z][A-Z] | # Either model number + residue name
[A-Z]?[A-Z]?[A-Z]) # Or only residue name with
# 1 (RNA) to 3 letters
\s ([A-Za-z0-9]) # A single character chain
\s+(\d+[A-Za-z]?)$ # Residue number: A digit followed
# by an optional insertion code
# (Hetero-flags make no sense in
# context with missing res)
""")
match = re.match(pattern, line, re.VERBOSE)
if match is not None:
residue = {}
if " " in match.group(1):
model, residue["res_name"] = match.group(1).split(" ")
residue["model"] = int(model)
else:
residue["model"] = None
residue["res_name"] = match.group(1)
residue["chain"] = match.group(2)
try:
residue["ssseq"] = int(match.group(3))
except ValueError:
residue["insertion"] = match.group(3)[-1]
residue["ssseq"] = int(match.group(3)[:-1])
else:
residue["insertion"] = None
return residue
return None
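# Hand-traced examples for the REMARK 465 parser above (illustrative only;
# the lines must already be stripped, as the assertion requires):
#
#     >>> _parse_remark_465("U A 5")
#     {'model': None, 'res_name': 'U', 'chain': 'A', 'ssseq': 5, 'insertion': None}
#     >>> _parse_remark_465("1 GUA B 21A")
#     {'model': 1, 'res_name': 'GUA', 'chain': 'B', 'ssseq': 21, 'insertion': 'A'}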
def enumerate_interactions_kdtree(model):
relevant_atoms = [a for a in model.get_atoms() if a.name[0] in [
"C", "N", "O"]]
if not relevant_atoms:
return set()
kdtree = bpdb.NeighborSearch(relevant_atoms)
pairs = kdtree.search_all(6, "A")
res_pair_list = set()
for a1, a2 in pairs:
if a1.name not in all_side_chains and a2.name not in all_side_chains:
continue
p1 = a1.get_parent()
p2 = a2.get_parent()
if p1.id == p2.id:
continue
elif p1 < p2:
res_pair_list.add((p1, p2))
else:
res_pair_list.add((p2, p1))
interacting_residues = set()
for res1, res2 in res_pair_list:
rna_res = None
other_res = None
if res1.resname.strip() in RNA_RESIDUES and not res1.id[0].startswith("H_"):
rna_res = res1
else:
other_res = res1
if res2.resname.strip() in RNA_RESIDUES and not res2.id[0].startswith("H_"):
rna_res = res2
else:
other_res = res2
if rna_res is None or other_res is None:
continue
log.debug("%s(chain %s) and %s(chain %s, resname %s) are close", rna_res,rna_res.parent.id,other_res, other_res.parent.id, other_res.resname.strip())
# Only consider C and N. So no ions etc
if any(a.name[0] in ["C", "N"] for a in other_res.get_atoms()):
interacting_residues.add(rna_res)
else:
log.debug("but %s has wrong atoms %s", other_res,
list(a.name for a in other_res.get_atoms()))
log.debug("Interacting: %s", interacting_residues)
return interacting_residues
"""def residues_interact(rna_res, other_res):
for rna_atom in rna_res:
if rna_atom.get_name() in all_side_chains:
for other_atom in other_res:
atom_symbol="".join(s for s in other_atom.get_name() if not s.isdigit())
if atom_symbol in ["C", "N"]:
d=ftuv.vec_distance(rna_atom.coord, other_atom.coord)
if d<6:
return True
return False"""
HBOND_CUTOFF = 4.5 # 4.5 and 0.9 are values optimized against DSSR for 5T5H_A-B-C
OOP_CUTOFF = 0.9
def _get_points(res1, res2):
labels = {res1.resname.strip(), res2.resname.strip()}
if labels == {"A", "U"}:
return _points_AU(res1, res2)
elif labels == {"G", "C"}:
return _points_GC(res1, res2)
elif labels == {"G", "U"}:
return _points_GU(res1, res2)
else:
return None
def _points_AU(res1, res2):
if res1.resname.strip() == "A":
resA = res1
resU = res2
else:
resA = res2
resU = res1
a = resA["N6"].coord
b = resU["O4"].coord
c = resA["N1"].coord
d = resU["N3"].coord
return (resA["C8"].coord, resU["C6"].coord), (a, b), (c, d)
def _points_GU(res1, res2):
if res1.resname.strip() == "G":
resG = res1
resU = res2
else:
resG = res2
resU = res1
a = resG["O6"].coord
b = resU["N3"].coord
c = resG["N1"].coord
d = resU["O2"].coord
return (resG["C8"].coord, resU["C6"].coord), (a, b), (c, d)
def _points_GC(res1, res2):
if res1.resname.strip() == "G":
resG = res1
resC = res2
else:
resG = res2
resC = res1
a = resG["O6"].coord
c = resG["N1"].coord
e = resG["N2"].coord
b = resC["N4"].coord
d = resC["N3"].coord
f = resC["O2"].coord
return (resC["C6"].coord, resG["C8"].coord), (a, b), (c, d), (e, f)
def is_basepair_pair(res1, res2):
pairs = _get_points(res1, res2)
if not pairs:
return False
for pair in pairs[1:]: # pairs[0] is only for coplanarity
d = ftuv.vec_distance(pair[0], pair[1])
if d >= HBOND_CUTOFF:
return False
if is_almost_coplanar(*[point for pair in pairs for point in pair]):
return True
return False
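# Summary sketch of the detection rule implemented above: for canonical (and
# GU wobble) pairs, every donor/acceptor pair returned by _get_points must be
# closer than HBOND_CUTOFF (4.5 A), and all reference points must lie within
# OOP_CUTOFF (0.9 A) of a common best-fit plane; both thresholds were tuned
# against DSSR output (see the comment at their definition).
#
#     >>> # is_basepair_pair(resG, resC)   ->   True for a Watson-Crick G-C pair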
def _coplanar_point_indices(*points):
""" Thanks to https://stackoverflow.com/a/18968498"""
from numpy.linalg import svd
points = np.array(points).T
assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(
points.shape[1], points.shape[0])
ctr = points.mean(axis=1)
x = points - ctr[:, np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
normal = svd(M)[0][:, -1]
out = []
for i, p in enumerate(points.T):
w = p - ctr
oop_distance = ftuv.magnitude(
np.dot(w, normal)) / ftuv.magnitude(normal)
if oop_distance <= OOP_CUTOFF:
out.append(i)
return out, ctr, normal
def is_almost_coplanar(*points):
indices, c, n = _coplanar_point_indices(*points)
return len(indices) == len(points)
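# Hand-checked illustration (values chosen for clarity, not from the source):
# the SVD plane fit above takes the singular vector with the smallest
# singular value as the plane normal, so four points with at most 0.1 A
# out-of-plane deviation sit well inside the 0.9 A OOP_CUTOFF:
#
#     >>> # is_almost_coplanar((0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0.1))
#     >>> # -> True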
def annotate_fallback(chain_list):
"""
If neither DSSR nor MC-Annotate are available, we use an ad-hoc implementation of canonical
basepair detection as fallback.
This does not work well for missing atoms or modified residues.
"""
kdtree = bpdb.NeighborSearch(
[atom for chain in chain_list for atom in chain.get_atoms()])
pairs = kdtree.search_all(10, "R")
basepairs = {}
# Sorted, so conflicting basepairs are deterministically solved
for res1, res2 in sorted(pairs):
if res1.resname.strip() not in RNA_RESIDUES or res1.id[0].startswith("H_"):
continue
if res2.resname.strip() not in RNA_RESIDUES or res2.id[0].startswith("H_"):
continue
labels = {res1.resname.strip(), res2.resname.strip()}
try:
is_bp = is_basepair_pair(res1, res2)
if is_bp:
res1_id = fgr.resid_from_biopython(res1)
res2_id = fgr.resid_from_biopython(res2)
if res1_id in basepairs:
warnings.warn("More than one basepair detected for {}."
" Ignoring {}-{} because {}-{} is already"
" part of the structure".format(res1_id, res1_id, res2_id, res1_id, basepairs[res1_id]))
continue
if res2_id in basepairs:
warnings.warn("More than one basepair detected for {}."
" Ignoring {}-{} because {}-{} is already"
" part of the structure".format(res2_id, res2_id, res1_id, res2_id, basepairs[res2_id]))
continue
basepairs[res1_id] = res2_id
basepairs[res2_id] = res1_id
except KeyError as e:
log.debug("Missing atom %s. %s has atoms %s, %s has atoms %s",
e, res1, res1.child_dict, res2, res2.child_dict)
pass
seq_ids = []
for chain in sorted(chain_list, key=lambda x: x.id):
for residue in chain:
seq_ids.append(fgr.resid_from_biopython(residue))
bpseq = ""
chain_dict = {c.id: c for c in chain_list}
for i, seqid in enumerate(seq_ids):
if seqid in basepairs:
bp = seq_ids.index(basepairs[seqid]) + 1
else:
bp = 0
bpseq += "{} {} {}\n".format(i + 1,
chain_dict[seqid.chain][seqid.resid].resname.strip(
),
bp)
return bpseq, seq_ids
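# The bpseq string built above follows the usual "index residue partner"
# convention, one residue per line with partner 0 for unpaired bases, e.g.
# (illustrative values):
#
#     1 G 8
#     2 C 7
#     3 A 0
#     ...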
def rename_rosetta_atoms(chain):
'''
Rosetta names all the backbone atoms with an asterisk rather than an
apostrophe; this needs to be reversed.
:param chain: A Bio.PDB.Chain structure generated by Rosetta
:return: The same chain with renamed atoms
'''
for a in bpdb.Selection.unfold_entities(chain, 'A'):
oldid = a.id
a.name = a.name.replace('*', "'")
a.fullname = a.name.replace('*', "'")
a.id = a.id.replace('*', "'")
#: Not needed with newer biopython versions
#: Seems to be needed again?
del a.parent.child_dict[oldid]
a.parent.child_dict[a.id] = a
# log.debug("Replaced rosetta atoms. \n%s\n%s",
# chain.child_list[0].child_list,
# chain.child_list[0].child_dict
# )
return chain
def remove_disordered(chain):
for i, residue in enumerate(chain):
if hasattr(residue, "selected_child"):
new_res = residue.selected_child
chain.detach_child(residue.id)
chain.insert(i, new_res)
residue = new_res
for j, atom in enumerate(residue):
if hasattr(atom, "selected_child"):
new_atom = atom.selected_child
new_atom.altloc = " "
new_atom.occupancy = 1.0
new_atom.disordered_flag = 0
residue.detach_child(atom.id)
residue.insert(j, new_atom)
return chain
def load_structure(pdb_filename):
'''
Load a Bio.PDB.Structure object and return the largest chain.
This chain will be modified so that all hetatms are removed, modified
residues will be renamed to regular residues, etc...
'''
chain, mr, ir = get_biggest_chain(pdb_filename)
return clean_chain(chain, True)[0]
def clean_chain(chain, query_PDBeChem=False):
"""
Clean a pdb chain for further use with forgi.
It will be modified so that all hetatms are removed, modified
residues will be renamed to regular residues, residue ids will be positive integers, ...
:param chain: A Bio.PDB.Chain object
:param query_PDBeChem: If true, query the PDBeChem database whenever a
modified residue with unknown 3-letter code
is encountered.
:returns: A modified version of this chain
"""
chain, modifications = to_4_letter_alphabeth(chain, query_PDBeChem)
chain = rename_rosetta_atoms(chain)
chain = remove_disordered(chain)
return chain, modifications
def interchain_contacts(struct):
all_atoms = bpdb.Selection.unfold_entities(struct, 'A')
ns = bpdb.NeighborSearch(all_atoms)
pairs = ns.search_all(2.8)
ic_pairs = []
for (a1, a2) in pairs:
if a1.parent.parent != a2.parent.parent:
ic_pairs += [(a1, a2)]
return ic_pairs
def contains_rna(chain):
'''
Determine if a Bio.PDB.Chain structure corresponds to an RNA
molecule.
:param chain: A Bio.PDB.Chain molecule
:return: True if at least one residue in the chain is an RNA residue, False otherwise.
'''
for res in chain:
if res.resname.strip() in RNA_RESIDUES:
return True
return False
def is_protein(chain):
'''
Determine if a Bio.PDB.Chain structure corresponds to a protein
molecule.
:param chain: A Bio.PDB.Chain molecule
:return: True if it is a protein molecule, False otherwise
'''
for res in chain:
if res.resname in ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']:
return True
return False
|
ViennaRNA/forgi
|
forgi/threedee/utilities/pdb.py
|
Python
|
gpl-3.0
| 41,262
|
[
"Biopython"
] |
d21d43aa735112374187fa8bebf45c08b464a6b8b7f82208f1f74374b1869d1d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# scripts - backend to generate user and resource scripts
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""On demand headless script archive generator used as the base for
delivering the user and vgrid/resource scripts.
"""
import os
import zipfile
import time
import shared.returnvalues as returnvalues
import shared.userscriptgen as usergen
import shared.vgridscriptgen as vgridgen
from shared.base import client_id_dir
from shared.functional import validate_input_and_cert
from shared.handlers import correct_handler
from shared.init import initialize_main_variables, find_entry
sh_cmd_def = '/bin/bash'
python_cmd_def = '/usr/bin/python'
def signature():
"""Signature of the main function"""
defaults = {
'flags': [''],
'lang': [],
'flavor': [],
'sh_cmd': [sh_cmd_def],
'python_cmd': [python_cmd_def],
}
return ['link', defaults]
def usage(output_objects, valid_langs, valid_flavors):
"""Script usage help"""
output_objects.append({'object_type': 'sectionheader', 'text'
: 'Generator usage'})
output_objects.append({'object_type': 'text', 'text'
: 'SERVER_URL/scripts.py?[with_html=(true|false);][lang=(%s);[...]][flags=h;][flavor=(%s);[...]][sh_cmd=sh_path;][python_cmd=python_path;]'
% ('|'.join(valid_langs.keys()),
'|'.join(valid_flavors.keys()))})
output_objects.append({'object_type': 'text', 'text'
: '- each occurrence of lang adds the specified scripting language to the list of scripts to be generated.'
})
output_objects.append({'object_type': 'text', 'text'
: '- flags is a string of one character flags to be passed to the script'
})
output_objects.append({'object_type': 'text', 'text'
: '- each occurrence of flavor adds the specified flavor to the list of scripts to be generated.'
})
output_objects.append({'object_type': 'text', 'text'
: "- sh_cmd is the sh-interpreter command used on un*x if the scripts are run without specifying the interpreter (e.g. './migls.sh' rather than 'bash ./migls.sh')"
})
output_objects.append({'object_type': 'text', 'text'
: "- python_cmd is the python-interpreter command used on un*x if the scripts are run without specifying the interpreter (e.g. './migls.py' rather than 'python ./migls.py')"
})
return output_objects
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
client_dir = client_id_dir(client_id)
valid_langs = {'sh': 'shell', 'python': 'python'}
valid_flavors = {'user': 'userscriptgen',
'resource': 'vgridscriptgen'}
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
if not correct_handler('POST'):
output_objects.append(
{'object_type': 'error_text', 'text'
: 'Only accepting POST requests to prevent unintended updates'})
return (output_objects, returnvalues.CLIENT_ERROR)
flags = ''.join(accepted['flags'])
langs = accepted['lang']
flavor_list = accepted['flavor']
sh_cmd = accepted['sh_cmd'][-1]
python_cmd = accepted['python_cmd'][-1]
flavors = []
title_entry = find_entry(output_objects, 'title')
title_entry['text'] = 'Script generator'
output_objects.append({'object_type': 'header', 'text'
: 'Script generator'})
status = returnvalues.OK
# Please note that base_dir must end in slash to avoid access to other
# user dirs when own name is a prefix of another user name
base_dir = os.path.abspath(os.path.join(configuration.user_home,
client_dir)) + os.sep
if 'h' in flags:
output_objects = usage(output_objects, valid_langs,
valid_flavors)
return (output_objects, status)
# Filter out any invalid flavors to avoid illegal filenames, etc.
for f in flavor_list:
if f in valid_flavors.keys():
flavors.append(f)
# Default to user scripts
if not flavors:
if flavor_list:
output_objects.append({'object_type': 'text', 'text'
: 'No valid flavors specified - falling back to user scripts'
})
flavors = ['user']
# Generate scripts in a "unique" destination directory
# gmtime([seconds]) -> (tm_year, tm_mon, tm_day, tm_hour, tm_min,
# tm_sec, tm_wday, tm_yday, tm_isdst)
now = time.gmtime()
timestamp = '%.2d%.2d%.2d-%.2d%.2d%.2d' % (
now[2],
now[1],
now[0],
now[3],
now[4],
now[5],
)
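# Worked example of the format above (illustrative): for 25 March 2019,
# 14:23:01 UTC the tuple slots give day=25, month=3, year=2019, producing
# '25032019-142301' - note that '%.2d' only pads to a minimum of two digits,
# so the four-digit year is kept in full.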
if not langs:
# Add new languages here
languages = [(usergen.sh_lang, sh_cmd, usergen.sh_ext),
(usergen.python_lang, python_cmd,
usergen.python_ext)]
else:
languages = []
# check arguments
for lang in langs:
if lang == 'sh':
interpreter = sh_cmd
extension = usergen.sh_ext
elif lang == 'python':
interpreter = python_cmd
extension = usergen.python_ext
else:
output_objects.append({'object_type': 'warning', 'text'
: 'Unknown script language: %s - ignoring!'
% lang})
continue
languages.append((lang, interpreter, extension))
if not languages:
output_objects.append({'object_type': 'error_text', 'text'
: 'No valid languages specified - aborting script generation'
})
return (output_objects, returnvalues.CLIENT_ERROR)
for flavor in flavors:
script_dir = '%s-%s-scripts-%s' % (configuration.short_title, flavor, timestamp)
dest_dir = '%s%s' % (base_dir, script_dir)
if not os.path.isdir(dest_dir):
try:
os.mkdir(dest_dir)
except Exception, exc:
output_objects.append({'object_type': 'error_text',
'text'
: 'Failed to create destination directory (%s) - aborting script generation'
% exc})
return (output_objects, returnvalues.SYSTEM_ERROR)
for (lang, _, _) in languages:
output_objects.append({'object_type': 'text', 'text'
: 'Generating %s %s scripts in the %s subdirectory of your %s home directory'
% (lang, flavor, script_dir, configuration.short_title )})
# Generate all scripts
if flavor == 'user':
for op in usergen.script_ops:
# Dynamically look up and call the matching usergen.generate_<op> helper
generator = 'usergen.generate_%s' % op
eval(generator)(languages, dest_dir)
if usergen.shared_lib:
usergen.generate_lib(usergen.script_ops, languages,
dest_dir)
if usergen.test_script:
usergen.generate_test(languages, dest_dir)
elif flavor == 'resource':
for op in vgridgen.script_ops_single_arg:
vgridgen.generate_single_argument(op[0], op[1],
languages, dest_dir)
for op in vgridgen.script_ops_single_upload_arg:
vgridgen.generate_single_argument_upload(op[0], op[1],
op[2], languages, dest_dir)
for op in vgridgen.script_ops_two_args:
vgridgen.generate_two_arguments(op[0], op[1], op[2],
languages, dest_dir)
for op in vgridgen.script_ops_ten_args:
vgridgen.generate_ten_arguments(op[0], op[1], op[2], op[3],
op[4], op[5], op[6], op[7],
op[8], op[9], op[10],
languages, dest_dir)
else:
output_objects.append({'object_type': 'warning_text', 'text'
: 'Unknown flavor: %s' % flavor})
continue
# Always include license conditions file
usergen.write_license(dest_dir)
output_objects.append({'object_type': 'text', 'text': '... Done'
})
output_objects.append({'object_type': 'text', 'text'
: '%s %s scripts are now available in your %s home directory:'
% (configuration.short_title, flavor, configuration.short_title)})
output_objects.append({'object_type': 'link', 'text'
: 'View directory', 'destination'
: 'fileman.py?path=%s/' % script_dir})
# Create zip from generated dir
output_objects.append({'object_type': 'text', 'text'
: 'Generating zip archive of the %s %s scripts'
% (configuration.short_title, flavor)})
script_zip = script_dir + '.zip'
dest_zip = '%s%s' % (base_dir, script_zip)
# Force compression
zip_file = zipfile.ZipFile(dest_zip, 'w', zipfile.ZIP_DEFLATED)
# Directory write is not supported - add each file manually
for script in os.listdir(dest_dir):
zip_file.write(dest_dir + os.sep + script, script_dir
+ os.sep + script)
# Preserve executable flag in accordance with:
# http://mail.python.org/pipermail/pythonmac-sig/2005-March/013491.html
for zinfo in zip_file.filelist:
zinfo.create_system = 3
zip_file.close()
# Verify CRC
zip_file = zipfile.ZipFile(dest_zip, 'r')
err = zip_file.testzip()
zip_file.close()
if err:
output_objects.append({'object_type': 'error_text', 'text'
: 'Zip file integrity check failed! (%s)'
% err})
status = returnvalues.SYSTEM_ERROR
continue
output_objects.append({'object_type': 'text', 'text': '... Done'
})
output_objects.append({'object_type': 'text', 'text'
: 'Zip archive of the %s %s scripts is now available in your %s home directory'
% (configuration.short_title, flavor, configuration.short_title)})
output_objects.append({'object_type': 'link', 'text'
: 'Download zip archive', 'destination'
: os.path.join('..', client_dir,
script_zip)})
return (output_objects, status)
|
heromod/migrid
|
mig/shared/functionality/scripts.py
|
Python
|
gpl-2.0
| 12,310
|
[
"Brian"
] |
cfa847d46cc516e87af81b95dea615a896e2d1286cdd44786e524f4fcb63de63
|
# -*- coding: utf-8 -*-
import sys
sys.path[0:0] = [""]
import bson
import os
import pickle
import unittest
import uuid
import weakref
from datetime import datetime
from bson import DBRef, ObjectId
from tests import fixtures
from tests.fixtures import (PickleEmbedded, PickleTest, PickleSignalsTest,
PickleDyanmicEmbedded, PickleDynamicTest)
from mongoengine import *
from mongoengine.errors import (NotRegistered, InvalidDocumentError,
InvalidQueryError, NotUniqueError,
FieldDoesNotExist, SaveConditionError)
from mongoengine.queryset import NULLIFY, Q
from mongoengine.connection import get_db
from mongoengine.base import get_document
from mongoengine.context_managers import switch_db, query_counter
from mongoengine import signals
TEST_IMAGE_PATH = os.path.join(os.path.dirname(__file__),
'../fields/mongoengine.png')
__all__ = ("InstanceTest",)
class InstanceTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
self.db = get_db()
class Job(EmbeddedDocument):
name = StringField()
years = IntField()
class Person(Document):
name = StringField()
age = IntField()
job = EmbeddedDocumentField(Job)
non_field = True
meta = {"allow_inheritance": True}
self.Person = Person
self.Job = Job
def tearDown(self):
for collection in self.db.collection_names():
if 'system.' in collection:
continue
self.db.drop_collection(collection)
def assertDbEqual(self, docs):
self.assertEqual(
list(self.Person._get_collection().find().sort("id")),
sorted(docs, key=lambda doc: doc["_id"]))
def assertHasInstance(self, field, instance):
self.assertTrue(hasattr(field, "_instance"))
self.assertTrue(field._instance is not None)
if isinstance(field._instance, weakref.ProxyType):
self.assertTrue(field._instance.__eq__(instance))
else:
self.assertEqual(field._instance, instance)
def test_capped_collection(self):
"""Ensure that capped collections work properly.
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
'max_size': 4096,
}
Log.drop_collection()
# Ensure that the collection handles up to its maximum
for _ in range(10):
Log().save()
self.assertEqual(Log.objects.count(), 10)
# Check that extra documents don't increase the size
Log().save()
self.assertEqual(Log.objects.count(), 10)
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertEqual(options['max'], 10)
self.assertEqual(options['size'], 4096)
# Check that the document cannot be redefined with different options
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 11,
}
# Create the collection by accessing Document.objects
Log.objects
self.assertRaises(InvalidCollectionError, recreate_log_document)
Log.drop_collection()
def test_capped_collection_default(self):
"""Ensure that capped collections defaults work properly.
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
}
Log.drop_collection()
# Create a doc to create the collection
Log().save()
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertEqual(options['max'], 10)
self.assertEqual(options['size'], 10 * 2**20)
# Check that the document with default value can be recreated
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
}
# Create the collection by accessing Document.objects
Log.objects
recreate_log_document()
Log.drop_collection()
def test_capped_collection_no_max_size_problems(self):
"""Ensure that capped collections with odd max_size work properly.
MongoDB rounds up max_size to next multiple of 256, recreating a doc
with the same spec failed in mongoengine <0.10
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_size': 10000,
}
Log.drop_collection()
# Create a doc to create the collection
Log().save()
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertTrue(options['size'] >= 10000)
# Check that the document with odd max_size value can be recreated
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_size': 10000,
}
# Create the collection by accessing Document.objects
Log.objects
recreate_log_document()
Log.drop_collection()
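# Illustrative note (numbers computed by hand, not from the test run): with
# max_size=10000, MongoDB rounds the capped-collection size up to the next
# multiple of 256, i.e. 40 * 256 = 10240 bytes, which is why the assertion
# above only checks options['size'] >= 10000 rather than strict equality.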
def test_repr(self):
"""Ensure that unicode representation works
"""
class Article(Document):
title = StringField()
def __unicode__(self):
return self.title
doc = Article(title=u'привет мир')
self.assertEqual('<Article: привет мир>', repr(doc))
def test_repr_none(self):
"""Ensure None values handled correctly
"""
class Article(Document):
title = StringField()
def __str__(self):
return None
doc = Article(title=u'привет мир')
self.assertEqual('<Article: None>', repr(doc))
def test_queryset_resurrects_dropped_collection(self):
self.Person.drop_collection()
self.assertEqual([], list(self.Person.objects()))
class Actor(self.Person):
pass
# Ensure this works correctly with inherited classes
Actor.objects()
self.Person.drop_collection()
self.assertEqual([], list(Actor.objects()))
def test_polymorphic_references(self):
"""Ensure that the correct subclasses are returned from a query when
using references / generic references
"""
class Animal(Document):
meta = {'allow_inheritance': True}
class Fish(Animal):
pass
class Mammal(Animal):
pass
class Dog(Mammal):
pass
class Human(Mammal):
pass
class Zoo(Document):
animals = ListField(ReferenceField(Animal))
Zoo.drop_collection()
Animal.drop_collection()
Animal().save()
Fish().save()
Mammal().save()
Dog().save()
Human().save()
# Save a reference to each animal
zoo = Zoo(animals=Animal.objects)
zoo.save()
zoo.reload()
classes = [a.__class__ for a in Zoo.objects.first().animals]
self.assertEqual(classes, [Animal, Fish, Mammal, Dog, Human])
Zoo.drop_collection()
class Zoo(Document):
animals = ListField(GenericReferenceField(Animal))
# Save a reference to each animal
zoo = Zoo(animals=Animal.objects)
zoo.save()
zoo.reload()
classes = [a.__class__ for a in Zoo.objects.first().animals]
self.assertEqual(classes, [Animal, Fish, Mammal, Dog, Human])
Zoo.drop_collection()
Animal.drop_collection()
def test_reference_inheritance(self):
class Stats(Document):
created = DateTimeField(default=datetime.now)
meta = {'allow_inheritance': False}
class CompareStats(Document):
generated = DateTimeField(default=datetime.now)
stats = ListField(ReferenceField(Stats))
Stats.drop_collection()
CompareStats.drop_collection()
list_stats = []
for i in xrange(10):
s = Stats()
s.save()
list_stats.append(s)
cmp_stats = CompareStats(stats=list_stats)
cmp_stats.save()
self.assertEqual(list_stats, CompareStats.objects.first().stats)
def test_db_field_load(self):
"""Ensure we load data correctly
"""
class Person(Document):
name = StringField(required=True)
_rank = StringField(required=False, db_field="rank")
@property
def rank(self):
return self._rank or "Private"
Person.drop_collection()
Person(name="Jack", _rank="Corporal").save()
Person(name="Fred").save()
self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_db_embedded_doc_field_load(self):
"""Ensure we load embedded document data correctly
"""
class Rank(EmbeddedDocument):
title = StringField(required=True)
class Person(Document):
name = StringField(required=True)
rank_ = EmbeddedDocumentField(Rank,
required=False,
db_field='rank')
@property
def rank(self):
if self.rank_ is None:
return "Private"
return self.rank_.title
Person.drop_collection()
Person(name="Jack", rank_=Rank(title="Corporal")).save()
Person(name="Fred").save()
self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_custom_id_field(self):
"""Ensure that documents may be created with custom primary keys.
"""
class User(Document):
username = StringField(primary_key=True)
name = StringField()
meta = {'allow_inheritance': True}
User.drop_collection()
self.assertEqual(User._fields['username'].db_field, '_id')
self.assertEqual(User._meta['id_field'], 'username')
def create_invalid_user():
User(name='test').save() # no primary key field
self.assertRaises(ValidationError, create_invalid_user)
def define_invalid_user():
class EmailUser(User):
email = StringField(primary_key=True)
self.assertRaises(ValueError, define_invalid_user)
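# A subclass may still add regular (non primary key) fields without error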
class EmailUser(User):
email = StringField()
user = User(username='test', name='test user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'test')
self.assertEqual(user_obj.pk, 'test')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'test')
self.assertTrue('username' not in user_son)
User.drop_collection()
user = User(pk='mongo', name='mongo user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'mongo')
self.assertEqual(user_obj.pk, 'mongo')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'mongo')
self.assertTrue('username' not in user_son)
User.drop_collection()
def test_document_not_registered(self):
class Place(Document):
name = StringField()
meta = {'allow_inheritance': True}
class NicePlace(Place):
pass
Place.drop_collection()
Place(name="London").save()
NicePlace(name="Buckingham Palace").save()
# Mimic Place and NicePlace definitions being in a different file
# and the NicePlace model not being imported at query time.
from mongoengine.base import _document_registry
del(_document_registry['Place.NicePlace'])
def query_without_importing_nice_place():
print Place.objects.all()
self.assertRaises(NotRegistered, query_without_importing_nice_place)
def test_document_registry_regressions(self):
class Location(Document):
name = StringField()
meta = {'allow_inheritance': True}
class Area(Location):
location = ReferenceField('Location', dbref=True)
Location.drop_collection()
self.assertEqual(Area, get_document("Area"))
self.assertEqual(Area, get_document("Location.Area"))
def test_creation(self):
"""Ensure that document may be created using keyword arguments.
"""
person = self.Person(name="Test User", age=30)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 30)
def test_to_dbref(self):
"""Ensure that you can get a dbref of a document"""
person = self.Person(name="Test User", age=30)
self.assertRaises(OperationError, person.to_dbref)
person.save()
person.to_dbref()
def test_reload(self):
"""Ensure that attributes may be reloaded.
"""
person = self.Person(name="Test User", age=20)
person.save()
person_obj = self.Person.objects.first()
person_obj.name = "Mr Test User"
person_obj.age = 21
person_obj.save()
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 20)
person.reload('age')
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 21)
person.reload()
self.assertEqual(person.name, "Mr Test User")
self.assertEqual(person.age, 21)
person.reload()
self.assertEqual(person.name, "Mr Test User")
self.assertEqual(person.age, 21)
def test_reload_sharded(self):
class Animal(Document):
superphylum = StringField()
meta = {'shard_key': ('superphylum',)}
Animal.drop_collection()
doc = Animal(superphylum='Deuterostomia')
doc.save()
doc.reload()
Animal.drop_collection()
def test_reload_sharded_nested(self):
class SuperPhylum(EmbeddedDocument):
name = StringField()
class Animal(Document):
superphylum = EmbeddedDocumentField(SuperPhylum)
meta = {'shard_key': ('superphylum.name',)}
Animal.drop_collection()
doc = Animal(superphylum=SuperPhylum(name='Deuterostomia'))
doc.save()
doc.reload()
Animal.drop_collection()
def test_reload_referencing(self):
"""Ensures reloading updates weakrefs correctly
"""
class Embedded(EmbeddedDocument):
dict_field = DictField()
list_field = ListField()
class Doc(Document):
dict_field = DictField()
list_field = ListField()
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
doc = Doc()
doc.dict_field = {'hello': 'world'}
doc.list_field = ['1', 2, {'hello': 'world'}]
embedded_1 = Embedded()
embedded_1.dict_field = {'hello': 'world'}
embedded_1.list_field = ['1', 2, {'hello': 'world'}]
doc.embedded_field = embedded_1
doc.save()
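# An integer first argument to reload() is treated as the max_depth
# used when dereferencing references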
doc = doc.reload(10)
doc.list_field.append(1)
doc.dict_field['woot'] = "woot"
doc.embedded_field.list_field.append(1)
doc.embedded_field.dict_field['woot'] = "woot"
self.assertEqual(doc._get_changed_fields(), [
'list_field', 'dict_field.woot', 'embedded_field.list_field',
'embedded_field.dict_field.woot'])
doc.save()
self.assertEqual(len(doc.list_field), 4)
doc = doc.reload(10)
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(len(doc.list_field), 4)
self.assertEqual(len(doc.dict_field), 2)
self.assertEqual(len(doc.embedded_field.list_field), 4)
self.assertEqual(len(doc.embedded_field.dict_field), 2)
doc.list_field.append(1)
doc.save()
doc.dict_field['extra'] = 1
doc = doc.reload(10, 'list_field')
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(len(doc.list_field), 5)
self.assertEqual(len(doc.dict_field), 3)
self.assertEqual(len(doc.embedded_field.list_field), 4)
self.assertEqual(len(doc.embedded_field.dict_field), 2)
def test_reload_doesnt_exist(self):
class Foo(Document):
pass
f = Foo()
try:
f.reload()
except Foo.DoesNotExist:
pass
except Exception:
self.assertFalse("Threw wrong exception")
f.save()
f.delete()
try:
f.reload()
except Foo.DoesNotExist:
pass
except Exception:
self.assertFalse("Threw wrong exception")
def test_reload_of_non_strict_with_special_field_name(self):
"""Ensures reloading works for documents with meta strict == False
"""
class Post(Document):
meta = {
'strict': False
}
title = StringField()
items = ListField()
Post.drop_collection()
Post._get_collection().insert({
"title": "Items eclipse",
"items": ["more lorem", "even more ipsum"]
})
post = Post.objects.first()
post.reload()
self.assertEqual(post.title, "Items eclipse")
self.assertEqual(post.items, ["more lorem", "even more ipsum"])
def test_dictionary_access(self):
"""Ensure that dictionary-style field access works properly.
"""
person = self.Person(name='Test User', age=30, job=self.Job())
self.assertEqual(person['name'], 'Test User')
self.assertRaises(KeyError, person.__getitem__, 'salary')
self.assertRaises(KeyError, person.__setitem__, 'salary', 50)
person['name'] = 'Another User'
self.assertEqual(person['name'], 'Another User')
# len(person) == number of assigned fields + the id field
self.assertEqual(len(person), 5)
self.assertTrue('age' in person)
person.age = None
self.assertFalse('age' in person)
self.assertFalse('nationality' in person)
def test_embedded_document_to_mongo(self):
class Person(EmbeddedDocument):
name = StringField()
age = IntField()
meta = {"allow_inheritance": True}
class Employee(Person):
salary = IntField()
self.assertEqual(Person(name="Bob", age=35).to_mongo().keys(),
['_cls', 'name', 'age'])
self.assertEqual(
Employee(name="Bob", age=35, salary=0).to_mongo().keys(),
['_cls', 'name', 'age', 'salary'])
def test_embedded_document_to_mongo_id(self):
class SubDoc(EmbeddedDocument):
id = StringField(required=True)
sub_doc = SubDoc(id="abc")
self.assertEqual(sub_doc.to_mongo().keys(), ['id'])
def test_embedded_document(self):
"""Ensure that embedded documents are set up correctly.
"""
class Comment(EmbeddedDocument):
content = StringField()
self.assertTrue('content' in Comment._fields)
self.assertFalse('id' in Comment._fields)
def test_embedded_document_instance(self):
"""Ensure that embedded documents can reference parent instance
"""
class Embedded(EmbeddedDocument):
string = StringField()
class Doc(Document):
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
doc = Doc(embedded_field=Embedded(string="Hi"))
self.assertHasInstance(doc.embedded_field, doc)
doc.save()
doc = Doc.objects.get()
self.assertHasInstance(doc.embedded_field, doc)
def test_embedded_document_complex_instance(self):
"""Ensure that embedded documents in complex fields can reference
parent instance"""
class Embedded(EmbeddedDocument):
string = StringField()
class Doc(Document):
embedded_field = ListField(EmbeddedDocumentField(Embedded))
Doc.drop_collection()
doc = Doc(embedded_field=[Embedded(string="Hi")])
self.assertHasInstance(doc.embedded_field[0], doc)
doc.save()
doc = Doc.objects.get()
self.assertHasInstance(doc.embedded_field[0], doc)
def test_embedded_document_complex_instance_no_use_db_field(self):
"""Ensure that use_db_field is propagated to list of Emb Docs
"""
class Embedded(EmbeddedDocument):
string = StringField(db_field='s')
class Doc(Document):
embedded_field = ListField(EmbeddedDocumentField(Embedded))
d = Doc(embedded_field=[Embedded(string="Hi")]).to_mongo(
use_db_field=False).to_dict()
self.assertEqual(d['embedded_field'], [{'string': 'Hi'}])
def test_instance_is_set_on_setattr(self):
class Email(EmbeddedDocument):
email = EmailField()
class Account(Document):
email = EmbeddedDocumentField(Email)
Account.drop_collection()
acc = Account()
acc.email = Email(email='test@example.com')
self.assertHasInstance(acc._data["email"], acc)
acc.save()
acc1 = Account.objects.first()
self.assertHasInstance(acc1._data["email"], acc1)
def test_instance_is_set_on_setattr_on_embedded_document_list(self):
class Email(EmbeddedDocument):
email = EmailField()
class Account(Document):
emails = EmbeddedDocumentListField(Email)
Account.drop_collection()
acc = Account()
acc.emails = [Email(email='test@example.com')]
self.assertHasInstance(acc._data["emails"][0], acc)
acc.save()
acc1 = Account.objects.first()
self.assertHasInstance(acc1._data["emails"][0], acc1)
def test_document_clean(self):
class TestDocument(Document):
status = StringField()
pub_date = DateTimeField()
def clean(self):
if self.status == 'draft' and self.pub_date is not None:
msg = 'Draft entries may not have a publication date.'
raise ValidationError(msg)
# Set the pub_date for published items if not set.
if self.status == 'published' and self.pub_date is None:
self.pub_date = datetime.now()
TestDocument.drop_collection()
t = TestDocument(status="draft", pub_date=datetime.now())
try:
t.save()
except ValidationError, e:
expect_msg = "Draft entries may not have a publication date."
self.assertTrue(expect_msg in e.message)
self.assertEqual(e.to_dict(), {'__all__': expect_msg})
t = TestDocument(status="published")
t.save(clean=False)
self.assertEqual(t.pub_date, None)
t = TestDocument(status="published")
t.save(clean=True)
self.assertEqual(type(t.pub_date), datetime)
def test_document_embedded_clean(self):
class TestEmbeddedDocument(EmbeddedDocument):
x = IntField(required=True)
y = IntField(required=True)
z = IntField(required=True)
meta = {'allow_inheritance': False}
def clean(self):
if self.z:
if self.z != self.x + self.y:
raise ValidationError('Value of z != x + y')
else:
self.z = self.x + self.y
class TestDocument(Document):
doc = EmbeddedDocumentField(TestEmbeddedDocument)
status = StringField()
TestDocument.drop_collection()
t = TestDocument(doc=TestEmbeddedDocument(x=10, y=25, z=15))
try:
t.save()
except ValidationError, e:
expect_msg = "Value of z != x + y"
self.assertTrue(expect_msg in e.message)
self.assertEqual(e.to_dict(), {'doc': {'__all__': expect_msg}})
t = TestDocument(doc=TestEmbeddedDocument(x=10, y=25)).save()
self.assertEqual(t.doc.z, 35)
# Assert that save(clean=False) does not raise
t = TestDocument(doc=TestEmbeddedDocument(x=15, y=35, z=5))
t.save(clean=False)
def test_modify_empty(self):
doc = self.Person(name="bob", age=10).save()
self.assertRaises(
InvalidDocumentError, lambda: self.Person().modify(set__age=10))
self.assertDbEqual([dict(doc.to_mongo())])
def test_modify_invalid_query(self):
doc1 = self.Person(name="bob", age=10).save()
doc2 = self.Person(name="jim", age=20).save()
docs = [dict(doc1.to_mongo()), dict(doc2.to_mongo())]
self.assertRaises(
InvalidQueryError,
lambda: doc1.modify(dict(id=doc2.id), set__value=20))
self.assertDbEqual(docs)
def test_modify_match_another_document(self):
doc1 = self.Person(name="bob", age=10).save()
doc2 = self.Person(name="jim", age=20).save()
docs = [dict(doc1.to_mongo()), dict(doc2.to_mongo())]
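# modify() returns False when the extra query doesn't match this
# document, leaving both the instance and the database untouched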
assert not doc1.modify(dict(name=doc2.name), set__age=100)
self.assertDbEqual(docs)
def test_modify_not_exists(self):
doc1 = self.Person(name="bob", age=10).save()
doc2 = self.Person(id=ObjectId(), name="jim", age=20)
docs = [dict(doc1.to_mongo())]
assert not doc2.modify(dict(name=doc2.name), set__age=100)
self.assertDbEqual(docs)
def test_modify_update(self):
other_doc = self.Person(name="bob", age=10).save()
doc = self.Person(
name="jim", age=20, job=self.Job(name="10gen", years=3)).save()
doc_copy = doc._from_son(doc.to_mongo())
# these changes must go away
doc.name = "liza"
doc.job.name = "Google"
doc.job.years = 3
assert doc.modify(
set__age=21, set__job__name="MongoDB", unset__job__years=True)
doc_copy.age = 21
doc_copy.job.name = "MongoDB"
del doc_copy.job.years
assert doc.to_json() == doc_copy.to_json()
assert doc._get_changed_fields() == []
self.assertDbEqual([dict(other_doc.to_mongo()), dict(doc.to_mongo())])
def test_save(self):
"""Ensure that a document may be saved in the database.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(person_obj['name'], 'Test User')
self.assertEqual(person_obj['age'], 30)
self.assertEqual(person_obj['_id'], person.id)
# Test skipping validation on save
class Recipient(Document):
email = EmailField(required=True)
recipient = Recipient(email='root@localhost')
self.assertRaises(ValidationError, recipient.save)
try:
recipient.save(validate=False)
except ValidationError:
self.fail()
def test_save_to_a_value_that_equates_to_false(self):
class Thing(EmbeddedDocument):
count = IntField()
class User(Document):
thing = EmbeddedDocumentField(Thing)
User.drop_collection()
user = User(thing=Thing(count=1))
user.save()
user.reload()
user.thing.count = 0
user.save()
user.reload()
self.assertEqual(user.thing.count, 0)
def test_save_max_recursion_not_hit(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
friend = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p1.friend = p2
p1.save()
# Confirm it can save, resetting the changed fields, without hitting
# a max recursion error
p0 = Person.objects.first()
p0.name = 'wpjunior'
p0.save()
def test_save_max_recursion_not_hit_with_file_field(self):
class Foo(Document):
name = StringField()
picture = FileField()
bar = ReferenceField('self')
Foo.drop_collection()
a = Foo(name='hello').save()
a.bar = a
with open(TEST_IMAGE_PATH, 'rb') as test_image:
a.picture = test_image
a.save()
# Confirm it can save, resetting the changed fields, without hitting
# a max recursion error
b = Foo.objects.with_id(a.id)
b.name = 'world'
b.save()
self.assertEqual(b.picture, b.bar.picture)
self.assertEqual(b.picture, b.bar.bar.picture)
def test_save_cascades(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save(cascade=True)
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_cascade_kwargs(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p1.name = "Daddy Wilson"
p2.save(force_insert=True, cascade_kwargs={"force_insert": False})
p1.reload()
p2.reload()
self.assertEqual(p1.name, p2.parent.name)
def test_save_cascade_meta_false(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
meta = {'cascade': False}
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertNotEqual(p1.name, p.parent.name)
p.save(cascade=True)
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_cascade_meta_true(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
meta = {'cascade': False}
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save(cascade=True)
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertNotEqual(p1.name, p.parent.name)
def test_save_cascades_generically(self):
class Person(Document):
name = StringField()
parent = GenericReferenceField()
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertNotEqual(p1.name, p.parent.name)
p.save(cascade=True)
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_atomicity_condition(self):
class Widget(Document):
toggle = BooleanField(default=False)
count = IntField(default=0)
save_id = UUIDField()
def flip(widget):
widget.toggle = not widget.toggle
widget.count += 1
def UUID(i):
return uuid.UUID(int=i)
Widget.drop_collection()
w1 = Widget(toggle=False, save_id=UUID(1))
# save_condition is ignored on new record creation
w1.save(save_condition={'save_id': UUID(42)})
w1.reload()
self.assertFalse(w1.toggle)
self.assertEqual(w1.save_id, UUID(1))
self.assertEqual(w1.count, 0)
# a mismatched save_condition prevents the save and raises an exception
flip(w1)
self.assertTrue(w1.toggle)
self.assertEqual(w1.count, 1)
self.assertRaises(SaveConditionError,
w1.save, save_condition={'save_id': UUID(42)})
w1.reload()
self.assertFalse(w1.toggle)
self.assertEqual(w1.count, 0)
# matched save_condition allows save
flip(w1)
self.assertTrue(w1.toggle)
self.assertEqual(w1.count, 1)
w1.save(save_condition={'save_id': UUID(1)})
w1.reload()
self.assertTrue(w1.toggle)
self.assertEqual(w1.count, 1)
# save_condition can be used to ensure atomic read & updates
# i.e., prevent interleaved reads and writes from separate contexts
w2 = Widget.objects.get()
self.assertEqual(w1, w2)
old_id = w1.save_id
flip(w1)
w1.save_id = UUID(2)
w1.save(save_condition={'save_id': old_id})
w1.reload()
self.assertFalse(w1.toggle)
self.assertEqual(w1.count, 2)
flip(w2)
flip(w2)
self.assertRaises(SaveConditionError,
w2.save, save_condition={'save_id': old_id})
w2.reload()
self.assertFalse(w2.toggle)
self.assertEqual(w2.count, 2)
# save_condition uses mongoengine-style operator syntax
flip(w1)
w1.save(save_condition={'count__lt': w1.count})
w1.reload()
self.assertTrue(w1.toggle)
self.assertEqual(w1.count, 3)
flip(w1)
self.assertRaises(SaveConditionError,
w1.save, save_condition={'count__gte': w1.count})
w1.reload()
self.assertTrue(w1.toggle)
self.assertEqual(w1.count, 3)
def test_update(self):
"""Ensure that an existing document is updated instead of be
overwritten."""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Create same person object, with same id, without age
same_person = self.Person(name='Test')
same_person.id = person.id
same_person.save()
# Confirm only one object
self.assertEqual(self.Person.objects.count(), 1)
# reload
person.reload()
same_person.reload()
# Confirm the same
self.assertEqual(person, same_person)
self.assertEqual(person.name, same_person.name)
self.assertEqual(person.age, same_person.age)
# Confirm the saved values
self.assertEqual(person.name, 'Test')
self.assertEqual(person.age, 30)
# Test that only() / exclude() update just the loaded fields
person = self.Person.objects.only('name').get()
person.name = 'User'
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 30)
# test exclude only updates set fields
person = self.Person.objects.exclude('name').get()
person.age = 21
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
# Test that only() / exclude() still allow setting fields outside the projection
person = self.Person.objects.only('name').get()
person.name = 'Test'
person.age = 30
person.save()
person.reload()
self.assertEqual(person.name, 'Test')
self.assertEqual(person.age, 30)
# test exclude only updates set fields
person = self.Person.objects.exclude('name').get()
person.name = 'User'
person.age = 21
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
# Confirm that clearing non-required fields removes them
person = self.Person.objects.exclude('name').get()
person.age = None
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, None)
person = self.Person.objects.get()
person.name = None
person.age = None
person.save()
person.reload()
self.assertEqual(person.name, None)
self.assertEqual(person.age, None)
def test_inserts_if_you_set_the_pk(self):
p1 = self.Person(name='p1', id=bson.ObjectId()).save()
p2 = self.Person(name='p2')
p2.id = bson.ObjectId()
p2.save()
self.assertEqual(2, self.Person.objects.count())
def test_can_save_if_not_included(self):
class EmbeddedDoc(EmbeddedDocument):
pass
class Simple(Document):
pass
class Doc(Document):
string_field = StringField(default='1')
int_field = IntField(default=1)
float_field = FloatField(default=1.1)
boolean_field = BooleanField(default=True)
datetime_field = DateTimeField(default=datetime.now)
embedded_document_field = EmbeddedDocumentField(
EmbeddedDoc, default=lambda: EmbeddedDoc())
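# Callable (lambda) defaults are evaluated per document, so mutable
# default values are not shared between instances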
list_field = ListField(default=lambda: [1, 2, 3])
dict_field = DictField(default=lambda: {"hello": "world"})
objectid_field = ObjectIdField(default=bson.ObjectId)
reference_field = ReferenceField(Simple, default=lambda:
Simple().save())
map_field = MapField(IntField(), default=lambda: {"simple": 1})
decimal_field = DecimalField(default=1.0)
complex_datetime_field = ComplexDateTimeField(default=datetime.now)
url_field = URLField(default="http://mongoengine.org")
dynamic_field = DynamicField(default=1)
generic_reference_field = GenericReferenceField(
default=lambda: Simple().save())
sorted_list_field = SortedListField(IntField(),
default=lambda: [1, 2, 3])
email_field = EmailField(default="ross@example.com")
geo_point_field = GeoPointField(default=lambda: [1, 2])
sequence_field = SequenceField()
uuid_field = UUIDField(default=uuid.uuid4)
generic_embedded_document_field = GenericEmbeddedDocumentField(
default=lambda: EmbeddedDoc())
Simple.drop_collection()
Doc.drop_collection()
Doc().save()
my_doc = Doc.objects.only("string_field").first()
my_doc.string_field = "string"
my_doc.save()
my_doc = Doc.objects.get(string_field="string")
self.assertEqual(my_doc.string_field, "string")
self.assertEqual(my_doc.int_field, 1)
def test_document_update(self):
def update_not_saved_raises():
person = self.Person(name='dcrosta')
person.update(set__name='Dan Crosta')
self.assertRaises(OperationError, update_not_saved_raises)
author = self.Person(name='dcrosta')
author.save()
author.update(set__name='Dan Crosta')
author.reload()
p1 = self.Person.objects.first()
self.assertEqual(p1.name, author.name)
def update_no_value_raises():
person = self.Person.objects.first()
person.update()
self.assertRaises(OperationError, update_no_value_raises)
def update_no_op_should_default_to_set():
person = self.Person.objects.first()
person.update(name="Dan")
person.reload()
return person.name
self.assertEqual("Dan", update_no_op_should_default_to_set())
def test_update_unique_field(self):
class Doc(Document):
name = StringField(unique=True)
doc1 = Doc(name="first").save()
doc2 = Doc(name="second").save()
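# Updating doc2 to doc1's name violates the unique index on name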
self.assertRaises(NotUniqueError, lambda:
doc2.update(set__name=doc1.name))
def test_embedded_update(self):
"""
Test update on `EmbeddedDocumentField` fields
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site = Site.objects.first()
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_embedded_update_db_field(self):
"""
Test update on `EmbeddedDocumentField` fields when db_field is other
than default.
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
db_field="page_log_message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site = Site.objects.first()
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_save_only_changed_fields(self):
"""Ensure save only sets / unsets changed fields
"""
class User(self.Person):
active = BooleanField(default=True)
User.drop_collection()
# Create person object and save it to the database
user = User(name='Test User', age=30, active=True)
user.save()
user.reload()
# Simulate a race condition
same_person = self.Person.objects.get()
same_person.active = False
user.age = 21
user.save()
same_person.name = 'User'
same_person.save()
person = self.Person.objects.get()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
self.assertEqual(person.active, False)
def test_query_count_when_saving(self):
"""Ensure references don't cause extra fetches when saving"""
class Organization(Document):
name = StringField()
class User(Document):
name = StringField()
orgs = ListField(ReferenceField('Organization'))
class Feed(Document):
name = StringField()
class UserSubscription(Document):
name = StringField()
user = ReferenceField(User)
feed = ReferenceField(Feed)
Organization.drop_collection()
User.drop_collection()
Feed.drop_collection()
UserSubscription.drop_collection()
o1 = Organization(name="o1").save()
o2 = Organization(name="o2").save()
u1 = User(name="Ross", orgs=[o1, o2]).save()
f1 = Feed(name="MongoEngine").save()
sub = UserSubscription(user=u1, feed=f1).save()
user = User.objects.first()
# Even if stored internally as ObjectIds, mongoengine uses DBRefs here,
# as ObjectIds aren't automatically dereferenced
self.assertTrue(isinstance(user._data['orgs'][0], DBRef))
self.assertTrue(isinstance(user.orgs[0], Organization))
self.assertTrue(isinstance(user._data['orgs'][0], Organization))
# Changing a value
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription.objects.first()
self.assertEqual(q, 1)
sub.name = "Test Sub"
sub.save()
self.assertEqual(q, 2)
# Changing a value that will cascade
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription.objects.first()
self.assertEqual(q, 1)
sub.user.name = "Test"
self.assertEqual(q, 2)
sub.save(cascade=True)
self.assertEqual(q, 3)
# Changing a value and one that will cascade
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription.objects.first()
sub.name = "Test Sub 2"
self.assertEqual(q, 1)
sub.user.name = "Test 2"
self.assertEqual(q, 2)
sub.save(cascade=True)
self.assertEqual(q, 4) # One for the UserSub and one for the User
# Saving with just the refs
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription(user=u1.pk, feed=f1.pk)
self.assertEqual(q, 0)
sub.save()
self.assertEqual(q, 1)
# Saving with just the refs on a ListField
with query_counter() as q:
self.assertEqual(q, 0)
User(name="Bob", orgs=[o1.pk, o2.pk]).save()
self.assertEqual(q, 1)
# Saving new objects
with query_counter() as q:
self.assertEqual(q, 0)
user = User.objects.first()
self.assertEqual(q, 1)
feed = Feed.objects.first()
self.assertEqual(q, 2)
sub = UserSubscription(user=user, feed=feed)
self.assertEqual(q, 2) # Check no change
sub.save()
self.assertEqual(q, 3)
def test_set_unset_one_operation(self):
"""Ensure that $set and $unset actions are performed in the same
operation.
"""
class FooBar(Document):
foo = StringField(default=None)
bar = StringField(default=None)
FooBar.drop_collection()
# write an entity with a single prop
foo = FooBar(foo='foo').save()
self.assertEqual(foo.foo, 'foo')
del foo.foo
foo.bar = 'bar'
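# The single save below must combine the $unset of foo and the $set of bar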
with query_counter() as q:
self.assertEqual(0, q)
foo.save()
self.assertEqual(1, q)
def test_save_only_changed_fields_recursive(self):
"""Ensure save only sets / unsets changed fields
"""
class Comment(EmbeddedDocument):
published = BooleanField(default=True)
class User(self.Person):
comments_dict = DictField()
comments = ListField(EmbeddedDocumentField(Comment))
active = BooleanField(default=True)
User.drop_collection()
# Create person object and save it to the database
person = User(name='Test User', age=30, active=True)
person.comments.append(Comment())
person.save()
person.reload()
person = self.Person.objects.get()
self.assertTrue(person.comments[0].published)
person.comments[0].published = False
person.save()
person = self.Person.objects.get()
self.assertFalse(person.comments[0].published)
# Simple dict with an embedded document value
person.comments_dict['first_post'] = Comment()
person.save()
person = self.Person.objects.get()
self.assertTrue(person.comments_dict['first_post'].published)
person.comments_dict['first_post'].published = False
person.save()
person = self.Person.objects.get()
self.assertFalse(person.comments_dict['first_post'].published)
def test_delete(self):
"""Ensure that document may be deleted using the delete method.
"""
person = self.Person(name="Test User", age=30)
person.save()
self.assertEqual(self.Person.objects.count(), 1)
person.delete()
self.assertEqual(self.Person.objects.count(), 0)
def test_save_custom_id(self):
"""Ensure that a document may be saved with a custom _id.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30,
id='497ce96f395f2f052a494fd4')
person.save()
# Ensure that the object is in the database with the correct _id
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_custom_pk(self):
"""
Ensure that a document may be saved with a custom _id using pk alias.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30,
pk='497ce96f395f2f052a494fd4')
person.save()
# Ensure that the object is in the database with the correct _id
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_list(self):
"""Ensure that a list field may be properly saved.
"""
class Comment(EmbeddedDocument):
content = StringField()
class BlogPost(Document):
content = StringField()
comments = ListField(EmbeddedDocumentField(Comment))
tags = ListField(StringField())
BlogPost.drop_collection()
post = BlogPost(content='Went for a walk today...')
post.tags = tags = ['fun', 'leisure']
comments = [Comment(content='Good for you'), Comment(content='Yay.')]
post.comments = comments
post.save()
collection = self.db[BlogPost._get_collection_name()]
post_obj = collection.find_one()
self.assertEqual(post_obj['tags'], tags)
for comment_obj, comment in zip(post_obj['comments'], comments):
self.assertEqual(comment_obj['content'], comment['content'])
BlogPost.drop_collection()
def test_list_search_by_embedded(self):
class User(Document):
username = StringField(required=True)
meta = {'allow_inheritance': False}
class Comment(EmbeddedDocument):
comment = StringField()
user = ReferenceField(User,
required=True)
meta = {'allow_inheritance': False}
class Page(Document):
comments = ListField(EmbeddedDocumentField(Comment))
meta = {'allow_inheritance': False,
'indexes': [
{'fields': ['comments.user']}
]}
User.drop_collection()
Page.drop_collection()
u1 = User(username="wilson")
u1.save()
u2 = User(username="rozza")
u2.save()
u3 = User(username="hmarr")
u3.save()
p1 = Page(comments=[Comment(user=u1, comment="Its very good"),
Comment(user=u2, comment="Hello world"),
Comment(user=u3, comment="Ping Pong"),
Comment(user=u1, comment="I like a beer")])
p1.save()
p2 = Page(comments=[Comment(user=u1, comment="Its very good"),
Comment(user=u2, comment="Hello world")])
p2.save()
p3 = Page(comments=[Comment(user=u3, comment="Its very good")])
p3.save()
p4 = Page(comments=[Comment(user=u2, comment="Heavy Metal song")])
p4.save()
self.assertEqual(
[p1, p2],
list(Page.objects.filter(comments__user=u1)))
self.assertEqual(
[p1, p2, p4],
list(Page.objects.filter(comments__user=u2)))
self.assertEqual(
[p1, p3],
list(Page.objects.filter(comments__user=u3)))
def test_save_embedded_document(self):
"""Ensure that a document with an embedded document field may be
saved in the database.
"""
class EmployeeDetails(EmbeddedDocument):
position = StringField()
class Employee(self.Person):
salary = IntField()
details = EmbeddedDocumentField(EmployeeDetails)
# Create employee object and save it to the database
employee = Employee(name='Test Employee', age=50, salary=20000)
employee.details = EmployeeDetails(position='Developer')
employee.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
employee_obj = collection.find_one({'name': 'Test Employee'})
self.assertEqual(employee_obj['name'], 'Test Employee')
self.assertEqual(employee_obj['age'], 50)
# Ensure that the 'details' embedded object saved correctly
self.assertEqual(employee_obj['details']['position'], 'Developer')
def test_embedded_update_after_save(self):
"""
Test update of `EmbeddedDocumentField` attached to a newly saved
document.
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_updating_an_embedded_document(self):
"""Ensure that a document with an embedded document field may be
saved in the database.
"""
class EmployeeDetails(EmbeddedDocument):
position = StringField()
class Employee(self.Person):
salary = IntField()
details = EmbeddedDocumentField(EmployeeDetails)
# Create employee object and save it to the database
employee = Employee(name='Test Employee', age=50, salary=20000)
employee.details = EmployeeDetails(position='Developer')
employee.save()
# Test updating an embedded document
promoted_employee = Employee.objects.get(name='Test Employee')
promoted_employee.details.position = 'Senior Developer'
promoted_employee.save()
promoted_employee.reload()
self.assertEqual(promoted_employee.name, 'Test Employee')
self.assertEqual(promoted_employee.age, 50)
# Ensure that the 'details' embedded object saved correctly
self.assertEqual(
promoted_employee.details.position, 'Senior Developer')
# Test removal
promoted_employee.details = None
promoted_employee.save()
promoted_employee.reload()
self.assertEqual(promoted_employee.details, None)
def test_object_mixins(self):
class NameMixin(object):
name = StringField()
class Foo(EmbeddedDocument, NameMixin):
quantity = IntField()
self.assertEqual(['name', 'quantity'], sorted(Foo._fields.keys()))
class Bar(Document, NameMixin):
widgets = StringField()
self.assertEqual(['id', 'name', 'widgets'], sorted(Bar._fields.keys()))
def test_mixin_inheritance(self):
class BaseMixIn(object):
count = IntField()
data = StringField()
class DoubleMixIn(BaseMixIn):
comment = StringField()
class TestDoc(Document, DoubleMixIn):
age = IntField()
TestDoc.drop_collection()
t = TestDoc(count=12, data="test",
comment="great!", age=19)
t.save()
t = TestDoc.objects.first()
self.assertEqual(t.age, 19)
self.assertEqual(t.comment, "great!")
self.assertEqual(t.data, "test")
self.assertEqual(t.count, 12)
def test_save_reference(self):
"""Ensure that a document reference field may be saved in the database.
"""
class BlogPost(Document):
meta = {'collection': 'blogpost_1'}
content = StringField()
author = ReferenceField(self.Person)
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV today... how exciting.')
# Should only reference author when saving
post.author = author
post.save()
post_obj = BlogPost.objects.first()
# Test laziness
self.assertTrue(isinstance(post_obj._data['author'],
bson.DBRef))
self.assertTrue(isinstance(post_obj.author, self.Person))
self.assertEqual(post_obj.author.name, 'Test User')
# Ensure that the dereferenced object may be changed and saved
post_obj.author.age = 25
post_obj.author.save()
author = list(self.Person.objects(name='Test User'))[-1]
self.assertEqual(author.age, 25)
BlogPost.drop_collection()
def test_duplicate_db_fields_raise_invalid_document_error(self):
"""Ensure a InvalidDocumentError is thrown if duplicate fields
declare the same db_field"""
def throw_invalid_document_error():
class Foo(Document):
name = StringField()
name2 = StringField(db_field='name')
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def test_invalid_son(self):
"""Raise an error if loading invalid data"""
class Occurrence(EmbeddedDocument):
number = IntField()
class Word(Document):
stem = StringField()
count = IntField(default=1)
forms = ListField(StringField(), default=list)
occurs = ListField(EmbeddedDocumentField(Occurrence), default=list)
def raise_invalid_document():
Word._from_son({'stem': [1, 2, 3], 'forms': 1, 'count': 'one',
'occurs': {"hello": None}})
self.assertRaises(InvalidDocumentError, raise_invalid_document)
def test_reverse_delete_rule_cascade_and_nullify(self):
"""Ensure that a referenced document is also deleted upon deletion.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
reviewer = self.Person(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.reviewer = reviewer
post.save()
reviewer.delete()
# No effect on the BlogPost
self.assertEqual(BlogPost.objects.count(), 1)
self.assertEqual(BlogPost.objects.get().reviewer, None)
# Delete the Person, which should lead to deletion of the BlogPost, too
author.delete()
self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_with_custom_id_field(self):
"""Ensure that a referenced document with custom primary key
is also deleted upon deletion.
"""
class User(Document):
name = StringField(primary_key=True)
class Book(Document):
author = ReferenceField(User, reverse_delete_rule=CASCADE)
reviewer = ReferenceField(User, reverse_delete_rule=NULLIFY)
User.drop_collection()
Book.drop_collection()
user = User(name='Mike').save()
reviewer = User(name='John').save()
book = Book(author=user, reviewer=reviewer).save()
reviewer.delete()
self.assertEqual(Book.objects.count(), 1)
self.assertEqual(Book.objects.get().reviewer, None)
user.delete()
self.assertEqual(Book.objects.count(), 0)
def test_reverse_delete_rule_with_shared_id_among_collections(self):
"""Ensure that cascade delete rule doesn't mix id among collections.
"""
class User(Document):
id = IntField(primary_key=True)
class Book(Document):
id = IntField(primary_key=True)
author = ReferenceField(User, reverse_delete_rule=CASCADE)
User.drop_collection()
Book.drop_collection()
user_1 = User(id=1).save()
user_2 = User(id=2).save()
book_1 = Book(id=1, author=user_2).save()
book_2 = Book(id=2, author=user_1).save()
user_2.delete()
# Deleting user_2 should also delete book_1 but not book_2
self.assertEqual(Book.objects.count(), 1)
self.assertEqual(Book.objects.get(), book_2)
user_3 = User(id=3).save()
book_3 = Book(id=3, author=user_3).save()
user_3.delete()
# Deleting user_3 should also delete book_3
self.assertEqual(Book.objects.count(), 1)
self.assertEqual(Book.objects.get(), book_2)
def test_reverse_delete_rule_with_document_inheritance(self):
"""Ensure that a referenced document is also deleted upon deletion
of a child document.
"""
class Writer(self.Person):
pass
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = Writer(name='Test User')
author.save()
reviewer = Writer(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.reviewer = reviewer
post.save()
reviewer.delete()
self.assertEqual(BlogPost.objects.count(), 1)
self.assertEqual(BlogPost.objects.get().reviewer, None)
# Deleting the Writer should lead to deletion of the BlogPost
author.delete()
self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_cascade_and_nullify_complex_field(self):
"""Ensure that a referenced document is also deleted upon deletion for
complex fields.
"""
class BlogPost(Document):
content = StringField()
authors = ListField(ReferenceField(
self.Person, reverse_delete_rule=CASCADE))
reviewers = ListField(ReferenceField(
self.Person, reverse_delete_rule=NULLIFY))
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
reviewer = self.Person(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.authors = [author]
post.reviewers = [reviewer]
post.save()
# Deleting the reviewer should have no effect on the BlogPost
reviewer.delete()
self.assertEqual(BlogPost.objects.count(), 1)
self.assertEqual(BlogPost.objects.get().reviewers, [])
# Delete the Person, which should lead to deletion of the BlogPost, too
author.delete()
self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_cascade_triggers_pre_delete_signal(self):
""" ensure the pre_delete signal is triggered upon a cascading deletion
setup a blog post with content, an author and editor
delete the author which triggers deletion of blogpost via cascade
blog post's pre_delete signal alters an editor attribute
"""
class Editor(self.Person):
review_queue = IntField(default=0)
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
editor = ReferenceField(Editor)
@classmethod
def pre_delete(cls, sender, document, **kwargs):
# decrement the docs-to-review count
document.editor.update(dec__review_queue=1)
signals.pre_delete.connect(BlogPost.pre_delete, sender=BlogPost)
self.Person.drop_collection()
BlogPost.drop_collection()
Editor.drop_collection()
author = self.Person(name='Will S.').save()
editor = Editor(name='Max P.', review_queue=1).save()
BlogPost(content='wrote some books', author=author,
editor=editor).save()
# delete the author; the post is also deleted due to the CASCADE rule
author.delete()
# the pre-delete signal should have decremented the editor's queue
editor = Editor.objects(name='Max P.').get()
self.assertEqual(editor.review_queue, 0)
def test_two_way_reverse_delete_rule(self):
"""Ensure that Bi-Directional relationships work with
reverse_delete_rule
"""
class Bar(Document):
content = StringField()
foo = ReferenceField('Foo')
class Foo(Document):
content = StringField()
bar = ReferenceField(Bar)
Bar.register_delete_rule(Foo, 'bar', NULLIFY)
Foo.register_delete_rule(Bar, 'foo', NULLIFY)
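# register_delete_rule is used here because each rule refers to a class
# that isn't defined yet at class-definition time (circular references)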
Bar.drop_collection()
Foo.drop_collection()
b = Bar(content="Hello")
b.save()
f = Foo(content="world", bar=b)
f.save()
b.foo = f
b.save()
f.delete()
self.assertEqual(Bar.objects.count(), 1) # No effect on the Bar
self.assertEqual(Bar.objects.get().foo, None)
def test_invalid_reverse_delete_rule_raise_errors(self):
def throw_invalid_document_error():
class Blog(Document):
content = StringField()
authors = MapField(ReferenceField(
self.Person, reverse_delete_rule=CASCADE))
reviewers = DictField(
field=ReferenceField(
self.Person,
reverse_delete_rule=NULLIFY))
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def throw_invalid_document_error_embedded():
class Parents(EmbeddedDocument):
father = ReferenceField('Person', reverse_delete_rule=DENY)
mother = ReferenceField('Person', reverse_delete_rule=DENY)
self.assertRaises(
InvalidDocumentError, throw_invalid_document_error_embedded)
def test_reverse_delete_rule_cascade_recurs(self):
"""Ensure that a chain of documents is also deleted upon cascaded
deletion.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
class Comment(Document):
text = StringField()
post = ReferenceField(BlogPost, reverse_delete_rule=CASCADE)
self.Person.drop_collection()
BlogPost.drop_collection()
Comment.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.save()
comment = Comment(text='Kudos.')
comment.post = post
comment.save()
# Delete the Person, which should lead to deletion of the BlogPost,
# and, recursively, to the Comment, too
author.delete()
self.assertEqual(Comment.objects.count(), 0)
self.Person.drop_collection()
BlogPost.drop_collection()
Comment.drop_collection()
def test_reverse_delete_rule_deny(self):
"""Ensure that a document cannot be referenced if there are still
documents referring to it.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=DENY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.save()
# Deleting the Person should be denied
self.assertRaises(OperationError, author.delete) # Should raise denied error
self.assertEqual(BlogPost.objects.count(), 1) # No objects should have been deleted
self.assertEqual(self.Person.objects.count(), 1)
# Other users that don't have BlogPosts must still be removable as normal
author = self.Person(name='Another User')
author.save()
self.assertEqual(self.Person.objects.count(), 2)
author.delete()
self.assertEqual(self.Person.objects.count(), 1)
self.Person.drop_collection()
BlogPost.drop_collection()
def test_subclasses_and_unique_keys_works(self):
class A(Document):
pass
class B(A):
foo = BooleanField(unique=True)
A.drop_collection()
B.drop_collection()
A().save()
A().save()
B(foo=True).save()
self.assertEqual(A.objects.count(), 2)
self.assertEqual(B.objects.count(), 1)
A.drop_collection()
B.drop_collection()
def test_document_hash(self):
"""Test document in list, dict, set
"""
class User(Document):
pass
class BlogPost(Document):
pass
# Clear old data
User.drop_collection()
BlogPost.drop_collection()
u1 = User.objects.create()
u2 = User.objects.create()
u3 = User.objects.create()
u4 = User() # New object
b1 = BlogPost.objects.create()
b2 = BlogPost.objects.create()
# in List
all_user_list = list(User.objects.all())
self.assertTrue(u1 in all_user_list)
self.assertTrue(u2 in all_user_list)
self.assertTrue(u3 in all_user_list)
self.assertFalse(u4 in all_user_list) # New object
self.assertFalse(b1 in all_user_list) # Other object
self.assertFalse(b2 in all_user_list) # Other object
# in Dict
all_user_dic = {}
for u in User.objects.all():
all_user_dic[u] = "OK"
self.assertEqual(all_user_dic.get(u1, False), "OK")
self.assertEqual(all_user_dic.get(u2, False), "OK")
self.assertEqual(all_user_dic.get(u3, False), "OK")
self.assertEqual(all_user_dic.get(u4, False), False) # New object
self.assertEqual(all_user_dic.get(b1, False), False) # Other object
self.assertEqual(all_user_dic.get(b2, False), False) # Other object
# in Set
all_user_set = set(User.objects.all())
self.assertTrue(u1 in all_user_set)
def test_picklable(self):
pickle_doc = PickleTest(number=1, string="One", lists=['1', '2'])
pickle_doc.embedded = PickleEmbedded()
pickled_doc = pickle.dumps(pickle_doc) # make sure pickling works even before the doc is saved
pickle_doc.save()
pickled_doc = pickle.dumps(pickle_doc)
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected, pickle_doc)
# Test pickling changed data
pickle_doc.lists.append("3")
pickled_doc = pickle.dumps(pickle_doc)
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected, pickle_doc)
resurrected.string = "Two"
resurrected.save()
pickle_doc = PickleTest.objects.first()
self.assertEqual(resurrected, pickle_doc)
self.assertEqual(pickle_doc.string, "Two")
self.assertEqual(pickle_doc.lists, ["1", "2", "3"])
def test_regular_document_pickle(self):
pickle_doc = PickleTest(number=1, string="One", lists=['1', '2'])
pickled_doc = pickle.dumps(pickle_doc) # make sure pickling works even before the doc is saved
pickle_doc.save()
pickled_doc = pickle.dumps(pickle_doc)
# Test that when a document's definition changes the new
# definition is used
fixtures.PickleTest = fixtures.NewDocumentPickleTest
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected.__class__,
fixtures.NewDocumentPickleTest)
self.assertEqual(resurrected._fields_ordered,
fixtures.NewDocumentPickleTest._fields_ordered)
self.assertNotEqual(resurrected._fields_ordered,
pickle_doc._fields_ordered)
# The local PickleTest is still a ref to the original
fixtures.PickleTest = PickleTest
def test_dynamic_document_pickle(self):
pickle_doc = PickleDynamicTest(
name="test", number=1, string="One", lists=['1', '2'])
pickle_doc.embedded = PickleDyanmicEmbedded(foo="Bar")
pickled_doc = pickle.dumps(pickle_doc) # make sure pickling works even before the doc is saved
pickle_doc.save()
pickled_doc = pickle.dumps(pickle_doc)
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected, pickle_doc)
self.assertEqual(resurrected._fields_ordered,
pickle_doc._fields_ordered)
self.assertEqual(resurrected._dynamic_fields.keys(),
pickle_doc._dynamic_fields.keys())
self.assertEqual(resurrected.embedded, pickle_doc.embedded)
self.assertEqual(resurrected.embedded._fields_ordered,
pickle_doc.embedded._fields_ordered)
self.assertEqual(resurrected.embedded._dynamic_fields.keys(),
pickle_doc.embedded._dynamic_fields.keys())
def test_picklable_on_signals(self):
pickle_doc = PickleSignalsTest(
number=1, string="One", lists=['1', '2'])
pickle_doc.embedded = PickleEmbedded()
pickle_doc.save()
pickle_doc.delete()
def test_throw_invalid_document_error(self):
# A field that shadows a Document method (validate) must be rejected
def throw_invalid_document_error():
class Blog(Document):
validate = DictField()
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def test_mutating_documents(self):
class B(EmbeddedDocument):
field1 = StringField(default='field1')
class A(Document):
b = EmbeddedDocumentField(B, default=lambda: B())
A.drop_collection()
a = A()
a.save()
a.reload()
self.assertEqual(a.b.field1, 'field1')
class C(EmbeddedDocument):
c_field = StringField(default='cfield')
class B(EmbeddedDocument):
field1 = StringField(default='field1')
field2 = EmbeddedDocumentField(C, default=lambda: C())
class A(Document):
b = EmbeddedDocumentField(B, default=lambda: B())
a = A.objects()[0]
a.b.field2.c_field = 'new value'
a.save()
a.reload()
self.assertEqual(a.b.field2.c_field, 'new value')
def test_can_save_false_values(self):
"""Ensures you can save False values on save"""
class Doc(Document):
foo = StringField()
archived = BooleanField(default=False, required=True)
Doc.drop_collection()
d = Doc()
d.save()
d.archived = False
d.save()
self.assertEqual(Doc.objects(archived=False).count(), 1)
def test_can_save_false_values_dynamic(self):
"""Ensures you can save False values on dynamic docs"""
class Doc(DynamicDocument):
foo = StringField()
Doc.drop_collection()
d = Doc()
d.save()
d.archived = False
d.save()
self.assertEqual(Doc.objects(archived=False).count(), 1)
def test_do_not_save_unchanged_references(self):
"""Ensures cascading saves dont auto update"""
class Job(Document):
name = StringField()
class Person(Document):
name = StringField()
age = IntField()
job = ReferenceField(Job)
Job.drop_collection()
Person.drop_collection()
job = Job(name="Job 1")
# job should not have any changed fields after the save
job.save()
person = Person(name="name", age=10, job=job)
from pymongo.collection import Collection
orig_update = Collection.update
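# Monkey-patch Collection.update so that any update issued while saving
# the person (whose referenced job is unchanged) fails the test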
try:
def fake_update(*args, **kwargs):
self.fail("Unexpected update for %s" % args[0].name)
return orig_update(*args, **kwargs)
Collection.update = fake_update
person.save()
finally:
Collection.update = orig_update
def test_db_alias_tests(self):
""" DB Alias tests """
# mongoenginetest is the default connection alias from setUp()
# Register Aliases
register_connection('testdb-1', 'mongoenginetest2')
register_connection('testdb-2', 'mongoenginetest3')
register_connection('testdb-3', 'mongoenginetest4')
class User(Document):
name = StringField()
meta = {"db_alias": "testdb-1"}
class Book(Document):
name = StringField()
meta = {"db_alias": "testdb-2"}
# Drops
User.drop_collection()
Book.drop_collection()
# Create
bob = User.objects.create(name="Bob")
hp = Book.objects.create(name="Harry Potter")
# Selects
self.assertEqual(User.objects.first(), bob)
self.assertEqual(Book.objects.first(), hp)
# DeReference
class AuthorBooks(Document):
author = ReferenceField(User)
book = ReferenceField(Book)
meta = {"db_alias": "testdb-3"}
# Drops
AuthorBooks.drop_collection()
ab = AuthorBooks.objects.create(author=bob, book=hp)
# select
self.assertEqual(AuthorBooks.objects.first(), ab)
self.assertEqual(AuthorBooks.objects.first().book, hp)
self.assertEqual(AuthorBooks.objects.first().author, bob)
self.assertEqual(AuthorBooks.objects.filter(author=bob).first(), ab)
self.assertEqual(AuthorBooks.objects.filter(book=hp).first(), ab)
# DB Alias
self.assertEqual(User._get_db(), get_db("testdb-1"))
self.assertEqual(Book._get_db(), get_db("testdb-2"))
self.assertEqual(AuthorBooks._get_db(), get_db("testdb-3"))
# Collections
self.assertEqual(
User._get_collection(),
get_db("testdb-1")[User._get_collection_name()])
self.assertEqual(
Book._get_collection(),
get_db("testdb-2")[Book._get_collection_name()])
self.assertEqual(
AuthorBooks._get_collection(),
get_db("testdb-3")[AuthorBooks._get_collection_name()])
def test_db_alias_overrides(self):
"""db_alias can be overriden
"""
# Register a connection with db_alias testdb-2
register_connection('testdb-2', 'mongoenginetest2')
class A(Document):
"""Uses default db_alias
"""
name = StringField()
meta = {"allow_inheritance": True}
class B(A):
"""Uses testdb-2 db_alias
"""
meta = {"db_alias": "testdb-2"}
A.objects.all()
self.assertEqual('testdb-2', B._meta.get('db_alias'))
self.assertEqual('mongoenginetest',
A._get_collection().database.name)
self.assertEqual('mongoenginetest2',
B._get_collection().database.name)
def test_db_alias_propagates(self):
"""db_alias propagates?
"""
register_connection('testdb-1', 'mongoenginetest2')
class A(Document):
name = StringField()
meta = {"db_alias": "testdb-1", "allow_inheritance": True}
class B(A):
pass
self.assertEqual('testdb-1', B._meta.get('db_alias'))
def test_db_ref_usage(self):
""" DB Ref usage in dict_fields"""
class User(Document):
name = StringField()
class Book(Document):
name = StringField()
author = ReferenceField(User)
extra = DictField()
meta = {
'ordering': ['+name']
}
def __unicode__(self):
return self.name
def __str__(self):
return self.name
# Drops
User.drop_collection()
Book.drop_collection()
# Authors
bob = User.objects.create(name="Bob")
jon = User.objects.create(name="Jon")
# Redactors
karl = User.objects.create(name="Karl")
susan = User.objects.create(name="Susan")
peter = User.objects.create(name="Peter")
# Bob
Book.objects.create(name="1", author=bob, extra={
"a": bob.to_dbref(), "b": [karl.to_dbref(), susan.to_dbref()]})
Book.objects.create(name="2", author=bob, extra={
"a": bob.to_dbref(), "b": karl.to_dbref()})
Book.objects.create(name="3", author=bob, extra={
"a": bob.to_dbref(), "c": [jon.to_dbref(), peter.to_dbref()]})
Book.objects.create(name="4", author=bob)
# Jon
Book.objects.create(name="5", author=jon)
Book.objects.create(name="6", author=peter)
Book.objects.create(name="7", author=jon)
Book.objects.create(name="8", author=jon)
Book.objects.create(name="9", author=jon,
extra={"a": peter.to_dbref()})
# Checks
self.assertEqual(",".join([str(b) for b in Book.objects.all()]),
"1,2,3,4,5,6,7,8,9")
# bob related books
self.assertEqual(",".join([str(b) for b in Book.objects.filter(
Q(extra__a=bob) |
Q(author=bob) |
Q(extra__b=bob))]),
"1,2,3,4")
# Susan & Karl related books
self.assertEqual(",".join([str(b) for b in Book.objects.filter(
Q(extra__a__all=[karl, susan]) |
Q(author__all=[karl, susan]) |
Q(extra__b__all=[
karl.to_dbref(), susan.to_dbref()]))
]), "1")
# $Where
self.assertEqual(u",".join([str(b) for b in Book.objects.filter(
__raw__={
"$where": """
function(){
return this.name == '1' ||
this.name == '2';}"""
})]),
"1,2")
def test_switch_db_instance(self):
register_connection('testdb-1', 'mongoenginetest2')
class Group(Document):
name = StringField()
Group.drop_collection()
with switch_db(Group, 'testdb-1') as Group:
Group.drop_collection()
Group(name="hello - default").save()
self.assertEqual(1, Group.objects.count())
group = Group.objects.first()
group.switch_db('testdb-1')
group.name = "hello - testdb!"
group.save()
with switch_db(Group, 'testdb-1') as Group:
group = Group.objects.first()
self.assertEqual("hello - testdb!", group.name)
group = Group.objects.first()
self.assertEqual("hello - default", group.name)
# Slightly contrived now - perform an update
# Only works as they have the same object_id
group.switch_db('testdb-1')
group.update(set__name="hello - update")
with switch_db(Group, 'testdb-1') as Group:
group = Group.objects.first()
self.assertEqual("hello - update", group.name)
Group.drop_collection()
self.assertEqual(0, Group.objects.count())
group = Group.objects.first()
self.assertEqual("hello - default", group.name)
# Totally contrived now - perform a delete
# Only works as they have the same object_id
group.switch_db('testdb-1')
group.delete()
with switch_db(Group, 'testdb-1') as Group:
self.assertEqual(0, Group.objects.count())
group = Group.objects.first()
self.assertEqual("hello - default", group.name)
def test_load_undefined_fields(self):
class User(Document):
name = StringField()
User.drop_collection()
User._get_collection().save({
'name': 'John',
'foo': 'Bar',
'data': [1, 2, 3]
})
self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_with_strict_false(self):
class User(Document):
name = StringField()
meta = {'strict': False}
User.drop_collection()
User._get_collection().save({
'name': 'John',
'foo': 'Bar',
'data': [1, 2, 3]
})
user = User.objects.first()
self.assertEqual(user.name, 'John')
self.assertFalse(hasattr(user, 'foo'))
self.assertEqual(user._data['foo'], 'Bar')
self.assertFalse(hasattr(user, 'data'))
self.assertEqual(user._data['data'], [1, 2, 3])
def test_load_undefined_fields_on_embedded_document(self):
class Thing(EmbeddedDocument):
name = StringField()
class User(Document):
name = StringField()
thing = EmbeddedDocumentField(Thing)
User.drop_collection()
User._get_collection().save({
'name': 'John',
'thing': {
'name': 'My thing',
'foo': 'Bar',
'data': [1, 2, 3]
}
})
self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_on_embedded_document_with_strict_false_on_doc(self):
class Thing(EmbeddedDocument):
name = StringField()
class User(Document):
name = StringField()
thing = EmbeddedDocumentField(Thing)
meta = {'strict': False}
User.drop_collection()
User._get_collection().save({
'name': 'John',
'thing': {
'name': 'My thing',
'foo': 'Bar',
'data': [1, 2, 3]
}
})
self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_on_embedded_document_with_strict_false(self):
class Thing(EmbeddedDocument):
name = StringField()
meta = {'strict': False}
class User(Document):
name = StringField()
thing = EmbeddedDocumentField(Thing)
User.drop_collection()
User._get_collection().save({
'name': 'John',
'thing': {
'name': 'My thing',
'foo': 'Bar',
'data': [1, 2, 3]
}
})
user = User.objects.first()
self.assertEqual(user.name, 'John')
self.assertEqual(user.thing.name, 'My thing')
self.assertFalse(hasattr(user.thing, 'foo'))
self.assertEqual(user.thing._data['foo'], 'Bar')
self.assertFalse(hasattr(user.thing, 'data'))
self.assertEqual(user.thing._data['data'], [1, 2, 3])
def test_spaces_in_keys(self):
class Embedded(DynamicEmbeddedDocument):
pass
class Doc(DynamicDocument):
pass
Doc.drop_collection()
doc = Doc()
setattr(doc, 'hello world', 1)
doc.save()
one = Doc.objects.filter(**{'hello world': 1}).count()
self.assertEqual(1, one)
def test_shard_key(self):
class LogEntry(Document):
machine = StringField()
log = StringField()
meta = {
'shard_key': ('machine',)
}
LogEntry.drop_collection()
log = LogEntry()
log.machine = "Localhost"
log.save()
self.assertTrue(log.id is not None)
log.log = "Saving"
log.save()
def change_shard_key():
log.machine = "127.0.0.1"
self.assertRaises(OperationError, change_shard_key)
def test_shard_key_in_embedded_document(self):
class Foo(EmbeddedDocument):
foo = StringField()
class Bar(Document):
meta = {
'shard_key': ('foo.foo',)
}
foo = EmbeddedDocumentField(Foo)
bar = StringField()
foo_doc = Foo(foo='hello')
bar_doc = Bar(foo=foo_doc, bar='world')
bar_doc.save()
self.assertTrue(bar_doc.id is not None)
bar_doc.bar = 'baz'
bar_doc.save()
def change_shard_key():
bar_doc.foo.foo = 'something'
bar_doc.save()
self.assertRaises(OperationError, change_shard_key)
def test_shard_key_primary(self):
class LogEntry(Document):
machine = StringField(primary_key=True)
log = StringField()
meta = {
'shard_key': ('machine',)
}
LogEntry.drop_collection()
log = LogEntry()
log.machine = "Localhost"
log.save()
self.assertTrue(log.id is not None)
log.log = "Saving"
log.save()
def change_shard_key():
log.machine = "127.0.0.1"
self.assertRaises(OperationError, change_shard_key)
def test_kwargs_simple(self):
class Embedded(EmbeddedDocument):
name = StringField()
class Doc(Document):
doc_name = StringField()
doc = EmbeddedDocumentField(Embedded)
def __eq__(self, other):
return (self.doc_name == other.doc_name and
self.doc == other.doc)
classic_doc = Doc(doc_name="my doc", doc=Embedded(name="embedded doc"))
dict_doc = Doc(**{"doc_name": "my doc",
"doc": {"name": "embedded doc"}})
self.assertEqual(classic_doc, dict_doc)
self.assertEqual(classic_doc._data, dict_doc._data)
def test_kwargs_complex(self):
class Embedded(EmbeddedDocument):
name = StringField()
class Doc(Document):
doc_name = StringField()
docs = ListField(EmbeddedDocumentField(Embedded))
def __eq__(self, other):
return (self.doc_name == other.doc_name and
self.docs == other.docs)
classic_doc = Doc(doc_name="my doc", docs=[
Embedded(name="embedded doc1"),
Embedded(name="embedded doc2")])
dict_doc = Doc(**{"doc_name": "my doc",
"docs": [{"name": "embedded doc1"},
{"name": "embedded doc2"}]})
self.assertEqual(classic_doc, dict_doc)
self.assertEqual(classic_doc._data, dict_doc._data)
def test_positional_creation(self):
"""Ensure that document may be created using positional arguments.
"""
person = self.Person("Test User", 42)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 42)
def test_mixed_creation(self):
"""Ensure that document may be created using mixed arguments.
"""
person = self.Person("Test User", age=42)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 42)
def test_positional_creation_embedded(self):
"""Ensure that embedded document may be created using positional arguments.
"""
job = self.Job("Test Job", 4)
self.assertEqual(job.name, "Test Job")
self.assertEqual(job.years, 4)
def test_mixed_creation_embedded(self):
"""Ensure that embedded document may be created using mixed arguments.
"""
job = self.Job("Test Job", years=4)
self.assertEqual(job.name, "Test Job")
self.assertEqual(job.years, 4)
def test_mixed_creation_dynamic(self):
"""Ensure that document may be created using mixed arguments.
"""
class Person(DynamicDocument):
name = StringField()
person = Person("Test User", age=42)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 42)
def test_bad_mixed_creation(self):
"""Ensure that document gives correct error when duplicating arguments
"""
def construct_bad_instance():
return self.Person("Test User", 42, name="Bad User")
self.assertRaises(TypeError, construct_bad_instance)
def test_data_contains_id_field(self):
"""Ensure that asking for _data returns 'id'
"""
class Person(Document):
name = StringField()
Person.drop_collection()
Person(name="Harry Potter").save()
person = Person.objects.first()
self.assertTrue('id' in person._data.keys())
self.assertEqual(person._data.get('id'), person.id)
def test_complex_nesting_document_and_embedded_document(self):
class Macro(EmbeddedDocument):
value = DynamicField(default="UNDEFINED")
class Parameter(EmbeddedDocument):
macros = MapField(EmbeddedDocumentField(Macro))
def expand(self):
self.macros["test"] = Macro()
class Node(Document):
parameters = MapField(EmbeddedDocumentField(Parameter))
def expand(self):
self.flattened_parameter = {}
for parameter_name, parameter in self.parameters.iteritems():
parameter.expand()
class NodesSystem(Document):
name = StringField(required=True)
nodes = MapField(ReferenceField(Node, dbref=False))
def save(self, *args, **kwargs):
for node_name, node in self.nodes.iteritems():
node.expand()
node.save(*args, **kwargs)
super(NodesSystem, self).save(*args, **kwargs)
NodesSystem.drop_collection()
Node.drop_collection()
system = NodesSystem(name="system")
system.nodes["node"] = Node()
system.save()
system.nodes["node"].parameters["param"] = Parameter()
system.save()
system = NodesSystem.objects.first()
self.assertEqual(
"UNDEFINED",
system.nodes["node"].parameters["param"].macros["test"].value)
def test_embedded_document_equality(self):
class Test(Document):
field = StringField(required=True)
class Embedded(EmbeddedDocument):
ref = ReferenceField(Test)
Test.drop_collection()
test = Test(field='123').save() # has id
e = Embedded(ref=test)
f1 = Embedded._from_son(e.to_mongo())
f2 = Embedded._from_son(e.to_mongo())
self.assertEqual(f1, f2)
f1.ref # Dereferences lazily
self.assertEqual(f1, f2)
def test_dbref_equality(self):
class Test2(Document):
name = StringField()
class Test3(Document):
name = StringField()
class Test(Document):
name = StringField()
test2 = ReferenceField('Test2')
test3 = ReferenceField('Test3')
Test.drop_collection()
Test2.drop_collection()
Test3.drop_collection()
t2 = Test2(name='a')
t2.save()
t3 = Test3(name='x')
t3.id = t2.id
t3.save()
t = Test(name='b', test2=t2, test3=t3)
f = Test._from_son(t.to_mongo())
dbref2 = f._data['test2']
obj2 = f.test2
self.assertTrue(isinstance(dbref2, DBRef))
self.assertTrue(isinstance(obj2, Test2))
self.assertTrue(obj2.id == dbref2.id)
self.assertTrue(obj2 == dbref2)
self.assertTrue(dbref2 == obj2)
dbref3 = f._data['test3']
obj3 = f.test3
self.assertTrue(isinstance(dbref3, DBRef))
self.assertTrue(isinstance(obj3, Test3))
self.assertTrue(obj3.id == dbref3.id)
self.assertTrue(obj3 == dbref3)
self.assertTrue(dbref3 == obj3)
self.assertTrue(obj2.id == obj3.id)
self.assertTrue(dbref2.id == dbref3.id)
self.assertFalse(dbref2 == dbref3)
self.assertFalse(dbref3 == dbref2)
self.assertTrue(dbref2 != dbref3)
self.assertTrue(dbref3 != dbref2)
self.assertFalse(obj2 == dbref3)
self.assertFalse(dbref3 == obj2)
self.assertTrue(obj2 != dbref3)
self.assertTrue(dbref3 != obj2)
self.assertFalse(obj3 == dbref2)
self.assertFalse(dbref2 == obj3)
self.assertTrue(obj3 != dbref2)
self.assertTrue(dbref2 != obj3)
def test_default_values(self):
class Person(Document):
created_on = DateTimeField(default=lambda: datetime.utcnow())
name = StringField()
p = Person(name='alon')
p.save()
orig_created_on = Person.objects().only('created_on')[0].created_on
p2 = Person.objects().only('name')[0]
p2.name = 'alon2'
p2.save()
p3 = Person.objects().only('created_on')[0]
self.assertEqual(orig_created_on, p3.created_on)
class Person(Document):
created_on = DateTimeField(default=lambda: datetime.utcnow())
name = StringField()
height = IntField(default=189)
p4 = Person.objects()[0]
p4.save()
self.assertEqual(p4.height, 189)
self.assertEqual(Person.objects(height=189).count(), 1)
def test_from_son(self):
# 771
class MyPerson(self.Person):
meta = dict(shard_key=["id"])
p = MyPerson.from_json('{"name": "name", "age": 27}', created=True)
self.assertEqual(p.id, None)
p.id = "12345"  # regression check: if this broke, "OperationError: Shard Keys are immutable..." would be raised here
p = MyPerson._from_son({"name": "name", "age": 27}, created=True)
self.assertEqual(p.id, None)
p.id = "12345"  # regression check: if this broke, "OperationError: Shard Keys are immutable..." would be raised here
def test_null_field(self):
# 734
class User(Document):
name = StringField()
height = IntField(default=184, null=True)
str_fld = StringField(null=True)
int_fld = IntField(null=True)
flt_fld = FloatField(null=True)
dt_fld = DateTimeField(null=True)
cdt_fld = ComplexDateTimeField(null=True)
User.objects.delete()
u = User(name='user')
u.save()
u_from_db = User.objects.get(name='user')
u_from_db.height = None
u_from_db.save()
self.assertEqual(u_from_db.height, None)
# 864
self.assertEqual(u_from_db.str_fld, None)
self.assertEqual(u_from_db.int_fld, None)
self.assertEqual(u_from_db.flt_fld, None)
self.assertEqual(u_from_db.dt_fld, None)
self.assertEqual(u_from_db.cdt_fld, None)
# 735
User.objects.delete()
u = User(name='user')
u.save()
User.objects(name='user').update_one(set__height=None, upsert=True)
u_from_db = User.objects.get(name='user')
self.assertEqual(u_from_db.height, None)
def test_not_saved_eq(self):
"""Ensure we can compare documents not saved.
"""
class Person(Document):
pass
p = Person()
p1 = Person()
self.assertNotEqual(p, p1)
self.assertEqual(p, p)
def test_list_iter(self):
# 914
class B(EmbeddedDocument):
v = StringField()
class A(Document):
l = ListField(EmbeddedDocumentField(B))
A.objects.delete()
A(l=[B(v='1'), B(v='2'), B(v='3')]).save()
a = A.objects.get()
self.assertEqual(a.l._instance, a)
for idx, b in enumerate(a.l):
self.assertEqual(b._instance, a)
self.assertEqual(idx, 2)
if __name__ == '__main__':
unittest.main()
|
ProgressivePlanning/mongoengine
|
tests/document/instance.py
|
Python
|
mit
| 102,382
|
[
"exciting"
] |
59112097f7b169429819c95c7da201addee28d132eeca4245713a7463c4681d8
|
# Copyright (C) 2011 by Brandon Invergo (b.invergo@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from __future__ import print_function
import os
import os.path
from ._paml import Paml, _relpath
from . import _parse_codeml
class CodemlError(EnvironmentError):
"""CODEML has failed. Run with verbose = True to view CODEML's error
message"""
class Codeml(Paml):
"""This class implements an interface to CODEML, part of the PAML package."""
def __init__(self, alignment=None, tree=None, working_dir=None,
out_file=None):
"""Initialize the codeml instance.
The user may optionally pass in strings specifying the locations
of the input alignment and tree files, the working directory and
the final output file. Other options found in the CODEML control
file default to typical settings for running site class models 0, 1 and
2 on a nucleotide alignment.
"""
Paml.__init__(self, alignment, working_dir, out_file)
if tree is not None:
if not os.path.exists(tree):
raise IOError("The specified tree file does not exist.")
self.tree = tree
self.ctl_file = "codeml.ctl"
self._options = {"noisy": None,
"verbose": None,
"runmode": None,
"seqtype": None,
"CodonFreq": None,
"ndata": None,
"clock": None,
"aaDist": None,
"aaRatefile": None,
"model": None,
"NSsites": None,
"icode": None,
"Mgene": None,
"fix_kappa": None,
"kappa": None,
"fix_omega": None,
"omega": None,
"fix_alpha": None,
"alpha": None,
"Malpha": None,
"ncatG": None,
"getSE": None,
"RateAncestor": None,
"Small_Diff": None,
"cleandata": None,
"fix_blength": None,
"method": None,
"rho": None,
"fix_rho": None}
def write_ctl_file(self):
"""Dynamically build a CODEML control file from the options.
The control file is written to the location specified by the
ctl_file property of the codeml class.
"""
# Make sure all paths are relative to the working directory
self._set_rel_paths()
with open(self.ctl_file, 'w') as ctl_handle:
ctl_handle.write("seqfile = %s\n" % self._rel_alignment)
ctl_handle.write("outfile = %s\n" % self._rel_out_file)
ctl_handle.write("treefile = %s\n" % self._rel_tree)
for option in self._options.items():
if option[1] is None:
# If an option has a value of None, there's no need
# to write it in the control file; it's normally just
# commented out.
continue
if option[0] == "NSsites":
# NSsites is stored in Python as a list but in the
# control file it is specified as a series of numbers
# separated by spaces.
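# e.g. NSsites = [0, 1, 2] is written as the line "NSsites = 0 1 2".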
NSsites = " ".join(str(site) for site in option[1])
ctl_handle.write("%s = %s\n" % (option[0], NSsites))
else:
ctl_handle.write("%s = %s\n" % (option[0], option[1]))
def read_ctl_file(self, ctl_file):
"""Parse a control file and load the options into the Codeml instance.
"""
temp_options = {}
if not os.path.isfile(ctl_file):
raise IOError("File not found: %r" % ctl_file)
else:
with open(ctl_file) as ctl_handle:
for line in ctl_handle:
line = line.strip()
uncommented = line.split("*", 1)[0]
if uncommented != "":
if "=" not in uncommented:
raise AttributeError(
"Malformed line in control file:\n%r" % line)
(option, value) = uncommented.split("=", 1)
option = option.strip()
value = value.strip()
if option == "seqfile":
self.alignment = value
elif option == "treefile":
self.tree = value
elif option == "outfile":
self.out_file = value
elif option == "NSsites":
site_classes = value.split(" ")
for n in range(len(site_classes)):
try:
site_classes[n] = int(site_classes[n])
except ValueError:
raise TypeError(
"Invalid site class: %s" % site_classes[n])
temp_options["NSsites"] = site_classes
elif option not in self._options:
raise KeyError("Invalid option: %s" % option)
else:
if "." in value:
try:
converted_value = float(value)
except ValueError:
converted_value = value
else:
try:
converted_value = int(value)
except ValueError:
converted_value = value
temp_options[option] = converted_value
for option in self._options:
if option in temp_options:
self._options[option] = temp_options[option]
else:
self._options[option] = None
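# For example, the control-file line "NSsites = 0 1 2  * site models" is
# truncated at the "*" comment marker and parsed so that
# self._options["NSsites"] becomes [0, 1, 2].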
def print_options(self):
"""Print out all of the options and their current settings."""
for option in self._options.items():
if option[0] == "NSsites" and option[1] is not None:
# NSsites is stored in Python as a list but in the
# control file it is specified as a series of numbers
# separated by spaces.
NSsites = " ".join(str(site) for site in option[1])
print("%s = %s" % (option[0], NSsites))
else:
print("%s = %s" % (option[0], option[1]))
def _set_rel_paths(self):
"""Convert all file/directory locations to paths relative to the current
working directory.
CODEML requires that all paths specified in the control file be
relative to the directory from which it is called rather than
absolute paths.
"""
Paml._set_rel_paths(self)
if self.tree is not None:
self._rel_tree = _relpath(self.tree, self.working_dir)
def run(self, ctl_file=None, verbose=False, command="codeml", parse=True):
"""Run codeml using the current configuration and then parse the results.
Return a process signal so the user can determine if
the execution was successful (return code 0 is successful, -N
indicates a failure). The arguments may be passed as either
absolute or relative paths, despite the fact that CODEML
requires relative paths.
"""
if self.tree is None:
raise ValueError("Tree file not specified.")
if not os.path.exists(self.tree):
raise IOError("The specified tree file does not exist.")
Paml.run(self, ctl_file, verbose, command)
if parse:
results = read(self.out_file)
else:
results = None
return results
def read(results_file):
"""Parse a CODEML results file."""
results = {}
if not os.path.exists(results_file):
raise IOError("Results file does not exist.")
with open(results_file) as handle:
lines = handle.readlines()
(results, multi_models, multi_genes) = _parse_codeml.parse_basics(
    lines, results)
results = _parse_codeml.parse_nssites(lines, results, multi_models,
multi_genes)
results = _parse_codeml.parse_pairwise(lines, results)
results = _parse_codeml.parse_distances(lines, results)
if len(results) == 0:
raise ValueError("Invalid results file")
return results
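# A minimal usage sketch of this interface (the file names are illustrative
# assumptions, and set_options() comes from the Paml base class):
#
#     cml = Codeml(alignment="aln.phy", tree="tree.nwk",
#                  working_dir="scratch", out_file="results.out")
#     cml.set_options(seqtype=1, model=0, NSsites=[0, 1, 2])
#     results = cml.run()  # writes codeml.ctl, runs codeml, parses the output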
|
zjuchenyuan/BioWeb
|
Lib/Bio/Phylo/PAML/codeml.py
|
Python
|
mit
| 9,172
|
[
"Biopython"
] |
f12a087bc6a98a90a8ce91a82c8f1db98354c6b49fed80bb02d5e84f6234f585
|
from aces.materials import Material
from aces.modify import get_unique_atoms
from ase import Atoms,Atom
from math import pi,sqrt
from ase.dft.kpoints import ibz_points
from ase.lattice import bulk
from ase import io
from aces import config
from aces.tools import *
class structure(Material):
def set_parameters(self):
self.enforceThick=False
self.latx=1
self.laty=1
self.latz=1
self.elements=['Co','Sb']
self.poscar="""CoSb3
1.0
9.0384998322 0.0000000000 0.0000000000
0.0000000000 9.0384998322 0.0000000000
0.0000000000 0.0000000000 9.0384998322
Co Sb
8 24
Direct
0.250000000 0.250000000 0.250000000
0.750000000 0.750000000 0.750000000
0.750000000 0.750000000 0.250000000
0.250000000 0.250000000 0.750000000
0.250000000 0.750000000 0.750000000
0.750000000 0.250000000 0.250000000
0.750000000 0.250000000 0.750000000
0.250000000 0.750000000 0.250000000
0.000000000 0.335370004 0.157879993
0.500000000 0.835370004 0.657880008
0.000000000 0.664629996 0.842119992
0.500000000 0.164629996 0.342119992
0.000000000 0.664629996 0.157879993
0.500000000 0.164629996 0.657880008
0.000000000 0.335370004 0.842119992
0.500000000 0.835370004 0.342119992
0.157879993 0.000000000 0.335370004
0.657880008 0.500000000 0.835370004
0.842119992 0.000000000 0.664629996
0.342119992 0.500000000 0.164629996
0.842119992 0.000000000 0.335370004
0.342119992 0.500000000 0.835370004
0.157879993 0.000000000 0.664629996
0.657880008 0.500000000 0.164629996
0.335370004 0.157879993 0.000000000
0.835370004 0.657880008 0.500000000
0.664629996 0.842119992 0.000000000
0.164629996 0.342119992 0.500000000
0.335370004 0.842119992 0.000000000
0.835370004 0.342119992 0.500000000
0.664629996 0.157879993 0.000000000
0.164629996 0.657880008 0.500000000
"""
def lmp_structure(self):
prototype=self.prototype
atoms=prototype(self.latx,self.laty,self.latz)
atoms.set_pbc([self.xp,self.yp,self.zp])
return atoms
def prototype(self,latx,laty,latz):
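# Replicate the unit cell latx x laty x latz times to build the supercell.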
unit=self.unit()
col=unit.repeat((latx,laty,latz))
return col
def unit(self):
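# Write the embedded POSCAR string to disk, then read it back with
# ase.io to obtain the CoSb3 unit cell as an Atoms object.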
write(self.poscar,"POSCAR_ORI")
atoms=io.read("POSCAR_ORI")
return atoms
|
vanceeasleaf/aces
|
aces/materials/CoSb3.py
|
Python
|
gpl-2.0
| 2,865
|
[
"ASE"
] |
6fd9f47211bf8a8019f5fd11e21a3456b91695ef605b1d3e30c448d26e208900
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Zsolt Foldvari
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Brian Matherly
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011-2012 Paul Franklin
# Copyright (C) 2012 Craig Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""Report output generator based on Cairo.
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
from gramps.gen.ggettext import gettext as _
from math import radians
import re
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.plug.docgen import (BaseDoc, TextDoc, DrawDoc, ParagraphStyle,
TableCellStyle, SOLID, FONT_SANS_SERIF, FONT_SERIF,
FONT_MONOSPACE, PARA_ALIGN_CENTER, PARA_ALIGN_LEFT)
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.errors import PluginError
from gramps.gen.plug.docbackend import CairoBackend
from gramps.gen.utils.image import resize_to_buffer
from gramps.gui.utils import SystemFonts
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".libcairodoc")
#-------------------------------------------------------------------------
#
# GTK modules
#
#-------------------------------------------------------------------------
from gi.repository import Pango, PangoCairo
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
# each element draws some extra information useful for debugging
DEBUG = False
#------------------------------------------------------------------------
#
# Font selection
#
#------------------------------------------------------------------------
_TTF_FREEFONT = {
FONT_SERIF: 'FreeSerif',
FONT_SANS_SERIF: 'FreeSans',
FONT_MONOSPACE: 'FreeMono',
}
_MS_TTFONT = {
FONT_SERIF: 'Times New Roman',
FONT_SANS_SERIF: 'Arial',
FONT_MONOSPACE: 'Courier New',
}
_GNOME_FONT = {
FONT_SERIF: 'Serif',
FONT_SANS_SERIF: 'Sans',
FONT_MONOSPACE: 'Monospace',
}
font_families = _GNOME_FONT
# FIXME debug logging does not work here.
def set_font_families():
"""Set the used font families depending on availability.
"""
global font_families
fonts = SystemFonts()
family_names = fonts.get_system_fonts()
fam = [f for f in _TTF_FREEFONT.itervalues() if f in family_names]
if len(fam) == len(_TTF_FREEFONT):
font_families = _TTF_FREEFONT
log.debug('Using FreeFonts: %s' % font_families)
return
fam = [f for f in _MS_TTFONT.itervalues() if f in family_names]
if len(fam) == len(_MS_TTFONT):
font_families = _MS_TTFONT
log.debug('Using MS TrueType fonts: %s' % font_families)
return
fam = [f for f in _GNOME_FONT.itervalues() if f in family_names]
if len(fam) == len(_GNOME_FONT):
font_families = _GNOME_FONT
log.debug('Using Gnome fonts: %s' % font_families)
return
log.debug('No fonts found.')
set_font_families()
#------------------------------------------------------------------------
#
# Converter functions
#
#------------------------------------------------------------------------
def fontstyle_to_fontdescription(font_style):
"""Convert a FontStyle instance to a Pango.FontDescription one.
Font color and underline are not implemented in Pango.FontDescription,
and have to be set with Pango.Layout.set_attributes(attrlist) method.
"""
if font_style.get_bold():
f_weight = Pango.Weight.BOLD
else:
f_weight = Pango.Weight.NORMAL
if font_style.get_italic():
f_style = Pango.Style.ITALIC
else:
f_style = Pango.Style.NORMAL
font_description = Pango.FontDescription(font_families[font_style.face])
font_description.set_size(int(round(font_style.get_size() * Pango.SCALE)))
font_description.set_weight(f_weight)
font_description.set_style(f_style)
return font_description
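# e.g. a bold, italic 12pt FONT_SERIF style maps to the equivalent of
# Pango.FontDescription("FreeSerif 12") with weight BOLD and style ITALIC
# (the family name depends on which font set was detected above).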
def tabstops_to_tabarray(tab_stops, dpi):
"""Convert a list of tabs given in cm to a Pango.TabArray.
"""
tab_array = Pango.TabArray.new(initial_size=len(tab_stops),
positions_in_pixels=False)
for index in range(len(tab_stops)):
location = tab_stops[index] * dpi * Pango.SCALE / 2.54
tab_array.set_tab(index, Pango.TabAlign.LEFT, int(location))
return tab_array
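# e.g. a tab stop at 1.0 cm with dpi_x = 72 is stored at
# 1.0 * 72 * Pango.SCALE / 2.54 ~= 28.35 * Pango.SCALE units.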
def raw_length(s):
"""
Return the length of the raw string after all pango markup has been removed.
"""
s = re.sub('<.*?>', '', s)
s = s.replace('&amp;', '&')
s = s.replace('&lt;', '<')
s = s.replace('&gt;', '>')
s = s.replace('&quot;', '"')
s = s.replace('&apos;', "'")
return len(s)
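# e.g. raw_length('<b>A&amp;B</b>') == 3: the markup tags are stripped and
# the entity collapses to a single '&'.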
###------------------------------------------------------------------------
###
### Table row style
###
###------------------------------------------------------------------------
##class RowStyle(list):
##"""Specifies the format of a table row.
##RowStyle extends the available styles in
##The RowStyle contains the width of each column as a percentage of the
##width of the full row. Note! The width of the row is not known until
##divide() or draw() method is called.
##"""
##def __init__(self):
##self.columns = []
##def set_columns(self, columns):
##"""Set the number of columns.
##@param columns: number of columns that should be used.
##@param type: int
##"""
##self.columns = columns
##def get_columns(self):
##"""Return the number of columns.
##"""
##return self.columns
##def set_column_widths(self, clist):
##"""Set the width of all the columns at once.
##@param clist: list of width of columns in % of the full row.
##@param type: list
##"""
##self.columns = len(clist)
##for i in range(self.columns):
##self.colwid[i] = clist[i]
##def set_column_width(self, index, width):
##"""
##Set the width of a specified column to the specified width.
##@param index: column being set (index starts at 0)
##@param width: percentage of the table width assigned to the column
##"""
##self.colwid[index] = width
##def get_column_width(self, index):
##"""
##Return the column width of the specified column as a percentage of
##the entire table width.
##@param index: column to return (index starts at 0)
##"""
##return self.colwid[index]
class FrameStyle(object):
"""Define the style properties of a Frame.
- width: Width of the frame in cm.
- height: Height of the frame in cm.
- align: Horizontal position to entire page.
Available values: 'left','center', 'right'.
- spacing: Tuple of spacing around the frame in cm. Order of values:
(left, right, top, bottom).
"""
def __init__(self, width=0, height=0, align='left', spacing=(0, 0, 0, 0)):
self.width = width
self.height = height
self.align = align
self.spacing = spacing
#------------------------------------------------------------------------
#
# Document element classes
#
#------------------------------------------------------------------------
class GtkDocBaseElement(object):
"""Base of all document elements.
Supports document element structuring and can render itself onto
a Cairo surface.
There are two categories of methods:
1. hierarchy building methods (add_child, get_children, set_parent,
get_parent);
2. rendering methods (divide, draw).
The hierarchy building methods generally don't have to be overridden in
the subclass, while the rendering methods (divide, draw) must be
implemented in the subclasses.
"""
_type = 'BASE'
_allowed_children = []
def __init__(self, style=None):
self._parent = None
self._children = []
self._style = style
def get_type(self):
"""Get the type of this element.
"""
return self._type
def set_parent(self, parent):
"""Set the parent element of this element.
"""
self._parent = parent
def get_parent(self):
"""Get the parent element of this element.
"""
return self._parent
def add_child(self, element):
"""Add a child element.
Returns False if the child cannot be added (e.g. not an allowed type),
or True otherwise.
"""
# check if it is an allowed child for this type
if element.get_type() not in self._allowed_children:
log.debug("%r is not an allowed child for %r" %
(element.__class__, self.__class__))
return False
# append the child and set its parent
self._children.append(element)
element.set_parent(self)
return True
def get_children(self):
"""Get the list of children of this element.
"""
return self._children
def get_marks(self):
"""Get the list of index marks for this element.
"""
marks = []
for child in self._children:
marks.extend(child.get_marks())
return marks
def divide(self, layout, width, height, dpi_x, dpi_y):
"""Divide the element into two depending on available space.
@param layout: pango layout to write on
@param type: Pango.Layout
@param width: width of available space for this element
@param type: device points
@param height: height of available space for this element
@param type: device points
@param dpi_x: the horizontal resolution
@param type: dots per inch
@param dpi_y: the vertical resolution
@param type: dots per inch
@return: the divided element, and the height of the first part
@rtype: (GtkDocXXX-1, GtkDocXXX-2), device points
"""
raise NotImplementedError
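# For example, an element that fits entirely returns (self, None) and its
# height; one that does not fit at all returns (None, self) and 0; a split
# element returns (first_part, second_part) and the height of the first part.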
def draw(self, cairo_context, pango_layout, width, dpi_x, dpi_y):
"""Draw itself onto a cairo surface.
@param cairo_context: context to draw on
@param type: cairo.Context class
@param pango_layout: pango layout to write on
@param type: Pango.Layout class
@param width: width of available space for this element
@param type: device points
@param dpi_x: the horizontal resolution
@param type: dots per inch
@param dpi_y: the vertical resolution
@param type: dots per inch
@return: height of the element
@rtype: device points
"""
raise NotImplementedError
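# A minimal sketch of the hierarchy-building API described above (the style
# argument is whatever ParagraphStyle the caller already has):
#
#     doc = GtkDocDocument()
#     para = GtkDocParagraph(style)
#     doc.add_child(para)   # True: 'PARAGRAPH' is an allowed child of DOCUMENT
#     para.add_child(doc)   # False: a paragraph accepts no children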
class GtkDocDocument(GtkDocBaseElement):
"""The whole document or a page.
"""
_type = 'DOCUMENT'
_allowed_children = ['PARAGRAPH', 'PAGEBREAK', 'TABLE', 'IMAGE', 'FRAME',
'TOC', 'INDEX']
def draw(self, cairo_context, pango_layout, width, dpi_x, dpi_y):
x = y = elem_height = 0
for elem in self._children:
cairo_context.translate(x, elem_height)
elem_height = elem.draw(cairo_context, pango_layout,
width, dpi_x, dpi_y)
y += elem_height
return y
def has_toc(self):
for elem in self._children:
if elem.get_type() == 'TOC':
return True
return False
def has_index(self):
for elem in self._children:
if elem.get_type() == 'INDEX':
return True
return False
class GtkDocPagebreak(GtkDocBaseElement):
"""Implement a page break.
"""
_type = 'PAGEBREAK'
_allowed_children = []
def divide(self, layout, width, height, dpi_x, dpi_y):
return (None, None), 0
class GtkDocTableOfContents(GtkDocBaseElement):
"""Implement a table of contents.
"""
_type = 'TOC'
_allowed_children = []
def divide(self, layout, width, height, dpi_x, dpi_y):
return (self, None), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
return 0
class GtkDocAlphabeticalIndex(GtkDocBaseElement):
"""Implement an alphabetical index.
"""
_type = 'INDEX'
_allowed_children = []
def divide(self, layout, width, height, dpi_x, dpi_y):
return (self, None), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
return 0
class GtkDocParagraph(GtkDocBaseElement):
"""Paragraph.
"""
_type = 'PARAGRAPH'
_allowed_children = []
# line spacing is not defined in ParagraphStyle
spacingfractionfont = 0.2
def __init__(self, style, leader=None):
GtkDocBaseElement.__init__(self, style)
if leader:
self._text = leader + '\t'
# FIXME append new tab to the existing tab list
self._style.set_tabs([-1 * self._style.get_first_indent()])
else:
self._text = ''
self._plaintext = None
self._attrlist = None
self._marklist = []
def add_text(self, text):
if self._plaintext is not None:
raise PluginError('CairoDoc: text is already parsed.'
' You cannot add text anymore')
self._text = self._text + text
def add_mark(self, mark):
"""
Add an index mark to this paragraph
"""
self._marklist.append((mark, raw_length(self._text)))
def get_marks(self):
"""
Return a list of index marks for this paragraph
"""
return [elem[0] for elem in self._marklist]
def __set_marklist(self, marklist):
"""
Internal method to allow for splitting of paragraphs
"""
self._marklist = marklist
def __set_plaintext(self, plaintext):
"""
Internal method to allow for splitting of paragraphs
"""
self._plaintext = plaintext
def __set_attrlist(self, attrlist):
"""
Internal method to allow for splitting of paragraphs
"""
self._attrlist = attrlist
def __parse_text(self):
"""
Parse the markup text. This method will only do this if not
done already
"""
if self._plaintext is None:
parse_ok, self._attrlist, self._plaintext, accel_char = \
    Pango.parse_markup(self._text, -1, '\000')
def divide(self, layout, width, height, dpi_x, dpi_y):
self.__parse_text()
l_margin = self._style.get_left_margin() * dpi_x / 2.54
r_margin = self._style.get_right_margin() * dpi_x / 2.54
t_margin = self._style.get_top_margin() * dpi_y / 2.54
b_margin = self._style.get_bottom_margin() * dpi_y / 2.54
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
f_indent = self._style.get_first_indent() * dpi_x / 2.54
# calculate real width available for text
text_width = width - l_margin - 2 * h_padding - r_margin
if f_indent < 0:
text_width -= f_indent
layout.set_width(int(text_width * Pango.SCALE))
# set paragraph properties
layout.set_wrap(Pango.WrapMode.WORD_CHAR)
layout.set_indent(int(f_indent * Pango.SCALE))
layout.set_tabs(tabstops_to_tabarray(self._style.get_tabs(), dpi_x))
#
align = self._style.get_alignment_text()
if align == 'left':
layout.set_alignment(Pango.Alignment.LEFT)
elif align == 'right':
layout.set_alignment(Pango.Alignment.RIGHT)
elif align == 'center':
layout.set_alignment(Pango.Alignment.CENTER)
elif align == 'justify':
layout.set_justify(True)
else:
raise ValueError
#
font_style = self._style.get_font()
layout.set_font_description(fontstyle_to_fontdescription(font_style))
#set line spacing based on font:
spacing = font_style.get_size() * self.spacingfractionfont
layout.set_spacing(int(round(spacing * Pango.SCALE)))
text_height = height - t_margin - 2 * v_padding
# calculate where to cut the paragraph
layout.set_text(self._plaintext, -1)
layout.set_attributes(self._attrlist)
layout_width, layout_height = layout.get_pixel_size()
line_count = layout.get_line_count()
spacing = layout.get_spacing() / Pango.SCALE
# if all paragraph fits we don't need to cut
if layout_height - spacing <= text_height:
paragraph_height = layout_height + spacing + t_margin + (2 * v_padding)
if height - paragraph_height > b_margin:
paragraph_height += b_margin
return (self, None), paragraph_height
# we need to cut paragraph:
# 1. if the paragraph is part of a cell, we do not divide it when only a
# small part of it can be shown; instead move to the next page
if line_count < 4 and self._parent._type == 'CELL':
return (None, self), 0
lineiter = layout.get_iter()
linenr = 0
linerange = lineiter.get_line_yrange()
# 2. if nothing fits, move to next page without split
# there is a spacing above and under the text
if linerange[1] - linerange[0] + 2.*spacing \
> text_height * Pango.SCALE:
return (None, self), 0
# 3. split the paragraph
startheight = linerange[0]
endheight = linerange[1]
splitline = -1
if lineiter.at_last_line():
#only one line of text that does not fit
return (None, self), 0
while not lineiter.at_last_line():
#go to next line, see if all fits, if not split
lineiter.next_line()
linenr += 1
linerange = lineiter.get_line_yrange()
if linerange[1] - startheight + 2.*spacing \
> text_height * Pango.SCALE:
splitline = linenr
break
endheight = linerange[1]
if splitline == -1:
print 'CairoDoc STRANGE '
return (None, self), 0
#we split at splitline
# get index of first character which doesn't fit on available height
layout_line = layout.get_line(splitline)
index = layout_line.start_index
# and divide the text, first create the second part
new_style = ParagraphStyle(self._style)
new_style.set_top_margin(0)
#we split a paragraph, text should begin in correct position: no indent
#as if the paragraph just continues from normal text
new_style.set_first_indent(0)
new_paragraph = GtkDocParagraph(new_style)
# index is a byte offset into the UTF-8-encoded text
new_paragraph.__set_plaintext(self._plaintext.encode('utf-8')[index:])
# now recalculate the attrlist:
newattrlist = layout.get_attributes().copy()
newattrlist.filter(self.filterattr, index)
oldattrlist = newattrlist.get_iterator()
while oldattrlist.next():
vals = oldattrlist.get_attrs()
#print vals
for attr in vals:
newattr = attr.copy()
newattr.start_index -= index if newattr.start_index > index \
else 0
newattr.end_index -= index
newattrlist.insert(newattr)
new_paragraph.__set_attrlist(newattrlist)
# then update the first one
self.__set_plaintext(self._plaintext.encode('utf-8')[:index])
self._style.set_bottom_margin(0)
# split the list of index marks
para1 = []
para2 = []
for mark, position in self._marklist:
if position < index:
para1.append((mark, position))
else:
para2.append((mark, position - index))
self.__set_marklist(para1)
new_paragraph.__set_marklist(para2)
paragraph_height = endheight - startheight + spacing + t_margin + 2 * v_padding
return (self, new_paragraph), paragraph_height
def filterattr(self, attr, index):
"""callback to filter out attributes in the removed piece at beginning
"""
if attr.start_index > index or \
(attr.start_index < index and attr.end_index > index):
return False
return True
def draw(self, cr, layout, width, dpi_x, dpi_y):
self.__parse_text()
l_margin = self._style.get_left_margin() * dpi_x / 2.54
r_margin = self._style.get_right_margin() * dpi_x / 2.54
t_margin = self._style.get_top_margin() * dpi_y / 2.54
b_margin = self._style.get_bottom_margin() * dpi_y / 2.54
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
f_indent = self._style.get_first_indent() * dpi_x / 2.54
# calculate real width available for text
text_width = width - l_margin - 2 * h_padding - r_margin
if f_indent < 0:
text_width -= f_indent
layout.set_width(int(text_width * Pango.SCALE))
# set paragraph properties
layout.set_wrap(Pango.WrapMode.WORD_CHAR)
layout.set_indent(int(f_indent * Pango.SCALE))
layout.set_tabs(tabstops_to_tabarray(self._style.get_tabs(), dpi_x))
#
align = self._style.get_alignment_text()
if align == 'left':
layout.set_alignment(Pango.Alignment.LEFT)
elif align == 'right':
layout.set_alignment(Pango.Alignment.RIGHT)
elif align == 'center':
layout.set_alignment(Pango.Alignment.CENTER)
elif align == 'justify':
layout.set_justify(True)
#
font_style = self._style.get_font()
layout.set_font_description(fontstyle_to_fontdescription(font_style))
#set line spacing based on font:
spacing = font_style.get_size() * self.spacingfractionfont
layout.set_spacing(int(round(spacing * Pango.SCALE)))
# layout the text
layout.set_text(self._plaintext, -1)
layout.set_attributes(self._attrlist)
layout_width, layout_height = layout.get_pixel_size()
# render the layout onto the cairo surface
x = l_margin + h_padding
if f_indent < 0:
x += f_indent
# 3/4 of the spacing is added above the text, 1/4 is added below
cr.move_to(x, t_margin + v_padding + spacing * 0.75)
cr.set_source_rgb(*ReportUtils.rgb_color(font_style.get_color()))
PangoCairo.show_layout(cr, layout)
# calculate the full paragraph height
height = layout_height + spacing + t_margin + 2*v_padding + b_margin
# draw the borders
if self._style.get_top_border():
cr.move_to(l_margin, t_margin)
cr.rel_line_to(width - l_margin - r_margin, 0)
if self._style.get_right_border():
cr.move_to(width - r_margin, t_margin)
cr.rel_line_to(0, height - t_margin - b_margin)
if self._style.get_bottom_border():
cr.move_to(l_margin, height - b_margin)
cr.rel_line_to(width - l_margin - r_margin, 0)
if self._style.get_left_border():
cr.move_to(l_margin, t_margin)
cr.rel_line_to(0, height - t_margin - b_margin)
cr.set_line_width(1)
cr.set_source_rgb(0, 0, 0)
cr.stroke()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(0, 0, width, height)
cr.stroke()
cr.set_source_rgb(0, 0, 1.0)
cr.rectangle(l_margin, t_margin,
width-l_margin-r_margin, height-t_margin-b_margin)
cr.stroke()
return height
class GtkDocTable(GtkDocBaseElement):
"""Implement a table.
"""
_type = 'TABLE'
_allowed_children = ['ROW']
def divide(self, layout, width, height, dpi_x, dpi_y):
#calculate real table width
table_width = width * self._style.get_width() / 100
# calculate the height of each row
table_height = 0
row_index = 0
while row_index < len(self._children):
row = self._children[row_index]
(r1, r2), row_height = row.divide(layout, table_width, height,
dpi_x, dpi_y)
if r2 is not None:
#break the table in two parts
break
table_height += row_height
row_index += 1
height -= row_height
# divide the table if any row did not fit
new_table = None
if row_index < len(self._children):
new_table = GtkDocTable(self._style)
#add the split row
new_table.add_child(r2)
map(new_table.add_child, self._children[row_index+1:])
del self._children[row_index+1:]
return (self, new_table), table_height
def draw(self, cr, layout, width, dpi_x, dpi_y):
#calculate real table width
table_width = width * self._style.get_width() / 100
# TODO is a table always left aligned??
table_height = 0
# draw all the rows
for row in self._children:
cr.save()
cr.translate(0, table_height)
row_height = row.draw(cr, layout, table_width, dpi_x, dpi_y)
cr.restore()
table_height += row_height
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(0, 0, table_width, table_height)
cr.stroke()
return table_height
class GtkDocTableRow(GtkDocBaseElement):
"""Implement a row in a table.
"""
_type = 'ROW'
_allowed_children = ['CELL']
def divide(self, layout, width, height, dpi_x, dpi_y):
# the highest cell gives the height of the row
cell_heights = []
dividedrow = False
cell_width_iter = self._style.__iter__()
new_row = GtkDocTableRow(self._style)
for cell in self._children:
cell_width = 0
for i in range(cell.get_span()):
cell_width += cell_width_iter.next()
cell_width = cell_width * width / 100
(c1, c2), cell_height = cell.divide(layout, cell_width, height,
dpi_x, dpi_y)
cell_heights.append(cell_height)
if c2 is None:
emptycell = GtkDocTableCell(c1._style, c1.get_span())
new_row.add_child(emptycell)
else:
dividedrow = True
new_row.add_child(c2)
# save height [inch] of the row to be able to draw exact cell border
row_height = max(cell_heights)
self.height = row_height / dpi_y
# return the new row if dividing was needed
if dividedrow:
if row_height == 0:
for cell in self._children:
cell._style.set_top_border(False)
cell._style.set_left_border(False)
cell._style.set_right_border(False)
return (self, new_row), row_height
else:
return (self, None), row_height
def draw(self, cr, layout, width, dpi_x, dpi_y):
cr.save()
# get the height of this row
row_height = self.height * dpi_y
# draw all the cells in the row
cell_width_iter = self._style.__iter__()
for cell in self._children:
cell_width = 0
for i in range(cell.get_span()):
cell_width += cell_width_iter.next()
cell_width = cell_width * width / 100
cell.draw(cr, layout, cell_width, row_height, dpi_x, dpi_y)
cr.translate(cell_width, 0)
cr.restore()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(0, 0, 1.0)
cr.rectangle(0, 0, width, row_height)
cr.stroke()
return row_height
class GtkDocTableCell(GtkDocBaseElement):
"""Implement a cell in a table row.
"""
_type = 'CELL'
_allowed_children = ['PARAGRAPH', 'IMAGE']
def __init__(self, style, span=1):
GtkDocBaseElement.__init__(self, style)
self._span = span
def get_span(self):
return self._span
def divide(self, layout, width, height, dpi_x, dpi_y):
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
# calculate real available width
width -= 2 * h_padding
available_height = height
# calculate height of each child
cell_height = 0
new_cell = None
e2 = None
childnr = 0
for child in self._children:
if new_cell is None:
(e1, e2), child_height = child.divide(layout, width,
available_height, dpi_x, dpi_y)
cell_height += child_height
available_height -= child_height
if e2 is not None:
#divide the cell
new_style = TableCellStyle(self._style)
if e1 is not None:
new_style.set_top_border(False)
new_cell = GtkDocTableCell(new_style, self._span)
new_cell.add_child(e2)
# then update this cell
self._style.set_bottom_border(False)
if e1 is not None:
childnr += 1
else:
#cell has been divided
new_cell.add_child(child)
self._children = self._children[:childnr]
# calculate real height
if cell_height != 0:
cell_height += 2 * v_padding
# a cell can't be divided, return the height
return (self, new_cell), cell_height
def draw(self, cr, layout, width, cell_height, dpi_x, dpi_y):
"""Draw a cell.
This draw method is a bit different from the others, as common
cell height of all cells in a row is also given as parameter.
This is needed to be able to draw proper vertical borders around
each cell, i.e. the border should be as long as the highest cell
in the given row.
"""
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
# calculate real available width
i_width = width - 2 * h_padding
# draw children
cr.save()
cr.translate(h_padding, v_padding)
for child in self._children:
child_height = child.draw(cr, layout, i_width, dpi_x, dpi_y)
cr.translate(0, child_height)
cr.restore()
# draw the borders
if self._style.get_top_border():
cr.move_to(0, 0)
cr.rel_line_to(width, 0)
if self._style.get_right_border():
cr.move_to(width, 0)
cr.rel_line_to(0, cell_height)
if self._style.get_bottom_border():
cr.move_to(0, cell_height)
cr.rel_line_to(width, 0)
if self._style.get_left_border():
cr.move_to(0, 0)
cr.line_to(0, cell_height)
cr.set_line_width(1)
cr.set_source_rgb(0, 0, 0)
cr.stroke()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(0, 1.0, 0)
cr.rectangle(0, 0, width, cell_height)
cr.stroke()
return cell_height
class GtkDocPicture(GtkDocBaseElement):
"""Implement an image.
"""
_type = 'IMAGE'
_allowed_children = []
def __init__(self, style, filename, width, height, crop=None):
GtkDocBaseElement.__init__(self, style)
self._filename = filename
self._width = width
self._height = height
self._crop = crop
def divide(self, layout, width, height, dpi_x, dpi_y):
img_width = self._width * dpi_x / 2.54
img_height = self._height * dpi_y / 2.54
# image can't be divided, a new page must begin
# if it can't fit on the current one
if img_height <= height:
return (self, None), img_height
else:
return (None, self), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
from gi.repository import Gdk
img_width = self._width * dpi_x / 2.54
img_height = self._height * dpi_y / 2.54
if self._style == 'right':
l_margin = width - img_width
elif self._style == 'center':
l_margin = (width - img_width) / 2.0
else:
l_margin = 0
# load the image and get its extents
pixbuf = resize_to_buffer(self._filename, [img_width, img_height],
self._crop)
pixbuf_width = pixbuf.get_width()
pixbuf_height = pixbuf.get_height()
# calculate the scale to fit image into the set extents
scale = min(img_width / pixbuf_width, img_height / pixbuf_height)
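# e.g. a 400x200 pixbuf in a 100x100 point box: scale = min(0.25, 0.5)
# = 0.25, so the image is drawn 100 points wide and 50 points high.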
# draw the image
cr.save()
cr.translate(l_margin, 0)
cr.scale(scale, scale)
# paint the pixbuf onto the cairo context, centred in the scaled extents
Gdk.cairo_set_source_pixbuf(cr, pixbuf,
                            (img_width / scale - pixbuf_width) / 2,
                            (img_height / scale - pixbuf_height) / 2)
cr.rectangle(0, 0, img_width / scale, img_height / scale)
##gcr.set_source_pixbuf(pixbuf,
##(img_width - pixbuf_width) / 2,
##(img_height - pixbuf_height) / 2)
##cr.rectangle(0 , 0, img_width, img_height)
##cr.scale(scale, scale)
cr.fill()
cr.restore()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(l_margin, 0, img_width, img_height)
cr.stroke()
return (img_height)
class GtkDocFrame(GtkDocBaseElement):
"""Implement a frame.
"""
_type = 'FRAME'
_allowed_children = ['LINE', 'POLYGON', 'BOX', 'TEXT']
def divide(self, layout, width, height, dpi_x, dpi_y):
frame_width = round(self._style.width * dpi_x / 2.54)
frame_height = round(self._style.height * dpi_y / 2.54)
t_margin = self._style.spacing[2] * dpi_y / 2.54
b_margin = self._style.spacing[3] * dpi_y / 2.54
# frame can't be divided, a new page must begin
# if it can't fit on the current one
if frame_height + t_margin + b_margin <= height:
return (self, None), frame_height + t_margin + b_margin
elif frame_height + t_margin <= height:
return (self, None), height
else:
return (None, self), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
frame_width = self._style.width * dpi_x / 2.54
frame_height = self._style.height * dpi_y / 2.54
l_margin = self._style.spacing[0] * dpi_x / 2.54
r_margin = self._style.spacing[1] * dpi_x / 2.54
t_margin = self._style.spacing[2] * dpi_y / 2.54
b_margin = self._style.spacing[3] * dpi_y / 2.54
if self._style.align == 'left':
x_offset = l_margin
elif self._style.align == 'right':
x_offset = width - r_margin - frame_width
elif self._style.align == 'center':
x_offset = (width - frame_width) / 2.0
else:
raise ValueError
# draw each element in the frame
cr.save()
cr.translate(x_offset, t_margin)
cr.rectangle(0, 0, frame_width, frame_height)
cr.clip()
for elem in self._children:
elem.draw(cr, layout, frame_width, dpi_x, dpi_y)
cr.restore()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(x_offset, t_margin, frame_width, frame_height)
cr.stroke()
return frame_height + t_margin + b_margin
class GtkDocLine(GtkDocBaseElement):
"""Implement a line.
"""
_type = 'LINE'
_allowed_children = []
def __init__(self, style, x1, y1, x2, y2):
GtkDocBaseElement.__init__(self, style)
self._start = (x1, y1)
self._end = (x2, y2)
def draw(self, cr, layout, width, dpi_x, dpi_y):
start = (self._start[0] * dpi_x / 2.54, self._start[1] * dpi_y / 2.54)
end = (self._end[0] * dpi_x / 2.54, self._end[1] * dpi_y / 2.54)
line_color = ReportUtils.rgb_color(self._style.get_color())
cr.save()
cr.set_source_rgb(*line_color)
cr.set_line_width(self._style.get_line_width())
# TODO line style
line_style = self._style.get_line_style()
if line_style != SOLID:
cr.set_dash(self._style.get_dash_style(line_style), 0)
cr.move_to(*start)
cr.line_to(*end)
cr.stroke()
cr.restore()
return 0
class GtkDocPolygon(GtkDocBaseElement):
"""Implement a line.
"""
_type = 'POLYGON'
_allowed_children = []
def __init__(self, style, path):
GtkDocBaseElement.__init__(self, style)
self._path = path
def draw(self, cr, layout, width, dpi_x, dpi_y):
path = [(x * dpi_x / 2.54, y * dpi_y / 2.54) for (x, y) in self._path]
path_start = path.pop(0)
path_stroke_color = ReportUtils.rgb_color(self._style.get_color())
path_fill_color = ReportUtils.rgb_color(self._style.get_fill_color())
cr.save()
cr.move_to(*path_start)
for (x, y) in path:
cr.line_to(x, y)
cr.close_path()
cr.set_source_rgb(*path_fill_color)
cr.fill_preserve()
cr.set_source_rgb(*path_stroke_color)
cr.set_line_width(self._style.get_line_width())
# TODO line style
line_style = self._style.get_line_style()
if line_style != SOLID:
cr.set_dash(self._style.get_dash_style(line_style), 0)
cr.stroke()
cr.restore()
return 0
class GtkDocBox(GtkDocBaseElement):
"""Implement a box with optional shadow around it.
"""
_type = 'BOX'
_allowed_children = []
def __init__(self, style, x, y, width, height):
GtkDocBaseElement.__init__(self, style)
self._x = x
self._y = y
self._width = width
self._height = height
def draw(self, cr, layout, width, dpi_x, dpi_y):
box_x = self._x * dpi_x / 2.54
box_y = self._y * dpi_y / 2.54
box_width = self._width * dpi_x / 2.54
box_height = self._height * dpi_y / 2.54
box_stroke_color = ReportUtils.rgb_color((0, 0, 0))
box_fill_color = ReportUtils.rgb_color(self._style.get_fill_color())
shadow_color = ReportUtils.rgb_color((192, 192, 192))
cr.save()
cr.set_line_width(self._style.get_line_width())
# TODO line style
line_style = self._style.get_line_style()
if line_style != SOLID:
cr.set_dash(self._style.get_dash_style(line_style), 0)
if self._style.get_shadow():
shadow_x = box_x + self._style.get_shadow_space() * dpi_x / 2.54
shadow_y = box_y + self._style.get_shadow_space() * dpi_y / 2.54
cr.set_source_rgb(*shadow_color)
cr.rectangle(shadow_x, shadow_y, box_width, box_height)
cr.fill()
cr.rectangle(box_x, box_y, box_width, box_height)
cr.set_source_rgb(*box_fill_color)
cr.fill_preserve()
cr.set_source_rgb(*box_stroke_color)
cr.stroke()
cr.restore()
return 0
class GtkDocText(GtkDocBaseElement):
"""Implement a text on graphical reports.
"""
_type = 'TEXT'
_allowed_children = []
# line spacing is not defined in ParagraphStyle
spacingfractionfont = 0.2
def __init__(self, style, vertical_alignment, text, x, y,
angle=0, mark=None):
GtkDocBaseElement.__init__(self, style)
self._align_y = vertical_alignment
self._text = text
self._x = x
self._y = y
self._angle = angle
self._marklist = []
if mark:
self._marklist = [mark]
def draw(self, cr, layout, width, dpi_x, dpi_y):
text_x = self._x * dpi_x / 2.54
text_y = self._y * dpi_y / 2.54
# turn off text wrapping
layout.set_width(-1)
# set paragraph properties
align = self._style.get_alignment_text()
if align == 'left':
layout.set_alignment(Pango.Alignment.LEFT)
elif align == 'right':
layout.set_alignment(Pango.Alignment.RIGHT)
elif align == 'center':
layout.set_alignment(Pango.Alignment.CENTER)
elif align == 'justify':
layout.set_justify(True)
else:
raise ValueError
#
font_style = self._style.get_font()
layout.set_font_description(fontstyle_to_fontdescription(font_style))
#set line spacing based on font:
spacing = font_style.get_size() * self.spacingfractionfont
layout.set_spacing(int(round(spacing * Pango.SCALE)))
# layout the text
layout.set_markup(self._text)
layout_width, layout_height = layout.get_pixel_size()
# calculate horizontal and vertical alignment shift
if align == 'left':
align_x = 0
elif align == 'right':
align_x = - layout_width
elif align == 'center' or align == 'justify':
align_x = - layout_width / 2
else:
            raise ValueError("Unknown text alignment: %s" % align)
if self._align_y == 'top':
align_y = 0
elif self._align_y == 'center':
align_y = - layout_height / 2
elif self._align_y == 'bottom':
align_y = - layout_height
else:
            raise ValueError("Unknown vertical alignment: %s" % self._align_y)
# render the layout onto the cairo surface
cr.save()
cr.translate(text_x, text_y)
cr.rotate(radians(self._angle))
cr.move_to(align_x, align_y)
cr.set_source_rgb(*ReportUtils.rgb_color(font_style.get_color()))
PangoCairo.show_layout(cr, layout)
cr.restore()
return layout_height
def get_marks(self):
"""
Return the index mark for this text
"""
return self._marklist
#------------------------------------------------------------------------
#
# CairoDoc class
#
#------------------------------------------------------------------------
class CairoDoc(BaseDoc, TextDoc, DrawDoc):
"""Act as an abstract document that can render onto a cairo context.
Maintains an abstract model of the document. The root of this abstract
    document is self._doc. The model is built via the subclassed BaseDoc, and
the implemented TextDoc, DrawDoc interface methods.
It can render the model onto cairo context pages, according to the received
page style.
"""
# BaseDoc implementation
def open(self, filename):
if filename[-4:] != '.pdf':
filename = filename + '.pdf'
self._backend = CairoBackend(filename)
self._doc = GtkDocDocument()
self._active_element = self._doc
self._pages = []
self._elements_to_paginate = []
self._links_error = False
def close(self):
self.run()
# TextDoc implementation
def page_break(self):
self._active_element.add_child(GtkDocPagebreak())
def start_bold(self):
self.__write_text('<b>', markup=True)
def end_bold(self):
self.__write_text('</b>', markup=True)
def start_superscript(self):
self.__write_text('<small><sup>', markup=True)
def end_superscript(self):
self.__write_text('</sup></small>', markup=True)
def start_paragraph(self, style_name, leader=None):
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
new_paragraph = GtkDocParagraph(style, leader)
self._active_element.add_child(new_paragraph)
self._active_element = new_paragraph
def end_paragraph(self):
self._active_element = self._active_element.get_parent()
def start_table(self, name, style_name):
style_sheet = self.get_style_sheet()
style = style_sheet.get_table_style(style_name)
new_table = GtkDocTable(style)
self._active_element.add_child(new_table)
self._active_element = new_table
# we need to remember the column width list from the table style.
        # this is an ugly hack, but we have no better idea at the moment.
self._active_row_style = map(style.get_column_width,
range(style.get_columns()))
def end_table(self):
self._active_element = self._active_element.get_parent()
def start_row(self):
new_row = GtkDocTableRow(self._active_row_style)
self._active_element.add_child(new_row)
self._active_element = new_row
def end_row(self):
self._active_element = self._active_element.get_parent()
def start_cell(self, style_name, span=1):
style_sheet = self.get_style_sheet()
style = style_sheet.get_cell_style(style_name)
new_cell = GtkDocTableCell(style, span)
self._active_element.add_child(new_cell)
self._active_element = new_cell
def end_cell(self):
self._active_element = self._active_element.get_parent()
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
Convenience function to write a styledtext to the cairo doc.
styledtext : assumed a StyledText object to write
format : = 0 : Flowed, = 1 : Preformatted
style_name : name of the style to use for default presentation
contains_html: bool, the backend should not check if html is present.
If contains_html=True, then the textdoc is free to handle that in
some way. Eg, a textdoc could remove all tags, or could make sure
a link is clickable. CairoDoc does nothing different for html notes
links: bool, true if URLs should be made clickable
"""
text = str(styledtext)
s_tags = styledtext.get_tags()
#FIXME: following split should be regex to match \n\s*\n instead?
markuptext = self._backend.add_markup_from_styled(text, s_tags,
split='\n\n')
if format == 1:
#preformatted, retain whitespace. Cairo retains \n automatically,
#so use \n\n for paragraph detection
#FIXME: following split should be regex to match \n\s*\n instead?
for line in markuptext.split('\n\n'):
self.start_paragraph(style_name)
self.__write_text(line, markup=True, links=links)
self.end_paragraph()
elif format == 0:
#flowed
#FIXME: following split should be regex to match \n\s*\n instead?
for line in markuptext.split('\n\n'):
self.start_paragraph(style_name)
#flowed, normal whitespace goes away, but we keep linebreak
lines = line.split('\n')
newlines = []
for singleline in lines:
newlines.append(' '.join(singleline.split()))
self.__write_text('\n'.join(newlines), markup=True, links=links)
self.end_paragraph()
def __markup(self, text, markup=None):
if not markup:
# We need to escape the text here for later Pango.Layout.set_markup
# calls. This way we save the markup created by the report
# The markup in the note editor is not in the text so is not
# considered. It must be added by pango too
text = self._backend.ESCAPE_FUNC()(text)
return text
def __write_text(self, text, mark=None, markup=False, links=False):
"""
@param text: text to write.
@param mark: IndexMark to use for indexing
@param markup: True if text already contains markup info.
Then text will no longer be escaped
@param links: True if URLs should be made clickable
"""
        if links:
            import cairo
            if cairo.cairo_version() < 11210 and not self._links_error:
                # Cairo v1.12 is supposed to be the first version
                # that supports clickable links
                print """
WARNING: This version of cairo (%s) does NOT support clickable links.
The first version that is supposed to is v1.12. See the roadmap:
    http://www.cairographics.org/roadmap/
The workaround is to save to another format that supports clickable
links (like ODF) and write PDF from that format.
""" % cairo.version
self._links_error = True
text = self.__markup(text, markup)
if mark:
self._active_element.add_mark(mark)
self._active_element.add_text(text)
def write_text(self, text, mark=None, links=False):
"""Write a normal piece of text according to the
present style
@param text: text to write.
@param mark: IndexMark to use for indexing
@param links: True if URLs should be made clickable
"""
self.__write_text(text, mark, links=links)
def write_markup(self, text, s_tags):
"""
Writes the text in the current paragraph. Should only be used after a
start_paragraph and before an end_paragraph.
@param text: text to write. The text is assumed to be _not_ escaped
@param s_tags: assumed to be list of styledtexttags to apply to the
text
"""
markuptext = self._backend.add_markup_from_styled(text, s_tags)
self.__write_text(markuptext, markup=True)
def add_media_object(self, name, pos, x_cm, y_cm, alt='',
style_name=None, crop=None):
new_image = GtkDocPicture(pos, name, x_cm, y_cm, crop=crop)
self._active_element.add_child(new_image)
if len(alt):
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
style.set_alignment(PARA_ALIGN_CENTER)
# Center the caption under the image
if pos == "right":
style.set_left_margin(self.get_usable_width() - new_image._width)
else:
style.set_right_margin(self.get_usable_width() - new_image._width)
new_paragraph = GtkDocParagraph(style)
new_paragraph.add_text('\n'.join(alt))
self._active_element.add_child(new_paragraph)
def insert_toc(self):
"""
Insert a Table of Contents at this point in the document.
"""
self._doc.add_child(GtkDocTableOfContents())
def insert_index(self):
"""
Insert an Alphabetical Index at this point in the document.
"""
self._doc.add_child(GtkDocAlphabeticalIndex())
# DrawDoc implementation
def start_page(self):
# if this is not the first page we need to "close" the previous one
children = self._doc.get_children()
if children and children[-1].get_type() != 'PAGEBREAK':
self._doc.add_child(GtkDocPagebreak())
new_frame_style = FrameStyle(width=self.get_usable_width(),
height=self.get_usable_height())
new_frame = GtkDocFrame(new_frame_style)
self._active_element.add_child(new_frame)
self._active_element = new_frame
def end_page(self):
self._active_element = self._active_element.get_parent()
def draw_line(self, style_name, x1, y1, x2, y2):
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
new_line = GtkDocLine(style, x1, y1, x2, y2)
self._active_element.add_child(new_line)
def draw_path(self, style_name, path):
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
new_polygon = GtkDocPolygon(style, path)
self._active_element.add_child(new_polygon)
def draw_box(self, style_name, text, x, y, w, h, mark=None):
""" @param mark: IndexMark to use for indexing """
# we handle the box and...
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
new_box = GtkDocBox(style, x, y, w, h)
self._active_element.add_child(new_box)
# ...the text separately
paragraph_style_name = style.get_paragraph_style()
if paragraph_style_name:
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_LEFT)
# horizontal position of the text is not included in the style,
# we assume that it is the size of the shadow, or 0.2mm
if style.get_shadow():
x_offset = style.get_shadow_space()
else:
x_offset = 0.2
new_text = GtkDocText(paragraph_style, 'center',
self.__markup(text),
x + x_offset, y + h / 2, angle=0, mark=mark)
self._active_element.add_child(new_text)
def draw_text(self, style_name, text, x, y, mark=None):
""" @param mark: IndexMark to use for indexing """
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
paragraph_style_name = style.get_paragraph_style()
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_LEFT)
new_text = GtkDocText(paragraph_style, 'top',
self.__markup(text), x, y, angle=0, mark=mark)
self._active_element.add_child(new_text)
def center_text(self, style_name, text, x, y, mark=None):
""" @param mark: IndexMark to use for indexing """
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
paragraph_style_name = style.get_paragraph_style()
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_CENTER)
new_text = GtkDocText(paragraph_style, 'top',
self.__markup(text), x, y, angle=0, mark=mark)
self._active_element.add_child(new_text)
def rotate_text(self, style_name, text, x, y, angle, mark=None):
""" @param mark: IndexMark to use for indexing """
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
paragraph_style_name = style.get_paragraph_style()
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_CENTER)
new_text = GtkDocText(paragraph_style, 'center',
self.__markup('\n'.join(text)), x, y, angle, mark)
self._active_element.add_child(new_text)
# paginating and drawing interface
def run(self):
"""Create the physical output from the meta document.
It must be implemented in the subclasses. The idea is that with
different subclass different output could be generated:
e.g. Print, PDF, PS, PNG (which are currently supported by Cairo).
"""
raise NotImplementedError
def paginate_document(self, layout, page_width, page_height, dpi_x, dpi_y):
"""Paginate the entire document.
"""
while not self.paginate(layout, page_width, page_height, dpi_x, dpi_y):
pass
def paginate(self, layout, page_width, page_height, dpi_x, dpi_y):
"""Paginate the meta document in chunks.
Only one document level element is handled at one run.
"""
        # if this is the first run then initialize the variables
if not self._elements_to_paginate:
self._elements_to_paginate = self._doc.get_children()[:]
self._pages.append(GtkDocDocument())
self._available_height = page_height
# try to fit the next element to current page, divide it if needed
if not self._elements_to_paginate:
            # this is a self._doc to which nothing was added; the single page stays empty
return True
elem = self._elements_to_paginate.pop(0)
(e1, e2), e1_h = elem.divide(layout,
page_width,
self._available_height,
dpi_x,
dpi_y)
# if (part of) it fits on current page add it
if e1 is not None:
self._pages[len(self._pages) - 1].add_child(e1)
# if elem was divided remember the second half to be processed
if e2 is not None:
self._elements_to_paginate.insert(0, e2)
# calculate how much space left on current page
self._available_height -= e1_h
# start new page if needed
if (e1 is None) or (e2 is not None):
self._pages.append(GtkDocDocument())
self._available_height = page_height
return len(self._elements_to_paginate) == 0
def draw_page(self, page_nr, cr, layout, width, height, dpi_x, dpi_y):
"""Draw a page on a Cairo context.
"""
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(0, 1.0, 0)
cr.rectangle(0, 0, width, height)
cr.stroke()
self._pages[page_nr].draw(cr, layout, width, dpi_x, dpi_y)
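#------------------------------------------------------------------------
#
# Hedged usage sketch (not part of the original module)
#
#------------------------------------------------------------------------
# A concrete backend subclasses CairoDoc and implements run() by paginating
# the abstract model and then drawing each page onto a cairo context. The
# names `MyPdfDoc`, `cr`, `layout` and the page geometry below are
# hypothetical placeholders; the sketch is kept commented out so that
# importing this module stays side-effect free.
#
# class MyPdfDoc(CairoDoc):
#     def run(self):
#         # cr, layout and the page geometry come from the cairo/Pango backend
#         self.paginate_document(layout, page_width, page_height, dpi_x, dpi_y)
#         for page_nr in range(len(self._pages)):
#             self.draw_page(page_nr, cr, layout, page_width, page_height,
#                            dpi_x, dpi_y)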
|
arunkgupta/gramps
|
gramps/plugins/lib/libcairodoc.py
|
Python
|
gpl-2.0
| 60,637
|
[
"Brian"
] |
7546da03fa7585d5c02a83a89e09b3cd4dd7b8afe2fa835094bf2bc460d37ad9
|
import os, re
import argparse
import pandas as pd
parser = argparse.ArgumentParser(description='Parse GASSST output to BLAST tabular output')
parser.add_argument('-i', type=str,dest='input',
help='Input GASSST output File')
parser.add_argument('-o', dest='Output', type=str,default=os.path.join(os.getcwd(),"out.txt"),
help='Output file')
args = parser.parse_args()
f=open(args.input,"r")
lines=f.readlines()
f.close()
data=pd.DataFrame()
BANKs=[x for x in lines if x.startswith("BANK")]
QUERYs=[x for x in lines if x.startswith("QUERY")]
BANKs_fields=[[x for x in re.split("\s",y) if x!=''] for y in BANKs]
QUERYs_fields=[[x for x in re.split("\s",y) if x!=''] for y in QUERYs]
gaps=[float(z[0].replace('gap(s):','')) for z in [y for y in [re.findall('gap\(s\):[0-9 ]+',x) for x in lines] if len(y)>0]]
mismatches=[float(z[0].replace('# mismatche(s):','')) for z in [y for y in [re.findall('# mismatche\(s\):[0-9 ]+',x) for x in lines] if len(y)>0]]
data["qseqid"]=[x[-1] for x in QUERYs_fields]
data["sseqid"]=[x[-1] for x in BANKs_fields]
data["qlen"]=[abs(int(x[3]) -int(x[1]) )+1 for x in QUERYs_fields]
data["slen"]=[abs(int(x[3]) -int(x[1]) )+1 for x in BANKs_fields]
data["qstart"]=[int(x[1]) for x in QUERYs_fields]
data["qend"]=[int(x[3]) for x in QUERYs_fields]
data["sstart"]=[int(x[1]) for x in BANKs_fields]
data["send"]=[int(x[3]) for x in BANKs_fields]
data["length"]=[len(x[2]) for x in BANKs_fields]
data["evalue"]=[float(z[0].replace('e-value:','')) for z in [y for y in [re.findall('e-value:[0-9 e \- \+ \.]+',x) for x in lines] if len(y)>0]]
data["pident"]=list(map(lambda gap,mis,alen: 100*(1-((gap+mis)/alen)),gaps,mismatches,data["length"]))
data["sseq"]=[x[2].upper() for x in BANKs_fields]
data.to_csv(args.Output, sep='\t',header=False,index=False,float_format="%g")
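# Hedged usage example (not part of the original script); the file names are
# hypothetical placeholders:
#
#     python Gassst2blast.py -i sample.gassst -o sample_blast6.txt
#
# The result is a tab-separated, headerless table in (approximately) BLAST
# outfmt-6 column order, one row per GASSST hit.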
|
bioinfo-core-BGU/neatseq-flow_modules
|
neatseq_flow_modules/Liron/Gassst_module/Gassst2blast.py
|
Python
|
gpl-3.0
| 1,841
|
[
"BLAST"
] |
92b04623853ec65f4b17e9c7854a5bb08e05cab1d8b51794e63441e713455dab
|
"""Plotting functions for visualizing distributions."""
from __future__ import division
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
from six import string_types
try:
import statsmodels.nonparametric.api as smnp
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .utils import set_hls_values, iqr, _kde_support
from .palettes import color_palette, blend_palette
from .axisgrid import JointGrid
__all__ = ["distplot", "kdeplot", "rugplot", "jointplot"]
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From http://stats.stackexchange.com/questions/798/
a = np.asarray(a)
h = 2 * iqr(a) / (len(a) ** (1 / 3))
# fall back to sqrt(a) bins if iqr is 0
if h == 0:
return int(np.sqrt(a.size))
else:
return int(np.ceil((a.max() - a.min()) / h))
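# Hedged illustration (not part of the original module): the rule sets the bin
# width to h = 2 * IQR / n ** (1 / 3). For 1000 draws from a standard normal
# the IQR is about 1.35, so h ~= 2 * 1.35 / 10 = 0.27, and a sample spanning
# roughly 6.5 units gives on the order of 25 bins:
#
# >>> np.random.seed(0)
# >>> _freedman_diaconis_bins(np.random.randn(1000))  # doctest: +SKIP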
def distplot(a, bins=None, hist=True, kde=True, rug=False, fit=None,
hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
color=None, vertical=False, norm_hist=False, axlabel=None,
label=None, ax=None):
"""Flexibly plot a univariate distribution of observations.
This function combines the matplotlib ``hist`` function (with automatic
calculation of a good default bin size) with the seaborn :func:`kdeplot`
and :func:`rugplot` functions. It can also fit ``scipy.stats``
distributions and plot the estimated PDF over the data.
Parameters
----------
a : Series, 1d-array, or list.
Observed data. If this is a Series object with a ``name`` attribute,
the name will be used to label the data axis.
bins : argument for matplotlib hist(), or None, optional
Specification of hist bins, or None to use Freedman-Diaconis rule.
hist : bool, optional
Whether to plot a (normed) histogram.
kde : bool, optional
Whether to plot a gaussian kernel density estimate.
rug : bool, optional
Whether to draw a rugplot on the support axis.
fit : random variable object, optional
An object with `fit` method, returning a tuple that can be passed to a
        `pdf` method as positional arguments following a grid of values to
        evaluate the pdf on.
{hist, kde, rug, fit}_kws : dictionaries, optional
Keyword arguments for underlying plotting functions.
color : matplotlib color, optional
Color to plot everything but the fitted curve in.
vertical : bool, optional
        If True, observed values are on y-axis.
norm_hist : bool, optional
If True, the histogram height shows a density rather than a count.
This is implied if a KDE or fitted density is plotted.
axlabel : string, False, or None, optional
Name for the support axis label. If None, will try to get it
        from a.name; if False, do not set a label.
label : string, optional
        Legend label for the relevant component of the plot
ax : matplotlib axis, optional
if provided, plot on this axis
Returns
-------
ax : matplotlib Axes
Returns the Axes object with the plot for further tweaking.
See Also
--------
kdeplot : Show a univariate or bivariate distribution with a kernel
density estimate.
rugplot : Draw small vertical lines to show each observation in a
distribution.
Examples
--------
Show a default plot with a kernel density estimate and histogram with bin
size determined automatically with a reference rule:
.. plot::
:context: close-figs
>>> import seaborn as sns, numpy as np
>>> sns.set(rc={"figure.figsize": (8, 4)}); np.random.seed(0)
>>> x = np.random.randn(100)
>>> ax = sns.distplot(x)
Use Pandas objects to get an informative axis label:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> x = pd.Series(x, name="x variable")
>>> ax = sns.distplot(x)
    Plot the distribution with a kernel density estimate and rug plot:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, rug=True, hist=False)
Plot the distribution with a histogram and maximum likelihood gaussian
distribution fit:
.. plot::
:context: close-figs
>>> from scipy.stats import norm
>>> ax = sns.distplot(x, fit=norm, kde=False)
Plot the distribution on the vertical axis:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, vertical=True)
Change the color of all the plot elements:
.. plot::
:context: close-figs
>>> sns.set_color_codes()
>>> ax = sns.distplot(x, color="y")
Pass specific parameters to the underlying plot functions:
.. plot::
:context: close-figs
>>> ax = sns.distplot(x, rug=True, rug_kws={"color": "g"},
... kde_kws={"color": "k", "lw": 3, "label": "KDE"},
... hist_kws={"histtype": "step", "linewidth": 3,
... "alpha": 1, "color": "g"})
"""
if ax is None:
ax = plt.gca()
# Intelligently label the support axis
label_ax = bool(axlabel)
if axlabel is None and hasattr(a, "name"):
axlabel = a.name
if axlabel is not None:
label_ax = True
    # Make a 1-d array
a = np.asarray(a).squeeze()
# Decide if the hist is normed
norm_hist = norm_hist or kde or (fit is not None)
# Handle dictionary defaults
if hist_kws is None:
hist_kws = dict()
if kde_kws is None:
kde_kws = dict()
if rug_kws is None:
rug_kws = dict()
if fit_kws is None:
fit_kws = dict()
# Get the color from the current color cycle
if color is None:
if vertical:
line, = ax.plot(0, a.mean())
else:
line, = ax.plot(a.mean(), 0)
color = line.get_color()
line.remove()
# Plug the label into the right kwarg dictionary
if label is not None:
if hist:
hist_kws["label"] = label
elif kde:
kde_kws["label"] = label
elif rug:
rug_kws["label"] = label
elif fit:
fit_kws["label"] = label
if hist:
if bins is None:
bins = min(_freedman_diaconis_bins(a), 50)
hist_kws.setdefault("alpha", 0.4)
hist_kws.setdefault("normed", norm_hist)
orientation = "horizontal" if vertical else "vertical"
hist_color = hist_kws.pop("color", color)
ax.hist(a, bins, orientation=orientation,
color=hist_color, **hist_kws)
if hist_color != color:
hist_kws["color"] = hist_color
if kde:
kde_color = kde_kws.pop("color", color)
kdeplot(a, vertical=vertical, ax=ax, color=kde_color, **kde_kws)
if kde_color != color:
kde_kws["color"] = kde_color
if rug:
rug_color = rug_kws.pop("color", color)
axis = "y" if vertical else "x"
rugplot(a, axis=axis, ax=ax, color=rug_color, **rug_kws)
if rug_color != color:
rug_kws["color"] = rug_color
if fit is not None:
fit_color = fit_kws.pop("color", "#282828")
gridsize = fit_kws.pop("gridsize", 200)
cut = fit_kws.pop("cut", 3)
clip = fit_kws.pop("clip", (-np.inf, np.inf))
bw = stats.gaussian_kde(a).scotts_factor() * a.std(ddof=1)
x = _kde_support(a, bw, gridsize, cut, clip)
params = fit.fit(a)
pdf = lambda x: fit.pdf(x, *params)
y = pdf(x)
if vertical:
x, y = y, x
ax.plot(x, y, color=fit_color, **fit_kws)
if fit_color != "#282828":
fit_kws["color"] = fit_color
if label_ax:
if vertical:
ax.set_ylabel(axlabel)
else:
ax.set_xlabel(axlabel)
return ax
def _univariate_kdeplot(data, shade, vertical, kernel, bw, gridsize, cut,
clip, legend, ax, cumulative=False, **kwargs):
"""Plot a univariate kernel density estimate on one of the axes."""
# Sort out the clipping
if clip is None:
clip = (-np.inf, np.inf)
# Calculate the KDE
if _has_statsmodels:
# Prefer using statsmodels for kernel flexibility
x, y = _statsmodels_univariate_kde(data, kernel, bw,
gridsize, cut, clip,
cumulative=cumulative)
else:
# Fall back to scipy if missing statsmodels
if kernel != "gau":
kernel = "gau"
msg = "Kernel other than `gau` requires statsmodels."
warnings.warn(msg, UserWarning)
if cumulative:
raise ImportError("Cumulative distributions are currently"
"only implemented in statsmodels."
"Please install statsmodels.")
x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
# Flip the data if the plot should be on the y axis
if vertical:
x, y = y, x
# Check if a label was specified in the call
label = kwargs.pop("label", None)
# Otherwise check if the data object has a name
if label is None and hasattr(data, "name"):
label = data.name
# Decide if we're going to add a legend
legend = label is not None and legend
label = "_nolegend_" if label is None else label
# Use the active color cycle to find the plot color
line, = ax.plot(x, y, **kwargs)
color = line.get_color()
line.remove()
kwargs.pop("color", None)
# Draw the KDE plot and, optionally, shade
ax.plot(x, y, color=color, label=label, **kwargs)
alpha = kwargs.get("alpha", 0.25)
if shade:
if vertical:
ax.fill_betweenx(y, 1e-12, x, facecolor=color, alpha=alpha)
else:
ax.fill_between(x, 1e-12, y, facecolor=color, alpha=alpha)
# Draw the legend here
if legend:
ax.legend(loc="best")
return ax
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
cumulative=False):
"""Compute a univariate kernel density estimate using statsmodels."""
fft = kernel == "gau"
kde = smnp.KDEUnivariate(data)
kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)
if cumulative:
grid, y = kde.support, kde.cdf
else:
grid, y = kde.support, kde.density
return grid, y
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
"""Compute a univariate kernel density estimate using scipy."""
try:
kde = stats.gaussian_kde(data, bw_method=bw)
except TypeError:
kde = stats.gaussian_kde(data)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
if isinstance(bw, string_types):
bw = "scotts" if bw == "scott" else bw
bw = getattr(kde, "%s_factor" % bw)() * np.std(data)
grid = _kde_support(data, bw, gridsize, cut, clip)
y = kde(grid)
return grid, y
def _bivariate_kdeplot(x, y, filled, fill_lowest,
kernel, bw, gridsize, cut, clip,
axlabel, ax, **kwargs):
"""Plot a joint KDE estimate as a bivariate contour plot."""
# Determine the clipping
if clip is None:
clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
elif np.ndim(clip) == 1:
clip = [clip, clip]
# Calculate the KDE
if _has_statsmodels:
xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
else:
xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
# Plot the contours
n_levels = kwargs.pop("n_levels", 10)
cmap = kwargs.get("cmap", "BuGn" if filled else "BuGn_d")
if isinstance(cmap, string_types):
if cmap.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
cmap = blend_palette(pal, as_cmap=True)
else:
cmap = mpl.cm.get_cmap(cmap)
kwargs["cmap"] = cmap
contour_func = ax.contourf if filled else ax.contour
cset = contour_func(xx, yy, z, n_levels, **kwargs)
if filled and not fill_lowest:
cset.collections[0].set_alpha(0)
kwargs["n_levels"] = n_levels
# Label the axes
if hasattr(x, "name") and axlabel:
ax.set_xlabel(x.name)
if hasattr(y, "name") and axlabel:
ax.set_ylabel(y.name)
return ax
def _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using statsmodels."""
if isinstance(bw, string_types):
bw_func = getattr(smnp.bandwidths, "bw_" + bw)
x_bw = bw_func(x)
y_bw = bw_func(y)
bw = [x_bw, y_bw]
elif np.isscalar(bw):
bw = [bw, bw]
if isinstance(x, pd.Series):
x = x.values
if isinstance(y, pd.Series):
y = y.values
kde = smnp.KDEMultivariate([x, y], "cc", bw)
x_support = _kde_support(x, kde.bw[0], gridsize, cut, clip[0])
y_support = _kde_support(y, kde.bw[1], gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using scipy."""
data = np.c_[x, y]
kde = stats.gaussian_kde(data.T)
data_std = data.std(axis=0, ddof=1)
if isinstance(bw, string_types):
bw = "scotts" if bw == "scott" else bw
bw_x = getattr(kde, "%s_factor" % bw)() * data_std[0]
bw_y = getattr(kde, "%s_factor" % bw)() * data_std[1]
elif np.isscalar(bw):
bw_x, bw_y = bw, bw
else:
msg = ("Cannot specify a different bandwidth for each dimension "
"with the scipy backend. You should install statsmodels.")
raise ValueError(msg)
x_support = _kde_support(data[:, 0], bw_x, gridsize, cut, clip[0])
y_support = _kde_support(data[:, 1], bw_y, gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def kdeplot(data, data2=None, shade=False, vertical=False, kernel="gau",
bw="scott", gridsize=100, cut=3, clip=None, legend=True,
cumulative=False, shade_lowest=True, ax=None, **kwargs):
"""Fit and plot a univariate or bivariate kernel density estimate.
Parameters
----------
data : 1d array-like
Input data.
data2: 1d array-like, optional
Second input data. If present, a bivariate KDE will be estimated.
shade : bool, optional
If True, shade in the area under the KDE curve (or draw with filled
contours when data is bivariate).
vertical : bool, optional
If True, density is on x-axis.
kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
Code for shape of kernel to fit with. Bivariate KDE can only use
gaussian kernel.
bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
Name of reference method to determine kernel size, scalar factor,
or scalar for each dimension of the bivariate plot.
gridsize : int, optional
Number of discrete points in the evaluation grid.
cut : scalar, optional
Draw the estimate to cut * bw from the extreme data points.
clip : pair of scalars, or pair of pair of scalars, optional
Lower and upper bounds for datapoints used to fit KDE. Can provide
a pair of (low, high) bounds for bivariate plots.
legend : bool, optional
If True, add a legend or label the axes when possible.
cumulative : bool, optional
If True, draw the cumulative distribution estimated by the kde.
shade_lowest : bool, optional
If True, shade the lowest contour of a bivariate KDE plot. Not
relevant when drawing a univariate plot or when ``shade=False``.
Setting this to ``False`` can be useful when you want multiple
densities on the same Axes.
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis.
kwargs : key, value pairings
Other keyword arguments are passed to ``plt.plot()`` or
``plt.contour{f}`` depending on whether a univariate or bivariate
plot is being drawn.
Returns
-------
ax : matplotlib Axes
Axes with plot.
See Also
--------
distplot: Flexibly plot a univariate distribution of observations.
jointplot: Plot a joint dataset with bivariate and marginal distributions.
Examples
--------
Plot a basic univariate density:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(10)
>>> import seaborn as sns; sns.set(color_codes=True)
>>> mean, cov = [0, 2], [(1, .5), (.5, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, size=50).T
>>> ax = sns.kdeplot(x)
Shade under the density curve and use a different color:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, shade=True, color="r")
Plot a bivariate density:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, y)
Use filled contours:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, y, shade=True)
Use more contour levels and a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, y, n_levels=30, cmap="Purples_d")
    Use a narrower bandwidth:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, bw=.15)
Plot the density on the vertical axis:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(y, vertical=True)
Limit the density curve within the range of the data:
.. plot::
:context: close-figs
>>> ax = sns.kdeplot(x, cut=0)
Plot two shaded bivariate densities:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> setosa = iris.loc[iris.species == "setosa"]
>>> virginica = iris.loc[iris.species == "virginica"]
>>> ax = sns.kdeplot(setosa.sepal_width, setosa.sepal_length,
... cmap="Reds", shade=True, shade_lowest=False)
>>> ax = sns.kdeplot(virginica.sepal_width, virginica.sepal_length,
... cmap="Blues", shade=True, shade_lowest=False)
"""
if ax is None:
ax = plt.gca()
data = data.astype(np.float64)
if data2 is not None:
data2 = data2.astype(np.float64)
bivariate = False
if isinstance(data, np.ndarray) and np.ndim(data) > 1:
bivariate = True
x, y = data.T
elif isinstance(data, pd.DataFrame) and np.ndim(data) > 1:
bivariate = True
x = data.iloc[:, 0].values
y = data.iloc[:, 1].values
elif data2 is not None:
bivariate = True
x = data
y = data2
if bivariate and cumulative:
raise TypeError("Cumulative distribution plots are not"
"supported for bivariate distributions.")
if bivariate:
ax = _bivariate_kdeplot(x, y, shade, shade_lowest,
kernel, bw, gridsize, cut, clip, legend,
ax, **kwargs)
else:
ax = _univariate_kdeplot(data, shade, vertical, kernel, bw,
gridsize, cut, clip, legend, ax,
cumulative=cumulative, **kwargs)
return ax
def rugplot(a, height=.05, axis="x", ax=None, **kwargs):
"""Plot datapoints in an array as sticks on an axis.
Parameters
----------
a : vector
1D array of observations.
height : scalar, optional
Height of ticks as proportion of the axis.
axis : {'x' | 'y'}, optional
Axis to draw rugplot on.
ax : matplotlib axes, optional
Axes to draw plot into; otherwise grabs current axes.
kwargs : key, value pairings
Other keyword arguments are passed to ``axvline`` or ``axhline``.
Returns
-------
ax : matplotlib axes
The Axes object with the plot on it.
"""
if ax is None:
ax = plt.gca()
a = np.asarray(a)
vertical = kwargs.pop("vertical", axis == "y")
func = ax.axhline if vertical else ax.axvline
kwargs.setdefault("linewidth", 1)
for pt in a:
func(pt, 0, height, **kwargs)
return ax
def jointplot(x, y, data=None, kind="scatter", stat_func=stats.pearsonr,
color=None, size=6, ratio=5, space=.2,
dropna=True, xlim=None, ylim=None,
joint_kws=None, marginal_kws=None, annot_kws=None, **kwargs):
"""Draw a plot of two variables with bivariate and univariate graphs.
This function provides a convenient interface to the :class:`JointGrid`
class, with several canned plot kinds. This is intended to be a fairly
lightweight wrapper; if you need more flexibility, you should use
:class:`JointGrid` directly.
Parameters
----------
x, y : strings or vectors
Data or names of variables in ``data``.
data : DataFrame, optional
DataFrame when ``x`` and ``y`` are variable names.
kind : { "scatter" | "reg" | "resid" | "kde" | "hex" }, optional
Kind of plot to draw.
stat_func : callable or None, optional
Function used to calculate a statistic about the relationship and
annotate the plot. Should map `x` and `y` either to a single value
or to a (value, p) tuple. Set to ``None`` if you don't want to
annotate the plot.
color : matplotlib color, optional
Color used for the plot elements.
size : numeric, optional
Size of the figure (it will be square).
ratio : numeric, optional
Ratio of joint axes size to marginal axes height.
space : numeric, optional
Space between the joint and marginal axes
dropna : bool, optional
If True, remove observations that are missing from ``x`` and ``y``.
{x, y}lim : two-tuples, optional
Axis limits to set before plotting.
{joint, marginal, annot}_kws : dicts, optional
Additional keyword arguments for the plot components.
kwargs : key, value pairings
Additional keyword arguments are passed to the function used to
draw the plot on the joint Axes, superseding items in the
``joint_kws`` dictionary.
Returns
-------
grid : :class:`JointGrid`
:class:`JointGrid` object with the plot on it.
See Also
--------
JointGrid : The Grid class used for drawing this plot. Use it directly if
you need more flexibility.
Examples
--------
Draw a scatterplot with marginal histograms:
.. plot::
:context: close-figs
>>> import numpy as np, pandas as pd; np.random.seed(0)
>>> import seaborn as sns; sns.set(style="white", color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.jointplot(x="total_bill", y="tip", data=tips)
Add regression and kernel density fits:
.. plot::
:context: close-figs
>>> g = sns.jointplot("total_bill", "tip", data=tips, kind="reg")
Replace the scatterplot with a joint histogram using hexagonal bins:
.. plot::
:context: close-figs
>>> g = sns.jointplot("total_bill", "tip", data=tips, kind="hex")
Replace the scatterplots and histograms with density estimates and align
the marginal Axes tightly with the joint Axes:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> g = sns.jointplot("sepal_width", "petal_length", data=iris,
... kind="kde", space=0, color="g")
Use a different statistic for the annotation:
.. plot::
:context: close-figs
>>> from scipy.stats import spearmanr
>>> g = sns.jointplot("size", "total_bill", data=tips,
... stat_func=spearmanr, color="m")
Draw a scatterplot, then add a joint density estimate:
.. plot::
:context: close-figs
>>> g = (sns.jointplot("sepal_length", "sepal_width",
... data=iris, color="k")
... .plot_joint(sns.kdeplot, zorder=0, n_levels=6))
Pass vectors in directly without using Pandas, then name the axes:
.. plot::
:context: close-figs
>>> x, y = np.random.randn(2, 300)
>>> g = (sns.jointplot(x, y, kind="hex", stat_func=None)
... .set_axis_labels("x", "y"))
Draw a smaller figure with more space devoted to the marginal plots:
.. plot::
:context: close-figs
>>> g = sns.jointplot("total_bill", "tip", data=tips,
... size=5, ratio=3, color="g")
Pass keyword arguments down to the underlying plots:
.. plot::
:context: close-figs
>>> g = sns.jointplot("petal_length", "sepal_length", data=iris,
... marginal_kws=dict(bins=15, rug=True),
... annot_kws=dict(stat="r"),
... s=40, edgecolor="w", linewidth=1)
"""
# Set up empty default kwarg dicts
if joint_kws is None:
joint_kws = {}
joint_kws.update(kwargs)
if marginal_kws is None:
marginal_kws = {}
if annot_kws is None:
annot_kws = {}
# Make a colormap based off the plot color
if color is None:
color = color_palette()[0]
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [set_hls_values(color_rgb, l=l) for l in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Initialize the JointGrid object
grid = JointGrid(x, y, data, dropna=dropna,
size=size, ratio=ratio, space=space,
xlim=xlim, ylim=ylim)
# Plot the data using the grid
if kind == "scatter":
joint_kws.setdefault("color", color)
grid.plot_joint(plt.scatter, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = _freedman_diaconis_bins(grid.x)
y_bins = _freedman_diaconis_bins(grid.y)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("kde"):
joint_kws.setdefault("shade", True)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("shade", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("reg"):
from .linearmodels import regplot
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
from .linearmodels import residplot
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", False)
distplot(x, ax=grid.ax_marg_x, **marginal_kws)
distplot(y, vertical=True, fit=stats.norm, ax=grid.ax_marg_y,
**marginal_kws)
stat_func = None
else:
msg = "kind must be either 'scatter', 'reg', 'resid', 'kde', or 'hex'"
raise ValueError(msg)
if stat_func is not None:
grid.annotate(stat_func, **annot_kws)
return grid
|
JWarmenhoven/seaborn
|
seaborn/distributions.py
|
Python
|
bsd-3-clause
| 28,496
|
[
"Gaussian"
] |
107182888b8b61637a46f711ec8d4cfa138b12d5b45970dadc5f22afddc8be24
|
# -*- coding: utf-8 -*-
"""
This module contains a custom streamline class derived from the MayaVi2
Streamline class, modified to accept an array of seed points for visualisation
using mayavi.
.. warning::
    The documentation for this class cannot be built on Read The Docs; it is, however, possible to build it locally.
You can use this class thus:
Create a new Streamline instance and add it to a pipeline
"""
import numpy as np
from tvtk.api import tvtk
from traits.api import Instance, TraitPrefixList, Trait, Array
import mayavi
from mayavi.modules.streamline import Streamline
__all__ = ['SeedStreamline']
class SeedStreamline(Streamline):
"""
This class is a modification of the mayavi Streamline class that accepts
    an array of seed points as input rather than a widget.
Examples
--------
Create a new Streamline instance and add it to a pipeline
>>> from solarbextrapolation.mayavi_seed_streamlines import SeedStreamline
>>> import numpy as np
>>> seeds = [[1, 2, 5], [3, 4, 5]]
>>> field_lines = SeedStreamline(seed_points = np.array(seeds)) #doctest: +SKIP
>>> myvectorfield.add_child(field_lines) #doctest: +SKIP
"""
seed_points = Array(allow_none=False)
seed = Instance(tvtk.PolyData, args=())
update_mode = Trait('interactive', TraitPrefixList(['interactive',
'semi-interactive',
'non-interactive']),
desc='the speed at which the poly data is updated')
def setup_pipeline(self):
"""Override this method so that it *creates* the tvtk
pipeline.
This method is invoked when the object is initialized via
`__init__`. Note that at the time this method is called, the
tvtk data pipeline will *not* yet be setup. So upstream data
will not be available. The idea is that you simply create the
basic objects and setup those parts of the pipeline not
dependent on upstream sources and filters. You should also
set the `actors` attribute up at this point.
"""
# Create and setup the default objects.
self.seed = tvtk.PolyData(points=self.seed_points)
self.stream_tracer = tvtk.StreamTracer(maximum_propagation=2000,
integration_direction='backward',
compute_vorticity=False,
integrator_type='runge_kutta4',
)
self.ribbon_filter = tvtk.RibbonFilter()
self.tube_filter = tvtk.TubeFilter()
self.actor = mayavi.components.actor.Actor()
# Setup the actor suitably for this module.
self.actor.property.line_width = 2.0
def update_pipeline(self):
"""Override this method so that it *updates* the tvtk pipeline
when data upstream is known to have changed.
This method is invoked (automatically) when any of the inputs
sends a `pipeline_changed` event.
"""
mm = self.module_manager
if mm is None:
return
src = mm.source
self.stream_tracer.input = src.outputs[0]
#self.seed.inputs = [src]
# Setup the radius/width of the tube/ribbon filters based on
# given input.
if self._first:
b = src.outputs[0].bounds
l = [(b[1]-b[0]), (b[3]-b[2]), (b[5]-b[4])]
length = np.sqrt(l[0]*l[0] + l[1]*l[1] + l[2]*l[2])
self.ribbon_filter.width = length*0.0075
self.tube_filter.radius = length*0.0075
self._first = False
self._streamline_type_changed(self.streamline_type)
# Set the LUT for the mapper.
self.actor.set_lut(mm.scalar_lut_manager.lut)
self.pipeline_changed = True
def _seed_points_changed(self, old, new):
self.seed = tvtk.PolyData(points=self.seed_points)
def _stream_tracer_changed(self, old, new):
if old is not None:
old.on_trait_change(self.render, remove=True)
seed = self.seed
if seed is not None:
new.source = seed
new.on_trait_change(self.render)
mm = self.module_manager
if mm is not None:
new.input = mm.source.outputs[0]
# A default output so there are no pipeline errors. The
# update_pipeline call corrects this if needed.
self.outputs = [new.output]
self.update_pipeline()
def _seed_changed(self, old, new):
st = self.stream_tracer
if st is not None:
            st.source = new  # .poly_data
#self._change_components(old, new)
|
Alex-Ian-Hamilton/solarbextrapolation
|
solarbextrapolation/mayavi_seed_streamlines.py
|
Python
|
mit
| 4,775
|
[
"Mayavi"
] |
9f9f7d488baa6601ce2be2669dda93c50c37ddd7a7e06a945651aab794fd4610
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Downloads poems from poets.org and saves them to a text file.
This is just a quick hack, but it's licensed under the GNU GPL, either version
3 or, at your option, any later version; see the file LICENSE.md for details.
This code is copyright 2017 by Patrick Mooney.
"""
import html, os, requests, xml
import bs4 # https://www.crummy.com/software/BeautifulSoup/
from bs4 import BeautifulSoup
import bleach # https://pypi.python.org/pypi/bleach
import text_handling as th # From https://github.com/patrick-brian-mooney/python-personal-library
files_to_download = '/LibidoMechanica/scrapers/poets.org/urls.list'
failed_poems = [][:]
def remove_tags(text):
"""Thanks to http://stackoverflow.com/a/35880312"""
return bleach.clean(text, tags=[], attributes={}, styles=[], strip=True)
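# Hedged illustration (not part of the original script): with tags=[] and
# strip=True, bleach.clean() drops every tag but keeps the enclosed text, so
# remove_tags('<p>Ode <b>to</b> a cat</p>') would return 'Ode to a cat'.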
with open(files_to_download) as list_file:
url_list = [ f.strip() for f in list_file.readlines() ]
for which_url in sorted(url_list):
try:
if not len(which_url.strip()): continue # Don't bother trying to process blank lines
print("Processing '%s' ... " % which_url, end='')
page = requests.get(which_url)
soup = BeautifulSoup(page.content, 'html.parser')
html_title = soup.find('title').decode()
html_title = remove_tags(html_title.split('-')[0]) # Split page title on hyphen, take the leftmost bit
poem_title, poem_author = html_title.split('by') # Yes, that's the delimiter
poem_filename = '%s/%s: "%s"' % (os.path.dirname(files_to_download), poem_author.strip(), poem_title.strip())
# the_html = [x for x in list(soup.children) if isinstance(x, bs4.element.Tag)][0] # Get access by tag.
poem_with_cruft = '\n'.join([t.decode() for t in soup.find_all('div', class_='field-name-body')])
# OK, do any HTML preprocessing we need to do.
poem_with_cruft = th.multi_replace(poem_with_cruft, [['<br>', '\n'],
['<br />', '\n'],
['<br/>', '\n'],])
poem_with_cruft = poem_with_cruft.replace('<div>', '<div>\n\n')
poem_with_cruft = poem_with_cruft.replace('<p>', '<p>\n\n')
plain_text_poem = remove_tags(poem_with_cruft)
plain_text_poem = html.unescape(plain_text_poem)
with open(poem_filename.strip(), mode="w") as poem_file:
poem_file.write(plain_text_poem)
print('done!')
except Exception as e:
failed_poems += [ which_url ]
print("\n\nAll URLs processed! Hooray!\n")
if len(failed_poems):
with open('%s/failed.url' % os.path.dirname(files_to_download), mode='w') as failed_file:
failed_file.writelines(['%s\n' % l for l in failed_poems])
    print('\n ... but %d failed URLs written to %s/failed.url. Alas.\n\n' % (len(failed_poems), os.path.dirname(files_to_download)))
|
patrick-brian-mooney/LibidoMechanica
|
scrapers/poets.org.py
|
Python
|
gpl-3.0
| 3,025
|
[
"Brian"
] |
1841d6cb14694226e59e52f5c75271eafb1bc78851213b517cab9220dd39d0a1
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2017-10-11 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1960-2020/0.5x0.5/combined_sources_HCHO_1960-2020_greg.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i304: HCHO surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i304'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='HCHO'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
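# Hedged sanity check (not part of the original script): area-weighted
# regridding should approximately conserve the global total of the flux.
# Kept commented out; iris.analysis.cartography.area_weights() is a real
# Iris helper, but whether and how to validate is left to the user.
#
# from iris.analysis.cartography import area_weights
# total_in = nems.collapsed(['longitude', 'latitude'], iris.analysis.SUM,
#                           weights=area_weights(nems))
# total_out = ocube.collapsed(['longitude', 'latitude'], iris.analysis.SUM,
#                             weights=area_weights(ocube))
# print total_in.data, total_out.data  # should agree to within a few percent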
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name=str.strip(species_name)+' surf emissions'
ocube.standard_name='tendency_of_atmosphere_mass_content_of_formaldehyde_due_to_emission'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=str.strip(species_name)
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_HCHO_1960-2020_greg.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of formaldehyde from 1960 to 2020'
ocube.attributes['File_version']='v2'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945 ])
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945 ], dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want the file to be concatenatable along time (no unlimited time dimension), as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# hack to set a missing_value attribute as well as a _FillValue attribute; Iris rejects 'missing_value' through the normal attributes interface, hence dict.__setitem__
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])
# end of script
|
acsis-project/emissions
|
emissions/python/timeseries_1960-2020/regrid_HCHO_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 17,334
|
[
"NetCDF"
] |
fd2e578bd49485db8ae7cf8cbd6d01c3b012f751a20559d0d3822cd25d8e078b
|
import os
from ase import *
from ase.parallel import rank, barrier
from gpaw.utilities import equal
from gpaw import Calculator
from gpaw.atom.generator import Generator
from gpaw.atom.configurations import parameters
from gpaw.xc_functional import XCFunctional
from gpaw import setup_paths
def test():
# Generate setup
symbol = 'He'
if rank == 0:
g = Generator(symbol, 'revPBE', scalarrel=True, nofiles=True)
g.run(exx=True, **parameters[symbol])
barrier()
setup_paths.insert(0, '.')
a = 7.5 * Bohr
n = 16
atoms = Atoms([Atom('He', (0.0, 0.0, 0.0))], cell=(a, a, a), pbc=True)
calc = Calculator(gpts=(n, n, n), nbands=1, xc='revPBE')
atoms.set_calculator(calc)
e1 = atoms.get_potential_energy()
calc.write('He')
e2 = e1 + calc.get_xc_difference('vdWDF')
print e1, e2
if 'VDW' in os.environ:
test()
|
qsnake/gpaw
|
oldtest/nonselfconsistentvdw.py
|
Python
|
gpl-3.0
| 878
|
[
"ASE",
"GPAW"
] |
14f12a39de28264ccac2a0340a79b760a0642bf236fd62535d03a1faffec17e6
|
"""
Methods related to importing data.
"""
import copy
import csv
import os
import shutil
import re
import subprocess
from tempfile import mkdtemp
from tempfile import mkstemp
from tempfile import NamedTemporaryFile
from BCBio import GFF
from Bio import Entrez
from Bio import SeqIO
from celery import task
from django.conf import settings
from django.db import transaction
from main.celery_util import assert_celery_running
from main.exceptions import ValidationException
from main.models import Chromosome
from main.models import Dataset
from main.models import ExperimentSample
from main.models import ReferenceGenome
from main.models import VariantSet
from main.models import VariantToVariantSet
from main.model_utils import clean_filesystem_location
from main.model_utils import get_dataset_with_type
from main.s3 import project_files_needed
from pipeline.read_alignment_util import ensure_bwa_index
from pipeline.variant_effects import build_snpeff
from utils import generate_safe_filename_prefix_from_label
from utils import uppercase_underscore
from utils.jbrowse_util import prepare_jbrowse_ref_sequence
from utils.jbrowse_util import add_genbank_file_track
from variants.vcf_parser import get_or_create_variant
IMPORT_FORMAT_TO_DATASET_TYPE = {
'fasta': Dataset.TYPE.REFERENCE_GENOME_FASTA,
'genbank': Dataset.TYPE.REFERENCE_GENOME_GENBANK,
'gff': Dataset.TYPE.REFERENCE_GENOME_GFF,
'vcf_user': Dataset.TYPE.VCF_USERINPUT
}
SAMPLE_SERVER_COPY_KEY__SAMPLE_NAME = 'Sample_Name'
SAMPLE_SERVER_COPY_KEY__READ_1 = 'Read_1_Path'
SAMPLE_SERVER_COPY_KEY__READ_2 = 'Read_2_Path'
REQUIRED_SAMPLE_SERVER_COPY_HEADER = [
SAMPLE_SERVER_COPY_KEY__SAMPLE_NAME,
SAMPLE_SERVER_COPY_KEY__READ_1,
]
# Cols that we know about, to distinguish them from user-defined cols.
PRE_DEFINED_SAMPLE_SERVER_COPY_HEADER_PARTS = (
REQUIRED_SAMPLE_SERVER_COPY_HEADER +
[SAMPLE_SERVER_COPY_KEY__READ_2])
SAMPLE_BROWSER_UPLOAD_KEY__SAMPLE_NAME = 'Sample_Name'
SAMPLE_BROWSER_UPLOAD_KEY__READ_1 = 'Read_1_Filename'
SAMPLE_BROWSER_UPLOAD_KEY__READ_2 = 'Read_2_Filename'
REQUIRED_SAMPLE_UPLOAD_THROUGH_BROWSER_HEADER = [
SAMPLE_BROWSER_UPLOAD_KEY__SAMPLE_NAME,
SAMPLE_BROWSER_UPLOAD_KEY__READ_1,
]
# Cols that we know about, to distinguish them from user-defined cols.
PRE_DEFINED_SAMPLE_UPLOAD_THROUGH_BROWSER_PARTS = (
REQUIRED_SAMPLE_UPLOAD_THROUGH_BROWSER_HEADER +
[SAMPLE_BROWSER_UPLOAD_KEY__READ_2])
REQUIRED_VCF_HEADER_PART = ['CHROM', 'POS', 'ID', 'REF', 'ALT']
if settings.S3_ENABLED:
from main.s3 import s3_temp_get, s3_get
def import_reference_genome_from_s3(project, label, s3file, import_format):
with s3_temp_get(s3file) as f:
return import_reference_genome_from_local_file(
project, label, f, import_format)
@project_files_needed
def import_samples_from_s3(project, targets_file_rows, s3files):
tmp_dir = mkdtemp()
local_s3files_map = {}
for s3file in s3files:
filepath = os.path.join(tmp_dir, s3file.name)
s3_get(s3file.key, filepath)
local_s3files_map[s3file.name] = filepath
for row in targets_file_rows:
sample_label = row['Sample_Name']
experiment_sample = ExperimentSample.objects.create(
project=project, label=sample_label)
copy_and_add_dataset_source(experiment_sample, Dataset.TYPE.FASTQ1,
Dataset.TYPE.FASTQ1, local_s3files_map[row['Read_1_Path']])
if 'Read_2_Path' in row and row['Read_2_Path']:
copy_and_add_dataset_source(experiment_sample, Dataset.TYPE.FASTQ2,
Dataset.TYPE.FASTQ2, local_s3files_map[row['Read_2_Path']])
shutil.rmtree(tmp_dir)
class DataImportError(Exception):
"""Exception thrown when there are errors in imported data.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return 'DataImportError: ' + str(self.msg)
@project_files_needed
def import_reference_genome_from_local_file(project, label, file_location,
import_format, move=False):
"""Creates a ReferenceGenome associated with the given Project.
Args:
        project: The Project we're storing everything relative to.
label: The human-readable label for the ReferenceGenome.
file_location: Location of the genome on the server.
import_format: Must be 'fasta' or 'genbank'.
move: move instead of copy the original file_location - for instance,
if we saved it to a temporary file. Moving is of course faster than
copying.
Returns:
ReferenceGenome.
"""
# Validate the input.
assert import_format in ['fasta', 'genbank']
# Validate the file.
assert os.path.exists(file_location), "File %s doesn't exist." % (
file_location)
# Validate the input by parsing it with BioPython, while also
# counting the number of chromosomes.
num_bases = 0
for genome_record in SeqIO.parse(file_location, import_format):
num_bases += len(genome_record)
# Make sure sequence exists.
if not num_bases > 0:
raise DataImportError("No sequence in file.")
# Create the ReferenceGenome object.
reference_genome = ReferenceGenome.objects.create(
project=project,
label=label)
# Copy the source file to the ReferenceGenome data location.
dataset_type = IMPORT_FORMAT_TO_DATASET_TYPE[import_format]
copy_and_add_dataset_source(reference_genome, dataset_type,
dataset_type, file_location)
return reference_genome
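# Illustrative usage sketch (not called anywhere in this module); the
# project object, label and path below are hypothetical placeholders:
#
#   ref_genome = import_reference_genome_from_local_file(
#       project, 'my_reference', '/tmp/my_reference.fa', 'fasta')
#   assert ref_genome.label == 'my_reference'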
def add_chromosomes(reference_genome, dataset):
""" Makes a Chromosome for each unique SeqRecord.name in the dataset
"""
chromosomes = [chrom.label for chrom \
in Chromosome.objects.filter(reference_genome=reference_genome)]
def _make_chromosome(seq_rec_iter):
for seq_record in seq_rec_iter:
if seq_record.name and seq_record.name not in chromosomes:
Chromosome.objects.create(reference_genome=reference_genome,
label=seq_record.name, num_bases=len(seq_record))
# Add chromosome ids
dataset_path = dataset.get_absolute_location()
    # NOTE: compare the instance's type against the class constants; the
    # original tested dataset.TYPE.REFERENCE_GENOME_FASTA, which is a
    # constant and therefore always truthy.
    if dataset.type == Dataset.TYPE.REFERENCE_GENOME_FASTA:
        _make_chromosome(SeqIO.parse(dataset_path, "fasta"))
    elif dataset.type == Dataset.TYPE.REFERENCE_GENOME_GENBANK:
        _make_chromosome(SeqIO.parse(dataset_path, "genbank"))
else:
raise AssertionError("Unexpected Dataset type")
def generate_fasta_from_genbank(ref_genome):
"""If this reference genome has a genbank but not a FASTA, generate
a FASTA from the genbank. """
# If a FASTA already exists, then just return.
if ref_genome.dataset_set.filter(
type=Dataset.TYPE.REFERENCE_GENOME_FASTA).exists():
return
# Check that a genbank exists.
assert ref_genome.dataset_set.filter(
type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).exists()
# Get genbank path and filename components (for creating FASTA file name).
genbank_path = get_dataset_with_type(
ref_genome,
type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).get_absolute_location()
genbank_dir, genbank_filename = os.path.split(genbank_path)
genbank_noext = os.path.splitext(genbank_filename)[0]
# Put the fasta file in the same dir, just change the extension to .fa.
fasta_filename = os.path.join(genbank_dir, (genbank_noext + '.fa'))
# Get the individual records, each corresponding to a chromosome.
genome_records = list(SeqIO.parse(genbank_path, 'genbank'))
    # SnpEFF takes the name attr, but BioPython uses the id attr to make its
# fasta file, so overwrite the id with the name when converting to fasta.
for genome_record in genome_records:
genome_record.id = genome_record.name
SeqIO.write(genome_records, fasta_filename, 'fasta')
dataset_type = IMPORT_FORMAT_TO_DATASET_TYPE['fasta']
copy_and_add_dataset_source(ref_genome, dataset_type,
dataset_type, fasta_filename)
def ensure_fasta_index(ref_genome_fasta):
"""
Check if a fasta index is present w/ extension .fai. If not,
use samtools to generate one.
"""
if not os.path.exists(ref_genome_fasta + '.fai'):
subprocess.check_call([
settings.SAMTOOLS_BINARY,
'faidx',
ref_genome_fasta])
def generate_gff_from_genbank(ref_genome):
"""If this reference genome has a genbank but not a GFF, generate
a GFF from the genbank. """
# If a GFF already exists, then just return.
if ref_genome.dataset_set.filter(
type=Dataset.TYPE.REFERENCE_GENOME_GFF).exists():
return
# Check that a genbank exists.
assert ref_genome.dataset_set.filter(
type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).exists()
# Get genbank path and filename components (for creating GFF file name).
genbank_path = get_dataset_with_type(
ref_genome,
type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).get_absolute_location()
genbank_dir, genbank_filename = os.path.split(genbank_path)
genbank_noext = os.path.splitext(genbank_filename)[0]
# Put the GFF file in the same dir, just change the extension to .gff.
gff_filename = os.path.join(genbank_dir, (genbank_noext + '.gff'))
# Get the individual records, each corresponding to a chromosome.
genome_records = list(SeqIO.parse(genbank_path, 'genbank'))
    # SnpEFF takes the name attr, but BioPython uses the id attr to make its
# GFF file, so overwrite the id with the name when converting to GFF.
for genome_record in genome_records:
genome_record.id = genome_record.name
GFF.write(genome_records, open(gff_filename, 'w'))
dataset_type = IMPORT_FORMAT_TO_DATASET_TYPE['gff']
copy_and_add_dataset_source(ref_genome, dataset_type,
dataset_type, gff_filename)
def import_reference_genome_from_ncbi(project, label, record_id, import_format):
"""Imports a reference genome by accession from NCBI using efetch.
"""
# Validate the input.
assert import_format in ['fasta', 'genbank'], (
'Import Format must be \'fasta\' or \'genbank\'')
# Format keys for Efetch.
# More info at: http://www.ncbi.nlm.nih.gov/
# books/NBK25499/table/chapter4.chapter4_table1/?report=objectonly
CONVERT_FORMAT = {
'fasta': 'fa',
'genbank': 'gbwithparts'
}
# What suffix to use for each input format
# TODO: Should this be a property of the Dataset TYPE?
FORMAT_SUFFIX = {
'fasta': '.fa',
'genbank': '.gb'
}
Entrez.email = settings.EMAIL
handle = Entrez.efetch(
db="nuccore",
id=record_id,
rettype=CONVERT_FORMAT[import_format],
retmode="text")
# Store results in temporary file.
filename_prefix = generate_safe_filename_prefix_from_label(label) + '_'
temp = NamedTemporaryFile(delete=False, prefix=filename_prefix,
suffix=FORMAT_SUFFIX[import_format])
temp.write(handle.read())
handle.close()
temp.close()
# Create ref genome from this temporary file.
reference_genome = import_reference_genome_from_local_file(
project, label, temp.name, import_format, move=True)
# Clean up.
if os.path.isfile(temp.name):
os.remove(temp.name)
return reference_genome
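# Illustrative usage sketch (accession, label and project are hypothetical
# examples, not values used elsewhere in this module):
#
#   ref_genome = import_reference_genome_from_ncbi(
#       project, 'mg1655', 'U00096.3', 'genbank')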
def sanitize_record_id(record_id_string):
"""We want to grab only the first word-only part of each seqrecord in a
FASTA/Genbank file, and use that as a consistent and readable id between
genbank and FASTA.
"""
return re.match( r'^\w{1,20}', record_id_string).group()
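def _example_sanitize_record_id():
    """Illustrative only (not called anywhere): shows how
    sanitize_record_id() keeps just the leading word characters of a
    hypothetical Genbank-style record id.
    """
    assert sanitize_record_id('NC_000913.3 some description') == 'NC_000913'
    assert sanitize_record_id('chr1') == 'chr1'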
def _assert_sample_targets_file_size(targets_file):
if hasattr(targets_file, "size"):
assert targets_file.size < 1000000, (
"Targets file is too large: %d" % targets_file.size)
@project_files_needed
def import_samples_from_targets_file(project, targets_file, options={}):
"""Uses the uploaded targets file to add a set of samples to the project.
We need to check each line of the targets file for consistency before we
do anything, however. Checking is moved to parse_targets_file() which parses
targets_file and returns valid rows. parse_targets_file() will also be
called from parse_targets_file_s3 in xhr_handlers in case of S3 uploading.
It writes a copy of the uploaded targets file to a temporary file
Args:
        project: The project we're storing everything relative to.
targets_file: The UploadedFile django object that holds the targets
in .tsv format.
options: Dictionary of options. Currently a hack to allow different
parts of the pipeline to not run during tests (e.g. FastQC).
"""
assert_celery_running()
parsed_rows = parse_experiment_sample_targets_file(
project,
targets_file,
REQUIRED_SAMPLE_SERVER_COPY_HEADER,
SAMPLE_SERVER_COPY_KEY__SAMPLE_NAME,
SAMPLE_SERVER_COPY_KEY__READ_1,
SAMPLE_SERVER_COPY_KEY__READ_2)
# We perform the additional step of testing file locations, and for the
# test data, switching out $GD_ROOT template variable.
valid_rows = []
for row in parsed_rows:
updated_row = copy.copy(row)
for field, value in row.iteritems():
if field in (SAMPLE_SERVER_COPY_KEY__READ_1,
SAMPLE_SERVER_COPY_KEY__READ_2):
updated_value = value.replace('$GD_ROOT', settings.PWD)
                # Open inside the try so a missing/unreadable file also
                # raises the AssertionError (a bare except around read()
                # alone never caught the open() failure).
                try:
                    with open(updated_value, 'rb') as test_file:
                        test_file.read(8)
                except (IOError, OSError):
                    raise AssertionError(
                        "Cannot read file at %s" % updated_value)
updated_row[field] = updated_value
valid_rows.append(updated_row)
return create_samples_from_row_data(project, valid_rows, move=False,
options=options)
def create_samples_from_row_data(
project, data_source_list, move=False, options={}):
"""Creates ExperimentSample objects along with their respective Datasets.
The data is copied to the entity location. We block until we've created the
models, and then go async for actual copying.
Args:
project: Project these Samples should be added to.
data_source_list: List of objects with keys:
* Sample_Name
* Read_1_Path
* Read_2_Path (optional)
* other metadata keys (optional)
* ...
move: Whether to move the source data. Else copy.
options: Dictionary of options. Currently a hack to allow different
parts of the pipeline to not run during tests (e.g. FastQC).
Returns:
List of ExperimentSamples.
"""
experiment_samples = []
for row in data_source_list:
# Create ExperimentSample object and then store the data relative to
# it.
sample_label = row['Sample_Name']
experiment_sample = ExperimentSample.objects.create(
project=project, label=sample_label)
# Create the Datasets before starting copying so we can show status in
# the ui. This is a new pattern where we are moving copying to happen
# asynchronously in Celery.
_create_fastq_dataset(
experiment_sample, row['Read_1_Path'], Dataset.TYPE.FASTQ1,
Dataset.STATUS.QUEUED_TO_COPY)
maybe_read2_path = row.get('Read_2_Path', '')
if maybe_read2_path:
_create_fastq_dataset(
experiment_sample, maybe_read2_path, Dataset.TYPE.FASTQ2,
Dataset.STATUS.QUEUED_TO_COPY)
# Add extra metadata columns.
_update_experiment_sample_data_for_row(experiment_sample, row,
PRE_DEFINED_SAMPLE_SERVER_COPY_HEADER_PARTS)
# Start the async job of copying.
copy_experiment_sample_data.delay(
project, experiment_sample, row, move=move, options=options)
experiment_samples.append(experiment_sample)
_update_experiment_sample_parentage(experiment_samples)
return experiment_samples
def _create_fastq_dataset(experiment_sample, fastq_source, dataset_type,
dataset_status):
"""Helper function for creating a Dataset that will point to a file.
    Since clients of this function are responsible for actually copying the
data, this function sets the is_present bit to False on the Dataset.
"""
fastq_dest = _get_copy_target_path(experiment_sample, fastq_source)
reads_dataset = add_dataset_to_entity(experiment_sample,
dataset_type, dataset_type, fastq_dest)
reads_dataset.status = dataset_status
reads_dataset.save()
return reads_dataset
def _copy_dataset_data(experiment_sample, fastq_source, dataset_type,
move=False, set_status=Dataset.STATUS.VERIFYING):
"""Helper to copy data and set status.
"""
dataset = experiment_sample.dataset_set.get(type=dataset_type)
dataset.status = Dataset.STATUS.COPYING
dataset.save()
copy_dataset_to_entity_data_dir(experiment_sample, fastq_source, move=move)
dataset.status = set_status
dataset.save()
return dataset
@task
@project_files_needed
def copy_experiment_sample_data(
project, experiment_sample, data, move=False,
options={'skip_fastqc': False}):
"""Celery task that wraps the process of copying the data for an
ExperimentSample.
"""
# Copy read1.
read1_dataset = _copy_dataset_data(experiment_sample, data['Read_1_Path'],
Dataset.TYPE.FASTQ1, move=move)
# Copy read2.
maybe_read2_path = data.get('Read_2_Path', '')
if maybe_read2_path:
read2_dataset = _copy_dataset_data(experiment_sample, maybe_read2_path,
Dataset.TYPE.FASTQ2, move=move)
else:
read2_dataset = None
# Verification.
if read2_dataset is not None:
# Paired reads.
if (read1_dataset.filesystem_location ==
read2_dataset.filesystem_location):
# Make sure the files are not the same.
read1_dataset.status = Dataset.STATUS.FAILED
read2_dataset.status = Dataset.STATUS.FAILED
            # TODO: Provide a way for the user to get an error message,
            # similar to how we make an error link for alignments.
else:
read1_dataset.status = Dataset.STATUS.READY
read2_dataset.status = Dataset.STATUS.READY
    else:
        # Unpaired. read2_dataset is None here, so only touch read1; the
        # original also set read2_dataset.status in this branch, which
        # would raise AttributeError for unpaired samples.
        read1_dataset.status = Dataset.STATUS.READY
    # Quality Control via FastQC and save.
    read1_dataset.status = Dataset.STATUS.QC
    if not options.get('skip_fastqc', False):
        run_fastqc_on_sample_fastq(experiment_sample, read1_dataset)
    read1_dataset.status = Dataset.STATUS.READY
    read1_dataset.save()
    if read2_dataset is not None:
        read2_dataset.status = Dataset.STATUS.QC
        if not options.get('skip_fastqc', False):
            run_fastqc_on_sample_fastq(experiment_sample, read2_dataset,
                rev=True)
        read2_dataset.status = Dataset.STATUS.READY
        read2_dataset.save()
def run_fastqc_on_sample_fastq(experiment_sample, fastq_dataset, rev=False):
"""Runs FASTQC on a fastq dataset object.
Args:
experiment_sample: The ExperimentSample for this fastq.
fastq_dataset: Dataset that points to uploaded fastq file.
Returns:
New Dataset pointing to html file of FastQC results.
"""
fastq_filename = fastq_dataset.get_absolute_location()
# There's no option to pass the output filename to FastQC so we just
# create the name that matches what FastQC outputs.
fastqc_filename = _get_fastqc_path(fastq_dataset)
if rev:
dataset_type = Dataset.TYPE.FASTQC2_HTML
else:
dataset_type = Dataset.TYPE.FASTQC1_HTML
# create the tmp dir if it doesn't exist
if not os.path.exists(settings.TEMP_FILE_ROOT):
os.mkdir(settings.TEMP_FILE_ROOT)
command = [
settings.FASTQC_BINARY,
fastq_filename,
'-o', experiment_sample.get_model_data_dir(),
'-d', settings.TEMP_FILE_ROOT]
fastqc_output = subprocess.check_output(
command, stderr=subprocess.STDOUT)
# Check that fastqc file has been made
# TODO: We need proper error checking and logging probably, so that this
# non-essential step doesn't destroy the whole import process.
if not os.path.exists(fastqc_filename):
print 'FastQC Failed for {}:\n{}'.format(
fastq_filename, fastqc_output)
fastqc_dataset = add_dataset_to_entity(experiment_sample,
dataset_type, dataset_type, fastqc_filename)
fastqc_dataset.status = Dataset.STATUS.READY
fastqc_dataset.save()
return fastqc_dataset
def _get_fastqc_path(fastq_dataset):
"""Returns fastqc filename given Dataset pointing to fastq.
"""
fastq_filename = fastq_dataset.get_absolute_location()
if fastq_dataset.is_compressed():
unzipped_fastq_filename = os.path.splitext(fastq_filename)[0]
else:
unzipped_fastq_filename = fastq_filename
    # NOTE: FastQC apparently has slightly different behavior when the file
    # extension is .fastq, where it chops off the .fastq part, so we have to
    # do that here manually too.
if os.path.splitext(unzipped_fastq_filename)[1] == '.fastq':
unzipped_fastq_filename = os.path.splitext(unzipped_fastq_filename)[0]
return unzipped_fastq_filename + '_fastqc.html'
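# Name mapping performed by _get_fastqc_path (illustrative filenames):
#   sample_R1.fastq.gz -> sample_R1_fastqc.html   (.gz stripped, then .fastq)
#   sample_R1.fastq    -> sample_R1_fastqc.html
#   sample_R1.fq       -> sample_R1.fq_fastqc.html  (only '.fastq' is chopped)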
def create_sample_models_for_eventual_upload(project, targets_file):
"""Parses the form to create sample placeholers that are awaiting
data upload.
Args:
targets_file: The filled out form.
Raises:
ValidationException if validation fails.
"""
try:
valid_rows = parse_experiment_sample_targets_file(
project,
targets_file,
REQUIRED_SAMPLE_UPLOAD_THROUGH_BROWSER_HEADER,
SAMPLE_BROWSER_UPLOAD_KEY__SAMPLE_NAME,
SAMPLE_BROWSER_UPLOAD_KEY__READ_1,
SAMPLE_BROWSER_UPLOAD_KEY__READ_2)
except AssertionError as e:
raise ValidationException(e)
for row in valid_rows:
_create_sample_and_placeholder_dataset(project, row)
def _create_sample_and_placeholder_dataset(project, row):
"""Create Datasets but don't copy data.
"""
# Parsing and validation.
fastq1_filename = row['Read_1_Filename']
maybe_fastq2_filename = row.get('Read_2_Filename', '')
assert fastq1_filename != maybe_fastq2_filename
    # Now create the models.
    experiment_sample = ExperimentSample.objects.create(
        project=project, label=row['Sample_Name'])
_create_fastq_dataset(
experiment_sample, fastq1_filename, Dataset.TYPE.FASTQ1,
Dataset.STATUS.AWAITING_UPLOAD)
if maybe_fastq2_filename:
_create_fastq_dataset(
experiment_sample, maybe_fastq2_filename,
Dataset.TYPE.FASTQ2, Dataset.STATUS.AWAITING_UPLOAD)
# Add extra metadata columns.
_update_experiment_sample_data_for_row(experiment_sample, row,
PRE_DEFINED_SAMPLE_UPLOAD_THROUGH_BROWSER_PARTS)
def _update_experiment_sample_data_for_row(experiment_sample, row, known_cols):
"""Updates the catch-all ExperimentSample.data field with user-defined
fields.
"""
for field, value in row.iteritems():
if field not in known_cols:
clean_field = uppercase_underscore(field)
if not clean_field.startswith('SAMPLE_'):
clean_field = 'SAMPLE_' + clean_field
experiment_sample.data[clean_field] = str(value)
experiment_sample.save(update_fields=['data'])
def _update_experiment_sample_parentage(experiment_samples):
"""
Adds children/parent relations to ExperimentSamples according to a
dictionary of parents parsed out of the targets file.
"""
es_label_dict = dict([(es.label, es) for es in experiment_samples])
# make a dict of child to parent labels.
parent_dict = {}
for es in experiment_samples:
if 'SAMPLE_PARENTS' in es.data:
parents = es.data['SAMPLE_PARENTS'].split('|')
parent_dict[es.label] = parents
# add the children of each parent to the model.
for child, parents in parent_dict.items():
assert child in es_label_dict.keys(), (
'Child {} missing from ExperimentSamples'.format(child))
for parent in parents:
if not parent: continue #skip blank strings
assert parent in es_label_dict.keys(), (
'Parent {} missing from ExperimentSamples'.format(parent))
es_label_dict[parent].add_child(es_label_dict[child])
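# Illustrative only: SAMPLE_PARENTS is a pipe-separated list of parent
# labels parsed out of the targets file, e.g. (hypothetical labels):
#
#   es.data['SAMPLE_PARENTS'] = 'parentA|parentB'
#   'parentA|parentB'.split('|')  ->  ['parentA', 'parentB']
#
# Blank entries (e.g. from a trailing '|') are skipped by the loop above.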
def parse_experiment_sample_targets_file(project,
targets_filehandle_or_filename, required_header, sample_name_key,
read_1_key, read_2_key):
"""Parses and validates the file.
Returns:
List of objects representing the rows.
"""
_assert_sample_targets_file_size(targets_filehandle_or_filename)
# The purpose of the next few lines of somewhat convoluted code is to
    # make sure we support weird template formats, such as those Excel on
    # OS X might output. In the end, we want the variable targets_file to
    # be a File object that has been opened in universal-newline mode,
# open(..., 'rU'). This requirement is made slightly trickier by the fact
# that the aptly named param targets_filehandle_or_filename is of ambiguous
# type (because Python) and so the remaining code needs to work whether
# it's a string filename, or a File object. One way we can solve all
# these constraints is to write the contents of the file to a temporary
# location, and then read it back in universal mode. I would welcome a more
# elegant fix.
    if isinstance(targets_filehandle_or_filename, str):
        # Already a path on disk; open it directly and, crucially, don't
        # delete the caller's file afterwards (the original code removed
        # temp_file_location even in this branch).
        targets_file = open(targets_filehandle_or_filename, 'rU')
    else:
        # It's an open File object: round-trip through a temp file so we
        # can re-open it in universal-newline mode.
        if not os.path.exists(settings.TEMP_FILE_ROOT):
            os.mkdir(settings.TEMP_FILE_ROOT)
        _, temp_file_location = mkstemp(dir=settings.TEMP_FILE_ROOT)
        with open(temp_file_location, 'w') as temp_fh:
            temp_fh.write(targets_filehandle_or_filename.read())
        targets_file = open(temp_file_location, 'rU')
        # Safe to unlink while the handle above is still open (POSIX).
        os.remove(temp_file_location)
# Now this works even if there are silly carriage return characters ^M.
reader = csv.DictReader(targets_file, delimiter='\t')
# Read the header / schema.
targets_file_header = reader.fieldnames
# Make sure all header cols are present.
missing_header_cols = (set(required_header) - set(targets_file_header))
assert 0 == len(missing_header_cols), (
"Missing cols: %s" % ' '.join(missing_header_cols))
# Query all relevant datasets to check for filename clashes.
existing_sample_dataset_filename_set = set([
os.path.split(ds.filesystem_location)[1]
for ds in Dataset.objects.filter(
experimentsample__project=project)])
# Set this to a boolean on the first iteration, and make sure all rows
# are either paired or unpaired.
is_paired_end = None
# Initial aggregation and validation.
valid_rows = []
for raw_row_obj in reader:
clean_row_obj = {}
for key, value in raw_row_obj.iteritems():
# Ignore rows of the form K/V pair {None: ''}
if key is None:
continue
clean_row_obj[key] = value.strip()
sample_name = clean_row_obj[sample_name_key]
if not sample_name:
# Null sample name, skip the row.
continue
assert len(targets_file_header) == len(clean_row_obj.keys()), (
"Row %s has the wrong number of fields." % sample_name)
# Determine whether paired-end data (first iteration only).
if is_paired_end is None:
is_paired_end = (read_2_key in clean_row_obj and
clean_row_obj[read_2_key])
# Check filenames are present.
assert clean_row_obj[read_1_key], (
"No read 1 in row %s" % sample_name)
if is_paired_end:
assert clean_row_obj[read_2_key], (
"No read 2 in row %s" % sample_name)
# Catch a common copy paste error where read1 matches read2.
if is_paired_end:
same = (clean_row_obj[read_1_key] == clean_row_obj[read_2_key])
assert not same, "Read 1 filename is same as read 2 filename"
# Make sure Dataset with that name doesn't exist.
def _assert_not_filename_exists(filename_col):
filename = os.path.basename(clean_row_obj[filename_col])
assert not filename in existing_sample_dataset_filename_set, (
"%s exists" % clean_row_obj[filename_col])
_assert_not_filename_exists(read_1_key)
if is_paired_end:
_assert_not_filename_exists(read_2_key)
valid_rows.append(clean_row_obj)
# Make sure all the standard fields have unique values relative to each
# other.
def _assert_no_repeated_value(col):
values = set([row[col] for row in valid_rows])
assert len(values) == len(valid_rows), (
"Non-unique %s detected." % col)
_assert_no_repeated_value(sample_name_key)
_assert_no_repeated_value(read_1_key)
if is_paired_end:
_assert_no_repeated_value(read_2_key)
targets_file.close()
return valid_rows
@transaction.commit_on_success
def import_variant_set_from_vcf(ref_genome, variant_set_name, variant_set_file):
"""Convert an uploaded VCF file into a new variant set object.
Args:
ref_genome: ReferenceGenome.
variant_set_name: Name of the variant set (label).
variant_set_file: Path to the variant set on disk.
"""
# For now, variant set name must be unique even among diff ref genomes.
variant_set_name_exists = bool(VariantSet.objects.filter(
reference_genome=ref_genome,
label=variant_set_name).count())
assert not variant_set_name_exists, 'Variant set name must be unique.'
# Create the VariantSet.
variant_set = VariantSet.objects.create(
reference_genome=ref_genome,
label=variant_set_name)
# First, save this vcf as a dataset, so we can point to it from the
# new variant common_data_objs
dataset_type = IMPORT_FORMAT_TO_DATASET_TYPE['vcf_user']
dataset = copy_and_add_dataset_source(variant_set, dataset_type,
dataset_type, variant_set_file)
# Now read the variant set file.
_read_variant_set_file_as_csv(variant_set_file, ref_genome, dataset,
variant_set)
# These actions invalidate the materialized view.
ref_genome.invalidate_materialized_view()
def _read_variant_set_file_as_csv(variant_set_file, reference_genome,
dataset, variant_set):
"""If reading the variant set file as a vcf fails (because we arent using
all columns, as will usually be the case) then read it as a CSV and check
manually for the required columns.
Args:
* variant_set_file: Path to vcf file.
* reference_genome: ReferenceGenome object.
"""
# NOTE: Must open with 'rU', universal mode, to handle non-standard
# linebreaks that might be introduced in different environments. For
# example, Excel on Mac OS X saves funky linebreaks.
with open(variant_set_file, 'rU') as fh:
# Use this wrapper to skip the header lines
# Double ##s are part of the header, but single #s are column
# headings and must be stripped and kept.
def remove_vcf_header(iterable):
for line in iterable:
if not line.startswith('##'):
if line.startswith('#'):
line = line.lstrip('#')
yield line
vcf_noheader = remove_vcf_header(fh)
reader = csv.DictReader(vcf_noheader, delimiter='\t')
# Check that the required columns are present.
assert (len(reader.fieldnames) >= len(REQUIRED_VCF_HEADER_PART)), (
'Header for PseudoVCF %s is too short, should have [%s], has %s' % (
variant_set_file, ', '.join(REQUIRED_VCF_HEADER_PART),
', '.join(reader.fieldnames)))
for col, check in zip(reader.fieldnames[0:len(REQUIRED_VCF_HEADER_PART)],
REQUIRED_VCF_HEADER_PART):
assert col == check, (
"Header column '%s' is missing or out of order; %s" % (check,
', '.join(reader.fieldnames)))
class PseudoVCF:
"""Pseudo wrapper class to satisfy interface of
extract_raw_data_dict().
"""
def __init__(self, **entries):
self.__dict__.update(entries)
self.__dict__['ALT'] = self.__dict__['ALT'].strip().split(',')
self.__dict__['samples'] = []
for record in reader:
record = PseudoVCF(**record)
# Get or create the Variant for this record.
# NOTE: No samples so query_cache is not necessary.
variant, alts = get_or_create_variant(
reference_genome, record, dataset, query_cache=None)
# Create a link between the Variant and the VariantSet if
# it doesn't exist.
VariantToVariantSet.objects.get_or_create(
variant=variant,
variant_set=variant_set)
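def _example_remove_vcf_header():
    """Illustrative only (not called anywhere): mirrors the header-stripping
    generator in _read_variant_set_file_as_csv() on an in-memory pseudo-VCF.
    """
    lines = ['##fileformat=VCFv4.1\n',
             '#CHROM\tPOS\tID\tREF\tALT\n',
             'chr1\t100\t.\tA\tT\n']
    stripped = [l.lstrip('#') for l in lines if not l.startswith('##')]
    assert stripped[0].startswith('CHROM')
    assert stripped[1].startswith('chr1')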
##############################################################################
# Helper Functions
##############################################################################
def copy_and_add_dataset_source(entity, dataset_label, dataset_type,
original_source_location, move=False):
"""Copies the dataset to the entity location and then adds as
Dataset. If the original_source_location is a file object, then
it just read()s from the handle and writes to destination.
If move is true, move instead of copying it. Good for files downloaded
to a temp directory, since copying is slower.
The model entity must satisfy the following interface:
* property dataset_set
* method get_model_data_dir()
Returns:
The Dataset object.
"""
dest = copy_dataset_to_entity_data_dir(entity, original_source_location,
move)
dataset = add_dataset_to_entity(entity, dataset_label, dataset_type,
dest)
return dataset
def _get_copy_target_path(entity, original_source_location):
"""Returns the full path to the copy target.
Args:
entity: Model entity from which we determine the target dir.
original_source_location: Original location from which we determine a
filename.
Returns:
String describing full target path.
"""
assert hasattr(entity, 'get_model_data_dir')
source_name = os.path.split(original_source_location)[1]
return os.path.join(entity.get_model_data_dir(), source_name)
def copy_dataset_to_entity_data_dir(entity, original_source_location,
move=False):
"""If a file path, copy the data to the entity model data dir.
If a handle, then just write it to the data dir.
Returns:
The destination to which the file was copied.
"""
dest = _get_copy_target_path(entity, original_source_location)
if not original_source_location == dest:
        try:  # first try a filesystem path
            if move:
                shutil.move(original_source_location, dest)
            else:
                shutil.copy(original_source_location, dest)
        except TypeError:  # then try a file handle
            # Use a context manager so the destination handle is closed.
            with open(dest, 'w') as dest_fh:
                dest_fh.write(original_source_location.read())
return dest
def add_dataset_to_entity(entity, dataset_label, dataset_type,
filesystem_location=None):
"""Helper function for adding a Dataset to a model.
"""
dataset = Dataset.objects.create(
label=dataset_label, type=dataset_type)
if filesystem_location is not None:
dataset.filesystem_location = clean_filesystem_location(
filesystem_location)
dataset.save()
entity.dataset_set.add(dataset)
entity.save()
return dataset
def prepare_ref_genome_related_datasets(ref_genome, dataset):
"""Prepares data related to a ReferenceGenome.
For example, if only Genbank exists, creates a Fasta Dataset.
If related Datasets exists, this function is a no-op.
Args:
ref_genome: ReferenceGenome.
dataset: A dataset pointing to a genome.
Raises:
AssertionError if dataset status is NOT_STARTED.
"""
assert dataset.status != Dataset.STATUS.NOT_STARTED
if dataset.type == Dataset.TYPE.REFERENCE_GENOME_FASTA:
# make sure the fasta index is generated
# Run jbrowse ref genome processing
prepare_jbrowse_ref_sequence(ref_genome)
elif dataset.type == Dataset.TYPE.REFERENCE_GENOME_GENBANK:
# Run snpeff build after creating ReferenceGenome obj.
build_snpeff(ref_genome)
# These functions are NO-OPS if the respective Datasets exist.
generate_fasta_from_genbank(ref_genome)
generate_gff_from_genbank(ref_genome)
# Run jbrowse genbank genome processing for genes
add_genbank_file_track(ref_genome)
# We create the bwa index once here, so that alignments running in
# parallel don't step on each others' toes.
ref_genome_fasta = get_dataset_with_type(ref_genome,
Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
ensure_bwa_index(ref_genome_fasta)
|
woodymit/millstone_accidental_source
|
genome_designer/utils/import_util.py
|
Python
|
mit
| 38,681
|
[
"BWA",
"Biopython"
] |
002d0d358747e9442302be2f0dd42edc954275595558828bed8a84ea1eb5c930
|
from aiida import load_dbenv
load_dbenv()
from aiida.orm import Code, DataFactory, WorkflowFactory
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
import numpy as np
# Silicon structure
a = 5.404
cell = [[a, 0, 0],
[0, a, 0],
[0, 0, a]]
symbols=['Si'] * 8
scaled_positions = [(0.875, 0.875, 0.875),
(0.875, 0.375, 0.375),
(0.375, 0.875, 0.375),
(0.375, 0.375, 0.875),
(0.125, 0.125, 0.125),
(0.125, 0.625, 0.625),
(0.625, 0.125, 0.625),
(0.625, 0.625, 0.125)]
structure = StructureData(cell=cell)
positions = np.dot(scaled_positions, cell)
for i, scaled_position in enumerate(scaled_positions):
structure.append_atom(position=np.dot(scaled_position, cell).tolist(),
symbols=symbols[i])
structure.store()
lammps_machine = {
'num_machines': 1,
'parallel_env': 'mpi*',
'tot_num_mpiprocs': 16}
parameters_opt = {'relaxation': 'tri', # iso/aniso/tri
# 'pressure': 0.0, # In Gruneisen workflow this is ignored. Pressure is set in workflow arguments
'vmax': 0.000001, # Angstrom^3
'energy_tolerance': 1.0e-25, # eV
                  'force_tolerance': 1.0e-25,  # eV/angstrom
'max_evaluations': 1000000,
'max_iterations': 500000}
# Cluster information
machine_dict = {
'num_machines': 1,
'parallel_env':'mpi*',
'tot_num_mpiprocs' : 16}
# Phonopy input parameters
phonopy_parameters = {'supercell': [[2, 0, 0],
[0, 2, 0],
[0, 0, 2]],
'primitive': [[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0]],
'distance': 0.01,
'mesh': [40, 40, 40],
'symmetry_precision': 1e-5}
# Silicon(C) Tersoff
tersoff_si = {'Si Si Si ': '3.0 1.0 1.7322 1.0039e5 16.218 -0.59826 0.78734 1.0999e-6 1.7322 471.18 2.85 0.15 2.4799 1830.8'}
potential = {'pair_style': 'tersoff',
'data': tersoff_si}
# Collect workflow input data
wf_parameters = {
'structure': structure,
'phonopy_input': {'parameters': phonopy_parameters},
'input_force': {'code': 'lammps_force@boston',
'potential': potential,
'resources': lammps_machine},
'input_optimize': {'code': 'lammps_optimize@boston',
'potential': potential,
'parameters': parameters_opt,
'resources': lammps_machine},
}
#Submit workflow
WorkflowGruneisen = WorkflowFactory('wf_gruneisen_pressure')
wf = WorkflowGruneisen(params=wf_parameters, pre_optimize=True) # pressure in kb
wf.label = 'Gruneisen Si lammps'
wf.start()
print ('pk: {}'.format(wf.pk))
|
abelcarreras/aiida_extensions
|
workflows/launcher/launch_gruneisen_lammps_si.py
|
Python
|
mit
| 3,040
|
[
"LAMMPS",
"phonopy"
] |
77ae717e70d8f2b30df4add5269fbc9e596eaf7e012704851680b895b595fdda
|
import dryscrape
# make sure you have xvfb installed
dryscrape.start_xvfb()
search_term = 'dryscrape'
# set up a web scraping session
sess = dryscrape.Session(base_url = 'http://google.com')
# we don't need images
sess.set_attribute('auto_load_images', False)
# visit homepage and search for a term
sess.visit('/')
q = sess.at_xpath('//*[@name="q"]')
q.set(search_term)
q.form().submit()
# extract all links
for link in sess.xpath('//a[@href]'):
print(link['href'])
# save a screenshot of the web page
sess.render('google.png')
print("Screenshot written to 'google.png'")
|
carlosb1/examples-python
|
ideas/mango-example/google.py
|
Python
|
gpl-2.0
| 581
|
[
"VisIt"
] |
9e03133d3a5006d142381ee228a87c959ee48a4edf20e4729c1fe65718dca07b
|
# -*- coding: utf-8 -*-
from django.http import HttpRequest, HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from salesReport.pymagento import Magento
import csv, math
from datetime import date, timedelta, datetime
import models
from django.shortcuts import render_to_response
from django.template import RequestContext
from xlrd import open_workbook
from .correios import correios_frete_simples
from centralFitEstoque.settings import FRETE_ORIGEM
from django.core.serializers import serialize
from django.utils import simplejson
from django.utils import timezone
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.db import transaction
def timeInUTC(dateString):
dateReturn = datetime.strptime(dateString, "%Y-%m-%d %H:%M:%S").replace(tzinfo=timezone.utc)
dateReturn = dateReturn + timedelta(hours=3)
return dateReturn
def timeInGMT(dateString):
dateReturn = datetime.strptime(dateString, "%Y-%m-%d %H:%M:%S")
dateReturn = dateReturn - timedelta(hours=3)
return dateReturn
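def _example_time_conversions():
    """Illustrative only (not called anywhere): the two helpers apply a
    fixed 3-hour offset (Brasilia time vs. UTC) in opposite directions.
    """
    utc = timeInUTC('2014-01-01 00:00:00')
    assert (utc.day, utc.hour) == (1, 3)
    gmt = timeInGMT('2014-01-01 03:00:00')
    assert gmt.hour == 0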
def saveItemInDatabse(i):
if not 'cost' in i:
i['cost'] = 0
    # Some products don't have special_price
    if not 'special_price' in i:
        i['special_price'] = 0
    # Some products don't send a status
    if not 'status' in i:
        i['status'] = True
    # Some products don't have a weight
if not 'weight' in i:
i['weight'] = 0
if i['status'] == '1':
i['status'] = True
else:
i['status'] = False
if 'marca' in i and i['marca'] != None:
try:
marca = models.brands.objects.get(name=i['marca'])
except Exception as e:
marca = models.brands.objects.create(name=i['marca'], meta_dias_estoque=1)
else:
marca = None
dateInit = datetime.today().replace(hour=0, minute=0, second=0) - timedelta(hours=3)
dateEnd = datetime.today().replace(hour=23, minute=59, second=59) - timedelta(days=30) - timedelta(hours=3)
    # getVMD30 indexes item[0] expecting a SKU, so pass the dict's sku
    # wrapped in a list; passing the dict itself raised KeyError(0), which
    # was silently swallowed and always yielded vmd == 0.
    vmd = getVMD30([i['sku']], dateEnd, dateInit)
if 'special_price' in i and i['special_price'] != None:
valor_produto = i['special_price']
else:
valor_produto = i['price']
valor_faturado_do_dia = vmd * float(valor_produto)
try:
newItem = models.item.objects.filter(product_id=i['product_id'])
if len(newItem) == 0:
newItem = models.item.objects.create(
product_id=i['product_id'],
sku=i['sku'],
name=i['name'],
cost=i['cost'],
price=i['price'],
specialPrice=i['special_price'],
status=i['status'],
weight=i['weight'],
cmm=i['cost'],
estoque_atual=0,
estoque_empenhado=0,
estoque_disponivel=0,
margem=0,
brand=marca,
vmd=vmd,
valor_faturado_do_dia=valor_faturado_do_dia,
)
return newItem
else:
return newItem[0]
except Exception as e:
print e
def saveOrderStatusHistory(iteration, order):
if not iteration['status']:
iteration['status'] = 'None'
return models.status_history.objects.create(
comment=iteration['comment'],
status=iteration['status'],
entity_name=iteration['entity_name'],
created_at=datetime.strptime(iteration['created_at'], '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc),
order=order,
)
def calculate_stock_variables(itemToSave):
if itemToSave.brand:
fator_quantidade = itemToSave.estoque_disponivel - (itemToSave.vmd * itemToSave.brand.meta_dias_estoque)
if (fator_quantidade) >= 0:
itemToSave.quantidade_excedente = math.ceil(fator_quantidade)
itemToSave.quantidade_faltante = 0
else:
itemToSave.quantidade_faltante = math.ceil(fator_quantidade * -1)
itemToSave.quantidade_excedente = 0
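# Worked example with illustrative numbers: for estoque_disponivel=10,
# vmd=0.5 and brand.meta_dias_estoque=30, the stock target is 0.5 * 30 = 15,
# so fator_quantidade = 10 - 15 = -5, giving quantidade_faltante = 5 and
# quantidade_excedente = 0.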
def saveOrderItemInDatabase(order, orderItemToSave):
try:
itemToSave = models.item.objects.get(sku=int(orderItemToSave['sku']))
except Exception as e:
itemToSave = saveItemInDatabse(orderItemToSave)
if orderItemToSave['parent_item_id'] != None:
is_child = orderItemToSave['parent_item_id']
else:
is_child = False
newOrderItem = models.orderItem.objects.create(
item=itemToSave,
order=order,
quantidade=float(orderItemToSave['qty_ordered']),
created_at=datetime.strptime(orderItemToSave['created_at'], '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc),
updated_at=datetime.strptime(orderItemToSave['updated_at'], '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc),
price=float(orderItemToSave['price']),
is_child=is_child,
productType=orderItemToSave['product_type'],
)
    # Fix the fields below if they are None
if not itemToSave.estoque_disponivel:
itemToSave.estoque_disponivel = 0
if not itemToSave.estoque_empenhado:
itemToSave.estoque_empenhado = 0
if not itemToSave.estoque_atual:
itemToSave.estoque_atual = 0
if order.status == 'holded':
itemToSave.estoque_empenhado += 1
elif order.status == 'processing' or order.status == 'complete' or order.status == 'complete2':
itemToSave.estoque_atual -= 1
itemToSave.estoque_disponivel = itemToSave.estoque_atual - itemToSave.estoque_empenhado
#Update the vmd
dateInit = datetime.today().replace(hour=0, minute=0, second=0) - timedelta(hours=3)
dateEnd = datetime.today().replace(hour=23, minute=59, second=59) - timedelta(days=30) - timedelta(hours=3)
itemToSave.vmd = getVMD30ForDatabaseItem(itemToSave, dateEnd, dateInit)
calculate_stock_variables(itemToSave)
#Update the valor_faturado_do_dia
preco_item = itemToSave.specialPrice if itemToSave.specialPrice else itemToSave.price
itemToSave.valor_faturado_do_dia = itemToSave.vmd * float(preco_item)
itemToSave.save()
return newOrderItem
@transaction.commit_on_success
def saveOrderInDatabase(o):
print 'Saving Order: %s' % o['increment_id']
databaseOrder = models.order.objects.filter(increment_id=o['increment_id'])
if len(databaseOrder) > 0:
        print('Order in database: %s' % databaseOrder[0].increment_id)
        # importOrders() counts orders that were already in the database by
        # comparing this return value against 'NaBase', so return that
        # sentinel instead of the model instance (no other caller uses the
        # return value).
        return 'NaBase'
else:
if len(o['payment']['additional_information']) > 0:
payment_method = o['payment']['additional_information']['PaymentMethod']
else:
payment_method = 'Sem Informacao'
pesoPedido = 0
for item in o['items']:
pesoPedido += float(item['weight'].replace(',', '.'))
shipping_amount_simulate = correios_frete_simples(FRETE_ORIGEM, o['billing_address']['postcode'], 30, 30, 30, pesoPedido)
if o['shipping_method']:
if o['shipping_method'].split('_')[2] == '41112':
shipping_amount_centralfit = float(shipping_amount_simulate['sedex']['valor'].replace(',', '.'))
else:
shipping_amount_centralfit = float(shipping_amount_simulate['pac']['valor'].replace(',', '.'))
else:
shipping_amount_centralfit = 10
o['shipping_method'] = 'Envio Especial'
if float(o['grand_total']) == 0.0:
o['grand_total'] = o['subtotal']
o['base_grand_total'] = o['subtotal']
databaseOrder = models.order(
increment_id=o['increment_id'],
created_at= datetime.strptime(o['created_at'], '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc),
updated_at=datetime.strptime(o['updated_at'], '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc),
is_active=True,
customer_id=o['customer_id'],
grand_total=o['base_grand_total'],
subtotal=o['base_subtotal'],
status=o['status'],
customer_email=o['customer_email'],
order_id=o['order_id'],
shipping_amount=o['shipping_amount'],
shipping_method=o['shipping_method'],
discount_amount=o['discount_amount'],
payment_method=payment_method,
shipping_address_postcode = o['shipping_address']['postcode'],
shipping_address_region = o['shipping_address']['region'],
shipping_address_street = o['shipping_address']['street'],
weight=o['weight'],
shipping_amount_centralfit=shipping_amount_centralfit
)
for itemInOrder in o['items']:
saveOrderItemInDatabase(databaseOrder, itemInOrder)
        # Save the order's status-change history
for iteration in o['status_history']:
saveOrderStatusHistory(iteration, databaseOrder)
databaseOrder.save()
return databaseOrder
def getQtyHolded(item, dateEnd):
dateStart = dateEnd - timedelta(days=7)
try:
totalInPeriod = models.orderItem.objects.filter(item__sku=item[0]).filter(created_at__range=[dateStart, dateEnd]).filter(order__status='holded')
except Exception as e:
print e
totalInPeriod = []
return len(totalInPeriod)
def getVMD30(item, dateMinus30, dateRangeEnd):
try:
totalInPeriod = models.orderItem.objects.filter(item__sku=item[0]).filter(created_at__range=[dateMinus30, dateRangeEnd]).exclude(order__status='canceled').exclude(order__status='holded')
except Exception as e:
print e
totalInPeriod = []
vmd30 = round(float(len(totalInPeriod) / 30.0), 3)
return vmd30
def getVMD30ForDatabaseItem(item, dateMinus30, dateRangeEnd):
try:
totalInPeriod = models.orderItem.objects.filter(item__sku=item.sku).filter(created_at__range=[dateMinus30, dateRangeEnd]).exclude(order__status='canceled').exclude(order__status='holded')
except Exception as e:
print e
totalInPeriod = []
vmd30 = round(float(len(totalInPeriod) / 30.0), 3)
return vmd30
def getVMD(item, dateRangeInDays):
    # Sum all the per-status counts first; the original expression divided
    # only item[9] because of operator precedence.
    total = item[4] + item[5] + item[6] + item[7] + item[8] + item[9]
    if dateRangeInDays.days == 0:
        vmd = round(float(total), 3)
    else:
        vmd = round(float(total) / float(dateRangeInDays.days), 3)
    return vmd
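# Worked example (illustrative counts): with columns 4..9 holding
# [3, 1, 2, 0, 0, 0] over a 3-day range, getVMD returns
# round((3 + 1 + 2 + 0 + 0 + 0) / 3.0, 3) == 2.0.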
def saveCSV(productList, dateStart, dateEnd):
print('Saving CSV File')
dateRangeInDays = dateEnd - dateStart
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="salesReport.csv"'
writer = csv.writer(response)
writer.writerow(['sku', 'name', 'brand', 'price', 'qty', 'qty_holded', 'VMD', 'VMD30',
'qty_complete', 'qty_fraud', 'qty_fraud2', 'qty_complete2', 'status'])
dateMinus30 = dateEnd - timedelta(days=30)
for item in productList:
qtd_holded = getQtyHolded(item, dateEnd)
vmd = getVMD(item, dateRangeInDays)
VMD30 = getVMD30(item, dateMinus30, dateEnd)
writer.writerow([item[0], item[1].encode('utf-8', 'replace'), item[2].encode('utf-8', 'replace')
, item[3], item[4], qtd_holded, vmd, VMD30, item[6], item[7], item[8], item[9], item[10]])
return response
# Maps an order status to the productList column that counts it.
STATUS_TO_PRODUCT_LIST_COL = {
    'processing': 4,
    'holded': 5,
    'complete': 6,
    'fraud': 7,
    'fraud2': 8,
    'complete2': 9,
}
def generateCSV(orderArray, dateStart, dateEnd, itemsHash, productList):
    # Count the number of order items per order status. The six identical
    # elif branches of the original are collapsed into a status -> column
    # lookup; SKUs missing from itemsHash are skipped, as before (the bare
    # except is narrowed to the ValueError that list.index() raises).
    for order in orderArray:
        col = STATUS_TO_PRODUCT_LIST_COL.get(order['status'])
        if col is None:
            continue
        for item in order['items']:
            try:
                productList[itemsHash.index(item['sku'])][col] += 1
            except ValueError:
                pass
    return saveCSV(productList, dateStart, dateEnd)
def getBrand(item):
BRANDS_ARRAY = []
for brand in models.brands.objects.all():
BRANDS_ARRAY.append(brand.name.encode('UTF-8'))
itemDetail = item['name'].split('-')
if itemDetail[-1].strip().encode('UTF-8') not in BRANDS_ARRAY and len(itemDetail) >= 2:
#Case X-Pharma
testString = itemDetail[-2] + '-' + itemDetail[-1]
if testString.encode('utf-8').strip() == 'X-Pharma' or testString.encode('utf-8').strip() == 'X-pharma':
return testString.strip()
return itemDetail[-2].strip()
else:
return itemDetail[-1].strip()
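# Illustrative only (hypothetical product names and brands):
#   'Whey Protein - 900g - BrandX' -> 'BrandX' when 'BrandX' is a
#   registered brand; the special case keeps the hyphen in
#   'Creatina - 300g - X-Pharma' -> 'X-Pharma'.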
def importOrdersSinceDay(request, dateStart, dateEnd):
print('-- Start import')
salesReport = Magento()
salesReport.connect()
orders = salesReport.listOrdersSinceStatusDate('holded', dateStart, dateEnd) + \
salesReport.listOrdersSinceStatusDate('processing', dateStart, dateEnd) + \
salesReport.listOrdersSinceStatusDate('complete', dateStart, dateEnd) + \
salesReport.listOrdersSinceStatusDate('fraud', dateStart, dateEnd) + \
salesReport.listOrdersSinceStatusDate('fraud2', dateStart, dateEnd) + \
salesReport.listOrdersSinceStatusDate('complete2', dateStart, dateEnd)
itemsHash = []
productList = []
BRANDS_ARRAY = []
for brand in models.brands.objects.all():
BRANDS_ARRAY.append(brand.name.encode('UTF-8'))
for product in salesReport.getProductArray():
itemsHash.append(product['sku'])
if product['status'] == '1':
status = 'Enable'
else:
status = 'Disable'
if product['special_price']:
productList.append([product['sku'], product['name'], getBrand(product), product['special_price'], 0, 0, 0, 0, 0, 0, status])
else:
productList.append([product['sku'], product['name'], getBrand(product), product['price'], 0, 0, 0, 0, 0, 0, status])
for order in orders:
saveOrderInDatabase(order)
csvFile = generateCSV(orders, dateStart, dateEnd, itemsHash, productList)
print('-- End import')
return csvFile
def exportar(request):
if request.method == "POST":
itemsHash = []
productList = []
BRANDS_ARRAY = []
for brand in models.brands.objects.all():
BRANDS_ARRAY.append(brand.name.encode('UTF-8'))
for product in models.item.objects.all():
itemsHash.append(product.sku)
itemDict = {
'name': product.name
}
if product.status:
status = 'Enable'
else:
status = 'Disable'
if product.specialPrice:
productList.append([product.sku, product.name, getBrand(itemDict), product.specialPrice, 0, 0, 0, 0, 0, 0, status])
else:
productList.append([product.sku, product.name, getBrand(itemDict), product.price, 0, 0, 0, 0, 0, 0, status])
dataInicial = datetime.strptime(request.POST.get('dataInicio'), '%d-%m-%Y')
dataFinal = datetime.strptime(request.POST.get('dataFim') + ' 23:59:59', '%d-%m-%Y %H:%M:%S')
orders = models.order.objects.filter(created_at__range=[dataInicial, dataFinal])
        # Same status -> column counting as in generateCSV, via the shared
        # lookup table.
        for order in orders:
            col = STATUS_TO_PRODUCT_LIST_COL.get(order.status)
            if col is None:
                continue
            for itemOrder in order.orderitem_set.all():
                try:
                    productList[itemsHash.index(itemOrder.item.sku)][col] += 1
                except ValueError:
                    pass
return saveCSV(productList, dataInicial, dataFinal)
else:
return render_to_response('exportar.html',
{'status': 'ok'},
context_instance=RequestContext(request))
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
def importAllProducts(request):
if request.method == 'POST':
updateItemDetail()
print('-- Start Product import')
salesReport = Magento()
salesReport.connect()
quantidadeImportada = 0
BRANDS_ARRAY = []
for brand in models.brands.objects.all():
BRANDS_ARRAY.append(brand.name.encode('UTF-8'))
for product in salesReport.getProductArray():
if RepresentsInt(product['sku']):
exist = models.item.objects.filter(sku=product['sku'])
if len(exist) == 0:
saveItemInDatabse(product)
quantidadeImportada += 1
return render_to_response('importar.html',
{
'status': '',
'quantidadeImportada': quantidadeImportada
},
context_instance=RequestContext(request))
else:
return render_to_response('importar.html',
{'status': 'ok'},
context_instance=RequestContext(request))
def importOrders(dateEndImUTC, dateStartImUTC, importado, naBase):
salesReport = Magento()
salesReport.connect()
    # Note: '%S' must be used for seconds; lowercase '%s' is not a portable
    # strftime directive and yields wrong start timestamps on most platforms.
    statuses = ('holded', 'processing', 'complete', 'fraud', 'fraud2', 'complete2')
    orders = []
    for order_status in statuses:
        orders += salesReport.listOrdersSinceStatusDate(
            order_status,
            dateStartImUTC.strftime('%Y-%m-%d %H:%M:%S'),
            dateEndImUTC.strftime('%Y-%m-%d %H:%M:%S'))
for order in orders:
status = saveOrderInDatabase(order)
if status == 'NaBase':
naBase += 1
else:
importado += 1
return importado, naBase
def importAllOrders(request):
if request.method == 'POST':
naBase = 0
importado = 0
dateStart = request.POST.get('dataInicio').split('-')
dateEnd = request.POST.get('dataFim').split('-')
dateStartImUTC = timeInUTC(dateStart[2] + '-' + dateStart[1] + '-' + dateStart[0] + ' 00:00:00')
dateEndImUTC = timeInUTC(dateEnd[2] + '-' + dateEnd[1] + '-' + dateEnd[0] + ' 23:59:59')
print('-- Start Order import')
importado, naBase = importOrders(dateEndImUTC, dateStartImUTC, importado, naBase)
return render_to_response('importar.html',
{
'status': 'importacaoSucesso',
'quantidadeImportada': importado,
'naBase': naBase,
'RangeImportado': 'de %s ate %s'% (request.POST.get('dataInicio'), request.POST.get('dataFim'))
},
context_instance=RequestContext(request))
else:
return render_to_response('importar.html',
{'status': 'ok'},
context_instance=RequestContext(request))
def importProductCost(request):
if request.method == "POST":
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from centralFitEstoque.settings import MEDIA_ROOT
quantidadeAtualizada = 0
        uploaded_file = request.FILES['docfile']
        path = default_storage.save('tabelaCustoProduto.xlsx', ContentFile(uploaded_file.read()))
wb = open_workbook(MEDIA_ROOT + '/' + path)
for s in wb.sheets():
for row in range(s.nrows):
values = []
for col in range(s.ncols):
values.append(s.cell(row, col).value)
try:
if values[2] != 0:
produto = models.item.objects.get(sku=values[2])
produto.cost = values[4]
produto.save()
quantidadeAtualizada += 1
except Exception as e:
print e
return render_to_response('importar.html',
{'status': 'importacaoSucesso',
'atualizadoSucesso': quantidadeAtualizada
},
context_instance=RequestContext(request))
else:
        return HttpResponseForbidden()
def updateLast7daysOrderStatus():
data_inicio = datetime.today() - timedelta(days=7)
orders = models.order.objects.filter(created_at__gt=data_inicio, status__in=['holded', 'processing'])
quantidadeAtualizada = 0
salesReport = Magento()
salesReport.connect()
#call magento and update orderStatus
for orderToBeUpdated in orders:
print u'trying update order %s' % orderToBeUpdated.increment_id
new_order_info = salesReport.getSingleOrderInfo(orderToBeUpdated.increment_id)
if not new_order_info['status']:
new_order_info['status'] = 'Nao Informado'
if new_order_info['status'] != orderToBeUpdated.status:
print 'Order Updated %s !' % orderToBeUpdated.increment_id
                #Update the stock levels
                #Case 1: holded -> canceled: release the committed (reserved) stock
                if orderToBeUpdated.status == 'holded' and new_order_info['status'] == 'canceled':
                    for item in orderToBeUpdated.orderitem_set.all():
                        item.item.estoque_empenhado -= item.quantidade
                        item.item.estoque_disponivel = item.item.estoque_atual - item.item.estoque_empenhado
                        item.item.save()
                #Case 2: processing -> canceled: return the quantity to current stock
                if orderToBeUpdated.status == 'processing' and new_order_info['status'] == 'canceled':
                    for item in orderToBeUpdated.orderitem_set.all():
                        item.item.estoque_atual += item.quantidade
                        item.item.estoque_disponivel = item.item.estoque_atual - item.item.estoque_empenhado
                        item.item.save()
                #Case 3: complete -> canceled: return the items to stock
                if orderToBeUpdated.status == 'complete' and new_order_info['status'] == 'canceled':
                    for item in orderToBeUpdated.orderitem_set.all():
                        item.item.estoque_atual += item.quantidade
                        item.item.estoque_disponivel = item.item.estoque_atual - item.item.estoque_empenhado
                        item.item.save()
                #Case 4: complete2 -> canceled: return the items to stock
                if orderToBeUpdated.status == 'complete2' and new_order_info['status'] == 'canceled':
                    for item in orderToBeUpdated.orderitem_set.all():
                        item.item.estoque_atual += item.quantidade
                        item.item.estoque_disponivel = item.item.estoque_atual - item.item.estoque_empenhado
                        item.item.save()
orderToBeUpdated.status = new_order_info['status']
orderToBeUpdated.updated_at = datetime.strptime(new_order_info['updated_at'], '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc)
            #Save any new status-history entries that are not yet stored locally
for iteraction in new_order_info['status_history']:
ja_existe = False
for orderIteraction in orderToBeUpdated.status_history_set.all():
if orderIteraction.created_at.strftime("%Y-%m-%d %H:%M:%S") == iteraction['created_at'] and orderIteraction.comment == iteraction['comment']:
ja_existe = True
if not ja_existe:
databaseIteration = saveOrderStatusHistory(iteraction, orderToBeUpdated)
print u'Nova iteracao adicionada: %s - %s' % (databaseIteration.created_at, databaseIteration.status)
orderToBeUpdated.save()
quantidadeAtualizada += 1
return quantidadeAtualizada
def extractOrderInfoFromMagento(order_id):
salesReport = Magento()
salesReport.connect()
return salesReport.getSingleOrderInfo(order_id)
def SingleOrderInfo(request, order_id):
orderJson = extractOrderInfoFromMagento(order_id)
return HttpResponse(simplejson.dumps(orderJson))
def atualizarStatusPedido(request):
quantidadeAtualizada = updateLast7daysOrderStatus()
return render_to_response('importar.html',
{'status': 'atualizadoSucesso',
'quantidadeAtualizada': quantidadeAtualizada,
},
context_instance=RequestContext(request))
def generateCsvFileCron(dataInicial, dataFinal):
itemsHash = []
productList = []
for product in models.item.objects.all():
itemsHash.append(product.sku)
if not product.brand:
marca = u'Não associou a marca'
else:
marca = product.brand.name
if product.status:
status = 'Enable'
else:
status = 'Disable'
if product.specialPrice:
price = product.specialPrice
else:
price = product.price
productList.append([product.sku, product.name, marca, price, 0, 0, 0, 0, 0, 0, status])
orders = models.order.objects.filter(created_at__range=[dataInicial, dataFinal])
    # Map each order status to the productList column that counts it.
    STATUS_COLUMN = {'processing': 4, 'holded': 5, 'complete': 6,
                     'fraud': 7, 'fraud2': 8, 'complete2': 9}
    for order in orders:
        column = STATUS_COLUMN.get(order.status)
        if column is None:
            continue
        for itemOrder in order.orderitem_set.all():
            try:
                productList[itemsHash.index(itemOrder.item.sku)][column] += 1
            except ValueError:
                # SKU not present in itemsHash; skip this item
                pass
print('Saving CSV File')
dateRangeInDays = dataFinal - dataInicial
with open('centralFitEstoque/media/csv_report/report_' + dataInicial.strftime('%m%d') + '_' + dataFinal.strftime('%m%d') + '.csv', 'wb+') as csvfile:
writer = csv.writer(csvfile, delimiter=';',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['sku', 'name', 'brand', 'price', 'qty', 'qty_holded', 'VMD', 'VMD30',
'qty_complete', 'qty_fraud', 'qty_fraud2', 'qty_complete2', 'status'])
dateMinus30 = dataFinal - timedelta(days=30)
for item in productList:
qtd_holded = getQtyHolded(item, dataFinal)
vmd = getVMD(item, dateRangeInDays)
VMD30 = getVMD30(item, dateMinus30, dataFinal)
writer.writerow([item[0], item[1].encode('utf-8', 'replace'), item[2].encode('utf-8', 'replace')
, item[3], item[4], qtd_holded, vmd, VMD30, item[6], item[7], item[8], item[9], item[10]])
from django.core.files import File
djangoFile = File(csvfile)
csvReport = models.csvReport(csvFile=djangoFile, created_at=datetime.now())
csvReport.save()
return csvReport.csvFile.url
def generateCsvFileCronTeste(request):
dateInit = datetime.today().replace(hour=0, minute=0, second=0) - timedelta(days=1)
dateEnd = datetime.today().replace(hour=23, minute=59, second=59) - timedelta(days=1)
dateInitInUtc = timeInUTC('%s-%s-%s 00:00:00' % (dateInit.year, dateInit.month, dateInit.day))
dateEndInUtc = timeInUTC('%s-%s-%s 23:59:59' % (dateEnd.year, dateEnd.month, dateEnd.day))
url_sales_report = generateCsvFileCron(dateInitInUtc, dateEndInUtc)
return HttpResponse(simplejson.dumps({'status': 'success', 'url': url_sales_report}))
def update_brand(request):
salesReport = Magento()
salesReport.connect()
quantidadeImportada = 0
for product in salesReport.getProductArray():
if RepresentsInt(product['sku']):
exist = models.item.objects.filter(sku=product['sku'])
if len(exist) == 0:
saveItemInDatabse(product)
quantidadeImportada += 1
return redirect(reverse('importar'))
def removeOldHoldedOrdersFrom(rangeInicio, rangeFim):
    #TODO: also add a routine that releases the committed stock after 8 days
data_fim = datetime.today() - timedelta(days=rangeFim)
data_inicio = datetime.today() - timedelta(days=rangeInicio)
pedidos_alterados = 0
orders = models.order.objects.filter(created_at__range=[data_inicio, data_fim], status='holded')
for order in orders:
        for item in order.orderitem_set.all():
            if not item.removido_estoque:
                item.item.estoque_empenhado -= item.quantidade
                item.item.estoque_atual += item.quantidade
                item.item.estoque_disponivel += item.quantidade
                # Persist the change on the product itself and flag the order item
                # so the same stock is not released twice.
                item.item.save()
                item.removido_estoque = True
                item.save()
        pedidos_alterados += 1
return pedidos_alterados
def updateProductInformation(product, quantidade_atualizada):
atualizado = False
if RepresentsInt(product['sku']):
item = models.item.objects.filter(product_id=product['product_id'])
if len(item) > 0:
item = item[0]
if int(product['status']) == 1:
product['status'] = True
else:
product['status'] = False
if item.status != product['status']:
item.status = product['status']
atualizado = True
if item.price != float(product['price']):
item.price = float(product['price'])
atualizado = True
if 'special_price' in product and product['special_price'] and item.specialPrice != float(
product['special_price']):
item.specialPrice = float(product['special_price'])
atualizado = True
if atualizado:
item.save()
quantidade_atualizada += 1
else:
saveItemInDatabse(product)
return quantidade_atualizada
def updateItemDetail():
    #Update product prices and statuses daily
salesReport = Magento()
salesReport.connect()
quantidade_atualizada = 0
for product in salesReport.getProductArray():
quantidade_atualizada = updateProductInformation(product, quantidade_atualizada)
return quantidade_atualizada
def teste_update_item_detail(request):
    quantidade_atualizada = updateItemDetail()
    # A Django view must return an HttpResponse
    return HttpResponse(simplejson.dumps({'status': 'success', 'atualizado': quantidade_atualizada}))
def updateVMDCron():
    # 30-day window for the VMD: note the inverted naming - dateEnd is the older
    # bound (30 days back) and dateInit the newer one; the 3-hour shift appears
    # to be a timezone adjustment.
    dateInit = datetime.today().replace(hour=0, minute=0, second=0) - timedelta(hours=3)
    dateEnd = datetime.today().replace(hour=23, minute=59, second=59) - timedelta(days=30) - timedelta(hours=3)
for item in models.item.objects.all():
item.vmd = getVMD30ForDatabaseItem(item, dateEnd, dateInit)
item.save()
def updateVlrFaturadoDiaCron():
for item in models.item.objects.all():
valor_produto = item.specialPrice if item.specialPrice else item.price
item.valor_faturado_do_dia = item.vmd * valor_produto
item.save()
def updateABCValues():
total_faturado_no_periodo = 0
for i in models.item.objects.all():
if float(i.vmd) > 0.0:
if not i.valor_faturado_do_dia or float(i.valor_faturado_do_dia) == 0.0:
if i.specialPrice:
preco_item = i.specialPrice
else:
preco_item = i.price
if float(preco_item) > 0:
i.valor_faturado_do_dia = i.vmd * float(preco_item)
i.save()
total_faturado_no_periodo += i.valor_faturado_do_dia
percentage_count = 0
for count, item in enumerate(models.item.objects.filter(valor_faturado_do_dia__gt=0).order_by('-valor_faturado_do_dia')):
percentage = (float(item.valor_faturado_do_dia) / float(total_faturado_no_periodo)) * 100
percentage_count += percentage
item.percentage = round(percentage, 4)
        # Cumulative-revenue ABC split: first 65% -> A, next 25% -> B, remainder -> C
        if percentage_count <= 65.00:
            item.abc_letter = "A"
        elif percentage_count <= 90.00:
            item.abc_letter = "B"
        else:
            item.abc_letter = "C"
item.save()
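# --- Illustrative sketch (not part of the original module) -------------------
# The ABC split above classifies products by cumulative share of daily revenue:
# items covering the first 65% of revenue are "A", the next 25% "B", the rest "C".
# A minimal, self-contained version of that rule using plain tuples instead of
# Django models (all names below are hypothetical):
def _abc_classify_example(revenues):
    """revenues: list of (sku, valor_faturado_do_dia). Returns {sku: letter}."""
    total = float(sum(v for _, v in revenues)) or 1.0
    letters, cumulative = {}, 0.0
    for sku, value in sorted(revenues, key=lambda r: r[1], reverse=True):
        cumulative += value / total * 100.0
        letters[sku] = 'A' if cumulative <= 65.0 else ('B' if cumulative <= 90.0 else 'C')
    return letters
# Example: _abc_classify_example([('100', 500.0), ('200', 300.0), ('300', 200.0)])
# -> {'100': 'A', '200': 'B', '300': 'C'}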
|
akiokio/centralfitestoque
|
src/salesReport/views.py
|
Python
|
bsd-2-clause
| 36,745
|
[
"VMD"
] |
72d5a25c76f846018f6a30ddb98d129a87a2374d3e7f551bd85d7e9a6a81d92f
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""
Abstract syntax tree (AST) classes along with their concrete subclasses.
The module defines a generic AST element, four AST node categories (which act as the basis for all
concrete AST nodes) and, finally, the concrete classes that represent the output of the parsing process.
The generic AST node categories are:
- Leaf
- UnaryOp
- BinaryOp
- ListOp
The concrete AST nodes, represent higher level (domain specific) nodes.
"""
from __future__ import unicode_literals
# #### Abstract Syntax Tree classes ####
class ASTElement(object):
"""Root AbstractSyntaxTree node that acts as a stub for calling the Visitor's `visit` dispatcher method."""
def accept(self, visitor, *args, **kwargs):
return visitor.visit(self, *args, **kwargs)
class Leaf(ASTElement):
def __init__(self, value=None):
self.value = value
def __eq__(self, other):
return type(self) == type(other) and self.value == other.value
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.value)
def __hash__(self):
return hash(self.value)
class UnaryOp(ASTElement):
def __init__(self, op):
self.op = op
def __eq__(self, other):
return type(self) == type(other) and self.op == other.op
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.op)
def __hash__(self):
return hash(self.op)
class BinaryOp(ASTElement):
def __init__(self, left, right):
self.left = left
self.right = right
def __eq__(self, other):
return (
type(self) == type(other)
) and (
self.left == other.left
) and (
self.right == other.right
)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self.left), repr(self.right))
def __hash__(self):
return hash((self.left, self.right))
class ListOp(ASTElement):
def __init__(self, children):
try:
iter(children)
except TypeError:
self.children = [children]
else:
self.children = children
def __eq__(self, other):
return type(self) == type(other) and self.children == other.children
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.children)
def __hash__(self):
return hash(tuple(self.children))
# Concrete Syntax Tree classes
class AndOp(BinaryOp):
pass
class OrOp(BinaryOp):
pass
class KeywordOp(BinaryOp):
pass
class NotOp(UnaryOp):
pass
class NestedKeywordOp(BinaryOp):
pass
class ValueOp(UnaryOp):
pass
class QueryWithMalformedPart(BinaryOp):
"""A combination of recognized part of a query (with a parse tree) and some malformed input.
Its left child is the recognized parse tree, while its right child has the :class:`MalformedQuery`.
"""
pass
class MalformedQuery(ListOp):
"""A :class:`ListOp` with children the unrecognized words of the parser's input."""
pass
class RangeOp(BinaryOp):
pass
class GreaterEqualThanOp(UnaryOp):
pass
class GreaterThanOp(UnaryOp):
pass
class LessThanOp(UnaryOp):
pass
class LessEqualThanOp(UnaryOp):
pass
# #### Leafs ####
class Keyword(Leaf):
pass
class GenericValue(Leaf):
"""Represents a generic value, which might contain a wildcard."""
WILDCARD_TOKEN = '*'
def __init__(self, value, contains_wildcard=False):
super(GenericValue, self).__init__(value)
self.contains_wildcard = contains_wildcard
def __eq__(self, other):
return super(GenericValue, self).__eq__(other) and self.contains_wildcard == other.contains_wildcard
def __hash__(self):
return hash((super(GenericValue, self).__hash__(), self.contains_wildcard))
class Value(GenericValue):
pass
class ExactMatchValue(Leaf):
pass
class PartialMatchValue(GenericValue):
pass
class RegexValue(Leaf):
pass
class EmptyQuery(Leaf):
pass
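# --- Illustrative usage (not part of the original module) --------------------
# A hypothetical sketch of how a parser might assemble these nodes for a query
# such as "author Ellis and not title boson":
if __name__ == '__main__':
    tree = AndOp(
        KeywordOp(Keyword('author'), Value('Ellis')),
        NotOp(KeywordOp(Keyword('title'), Value('boson'))),
    )
    # Nodes implement structural equality and a readable repr:
    print(repr(tree))
    assert tree == AndOp(KeywordOp(Keyword('author'), Value('Ellis')),
                         NotOp(KeywordOp(Keyword('title'), Value('boson'))))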
|
chris-asl/inspire-query-parser
|
inspire_query_parser/ast.py
|
Python
|
gpl-3.0
| 4,970
|
[
"VisIt"
] |
81960f74b7ecb8c212fb93781267188b691f37831d26f8bead2f603535554759
|
# Copyright 2013 by David Arenillas and Anthony Mathelier. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Provides read access to a JASPAR5 formatted database.
This module requires MySQLdb to be installed.
Example; substitute your database credentials as
appropriate:
>>> from Bio.motifs.jaspar.db import JASPAR5
>>>
>>> JASPAR_DB_HOST = "hostname.example.org"
>>> JASPAR_DB_NAME = "JASPAR_2013"
>>> JASPAR_DB_USER = "guest"
>>> JASPAR_DB_PASS = "guest"
>>>
>>> DFLT_COLLECTION = 'CORE'
>>> jdb = JASPAR5(
... host=JASPAR_DB_HOST,
... name=JASPAR_DB_NAME,
... user=JASPAR_DB_USER,
... password=JASPAR_DB_PASS
... )
>>>
>>>
>>> ets1 = jdb.fetch_motif_by_id('MA0098')
>>> print(ets1)
TF name ETS1
Matrix ID MA0098.1
Collection CORE
TF class Winged Helix-Turn-Helix
TF family Ets
Species 9606
Taxonomic group vertebrates
Accession ['CAG47050']
Data type used SELEX
Medline 1542566
PAZAR ID TF0000070
Comments -
Matrix:
0 1 2 3 4 5
A: 4.00 17.00 0.00 0.00 0.00 5.00
C: 16.00 0.00 1.00 39.00 39.00 3.00
G: 4.00 0.00 0.00 1.00 0.00 17.00
T: 16.00 23.00 39.00 0.00 1.00 15.00
>>>
>>> motifs = jdb.fetch_motifs(
... collection = 'CORE',
... tax_group = ['vertebrates', 'insects'],
... tf_class = 'Winged Helix-Turn-Helix',
... tf_family = ['Forkhead', 'Ets'],
... min_ic = 12
... )
>>>
>>> for motif in motifs:
... pass # do something with the motif
"""
from __future__ import print_function
import warnings
from Bio import BiopythonWarning
from Bio import MissingPythonDependencyError
try:
import MySQLdb as mdb
except ImportError:
raise MissingPythonDependencyError("Install MySQLdb if you want to use "
"Bio.motifs.jaspar.db")
from Bio.Alphabet.IUPAC import unambiguous_dna as dna
from Bio.motifs import jaspar, matrix
__docformat__ = "restructuredtext en"
JASPAR_DFLT_COLLECTION = 'CORE'
class JASPAR5(object):
"""
Class representing a JASPAR5 DB. The methods within are loosely based
on the perl TFBS::DB::JASPAR5 module.
Note: We will only implement reading of JASPAR motifs from the DB.
Unlike the perl module, we will not attempt to implement any methods to
store JASPAR motifs or create a new DB at this time.
"""
def __init__(self, host=None, name=None, user=None, password=None):
"""
Construct a JASPAR5 instance and connect to specified DB
Arguments:
        host - host name of the JASPAR DB server
name - name of the JASPAR database
user - user name to connect to the JASPAR DB
password - JASPAR DB password
"""
self.name = name
self.host = host
self.user = user
self.password = password
self.dbh = mdb.connect(host, user, password, name)
def __str__(self):
"""
        Return a string representation of the JASPAR5 DB connection.
"""
text = "%s\@%s:%s" % (self.user, self.host, self.name)
return text
def fetch_motif_by_id(self, id):
"""
        Fetch a single JASPAR motif from the DB by its JASPAR matrix ID
(e.g. 'MA0001.1').
Arguments:
- id - JASPAR matrix ID. This may be a fully specified ID including
the version number (e.g. MA0049.2) or just the base ID (e.g.
MA0049). If only a base ID is provided, the latest version is
returned.
Returns:
- A Bio.motifs.jaspar.Motif object
**NOTE:** The perl TFBS module allows you to specify the type of matrix
to return (PFM, PWM, ICM) but matrices are always stored in JASPAR as
PFMs so this does not really belong here. Once a PFM is fetched the
pwm() and pssm() methods can be called to return the normalized and
log-odds matrices.
"""
# separate stable ID and version number
(base_id, version) = jaspar.split_jaspar_id(id)
if not version:
# if ID contains no version portion, fetch the latest version
version = self._fetch_latest_version(base_id)
# fetch internal JASPAR matrix ID - also a check for validity
int_id = None
if version:
int_id = self._fetch_internal_id(base_id, version)
# fetch JASPAR motif using internal ID
motif = None
if int_id:
motif = self._fetch_motif_by_internal_id(int_id)
return motif
def fetch_motifs_by_name(self, name):
"""
Fetch a list of JASPAR motifs from a JASPAR DB by the given TF name(s).
Arguments:
name - a single name or list of names
Returns:
        A list of Bio.motifs.jaspar.Motif objects
Notes:
Names are not guaranteed to be unique. There may be more than one
motif with the same name. Therefore even if name specifies a single
name, a list of motifs is returned. This just calls
self.fetch_motifs(collection = None, tf_name = name).
This behaviour is different from the TFBS perl module's
get_Matrix_by_name() method which always returns a single matrix,
issuing a warning message and returning the first matrix retrieved
in the case where multiple matrices have the same name.
"""
return self.fetch_motifs(collection=None, tf_name=name)
def fetch_motifs(
self, collection=JASPAR_DFLT_COLLECTION, tf_name=None, tf_class=None,
tf_family=None, matrix_id=None, tax_group=None, species=None,
pazar_id=None, data_type=None, medline=None, min_ic=0, min_length=0,
min_sites=0, all=False, all_versions=False
):
"""
Fetch a jaspar.Record (list) of motifs based on the provided selection
criteria.
Arguments::
Except where obvious, all selection criteria arguments may be
specified as a single value or a list of values. Motifs must
        meet ALL the specified selection criteria to be returned, subject to
        the precedence exceptions noted below.
        all - Takes precedence over all other selection criteria.
Every motif is returned. If 'all_versions' is also
specified, all versions of every motif are returned,
otherwise just the latest version of every motif is
returned.
matrix_id - Takes precedence over all other selection criteria
except 'all'. Only motifs with the given JASPAR
matrix ID(s) are returned. A matrix ID may be
specified as just a base ID or full JASPAR IDs
including version number. If only a base ID is
provided for specific motif(s), then just the latest
version of those motif(s) are returned unless
'all_versions' is also specified.
collection - Only motifs from the specified JASPAR collection(s)
are returned. NOTE - if not specified, the collection
defaults to CORE for all other selection criteria
except 'all' and 'matrix_id'. To apply the other
selection criteria across all JASPAR collections,
explicitly set collection=None.
tf_name - Only motifs with the given name(s) are returned.
tf_class - Only motifs of the given TF class(es) are returned.
tf_family - Only motifs from the given TF families are returned.
tax_group - Only motifs belonging to the given taxonomic
supergroups are returned (e.g. 'vertebrates',
'insects', 'nematodes' etc.)
species - Only motifs derived from the given species are
returned. Species are specified as taxonomy IDs.
data_type - Only motifs generated with the given data type (e.g.
('ChIP-seq', 'PBM', 'SELEX' etc.) are returned.
NOTE - must match exactly as stored in the database.
pazar_id - Only motifs with the given PAZAR TF ID are returned.
        medline - Only motifs with the given medline (PubMed IDs) are
returned.
min_ic - Only motifs whose profile matrices have at least this
        information content (specificity) are returned.
min_length - Only motifs whose profiles are of at least this
length are returned.
        min_sites - Only motifs compiled from at least this many binding
sites are returned.
all_versions- Unless specified, just the latest version of motifs
determined by the other selection criteria are
returned. Otherwise all versions of the selected
motifs are returned.
Returns:
- A Bio.motifs.jaspar.Record (list) of motifs.
"""
# Fetch the internal IDs of the motifs using the criteria provided
int_ids = self._fetch_internal_id_list(
collection=collection,
tf_name=tf_name,
tf_class=tf_class,
tf_family=tf_family,
matrix_id=matrix_id,
tax_group=tax_group,
species=species,
pazar_id=pazar_id,
data_type=data_type,
medline=medline,
all=all,
all_versions=all_versions
)
record = jaspar.Record()
"""
Now further filter motifs returned above based on any specified
matrix specific criteria.
"""
for int_id in int_ids:
motif = self._fetch_motif_by_internal_id(int_id)
# Filter motifs to those with matrix IC greater than min_ic
if min_ic:
if motif.pssm.mean() < min_ic:
continue
# Filter motifs to those with minimum length of min_length
if min_length:
if motif.length < min_length:
continue
# XXX We could also supply a max_length filter.
"""
        Filter motifs to those composed of at least this many sites.
        The perl TFBS module assumes column sums may be different, although
        this should really be enforced; here we ignore that subtlety and just
        use the first column sum.
"""
if min_sites:
num_sites = sum(
[motif.counts[nt][0] for nt in motif.alphabet.letters]
)
if num_sites < min_sites:
continue
record.append(motif)
return record
def _fetch_latest_version(self, base_id):
"""
        Get the latest version number for the given base_id.
"""
cur = self.dbh.cursor()
cur.execute("""select VERSION from MATRIX where BASE_id = %s
order by VERSION desc limit 1""", (base_id,))
row = cur.fetchone()
latest = None
if row:
latest = row[0]
else:
warnings.warn("Failed to fetch latest version number for JASPAR motif with base ID '{0}'. No JASPAR motif with this base ID appears to exist in the database.".format(base_id), BiopythonWarning)
return latest
def _fetch_internal_id(self, base_id, version):
"""
Fetch the internal id for a base id + version. Also checks if this
combo exists or not
"""
cur = self.dbh.cursor()
cur.execute("""select id from MATRIX where BASE_id = %s
and VERSION = %s""", (base_id, version))
row = cur.fetchone()
int_id = None
if row:
int_id = row[0]
else:
warnings.warn("Failed to fetch internal database ID for JASPAR motif with matrix ID '{0}.{1}'. No JASPAR motif with this matrix ID appears to exist.".format(base_id, version), BiopythonWarning)
return int_id
def _fetch_motif_by_internal_id(self, int_id):
# fetch basic motif information
cur = self.dbh.cursor()
cur.execute("""select BASE_ID, VERSION, COLLECTION, NAME from MATRIX
where id = %s""", (int_id,))
row = cur.fetchone()
# This should never happen as it is an internal method. If it does
# we should probably raise an exception
if not row:
warnings.warn("Could not fetch JASPAR motif with internal ID = {0}".format(int_id), BiopythonWarning)
return None
base_id = row[0]
version = row[1]
collection = row[2]
name = row[3]
matrix_id = "".join([base_id, '.', str(version)])
# fetch the counts matrix
counts = self._fetch_counts_matrix(int_id)
# Create new JASPAR motif
motif = jaspar.Motif(
matrix_id, name, collection=collection, counts=counts
)
# fetch species
cur.execute("""select TAX_ID from MATRIX_SPECIES
where id = %s""", (int_id,))
tax_ids = []
rows = cur.fetchall()
for row in rows:
tax_ids.append(row[0])
# Many JASPAR motifs (especially those not in the CORE collection)
# do not have taxonomy IDs. So this warning would get annoying.
#if not tax_ids:
# warnings.warn("Could not fetch any taxonomy IDs for JASPAR motif {0}".format(motif.matrix_id), BiopythonWarning)
motif.species = tax_ids
# fetch protein accession numbers
cur.execute("select ACC FROM MATRIX_PROTEIN where id = %s", (int_id,))
accs = []
rows = cur.fetchall()
for row in rows:
accs.append(row[0])
# Similarly as for taxonomy IDs, it would get annoying to print
# warnings for JASPAR motifs which do not have accession numbers.
motif.acc = accs
# fetch remaining annotation as tags from the ANNOTATION table
cur.execute("""select TAG, VAL from MATRIX_ANNOTATION
where id = %s""", (int_id,))
rows = cur.fetchall()
for row in rows:
attr = row[0]
val = row[1]
if attr == 'class':
motif.tf_class = val
elif attr == 'family':
motif.tf_family = val
elif attr == 'tax_group':
motif.tax_group = val
elif attr == 'type':
motif.data_type = val
elif attr == 'pazar_tf_id':
motif.pazar_id = val
elif attr == 'medline':
motif.medline = val
elif attr == 'comment':
motif.comment = val
else:
"""
                TODO If we were to implement additional arbitrary tags
motif.tag(attr, val)
"""
pass
return motif
def _fetch_counts_matrix(self, int_id):
"""
Fetch the counts matrix from the JASPAR DB by the internal ID
Returns a Bio.motifs.matrix.GenericPositionMatrix
"""
counts = {}
cur = self.dbh.cursor()
for base in dna.letters:
base_counts = []
cur.execute("""select val from MATRIX_DATA where ID = %s
and row = %s order by col""", (int_id, base))
rows = cur.fetchall()
for row in rows:
base_counts.append(row[0])
counts[base] = [float(x) for x in base_counts]
return matrix.GenericPositionMatrix(dna, counts)
def _fetch_internal_id_list(
self, collection=JASPAR_DFLT_COLLECTION, tf_name=None, tf_class=None,
tf_family=None, matrix_id=None, tax_group=None, species=None,
pazar_id=None, data_type=None, medline=None, all=False,
all_versions=False
):
"""
Fetch a list of internal JASPAR motif IDs based on various passed
parameters which may then be used to fetch the rest of the motif data.
Caller:
fetch_motifs()
Arguments:
See arguments sections of fetch_motifs()
Returns:
A list of internal JASPAR motif IDs which match the given
selection criteria arguments.
Build an SQL query based on the selection arguments provided.
1: First add table joins and sub-clauses for criteria corresponding to
named fields from the MATRIX and MATRIX_SPECIES tables such as
collection, matrix ID, name, species etc.
2: Then add joins/sub-clauses for tag/value parameters from the
MATRIX_ANNOTATION table.
For the surviving matrices, the responsibility to do matrix-based
        feature filtering such as ic, number of sites etc., falls on the
calling fetch_motifs() method.
"""
int_ids = []
cur = self.dbh.cursor()
"""
Special case 1: fetch ALL motifs. Highest priority.
Ignore all other selection arguments.
"""
if all:
cur.execute("select ID from MATRIX")
rows = cur.fetchall()
for row in rows:
int_ids.append(row[0])
return int_ids
"""
Special case 2: fetch specific motifs by their JASPAR IDs. This
has higher priority than any other except the above 'all' case.
Ignore all other selection arguments.
"""
if matrix_id:
"""
These might be either stable IDs or stable_ID.version.
If just stable ID and if all_versions == 1, return all versions,
otherwise just the latest
"""
if all_versions:
for id in matrix_id:
                    # ignore version here, this is a stupidity filter
(base_id, version) = jaspar.split_jaspar_id(id)
cur.execute(
"select ID from MATRIX where BASE_ID = %s", (base_id,)
)
rows = cur.fetchall()
for row in rows:
int_ids.append(row[0])
else:
                # only the latest version, or the requested version
for id in matrix_id:
(base_id, version) = jaspar.split_jaspar_id(id)
if not version:
version = self._fetch_latest_version(base_id)
int_id = None
if version:
int_id = self._fetch_internal_id(base_id, version)
if int_id:
int_ids.append(int_id)
return int_ids
tables = ["MATRIX m"]
where_clauses = []
# Select by MATRIX.COLLECTION
if collection:
if isinstance(collection, list):
# Multiple collections passed in as a list
clause = "m.COLLECTION in ('"
clause = "".join([clause, "','".join(collection)])
clause = "".join([clause, "')"])
else:
# A single collection - typical usage
clause = "m.COLLECTION = '%s'" % collection
where_clauses.append(clause)
# Select by MATRIX.NAME
if tf_name:
if isinstance(tf_name, list):
# Multiple names passed in as a list
clause = "m.NAME in ('"
clause = "".join([clause, "','".join(tf_name)])
clause = "".join([clause, "')"])
else:
# A single name
clause = "m.NAME = '%s'" % tf_name
where_clauses.append(clause)
# Select by MATRIX_SPECIES.TAX_ID
if species:
tables.append("MATRIX_SPECIES ms")
where_clauses.append("m.ID = ms.ID")
"""
NOTE: species are numeric taxonomy IDs but stored as varchars
in the DB.
"""
if isinstance(species, list):
# Multiple tax IDs passed in as a list
clause = "ms.TAX_ID in ('"
clause = "".join([clause, "','".join(str(s) for s in species)])
clause = "".join([clause, "')"])
else:
# A single tax ID
clause = "ms.TAX_ID = '%s'" % str(species)
where_clauses.append(clause)
"""
Tag based selection from MATRIX_ANNOTATION
Differs from perl TFBS module in that the matrix class explicitly
has a tag attribute corresponding to the tags in the database. This
provides tremendous flexibility in adding new tags to the DB and
        being able to select based on those tags without adding new code.
In the JASPAR Motif class we have elected to use specific attributes
for the most commonly used tags and here correspondingly only allow
selection on these attributes.
The attributes corresponding to the tags for which selection is
provided are:
Attribute Tag
tf_class class
tf_family family
pazar_id pazar_tf_id
medline medline
data_type type
tax_group tax_group
"""
# Select by TF class(es) (MATRIX_ANNOTATION.TAG="class")
if tf_class:
tables.append("MATRIX_ANNOTATION ma1")
where_clauses.append("m.ID = ma1.ID")
clause = "ma1.TAG = 'class'"
if isinstance(tf_class, list):
# A list of TF classes
clause = "".join([clause, " and ma1.VAL in ('"])
clause = "".join([clause, "','".join(tf_class)])
clause = "".join([clause, "')"])
else:
# A single TF class
clause = "".join([clause, " and ma1.VAL = '%s' " % tf_class])
where_clauses.append(clause)
# Select by TF families (MATRIX_ANNOTATION.TAG="family")
if tf_family:
tables.append("MATRIX_ANNOTATION ma2")
where_clauses.append("m.ID = ma2.ID")
clause = "ma2.TAG = 'family'"
if isinstance(tf_family, list):
# A list of TF families
clause = "".join([clause, " and ma2.VAL in ('"])
clause = "".join([clause, "','".join(tf_family)])
clause = "".join([clause, "')"])
else:
# A single TF family
clause = "".join([clause, " and ma2.VAL = '%s' " % tf_family])
where_clauses.append(clause)
# Select by PAZAR TF ID(s) (MATRIX_ANNOTATION.TAG="pazar_tf_id")
if pazar_id:
tables.append("MATRIX_ANNOTATION ma3")
where_clauses.append("m.ID = ma3.ID")
clause = "ma3.TAG = 'pazar_tf_id'"
if isinstance(pazar_id, list):
# A list of PAZAR IDs
clause = "".join([clause, " and ma3.VAL in ('"])
clause = "".join([clause, "','".join(pazar_id)])
clause = "".join([clause, "')"])
else:
# A single PAZAR ID
clause = "".join([" and ma3.VAL = '%s' " % pazar_id])
where_clauses.append(clause)
# Select by PubMed ID(s) (MATRIX_ANNOTATION.TAG="medline")
if medline:
tables.append("MATRIX_ANNOTATION ma4")
where_clauses.append("m.ID = ma4.ID")
clause = "ma4.TAG = 'medline'"
if isinstance(medline, list):
# A list of PubMed IDs
clause = "".join([clause, " and ma4.VAL in ('"])
clause = "".join([clause, "','".join(medline)])
clause = "".join([clause, "')"])
else:
# A single PubMed ID
clause = "".join([" and ma4.VAL = '%s' " % medline])
where_clauses.append(clause)
# Select by data type(s) used to compile the matrix
# (MATRIX_ANNOTATION.TAG="type")
if data_type:
tables.append("MATRIX_ANNOTATION ma5")
where_clauses.append("m.ID = ma5.ID")
clause = "ma5.TAG = 'type'"
if isinstance(data_type, list):
# A list of data types
clause = "".join([clause, " and ma5.VAL in ('"])
clause = "".join([clause, "','".join(data_type)])
clause = "".join([clause, "')"])
else:
# A single data type
clause = "".join([" and ma5.VAL = '%s' " % data_type])
where_clauses.append(clause)
# Select by taxonomic supergroup(s) (MATRIX_ANNOTATION.TAG="tax_group")
if tax_group:
tables.append("MATRIX_ANNOTATION ma6")
where_clauses.append("m.ID = ma6.ID")
clause = "ma6.TAG = 'tax_group'"
if isinstance(tax_group, list):
# A list of tax IDs
clause = "".join([clause, " and ma6.VAL in ('"])
clause = "".join([clause, "','".join(tax_group)])
clause = "".join([clause, "')"])
else:
# A single tax ID
clause = "".join([clause, " and ma6.VAL = '%s' " % tax_group])
where_clauses.append(clause)
sql = "".join(["select distinct(m.ID) from ", ", ".join(tables)])
if where_clauses:
sql = "".join([sql, " where ", " and ".join(where_clauses)])
# print "sql = %s" % sql
cur.execute(sql)
rows = cur.fetchall()
for row in rows:
id = row[0]
if all_versions:
int_ids.append(id)
else:
# is the latest version?
if self._is_latest_version(id):
int_ids.append(id)
if len(int_ids) < 1:
warnings.warn("Zero motifs returned with current select critera", BiopythonWarning)
return int_ids
def _is_latest_version(self, int_id):
"""
Does this internal ID represent the latest version of the JASPAR
matrix (collapse on base ids)
"""
cur = self.dbh.cursor()
cur.execute(
"""select count(*) from MATRIX
where BASE_ID = (select BASE_ID from MATRIX where ID = %s)
and VERSION > (select VERSION from MATRIX where ID = %s)""",
(int_id, int_id)
)
row = cur.fetchone()
count = row[0]
if count == 0:
# no matrices with higher version ID and same base id
return True
return False
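# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of querying a JASPAR5 database, assuming valid credentials
# and a reachable MySQL server (all connection values below are placeholders):
if __name__ == '__main__':
    jdb = JASPAR5(host="localhost", name="JASPAR_2013", user="guest", password="guest")
    # Fetch vertebrate CORE motifs with reasonably informative matrices
    motifs = jdb.fetch_motifs(collection='CORE', tax_group='vertebrates', min_ic=10)
    for m in motifs:
        print("%s %s" % (m.matrix_id, m.name))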
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/motifs/jaspar/db.py
|
Python
|
apache-2.0
| 27,398
|
[
"Biopython"
] |
48e9be23d31878e210b8e45c96d3110b728e53f5092e017e9e5fde00a9d31272
|
"""
From the 26 March meeting, the plan was:
1) Fix 2D separation and overall R flux ratio. Find best fit PSF.
Issues... the best fit PSF can't just be a Gaussian. It is naturally the convolution of
multiple functional forms, i.e. something that is positive everywhere. On a quick search,
I can't find any obvious parameterisations. Options...
a: Just use the interpolated PSF with a correction for the companion. Problem: we don't
know how to correct for the companion, so will have to do this iteratively.
b: Use a "distortion map".
c: Use a functional form that can be negative and don't worry about details.
2) Extract spectra of A and B components. This is best done with a *good seeing* night and doesn't have to
be done for every data set. Save these spectra.
3) Fix B spectrum, and using the PSFs from step (1) extract the 2D positions of the A and
B components.
Star: GaiaDR2 6110141563309613184
...
is:
2.343 arcsec North
0.472 arcsec East
It is 3.726 mags fainter in Rp.
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as pyfits
import glob
import scipy.optimize as op
import scipy.signal as sig
import time
import multiprocessing
import pdb
plt.ion()
#Settings
multiprocess=False #Setting this on a MacBook changes total time from ~9 to ~5 seconds. Only a moderate help!
MIN_PEAK=20
NPSF_PARAMS = 5
WAVE = np.arange(6400.0,7000.0,0.25)
ddir = '/Users/mireland/data/pds70/190225/' #!!! This comes from
#ddir = '/Users/mireland/data/pds70/190225/' #From Marusa's reduction.
fns = np.sort(glob.glob(ddir + '*p11.fits'))
xscale = 1.0 #arcsec/pix
yscale = 0.5 #arcsec/pix
#---------------------------------
#Local function declarations
def PSF(p,x,y,companion_params=None):
"""A simple 2D PSF based on a Gaussian.
Parameters
----------
p: numpy array
Parameters for the PSF.
p[0]: x coordinate offset
p[1]: x coordinate width
p[2]: y coordinate offset
p[3]: y coordinate width
p[4]: Total flux
        p[5]: 2nd order symmetric term (documented but unused; only p[0]..p[4] are fitted)
x: x coordinate in arcsec.
y: y coordinate in arcsec.
"""
xp = (x-p[0])/p[1]
yp = (y-p[2])/p[3]
    if companion_params is not None:
xp_comp = (x-p[0]-companion_params[1])/p[1]
yp_comp = (y-p[2]-companion_params[2])/p[3]
return p[4]*(np.exp(-(xp**2 + yp**2)/2.0) + companion_params[0]*np.exp(-(xp_comp**2 + yp_comp**2)/2.0))
else:
return p[4]*np.exp(-(xp**2 + yp**2)/2.0)
def PSF_resid(p,x,y,data, gain=1.0, rnoise=3.0):
"Residuals for fitting to a 1D Gaussian"
return ((PSF(p,x,y) - data)/10.).flatten() #np.sqrt(np.maximum(y,0) + rnoise**2)
def lsq_PSF( args ):
"""
    Fit the 2D Gaussian PSF model to an image.
    Parameters
    ----------
    args: tuple
        (guess_p, x, y, data): the initial parameter guess, the x and y
        coordinate grids (arcsec) and the image to fit.
    Returns
    -------
    The best-fit parameter vector and the approximate inverse covariance (J^T J).
"""
fit = op.least_squares(PSF_resid, args[0], method='lm', \
xtol=1e-04, ftol=1e-4, f_scale=[3.,1.,1.], args=(args[1], args[2], args[3]))
#Check for unphysical solutions and set c_inv to zero for those solutions...
c_inv = fit.jac.T.dot(fit.jac)
return fit.x, c_inv
#---------------------------------
#Main "Script" code
pas = []
mjds = []
fits = []
sigs = []
yx_peak = np.zeros( (len(WAVE), 2), dtype=int)
peak_vals = np.zeros( len(WAVE) )
dds = []
#Loop through files and make a 2D fit.
for f in fns[-3:]:
ff = pyfits.open(f)
pas.append(ff[0].header['TELPAN'])
mjds.append(ff[0].header['MJD-OBS'])
dd = ff[0].data[:,8:-8,13:-2]
dds += [dd]
#Subtract off local sky contribution. Could be more sophisticated!
meds = np.median(dd.reshape(dd.shape[0], dd.shape[1]*dd.shape[2]), axis=1).reshape(dd.shape[0],1,1)
dd -= meds
#Find the maxima in every column.
for i in range(len(WAVE)):
yx_peak[i] = np.unravel_index(np.argmax(dd[i]), dd[i].shape)
peak_vals[i] = dd[i, yx_peak[i][0], yx_peak[i][1]]
#Create the x and y arrays
xs, ys = np.meshgrid(np.arange(dd.shape[2])*xscale, np.arange(dd.shape[1])*yscale)
#Now fit to every wavelength
for i in range(len(WAVE)):
fit, sig = lsq_PSF( ([yx_peak[i,1]*xscale,1,yx_peak[i,0]*yscale,1,peak_vals[i]], xs, ys, dd[i]) )
fits += [fit]
sigs += [sig]
fits = np.array(fits)
# Reshape to (n_frames, n_wavelengths, n_params). Use the number of frames that
# were actually processed (only fns[-3:] are read above), not len(fns).
fits = fits.reshape( (len(dds), len(WAVE), NPSF_PARAMS) )
good = np.where(np.median(fits[:,:,4], axis=1) > 100)[0]
#Now find an average offset as a function of wavelength.
NE_offset = np.zeros( (len(WAVE),2) )
for i in good:
NE_offset[:,0] += np.cos(np.radians(pas[i]))*fits[i,:,2] + np.sin(np.radians(pas[i]))*fits[i,:,0]
NE_offset[:,1] += np.cos(np.radians(pas[i]))*fits[i,:,0] - np.sin(np.radians(pas[i]))*fits[i,:,2]
# Average over the frames that passed the flux cut rather than over all files.
NE_offset /= max(len(good), 1)
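# --- Illustrative self-check (not part of the original script) ---------------
# A minimal sketch verifying that lsq_PSF recovers the parameters of a noiseless
# synthetic Gaussian built on the same (xs, ys) grid defined above. It requires
# at least one input frame to have been processed; the parameter values are
# arbitrary test inputs chosen to lie on the grid.
if __name__ == '__main__':
    p_true = [np.median(xs), 1.2, np.median(ys), 0.9, 500.0]
    synthetic = PSF(p_true, xs, ys)
    guess = [p_true[0] + 0.5, 1.0, p_true[2] - 0.5, 1.0, 400.0]
    p_fit, c_inv = lsq_PSF((guess, xs, ys, synthetic))
    print('true parameters:', np.round(p_true, 3))
    print('fitted parameters:', np.round(p_fit, 3))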
|
PyWiFeS/tools
|
spectroastro2D.py
|
Python
|
mit
| 4,907
|
[
"Gaussian"
] |
63a08e37e02e74f02b6b9dd722f57f93138270ea11f06f045bdafcde5b968a7b
|
"""
Mixture models for multi-dimensional data.
Reference: Hirsch M, Habeck M. - Bioinformatics. 2008 Oct 1;24(19):2184-92
"""
import numpy
from abc import ABCMeta, abstractmethod
class GaussianMixture(object):
"""
Gaussian mixture model for multi-dimensional data.
"""
_axis = None
# prior for variance (inverse Gamma distribution)
ALPHA_SIGMA = 0.0001
BETA_SIGMA = 0.01
MIN_SIGMA = 0.0
use_cache = True
def __init__(self, X, K, train=True, axis=None):
"""
@param X: multi dimensional input vector with samples along first axis
@type X: (M,...) numpy array
@param K: number of components
@type K: int
@param train: train model
@type train: bool
@param axis: component axis in C{X}
@type axis: int
"""
if self._axis is not None:
if axis is not None and axis != self._axis:
raise ValueError('axis is fixed for {0}'.format(type(self).__name__))
axis = self._axis
elif axis is None:
axis = 0
self._axis = axis
N = X.shape[axis]
self._X = X
self._dimension = numpy.prod(X.shape) / N
c = numpy.linspace(0, K, N, False).astype(int)
self._scales = numpy.equal.outer(range(K), c).astype(float)
self._means = numpy.zeros((K,) + X.shape[1:])
self.del_cache()
if train:
self.em()
@property
def K(self):
"""
Number of components
@rtype: int
"""
return len(self.means)
@property
def N(self):
"""
Length of component axis
@rtype: int
"""
return self._scales.shape[1]
@property
def M(self):
"""
Number of data points
@rtype: int
"""
return len(self._X)
def del_cache(self):
"""Clear model parameter cache (force recalculation)"""
self._w = None
self._sigma = None
self._delta = None
@property
def dimension(self):
"""
Dimensionality of the mixture domain
@rtype: int
"""
return self._dimension
@property
def means(self):
"""
@rtype: (K, ...) numpy array
"""
return self._means
@means.setter
def means(self, means):
if means.shape != self._means.shape:
raise ValueError('shape mismatch')
self._means = means
self.del_cache()
@property
def scales(self):
"""
@rtype: (K, N) numpy array
"""
return self._scales
@scales.setter
def scales(self, scales):
if scales.shape != self._scales.shape:
raise ValueError('shape mismatch')
self._scales = scales
self.del_cache()
@property
def w(self):
"""
Component weights
@rtype: (K,) numpy array
"""
if not self.use_cache or self._w is None:
self._w = self.scales.mean(1)
return self._w
@property
def sigma(self):
"""
Component variations
@rtype: (K,) numpy array
"""
if not self.use_cache or self._sigma is None:
alpha = self.dimension * self.scales.sum(1) + self.ALPHA_SIGMA
beta = (self.delta * self.scales.T).sum(0) + self.BETA_SIGMA
self._sigma = numpy.sqrt(beta / alpha).clip(self.MIN_SIGMA)
return self._sigma
@property
def delta(self):
"""
Squared "distances" between data and components
@rtype: (N, K) numpy array
"""
if not self.use_cache or self._delta is None:
self._delta = numpy.transpose([[d.sum()
for d in numpy.swapaxes([(self.means[k] - self.datapoint(m, k)) ** 2
for m in range(self.M)], 0, self._axis)]
for k in range(self.K)])
return self._delta
@property
def log_likelihood_reduced(self):
"""
Log-likelihood of the marginalized model (no auxiliary indicator variables)
@rtype: float
"""
from csb.numeric import log, log_sum_exp
s_sq = (self.sigma ** 2).clip(1e-300, 1e300)
log_p = log(self.w) - 0.5 * \
(self.delta / s_sq + self.dimension * log(2 * numpy.pi * s_sq))
return log_sum_exp(log_p.T).sum()
@property
def log_likelihood(self):
"""
Log-likelihood of the extended model (with indicators)
@rtype: float
"""
from csb.numeric import log
from numpy import pi, sum
n = self.scales.sum(1)
N = self.dimension
Z = self.scales.T
s_sq = (self.sigma ** 2).clip(1e-300, 1e300)
return sum(n * log(self.w)) - 0.5 * \
(sum(Z * self.delta / s_sq) + N * sum(n * log(2 * pi * s_sq)) + sum(log(s_sq)))
def datapoint(self, m, k):
"""
Training point number C{m} as if it would belong to component C{k}
@rtype: numpy array
"""
return self._X[m]
def estimate_means(self):
"""
Update means from current model and samples
"""
n = self.scales.sum(1)
self.means = numpy.array([numpy.sum([self.scales[k, m] * self.datapoint(m, k)
for m in range(self.M)], 0) / n[k]
for k in range(self.K)])
def estimate_scales(self, beta=1.0):
"""
Update scales from current model and samples
@param beta: inverse temperature
@type beta: float
"""
from csb.numeric import log, log_sum_exp, exp
s_sq = (self.sigma ** 2).clip(1e-300, 1e300)
Z = (log(self.w) - 0.5 * (self.delta / s_sq + self.dimension * log(s_sq))) * beta
self.scales = exp(Z.T - log_sum_exp(Z.T))
def randomize_means(self):
"""
Pick C{K} samples from C{X} as means
"""
import random
self.means = numpy.asarray(random.sample(self._X, self.K))
self.estimate_scales()
def randomize_scales(self, ordered=True):
"""
Random C{scales} initialization
"""
from numpy.random import random, multinomial
if ordered:
K, N = self.scales.shape
Ks = numpy.arange(K)
w = random(K) + (5. * K / N) # with pseudocounts
c = numpy.repeat(Ks, multinomial(N, w / w.sum()))
self.scales = numpy.equal.outer(Ks, c).astype(float)
else:
s = random(self.scales.shape)
self.scales = s / s.sum(0)
if 0.0 in self.w:
self.randomize_scales(ordered)
return
self.estimate_means()
def e_step(self, beta=1.0):
"""
Expectation step for EM
@param beta: inverse temperature
@type beta: float
"""
self.estimate_scales(beta)
def m_step(self):
"""
Maximization step for EM
"""
self.estimate_means()
def em(self, n_iter=100, eps=1e-30):
"""
Expectation maximization
@param n_iter: maximum number of iteration steps
@type n_iter: int
@param eps: log-likelihood convergence criterion
@type eps: float
"""
LL_prev = -numpy.inf
for i in range(n_iter):
self.m_step()
self.e_step()
if eps is not None:
LL = self.log_likelihood
if abs(LL - LL_prev) < eps:
break
LL_prev = LL
def anneal(self, betas):
"""
Deterministic annealing
@param betas: sequence of inverse temperatures
@type betas: iterable of floats
"""
for beta in betas:
self.m_step()
self.e_step(beta)
def increment_K(self, train=True):
"""
Split component with largest sigma
@returns: new instance of mixture with incremented C{K}
@rtype: L{GaussianMixture} subclass
"""
i = self.sigma.argmax()
# duplicate column
Z = numpy.vstack([self.scales, self.scales[i]])
# mask disjoint equal sized parts
mask = Z[i].cumsum() / Z[i].sum() > 0.5
Z[i, mask] *= 0.0
Z[-1, ~mask] *= 0.0
new = type(self)(self._X, self.K + 1, False, self._axis)
new.scales = Z
new.m_step()
if train:
new.em()
return new
@classmethod
def series(cls, X, start=1, stop=9):
"""
Iterator with mixture instances for C{K in range(start, stop)}
@type X: (M,...) numpy array
@type start: int
@type stop: int
@rtype: generator
"""
mixture = cls(X, start)
yield mixture
for K in range(start + 1, stop): #@UnusedVariable
mixture = mixture.increment_K()
yield mixture
@classmethod
def new(cls, X, K=0):
"""
Factory method with optional C{K}. If C{K=0}, guess best C{K} according
to L{BIC<GaussianMixture.BIC>}.
@param X: multi dimensional input vector with samples along first axis
@type X: (M,...) numpy array
@return: Mixture instance
@rtype: L{GaussianMixture} subclass
"""
if K > 0:
return cls(X, K)
mixture_it = cls.series(X)
mixture = next(mixture_it)
# increase K as long as next candidate looks better
for candidate in mixture_it:
if candidate.BIC >= mixture.BIC:
break
mixture = candidate
return mixture
@property
def BIC(self):
"""
Bayesian information criterion, calculated as
BIC = M * ln(sigma_e^2) + K * ln(M)
@rtype: float
"""
from numpy import log
n = self.M
k = self.K
error_variance = sum(self.sigma ** 2 * self.w)
return n * log(error_variance) + k * log(n)
@property
def membership(self):
"""
Membership array
@rtype: (N,) numpy array
"""
return self.scales.argmax(0)
def overlap(self, other):
"""
Similarity of two mixtures measured in membership overlap
@param other: Mixture or membership array
@type other: L{GaussianMixture} or sequence
@return: segmentation overlap
@rtype: float in interval [0.0, 1.0]
"""
if isinstance(other, GaussianMixture):
other_w = other.membership
K = min(self.K, other.K)
elif isinstance(other, (list, tuple, numpy.ndarray)):
other_w = other
K = min(self.K, len(set(other)))
else:
raise TypeError('other')
self_w = self.membership
if len(self_w) != len(other_w):
raise ValueError('self.N != other.N')
# position numbers might be permutated, so count equal pairs
ww = tuple(zip(self_w, other_w))
same = sum(sorted(ww.count(i) for i in set(ww))[-K:])
return float(same) / len(ww)
class AbstractStructureMixture(GaussianMixture):
"""
Abstract mixture model for protein structure ensembles.
"""
__metaclass__ = ABCMeta
def __init__(self, X, K, *args, **kwargs):
if len(X.shape) != 3 or X.shape[-1] != 3:
raise ValueError('X must be array of shape (M,N,3)')
self._R = numpy.zeros((len(X), K, 3, 3))
self._t = numpy.zeros((len(X), K, 3))
super(AbstractStructureMixture, self).__init__(X, K, *args, **kwargs)
@property
def R(self):
"""
Rotation matrices
@rtype: (M,K,3,3) numpy array
"""
return self._R
@property
def t(self):
"""
Translation vectors
@rtype: (M,K,3) numpy array
"""
return self._t
def datapoint(self, m, k):
return numpy.dot(self._X[m] - self._t[m, k], self._R[m, k])
def m_step(self):
self.estimate_means()
self.estimate_T()
@abstractmethod
def estimate_T(self):
"""
Estimate superpositions
"""
raise NotImplementedError
class SegmentMixture(AbstractStructureMixture):
"""
Gaussian mixture model for protein structure ensembles using a set of segments
If C{X} is the coordinate array of a protein structure ensemble which
can be decomposed into 2 rigid segments, the segmentation will be found by:
>>> mixture = SegmentMixture(X, 2)
The segment membership of each atom is given by:
>>> mixture.membership
array([0, 0, 0, ..., 1, 1, 1])
"""
_axis = 1
def estimate_T(self):
from csb.bio.utils import wfit
for m in range(self.M):
for k in range(self.K):
self._R[m, k], self._t[m, k] = wfit(self._X[m], self.means[k], self.scales[k])
def estimate_means(self):
# superpositions are weighted, so do unweighted mean here
self.means = numpy.mean([[self.datapoint(m, k)
for m in range(self.M)]
for k in range(self.K)], 1)
class ConformerMixture(AbstractStructureMixture):
"""
Gaussian mixture model for protein structure ensembles using a set of conformers
If C{mixture} is a trained model, the ensemble coordinate array of
structures from C{X} which belong to conformation C{k} is given by:
>>> indices = numpy.where(mixture.membership == k)[0]
>>> conformer = [mixture.datapoint(m, k) for m in indices]
"""
_axis = 0
def estimate_T(self):
from csb.bio.utils import fit
for m in range(self.M):
for k in range(self.K):
self._R[m, k], self._t[m, k] = fit(self._X[m], self.means[k])
# vi:expandtab:smarttab:sw=4
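# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch, assuming numpy and the csb package are installed: fit a
# two-component GaussianMixture to synthetic 2D data and inspect the result.
if __name__ == '__main__':
    numpy.random.seed(0)
    X = numpy.concatenate([numpy.random.randn(50, 2),
                           numpy.random.randn(50, 2) + 5.0])
    mixture = GaussianMixture(X, 2)   # trains via EM by default
    print(mixture.w)                  # component weights, roughly [0.5, 0.5]
    print(mixture.membership[:10])
    print(mixture.membership[-10:])
    print(mixture.log_likelihood_reduced)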
|
csb-toolbox/CSB
|
csb/statistics/mixtures.py
|
Python
|
mit
| 13,837
|
[
"Gaussian"
] |
08e23ea1b083aaa5dc13720731d332cd4a8e21350aa4d35f95d15bccf04ef67f
|
#!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: xattr
version_added: "1.3"
short_description: Manage user defined extended attributes
description:
- Manages filesystem user defined extended attributes, requires that they are enabled
on the target filesystem and that the setfattr/getfattr utilities are present.
options:
path:
description:
- The full path of the file/object to get the facts of.
- Before 2.3 this option was only usable as I(name).
aliases: [ name ]
required: true
key:
description:
- The name of a specific Extended attribute key to set/retrieve.
value:
description:
- The value to set the named name/key to, it automatically sets the C(state) to 'set'.
state:
description:
- defines which state you want to do.
C(read) retrieves the current value for a C(key) (default)
C(present) sets C(name) to C(value), default if value is set
C(all) dumps all data
C(keys) retrieves all keys
C(absent) deletes the key
choices: [ absent, all, keys, present, read ]
default: read
follow:
description:
- If C(yes), dereferences symlinks and sets/gets attributes on symlink target,
otherwise acts on symlink itself.
type: bool
default: 'yes'
notes:
- As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well.
author:
- Brian Coca (@bcoca)
'''
EXAMPLES = '''
- name: Obtain the extended attributes of /etc/foo.conf
xattr:
path: /etc/foo.conf
- name: Sets the key 'foo' to value 'bar'
xattr:
path: /etc/foo.conf
key: user.foo
value: bar
- name: Removes the key 'foo'
xattr:
path: /etc/foo.conf
key: user.foo
state: absent
'''
import operator
import os
import re
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
def get_xattr_keys(module, path, follow):
cmd = [module.get_bin_path('getfattr', True)]
# prevents warning and not sure why it's not default
cmd.append('--absolute-names')
if not follow:
cmd.append('-h')
cmd.append(path)
return _run_xattr(module, cmd)
def get_xattr(module, path, key, follow):
cmd = [module.get_bin_path('getfattr', True)]
# prevents warning and not sure why it's not default
cmd.append('--absolute-names')
if not follow:
cmd.append('-h')
if key is None:
cmd.append('-d')
else:
cmd.append('-n %s' % key)
cmd.append(path)
return _run_xattr(module, cmd, False)
def set_xattr(module, path, key, value, follow):
cmd = [module.get_bin_path('setfattr', True)]
if not follow:
cmd.append('-h')
cmd.append('-n %s' % key)
cmd.append('-v %s' % value)
cmd.append(path)
return _run_xattr(module, cmd)
def rm_xattr(module, path, key, follow):
cmd = [module.get_bin_path('setfattr', True)]
if not follow:
cmd.append('-h')
cmd.append('-x %s' % key)
cmd.append(path)
return _run_xattr(module, cmd, False)
def _run_xattr(module, cmd, check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
except Exception:
e = get_exception()
module.fail_json(msg="%s!" % e.strerror)
# result = {'raw': out}
result = {}
for line in out.splitlines():
if re.match("^#", line) or line == "":
pass
elif re.search('=', line):
(key, val) = line.split("=", 1)
result[key] = val.strip('"')
else:
result[line] = ''
return result
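# Worked example of the parsing above (illustrative): getfattr output such as
#   # file: etc/foo.conf
#   user.foo="bar"
# yields {'user.foo': 'bar'}; comment lines and empty lines are skipped, and
# lines without an '=' sign are stored as keys mapping to an empty string.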
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True, aliases=['name']),
key=dict(type='str'),
value=dict(type='str'),
state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']),
follow=dict(type='bool', default=True),
),
supports_check_mode=True,
)
path = module.params.get('path')
key = module.params.get('key')
value = module.params.get('value')
state = module.params.get('state')
follow = module.params.get('follow')
if not os.path.exists(path):
module.fail_json(msg="path not found or not accessible!")
changed = False
msg = ""
res = {}
if key is None and state in ['absent', 'present']:
module.fail_json(msg="%s needs a key parameter" % state)
# All xattr must begin in user namespace
if key is not None and not re.match(r'^user\.', key):
key = 'user.%s' % key
if (state == 'present' or value is not None):
current = get_xattr(module, path, key, follow)
if current is None or key not in current or value != current[key]:
if not module.check_mode:
res = set_xattr(module, path, key, value, follow)
changed = True
res = current
msg = "%s set to %s" % (key, value)
elif state == 'absent':
current = get_xattr(module, path, key, follow)
if current is not None and key in current:
if not module.check_mode:
res = rm_xattr(module, path, key, follow)
changed = True
res = current
msg = "%s removed" % (key)
elif state == 'keys':
res = get_xattr_keys(module, path, follow)
msg = "returning all keys"
elif state == 'all':
res = get_xattr(module, path, None, follow)
msg = "dumping all"
else:
res = get_xattr(module, path, key, follow)
msg = "returning %s" % key
module.exit_json(changed=changed, msg=msg, xattr=res)
if __name__ == '__main__':
main()
|
ppanczyk/ansible
|
lib/ansible/modules/files/xattr.py
|
Python
|
gpl-3.0
| 6,151
|
[
"Brian"
] |
2866466edd58488e58f0e604b2f8014fbc557c54c1991ce601f9a5816a828553
|
import numpy as np
import numba as nb
from PolyLibScan.Analysis.sim_run import AtomFilter
def distance_to_active_site(xyz_coords, db, polymer_ids, active_site_no):
'''calculates the minimal distance between the polymer and the
active site.
'''
type_filter = AtomFilter(db.traj_type_order, db.sequence, polymer_ids, molecule='polymer')
poly_coords = xyz_coords[type_filter.mask]
# LAMMPS atom ids start at 1 while numpy array
# indices start at 0, so subtracting 1 from the
# id yields the right index
active_site = xyz_coords[active_site_no-1]
min_dist = np.inf
for resi in active_site:
for mono in poly_coords:
md = _dist(resi, mono)
if md < min_dist:
min_dist = md
return min_dist
@nb.jit
def _dist(a, b):
an = np.array([a['x'], a['y'], a['z']])
bn = np.array([b['x'], b['y'], b['z']])
c = an - bn
return np.sqrt(np.sum(c**2))
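# Illustrative sketch of the record layout assumed by _dist (the field names
# 'x', 'y', 'z' come from the code above; the dtype itself is hypothetical):
#   dt = np.dtype([('x', 'f8'), ('y', 'f8'), ('z', 'f8')])
#   a = np.array((0.0, 0.0, 0.0), dtype=dt)
#   b = np.array((1.0, 2.0, 2.0), dtype=dt)
#   _dist(a, b)  # -> 3.0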
|
luminescence/PolyLibScan
|
Save/compute.py
|
Python
|
mit
| 941
|
[
"LAMMPS"
] |
7a2675683cd0dc23a6e3d1898d153956efda967d7430fd457ce7698654af3394
|
# Copyright (c) 2010-2014 Bo Lin
# Copyright (c) 2010-2014 Yanhong Annie Liu
# Copyright (c) 2010-2014 Stony Brook University
# Copyright (c) 2010-2014 The Research Foundation of SUNY
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import builtins
import sys
from ast import *
from .. import common
from . import dast
from .utils import printe, printw, printd
# DistAlgo keywords
KW_PROCESS_DEF = "process"
KW_CONFIG = "config"
KW_RECV_QUERY = "received"
KW_SENT_QUERY = "sent"
KW_RECV_EVENT = "receive"
KW_SENT_EVENT = "sent"
KW_MSG_PATTERN = "msg"
KW_EVENT_SOURCE = "from_"
KW_EVENT_DESTINATION = "dst"
KW_EVENT_TIMESTAMP = "clk"
KW_EVENT_LABEL = "at"
KW_DECORATOR_LABEL = "labels"
KW_EXISTENTIAL_QUANT = "some"
KW_UNIVERSAL_QUANT = "each"
KW_AGGREGATE_SIZE = "lenof"
KW_AGGREGATE_MIN = "minof"
KW_AGGREGATE_MAX = "maxof"
KW_AGGREGATE_SUM = "sumof"
KW_COMP_SET = "setof"
KW_COMP_TUPLE = "tupleof"
KW_COMP_LIST = "listof"
KW_COMP_DICT = "dictof"
KW_AWAIT = "await"
KW_AWAIT_TIMEOUT = "timeout"
KW_SEND = "send"
KW_SEND_TO = "to"
KW_BROADCAST = "bcast"
KW_PRINT = "output"
KW_SELF = "self"
KW_TRUE = "True"
KW_FALSE = "False"
KW_NULL = "None"
KW_SUCH_THAT = "has"
KW_RESET = "reset"
def is_setup_func(node):
"""Returns True if this node defines a function named 'setup'."""
return (isinstance(node, FunctionDef) and
node.name == "setup")
def extract_label(node):
"""Returns the label name specified in 'node', or None if 'node' is not a
label.
"""
if (isinstance(node, UnaryOp) and
isinstance(node.op, USub) and
isinstance(node.operand, UnaryOp) and
isinstance(node.operand.op, USub) and
isinstance(node.operand.operand, Name)):
return node.operand.operand.id
else:
return None
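# Illustrative example (not in the original source): the DistAlgo label
# statement
#   -- l1
# parses to UnaryOp(USub, UnaryOp(USub, Name('l1'))), so extract_label
# returns 'l1'; any other expression shape returns None.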
##########
# Operator mappings:
##########
NegatedOperators = {
NotEq : dast.EqOp,
IsNot : dast.IsOp,
NotIn : dast.InOp
}
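# NegatedOperators is used by visit_Compare below to make negation explicit:
# e.g. 'a != b' becomes not (a == b) and 'x not in s' becomes not (x in s),
# with the outer negation emitted as a LogicalExpr using dast.NotOp.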
OperatorMap = {
Add : dast.AddOp,
Sub : dast.SubOp,
Mult : dast.MultOp,
Div : dast.DivOp,
Mod : dast.ModOp,
Pow : dast.PowOp,
LShift : dast.LShiftOp,
RShift : dast.RShiftOp,
BitOr : dast.BitOrOp,
BitXor : dast.BitXorOp,
BitAnd : dast.BitAndOp,
FloorDiv : dast.FloorDivOp,
Eq : dast.EqOp,
NotEq: dast.NotEqOp,
Lt : dast.LtOp,
LtE : dast.LtEOp,
Gt : dast.GtOp,
GtE : dast.GtEOp,
Is : dast.IsOp,
IsNot : dast.IsNotOp,
In : dast.InOp,
NotIn : dast.NotInOp,
USub : dast.USubOp,
UAdd : dast.UAddOp,
Invert : dast.InvertOp,
And : dast.AndOp,
Or : dast.OrOp
}
# New matrix multiplication operator since 3.5:
if sys.version_info > (3, 5):
OperatorMap[MatMult] = dast.MatMultOp
# FIXME: is there a better way than hardcoding these?
KnownUpdateMethods = {
"add", "append", "extend", "update",
"insert", "reverse", "sort",
"delete", "remove", "pop", "clear", "discard"
}
ValidResetTypes = {"Received", "Sent", ""}
ApiMethods = common.api_registry.keys()
BuiltinMethods = common.builtin_registry.keys()
PythonBuiltins = dir(builtins)
ComprehensionTypes = {KW_COMP_SET, KW_COMP_TUPLE, KW_COMP_DICT, KW_COMP_LIST}
AggregateKeywords = {KW_AGGREGATE_MAX, KW_AGGREGATE_MIN,
KW_AGGREGATE_SIZE, KW_AGGREGATE_SUM}
Quantifiers = {KW_UNIVERSAL_QUANT, KW_EXISTENTIAL_QUANT}
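# Illustrative surface syntax handled by the parser below (examples are
# hypothetical): quantifications and comprehensions are written as calls, e.g.
#   some(received(('ack', n)), has=n > 0)
#   each(x in s, has=p(x))
#   setof(x, x in s, has=p(x))
# where the 'has' keyword (KW_SUCH_THAT) carries the predicate.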
##########
# Exceptions:
class MalformedStatementError(Exception): pass
##########
# Name context types:
class NameContext:
def __init__(self, type=None):
self.type = type
class Assignment(NameContext): pass
class Update(NameContext): pass
class Read(NameContext): pass
class IterRead(Read): pass
class FunCall(NameContext): pass
class Delete(NameContext): pass
class AttributeLookup(NameContext): pass
class SubscriptLookup(NameContext): pass
class PatternContext(NameContext): pass
class Existential(NameContext): pass
class Universal(NameContext): pass
##########
class PatternParser(NodeVisitor):
"""Parses a pattern.
"""
def __init__(self, parser, literal=False):
self._parser = parser
if parser.current_query_scope is None:
self.namescope = dast.NameScope(parser.current_scope)
else:
self.namescope = parser.current_query_scope
self.parent_node = parser.current_parent
self.current_query = parser.current_query
self.use_object_style = parser.use_object_style
self.literal = literal
@property
def outer_scope(self):
return self.namescope.parent_scope
def visit(self, node):
if isinstance(node, Name):
return self.visit_Name(node)
elif isinstance(node, Tuple):
return self.visit_Tuple(node)
elif isinstance(node, List):
return self.visit_List(node)
# Parse general expressions:
self._parser.current_context = Read()
expr = self._parser.visit(node)
if isinstance(expr, dast.ConstantExpr):
return dast.ConstantPattern(self.parent_node, node, value=expr)
else:
return dast.BoundPattern(self.parent_node, node, value=expr)
def is_bound(self, name):
n = self.namescope.find_name(name)
if n is not None:
for r, _ in n.reads:
if r.is_child_of(self.current_query):
return True
return False
def visit_Name(self, node):
if self._parser.current_process is not None and \
node.id == KW_SELF:
return dast.ConstantPattern(
self.parent_node, node,
value=dast.SelfExpr(self.parent_node, node))
elif node.id == KW_TRUE:
return dast.ConstantPattern(
self.parent_node, node,
value=dast.TrueExpr(self.parent_node, node))
elif node.id == KW_FALSE:
return dast.ConstantPattern(
self.parent_node, node,
value=dast.FalseExpr(self.parent_node, node))
elif node.id == KW_NULL:
return dast.ConstantPattern(
self.parent_node, node,
value=dast.NoneExpr(self.parent_node, node))
elif self.literal:
name = node.id
n = self.outer_scope.find_name(name)
if n is None:
n = self.namescope.add_name(name)
pat = dast.BoundPattern(self.parent_node, node, value=n)
n.add_read(pat)
return pat
name = node.id
if name == "_":
# Wild card
return dast.FreePattern(self.parent_node, node)
elif name.startswith("_"):
# Bound variable:
name = node.id[1:]
n = self.outer_scope.find_name(name)
if n is None:
self._parser.warn(
("new variable '%s' introduced by bound pattern." % name),
node)
n = self.namescope.add_name(name)
pat = dast.BoundPattern(self.parent_node, node, value=n)
n.add_read(pat)
return pat
else:
# Could be free or bound:
name = node.id
if self.is_bound(name):
self._parser.debug("[PatternParser] reusing bound name " +
name, node)
n = self.namescope.find_name(name)
pat = dast.BoundPattern(self.parent_node, node, value=n)
n.add_read(pat)
else:
self._parser.debug("[PatternParser] free name " + name, node)
n = self.namescope.add_name(name)
pat = dast.FreePattern(self.parent_node, node, value=n)
n.add_assignment(pat)
return pat
def visit_Str(self, node):
return dast.ConstantPattern(
self.parent_node, node,
value=dast.ConstantExpr(self.parent_node, node, node.s))
def visit_Bytes(self, node):
return dast.ConstantPattern(
self.parent_node, node,
value=dast.ConstantExpr(self.parent_node, node, node.s))
def visit_Num(self, node):
return dast.ConstantPattern(
self.parent_node, node,
value=dast.ConstantExpr(self.parent_node, node, node.n))
def visit_Tuple(self, node):
return dast.TuplePattern(
self.parent_node, node,
value=[self.visit(e) for e in node.elts])
def visit_List(self, node):
return dast.ListPattern(
self.parent_node, node,
value=[self.visit(e) for e in node.elts])
def visit_Call(self, node):
if not self.use_object_style:
return self.generic_visit(node)
if not isinstance(node.func, Name): return None
elts = [dast.ConstantPattern(
self.parent_node, node,
value=dast.ConstantExpr(self.parent_node,
node.func,
value=node.func.id))]
for e in node.args:
elts.append(self.visit(e))
return dast.TuplePattern(self.parent_node, node,
value=elts)
class Pattern2Constant(NodeVisitor):
def __init__(self, parent):
super().__init__()
self.stack = [parent]
@property
def current_parent(self):
return self.stack[-1]
def visit_ConstantPattern(self, node):
expr = node.value.clone()
expr._parent = self.current_parent
return expr
visit_BoundPattern = visit_ConstantPattern
def visit_TuplePattern(self, node):
expr = dast.TupleExpr(self.current_parent)
self.stack.append(expr)
expr.subexprs = [self.visit(e) for e in node.value]
self.stack.pop()
return expr
def visit_ListPattern(self, node):
expr = dast.ListExpr(self.current_parent)
self.stack.append(expr)
expr.subexprs = [self.visit(e) for e in node.value]
self.stack.pop()
return expr
class PatternFinder(NodeVisitor):
def __init__(self):
self.found = False
# It's a pattern if it has bound variables:
def visit_Name(self, node):
if node.id.startswith("_"):
self.found = True
# It's also a pattern if it contains constants:
def visit_Constant(self, node):
self.found = True
visit_Num = visit_Constant
visit_Str = visit_Constant
visit_Bytes = visit_Constant
visit_NameConstant = visit_Constant
class Parser(NodeVisitor):
"""The main parser class.
"""
def __init__(self, filename="", options=None, execution_context=None):
# used in error messages:
self.filename = filename
# used to construct statement tree, also used for symbol table:
self.state_stack = []
# new statements are appended to this list:
self.current_block = None
self.current_context = None
self.current_label = None
self.current_query_scope = None
self.current_query = None
self.errcnt = 0
self.warncnt = 0
self.program = execution_context if execution_context is not None \
else dast.Program() # Just in case
self.full_event_pattern = (options.full_event_pattern
if hasattr(options,
'full_event_pattern')
else False)
self.use_object_style = (options.enable_object_pattern
if hasattr(options,
'enable_object_pattern')
else False)
self.enable_membertest_pattern = (options.enable_membertest_pattern
if hasattr(options,
'enable_membertest_pattern')
else False)
self.enable_iterator_pattern = (options.enable_iterator_pattern
if hasattr(options,
'enable_iterator_pattern')
else False)
def push_state(self, node):
self.state_stack.append((node,
self.current_context,
self.current_label,
self.current_query_scope,
self.current_block))
def pop_state(self):
(_,
self.current_context,
self.current_label,
self.current_query_scope,
self.current_block) = self.state_stack.pop()
def is_in_setup(self):
if self.current_process is None:
return False
elif isinstance(self.current_scope, dast.Function):
return self.current_scope.name == "setup"
def enter_query(self):
if self.current_query_scope is None:
self.current_query_scope = dast.NameScope(self.current_scope)
self.current_query = self.current_parent
def leave_query(self, node=None):
if self.current_parent is self.current_query:
self.current_query = None
self.current_scope.parent_scope.merge_scope(self.current_query_scope)
if node is not None:
self.audit_query(self.current_parent, node)
@property
def current_parent(self):
return self.state_stack[-1][0]
@property
def current_process(self):
for node, _, _, _, _ in reversed(self.state_stack):
if isinstance(node, dast.Process):
return node
return None
@property
def current_scope(self):
if self.current_query_scope is not None:
return self.current_query_scope
for node, _, _, _, _ in reversed(self.state_stack):
if isinstance(node, dast.NameScope):
return node
return None
@property
def current_loop(self):
for node, _, _, _, _ in reversed(self.state_stack):
if isinstance(node, dast.ArgumentsContainer) or \
isinstance(node, dast.ClassStmt):
break
elif isinstance(node, dast.LoopStmt):
return node
return None
def visit_Module(self, node):
self.program = dast.Program(None, node)
# Populate global scope with Python builtins:
for name in PythonBuiltins:
self.program.add_name(name)
self.push_state(self.program)
self.current_block = self.program.body
self.current_context = Read()
self.body(node.body)
self.pop_state()
def visit_Interactive(self, node):
self.program = dast.InteractiveProgram(None, node)
# Populate global scope with Python builtins:
for name in PythonBuiltins:
self.program.add_name(name)
self.push_state(self.program)
contxtproc = dast.Process()
self.push_state(contxtproc)
# Helpers:
def parse_bases(self, node):
"""Scans a ClassDef's bases list and checks whether the class defined by
'node' is a DistProcess.
A DistProcess is a class whose bases contain the name $KW_PROCESS_DEF.
"""
isproc = False
bases = []
for b in node.bases:
if (isinstance(b, Name) and b.id == KW_PROCESS_DEF):
isproc = True
else:
bases.append(self.visit(b))
return isproc, bases
def parse_pattern_expr(self, node, literal=False):
expr = self.create_expr(dast.PatternExpr, node)
pp = PatternParser(self, literal)
pattern = pp.visit(node)
if pattern is None:
self.error("invalid pattern", node)
self.pop_state()
return None
expr.pattern = pattern
self.pop_state()
return expr
def parse_decorators(self, node):
assert hasattr(node, 'decorator_list')
labels = set()
notlabels = set()
decorators = []
for exp in node.decorator_list:
if isinstance(exp, Call) and exp.func.id == KW_DECORATOR_LABEL:
for arg in exp.args:
l, negated = self.parse_label_spec(arg)
if negated:
notlabels |= l
else:
labels |= l
else:
decorators.append(self.visit(exp))
return decorators, labels, notlabels
def parse_label_spec(self, expr):
negated = False
if (type(expr) is UnaryOp and
type(expr.operand) in {Set, Tuple, List}):
names = expr.operand.elts
negated = True
elif type(expr) in {Set, Tuple, List}:
names = expr.elts
else:
self.error("invalid label spec.", expr)
names = []
result = set()
for elt in names:
if type(elt) is not Name:
self.error("invalid label spec.", elt)
else:
result.add(elt.id)
return result, negated
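# Illustrative examples (not in the original source) of label specs accepted
# above, as they would appear in a 'labels' decorator:
#   @labels({l1, l2})      -> ({'l1', 'l2'}, negated=False)
#   @labels(-(l1, l2))     -> ({'l1', 'l2'}, negated=True)
# i.e. a unary operator in front of the set/tuple/list negates the spec.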
def parse_event_handler(self, node):
if node.name == KW_RECV_EVENT:
eventtype = dast.ReceivedEvent
elif node.name == KW_SENT_EVENT:
eventtype = dast.SentEvent
else:
# Impossible
return None
extras = []
args = node.args
if len(args.defaults) < len(args.args):
extras.append(args.args[:(len(args.args) - len(args.defaults))])
args.args = args.args[(len(args.args) - len(args.defaults)):]
if args.vararg:
extras.append(args.vararg)
if args.kwonlyargs:
extras.append(args.kwonlyargs)
if args.kwarg:
extras.append(args.kwarg)
if len(extras) > 0:
self.warn("extraneous arguments in event spec ignored.", node)
events = []
labels = set()
notlabels = set()
self.enter_query()
for key, patexpr in zip(args.args, args.defaults):
if key.arg == KW_EVENT_LABEL:
ls, neg = self.parse_label_spec(patexpr)
if neg:
notlabels |= ls
else:
labels |= ls
continue
pat = self.parse_pattern_expr(patexpr)
if key.arg == KW_MSG_PATTERN:
events.append(dast.Event(self.current_process, ast=node,
event_type=eventtype, pattern=pat))
continue
if len(events) == 0:
self.error("invalid event spec: missing 'msg' argument.", node)
# Add a phony event so we can recover as much as possible:
events.append(dast.Event(self.current_process))
if key.arg == KW_EVENT_SOURCE:
events[-1].sources.append(pat)
elif key.arg == KW_EVENT_DESTINATION:
events[-1].destinations.append(pat)
elif key.arg == KW_EVENT_TIMESTAMP:
events[-1].timestamps.append(pat)
else:
self.warn("unrecognized event parameter '%s'" % key.arg, node)
self.leave_query()
return events, labels, notlabels
def body(self, statements):
"""Process a block of statements.
"""
for stmt in statements:
self.current_context = Read()
self.visit(stmt)
if self.current_label is not None:
# Create a noop statement to hold the last label:
self.create_stmt(dast.NoopStmt, statements[-1])
def proc_body(self, statements):
"""Process the body of a process definition.
Process bodies differ from normal ClassDef bodies in that the names
defined in this scope are visible to the whole process.
"""
for stmt in statements:
if (isinstance(stmt, FunctionDef) and stmt.name not in
{KW_RECV_EVENT, KW_SENT_EVENT}):
self.debug("Adding function %s to process scope." % stmt.name,
stmt)
self.current_scope.add_name(stmt.name)
elif isinstance(stmt, ClassDef):
self.debug("Adding class %s to process scope." % stmt.name,
stmt)
self.current_scope.add_name(stmt.name)
elif isinstance(stmt, Assign):
for expr in stmt.targets:
if isinstance(expr, Name):
self.debug(
"Adding variable %s to process scope." % expr.id,
stmt)
self.current_scope.add_name(expr.id)
elif isinstance(stmt, AugAssign):
if isinstance(stmt.target, Name):
self.current_scope.add_name(stmt.target.id)
for stmt in statements:
self.visit(stmt)
if self.current_label is not None:
# Create a noop statement to hold the last label:
self.create_stmt(dast.NoopStmt, statements[-1])
def signature(self, node):
"""Process the argument lists."""
assert isinstance(self.current_parent, dast.ArgumentsContainer)
padding = len(node.args) - len(node.defaults)
container = self.current_parent.args
for arg in node.args[:padding]:
container.add_arg(arg.arg)
for arg, val in zip(node.args[padding:], node.defaults):
container.add_defaultarg(arg.arg, self.visit(val))
if node.vararg is not None:
# Python 3.4 compatibility:
if type(node.vararg) is str:
container.add_vararg(node.vararg)
else:
container.add_vararg(node.vararg.arg)
if node.kwarg is not None:
# Python 3.4 compatibility:
if type(node.kwarg) is str:
container.add_kwarg(node.kwarg)
else:
container.add_vararg(node.kwarg.arg)
for kwarg, val in zip(node.kwonlyargs, node.kw_defaults):
container.add_kwonlyarg(kwarg.arg, self.visit(val))
# Top-level blocks:
def visit_ClassDef(self, node):
isproc, bases = self.parse_bases(node)
if isproc:
if type(self.current_parent) is not dast.Program:
self.error("Process definition must be at top level.", node)
initfun = None
bodyidx = None
for idx, s in enumerate(node.body):
if is_setup_func(s):
if initfun is None:
initfun = s
bodyidx = idx
else:
self.error("Duplicate setup() definition.", s)
if initfun is None:
self.error("Process missing 'setup()' definition.", node)
return
n = self.current_scope.add_name(node.name)
proc = dast.Process(self.current_parent, node,
name=node.name, bases=bases)
n.add_assignment(proc)
proc.decorators, _, _ = self.parse_decorators(node)
self.push_state(proc)
self.program.processes.append(proc)
self.program.body.append(proc)
self.signature(initfun.args)
self.current_block = proc.body
# setup() has to be parsed first:
self.proc_body([node.body[bodyidx]] +
node.body[:bodyidx] + node.body[(bodyidx+1):])
proc.setup = proc.body[0]
self.pop_state()
else:
clsobj = dast.ClassStmt(self.current_parent, node,
name=node.name, bases=bases)
if self.current_block is None or self.current_parent is None:
self.error("Statement not allowed in this context.", ast)
else:
self.current_block.append(clsobj)
n = self.current_scope.add_name(node.name)
n.add_assignment(clsobj)
self.current_context = Read()
clsobj.decorators, _, _ = self.parse_decorators(node)
self.push_state(clsobj)
self.current_block = clsobj.body
self.body(node.body)
self.pop_state()
def visit_FunctionDef(self, node):
if (self.current_process is None or
node.name not in {KW_SENT_EVENT, KW_RECV_EVENT}):
# This is a normal method
n = self.current_scope.add_name(node.name)
s = self.create_stmt(dast.Function, node,
params={"name" : node.name})
n.add_assignment(s)
s.process = self.current_process
if type(s.parent) is dast.Process:
if s.name == "main":
self.current_process.entry_point = s
else:
self.current_process.methods.append(s)
elif (type(s.parent) is dast.Program and
s.name == "main"):
self.current_parent.entry_point = s
# Ignore the label decorators:
s.decorators, _, _ = self.parse_decorators(node)
self.current_block = s.body
self.signature(node.args)
self.body(node.body)
self.pop_state()
else:
# This is an event handler:
h = dast.EventHandler(self.current_parent, node)
# Parse decorators before adding h to node_stack, since decorators
# should belong to the outer scope:
h.decorators, h.labels, h.notlabels = self.parse_decorators(node)
self.push_state(h)
events, labels, notlabels = self.parse_event_handler(node)
events = self.current_process.add_events(events)
h.events = events
h.labels |= labels
h.notlabels |= notlabels
if len(h.labels) == 0:
h.labels = None
if len(h.notlabels) == 0:
h.notlabels = None
for evt in events:
evt.handlers.append(h)
for v in evt.freevars:
if v is not None:
self.debug("adding event argument %s" % v)
h.args.add_arg(v.name)
self.current_block = h.body
self.body(node.body)
self.pop_state()
def check_await(self, node):
if (isinstance(node, Call) and
isinstance(node.func, Name) and
node.func.id == KW_AWAIT):
if len(node.args) <= 2:
return True
else:
self.error("malformed await statement.", node)
return None
else:
return False
# Statements:
#
# The visit_* method for statements appends generated dast AST statements
# to self.current_block.
def create_stmt(self, stmtcls, ast, params=None, nopush=False):
"""Convenience method to instantiate a statement node and append to
'current_block'.
"""
if params is None:
stmtobj = stmtcls(parent=self.current_parent, ast=ast)
else:
stmtobj = stmtcls(parent=self.current_parent, ast=ast, **params)
stmtobj.label = self.current_label
self.current_label = None
if self.current_block is None or self.current_parent is None:
self.error("Statement not allowed in this context.", ast)
else:
self.current_block.append(stmtobj)
if not nopush:
self.push_state(stmtobj)
self.current_context = Read()
return stmtobj
def create_expr(self, exprcls, ast, params=None, nopush=False):
"""Convenience method to instantiate an expression node.
"""
if params is None:
expr = exprcls(self.current_parent, ast=ast)
else:
expr = exprcls(self.current_parent, ast=ast, **params)
if not nopush:
self.push_state(expr)
return expr
def visit_Assign(self, node):
stmtobj = self.create_stmt(dast.AssignmentStmt, node)
self.current_context = Read()
stmtobj.value = self.visit(node.value)
self.current_context = Assignment(stmtobj.value)
for target in node.targets:
stmtobj.targets.append(self.visit(target))
self.pop_state()
def visit_AugAssign(self, node):
stmtobj = self.create_stmt(dast.OpAssignmentStmt, node,
params={'op':OperatorMap[type(node.op)]})
self.current_context = Read()
valexpr = self.visit(node.value)
self.current_context = Assignment(valexpr)
tgtexpr = self.visit(node.target)
stmtobj.target = tgtexpr
stmtobj.value = valexpr
self.pop_state()
def visit_ImportFrom(self, node):
if type(self.current_parent) is not dast.Program:
self.error("'import' statement is only allowed at the top level.",
node)
return
stmtobj = self.create_stmt(dast.PythonStmt, node)
for alias in node.names:
if alias.asname is not None:
name = alias.asname
else:
name = alias.name
nobj = self.current_scope.add_name(name)
nobj.add_assignment(stmtobj)
self.pop_state()
visit_Import = visit_ImportFrom
def expr_check(self, name, minargs, maxargs, node,
keywords={}, optional_keywords={}):
if not (isinstance(node, Call) and
isinstance(node.func, Name) and
node.func.id == name):
return False
errmsg = None
if len(node.args) >= minargs and len(node.args) <= maxargs:
if keywords is None:
return True
for kw in node.keywords:
if kw.arg in keywords:
keywords -= {kw.arg}
elif kw.arg not in optional_keywords:
errmsg = "unrecognized keyword in %s statement." % name
break
if errmsg is None:
if len(keywords) > 0:
errmsg = ("missing required keywords: " + keywords +
" in " + name + " statement.")
else:
return True
else:
errmsg = "Malformed %s statement." % name
self.error(errmsg, node)
raise MalformedStatementError
def kw_check(self, node, names):
if not isinstance(node, Name):
return False
if node.id not in names:
return False
return True
def parse_message(self, node):
expr = dast.TupleExpr(self.current_parent, node)
if type(node) is Call:
assert type(node.func) is Name
elem = dast.ConstantExpr(self.current_parent, node.func)
elem.value = node.func.id
expr.subexprs.append(elem)
elts = node.args
else:
elts = node.elts
for elt in elts:
expr.subexprs.append(self.visit(elt))
return expr
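# Illustrative example (message contents hypothetical): in a statement such as
#   send(('req', n), to=p)
# the first argument ('req', n) is parsed here into a TupleExpr; a call-form
# message like Req(n) becomes the tuple ('Req', n), with the callee name
# prepended as a ConstantExpr.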
def visit_Expr(self, node):
l = extract_label(node.value)
if l is not None and self.current_process is not None:
self.current_label = l
return
stmtobj = None
try:
e = node.value
if self.expr_check(KW_AWAIT, 1, 2, e,
keywords={},
optional_keywords={KW_AWAIT_TIMEOUT}):
stmtobj = self.create_stmt(dast.AwaitStmt, node)
branch = dast.Branch(stmtobj, node,
condition=self.visit(e.args[0]))
stmtobj.branches.append(branch)
if len(e.args) == 2:
stmtobj.timeout = self.visit(e.args[1])
if len(e.keywords) > 0:
if stmtobj.timeout is not None:
self.warn(
"duplicate timeout value in await statement.",
e)
stmtobj.timeout = self.visit(e.keywords[0].value)
elif self.expr_check(KW_SEND, 1, 1, e, keywords={KW_SEND_TO}):
stmtobj = self.create_stmt(dast.SendStmt, node)
stmtobj.message = self.parse_message(e.args[0])
stmtobj.target = self.visit(e.keywords[0].value)
elif self.expr_check(KW_BROADCAST, 1, 1, e, keywords={KW_SEND_TO}):
stmtobj = self.create_stmt(dast.SendStmt, node)
stmtobj.message = self.parse_message(e.args[0])
stmtobj.target = self.visit(e.keywords[0].value)
elif self.expr_check(KW_PRINT, 1, 2, e):
stmtobj = self.create_stmt(dast.OutputStmt, node)
stmtobj.message = self.visit(e.args[0])
if len(e.args) == 2:
stmtobj.level = self.visit(e.args[1])
elif self.current_process is not None and \
self.expr_check(KW_RESET, 0, 1, e):
stmtobj = self.create_stmt(dast.ResetStmt, node)
if len(e.args) > 0:
stmtobj.expr = self.visit(e.args[0])
if not isinstance(stmtobj.expr, dast.ConstantExpr):
self.error("Invalid argument in reset statement.", e)
elif stmtobj.expr.value not in ValidResetTypes:
self.error("Unknown argument in reset statement. "
"Valid arguments are: " +
str(ValidResetTypes), node)
elif (isinstance(self.current_parent, dast.Process) and
self.expr_check(KW_CONFIG, 0, 0, e, keywords=None)):
self.current_process.configurations.extend(
self.parse_config_section(e))
# 'yield' and 'yield from' should be statements, handle them here:
elif type(e) is Yield:
stmtobj = self.create_stmt(dast.YieldStmt, node)
stmtobj.expr = self.visit(e)
elif type(e) is YieldFrom:
# 'yield' should be a statement, handle it here:
stmtobj = self.create_stmt(dast.YieldFromStmt, node)
stmtobj.expr = self.visit(e)
else:
stmtobj = self.create_stmt(dast.SimpleStmt, node)
stmtobj.expr = self.visit(node.value)
except MalformedStatementError:
# already errored in expr_check so just ignore:
pass
finally:
if stmtobj is not None:
self.pop_state()
# ~~~
def visit_If(self, node):
stmtobj = None
try:
if self.expr_check(KW_AWAIT, 1, 1, node.test):
stmtobj = self.create_stmt(dast.AwaitStmt, node)
branch = dast.Branch(stmtobj, node.test,
condition=self.visit(node.test.args[0]))
self.current_block = branch.body
self.body(node.body)
stmtobj.branches.append(branch)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
if self.expr_check(KW_AWAIT_TIMEOUT, 1, 1, node.test):
stmtobj.timeout = self.visit(node.test.args[0])
self.current_block = stmtobj.orelse
self.body(node.body)
if len(node.orelse) > 0:
self.error("timeout branch must be the last"
" branch of await statement", node)
else:
branch = dast.Branch(stmtobj, node,
condition=self.visit(node.test))
self.current_block = branch.body
self.body(node.body)
stmtobj.branches.append(branch)
elif len(else_) == 0:
break
else:
self.current_block = stmtobj.orelse
self.body(else_)
break
else:
stmtobj = self.create_stmt(dast.IfStmt, node)
stmtobj.condition = self.visit(node.test)
self.current_block = stmtobj.body
self.body(node.body)
self.current_block = stmtobj.elsebody
self.body(node.orelse)
except MalformedStatementError:
pass
finally:
if stmtobj is not None:
self.pop_state()
def visit_For(self, node):
s = self.create_stmt(dast.ForStmt, node)
self.current_context = Assignment()
s.domain = self.parse_domain_spec(node)
self.current_context = Read()
self.current_block = s.body
self.body(node.body)
self.current_block = s.elsebody
self.body(node.orelse)
self.pop_state()
def visit_While(self, node):
if self.expr_check(KW_AWAIT, 1, 2, node.test,
optional_keywords={KW_AWAIT_TIMEOUT}):
s = self.create_stmt(dast.LoopingAwaitStmt, node)
s.condition = self.visit(node.test.args[0])
if len(node.test.args) == 2:
s.timeout = self.visit(node.test.args[1])
else:
s = self.create_stmt(dast.WhileStmt, node)
s.condition = self.visit(node.test)
self.current_block = s.body
self.body(node.body)
if hasattr(s, "elsebody"):
self.current_block = s.elsebody
self.body(node.orelse)
self.pop_state()
def visit_With(self, node):
s = self.create_stmt(dast.WithStmt, node)
for item in node.items:
self.current_context = Read()
ctxexpr = self.visit(item.context_expr)
if item.optional_vars is not None:
self.current_context = Assignment(ctxexpr)
s.items.append((ctxexpr, self.visit(item.optional_vars)))
else:
s.items.append((ctxexpr, None))
self.current_context = Read()
self.current_block = s.body
self.body(node.body)
self.pop_state()
def visit_Pass(self, node):
self.create_stmt(dast.PassStmt, node, nopush=True)
def visit_Break(self, node):
loop = self.current_loop
if loop is None:
self.warn("Possible use of 'break' outside loop.", node)
self.create_stmt(dast.BreakStmt, node, nopush=True,
params={"loopstmt": loop})
def visit_Continue(self, node):
loop = self.current_loop
if loop is None:
self.warn("Possible use of 'continue' outside loop.", node)
self.create_stmt(dast.ContinueStmt, node, nopush=True,
params={"loopstmt": loop})
def visit_Delete(self, node):
s = self.create_stmt(dast.DeleteStmt, node)
self.current_context = Delete()
for target in node.targets:
s.targets.append(self.visit(target))
self.pop_state()
def visit_Try(self, node):
s = self.create_stmt(dast.TryStmt, node)
self.current_block = s.body
self.body(node.body)
self.current_context = Read()
for handler in node.handlers:
h = dast.ExceptHandler(s, handler)
h.name = handler.name
if h.name is not None:
n = self.current_scope.find_name(h.name)
if n is None:
n = self.current_scope.add_name(h.name)
n.add_assignment(s)
if handler.type is not None:
h.type = self.visit(handler.type)
self.current_block = h.body
self.body(handler.body)
s.excepthandlers.append(h)
self.current_block = s.elsebody
self.body(node.orelse)
self.current_block = s.finalbody
self.body(node.finalbody)
self.pop_state()
def visit_Assert(self, node):
s = self.create_stmt(dast.AssertStmt, node)
s.expr = self.visit(node.test)
if node.msg is not None:
s.msg = self.visit(node.msg)
self.pop_state()
def visit_Global(self, node):
if self.current_process is not None:
self.warn("'global' statement inside process is redundant and "
"ignored.", node)
else:
self.create_stmt(dast.GlobalStmt, node,
{"names": list(node.names)})
for name in node.names:
localname = self.current_scope.find_name(name, local=True)
if localname is not None:
self.warn("name '%s' used before declared 'global'." %
name, node)
nobj = self.program.find_name(name)
if nobj is None:
nobj = self.program.add_name(name)
self.debug("Linking global name '%s'" % name)
self.current_scope.link_name(nobj)
self.pop_state()
def visit_Nonlocal(self, node):
self.create_stmt(dast.NonlocalStmt, node, {"names": list(node.names)})
if self.current_scope.parent_scope is None:
self.error("No nonlocal scope found.", node)
else:
for name in node.names:
nobj = self.current_scope.find_name(name, local=True)
if nobj is not None:
self.warn("Variable '%s' used before declared 'nonlocal'." %
name, node)
nobj = self.current_scope.parent_scope.find_name(name, local=False)
if nobj is None:
self.warn("Unable to determine scope for nonlocal var %s" %
name, node)
else:
self.debug("Linking nonlocal name '%s'" % name)
self.current_scope.link_name(nobj)
self.pop_state()
def visit_Return(self, node):
s = self.create_stmt(dast.ReturnStmt, node)
if node.value is not None:
s.value = self.visit(node.value)
self.pop_state()
def visit_Raise(self, node):
s = self.create_stmt(dast.RaiseStmt, node)
if node.exc is not None:
s.expr = self.visit(node.exc)
if node.cause is not None:
s.cause = self.visit(node.cause)
self.pop_state()
# Expressions:
#
# The visit_* methods for expressions return the newly
# constructed dast AST node
def visit_Attribute(self, node):
if (isinstance(self.current_context, FunCall) and
node.attr in KnownUpdateMethods):
# Calling a method that is known to update an object's state is an
# Update operation:
self.current_context = Update()
expr = self.create_expr(dast.AttributeExpr, node)
if type(self.current_context) is Assignment:
# Assigning to an attribute of an object updates that object:
self.current_context = Update()
expr.value = self.visit(node.value)
expr.attr = node.attr
self.pop_state()
if isinstance(expr.value, dast.SelfExpr):
# Need to update the namedvar object
n = self.current_process.find_name(expr.attr)
if n is None:
if (self.is_in_setup() and
isinstance(self.current_context, Assignment)):
self.debug("Adding name '%s' to process scope"
" from setup()." % expr.attr, node)
n = self.current_process.add_name(expr.attr)
n.add_assignment(expr)
n.set_scope(self.current_process)
else:
self.error("Undefined process state variable: " +
str(expr.attr), node)
else:
if isinstance(self.current_context, Assignment):
self.debug("Assignment to variable '%s'" % str(n), node)
n.add_assignment(expr)
elif isinstance(self.current_context, Update) or \
isinstance(self.current_context, Delete):
self.debug("Update to process variable '%s'" % str(n), node)
n.add_update(expr)
else:
n.add_read(expr)
return expr
def ensure_one_arg(self, name, node):
l = len(node.args)
if l != 1:
self.error("'%s' takes exactly one argument (%d given)" % (name, l),
node)
return False
return True
def ensure_sequence_arg(self, name, node):
l = len(node.args)
if l > 1:
self.error("'%s' takes zero or one argument (%d given)" % (name, l),
node)
return False
if l == 1 and not hasattr(node.args[0], "elts"):
return False
return True
def parse_event_expr(self, node, literal=False):
if (node.starargs is not None or node.kwargs is not None):
self.warn("extraneous arguments in event expression.", node)
pattern = self.parse_pattern_expr(node.args[0], literal)
if node.func.id == KW_RECV_QUERY:
event = dast.Event(self.current_process,
event_type=dast.ReceivedEvent,
pattern=pattern)
elif node.func.id == KW_SENT_QUERY:
event = dast.Event(self.current_process,
event_type=dast.SentEvent,
pattern=pattern)
else:
self.error("unknown event specifier", node)
return None
for kw in node.keywords:
pat = self.parse_pattern_expr(kw.value, literal)
if kw.arg == KW_EVENT_SOURCE:
event.sources.append(pat)
elif kw.arg == KW_EVENT_DESTINATION:
event.destinations.append(pat)
elif kw.arg == KW_EVENT_TIMESTAMP:
event.timestamps.append(pat)
else:
self.warn("unknown keyword in query.", node)
return self.current_process.add_event(event)
def event_from_pattern(self, node, event_type):
assert isinstance(node, dast.PatternExpr)
pattern = node.pattern
assert isinstance(pattern, dast.TuplePattern)
event = dast.Event(self.current_process,
event_type=event_type)
if self.full_event_pattern:
if len(pattern.value) != 3:
self.error("malformed event pattern.", node)
else:
event.pattern = dast.PatternExpr(node.parent,
pattern=pattern.value[2])
envpat = pattern.value[1]
if isinstance(envpat, dast.TuplePattern):
if len(envpat.value) != 3:
self.warn("possible malformed envelope pattern.", node)
else:
event.timestamps.append(
dast.PatternExpr(node.parent,
pattern=envpat.value[0]))
event.destinations.append(
dast.PatternExpr(node.parent,
pattern=envpat.value[1]))
event.sources.append(
dast.PatternExpr(node.parent,
pattern=envpat.value[2]))
else:
if len(pattern.value) != 2:
self.error("malformed event pattern.", node)
else:
event.pattern = dast.PatternExpr(node.parent,
pattern=pattern.value[0])
event.sources.append(
dast.PatternExpr(node.parent, pattern=pattern.value[1]))
return self.current_process.add_event(event)
def pattern_from_event(self, node, literal=False):
if not isinstance(node, dast.Event):
return None
expr = self.create_expr(dast.PatternExpr if not literal else
dast.LiteralPatternExpr,
node.ast)
pattern = dast.TuplePattern(node.parent)
# Pattern structure:
# (TYPE, ENVELOPE, MESSAGE)
# ENVELOPE: (TIMESTAMP, DESTINATION, SOURCE)
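# e.g. an expanded pattern for a message m received from p at time t has
# the shape (EVENT_TYPE, (t, dst, p), m) -- illustrative only; the TYPE
# slot is filled from the event class name below.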
if isinstance(node.type, dast.EventType):
pattern.value.append(
dast.ConstantPattern(
pattern,
value=self.current_scope.add_name(
node.type.__name__)))
else:
pattern.value.append(dast.FreePattern(pattern))
env = dast.TuplePattern(pattern)
if (len(node.timestamps) == 0):
env.value.append(dast.FreePattern(env))
elif len(node.timestamps) == 1:
env.value.append(node.timestamps[0].pattern.clone())
env.value[-1]._parent = env
else:
self.error("multiple timestamp spec in event pattern.", node)
if (len(node.destinations) == 0):
env.value.append(dast.FreePattern(env))
elif len(node.destinations) == 1:
env.value.append(node.destinations[0].pattern.clone())
env.value[-1]._parent = env
else:
self.error("multiple destination spec in event pattern.", node)
if (len(node.sources) == 0):
env.value.append(dast.FreePattern(env))
elif len(node.sources) == 1:
env.value.append(node.sources[0].pattern.clone())
env.value[-1]._parent = env
else:
self.error("multiple source spec in event pattern.", node)
pattern.value.append(env)
if node.pattern is None:
msgpat = dast.FreePattern(pattern)
else:
msgpat = node.pattern.pattern.clone()
msgpat._parent = pattern
pattern.value.append(msgpat)
expr.pattern = pattern
self.pop_state()
return expr
def call_check(self, names, minargs, maxargs, node):
if (isinstance(node.func, Name) and node.func.id in names):
if ((minargs is not None and len(node.args) < minargs) or
(maxargs is not None and len(node.args) > maxargs)):
self.error("Malformed %s expression." % node.func.id, node)
return False
else:
return True
return False
def parse_domain_spec(self, node):
if (self.current_process is not None and
isinstance(node, Call) and
self.call_check({KW_RECV_QUERY, KW_SENT_QUERY}, 1, 1, node)):
# As a short hand, "sent" and "rcvd" can be used as a domain spec:
# some(rcvd(EVENT_PATTERN) | PRED) is semantically equivalent to
# some(EVENT_PATTERN in rcvd | PRED).
expr = self.create_expr(dast.DomainSpec, node)
event = self.parse_event_expr(node, literal=False)
if event is not None:
event.record_history = True
expr.pattern = self.pattern_from_event(event)
if node.func.id == KW_RECV_QUERY:
expr.domain = self.create_expr(dast.ReceivedExpr, node)
else:
expr.domain = self.create_expr(dast.SentExpr, node)
expr.domain.event = event
self.pop_state()
self.pop_state()
return expr
elif (isinstance(node, Compare) and len(node.ops) == 1 and
type(node.ops[0]) is In):
expr = self.create_expr(dast.DomainSpec, node)
self.current_context = Assignment()
expr.pattern = self.parse_pattern_expr(node.left)
self.current_context = IterRead(expr.pattern)
expr.domain = self.visit(node.comparators[0])
self.pop_state()
return expr
elif isinstance(node, comprehension) or isinstance(node, For):
expr = self.create_expr(dast.DomainSpec, node)
self.current_context = Assignment()
if self.enable_iterator_pattern:
expr.pattern = self.parse_pattern_expr(node.target)
else:
expr.pattern = self.visit(node.target)
self.current_context = IterRead(expr.pattern)
expr.domain = self.visit(node.iter)
if isinstance(expr.domain, dast.HistoryExpr):
expr.pattern = self.pattern_from_event(expr.domain.event)
self.pop_state()
return expr
else:
raise MalformedStatementError("malformed domain specifier.")
def parse_quantified_expr(self, node):
if node.func.id == KW_EXISTENTIAL_QUANT:
context = dast.ExistentialOp
elif node.func.id == KW_UNIVERSAL_QUANT:
context = dast.UniversalOp
else:
raise MalformedStatementError("Unknown quantifier.")
expr = self.create_expr(dast.QuantifiedExpr, node, {'op': context})
self.enter_query()
try:
expr.domains, predicates = self.parse_domains_and_predicate(node)
if len(predicates) > 1:
self.warn("Multiple predicates in quantified expression, "
"first one is used, the rest are ignored.", node)
expr.predicate = predicates[0]
finally:
self.leave_query(node)
self.pop_state()
return expr
def parse_comprehension(self, node):
if node.func.id == KW_COMP_SET:
expr_type = dast.SetCompExpr
elif node.func.id == KW_COMP_LIST:
expr_type = dast.ListCompExpr
elif node.func.id == KW_COMP_DICT:
expr_type = dast.DictCompExpr
elif node.func.id == KW_COMP_TUPLE:
expr_type = dast.TupleCompExpr
expr = self.create_expr(expr_type, node)
self.enter_query()
first_arg = node.args[0]
node.args = node.args[1:]
try:
expr.domains, expr.conditions = self.parse_domains_and_predicate(node)
if expr_type is dast.DictCompExpr:
if not (isinstance(first_arg, Tuple) and
len(first_arg.elts) == 2):
self.error("Malformed element in dict comprehension.",
first_arg)
else:
kv = dast.KeyValue(expr)
kv.key = self.visit(first_arg.elts[0])
kv.value = self.visit(first_arg.elts[1])
expr.elem = kv
else:
expr.elem = self.visit(first_arg)
finally:
self.leave_query(node)
self.pop_state()
return expr
def audit_query(self, expr, node):
self.debug("auditing " + str(expr), node)
self.debug("...freevars: " + str(expr.freevars), node)
self.debug("...boundvars: " + str(expr.boundvars), node)
intersect = {v.name for v in expr.ordered_freevars} & \
{v.name for v in expr.ordered_boundvars}
if intersect:
msg = ("query variables " +
" ".join(["'" + n + "'" for n in intersect]) +
" are both free and bound.")
self.error(msg, node)
def parse_aggregates(self, node):
if node.func.id == KW_AGGREGATE_SUM:
expr_type = dast.SumExpr
elif node.func.id == KW_AGGREGATE_SIZE:
expr_type = dast.SizeExpr
elif node.func.id == KW_AGGREGATE_MIN:
expr_type = dast.MinExpr
elif node.func.id == KW_AGGREGATE_MAX:
expr_type = dast.MaxExpr
expr = self.create_expr(expr_type, node)
first_arg = node.args[0]
node.args = node.args[1:]
try:
expr.domains, expr.conditions = self.parse_domains_and_predicate(node)
expr.elem = self.visit(first_arg)
finally:
self.pop_state()
return expr
def parse_domains_and_predicate(self, node):
preds = []
# Find predicate:
for kw in node.keywords:
if kw.arg == KW_SUCH_THAT:
preds.append(kw.value)
else:
self.error("Unknown keyword '%s' in comprehension expression." %
kw.arg, node)
# If no predicate is found, default to True:
if len(preds) == 0:
preds = [NameConstant(True)]
domains = node.args
if len(domains) == 0:
self.warn("No domain specifiers in comprehension expression.", node)
dadomains = [self.parse_domain_spec(node) for node in domains]
self.current_context = Read()
dapredicates = [self.visit(pred) for pred in preds]
return dadomains, dapredicates
def parse_config_section(self, node):
res = []
for kw in node.keywords:
key = kw.arg
vnode = kw.value
value = None
if isinstance(vnode, Name):
value = vnode.id
elif isinstance(vnode, Num):
value = vnode.n
elif isinstance(vnode, Str) or isinstance(vnode, Bytes):
value = vnode.s
elif isinstance(vnode, NameConstant):
value = vnode.value
else:
self.error("Invalid configuration value.", vnode)
if value is not None:
res.append((key, value))
return res
def visit_Call(self, node):
if self.call_check(Quantifiers, 1, None, node):
try:
return self.parse_quantified_expr(node)
except MalformedStatementError as e:
self.error("Malformed quantification expression: " + str(e),
node)
return dast.SimpleExpr(self.current_parent, node)
if self.call_check(ComprehensionTypes, 2, None, node):
try:
return self.parse_comprehension(node)
except MalformedStatementError as e:
self.error("Malformed comprehension expression: " + str(e),
node)
return dast.SimpleExpr(self.current_parent, node)
if (self.current_process is not None and
self.call_check({KW_RECV_QUERY, KW_SENT_QUERY}, 1, 1, node)):
if isinstance(self.current_context, IterRead):
if node.func.id == KW_RECV_QUERY:
expr = self.create_expr(dast.ReceivedExpr, node)
else:
expr = self.create_expr(dast.SentExpr, node)
expr.context = self.current_context.type
expr.event = self.parse_event_expr(
node, literal=(not self.enable_iterator_pattern))
self.pop_state()
if expr.event is not None:
expr.event.record_history = True
return expr
else:
outer = self.create_expr(dast.ComparisonExpr, node)
outer.comparator = dast.InOp
if node.func.id == KW_RECV_QUERY:
expr = self.create_expr(dast.ReceivedExpr, node)
else:
expr = self.create_expr(dast.SentExpr, node)
if self.current_context is not None:
expr.context = self.current_context.type
event = self.parse_event_expr(
node, literal=(not self.enable_membertest_pattern))
self.pop_state()
expr.event = event
outer.right = expr
if event is not None:
outer.left = self.pattern_from_event(
event, literal=(not self.enable_membertest_pattern))
event.record_history = True
self.pop_state()
return outer
if self.call_check(ApiMethods, None, None, node):
self.debug("Api method call: " + node.func.id, node)
expr = self.create_expr(dast.ApiCallExpr, node)
expr.func = node.func.id
elif self.call_check(BuiltinMethods, None, None, node):
self.debug("Builtin method call: " + node.func.id, node)
expr = self.create_expr(dast.BuiltinCallExpr, node)
expr.func = node.func.id
else:
if isinstance(node.func, Name):
self.debug("Method call: " + str(node.func.id), node)
expr = self.create_expr(dast.CallExpr, node)
self.current_context = FunCall()
expr.func = self.visit(node.func)
self.current_context = Read()
expr.args = [self.visit(a) for a in node.args]
expr.keywords = [(kw.arg, self.visit(kw.value))
for kw in node.keywords]
expr.starargs = self.visit(node.starargs) \
if node.starargs is not None else None
expr.kwargs = self.visit(node.kwargs) \
if node.kwargs is not None else None
self.pop_state()
return expr
def visit_Name(self, node):
if node.id in {KW_TRUE, KW_FALSE, KW_NULL}:
if type(self.current_context) in {Assignment, Update, Delete}:
self.warn("Constant expression in update context.", node)
if node.id == KW_TRUE:
return self.create_expr(dast.TrueExpr, node, nopush=True)
elif node.id == KW_FALSE:
return self.create_expr(dast.FalseExpr, node, nopush=True)
elif node.id == KW_NULL:
return self.create_expr(dast.NoneExpr, node, nopush=True)
if self.current_process is not None and node.id == KW_SELF:
return self.create_expr(dast.SelfExpr, node, nopush=True)
if (self.current_process is not None and
(node.id in {KW_RECV_QUERY, KW_SENT_QUERY})):
if node.id == KW_RECV_QUERY:
expr = self.create_expr(dast.ReceivedExpr, node)
event_type = dast.ReceivedEvent
else:
expr = self.create_expr(dast.SentExpr, node)
event_type = dast.SentEvent
if (isinstance(self.current_context, Read) and
isinstance(self.current_context.type, dast.PatternExpr)):
expr.context = self.current_context.type
event = self.event_from_pattern(expr.context, event_type)
expr.event = event
event.record_history = True
else:
self.error("Invalid context for '%s'" % node.id, node)
self.pop_state()
return expr
# NamedVar is not by itself an Expression, we'll have to wrap it in a
# SimpleExpr:
expr = self.create_expr(dast.SimpleExpr, node)
if isinstance(self.current_context, Assignment):
n = self.current_scope.find_name(node.id, local=False)
if n is None:
self.debug("Adding name %s to %s" % (node.id,
self.current_scope), node)
n = self.current_scope.add_name(node.id)
n.add_assignment(expr)
elif isinstance(self.current_context, Update) or\
isinstance(self.current_context, Delete):
n = self.current_scope.find_name(node.id, local=False)
if n is None:
self.warn("Possible use of uninitialized variable '%s'" %
node.id, node)
self.debug(str(self.current_scope.parent_scope), node)
n = self.current_scope.add_name(node.id)
n.add_update(expr)
elif isinstance(self.current_context, Read) or \
isinstance(self.current_context, FunCall):
n = self.current_scope.find_name(node.id, local=False)
if n is None:
self.warn("Possible use of uninitialized variable '%s'" %
node.id, node)
self.debug(str(self.current_scope.parent_scope), node)
if self.current_scope.parent_scope is not None:
self.debug(self.current_scope.parent_scope._names, node)
else:
self.debug(self.current_scope._names, node)
n = self.current_scope.add_name(node.id)
n.add_read(expr)
expr.value = n
self.pop_state()
return expr
def visit_Str(self, node):
expr = self.create_expr(dast.ConstantExpr, node)
expr.value = node.s
self.pop_state()
return expr
def visit_Bytes(self, node):
expr = self.create_expr(dast.ConstantExpr, node)
expr.value = node.s
self.pop_state()
return expr
def visit_Num(self, node):
expr = self.create_expr(dast.ConstantExpr, node)
expr.value = node.n
self.pop_state()
return expr
# Since Python 3.4:
def visit_NameConstant(self, node):
if node.value == True:
return self.create_expr(dast.TrueExpr, node, nopush=True)
elif node.value == False:
return self.create_expr(dast.FalseExpr, node, nopush=True)
elif node.value == None:
return self.create_expr(dast.NoneExpr, node, nopush=True)
else:
raise NotImplementedError("Unrecognized NameConstant %s." % repr(node.value))
def visit_Tuple(self, node):
expr = self.create_expr(dast.TupleExpr, node)
for item in node.elts:
expr.subexprs.append(self.visit(item))
self.pop_state()
return expr
def visit_List(self, node):
expr = self.create_expr(dast.ListExpr, node)
for item in node.elts:
expr.subexprs.append(self.visit(item))
self.pop_state()
return expr
def visit_Set(self, node):
expr = self.create_expr(dast.SetExpr, node)
for item in node.elts:
expr.subexprs.append(self.visit(item))
self.pop_state()
return expr
def visit_Dict(self, node):
expr = self.create_expr(dast.DictExpr, node)
for key in node.keys:
expr.keys.append(self.visit(key))
for value in node.values:
expr.values.append(self.visit(value))
self.pop_state()
return expr
def visit_BinOp(self, node):
e = self.create_expr(dast.BinaryExpr, node,
{"op": OperatorMap[type(node.op)]})
e.left = self.visit(node.left)
e.right = self.visit(node.right)
self.pop_state()
return e
def visit_BoolOp(self, node):
e = self.create_expr(dast.LogicalExpr, node,
{"op": OperatorMap[type(node.op)]})
for v in node.values:
e.subexprs.append(self.visit(v))
self.pop_state()
return e
def visit_Compare(self, node):
if len(node.ops) > 1:
self.error("Explicit parenthesis required in comparison expression",
node)
return None
outer = None
# We make all negation explicit:
if type(node.ops[0]) in NegatedOperators:
outer = self.create_expr(dast.LogicalExpr, node)
outer.operator = dast.NotOp
expr = self.create_expr(dast.ComparisonExpr, node)
if self.enable_membertest_pattern:
# DistAlgo: overload "in" to allow pattern matching
if isinstance(node.ops[0], In) or \
isinstance(node.ops[0], NotIn):
# Backward compatibility: only assume pattern if containing free
# var
pf = PatternFinder()
pf.visit(node.left)
if pf.found:
expr.left = self.parse_pattern_expr(node.left)
if expr.left is None:
expr.left = self.visit(node.left)
self.current_context = Read(expr.left)
expr.right = self.visit(node.comparators[0])
if (isinstance(expr.right, dast.HistoryExpr) and
expr.right.event is not None):
# Must replace short pattern format with full pattern here:
expr.left = self.pattern_from_event(expr.right.event)
if outer is not None:
expr.comparator = NegatedOperators[type(node.ops[0])]
outer.subexprs.append(expr)
self.pop_state()
self.pop_state()
return outer
else:
expr.comparator = OperatorMap[type(node.ops[0])]
self.pop_state()
return expr
def visit_UnaryOp(self, node):
if type(node.op) is Not:
expr = self.create_expr(dast.LogicalExpr, node, {"op": dast.NotOp})
expr.subexprs.append(self.visit(node.operand))
else:
expr = self.create_expr(dast.UnaryExpr, node,
{"op": OperatorMap[type(node.op)]})
expr.right = self.visit(node.operand)
self.pop_state()
return expr
def visit_Subscript(self, node):
expr = self.create_expr(dast.SubscriptExpr, node)
expr.value = self.visit(node.value)
self.current_context = Read()
expr.index = self.visit(node.slice)
self.pop_state()
return expr
def visit_Index(self, node):
return self.visit(node.value)
def visit_Slice(self, node):
expr = self.create_expr(dast.SliceExpr, node)
if node.lower is not None:
expr.lower = self.visit(node.lower)
if node.upper is not None:
expr.upper = self.visit(node.upper)
if node.step is not None:
expr.step = self.visit(node.step)
self.pop_state()
return expr
def visit_ExtSlice(self, node):
self.warn("ExtSlice in subscript not supported.", node)
return self.create_expr(dast.PythonExpr, node, nopush=True)
def visit_Yield(self, node):
# Should not get here: 'yield' statements should have been handled by
# visit_Expr
self.error("unexpected 'yield' expression.", node)
return self.create_expr(dast.PythonExpr, node, nopush=True)
def visit_YieldFrom(self, node):
# Should not get here: 'yield from' statements should have been
# handled by visit_Expr
self.error("unexpected 'yield from' expression.", node)
return self.create_expr(dast.PythonExpr, node, nopush=True)
def visit_Lambda(self, node):
expr = self.create_expr(dast.LambdaExpr, node)
self.signature(node.args)
expr.body = self.visit(node.body)
self.pop_state()
return expr
def visit_Ellipsis(self, node):
return self.create_expr(dast.EllipsisExpr, node, nopush=True)
def generator_visit(self, node):
if isinstance(node, SetComp):
expr = self.create_expr(dast.SetCompExpr, node)
elif isinstance(node, ListComp):
expr = self.create_expr(dast.ListCompExpr, node)
elif isinstance(node, DictComp):
expr = self.create_expr(dast.DictCompExpr, node)
else:
expr = self.create_expr(dast.GeneratorExpr, node)
for g in node.generators:
expr.unlock()
self.current_context = Assignment()
# DistAlgo: overload 'in' to allow pattern matching:
expr.domains.append(self.parse_domain_spec(g))
expr.lock()
self.current_context = Read()
expr.conditions.extend([self.visit(i) for i in g.ifs])
if isinstance(node, DictComp):
kv = dast.KeyValue(expr)
kv.key = self.visit(node.key)
kv.value = self.visit(node.value)
expr.elem = kv
else:
expr.elem = self.visit(node.elt)
self.pop_state()
return expr
visit_ListComp = generator_visit
visit_GeneratorExp = generator_visit
visit_SetComp = generator_visit
visit_DictComp = generator_visit
del generator_visit
def visit_IfExp(self, node):
expr = self.create_expr(dast.IfExpr, node)
expr.condition = self.visit(node.test)
expr.body = self.visit(node.body)
expr.orbody = self.visit(node.orelse)
self.pop_state()
return expr
def visit_Starred(self, node):
expr = self.create_expr(dast.StarredExpr, node)
expr.value = self.visit(node.value)
self.pop_state()
return expr
# Helper Nodes
def error(self, mesg, node):
self.errcnt += 1
if node is not None:
printe(mesg, node.lineno, node.col_offset, self.filename)
else:
printe(mesg, 0, 0, self.filename)
def warn(self, mesg, node):
self.warncnt += 1
if node is not None:
printw(mesg, node.lineno, node.col_offset, self.filename)
else:
printw(mesg, 0, 0, self.filename)
def debug(self, mesg, node=None):
if node is not None:
printd(mesg, node.lineno, node.col_offset, self.filename)
else:
printd(mesg, 0, 0, self.filename)
if __name__ == "__main__":
pass
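A side note on the idiom used for the comprehension visitors above: `generator_visit` is written once, bound to `visit_ListComp`, `visit_GeneratorExp`, `visit_SetComp` and `visit_DictComp`, and then removed with `del` so the helper name itself is not left behind as a method. Below is a minimal, self-contained sketch of the same pattern using only the standard `ast` module; the node handling and output are illustrative, not part of the DistAlgo parser.

import ast

class CompVisitor(ast.NodeVisitor):
    # One handler shared by several comprehension node types.
    def _visit_comprehension(self, node):
        print(type(node).__name__, "with", len(node.generators), "generator(s)")
        self.generic_visit(node)

    # Bind the shared handler under the names NodeVisitor dispatches on,
    # then delete the helper name so it is not exposed as a method itself.
    visit_ListComp = _visit_comprehension
    visit_SetComp = _visit_comprehension
    visit_DictComp = _visit_comprehension
    visit_GeneratorExp = _visit_comprehension
    del _visit_comprehension

if __name__ == "__main__":
    CompVisitor().visit(ast.parse("[x * x for x in range(3)]"))  # prints: ListComp with 1 generator(s)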
|
mayli/DistAlgo
|
da/compiler/parser.py
|
Python
|
mit
| 75,023
|
[
"VisIt"
] |
57856c58d63832945ade8936a3106007f0b2172db46e51cd8343f8729fe7bd4b
|
###############################################################################
# Settings file for birdie stress tests
###############################################################################
# base URL of installation
BASE_URL='http://localhost:6543'
DB_FILE='sqlite:///fakeusers.sqlite'
# pre-registered users to consider during the test - run the initialize_db script first
#MAX_USERS=50
#MAX_USERS=500
MAX_USERS=500
#MAX_USERS=50000
#MAX_USERS=500000
# pre-generated users up to 100 -- must be created by running register.py manually
BASE_USERNAME='user_'
# bio sketch for all generated users
ABOUT='I am a fake user generated by multi mechanize series of stress tests'
# True Twitter stats (Craig Smith, December 1, 2013)
#
# unique monthly visitors = 36 million (2013-09-24)
# total number of tweets sent = 300 billion (2013-10-03)
# monthly active twitter users 231.7 million (2013-10-17)
# daily active twitter users = 100 million (2013-10-03)
# average number of followers per twitter user = 208 (2012-10-11)
# average number of tweets sent per day = 500 million (2013-10-03)
# average number of tweets per twitter user = 307 (2013-01-11)
# total number of twitter registered users =~ 1 billion (2013-09-16)
# number of monthly active twitter users that actually tweet = 117 million (2013-11-11)
# estimated percentage of twitter accounts that tweet monthly = 13% (2013-11-11)
# 1 mn test: (43.200 mn per month)
# 5364 active users (view - follow - unfollow)
# 2708 users that actually tweet (post_chirp)
# 347.200 tweets (post_chirp + RT) - 128 tweet/min/user !!!
# 833 unique visitors (visit)
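The figures in the comment block above appear to come from scaling the monthly Twitter statistics down to a one-minute test window: a 30-day month has 43,200 minutes. A small sketch of that arithmetic, assuming 30-day months and the stats quoted above (the variable names are ours, not part of the test settings):

# Sketch: derive the per-minute figures quoted above from the monthly stats.
MINUTES_PER_MONTH = 30 * 24 * 60  # 43,200

monthly_active_users = 231.7e6
monthly_tweeting_users = 117e6
tweets_per_day = 500e6
unique_monthly_visitors = 36e6

print(int(monthly_active_users / MINUTES_PER_MONTH))     # ~5363 active users
print(int(monthly_tweeting_users / MINUTES_PER_MONTH))   # ~2708 tweeting users
print(int(tweets_per_day * 30 / MINUTES_PER_MONTH))      # ~347,222 tweets
print(int(unique_monthly_visitors / MINUTES_PER_MONTH))  # ~833 unique visitors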
|
simonwoo/Birdie_Redis
|
birdie-stress/test_scripts/birdie_settings.py
|
Python
|
mit
| 1,619
|
[
"VisIt"
] |
3070101d5bd4378cca14a1433d63418146b2400696440c153506760fb6400dbb
|
import calendar
import json
import re
import uuid
import mock
from django.core.cache import cache
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse
from nose.tools import eq_, ok_
from airmozilla.main.models import Event
from airmozilla.base.tests.testbase import Response, DjangoTestCase
from airmozilla.comments.views import (
can_manage_comments,
get_latest_comment
)
from airmozilla.comments.models import (
Discussion,
Comment,
Unsubscription
)
from airmozilla.base.tests.test_mozillians import (
VOUCHED_FOR_USERS,
VOUCHED_FOR,
)
class TestComments(DjangoTestCase):
def _create_discussion(self, event, enabled=True, moderate_all=True,
notify_all=True):
return Discussion.objects.create(
event=event,
enabled=enabled,
moderate_all=moderate_all,
notify_all=notify_all
)
def test_can_manage_comments(self):
event = Event.objects.get(title='Test event')
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
richard = User.objects.create(username='richard',
email='richard@mozilla.com',
is_superuser=True)
discussion = self._create_discussion(event)
discussion.moderators.add(jay)
ok_(not can_manage_comments(bob, discussion))
ok_(can_manage_comments(jay, discussion))
ok_(can_manage_comments(richard, discussion))
def test_get_latest_comment(self):
event = Event.objects.get(title='Test event')
eq_(get_latest_comment(event), None)
# or by ID
eq_(get_latest_comment(event.pk), None)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
comment = Comment.objects.create(
event=event,
user=bob,
comment="Hi, it's Bob",
status=Comment.STATUS_POSTED
)
latest = get_latest_comment(event)
eq_(latest, None)
latest = get_latest_comment(event, include_posted=True)
modified = calendar.timegm(comment.modified.utctimetuple())
eq_(latest, modified)
# again, or by event ID
latest_second_time = get_latest_comment(event.pk, include_posted=True)
eq_(latest, latest_second_time)
def test_basic_event_data(self):
event = Event.objects.get(title='Test event')
# render the event and there should be no comments
url = reverse('main:event', args=(event.slug,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Comments' not in response.content)
# if not enabled you get that back in JSON
comments_url = reverse('comments:event_data', args=(event.pk,))
response = self.client.get(comments_url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['discussion']['enabled'], False)
# also, trying to post a comment when it's not enabled
# should cause an error
response = self.client.post(comments_url, {
'name': 'Peter',
'comment': 'Bla bla'
})
eq_(response.status_code, 400)
# enable discussion
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
discussion.moderators.add(jay)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Comments' in response.content)
comments_url = reverse('comments:event_data', args=(event.pk,))
response = self.client.get(comments_url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['discussion']['enabled'], True)
eq_(structure['discussion']['closed'], False)
ok_('No comments posted' in structure['html'])
# even though it's enabled, it should reject postings
# because we're not signed in
response = self.client.post(comments_url, {
'name': 'Peter',
'comment': 'Bla bla'
})
eq_(response.status_code, 403)
# so, let's sign in and try again
User.objects.create_user('richard', password='secret')
# but it should be ok if self.user had the add_event permission
assert self.client.login(username='richard', password='secret')
response = self.client.post(comments_url, {
'name': 'Richard',
'comment': 'Bla bla'
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
ok_('No comments posted' not in structure['html'])
ok_('Bla bla' in structure['html'])
comment = Comment.objects.get(comment='Bla bla')
ok_(comment)
eq_(comment.status, Comment.STATUS_POSTED)
# the moderator should now have received an email
email_sent = mail.outbox[-1]
ok_(event.title in email_sent.subject)
ok_('requires moderation' in email_sent.subject)
ok_(url in email_sent.body)
ok_(url + '#comment-%d' % comment.pk in email_sent.body)
def test_post_comment_no_moderation(self):
event = Event.objects.get(title='Test event')
self._create_discussion(event, moderate_all=False)
User.objects.create_user('richard', password='secret')
assert self.client.login(username='richard', password='secret')
comments_url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(comments_url, {
'name': 'Richard',
'comment': 'Bla bla'
})
eq_(response.status_code, 200)
# structure = json.loads(response.content)
comment = Comment.objects.get(event=event)
eq_(comment.status, Comment.STATUS_APPROVED)
def test_moderation_immediately(self):
"""when you post a comment that needs moderation, the moderator
can click a link in the email notification that immediately
approves the comment without being signed in"""
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_POSTED
)
identifier = uuid.uuid4().hex[:10]
cache.set('approve-%s' % identifier, comment.pk, 60)
cache.set('remove-%s' % identifier, comment.pk, 60)
approve_url = reverse(
'comments:approve_immediately',
args=(identifier, comment.pk)
)
remove_url = reverse(
'comments:remove_immediately',
args=(identifier, comment.pk)
)
response = self.client.get(approve_url)
eq_(response.status_code, 200)
ok_('Comment Approved' in response.content)
# reload
comment = Comment.objects.get(pk=comment.pk)
eq_(comment.status, Comment.STATUS_APPROVED)
response = self.client.get(remove_url)
eq_(response.status_code, 200)
ok_('Comment Removed' in response.content)
# reload
comment = Comment.objects.get(pk=comment.pk)
eq_(comment.status, Comment.STATUS_REMOVED)
# try with identifiers that aren't in the cache
bogus_identifier = uuid.uuid4().hex[:10]
bogus_approve_url = reverse(
'comments:approve_immediately',
args=(bogus_identifier, comment.pk)
)
bogus_remove_url = reverse(
'comments:remove_immediately',
args=(bogus_identifier, comment.pk)
)
response = self.client.get(bogus_approve_url)
eq_(response.status_code, 200)
ok_('Comment Approved' not in response.content)
ok_('Unable to Approve Comment' in response.content)
response = self.client.get(bogus_remove_url)
eq_(response.status_code, 200)
ok_('Comment Removed' not in response.content)
ok_('Unable to Remove Comment' in response.content)
def test_unsubscribe_on_reply_notifications(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'name': 'Jay',
'reply_to': comment.pk,
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
ok_('Bla bla' in structure['html'])
ok_('I think this' in structure['html'])
# now, we must approve this comment
new_comment = Comment.objects.get(
comment='I think this',
user=jay
)
response = self.client.post(url, {
'approve': new_comment.pk,
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure, {'ok': True})
email_sent = mail.outbox[-1]
ok_('Reply' in email_sent.subject)
ok_(event.title in email_sent.subject)
eq_(email_sent.to, ['bob@mozilla.com'])
# expect there to be two unsubscribe links in there
url_unsubscribe = re.findall(
'/comments/unsubscribe/\w{10}/\d+/',
email_sent.body
)[0]
urls_unsubscribe_all = re.findall(
'/comments/unsubscribe/\w{10}/',
email_sent.body
)
for url in urls_unsubscribe_all:
if not url_unsubscribe.startswith(url):
url_unsubscribe_all = url
self.client.logout()
# now let's visit these
response = self.client.get(url_unsubscribe)
eq_(response.status_code, 200)
ok_('Are you sure' in response.content)
response = self.client.post(url_unsubscribe, {})
eq_(response.status_code, 302)
Unsubscription.objects.get(
user=bob,
discussion=discussion
)
unsubscribed_url = reverse(
'comments:unsubscribed',
args=(discussion.pk,)
)
ok_(unsubscribed_url in response['location'])
response = self.client.get(unsubscribed_url)
eq_(response.status_code, 200)
ok_('Unsubscribed' in response.content)
ok_(event.title in response.content)
response = self.client.post(url_unsubscribe_all, {})
eq_(response.status_code, 302)
Unsubscription.objects.get(
user=bob,
discussion__isnull=True
)
unsubscribed_url = reverse('comments:unsubscribed_all')
ok_(unsubscribed_url in response['location'])
def test_unsubscribed_reply_notifications_discussion(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
Unsubscription.objects.create(
user=bob,
discussion=discussion
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'reply_to': comment.pk,
})
eq_(response.status_code, 200)
# But it needs to be approved for reply notifications to
# even be attempted.
new_comment = Comment.objects.get(comment='I think this')
eq_(new_comment.reply_to.user, bob)
response = self.client.post(url, {
'approve': new_comment.pk,
})
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure, {'ok': True})
ok_(not mail.outbox)
def test_unsubscribed_reply_notifications_all(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
Unsubscription.objects.create(
user=bob,
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'reply_to': comment.pk,
})
eq_(response.status_code, 200)
# But it needs to be approved for reply notifications to
# even be attempted.
new_comment = Comment.objects.get(comment='I think this')
eq_(new_comment.reply_to.user, bob)
response = self.client.post(url, {
'approve': new_comment.pk,
})
ok_(not mail.outbox)
def test_invalid_reply_to(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
jay = User.objects.create(username='jay', email='jay@mozilla.com')
bob = User.objects.create(username='bob', email='bob@mozilla.com')
discussion.moderators.add(jay)
Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_APPROVED
)
jay.set_password('secret')
jay.save()
assert self.client.login(username='jay', password='secret')
# post a reply
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'comment': 'I think this',
'reply_to': '999999999',
})
eq_(response.status_code, 400)
@mock.patch('logging.error')
@mock.patch('requests.get')
def test_fetch_user_name(self, rget, rlogging):
cache.clear()
def mocked_get(url, **options):
if '/v2/users/99999' in url:
return Response(VOUCHED_FOR)
if 'peterbe' in url:
return Response(VOUCHED_FOR_USERS)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('comments:user_name')
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['name'], '')
peterbe = User.objects.create_user(
username='peterbe', password='secret'
)
assert self.client.login(username='peterbe', password='secret')
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['name'], '')
peterbe.email = 'peterbe@mozilla.com'
peterbe.save()
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['name'], 'Peter Bengtsson')
def test_modify_comment_without_permission(self):
event = Event.objects.get(title='Test event')
self._create_discussion(event)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_POSTED
)
url = reverse('comments:event_data', args=(event.pk,))
response = self.client.post(url, {
'approve': comment.pk,
})
eq_(response.status_code, 403)
# and not being logged in you definitely can't post comments
response = self.client.post(url, {
'comment': "My opinion",
})
eq_(response.status_code, 403)
User.objects.create_user(username='jay', password='secret')
assert self.client.login(username='jay', password='secret')
response = self.client.post(url, {
'approve': comment.pk,
})
eq_(response.status_code, 403)
response = self.client.post(url, {
'unapprove': comment.pk,
})
eq_(response.status_code, 403)
response = self.client.post(url, {
'remove': comment.pk,
})
eq_(response.status_code, 403)
# but you can flag
response = self.client.post(url, {
'flag': comment.pk,
})
eq_(response.status_code, 200)
# but not unflag
response = self.client.post(url, {
'unflag': comment.pk,
})
eq_(response.status_code, 403)
def test_modify_comment_with_permission(self):
event = Event.objects.get(title='Test event')
discussion = self._create_discussion(event)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
jay = User.objects.create_user(username='jay', password='secret')
discussion.moderators.add(jay)
comment = Comment.objects.create(
event=event,
user=bob,
comment='Bla bla',
status=Comment.STATUS_POSTED,
flagged=1
)
url = reverse('comments:event_data', args=(event.pk,))
assert self.client.login(username='jay', password='secret')
response = self.client.post(url, {
'approve': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(status=Comment.STATUS_APPROVED))
response = self.client.post(url, {
'unapprove': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(status=Comment.STATUS_POSTED))
response = self.client.post(url, {
'remove': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(status=Comment.STATUS_REMOVED))
response = self.client.post(url, {
'unflag': comment.pk,
})
eq_(response.status_code, 200)
ok_(Comment.objects.get(flagged=0))
def test_event_data_latest_400(self):
cache.clear()
event = Event.objects.get(title='Test event')
url = reverse('comments:event_data_latest', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 400)
discussion = self._create_discussion(event)
discussion.enabled = False
discussion.save()
response = self.client.get(url)
eq_(response.status_code, 400)
def test_event_data_latest(self):
event = Event.objects.get(title='Test event')
self._create_discussion(event)
url = reverse('comments:event_data_latest', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['latest_comment'], None)
bob = User.objects.create(username='bob', email='bob@mozilla.com')
comment = Comment.objects.create(
user=bob,
event=event,
comment="Hi, it's Bob",
status=Comment.STATUS_POSTED
)
response = self.client.get(url)
eq_(response.status_code, 200)
structure = json.loads(response.content)
eq_(structure['latest_comment'], None)
response = self.client.get(url, {'include_posted': True})
eq_(response.status_code, 200)
structure = json.loads(response.content)
modified = calendar.timegm(comment.modified.utctimetuple())
eq_(structure['latest_comment'], modified)
# ask it again and it should be the same
response_second = self.client.get(url, {'include_posted': True})
eq_(response_second.status_code, 200)
eq_(response.content, response_second.content)
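The moderate-immediately tests above rely on a one-time identifier stored in the cache: the notification email embeds a short random token, and the approve/remove links only act while `approve-<token>` (or `remove-<token>`) still maps to the comment's primary key. Below is a minimal sketch of that token pattern in isolation using Django's cache API; the helper names are hypothetical and not airmozilla's actual view code.

import uuid
from django.core.cache import cache

def issue_moderation_token(comment_pk, ttl=60):
    # Hypothetical helper: stash the comment pk under a short random token
    # so an emailed link can approve it later without the moderator signing in.
    token = uuid.uuid4().hex[:10]
    cache.set('approve-%s' % token, comment_pk, ttl)
    return token

def approve_with_token(token):
    # Hypothetical helper: only act if the token is still in the cache.
    comment_pk = cache.get('approve-%s' % token)
    if comment_pk is None:
        return None  # expired or bogus token -> "Unable to Approve Comment"
    cache.delete('approve-%s' % token)
    return comment_pk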
|
kenrick95/airmozilla
|
airmozilla/comments/tests/test_views.py
|
Python
|
bsd-3-clause
| 21,315
|
[
"VisIt"
] |
e6667e7c63b2ef59a810ebb389617d2e404b7a9073fe86b9b431ba12a26b8dad
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import time
import numpy
from pyscf import gto
from pyscf.df import incore
# (ij|L)
def aux_e2(mol, auxmol, intor='int3c2e_spinor', aosym='s1', comp=None, hermi=0):
intor, comp = gto.moleintor._get_intor_and_comp(mol._add_suffix(intor), comp)
atm, bas, env = gto.mole.conc_env(mol._atm, mol._bas, mol._env,
auxmol._atm, auxmol._bas, auxmol._env)
shls_slice = (0, mol.nbas, 0, mol.nbas, mol.nbas, mol.nbas+auxmol.nbas)
ao_loc1 = mol.ao_loc_2c()
ao_loc2 = auxmol.ao_loc_nr('ssc' in intor)
nao = ao_loc1[-1]
ao_loc = numpy.append(ao_loc1, ao_loc2[1:]+nao)
out = gto.moleintor.getints3c(intor, atm, bas, env, shls_slice,
comp, aosym, ao_loc=ao_loc)
return out
# (L|ij)
def aux_e1(mol, auxmol, intor='int3c2e_spinor', aosym='s1', comp=1, hermi=0):
raise NotImplementedError
def cholesky_eri(mol, auxbasis='weigend+etb', auxmol=None,
int3c='int3c2e_spinor', aosym='s1', int2c='int2c2e_sph', comp=1,
verbose=0):
return incore.cholesky_eri_debug(mol, auxbasis, auxmol, int3c, aosym, int2c,
comp, verbose, aux_e2)
if __name__ == '__main__':
from pyscf import lib
from pyscf import scf
mol = gto.Mole()
mol.build(
verbose = 0,
atom = [["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'ccpvdz',
)
cderi = (cholesky_eri(mol, int3c='int3c2e_spinor', verbose=5),
cholesky_eri(mol, int3c='int3c2e_spsp1_spinor', verbose=5))
n2c = mol.nao_2c()
c2 = .5 / lib.param.LIGHT_SPEED
def fjk(mol, dm, *args, **kwargs):
# dm is 4C density matrix
cderi_ll = cderi[0].reshape(-1,n2c,n2c)
cderi_ss = cderi[1].reshape(-1,n2c,n2c)
vj = numpy.zeros((n2c*2,n2c*2), dtype=dm.dtype)
vk = numpy.zeros((n2c*2,n2c*2), dtype=dm.dtype)
rho =(numpy.dot(cderi[0], dm[:n2c,:n2c].T.reshape(-1))
+ numpy.dot(cderi[1], dm[n2c:,n2c:].T.reshape(-1)*c2**2))
vj[:n2c,:n2c] = numpy.dot(rho, cderi[0]).reshape(n2c,n2c)
vj[n2c:,n2c:] = numpy.dot(rho, cderi[1]).reshape(n2c,n2c) * c2**2
v1 = numpy.einsum('pij,jk->pik', cderi_ll, dm[:n2c,:n2c])
vk[:n2c,:n2c] = numpy.einsum('pik,pkj->ij', v1, cderi_ll)
v1 = numpy.einsum('pij,jk->pik', cderi_ss, dm[n2c:,n2c:])
vk[n2c:,n2c:] = numpy.einsum('pik,pkj->ij', v1, cderi_ss) * c2**4
v1 = numpy.einsum('pij,jk->pik', cderi_ll, dm[:n2c,n2c:])
vk[:n2c,n2c:] = numpy.einsum('pik,pkj->ij', v1, cderi_ss) * c2**2
vk[n2c:,:n2c] = vk[:n2c,n2c:].T.conj()
return vj, vk
mf = scf.DHF(mol)
mf.get_jk = fjk
mf.direct_scf = False
ehf1 = mf.scf()
print(ehf1, -76.08073868516945)
cderi = cderi[0].reshape(-1,n2c,n2c)
print(numpy.allclose(cderi, cderi.transpose(0,2,1).conj()))
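For readers unfamiliar with density fitting, the `fjk` callback above builds the Coulomb (J) and exchange (K) matrices by contracting the 3-index Cholesky vectors with the density matrix. Stripped of the relativistic block structure and the speed-of-light factors, the basic contractions it performs look like the sketch below (random stand-in arrays, illustration only):

import numpy

naux, n = 6, 4
B = numpy.random.rand(naux, n, n)        # stand-in for cderi reshaped to (L, i, j)
dm = numpy.random.rand(n, n)             # stand-in for one block of the density matrix

rho = numpy.einsum('Lij,ji->L', B, dm)   # rho_L = sum_ij B_Lij D_ji
vj = numpy.einsum('L,Lij->ij', rho, B)   # J_ij  = sum_L rho_L B_Lij
v1 = numpy.einsum('Lij,jk->Lik', B, dm)
vk = numpy.einsum('Lik,Lkj->ij', v1, B)  # K_ij  = sum_{L,k,l} B_Lik D_kl B_Llj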
|
gkc1000/pyscf
|
pyscf/df/r_incore.py
|
Python
|
apache-2.0
| 3,656
|
[
"PySCF"
] |
34d74dfb2f8e5f20eb8eb61b21c2e9e3cf87b1dbc2eab52c8a3330c759188dac
|
"""Script to list all the modules and packages used by a Brython page
It parses the HTML page, detecting the tags <script type="text/python"> and
gets the Python code embedded inside these tags, or of the external Python
scripts if the tag has an attribute "src".
Use module ast to detect all the imports in the scripts, either "import X"
or "from A import B". Then try to find the modules or packages referenced by
these imports ; for those that are found, again detect all imports
recursively.
"""
import os
import io
import re
import tokenize
import token
import ast
import html.parser
www = os.path.join(os.path.dirname(os.getcwd()), 'www')
js_path = os.path.join(www, 'src', 'libs')
paths = [os.path.join(www, 'src', 'Lib'),
js_path,
os.path.join(www, 'src', 'Lib/site-packages')]
class ImportLister(ast.NodeVisitor):
"""Store all imports in a dictionary indexed by module or
package name
imports[name] = None for "import name"
imports[name] = package for "from X import name"
"""
def __init__(self):
ast.NodeVisitor.__init__(self)
self.imports = {}
def visit_Import(self, node):
for imported in node.names:
self.imports[imported.name] = None
self.generic_visit(node)
def visit_ImportFrom(self, node):
for name in node.names:
self.imports[name.name] = node.module
self.generic_visit(node)
class ScriptsFinder(html.parser.HTMLParser):
def __init__(self, script_path):
html.parser.HTMLParser.__init__(self)
self.scripts = {}
self.python = False
self.counter = None
self.script_path = script_path
self.folder = os.path.dirname(script_path)
self.imported = {}
self.not_found = set()
with open(self.script_path, encoding='utf-8') as fobj:
src = fobj.read()
self.feed(src)
# After self.feed, self.scripts is a dictionary with
# all the internal and external Python scripts in the
# HTML document
for script_id, [script_path, src] in self.scripts.items():
self.find_modules(script_path, src)
# After find_modules we have a dictionary of all the modules
# and packages found
items = sorted(self.imported.items())
# Normalise the qualified module or package name
# self.imports is a sorted list of 3-element lists, giving
# for all modules and packages needed by the HTML page :
# - its qualified name
# - its type : "js_module" (built-in Javascript module, in libs),
# "module" or "package"
# - its path relative to the www directory (with '/' separators)
self.imports = imports = []
for key, (typ, path) in items:
if typ=='module':
for lib in paths:
if path.startswith(lib):
elts = os.path.split(path[len(lib):].lstrip(os.sep))
elts = [x for x in elts if x]
elts[-1] = os.path.splitext(elts[-1])[0]
key = '.'.join(elts)
imports.append([key, typ,
'/'.join(path[len(www)+1:].split(os.sep))])
imports.sort()
def handle_starttag(self, tag, attrs):
"""Set self.python to True if tag is script and
type = text/python and src is not set
"""
if tag=='script':
src = None
python = False
_id = None
for key, value in attrs:
if key=='type' and value.lower() in ['text/python',
'text/python3']:
python = True
elif key=='src':
src = value
elif key == 'id':
_id = value
if python:
# get script id
if _id is not None:
self._id = _id
elif self.counter is None:
self._id = '__main__'
self.counter = 1
else:
self._id = '__main__{}'.format(self.counter)
self.counter += 1
if not src:
# set attribute python to store next HTML data
self.python = True
else:
# find url of external script
path = os.path.join(self.folder, *src.split('/'))
with open(path, 'rb') as ext_script_obj:
self.scripts[self._id] = [path, ext_script_obj.read()]
def handle_data(self, data):
if self.python:
if data.strip():
indent = 0
lines = data.split('\n')
print(lines)
for line in lines:
if line:
indent = len(line)-len(line.lstrip())
break
print('indent', indent)
if indent:
lines = [line[indent:] for line in lines]
data = '\n'.join(lines)
self.scripts[self._id] = [self.script_path, data.encode('utf-8')]
self.python = False
def find_modules(self, path, src):
"""Parse source code, resolve all imports"""
imports = ImportLister()
imports.visit(ast.parse(src))
for name, _from in imports.imports.items():
if _from is None:
self.resolve_import(path, name)
else:
self.resolve_import_from(path, name, _from)
def resolve_import(self, script_path, name):
"""Find module source code, based on script path and name
"""
if name in self.imported or name in self.not_found:
return
elts = name.split('.')
script_folder = os.path.dirname(script_path)
for mod_path in [paths[0], js_path, script_folder, paths[2]]:
if mod_path is js_path:
module_filename = os.path.join(js_path, name+'.js')
if os.path.exists(module_filename):
self.imported[name] = 'js_module', module_filename
return
continue
for elt in elts[:-1]:
sub_path = os.path.join(mod_path, elt)
if os.path.exists(os.path.join(sub_path, '__init__.py')):
mod_path = sub_path
else:
break
else:
# If we get here, mod_path is the path where the module might
# be.
# Try module_name.py
module_filename = os.path.join(mod_path, elts[-1]+'.py')
if os.path.exists(module_filename):
self.imported[name] = 'module', module_filename
with open(module_filename, 'rb') as fobj:
self.find_modules(mod_path, fobj.read())
return module_filename
else:
# try module/__init__.py
package_folder = os.path.join(mod_path, elts[-1])
package_filename = os.path.join(package_folder,
'__init__.py')
if os.path.exists(package_filename):
self.imported[name] = 'package', package_filename
with open(package_filename, 'rb') as fobj:
self.find_modules(package_folder, fobj.read())
return package_filename
self.not_found.add(name)
def resolve_import_from(self, script_path, name, _from):
"""Resolve an import of the form "from X import name"
found in the script at specified path
"""
origin = self.resolve_import(script_path, _from)
try:
typ, filename = self.imported[_from]
except KeyError:
self.not_found.add(_from)
return
if typ == "package":
# form "from package import name"
# search a module name.py in package directory
# or a module __init__.py in the subdirectory "name"
package_path = os.path.dirname(filename)
module_filename = os.path.join(package_path, name+'.py')
package_filename = os.path.join(package_path, name, '__init__.py')
if os.path.exists(module_filename):
self.imported[name] = 'module', module_filename
with open(module_filename, encoding="utf-8") as fobj:
self.find_modules(package_path, fobj.read())
return module_filename
elif os.path.exists(package_filename):
self.imported[name] = 'package', package_filename
with open(package_filename, encoding="utf-8") as fobj:
self.find_modules(os.path.join(package_path, name),
fobj.read())
return package_filename
else:
# ignore names imported from the package __init__.py file
return
else:
# ignore names imported from a module
return
if __name__ == '__main__':
brython_page = os.path.join(www, 'app', 'test_bundle.html')
finder = ScriptsFinder(brython_page)
for line in finder.imports:
print(line)
builtin_modules = ['browser', 'browser.html', 'javascript', '_sys']
with open(os.path.join(www, 'gallery', 'imports.txt'), 'w',
encoding='utf-8') as out:
for name, typ, url in finder.imports:
# exclude the modules that will be in brython_dist.js
if name in builtin_modules:
continue
if url.startswith('src/libs/'):
continue
if url.startswith('src/Lib') and not \
url.startswith('src/Lib/site-packages'):
continue
out.write('{} {} {}\n'.format(name, typ, url))
print('Not found\n', finder.not_found)
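As a quick illustration of the `ImportLister` visitor defined at the top of this script, the sketch below runs it on a small in-memory source string; each plain `import name` maps to `None` and each `from X import name` maps to its package. The sample source is made up for this example.

if __name__ == '__main__':
    # Minimal sketch: run ImportLister on an in-memory source string.
    sample = "import os\nfrom collections import OrderedDict\n"
    lister = ImportLister()
    lister.visit(ast.parse(sample))
    print(lister.imports)  # {'os': None, 'OrderedDict': 'collections'}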
|
jonathanverner/brython
|
scripts/find_modules.py
|
Python
|
bsd-3-clause
| 10,215
|
[
"VisIt"
] |
9dec1b6788b3c234315dd575984e4ca6feccd6330b7a4e685797d8ffd23bc938
|
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of GaussianKernel for MinDiff."""
import tensorflow as tf
from tensorflow_model_remediation.common import types
from tensorflow_model_remediation.min_diff.losses.kernels import base_kernel
@tf.keras.utils.register_keras_serializable()
class GaussianKernel(base_kernel.MinDiffKernel):
# pyformat: disable
"""Gaussian kernel class.
The Gaussian kernel is a mathematical tool that approximates a given
distribution as a sum of gaussian distributions. This is particularly useful
when we are trying to determine a distribution from a set of points.
Arguments:
kernel_length: Length (sometimes also called 'width') of the kernel.
Defaults to `0.1`. This parameter essentially describes how far apart
points can be and still affect each other.
The choice for kernel length should be influenced by the average distance
of inputs. The smaller the distance, the smaller the kernel length likely
needs to be for best performance. In general, a good first guess is the
standard deviation of your predictions.
Note: A kernel length that is too large will result in losing most of the
kernel's non-linearity making it much less effective. A kernel length
that is too small will make the kernel highly sensitive to input noise
potentially leading to unstable results.
**kwargs: Named parameters that will be passed directly to the base
class' `__init__` function.
See [paper](https://arxiv.org/abs/1910.11779) for reference on how it can be
used in MinDiff.
"""
# pyformat: enable
def __init__(self, kernel_length: complex = 0.1, **kwargs):
super(GaussianKernel, self).__init__(**kwargs)
self.kernel_length = kernel_length
def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
"""Computes the Gaussian kernel."""
return tf.exp(-tf.reduce_sum(tf.square(x - y), axis=2) /
tf.math.pow(self.kernel_length, 2))
def get_config(self):
"""Returns the config dictionary for the GaussianKernel instance."""
config = super(GaussianKernel, self).get_config()
config.update({"kernel_length": self.kernel_length})
return config
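To make the kernel-length guidance in the docstring above concrete, the sketch below evaluates the same formula, exp(-||x - y||^2 / kernel_length^2), with plain NumPy on two toy prediction values. It only illustrates the math; it is not part of the library, and the numbers are made up.

import numpy as np

def gaussian_kernel(x, y, kernel_length=0.1):
    # Same formula as GaussianKernel.call: exp(-sum((x - y)^2) / kernel_length^2)
    diff = np.asarray(x, dtype=float) - np.asarray(y, dtype=float)
    return np.exp(-np.sum(diff ** 2) / kernel_length ** 2)

# Two predictions 0.05 apart interact strongly with kernel_length=0.1,
# but are essentially independent with a much smaller kernel length.
print(gaussian_kernel(0.40, 0.45, kernel_length=0.1))   # ~0.78
print(gaussian_kernel(0.40, 0.45, kernel_length=0.01))  # ~1e-11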
|
tensorflow/model-remediation
|
tensorflow_model_remediation/min_diff/losses/kernels/gaussian_kernel.py
|
Python
|
apache-2.0
| 2,796
|
[
"Gaussian"
] |
9c2479f693c761a2da7bf9aabfb1a19ee60b0c5d42389212501774c8f053356a
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Licence: BSD 3 clause
from __future__ import division
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import array2d, check_random_state
from ..utils.validation import check_arrays
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
max_leaf_nodes,
random_state):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_mask=None, X_argsorted=None, check_input=True,
sample_weight=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples. Use ``dtype=np.float32`` for maximum
efficiency.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
# Deprecations
if sample_mask is not None:
warn("The sample_mask parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if X_argsorted is not None:
warn("The X_argsorted parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
# Convert data
if check_input:
X, = check_arrays(X, dtype=DTYPE, sparse_format="dense")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity, which
# [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf, max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf, max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`max_features_` : int,
The inferred value of max_features.
`classes_` : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
`n_classes_` : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
`feature_importances_` : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
random_state=None,
min_density=None,
compute_importances=None,
max_leaf_nodes=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
def predict_proba(self, X):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in xrange(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`max_features_` : int,
The inferred value of max_features.
`feature_importances_` : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
random_state=None,
min_density=None,
compute_importances=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features="auto",
random_state=None,
min_density=None,
compute_importances=None,
max_leaf_nodes=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features="auto",
random_state=None,
min_density=None,
compute_importances=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
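# The extra-tree docstrings above describe drawing one random split per
# candidate feature and recommend using these estimators only inside an
# ensemble. A minimal sketch of that usage, wrapped in a function so it has
# no import-time side effects (the synthetic data and the BaggingRegressor
# wrapper are assumptions for illustration, not part of this module; the
# parameter names follow the 0.15-era API):
def _extra_tree_ensemble_sketch():
    from sklearn.ensemble import BaggingRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(200, 5)
    y = X[:, 0] + 0.1 * rng.randn(200)
    # max_features=1 gives the totally random splits mentioned above.
    ensemble = BaggingRegressor(
        base_estimator=ExtraTreeRegressor(max_features=1, random_state=0),
        n_estimators=25, random_state=0)
    ensemble.fit(X, y)
    return ensemble.score(X, y)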
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/tree/tree.py
|
Python
|
apache-2.0
| 31,383
|
[
"Brian"
] |
c857f8eaf03ba590dc751d4c5e0869fa4a7d2cecc161b8fc4d415cd680a6404a
|
# Author: Samuel Genheden samuel.genheden@gmail.com
"""
This script uses the encore library to compute the harmonic ensemble
similarity (HES) metric between the two halves of a trajectory.
"""
import copy
import argparse
import logging
import numpy as np
import MDAnalysis as md
import encore
from encore.similarity import harmonic_ensemble_similarity, bootstrap_coordinates
class MyEnsemble(encore.Ensemble) :
def get_coordinates(self, subset_selection_string=None, firsthalf=True):
if not subset_selection_string:
subset_selection_string = self.atom_selection_string
subset_selection = self.universe.select_atoms(subset_selection_string)
if len(subset_selection) == 0:
logging.error("ERROR: selection \'%s\' not found in topology."% subset_selection_string)
exit(1)
try:
subset_coordinates = self.universe.trajectory.timeseries(subset_selection, skip=self.frame_interval, format='fac')
except:
n_coordinates = 0
k = 0
for i,time_step in enumerate(self.universe.trajectory):
if (i % self.frame_interval) == 0:
n_coordinates += 1
            subset_coordinates = np.zeros(tuple([n_coordinates]) + subset_selection.coordinates().shape)
for i, time_step in enumerate(self.universe.trajectory):
if (i % self.frame_interval) == 0:
subset_coordinates[k] = subset_selection.coordinates(time_step)
k+=1
n = int(0.5*subset_coordinates.shape[0])
if firsthalf :
return subset_coordinates[:n,:]
else :
return subset_coordinates[n:,:]
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description="Program to calculate HES metric on the two halves of a trajectory")
argparser.add_argument('-s','--struct',help="the filename of a PDB file")
argparser.add_argument('-f','--file',help="the DCD trajectory")
argparser.add_argument('-o','--out',help="output prefix")
args = argparser.parse_args()
nboots = 500
uni = md.Universe(args.struct, args.file)
ensemble = encore.Ensemble(uni, trajectory=args.file)
n = int(0.5*ensemble.coordinates.shape[0])
ensemble1 = copy.copy(ensemble)
ensemble1.coordinates = ensemble.coordinates[:n,:]
bootcoord1 = bootstrap_coordinates(ensemble1.coordinates, nboots)
sigma1 = encore.covariance_matrix(ensemble1)
ensemble2 = copy.copy(ensemble)
ensemble2.coordinates = ensemble.coordinates[n:,:]
bootcoord2 = bootstrap_coordinates(ensemble2.coordinates, nboots)
sigma2 = encore.covariance_matrix(ensemble2)
boots = []
for c1, c2 in zip(bootcoord1, bootcoord2):
x1 = np.average(c1, axis=0).flatten()
x2 = np.average(c2, axis=0).flatten()
boots.append(harmonic_ensemble_similarity(x1=x1, x2=x2,
sigma1=sigma1, sigma2=sigma2))
boots = np.asarray(boots)
print "%.3f\t%.3f"%(boots.mean(), boots.std())
|
SGenheden/Scripts
|
Projects/Orderparam/calc_heshalf.py
|
Python
|
mit
| 2,936
|
[
"MDAnalysis"
] |
2f13fc50f353e89bde622145ecdf6f97bbf22b29f4635c0b12887c9c5ccf45a6
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.conf.urls import include, url
from karaage.people.views import groups
urlpatterns = [
url(r'^$', groups.group_list, name='kg_group_list'),
url(r'^add/$', groups.add_group, name='kg_group_add'),
url(r'^detail/(?P<group_name>%s)/' % settings.GROUP_VALIDATION_RE,
include('karaage.people.urls.group_detail')),
]
|
brianmay/karaage
|
karaage/people/urls/groups.py
|
Python
|
gpl-3.0
| 1,116
|
[
"Brian"
] |
ba65a069bbed9220673bdd2e634dcbe8a51a8fd51c80e833017e8050ed4c4275
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os, subprocess
import dialog
from io import BytesIO
from zeroinstall import _
from zeroinstall.injector import model, selections, qdom
from gui import glib
XMLNS_0COMPILE = 'http://zero-install.sourceforge.net/2006/namespaces/0compile'
class Command(object):
def __init__(self):
self.child = None
self.error = b""
self.stdout = b""
self.watched_streams = 0
def run(self, command, success, get_stdout = False):
assert self.child is None
self.success = success
if get_stdout:
self.child = subprocess.Popen(command,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
glib.io_add_watch(self.child.stdout, glib.IO_IN | glib.IO_HUP, self.got_stdout)
glib.io_add_watch(self.child.stderr, glib.IO_IN | glib.IO_HUP, self.got_errors)
self.watched_streams = 2
else:
self.child = subprocess.Popen(command,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
glib.io_add_watch(self.child.stdout, glib.IO_IN | glib.IO_HUP, self.got_errors)
self.watched_streams = 1
def got_stdout(self, src, cond):
data = os.read(src.fileno(), 100)
if data:
self.stdout += data
return True
else:
self.done()
return False
def done(self):
self.watched_streams -= 1
if self.watched_streams == 0:
status = self.child.wait()
self.child = None
if status == 0:
self.success(self.stdout)
else:
if status == 1 and not self.error:
return False # Cancelled
dialog.alert(None, _("Command failed with exit code %(status)d:\n%(error)s\n") %
{'status': status, 'error': self.error})
def got_errors(self, src, cond):
data = os.read(src.fileno(), 100)
if data:
self.error += data
return True
else:
self.done()
return False
def compile(on_success, interface_uri, autocompile = False):
our_min_version = '0.18' # The oldest version of 0compile we support
def build(selections_xml):
# Get the chosen versions
sels = selections.Selections(qdom.parse(BytesIO(selections_xml)))
impl = sels.selections[interface_uri]
min_version = impl.attrs.get(XMLNS_0COMPILE + ' min-version', our_min_version)
# Check the syntax is valid and the version is high enough
if model.parse_version(min_version) < model.parse_version(our_min_version):
min_version = our_min_version
# Do the whole build-and-register-feed
c = Command()
c.run(("0launch",
'--message', _('Download the 0compile tool, to compile the source code'),
'--not-before=' + min_version,
"http://0install.net/2006/interfaces/0compile.xml",
'gui',
interface_uri), lambda unused: on_success())
if autocompile:
c = Command()
c.run(("0launch",
'--message', 'Download the 0compile tool, to compile the source code',
'--not-before=' + our_min_version,
"http://0install.net/2006/interfaces/0compile.xml",
'autocompile',
'--gui',
interface_uri), lambda unused: on_success())
else:
# Prompt user to choose source version
c = Command()
c.run(['0install', 'download', '--xml',
'--message', _('Download the source code to be compiled'),
'--gui', '--source', '--', interface_uri], build, get_stdout = True)
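# build() above never lets the feed's requested min-version drop below the
# oldest 0compile release this GUI supports. A minimal sketch of that floor
# logic with a toy dotted-number parser (an assumption for illustration; the
# real code relies on zeroinstall.injector.model.parse_version, which also
# understands pre/post-release modifiers):
def _clamp_min_version(requested, floor = '0.18'):
    def parse(version):
        return tuple(int(part) for part in version.split('.'))
    return requested if parse(requested) >= parse(floor) else floor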
|
rammstein/0install
|
zeroinstall/0launch-gui/compile.py
|
Python
|
lgpl-2.1
| 3,234
|
[
"VisIt"
] |
f7513fe9f034d609c46216409e3d68c0ac5d8682c86745d23fc0dbdbf2842f38
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.utils import translation
import pytest
from mock import Mock, patch
from pyquery import PyQuery as pq
from olympia import amo
from olympia.activity.models import ActivityLog, ActivityLogToken
from olympia.addons.models import (
Addon, AddonApprovalsCounter, AddonReviewerFlags)
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import TestCase, file_factory, version_factory
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import send_mail
from olympia.files.models import File
from olympia.reviewers.models import AutoApprovalSummary, ReviewerScore
from olympia.reviewers.utils import (
PENDING_STATUSES, ReviewAddon, ReviewFiles, ReviewHelper,
ViewPendingQueueTable, ViewUnlistedAllListTable)
from olympia.tags.models import Tag
from olympia.users.models import UserProfile
pytestmark = pytest.mark.django_db
REVIEW_FILES_STATUSES = (amo.STATUS_PUBLIC, amo.STATUS_DISABLED)
class TestViewPendingQueueTable(TestCase):
def setUp(self):
super(TestViewPendingQueueTable, self).setUp()
self.table = ViewPendingQueueTable([])
def test_addon_name(self):
row = Mock()
page = Mock()
page.start_index = Mock()
page.start_index.return_value = 1
row.addon_name = 'フォクすけといっしょ'.decode('utf8')
row.addon_slug = 'test'
row.latest_version = u'0.12'
self.table.set_page(page)
a = pq(self.table.render_addon_name(row))
assert a.attr('href') == (
reverse('reviewers.review', args=[str(row.addon_slug)]))
assert a.text() == "フォクすけといっしょ 0.12".decode('utf8')
def test_addon_type_id(self):
row = Mock()
row.addon_type_id = amo.ADDON_THEME
assert unicode(self.table.render_addon_type_id(row)) == (
u'Complete Theme')
def test_waiting_time_in_days(self):
row = Mock()
row.waiting_time_days = 10
row.waiting_time_hours = 10 * 24
assert self.table.render_waiting_time_min(row) == u'10 days'
def test_waiting_time_one_day(self):
row = Mock()
row.waiting_time_days = 1
row.waiting_time_hours = 24
row.waiting_time_min = 60 * 24
assert self.table.render_waiting_time_min(row) == u'1 day'
def test_waiting_time_in_hours(self):
row = Mock()
row.waiting_time_days = 0
row.waiting_time_hours = 22
row.waiting_time_min = 60 * 22
assert self.table.render_waiting_time_min(row) == u'22 hours'
def test_waiting_time_in_min(self):
row = Mock()
row.waiting_time_days = 0
row.waiting_time_hours = 0
row.waiting_time_min = 11
assert self.table.render_waiting_time_min(row) == u'11 minutes'
def test_waiting_time_in_secs(self):
row = Mock()
row.waiting_time_days = 0
row.waiting_time_hours = 0
row.waiting_time_min = 0
assert self.table.render_waiting_time_min(row) == u'moments ago'
def test_flags(self):
row = Mock()
row.flags = [('admin-review', 'Admin Review')]
doc = pq(self.table.render_flags(row))
assert doc('div.ed-sprite-admin-review').length
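# The waiting-time tests above expect the column renderer to fall back from
# days to hours to minutes to "moments ago". A minimal sketch of that
# selection logic (illustrative only; the real renderer is
# ViewPendingQueueTable.render_waiting_time_min, imported above from
# olympia.reviewers.utils):
def _waiting_time_label_sketch(days, hours, minutes):
    if days:
        return u'%d day%s' % (days, 's' if days != 1 else '')
    if hours:
        return u'%d hour%s' % (hours, 's' if hours != 1 else '')
    if minutes:
        return u'%d minute%s' % (minutes, 's' if minutes != 1 else '')
    return u'moments ago'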
class TestUnlistedViewAllListTable(TestCase):
def setUp(self):
super(TestUnlistedViewAllListTable, self).setUp()
self.table = ViewUnlistedAllListTable([])
def test_addon_name(self):
row = Mock()
page = Mock()
page.start_index = Mock()
page.start_index.return_value = 1
row.addon_name = 'フォクすけといっしょ'.decode('utf8')
row.addon_slug = 'test'
row.latest_version = u'0.12'
self.table.set_page(page)
a = pq(self.table.render_addon_name(row))
assert (a.attr('href') == reverse(
'reviewers.review', args=['unlisted', str(row.addon_slug)]))
assert a.text() == 'フォクすけといっしょ 0.12'.decode('utf8')
def test_last_review(self):
row = Mock()
row.review_version_num = u'0.34.3b'
row.review_date = u'2016-01-01'
doc = pq(self.table.render_review_date(row))
assert doc.text() == u'0.34.3b on 2016-01-01'
def test_no_review(self):
row = Mock()
row.review_version_num = None
row.review_date = None
doc = pq(self.table.render_review_date(row))
assert doc.text() == u'No Reviews'
def test_authors_few(self):
row = Mock()
row.authors = [(123, 'bob'), (456, 'steve')]
doc = pq(self.table.render_authors(row))
assert doc('span').text() == 'bob steve'
assert doc('span a:eq(0)').attr('href') == UserProfile.create_user_url(
123, username='bob')
assert doc('span a:eq(1)').attr('href') == UserProfile.create_user_url(
456, username='steve')
assert doc('span').attr('title') == 'bob steve'
def test_authors_four(self):
row = Mock()
row.authors = [(123, 'bob'), (456, 'steve'), (789, 'cvan'),
(999, 'basta')]
doc = pq(self.table.render_authors(row))
assert doc.text() == 'bob steve cvan ...'
assert doc('span a:eq(0)').attr('href') == UserProfile.create_user_url(
123, username='bob')
assert doc('span a:eq(1)').attr('href') == UserProfile.create_user_url(
456, username='steve')
assert doc('span a:eq(2)').attr('href') == UserProfile.create_user_url(
789, username='cvan')
assert doc('span').attr('title') == 'bob steve cvan basta', doc.html()
yesterday = datetime.today() - timedelta(days=1)
class TestReviewHelper(TestCase):
fixtures = ['base/addon_3615', 'base/users']
preamble = 'Mozilla Add-ons: Delicious Bookmarks 2.1.072'
def setUp(self):
super(TestReviewHelper, self).setUp()
class FakeRequest:
user = UserProfile.objects.get(pk=10482)
self.request = FakeRequest()
self.addon = Addon.objects.get(pk=3615)
self.version = self.addon.versions.all()[0]
self.helper = self.get_helper()
self.file = self.version.files.all()[0]
self.create_paths()
def _check_score(self, reviewed_type, bonus=0):
scores = ReviewerScore.objects.all()
assert len(scores) > 0
assert scores[0].score == amo.REVIEWED_SCORES[reviewed_type] + bonus
assert scores[0].note_key == reviewed_type
def create_paths(self):
if not storage.exists(self.file.file_path):
with storage.open(self.file.file_path, 'w') as f:
f.write('test data\n')
def get_data(self):
return {'comments': 'foo', 'addon_files': self.version.files.all(),
'action': 'public', 'operating_systems': 'osx',
'applications': 'Firefox',
'info_request': self.addon.pending_info_request}
def get_helper(self, content_review_only=False):
return ReviewHelper(
request=self.request, addon=self.addon, version=self.version,
content_review_only=content_review_only)
def setup_type(self, status):
self.addon.update(status=status)
return self.get_helper().handler.review_type
def check_log_count(self, id):
return (ActivityLog.objects.for_addons(self.helper.addon)
.filter(action=id).count())
def test_no_request(self):
self.request = None
helper = self.get_helper()
assert helper.content_review_only is False
assert helper.actions == {}
helper = self.get_helper(content_review_only=True)
assert helper.content_review_only is True
assert helper.actions == {}
def test_type_nominated(self):
assert self.setup_type(amo.STATUS_NOMINATED) == 'nominated'
def test_type_pending(self):
assert self.setup_type(amo.STATUS_PENDING) == 'pending'
assert self.setup_type(amo.STATUS_NULL) == 'pending'
assert self.setup_type(amo.STATUS_PUBLIC) == 'pending'
assert self.setup_type(amo.STATUS_DISABLED) == 'pending'
def test_no_version(self):
helper = ReviewHelper(
request=self.request, addon=self.addon, version=None)
assert helper.handler.review_type == 'pending'
def test_review_files(self):
version_factory(addon=self.addon,
created=self.version.created - timedelta(days=1),
file_kw={'status': amo.STATUS_PUBLIC})
for status in REVIEW_FILES_STATUSES:
self.setup_data(status=status)
assert self.helper.handler.__class__ == ReviewFiles
def test_review_addon(self):
self.setup_data(status=amo.STATUS_NOMINATED)
assert self.helper.handler.__class__ == ReviewAddon
def test_process_action_none(self):
self.helper.set_data({'action': 'foo'})
self.assertRaises(self.helper.process)
def test_process_action_good(self):
self.helper.set_data({'action': 'reply', 'comments': 'foo'})
self.helper.process()
assert len(mail.outbox) == 1
def test_action_details(self):
for status in Addon.STATUS_CHOICES:
self.addon.update(status=status)
helper = self.get_helper()
actions = helper.actions
for k, v in actions.items():
assert unicode(v['details']), "Missing details for: %s" % k
def get_review_actions(
self, addon_status, file_status, content_review_only=False):
self.file.update(status=file_status)
self.addon.update(status=addon_status)
# Need to clear self.version.all_files cache since we updated the file.
if self.version:
del self.version.all_files
return self.get_helper(content_review_only=content_review_only).actions
def test_actions_full_nominated(self):
expected = ['public', 'reject', 'reply', 'super', 'comment']
assert self.get_review_actions(
addon_status=amo.STATUS_NOMINATED,
file_status=amo.STATUS_AWAITING_REVIEW).keys() == expected
def test_actions_full_update(self):
expected = ['public', 'reject', 'reply', 'super', 'comment']
assert self.get_review_actions(
addon_status=amo.STATUS_PUBLIC,
file_status=amo.STATUS_AWAITING_REVIEW).keys() == expected
def test_actions_full_nonpending(self):
expected = ['reply', 'super', 'comment']
f_statuses = [amo.STATUS_PUBLIC, amo.STATUS_DISABLED]
for file_status in f_statuses:
assert self.get_review_actions(
addon_status=amo.STATUS_PUBLIC,
file_status=file_status).keys() == expected
def test_actions_public_post_reviewer(self):
self.grant_permission(self.request.user, 'Addons:PostReview')
expected = ['reject_multiple_versions', 'reply', 'super', 'comment']
assert self.get_review_actions(
addon_status=amo.STATUS_PUBLIC,
file_status=amo.STATUS_PUBLIC).keys() == expected
# Now make current version auto-approved...
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
expected = ['confirm_auto_approved', 'reject_multiple_versions',
'reply', 'super', 'comment']
assert self.get_review_actions(
addon_status=amo.STATUS_PUBLIC,
file_status=amo.STATUS_PUBLIC).keys() == expected
def test_actions_content_review(self):
self.grant_permission(self.request.user, 'Addons:ContentReview')
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
expected = ['confirm_auto_approved', 'reject_multiple_versions',
'reply', 'super', 'comment']
assert self.get_review_actions(
addon_status=amo.STATUS_PUBLIC,
file_status=amo.STATUS_PUBLIC,
content_review_only=True).keys() == expected
def test_actions_public_static_theme(self):
# Having Addons:PostReview and dealing with a public add-on would
# normally be enough to give you access to reject multiple versions
# action, but it should not be available for static themes.
self.grant_permission(self.request.user, 'Addons:PostReview')
self.addon.update(type=amo.ADDON_STATICTHEME)
expected = ['public', 'reject', 'reply', 'super', 'comment']
assert self.get_review_actions(
addon_status=amo.STATUS_PUBLIC,
file_status=amo.STATUS_AWAITING_REVIEW).keys() == expected
def test_actions_no_version(self):
"""Deleted addons and addons with no versions in that channel have no
version set."""
expected = ['comment']
self.version = None
assert self.get_review_actions(
addon_status=amo.STATUS_PUBLIC,
file_status=amo.STATUS_PUBLIC).keys() == expected
def test_set_files(self):
self.file.update(datestatuschanged=yesterday)
self.helper.set_data({'addon_files': self.version.files.all()})
self.helper.handler.set_files(amo.STATUS_PUBLIC,
self.helper.handler.data['addon_files'])
self.file = self.version.files.all()[0]
assert self.file.status == amo.STATUS_PUBLIC
assert self.file.datestatuschanged.date() > yesterday.date()
def test_logs(self):
self.helper.set_data({'comments': 'something'})
self.helper.handler.log_action(amo.LOG.APPROVE_VERSION)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
def test_notify_email(self):
self.helper.set_data(self.get_data())
base_fragment = 'To respond, please reply to this email or visit'
user = self.addon.listed_authors[0]
ActivityLogToken.objects.create(version=self.version, user=user)
uuid = self.version.token.get(user=user).uuid.hex
reply_email = (
'reviewreply+%s@%s' % (uuid, settings.INBOUND_EMAIL_DOMAIN))
for template in ('nominated_to_sandbox', 'pending_to_public',
'pending_to_sandbox',):
mail.outbox = []
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert base_fragment in mail.outbox[0].body
assert mail.outbox[0].reply_to == [reply_email]
mail.outbox = []
# This one does not inherit from base.txt because it's for unlisted
# signing notification, which is not really something that necessitates
# reviewer interaction, so it's simpler.
template = 'unlisted_to_reviewed_auto'
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert base_fragment not in mail.outbox[0].body
assert mail.outbox[0].reply_to == [reply_email]
def test_email_links(self):
expected = {
'nominated_to_public': 'addon_url',
'nominated_to_sandbox': 'dev_versions_url',
'pending_to_public': 'addon_url',
'pending_to_sandbox': 'dev_versions_url',
'unlisted_to_reviewed_auto': 'dev_versions_url',
}
self.helper.set_data(self.get_data())
context_data = self.helper.handler.get_context_data()
for template, context_key in expected.iteritems():
mail.outbox = []
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert context_key in context_data
assert context_data.get(context_key) in mail.outbox[0].body
def setup_data(self, status, delete=None,
file_status=amo.STATUS_AWAITING_REVIEW,
channel=amo.RELEASE_CHANNEL_LISTED,
content_review_only=False, type=amo.ADDON_EXTENSION):
if delete is None:
delete = []
mail.outbox = []
ActivityLog.objects.for_addons(self.helper.addon).delete()
self.addon.update(status=status, type=type)
self.file.update(status=file_status)
if channel == amo.RELEASE_CHANNEL_UNLISTED:
self.make_addon_unlisted(self.addon)
self.version.reload()
self.file.reload()
self.helper = self.get_helper(content_review_only=content_review_only)
data = self.get_data().copy()
for key in delete:
del data[key]
self.helper.set_data(data)
def test_send_reviewer_reply(self):
assert not self.addon.pending_info_request
self.setup_data(amo.STATUS_PUBLIC, ['addon_files'])
self.helper.handler.reviewer_reply()
assert not self.addon.pending_info_request
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == self.preamble
assert self.check_log_count(amo.LOG.REVIEWER_REPLY_VERSION.id) == 1
def test_request_more_information(self):
self.setup_data(amo.STATUS_PUBLIC, ['addon_files'])
self.helper.handler.data['info_request'] = True
self.helper.handler.reviewer_reply()
self.assertCloseToNow(
self.addon.pending_info_request,
now=datetime.now() + timedelta(days=7))
assert len(mail.outbox) == 1
assert (
mail.outbox[0].subject ==
'Mozilla Add-ons: Action Required for Delicious Bookmarks 2.1.072')
assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1
def test_request_more_information_custom_deadline(self):
self.setup_data(amo.STATUS_PUBLIC, ['addon_files'])
self.helper.handler.data['info_request'] = True
self.helper.handler.data['info_request_deadline'] = 42
self.helper.handler.reviewer_reply()
self.assertCloseToNow(
self.addon.pending_info_request,
now=datetime.now() + timedelta(days=42))
assert len(mail.outbox) == 1
assert (
mail.outbox[0].subject ==
'Mozilla Add-ons: Action Required for Delicious Bookmarks 2.1.072')
assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1
def test_request_more_information_reset_notified_flag(self):
self.setup_data(amo.STATUS_PUBLIC, ['addon_files'])
flags = AddonReviewerFlags.objects.create(
addon=self.addon,
pending_info_request=datetime.now() - timedelta(days=1),
notified_about_expiring_info_request=True)
self.helper.handler.data['info_request'] = True
self.helper.handler.reviewer_reply()
flags.reload()
self.assertCloseToNow(
flags.pending_info_request,
now=datetime.now() + timedelta(days=7))
assert not flags.notified_about_expiring_info_request
assert len(mail.outbox) == 1
assert (
mail.outbox[0].subject ==
'Mozilla Add-ons: Action Required for Delicious Bookmarks 2.1.072')
assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1
def test_request_more_information_deleted_addon(self):
self.addon.delete()
self.test_request_more_information()
def test_email_no_locale(self):
self.addon.name = {
'es': '¿Dónde está la biblioteca?'
}
self.setup_data(amo.STATUS_NOMINATED, ['addon_files'])
with translation.override('es'):
assert translation.get_language() == 'es'
self.helper.handler.process_public()
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
u'Mozilla Add-ons: Delicious Bookmarks 2.1.072 Approved')
assert '/en-US/firefox/addon/a3615' not in mail.outbox[0].body
assert '/es/firefox/addon/a3615' not in mail.outbox[0].body
assert '/addon/a3615' in mail.outbox[0].body
assert 'Your add-on, Delicious Bookmarks ' in mail.outbox[0].body
def test_nomination_to_public_no_files(self):
self.setup_data(amo.STATUS_NOMINATED, ['addon_files'])
self.helper.handler.process_public()
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_PUBLIC)
def test_nomination_to_public_and_current_version(self):
self.setup_data(amo.STATUS_NOMINATED, ['addon_files'])
self.addon = Addon.objects.get(pk=3615)
self.addon.update(_current_version=None)
assert not self.addon.current_version
self.helper.handler.process_public()
self.addon = Addon.objects.get(pk=3615)
assert self.addon.current_version
def test_nomination_to_public_new_addon(self):
""" Make sure new add-ons can be made public (bug 637959) """
status = amo.STATUS_NOMINATED
self.setup_data(status)
# Make sure we have no public files
for version in self.addon.versions.all():
version.files.update(status=amo.STATUS_AWAITING_REVIEW)
self.helper.handler.process_public()
# Re-fetch the add-on
addon = Addon.objects.get(pk=3615)
assert addon.status == amo.STATUS_PUBLIC
assert addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_PUBLIC)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == '%s Approved' % self.preamble
# AddonApprovalsCounter counter is now at 1 for this addon since there
# was a human review.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 1
self.assertCloseToNow(approval_counter.last_human_review)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_FULL)
        # It wasn't a webextension and wasn't signed by Mozilla, so it should
        # not receive the firefox57 tag.
assert self.addon.tags.all().count() == 0
@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_public(self, sign_mock):
sign_mock.reset()
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_PUBLIC
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_PUBLIC)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Approved' % self.preamble)
assert 'has been approved' in mail.outbox[0].body
# AddonApprovalsCounter counter is now at 1 for this addon.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 1
sign_mock.assert_called_with(self.file)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_FULL)
@patch('olympia.reviewers.utils.sign_file')
def test_old_nomination_to_public_bonus_score(self, sign_mock):
sign_mock.reset()
self.setup_data(amo.STATUS_NOMINATED)
self.version.update(nomination=self.days_ago(9))
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_PUBLIC
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_PUBLIC)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Approved' % self.preamble)
assert 'has been approved' in mail.outbox[0].body
# AddonApprovalsCounter counter is now at 1 for this addon.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 1
sign_mock.assert_called_with(self.file)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
# Score has bonus points added for reviewing an old add-on.
# 2 days over the limit = 4 points
self._check_score(amo.REVIEWED_ADDON_FULL, bonus=4)
@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_public_no_request(self, sign_mock):
self.request = None
sign_mock.reset()
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_PUBLIC
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_PUBLIC)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Approved' % self.preamble)
assert 'has been approved' in mail.outbox[0].body
# AddonApprovalsCounter counter is now at 0 for this addon since there
# was an automatic approval.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 0
# Since approval counter did not exist for this add-on before, the last
# human review field should be empty.
assert approval_counter.last_human_review is None
sign_mock.assert_called_with(self.file)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
# No request, no user, therefore no score.
assert ReviewerScore.objects.count() == 0
@patch('olympia.reviewers.utils.sign_file')
def test_public_addon_with_version_awaiting_review_to_public(
self, sign_mock):
sign_mock.reset()
self.addon.current_version.update(created=self.days_ago(1))
self.version = version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
version='3.0.42',
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.preamble = 'Mozilla Add-ons: Delicious Bookmarks 3.0.42'
self.file = self.version.files.all()[0]
self.setup_data(amo.STATUS_PUBLIC)
self.create_paths()
AddonApprovalsCounter.objects.create(
addon=self.addon, counter=1, last_human_review=self.days_ago(42))
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.status == amo.STATUS_AWAITING_REVIEW
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_PUBLIC)
self.helper.handler.process_public()
self.addon.reload()
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.reload().status == amo.STATUS_PUBLIC
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_PUBLIC)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Updated' % self.preamble)
assert 'has been updated' in mail.outbox[0].body
# AddonApprovalsCounter counter is now at 2 for this addon since there
# was another human review. The last human review date should have been
# updated.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 2
self.assertCloseToNow(approval_counter.last_human_review)
sign_mock.assert_called_with(self.file)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_UPDATE)
        # It wasn't a webextension and wasn't signed by Mozilla, so it should
        # not receive the firefox57 tag.
assert self.addon.tags.all().count() == 0
@patch('olympia.reviewers.utils.sign_file')
def test_public_addon_with_version_awaiting_review_to_sandbox(
self, sign_mock):
sign_mock.reset()
self.addon.current_version.update(created=self.days_ago(1))
self.version = version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
version='3.0.42',
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.preamble = 'Mozilla Add-ons: Delicious Bookmarks 3.0.42'
self.file = self.version.files.all()[0]
self.setup_data(amo.STATUS_PUBLIC)
self.create_paths()
AddonApprovalsCounter.objects.create(addon=self.addon, counter=1)
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.status == amo.STATUS_AWAITING_REVIEW
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_PUBLIC)
self.helper.handler.process_sandbox()
self.addon.reload()
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.reload().status == amo.STATUS_DISABLED
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_PUBLIC)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
"%s didn't pass review" % self.preamble)
assert 'reviewed and did not meet the criteria' in mail.outbox[0].body
# AddonApprovalsCounter counter is still at 1 for this addon.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 1
assert not sign_mock.called
assert storage.exists(self.file.guarded_file_path)
assert not storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_UPDATE)
def test_public_addon_confirm_auto_approval(self):
self.grant_permission(self.request.user, 'Addons:PostReview')
self.setup_data(amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC)
summary = AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED, weight=151)
assert summary.confirmed is None
self.create_paths()
# Safeguards.
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.status == amo.STATUS_PUBLIC
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_PUBLIC)
self.helper.handler.confirm_auto_approved()
summary.reload()
assert summary.confirmed is True
approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
self.assertCloseToNow(approvals_counter.last_human_review)
assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 0
assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
activity = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
.get())
assert activity.arguments == [self.addon, self.version]
assert activity.details['comments'] == ''
# Check points awarded.
self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)
def test_public_with_unreviewed_version_addon_confirm_auto_approval(self):
self.grant_permission(self.request.user, 'Addons:PostReview')
self.setup_data(amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC)
self.current_version = self.version
summary = AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED, weight=152)
self.version = version_factory(
addon=self.addon, version='3.0',
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.file = self.version.files.all()[0]
self.helper = self.get_helper() # To make it pick up the new version.
self.helper.set_data(self.get_data())
# Confirm approval action should be available even if the latest
        # version is not public; what we care about is the current_version.
assert 'confirm_auto_approved' in self.helper.actions
self.helper.handler.confirm_auto_approved()
summary.reload()
assert summary.confirmed is True
approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
self.assertCloseToNow(approvals_counter.last_human_review)
assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 0
assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
activity = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
.get())
assert activity.arguments == [self.addon, self.current_version]
assert activity.details['comments'] == ''
# Check points awarded.
self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)
def test_public_with_disabled_version_addon_confirm_auto_approval(self):
self.grant_permission(self.request.user, 'Addons:PostReview')
self.setup_data(amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC)
self.current_version = self.version
summary = AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED, weight=153)
self.version = version_factory(
addon=self.addon, version='3.0',
file_kw={'status': amo.STATUS_DISABLED})
self.file = self.version.files.all()[0]
self.helper = self.get_helper() # To make it pick up the new version.
self.helper.set_data(self.get_data())
# Confirm approval action should be available even if the latest
        # version is not public; what we care about is the current_version.
assert 'confirm_auto_approved' in self.helper.actions
self.helper.handler.confirm_auto_approved()
summary.reload()
assert summary.confirmed is True
approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
self.assertCloseToNow(approvals_counter.last_human_review)
assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 0
assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
activity = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
.get())
assert activity.arguments == [self.addon, self.current_version]
assert activity.details['comments'] == ''
# Check points awarded.
self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)
def test_unlisted_version_addon_confirm_auto_approval(self):
self.grant_permission(self.request.user, 'Addons:ReviewUnlisted')
self.setup_data(amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC)
AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED)
self.version = version_factory(
addon=self.addon, version='3.0',
channel=amo.RELEASE_CHANNEL_UNLISTED)
self.file = self.version.files.all()[0]
self.helper = self.get_helper() # To make it pick up the new version.
self.helper.set_data(self.get_data())
# Confirm approval action should be available since the version
# we are looking at is unlisted and reviewer has permission.
assert 'confirm_auto_approved' in self.helper.actions
self.helper.handler.confirm_auto_approved()
assert (
AddonApprovalsCounter.objects.filter(addon=self.addon).count() ==
0) # Not incremented since it was unlisted.
assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
activity = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
.get())
assert activity.arguments == [self.addon, self.version]
@patch('olympia.reviewers.utils.sign_file')
def test_null_to_public_unlisted(self, sign_mock):
sign_mock.reset()
self.setup_data(amo.STATUS_NULL,
channel=amo.RELEASE_CHANNEL_UNLISTED)
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_NULL
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_PUBLIC)
# AddonApprovalsCounter was not touched since the version we made
# public is unlisted.
assert not AddonApprovalsCounter.objects.filter(
addon=self.addon).exists()
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s signed and ready to download' % self.preamble)
assert ('%s is now signed and ready for you to download' %
self.version.version in mail.outbox[0].body)
assert 'You received this email because' not in mail.outbox[0].body
sign_mock.assert_called_with(self.file)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_public_failed_signing(self, sign_mock):
sign_mock.side_effect = Exception
sign_mock.reset()
self.setup_data(amo.STATUS_NOMINATED)
with self.assertRaises(Exception):
self.helper.handler.process_public()
# AddonApprovalsCounter was not touched since we failed signing.
assert not AddonApprovalsCounter.objects.filter(
addon=self.addon).exists()
# Status unchanged.
assert self.addon.status == amo.STATUS_NOMINATED
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_AWAITING_REVIEW)
assert len(mail.outbox) == 0
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 0
@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_sandbox(self, sign_mock):
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_sandbox()
assert self.addon.status == amo.STATUS_NULL
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_DISABLED)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s didn\'t pass review' % self.preamble)
assert 'did not meet the criteria' in mail.outbox[0].body
# AddonApprovalsCounter was not touched since we didn't approve.
assert not AddonApprovalsCounter.objects.filter(
addon=self.addon).exists()
assert not sign_mock.called
assert storage.exists(self.file.guarded_file_path)
assert not storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
@patch('olympia.reviewers.utils.sign_file',
lambda *a, **kw: None)
def test_nomination_to_public_webextension(self):
self.file.update(is_webextension=True)
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_public()
assert (
set(self.addon.tags.all().values_list('tag_text', flat=True)) ==
set(['firefox57']))
@patch('olympia.reviewers.utils.sign_file',
lambda *a, **kw: None)
def test_nomination_to_public_mozilla_signed_extension(self):
"""Test that the firefox57 tag is applied to mozilla signed add-ons"""
self.file.update(is_mozilla_signed_extension=True)
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_public()
assert (
set(self.addon.tags.all().values_list('tag_text', flat=True)) ==
set(['firefox57']))
@patch('olympia.reviewers.utils.sign_file',
lambda *a, **kw: None)
def test_public_to_public_already_had_webextension_tag(self):
self.file.update(is_webextension=True)
Tag(tag_text='firefox57').save_tag(self.addon)
assert (
set(self.addon.tags.all().values_list('tag_text', flat=True)) ==
set(['firefox57']))
self.addon.current_version.update(created=self.days_ago(1))
self.version = version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
version='3.0.42',
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.file = self.version.files.all()[0]
self.setup_data(amo.STATUS_PUBLIC)
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.status == amo.STATUS_AWAITING_REVIEW
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_PUBLIC)
self.helper.handler.process_public()
assert (
set(self.addon.tags.all().values_list('tag_text', flat=True)) ==
set(['firefox57']))
def test_email_unicode_monster(self):
self.addon.name = u'TaobaoShopping淘宝网导航按钮'
self.addon.save()
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_sandbox()
assert u'TaobaoShopping淘宝网导航按钮' in mail.outbox[0].subject
def test_nomination_to_super_review(self):
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_code_review
assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_CODE.id) == 1
def test_auto_approved_admin_code_review(self):
self.setup_data(amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC)
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_code_review
assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_CODE.id) == 1
def test_auto_approved_admin_content_review(self):
self.setup_data(amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC,
content_review_only=True)
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_content_review
assert self.check_log_count(
amo.LOG.REQUEST_ADMIN_REVIEW_CONTENT.id) == 1
def test_auto_approved_admin_theme_review(self):
self.setup_data(amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC,
type=amo.ADDON_STATICTHEME)
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_theme_review
assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_THEME.id) == 1
def test_nomination_to_super_review_and_escalate(self):
self.setup_data(amo.STATUS_NOMINATED)
self.file.update(status=amo.STATUS_AWAITING_REVIEW)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_code_review
assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_CODE.id) == 1
def test_operating_system_present(self):
self.setup_data(amo.STATUS_PUBLIC)
self.helper.handler.process_sandbox()
assert 'Tested on osx with Firefox' in mail.outbox[0].body
def test_operating_system_not_present(self):
self.setup_data(amo.STATUS_PUBLIC)
data = self.get_data().copy()
data['operating_systems'] = ''
self.helper.set_data(data)
self.helper.handler.process_sandbox()
assert 'Tested with Firefox' in mail.outbox[0].body
def test_application_not_present(self):
self.setup_data(amo.STATUS_PUBLIC)
data = self.get_data().copy()
data['applications'] = ''
self.helper.set_data(data)
self.helper.handler.process_sandbox()
assert 'Tested on osx' in mail.outbox[0].body
def test_both_not_present(self):
self.setup_data(amo.STATUS_PUBLIC)
data = self.get_data().copy()
data['applications'] = ''
data['operating_systems'] = ''
self.helper.set_data(data)
self.helper.handler.process_sandbox()
assert 'Tested' not in mail.outbox[0].body
def test_pending_to_super_review(self):
for status in PENDING_STATUSES:
self.setup_data(status)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_code_review
def test_nominated_review_time_set_version(self):
for process in ('process_sandbox', 'process_public'):
self.version.update(reviewed=None)
self.setup_data(amo.STATUS_NOMINATED)
getattr(self.helper.handler, process)()
assert self.version.reload().reviewed
def test_nominated_review_time_set_file(self):
for process in ('process_sandbox', 'process_public'):
self.file.update(reviewed=None)
self.setup_data(amo.STATUS_NOMINATED)
getattr(self.helper.handler, process)()
assert File.objects.get(pk=self.file.pk).reviewed
def test_review_unlisted_while_a_listed_version_is_awaiting_review(self):
self.make_addon_unlisted(self.addon)
self.version.reload()
version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.addon.update(status=amo.STATUS_NOMINATED)
assert self.get_helper()
def test_reject_multiple_versions(self):
old_version = self.version
self.version = version_factory(addon=self.addon, version='3.0')
AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED, weight=101)
# An extra file should not change anything.
file_factory(version=self.version, platform=amo.PLATFORM_LINUX.id)
self.setup_data(amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC)
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.status == amo.STATUS_PUBLIC
assert self.addon.current_version.is_public()
data = self.get_data().copy()
data['versions'] = self.addon.versions.all()
self.helper.set_data(data)
self.helper.handler.reject_multiple_versions()
self.addon.reload()
self.file.reload()
assert self.addon.status == amo.STATUS_NULL
assert self.addon.current_version is None
assert list(self.addon.versions.all()) == [self.version, old_version]
assert self.file.status == amo.STATUS_DISABLED
assert len(mail.outbox) == 1
assert mail.outbox[0].to == [self.addon.authors.all()[0].email]
assert mail.outbox[0].subject == (
u'Mozilla Add-ons: Delicious Bookmarks has been disabled on '
u'addons.mozilla.org')
assert ('your add-on Delicious Bookmarks has been disabled'
in mail.outbox[0].body)
log_token = ActivityLogToken.objects.get()
assert log_token.uuid.hex in mail.outbox[0].reply_to[0]
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 2
assert self.check_log_count(amo.LOG.REJECT_CONTENT.id) == 0
logs = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.REJECT_VERSION.id))
assert logs[0].created == logs[1].created
# Check points awarded.
self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)
def test_reject_multiple_versions_except_latest(self):
old_version = self.version
extra_version = version_factory(addon=self.addon, version='3.1')
# Add yet another version we don't want to reject.
self.version = version_factory(addon=self.addon, version='42.0')
AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED, weight=91)
self.setup_data(amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC)
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.status == amo.STATUS_PUBLIC
assert self.addon.current_version.is_public()
data = self.get_data().copy()
data['versions'] = self.addon.versions.all().exclude(
pk=self.version.pk)
self.helper.set_data(data)
self.helper.handler.reject_multiple_versions()
self.addon.reload()
self.file.reload()
# latest_version is still public so the add-on is still public.
assert self.addon.status == amo.STATUS_PUBLIC
assert self.addon.current_version == self.version
assert list(self.addon.versions.all().order_by('-pk')) == [
self.version, extra_version, old_version]
assert self.file.status == amo.STATUS_DISABLED
assert len(mail.outbox) == 1
assert mail.outbox[0].to == [self.addon.authors.all()[0].email]
assert mail.outbox[0].subject == (
u'Mozilla Add-ons: Versions disabled for Delicious Bookmarks')
assert ('Version(s) affected and disabled:\n3.1, 2.1.072'
in mail.outbox[0].body)
log_token = ActivityLogToken.objects.filter(
version=self.version).get()
assert log_token.uuid.hex in mail.outbox[0].reply_to[0]
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 2
assert self.check_log_count(amo.LOG.REJECT_CONTENT.id) == 0
# Check points awarded.
self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)
def test_reject_multiple_versions_content_review(self):
self.grant_permission(self.request.user, 'Addons:ContentReview')
old_version = self.version
self.version = version_factory(addon=self.addon, version='3.0')
self.setup_data(
amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC,
content_review_only=True)
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.status == amo.STATUS_PUBLIC
assert self.addon.current_version.is_public()
data = self.get_data().copy()
data['versions'] = self.addon.versions.all()
self.helper.set_data(data)
self.helper.handler.reject_multiple_versions()
self.addon.reload()
self.file.reload()
assert self.addon.status == amo.STATUS_NULL
assert self.addon.current_version is None
assert list(self.addon.versions.all()) == [self.version, old_version]
assert self.file.status == amo.STATUS_DISABLED
assert len(mail.outbox) == 1
assert mail.outbox[0].to == [self.addon.authors.all()[0].email]
assert mail.outbox[0].subject == (
u'Mozilla Add-ons: Delicious Bookmarks has been disabled on '
u'addons.mozilla.org')
assert ('your add-on Delicious Bookmarks has been disabled'
in mail.outbox[0].body)
log_token = ActivityLogToken.objects.get()
assert log_token.uuid.hex in mail.outbox[0].reply_to[0]
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 0
assert self.check_log_count(amo.LOG.REJECT_CONTENT.id) == 2
def test_confirm_auto_approval_content_review(self):
self.grant_permission(self.request.user, 'Addons:ContentReview')
self.setup_data(
amo.STATUS_PUBLIC, file_status=amo.STATUS_PUBLIC,
content_review_only=True)
summary = AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED)
self.create_paths()
# Safeguards.
assert self.addon.status == amo.STATUS_PUBLIC
assert self.file.status == amo.STATUS_PUBLIC
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_PUBLIC)
self.helper.handler.confirm_auto_approved()
summary.reload()
assert summary.confirmed is None # unchanged.
approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approvals_counter.counter == 0
assert approvals_counter.last_human_review is None
self.assertCloseToNow(approvals_counter.last_content_review)
assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 0
assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 1
activity = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.APPROVE_CONTENT.id)
.get())
assert activity.arguments == [self.addon, self.version]
assert activity.details['comments'] == ''
# Check points awarded.
self._check_score(amo.REVIEWED_CONTENT_REVIEW)
def test_dev_versions_url_in_context(self):
self.helper.set_data(self.get_data())
context_data = self.helper.handler.get_context_data()
assert context_data['dev_versions_url'] == absolutify(
self.addon.get_dev_url('versions'))
self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
context_data = self.helper.handler.get_context_data()
assert context_data['dev_versions_url'] == absolutify(
reverse('devhub.addons.versions', args=[self.addon.id]))
def test_send_email_autoescape():
s = 'woo&&<>\'""'
# Make sure HTML is not auto-escaped.
send_mail(u'Random subject with %s', s,
recipient_list=['nobody@mozilla.org'],
from_email='nobody@mozilla.org',
use_deny_list=False)
assert len(mail.outbox) == 1
assert mail.outbox[0].body == s
|
lavish205/olympia
|
src/olympia/reviewers/tests/test_utils.py
|
Python
|
bsd-3-clause
| 54,345
|
[
"VisIt"
] |
291ec563acb0ce2b835356b0d34ff5bbe591aa0199a51f099db3e8af320fb29a
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from peewee import fn
import time
from progressbar import ProgressBar, Percentage, Bar, ETA, Counter, RotatingMarker
from fetch.api import make_request, default_requests_session
from models import SlantTopic, Viewpoint
from lock import lock_method
logger = logging.getLogger('data')
LOCK_FILENAME = '/tmp/slant-topics-fetcher.lock'
DEFAULT_PARAMS = {
'page': 0,
'filter': 'feed',
'tags': 'development',
'format': 'json',
}
SLANT_URL = "https://www.slant.co"
SLANT_TOPICS_URL = SLANT_URL + "/topics"
REQUEST_DELAY = 1
def get_slant_topics(show_progress):
# Create a new fetch index
last_fetch_index = SlantTopic.select(fn.Max(SlantTopic.fetch_index)).scalar() or 0
fetch_index = last_fetch_index + 1
params = DEFAULT_PARAMS.copy()
first_request = True
next_url = None
count_of_processed_topics = 0
# Loop through requests to the Slant server until we reach an empty
# response or the end of the pages.
while True:
# All requests after our first one are made to a URL returned by
# the previous request. So there's a little logic here to use verbose
# parameters for the first request. They should be included by
# default in all requests after that.
if first_request:
response = make_request(
default_requests_session.get,
SLANT_TOPICS_URL,
params=params,
)
# We found that for some reason, the next page path is missing a parameter
# to specify that we still want the results of the next page as JSON.
# So we explicitly specify the format here.
else:
response = make_request(
default_requests_session.get,
next_url,
params={'format': 'json'},
)
# Leave this loop if the fetch failed
if response is None:
break
results = response.json()
        # If the response contains an error field with the 404 code, we have
        # probably seen all results. Break out of the loop.
if 'error' in results and results['error'] == 404:
break
# If this is the first request, initialize the progress bar with
# the number of results retrieved from the results
if first_request and show_progress:
progress_bar = ProgressBar(maxval=results['count'], widgets=[
'Progress: ', Percentage(),
' ', Bar(marker=RotatingMarker()),
' ', ETA(),
' Fetched ', Counter(), ' / ' + str(results['count']) + ' topics.'
])
progress_bar.start()
for topic in results['children']:
# Each child in the list is a topic.
# Save each of these as a new topic.
topic_record = SlantTopic.create(
fetch_index=fetch_index,
topic_id=topic['uuid'],
title=topic['revision']['title'],
url_path=topic['URL'],
owner_username=topic['createdEvent']['user']['username'],
)
# A topic on Slant has a number of "viewpoints" or alternatives.
# Save each one and a URL to the site where we can visit each one.
for viewpoint in topic['viewpoints']['children']:
Viewpoint.create(
fetch_index=fetch_index,
viewpoint_index=viewpoint['id'],
title=viewpoint['revision']['title'],
topic=topic_record,
url_path=viewpoint['URL'],
)
count_of_processed_topics += 1
if show_progress:
progress_bar.update(count_of_processed_topics)
# We are also finished looping through results when there is no longer a 'next'
# page in the page properties. It's just a guess on our part that this endpoint
# will always report a next page when there is one, as there isn't an official
# API and there isn't any documentation for it.
if 'next' not in results['properties']['page']:
if show_progress:
progress_bar.finish()
break
next_page_path = results['properties']['page']['next']
next_url = SLANT_URL + next_page_path
# Pause so that we don't bombard the server with requests
time.sleep(REQUEST_DELAY)
# Reset the flag that cues us to take actions for the first request
first_request = False
@lock_method(LOCK_FILENAME)
def main(show_progress, *args, **kwargs):
get_slant_topics(show_progress)
def configure_parser(parser):
parser.description = "Fetch all Slant topics related to development tools."
parser.add_argument(
'--show-progress',
action='store_true',
help="Show progress of the number of topics that have been fetched."
)
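# Hedged usage sketch (not part of the original module): the project presumably
# invokes `configure_parser` and `main` through its own fetcher runner, so the
# argparse wiring below is only an assumed, illustrative entry point.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    configure_parser(parser)
    args = parser.parse_args()
    # `--show-progress` is the flag exposed by configure_parser above.
    main(show_progress=args.show_progress)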
|
andrewhead/Package-Qualifiers
|
fetch/slant_topics.py
|
Python
|
mit
| 5,067
|
[
"VisIt"
] |
38dd038268ea685635d6c795a2bdbaab9f00fd24b43a05c20cb6be8d5c9e9317
|
import alhazen.correlation_functions as corr
import numpy as np
ntheta = 100
flavor = 'cltt'
lmin = 100
lmax = 3000
ells = np.arange(lmin,lmax)
clCMB = np.ones(ells.size)
clpp = np.ones(ells.size)
clCMB_lensed = np.ones(ells.size)
accurate_lensing = True
compute_tgradt = True
l2range = list(range(lmin,lmax))
l2dim = len(l2range)-1
b = corr.derivative_dclcmbdclpp_corr_func(lmax,flavor,accurate_lensing,clCMB,clpp,l2range,lmin,lmax,l2dim,lmax)
a = corr.lensed_spectra_corrfunc_allsky(ntheta,flavor,accurate_lensing,compute_tgradt,clCMB,clpp,clCMB_lensed,lmin,lmax)
# subroutine lensed_spectra_corrfunc_allsky(ntheta,flavor,accurate_lensing,compute_tgradt,clCMB,clpp,clCMB_lensed,lmin,lmax)
# block='TTTT'
# ## Maximum multipole
# lmax = int(lmax)
# ## Input lensing potential power spectrum
# clpp = cls_unlensed.clpp_long
# ACCURACY_BOOST = 4 # 4 is conservative. oversampling of the theta grid
# accurate_lensing = True # Gauss-Legendre (true) vs simple integration (False)
# EXTRA_MULTIPOLES = 0 # If you want to add more multipole to compute dC^{CMB}/dC^{\phi \phi} (not necessary)
# #############################################################
# # Initialization of containers
# #############################################################
# cov_order0_tot = np.array([covmat(0,lmax) for i in range(len(blocks))]) ## Gaussian variance
# cov_order1_tot = np.array([covmat(0,lmax) for i in range(len(blocks))]) ## O(clpp)
# cov_order2_tot = np.array([covmat(0,lmax) for i in range(len(blocks))]) ## O(clpp^2)
# ## We have 4 derivatives dC^{CMB}/dC^{phiphi} to compute: TTTT, EEEE, BBBB, TETE
# names_dCMB_over_dcpp_tot = ['TTTT','EEEE','BBBB','TETE']
# dCMB_over_dcpp_tot = np.array([np.zeros((lmax+1,lmax+1)) for i in range(len(names_dCMB_over_dcpp_tot))])
# ## We approximate other derivatives (e.g. dlensedEE/dEE) by ones.
# dBB_over_dEE_tot = np.zeros((lmax+1,lmax+1))
# flavor = 'cl%s%s'%(block[0].lower(),block[2].lower())
# if block in ['EEBB','TTBB','TEBB']:
# ## There is no Gaussian variance contribution for those terms
# continue
# if rank==0: print 'Gaussian Variance: doing block %s (%s)\n'%(block,flavor)
# ## Load weights (lensed spectra, and their noisy version)
# if block in ['TETE', 'TTTE', 'EETE']:
# cl_len_XX, cl_len_YY, cl_len_XY = lib_spectra.load_weights(cls_lensed, 'clte',
# noise_uK_arcmin, fwhm_arcmin, 2*lmax, extra='_long',TTcorr=TTcorr)
# else:
# cl_len_XX, cl_len_YY, cl_len_XY = lib_spectra.load_weights(cls_lensed, flavor,
# noise_uK_arcmin, fwhm_arcmin, 2*lmax, extra='_long',TTcorr=TTcorr)
# ## Gaussian variance
# if block == 'TTEE':
# cov_order0_tot[index_block].data = cross_gaussian_variance(cl1=cl_len_XY[1],cl2=cl_len_XY[1],ells=cls_lensed.ls)
# elif block == 'TTTE':
# cov_order0_tot[index_block].data = cross_gaussian_variance(cl1=cl_len_XX[1],cl2=cl_len_XY[1],ells=cls_lensed.ls)
# elif block == 'EETE':
# cov_order0_tot[index_block].data = cross_gaussian_variance(cl1=cl_len_YY[1],cl2=cl_len_XY[1],ells=cls_lensed.ls)
# else:
# cov_order0_tot[index_block].data = gaussian_variance(cl11=cl_len_XX[1],cl22=cl_len_YY[1],
# cl12=cl_len_XY[1],ells=cls_lensed.ls)
# #############################################################
# ## Contribution of the trispectrum to the covariance: O(clpp)
# #############################################################
# for block in blocks:
# index_block = blocks.index(block)
# flavor = 'cl%s%s'%(block[0].lower(),block[2].lower())
# if block in ['BBBB','TTBB','EEBB','TETE','TTEE','TTTE','EETE','TEBB']:
# ## We do not consider the contribution of those terms (although you can)
# continue
# cov_order1 = covmat(0,lmax)
# if rank==0: print 'Order O(clpp): doing block %s (%s)\n'%(block,flavor)
# ## Load spins
# spinl2_x, spinl3_x, spinl2_y, spinl3_y = lib_spectra.load_spin_values_wigner('clte')
# ## Load weights (lensed spectra, and their noisy version)
# cl_unlen_TT, cl_unlen_EE, cl_unlen_TE = lib_spectra.load_weights(cls_unlensed, 'clte',
# noise_uK_arcmin, fwhm_arcmin, 2*lmax, extra='_long',TTcorr=TTcorr)
# cl_unlen_vec = np.array([cl_unlen_TT[0], cl_unlen_EE[0], np.zeros_like(cl_unlen_TT[0]), cl_unlen_TE[0]])
# ## Load weights (unlensed spectra)
# uup = block[0] + block[2]
# vvp = block[1] + block[3]
# uvp = block[0] + block[3]
# vup = block[1] + block[2]
# ## Define range of ells, and distribute over procs.
# n_tot = comm.size
# l2range = range(lmin+rank,lmax+1,n_tot)
# l2dim = len(l2range)-1
# ## Compute this term
# cov_order1.data = loop_lensing.covariance_cmbxcmb_order1_uvupvp(cl_unlen_vec,clpp,l2range,
# uup,vvp,uvp,vup,lmin,spinl2_x,spinl3_x,spinl2_y,spinl3_y,l2dim,lmax)
# comm.Barrier()
# ## Reduce the results on the root
# comm.Reduce([cov_order1.data, MPI.DOUBLE],[cov_order1_tot[index_block].data, MPI.DOUBLE],op = MPI.SUM,root = 0)
# ## Done for this block
# comm.Barrier()
# #############################################################
# ## Compute dC^{CMB}/dC^{phiphi}
# ## Here, you have two ways of computing the derivatives:
# ## * Using series-expansion. Quick but less accurate.
# ## * Using correlation functions. Less quick, but extra accurate.
# #############################################################
# file_manager_derivatives_CMB = util.file_manager('dCMB_over_dcpp_tot', exp, spec='v1', lmax=lmax,
# force_recomputation=False, folder=folder_cache,rank=rank)
# if file_manager_derivatives_CMB.FileExist is True:
# if rank==0:
# dCMB_over_dcpp_tot, names_dCMB_over_dcpp_tot = file_manager_derivatives_CMB.data
# else:
# for position_block,block in enumerate(names_dCMB_over_dcpp_tot):
# flavor = 'cl%s%s'%(block[0].lower(),block[1].lower())
# if rank==0: print 'Pre-compute derivatives for block %s (%s)\n'%(block,flavor)
# if not use_corrfunc:
# if rank == 0: print 'Use series-expansion to compute derivative (may not be exact)'
# if block == 'BBBB':
# ## BB takes clee as unlensed weights (noiseless!)
# cl_unlen_XX, cl_unlen_YY, cl_unlen_XY = lib_spectra.load_weights(cls_unlensed, 'clee', 0.0,
# 0.0, 2*lmax, extra='_long')
# else:
# ## noiseless!
# cl_unlen_XX, cl_unlen_YY, cl_unlen_XY = lib_spectra.load_weights(cls_unlensed, flavor,
# 0.0, 0.0, 2*lmax, extra='_long')
# ## Define range of ells, and distribute over procs.
# n_tot = comm.size
# l2range = range(lmin+rank,lmax+1,n_tot)
# l2dim = len(l2range)-1
# ## Load spins
# spinl2_x, spinl3_x, spinl2_y, spinl3_y = lib_spectra.load_spin_values_wigner(flavor)
# ## Change order of spins. Why? do not know... but it works :D
# derivatives = loop_lensing.compute_derivatives_dcttdcpp_mpi(cl_unlen_XY[0],l2range,flavor,lmin,spinl3_x,
# spinl2_x,spinl3_y,spinl2_y,l2dim,lmax)
# ## Reduce on the root
# comm.Reduce([derivatives, MPI.DOUBLE],[dCMB_over_dcpp_tot[position_block], MPI.DOUBLE],op = MPI.SUM,root = 0)
# else:
# if rank == 0: print 'Use correlation functions to compute derivative'
# if block == 'BBBB':
# ## BB takes clee as unlensed weights
# ## noiseless!
# cl_unlen_XX, cl_unlen_YY, cl_unlen_XY = lib_spectra.load_weights(cls_unlensed, 'clee',
# 0.0, 0.0, 2*lmax, extra='_long')
# else:
# ## noiseless!
# cl_unlen_XX, cl_unlen_YY, cl_unlen_XY = lib_spectra.load_weights(cls_unlensed, flavor,
# 0.0, 0.0, 2*lmax, extra='_long')
# clpp_long = cls_unlensed.clpp_long
# ## Define container used to reduce results on root
# derivatives_tot_tmp = np.zeros((lmax+1,lmax+1))
# ## Define range of ells, and distribute over procs.
# n_tot = comm.size
# l2range = range(lmin+rank,lmax+1,n_tot)
# l2dim = len(l2range)-1
# ## Compute.
# ## Use cl_unlen_XY which is TT for TT, EE for EE, EE for BB, and TE for TE.
# dxim,dxip,dm,dp = correlation_functions.derivative_dclcmbdclpp_corr_func(ACCURACY_BOOST*lmax,flavor,accurate_lensing,cl_unlen_XY[0][0:lmax+EXTRA_MULTIPOLES+1],clpp_long[0:lmax+EXTRA_MULTIPOLES+1],l2range,lmin,lmax,l2dim,lmax+EXTRA_MULTIPOLES)
|
msyriac/alhazen
|
tests/test_gradt.py
|
Python
|
gpl-3.0
| 8,244
|
[
"Gaussian"
] |
6c483f16a699d9bb3e89a636077785cc0343443292904c3d8510e268eaef689f
|
"""Collection of DIRAC useful list related modules.
By default on Error they return None.
"""
__RCSID__ = "$Id$"
import random
random.seed()
def uniqueElements(aList):
"""Utility to retrieve list of unique elements in a list (order is kept).
:param aList: list of elements
:type aList: python:list
:return: list of unique elements
"""
result = []
seen = set()
try:
for i in aList:
if i not in seen:
result.append(i)
seen.add(i)
return result
except BaseException:
return None
def appendUnique(aList, anObject):
""" Append to list if object does not exist.
:param aList: list of elements
:type aList: python:list
:param anObject: object you want to append
"""
if anObject not in aList:
aList.append(anObject)
def fromChar(inputString, sepChar=","):
"""Generates a list splitting a string by the required character(s)
resulting string items are stripped and empty items are removed.
:param string inputString: list serialised to string
:param string sepChar: separator
:return: list of strings or None if sepChar has a wrong type
"""
# to prevent getting an empty String as argument
if not (isinstance(inputString, basestring) and isinstance(sepChar, basestring) and sepChar):
return None
return [fieldString.strip() for fieldString in inputString.split(sepChar) if len(fieldString.strip()) > 0]
def randomize(aList):
"""Return a randomly sorted list.
:param aList: list to permute
:type aList: python:list
"""
tmpList = list(aList)
random.shuffle(tmpList)
return tmpList
def pop(aList, popElement):
""" Pop the first element equal to popElement from the list.
:param aList: list
:type aList: python:list
:param popElement: element to pop
"""
if popElement in aList:
return aList.pop(aList.index(popElement))
def stringListToString(aList):
"""This function is used for making MySQL queries with a list of string elements.
:param aList: list to be serialized to string for making queries
:type aList: python:list
"""
return ",".join("'%s'" % x for x in aList)
def intListToString(aList):
"""This function is used for making MySQL queries with a list of int elements.
:param aList: list to be serialized to string for making queries
:type aList: python:list
"""
return ",".join(str(x) for x in aList)
def getChunk(aList, chunkSize):
"""Generator yielding chunk from a list of a size chunkSize.
:param aList: list to be splitted
:type aList: python:list
:param int chunkSize: lenght of one chunk
:raise: StopIteration
Usage:
>>> for chunk in getChunk( aList, chunkSize=10):
process( chunk )
"""
for i in xrange(0, len(aList), chunkSize):
yield aList[i:i + chunkSize]
def breakListIntoChunks(aList, chunkSize):
"""This function takes a list as input and breaks it into list of size 'chunkSize'.
It returns a list of lists.
:param aList: list of elements
:type aList: python:list
:param int chunkSize: len of a single chunk
:return: list of lists of length of chunkSize
:raise: RuntimeError if numberOfFilesInChunk is less than 1
"""
if chunkSize < 1:
raise RuntimeError("chunkSize cannot be less than 1")
if isinstance(aList, (set, dict, tuple)):
aList = list(aList)
return [chunk for chunk in getChunk(aList, chunkSize)]
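# Illustrative usage sketch, added for clarity (not part of the original module);
# it simply exercises the helpers defined above.
if __name__ == "__main__":
    # fromChar strips each item and drops empty fields
    assert fromChar(" a, b ,,c ") == ["a", "b", "c"]
    # breakListIntoChunks splits a sequence into fixed-size pieces
    assert breakListIntoChunks([0, 1, 2, 3, 4, 5, 6], 3) == [[0, 1, 2], [3, 4, 5], [6]]
    # uniqueElements keeps only the first occurrence of each element
    assert uniqueElements([1, 2, 1, 3, 2]) == [1, 2, 3]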
|
petricm/DIRAC
|
Core/Utilities/List.py
|
Python
|
gpl-3.0
| 3,397
|
[
"DIRAC"
] |
d6d08cef9d30a9a112409bf170fc9c22697a08960987cb842e10ac1f98658c2e
|
#!/usr/bin/env python
## category Conversion
## desc Extract BED regions from a reference FASTA file
'''
Extract BED regions from a reference FASTA file.
Note: Sequences that are extracted will be in the same orientation as the
BED region, unless the {-ns} option is given.
'''
import sys
import os
from ngsutils.bed import BedFile
from ngsutils.support import revcomp
import pysam
def bed_tofasta(bed, ref_fasta, min_size=50, stranded=True, include_name=False, out=sys.stdout):
if not os.path.exists('%s.fai' % ref_fasta):
pysam.faidx(ref_fasta)
fasta = pysam.Fastafile(ref_fasta)
refs = set()
with open('%s.fai' % ref_fasta) as f:
for line in f:
refs.add(line.split('\t')[0].strip())
name = ''
for region in bed:
if include_name:
name = '%s|' % (region.name.strip())
if region.end - region.start >= min_size and region.chrom in refs:
seq = fasta.fetch(region.chrom, region.start, region.end)
if stranded and region.strand:
if region.strand == '-':
seq = revcomp(seq)
out.write('>%s%s:%d-%d[%s]\n%s\n' % (name, region.chrom, region.start, region.end, region.strand, seq))
else:
out.write('>%s%s:%d-%d\n%s\n' % (name, region.chrom, region.start, region.end, seq))
fasta.close()
def usage():
print __doc__
print """\
Usage: bedutils tofasta {-min size} {-name} {-ns} bedfile ref.fasta
Outputs the sequences of each BED region to FASTA format.
Option:
-min The minimum size of a region
-name Include the name field of the BED region in the FASTA sequence name
If used, the final name will be in the form:
name|chrX:start-end[strand]
The default is to not include the BED region name (only the genomic
coordinates will be exported).
-ns Ignore the strand of a region (always return seq from the + strand)
"""
if __name__ == "__main__":
min_size = 50
bed = None
ref = None
stranded = True
include_name = False
last = None
for arg in sys.argv[1:]:
if last == '-min':
min_size = int(arg)
last = None
elif arg in ['-min']:
last = arg
elif arg == '-name':
include_name = True
elif arg == '-ns':
stranded = False
elif not bed and os.path.exists(arg):
bed = arg
elif not ref and os.path.exists(arg):
ref = arg
if not bed or not ref:
usage()
sys.exit(1)
bed_tofasta(BedFile(bed), ref, min_size=min_size, stranded=stranded, include_name=include_name)
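# Illustrative invocations (file names are assumed, not part of the original script):
#   bedutils tofasta regions.bed genome.fa                    # plain extraction
#   bedutils tofasta -min 100 -name regions.bed genome.fa     # keep BED names, skip regions < 100 bp
#   bedutils tofasta -ns regions.bed genome.fa                 # ignore strand, always + orientation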
|
ngsutils/ngsutils
|
ngsutils/bed/tofasta.py
|
Python
|
bsd-3-clause
| 2,685
|
[
"pysam"
] |
69f4619a4d280b2f661ef855dc27c1df6695059578efb3f2d0809707dcfa427f
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from permabots_www import views
from permabots_www.sitemaps import sitemaps
from django.contrib.sitemaps.views import sitemap
def uuidzy(url):
return url.replace('%u', '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
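# Illustrative note (assumed example, not in the original file): uuidzy expands the
# '%u' placeholder into a UUID4-style pattern, e.g.
#   uuidzy(r'^bots/(?P<pk>%u)/$')
#   -> r'^bots/(?P<pk>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/$'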
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt',content_type='text/plain')),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap'),
# User management
url(r'^users/', include("permabots_www.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^processing/', include('permabots.urls_processing', namespace="permabots")),
url(r'^api/v1/', include('permabots.urls_api', namespace="api")),
url(r'^privacy-policy/$', TemplateView.as_view(template_name='pages/privacy_policy.html'), name="privacy-policy"),
url(r'^docs/getting-started$', TemplateView.as_view(template_name='pages/getting-started.html'), name="getting-started"),
url(r'^docs/api/', include('rest_framework_swagger.urls', namespace="swagger")),
url(r'^bots/$', views.BotListView.as_view(), name="bot-list"),
url(r'^bots/create/$', views.BotCreateView.as_view(), name="bot-create"),
url(uuidzy(r'^bots/delete/(?P<pk>%u)/$'), views.BotDeleteView.as_view(), name="bot-delete"),
url(uuidzy(r'^bots/update/(?P<pk>%u)/$'), views.BotUpdateView.as_view(), name="bot-update"),
url(uuidzy(r'^bots/(?P<pk>%u)/$'), views.BotDetailView.as_view(), name="bot-detail"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/telegram/create/$'), views.TelegramBotCreateView.as_view(), name="bot-telegram-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/telegram/update/(?P<pk>%u)/$'), views.TelegramBotUpdateView.as_view(), name="bot-telegram-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/telegram/delete/(?P<pk>%u)/$'), views.TelegramBotDeleteView.as_view(), name="bot-telegram-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/kik/create/$'), views.KikBotCreateView.as_view(), name="bot-kik-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/kik/update/(?P<pk>%u)/$'), views.KikBotUpdateView.as_view(), name="bot-kik-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/kik/delete/(?P<pk>%u)/$'), views.KikBotDeleteView.as_view(), name="bot-kik-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/messenger/create/$'), views.MessengerBotCreateView.as_view(), name="bot-messenger-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/messenger/update/(?P<pk>%u)/$'), views.MessengerBotUpdateView.as_view(), name="bot-messenger-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/messenger/delete/(?P<pk>%u)/$'), views.MessengerBotDeleteView.as_view(), name="bot-messenger-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/$'), views.HandlerListView.as_view(), name="handler-list"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/create/$'), views.HandlerCreateView.as_view(), name="handler-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/update/(?P<pk>%u)/$'), views.HandlerUpdateView.as_view(), name="handler-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/delete/(?P<pk>%u)/$'), views.HandlerDeleteView.as_view(), name="handler-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/(?P<pk>%u)/$'), views.HandlerDetailView.as_view(), name="handler-detail"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/(?P<handler_pk>%u)/urlparams/create/$'), views.UrlParameterCreateView.as_view(),
name="handler-urlparameter-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/(?P<handler_pk>%u)/urlparams/update/(?P<pk>%u)/$'), views.UrlParameterUpdateView.as_view(),
name="handler-urlparameter-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/(?P<handler_pk>%u)/urlparams/delete/(?P<pk>%u)/$'), views.UrlParameterDeleteView.as_view(),
name="handler-urlparameter-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/(?P<handler_pk>%u)/headerparams/create/$'), views.HeaderParameterCreateView.as_view(),
name="handler-headerparameter-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/(?P<handler_pk>%u)/headerparams/update/(?P<pk>%u)/$'), views.HeaderParameterUpdateView.as_view(),
name="handler-headerparameter-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/handlers/(?P<handler_pk>%u)/headerparams/delete/(?P<pk>%u)/$'), views.HeaderParameterDeleteView.as_view(),
name="handler-headerparameter-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/$'), views.HookListView.as_view(), name="hook-list"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/create/$'), views.HookCreateView.as_view(), name="hook-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/update/(?P<pk>%u)/$'), views.HookUpdateView.as_view(), name="hook-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/delete/(?P<pk>%u)/$'), views.HookDeleteView.as_view(), name="hook-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/(?P<pk>%u)/$'), views.HookDetailView.as_view(), name="hook-detail"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/(?P<hook_pk>%u)/recipients/telegram/create/$'), views.TelegramRecipientCreateView.as_view(),
name="hook-telegram-recipient-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/(?P<hook_pk>%u)/recipients/telegram/update/(?P<pk>%u)/$'), views.TelegramRecipientUpdateView.as_view(),
name="hook-telegram-recipient-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/(?P<hook_pk>%u)/recipients/telegram/delete/(?P<pk>%u)/$'), views.TelegramRecipientDeleteView.as_view(),
name="hook-telegram-recipient-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/(?P<hook_pk>%u)/recipients/kik/create/$'), views.KikRecipientCreateView.as_view(),
name="hook-kik-recipient-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/(?P<hook_pk>%u)/recipients/kik/update/(?P<pk>%u)/$'), views.KikRecipientUpdateView.as_view(),
name="hook-kik-recipient-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/(?P<hook_pk>%u)/recipients/kik/delete/(?P<pk>%u)/$'), views.KikRecipientDeleteView.as_view(),
name="hook-kik-recipient-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/(?P<hook_pk>%u)/recipients/messenger/create/$'), views.MessengerRecipientCreateView.as_view(),
name="hook-messenger-recipient-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/(?P<hook_pk>%u)/recipients/messenger/update/(?P<pk>%u)/$'), views.MessengerRecipientUpdateView.as_view(),
name="hook-messenger-recipient-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/hooks/(?P<hook_pk>%u)/recipients/messenger/delete/(?P<pk>%u)/$'), views.MessengerRecipientDeleteView.as_view(),
name="hook-messenger-recipient-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/env/$'), views.EnvironmentVarListView.as_view(), name="env-list"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/env/create/$'), views.EnvironmentVarCreateView.as_view(), name="env-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/env/update/(?P<pk>%u)/$'), views.EnvironmentVarUpdateView.as_view(), name="env-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/env/delete/(?P<pk>%u)/$'), views.EnvironmentVarDeleteView.as_view(), name="env-delete"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/env/(?P<pk>%u)/$'), views.EnvironmentVarDetailView.as_view(), name="env-detail"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/states/create/$'), views.StateCreateView.as_view(), name="state-create"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/states/update/(?P<pk>%u)/$'), views.StateUpdateView.as_view(), name="state-update"),
url(uuidzy(r'^bots/(?P<bot_pk>%u)/states/delete/(?P<pk>%u)/$'), views.StateDeleteView.as_view(), name="state-delete"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permission Denied")}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
url(r'^500/$', default_views.server_error),
]
urlpatterns += [url(r'^silk/', include('silk.urls', namespace='silk'))]
|
jlmadurga/permabots-www
|
config/urls.py
|
Python
|
mit
| 8,926
|
[
"VisIt"
] |
344880983d19ca18dab43e0a9f4ac2107b4b41fa08c90686e77c45db7b6aca81
|
try: paraview.simple
except: from paraview.simple import *
from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
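# Descriptive note added for clarity: this ParaView trace builds a vector field
# 'Result' from the scalar components V1, V2, V3 with a Calculator filter, then
# renders it as arrow glyphs, coloured first by V1 and then by the glyph vector.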
soln_pvd = GetActiveSource()
Calculator2 = Calculator()
RenderView1 = GetRenderView()
DataRepresentation1 = GetDisplayProperties(soln_pvd)
DataRepresentation4 = Show()
DataRepresentation4.ScaleFactor = 0.30000000000000004
DataRepresentation4.ScalarOpacityUnitDistance = 0.19840824707012364
DataRepresentation4.SelectionPointFieldDataArrayName = 'Result'
DataRepresentation4.EdgeColor = [0.0, 0.0, 0.5000076295109483]
Calculator2.Function = 'V1*iHat+V2*jHat+V3*kHat'
DataRepresentation1.Visibility = 0
Glyph2 = Glyph( GlyphType="Arrow", GlyphTransform="Transform2" )
Glyph2.Scalars = ['POINTS', 'V1']
Glyph2.SetScaleFactor = 0.30000000000000004
Glyph2.Vectors = ['POINTS', 'Result']
Glyph2.GlyphTransform = "Transform2"
Glyph2.GlyphType = "Arrow"
Glyph2.SetScaleFactor = 0.0141834026997381
DataRepresentation5 = Show()
DataRepresentation5.ScaleFactor = 0.307979391515255
DataRepresentation5.SelectionPointFieldDataArrayName = 'V1'
DataRepresentation5.EdgeColor = [0.0, 0.0, 0.5000076295109483]
a1_V1_PVLookupTable = GetLookupTableForArray( "V1", 1, RGBPoints=[-16.631860733032227, 0.23, 0.299, 0.754, 6.445072174072266, 0.706, 0.016, 0.15] )
DataRepresentation5.ColorArrayName = ('POINT_DATA', 'V1')
DataRepresentation5.LookupTable = a1_V1_PVLookupTable
a3_GlyphVector_PVLookupTable = GetLookupTableForArray( "GlyphVector", 3, RGBPoints=[0.025796992328870942, 0.23, 0.299, 0.754, 21.1514829234553, 0.706, 0.016, 0.15] )
RenderView1.CameraClippingRange = [9.454775690340611, 14.731091870047191]
DataRepresentation5.ColorArrayName = ('POINT_DATA', 'GlyphVector')
DataRepresentation5.LookupTable = a3_GlyphVector_PVLookupTable
Render()
|
davidshepherd7/oomph-lib-micromagnetics
|
etc/paraview_helpers/hms_vec.py
|
Python
|
gpl-2.0
| 1,786
|
[
"ParaView"
] |
aceb4ed418eae2687d92fe4656f3f719715c9789579f096a4f72b103441e4b78
|
from distutils.core import setup
from setuptools import find_packages
from sistr.version import __version__
classifiers = """
Development Status :: 4 - Beta
Environment :: Console
License :: OSI Approved :: Apache Software License
Intended Audience :: Science/Research
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: Implementation :: CPython
Operating System :: POSIX :: Linux
""".strip().split('\n')
setup(
name='sistr_cmd',
version=__version__,
packages=find_packages(exclude=['tests']),
url='https://github.com/phac-nml/sistr_cmd',
license='Apache 2.0',
author='Peter Kruczkiewicz',
author_email='peter.kruczkiewicz@gmail.com',
description=('Serovar predictions from Salmonella whole-genome sequence assemblies by determination of antigen gene '
'and cgMLST gene alleles using BLAST. Mash MinHash can also be used for serovar prediction.'),
keywords='Salmonella serotyping genotyping cgMLST BLAST Mash MinHash',
classifiers=classifiers,
package_dir={'sistr':'sistr'},
include_package_data=True,
install_requires=[
'numpy>=1.11.1',
'pandas>=0.18.1',
'tables>=3.3.0',
'pycurl>=7.43.0',
'scipy>=1.1.0'
],
extras_require={
'test': ['pytest>=2.9.2',],
},
entry_points={
'console_scripts': [
'sistr=sistr.sistr_cmd:main',
],
},
)
|
peterk87/sistr_cmd
|
setup.py
|
Python
|
apache-2.0
| 1,646
|
[
"BLAST"
] |
af2b39557a415b766709e6e4f309df4ba18ccc5d160eb428212fcb1b41a32de4
|
pythons = {
'Chapman' : 'Graham',
'Cleese' : 'John',
'Idle' : 'Eric',
'Jones': 'Terry',
'Palin': 'Michael',
}
pythons['Gilliam'] = 'Gerry'
pythons['Gilliam'] = 'Terry'
others = {'Marx' : 'Groucho', 'Howard' : 'Moe'}
pythons.update(others)
del pythons['Marx']
del pythons['Howard']
a = pythons.get('Marx', 'Not A Python')
signals = {'green': 'go', 'yellow': 'go faster', 'red': 'smile for the camera'}
"""
print(list(signals.keys()))
print(list(signals.values()))
print(list(signals.items()))
"""
save_signals = signals
signals['blue'] = 'confuse everyone'
#print(save_signals)
signals = {'green': 'go', 'yellow': 'go faster', 'red': 'smile for the camera'}
original_signals = signals.copy()
signals['blue'] = 'confuse everyone'
#print(signals)
#print(original_signals)
empty_set = set()
#print(empty_set)
even_numbers = {0, 2, 4, 6, 8}
#print(even_numbers)
odd_numbers = {1, 3, 5, 7, 9}
#print(odd_numbers)
#print(set('letters'))
#print(set(['Dasher', 'Dancer', 'Prancer', 'Mason-Dixon']))
#print(set(('Ummaguuma', 'Echoes', 'Atom Heart Mother')))
#print(set({'apple': 'red', 'orange': 'orange', 'cherry': 'red'}))
drinks = {
'martini': {'vodka', 'vermouth'},
'black russian': {'vodka', 'kahlua'},
'white russian': {'cream', 'kahlua', 'vodka'},
'manhattan': {'rye', 'vermouth', 'bitters'},
'screwdriver': {'orange juice', 'vodka'}
}
"""
for name, contents in drinks.items():
if 'vodka' in contents:
#print(name)
"""
"""
for name, contents in drinks.items():
if 'vodka' in contents and not ('vermouth' in contents or
'cream' in contents):
print(name)
"""
"""
for name, contents in drinks.items():
if contents & {'vermouth', 'orange juice'}:
print (name)
"""
"""
for name, contents in drinks.items():
if 'vodka' in contents and not contents & {'vermouth', 'cream'}:
print (name)
"""
bruss = drinks['black russian']
wruss = drinks['white russian']
a = {1,2}
b = {2,3}
#print(a&b)
#print(a.intersection(b))
#print(bruss & wruss)
#print(a | b)
#print(a.union(b))
"""
print(a-b)
print(a.difference(b))
print(bruss - wruss)
print(wruss - bruss)
"""
#print(a^b)
#print(bruss^wruss)
#print(a.symmetric_difference(b))
#print(a <= b)
#print(a.issubset(b))
#print(bruss <= wruss)
#print(a<=a)
#print(a<b)
#print(a<a)
#print(bruss < wruss)
#print(a>=b)
#print(wruss>=bruss)
#print(a>b)
#print(wruss>bruss)
marxes =['Groucho', 'Chico', 'Harpo']
pythons = ['Chapman', 'Cleese', 'Gilliam', 'Jones', 'Palin']
stooges = ['Moe', 'Curly', 'Larry']
tuple_of_lists = marxes, pythons, stooges
#print(tuple_of_lists)
list_of_lists = [marxes, pythons, stooges]
#print(list_of_lists)
dict_of_lists = {'Marxes': marxes, 'Pythons': pythons, 'Stooges': stooges}
#print(dict_of_lists)
houses = {
(44.79, -93.14, 284): "My House",
(38.89, -77.03, 13): "The White House"}
#print (houses)
things = ['mozarella', 'cinderella', 'salmonella']
things[1]=things[1].capitalize()
things[0]=things[0].capitalize()
del things[2]
#print(things)
surprise = ['Groucho', 'Chico', 'Harpo']
surprise[2] = surprise[2].lower()
#print(surprise)
surprise[2] = surprise[2].swapcase()
#print(surprise)
e2f = {
'dog': 'chien',
'cat': 'chat',
'walrus': 'morse'}
#print(e2f['walrus'])
#print(e2f.items())
f2e=e2f.copy()
f2e=list(f2e.items())
a = list(f2e[0])
b = list(f2e[1])
c = list(f2e[2])
temp =a[0]
a[0]=a[1]
a[1]=temp
temp = b[0]
b[0] = b[1]
b[1] = temp
temp = c[0]
c[0] = c[1]
c[1] = temp
#print(a)
d=[]
d.append(a)
d.append(b)
d.append(c)
d=dict(d)
#print(d)
f2e=d
#print(f2e)
#print(f2e['chien'])
#print(f2e.values())
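# A more idiomatic way to invert e2f than the element-swapping above
# (illustrative sketch, not part of the original exercise code):
f2e_comprehension = {french: english for english, french in e2f.items()}
#print(f2e_comprehension['chien'])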
top_cats = ['Henry', 'Grumpy', 'Lucy']
animals = {
'cats': top_cats,
'octopi':'',
'emus':'',
}
life = {
'animals': animals,
'plants':'',
'other':'',
}
print(life['animals'])
print(life['animals']['cats'])
|
serggrom/python-projects
|
Part3.py
|
Python
|
gpl-3.0
| 3,902
|
[
"MOE"
] |
e4db0db9dac61d35ce1b1a0982a3169e440fcd35fe0493049ec8306de7e4d7c4
|
"""
This is a sample implementation for working DGL with DeepChem!
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
class CGCNNLayer(nn.Module):
"""The convolutional layer of CGCNN.
This class was implemented using DGLGraph methods.
Please confirm how to use DGLGraph methods from below link.
See: https://docs.dgl.ai/en/0.4.x/tutorials/models/1_gnn/9_gat.html
Examples
--------
>>> import deepchem as dc
>>> from pymatgen.core import Lattice, Structure
>>> lattice = Lattice.cubic(4.2)
>>> structure = Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
>>> featurizer = dc.feat.CGCNNFeaturizer()
>>> cgcnn_graph = featurizer.featurize([structure])[0]
>>> cgcnn_graph.num_node_features
92
>>> cgcnn_graph.num_edge_features
41
>>> cgcnn_dgl_graph = cgcnn_graph.to_dgl_graph()
>>> print(type(cgcnn_dgl_graph))
<class 'dgl.heterograph.DGLHeteroGraph'>
>>> layer = CGCNNLayer(hidden_node_dim=92, edge_dim=41)
>>> node_feats = cgcnn_dgl_graph.ndata.pop('x')
>>> edge_feats = cgcnn_dgl_graph.edata.pop('edge_attr')
>>> new_node_feats, new_edge_feats = layer(cgcnn_dgl_graph, node_feats, edge_feats)
Notes
-----
This class requires DGL and PyTorch to be installed.
"""
def __init__(self,
hidden_node_dim: int,
edge_dim: int,
batch_norm: bool = True):
"""
Parameters
----------
hidden_node_dim: int
The length of the hidden node feature vectors.
edge_dim: int
The length of the edge feature vectors.
batch_norm: bool, default True
Whether to apply batch normalization or not.
"""
super(CGCNNLayer, self).__init__()
z_dim = 2 * hidden_node_dim + edge_dim
liner_out_dim = 2 * hidden_node_dim
self.linear = nn.Linear(z_dim, liner_out_dim)
self.batch_norm = nn.BatchNorm1d(liner_out_dim) if batch_norm else None
def message_func(self, edges):
z = torch.cat(
[edges.src['x'], edges.dst['x'], edges.data['edge_attr']], dim=1)
z = self.linear(z)
if self.batch_norm is not None:
z = self.batch_norm(z)
gated_z, message_z = z.chunk(2, dim=1)
gated_z = torch.sigmoid(gated_z)
message_z = F.softplus(message_z)
return {'message': gated_z * message_z}
def reduce_func(self, nodes):
nbr_sumed = torch.sum(nodes.mailbox['message'], dim=1)
new_x = F.softplus(nodes.data['x'] + nbr_sumed)
return {'new_x': new_x}
def forward(self, dgl_graph, node_feats, edge_feats):
"""Update node representations.
Parameters
----------
dgl_graph: DGLGraph
DGLGraph for a batch of graphs.
node_feats: torch.Tensor
The node features. The shape is `(N, hidden_node_dim)`.
edge_feats: torch.Tensor
The edge features. The shape is `(N, edge_dim)`.
Returns
-------
node_feats: torch.Tensor
The updated node features. The shape is `(N, hidden_node_dim)`.
"""
dgl_graph.ndata['x'] = node_feats
dgl_graph.edata['edge_attr'] = edge_feats
dgl_graph.update_all(self.message_func, self.reduce_func)
node_feats = dgl_graph.ndata.pop('new_x')
return node_feats
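# Schematic of the CGCNNLayer update above (added for reference; the notation is
# ours, not from the original source). For an edge from node j to node i with node
# features x_j, x_i and edge features e_ji:
#   z_ji      = concat(x_j, x_i, e_ji)
#   gate, msg = split(BatchNorm(W z_ji + b))       # two halves of the linear output
#   m_ji      = sigmoid(gate) * softplus(msg)      # gated message
#   x_i_new   = softplus(x_i + sum_j m_ji)         # residual update over incoming edges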
class CGCNN(nn.Module):
"""Crystal Graph Convolutional Neural Network (CGCNN).
This model takes arbitrary crystal structures as input and predicts material properties
using the element information and the connections between atoms in the crystal. It can be
useful when the target property is expensive to compute directly, such as the band gap
obtained from DFT. The model is a variant of Graph Convolutional Networks; the main
differences from other GCN models are how the graph is constructed and how the node
representations are updated. The crystal graph is built from the structure using distances
between atoms: it is an undirected multigraph whose nodes represent atom properties and
whose edges represent connections between atoms in the crystal. Node representations are
updated using both the neighbor node and edge representations. See [1]_ for the detailed
algorithm.
Examples
--------
>>> import deepchem as dc
>>> from pymatgen.core import Lattice, Structure
>>> lattice = Lattice.cubic(4.2)
>>> structure = Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
>>> featurizer = dc.feat.CGCNNFeaturizer()
>>> cgcnn_feat = featurizer.featurize([structure])[0]
>>> print(type(cgcnn_feat))
<class 'deepchem.feat.graph_data.GraphData'>
>>> cgcnn_dgl_feat = cgcnn_feat.to_dgl_graph()
>>> print(type(cgcnn_dgl_feat))
<class 'dgl.heterograph.DGLHeteroGraph'>
>>> model = dc.models.CGCNN(mode='regression', n_tasks=2)
>>> out = model(cgcnn_dgl_feat)
>>> print(type(out))
<class 'torch.Tensor'>
>>> out.shape == (1, 2)
True
References
----------
.. [1] Xie, Tian, and Jeffrey C. Grossman. "Crystal graph convolutional neural networks
for an accurate and interpretable prediction of material properties." Physical review letters
120.14 (2018): 145301.
Notes
-----
This class requires DGL and PyTorch to be installed.
"""
def __init__(
self,
in_node_dim: int = 92,
hidden_node_dim: int = 64,
in_edge_dim: int = 41,
num_conv: int = 3,
predictor_hidden_feats: int = 128,
n_tasks: int = 1,
mode: str = 'regression',
n_classes: int = 2,
):
"""
Parameters
----------
in_node_dim: int, default 92
The length of the initial node feature vectors. The 92 is
based on length of vectors in the atom_init.json.
hidden_node_dim: int, default 64
The length of the hidden node feature vectors.
in_edge_dim: int, default 41
The length of the initial edge feature vectors. The 41 is
based on default setting of CGCNNFeaturizer.
num_conv: int, default 3
The number of convolutional layers.
predictor_hidden_feats: int, default 128
The size for hidden representations in the output MLP predictor.
n_tasks: int, default 1
The number of the output size.
mode: str, default 'regression'
The model type, 'classification' or 'regression'.
n_classes: int, default 2
The number of classes to predict (only used in classification mode).
"""
try:
import dgl
except:
raise ImportError("This class requires DGL to be installed.")
super(CGCNN, self).__init__()
if mode not in ['classification', 'regression']:
raise ValueError("mode must be either 'classification' or 'regression'")
self.n_tasks = n_tasks
self.mode = mode
self.n_classes = n_classes
self.embedding = nn.Linear(in_node_dim, hidden_node_dim)
self.conv_layers = nn.ModuleList([
CGCNNLayer(
hidden_node_dim=hidden_node_dim,
edge_dim=in_edge_dim,
batch_norm=True) for _ in range(num_conv)
])
self.pooling = dgl.mean_nodes
self.fc = nn.Linear(hidden_node_dim, predictor_hidden_feats)
if self.mode == 'regression':
self.out = nn.Linear(predictor_hidden_feats, n_tasks)
else:
self.out = nn.Linear(predictor_hidden_feats, n_tasks * n_classes)
def forward(self, dgl_graph):
"""Predict labels
Parameters
----------
dgl_graph: DGLGraph
DGLGraph for a batch of graphs. The graph expects that the node features
are stored in `ndata['x']`, and the edge features are stored in `edata['edge_attr']`.
Returns
-------
out: torch.Tensor
The output values of this model.
If mode == 'regression', the shape is `(batch_size, n_tasks)`.
If mode == 'classification', the shape is `(batch_size, n_tasks, n_classes)` (n_tasks > 1)
or `(batch_size, n_classes)` (n_tasks == 1) and the output values are probabilities of each class label.
"""
graph = dgl_graph
# embedding node features
node_feats = graph.ndata.pop('x')
edge_feats = graph.edata.pop('edge_attr')
node_feats = self.embedding(node_feats)
# convolutional layer
for conv in self.conv_layers:
node_feats = conv(graph, node_feats, edge_feats)
# pooling
graph.ndata['updated_x'] = node_feats
graph_feat = F.softplus(self.pooling(graph, 'updated_x'))
graph_feat = F.softplus(self.fc(graph_feat))
out = self.out(graph_feat)
if self.mode == 'regression':
return out
else:
logits = out.view(-1, self.n_tasks, self.n_classes)
# for n_tasks == 1 case
logits = torch.squeeze(logits)
proba = F.softmax(logits)
return proba, logits
class CGCNNModel(TorchModel):
"""Crystal Graph Convolutional Neural Network (CGCNN).
Here is a simple example of code that uses the CGCNNModel with
materials dataset.
Examples
--------
>>> import deepchem as dc
>>> dataset_config = {"reload": False, "featurizer": dc.feat.CGCNNFeaturizer(), "transformers": []}
>>> tasks, datasets, transformers = dc.molnet.load_perovskite(**dataset_config)
>>> train, valid, test = datasets
>>> model = dc.models.CGCNNModel(mode='regression', batch_size=32, learning_rate=0.001)
>>> avg_loss = model.fit(train, nb_epoch=50)
This model takes arbitrary crystal structures as input and predicts material properties
using the element information and the connections between atoms in the crystal. It can be
useful when the target property is expensive to compute directly, such as the band gap
obtained from DFT. The model is a variant of Graph Convolutional Networks; the main
differences from other GCN models are how the graph is constructed and how the node
representations are updated. The crystal graph is built from the structure using distances
between atoms: it is an undirected multigraph whose nodes represent atom properties and
whose edges represent connections between atoms in the crystal. Node representations are
updated using both the neighbor node and edge representations. See [1]_ for the detailed
algorithm.
References
----------
.. [1] Xie, Tian, and Jeffrey C. Grossman. "Crystal graph convolutional neural networks
for an accurate and interpretable prediction of material properties." Physical review letters
120.14 (2018): 145301.
Notes
-----
This class requires DGL and PyTorch to be installed.
"""
def __init__(self,
in_node_dim: int = 92,
hidden_node_dim: int = 64,
in_edge_dim: int = 41,
num_conv: int = 3,
predictor_hidden_feats: int = 128,
n_tasks: int = 1,
mode: str = 'regression',
n_classes: int = 2,
**kwargs):
"""
This class accepts all the keyword arguments from TorchModel.
Parameters
----------
in_node_dim: int, default 92
The length of the initial node feature vectors. The 92 is
based on length of vectors in the atom_init.json.
hidden_node_dim: int, default 64
The length of the hidden node feature vectors.
in_edge_dim: int, default 41
The length of the initial edge feature vectors. The 41 is
based on default setting of CGCNNFeaturizer.
num_conv: int, default 3
The number of convolutional layers.
predictor_hidden_feats: int, default 128
The size for hidden representations in the output MLP predictor.
n_tasks: int, default 1
The number of the output size.
mode: str, default 'regression'
The model type, 'classification' or 'regression'.
n_classes: int, default 2
The number of classes to predict (only used in classification mode).
kwargs: Dict
This class accepts all the keyword arguments from TorchModel.
"""
model = CGCNN(in_node_dim, hidden_node_dim, in_edge_dim, num_conv,
predictor_hidden_feats, n_tasks, mode, n_classes)
if mode == "regression":
loss: Loss = L2Loss()
output_types = ['prediction']
else:
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(CGCNNModel, self).__init__(
model, loss=loss, output_types=output_types, **kwargs)
def _prepare_batch(self, batch):
"""Create batch data for CGCNN.
Parameters
----------
batch: Tuple
The tuple are `(inputs, labels, weights)`.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: List[torch.Tensor] or None
The labels converted to torch.Tensor
weights: List[torch.Tensor] or None
The weights for each sample or sample/task pair converted to torch.Tensor
"""
try:
import dgl
except:
raise ImportError("This class requires DGL to be installed.")
inputs, labels, weights = batch
dgl_graphs = [graph.to_dgl_graph() for graph in inputs[0]]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(CGCNNModel, self)._prepare_batch(([], labels,
weights))
return inputs, labels, weights
|
deepchem/deepchem
|
deepchem/models/torch_models/cgcnn.py
|
Python
|
mit
| 13,277
|
[
"CRYSTAL",
"pymatgen"
] |
3974b263acc82729d4f1ef7cca3e6e30ffba140aa0e4d6f440f3397908a2c71c
|
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import sys
import numpy as np
import ast
import inspect
import operator
import types as pytypes
from contextlib import contextmanager
from copy import deepcopy
import numba
from numba import njit, stencil
from numba.core.utils import PYVERSION
from numba.core import types, registry
from numba.core.compiler import compile_extra, Flags
from numba.core.cpu import ParallelOptions
from numba.tests.support import tag, skip_parfors_unsupported, _32bit
from numba.core.errors import LoweringError, TypingError
import unittest
skip_unsupported = skip_parfors_unsupported
@stencil
def stencil1_kernel(a):
return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])
@stencil(neighborhood=((-5, 0), ))
def stencil2_kernel(a):
cum = a[-5]
for i in range(-4, 1):
cum += a[i]
return 0.3 * cum
@stencil(cval=1.0)
def stencil3_kernel(a):
return 0.25 * a[-2, 2]
@stencil
def stencil_multiple_input_kernel(a, b):
return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0] +
b[0, 1] + b[1, 0] + b[0, -1] + b[-1, 0])
@stencil
def stencil_multiple_input_kernel_var(a, b, w):
return w * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0] +
b[0, 1] + b[1, 0] + b[0, -1] + b[-1, 0])
@stencil
def stencil_multiple_input_mixed_types_2d(a, b, f):
return a[0, 0] if f[0, 0] else b[0, 0]
@stencil(standard_indexing=("b",))
def stencil_with_standard_indexing_1d(a, b):
return a[-1] * b[0] + a[0] * b[1]
@stencil(standard_indexing=("b",))
def stencil_with_standard_indexing_2d(a, b):
return (a[0, 1] * b[0, 1] + a[1, 0] * b[1, 0]
+ a[0, -1] * b[0, -1] + a[-1, 0] * b[-1, 0])
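# Note (illustrative, not part of the original test module): arrays listed in
# `standard_indexing` are indexed absolutely, while the other arguments use offsets
# relative to the point being computed. For stencil_with_standard_indexing_1d this
# means output[i] = a[i-1] * b[0] + a[i] * b[1], with b read at the fixed positions 0 and 1.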
@njit
def addone_njit(a):
return a + 1
if not _32bit: # prevent compilation on unsupported 32bit targets
@njit(parallel=True)
def addone_pjit(a):
return a + 1
@unittest.skipIf(PYVERSION != (3, 7), "Run under 3.7 only, AST unstable")
class TestStencilBase(unittest.TestCase):
_numba_parallel_test_ = False
def __init__(self, *args):
# flags for njit()
self.cflags = Flags()
self.cflags.nrt = True
super(TestStencilBase, self).__init__(*args)
def _compile_this(self, func, sig, flags):
return compile_extra(registry.cpu_target.typing_context,
registry.cpu_target.target_context, func, sig,
None, flags, {})
def compile_parallel(self, func, sig, **kws):
flags = Flags()
flags.nrt = True
options = True if not kws else kws
flags.auto_parallel=ParallelOptions(options)
return self._compile_this(func, sig, flags)
def compile_njit(self, func, sig):
return self._compile_this(func, sig, flags=self.cflags)
def compile_all(self, pyfunc, *args, **kwargs):
sig = tuple([numba.typeof(x) for x in args])
# compile with parallel=True
cpfunc = self.compile_parallel(pyfunc, sig)
# compile a standard njit of the original function
cfunc = self.compile_njit(pyfunc, sig)
return cfunc, cpfunc
def check(self, no_stencil_func, pyfunc, *args):
cfunc, cpfunc = self.compile_all(pyfunc, *args)
# results without stencil macro
expected = no_stencil_func(*args)
# python result
py_output = pyfunc(*args)
# njit result
njit_output = cfunc.entry_point(*args)
# parfor result
parfor_output = cpfunc.entry_point(*args)
np.testing.assert_almost_equal(py_output, expected, decimal=3)
np.testing.assert_almost_equal(njit_output, expected, decimal=3)
np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
# make sure parfor set up scheduling
self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())
class TestStencil(TestStencilBase):
def __init__(self, *args, **kwargs):
super(TestStencil, self).__init__(*args, **kwargs)
@skip_unsupported
def test_stencil1(self):
"""Tests whether the optional out argument to stencil calls works.
"""
def test_with_out(n):
A = np.arange(n**2).reshape((n, n))
B = np.zeros(n**2).reshape((n, n))
B = stencil1_kernel(A, out=B)
return B
def test_without_out(n):
A = np.arange(n**2).reshape((n, n))
B = stencil1_kernel(A)
return B
def test_impl_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.zeros(n**2).reshape((n, n))
for i in range(1, n - 1):
for j in range(1, n - 1):
B[i, j] = 0.25 * (A[i, j + 1] +
A[i + 1, j] + A[i, j - 1] + A[i - 1, j])
return B
n = 100
self.check(test_impl_seq, test_with_out, n)
self.check(test_impl_seq, test_without_out, n)
@skip_unsupported
def test_stencil2(self):
"""Tests whether the optional neighborhood argument to the stencil
decorator works.
"""
def test_seq(n):
A = np.arange(n)
B = stencil2_kernel(A)
return B
def test_impl_seq(n):
A = np.arange(n)
B = np.zeros(n)
for i in range(5, len(A)):
B[i] = 0.3 * sum(A[i - 5:i + 1])
return B
n = 100
self.check(test_impl_seq, test_seq, n)
# variable length neighborhood in numba.stencil call
# only supported in parallel path
def test_seq(n, w):
A = np.arange(n)
def stencil2_kernel(a, w):
cum = a[-w]
for i in range(-w + 1, w + 1):
cum += a[i]
return 0.3 * cum
B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ))(A, w)
return B
def test_impl_seq(n, w):
A = np.arange(n)
B = np.zeros(n)
for i in range(w, len(A) - w):
B[i] = 0.3 * sum(A[i - w:i + w + 1])
return B
n = 100
w = 5
cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp))
expected = test_impl_seq(n, w)
# parfor result
parfor_output = cpfunc.entry_point(n, w)
np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())
# test index_offsets
def test_seq(n, w, offset):
A = np.arange(n)
def stencil2_kernel(a, w):
cum = a[-w + 1]
for i in range(-w + 1, w + 1):
cum += a[i + 1]
return 0.3 * cum
B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ),
index_offsets=(-offset, ))(A, w)
return B
offset = 1
cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp,
types.intp))
parfor_output = cpfunc.entry_point(n, w, offset)
np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())
# test slice in kernel
def test_seq(n, w, offset):
A = np.arange(n)
def stencil2_kernel(a, w):
return 0.3 * np.sum(a[-w + 1:w + 2])
B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ),
index_offsets=(-offset, ))(A, w)
return B
offset = 1
cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp,
types.intp))
parfor_output = cpfunc.entry_point(n, w, offset)
np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())
@skip_unsupported
def test_stencil3(self):
"""Tests whether a non-zero optional cval argument to the stencil
decorator works. Also tests integer result type.
"""
def test_seq(n):
A = np.arange(n**2).reshape((n, n))
B = stencil3_kernel(A)
return B
test_njit = njit(test_seq)
test_par = njit(test_seq, parallel=True)
n = 5
seq_res = test_seq(n)
njit_res = test_njit(n)
par_res = test_par(n)
self.assertTrue(seq_res[0, 0] == 1.0 and seq_res[4, 4] == 1.0)
self.assertTrue(njit_res[0, 0] == 1.0 and njit_res[4, 4] == 1.0)
self.assertTrue(par_res[0, 0] == 1.0 and par_res[4, 4] == 1.0)
@skip_unsupported
def test_stencil_standard_indexing_1d(self):
"""Tests standard indexing with a 1d array.
"""
def test_seq(n):
A = np.arange(n)
B = [3.0, 7.0]
C = stencil_with_standard_indexing_1d(A, B)
return C
def test_impl_seq(n):
A = np.arange(n)
B = [3.0, 7.0]
C = np.zeros(n)
for i in range(1, n):
C[i] = A[i - 1] * B[0] + A[i] * B[1]
return C
n = 100
self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_standard_indexing_2d(self):
"""Tests standard indexing with a 2d array and multiple stencil calls.
"""
def test_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.ones((3, 3))
C = stencil_with_standard_indexing_2d(A, B)
D = stencil_with_standard_indexing_2d(C, B)
return D
def test_impl_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.ones((3, 3))
C = np.zeros(n**2).reshape((n, n))
D = np.zeros(n**2).reshape((n, n))
for i in range(1, n - 1):
for j in range(1, n - 1):
C[i, j] = (A[i, j + 1] * B[0, 1] + A[i + 1, j] * B[1, 0] +
A[i, j - 1] * B[0, -1] + A[i - 1, j] * B[-1, 0])
for i in range(1, n - 1):
for j in range(1, n - 1):
D[i, j] = (C[i, j + 1] * B[0, 1] + C[i + 1, j] * B[1, 0] +
C[i, j - 1] * B[0, -1] + C[i - 1, j] * B[-1, 0])
return D
n = 5
self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_multiple_inputs(self):
"""Tests whether multiple inputs of the same size work.
"""
def test_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.arange(n**2).reshape((n, n))
C = stencil_multiple_input_kernel(A, B)
return C
def test_impl_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.arange(n**2).reshape((n, n))
C = np.zeros(n**2).reshape((n, n))
for i in range(1, n - 1):
for j in range(1, n - 1):
C[i, j] = 0.25 * \
(A[i, j + 1] + A[i + 1, j]
+ A[i, j - 1] + A[i - 1, j]
+ B[i, j + 1] + B[i + 1, j]
+ B[i, j - 1] + B[i - 1, j])
return C
n = 3
self.check(test_impl_seq, test_seq, n)
# test stencil with a non-array input
def test_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.arange(n**2).reshape((n, n))
w = 0.25
C = stencil_multiple_input_kernel_var(A, B, w)
return C
self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_mixed_types(self):
def test_impl_seq(n):
A = np.arange(n ** 2).reshape((n, n))
B = n ** 2 - np.arange(n ** 2).reshape((n, n))
S = np.eye(n, dtype=np.bool_)
O = np.zeros((n, n), dtype=A.dtype)
for i in range(0, n):
for j in range(0, n):
O[i, j] = A[i, j] if S[i, j] else B[i, j]
return O
def test_seq(n):
A = np.arange(n ** 2).reshape((n, n))
B = n ** 2 - np.arange(n ** 2).reshape((n, n))
S = np.eye(n, dtype=np.bool_)
O = stencil_multiple_input_mixed_types_2d(A, B, S)
return O
n = 3
self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_call(self):
"""Tests 2D numba.stencil calls.
"""
def test_impl1(n):
A = np.arange(n**2).reshape((n, n))
B = np.zeros(n**2).reshape((n, n))
numba.stencil(lambda a: 0.25 * (a[0, 1] + a[1, 0] + a[0, -1]
+ a[-1, 0]))(A, out=B)
return B
def test_impl2(n):
A = np.arange(n**2).reshape((n, n))
B = np.zeros(n**2).reshape((n, n))
def sf(a):
return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])
B = numba.stencil(sf)(A)
return B
def test_impl_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.zeros(n**2).reshape((n, n))
for i in range(1, n - 1):
for j in range(1, n - 1):
B[i, j] = 0.25 * (A[i, j + 1] + A[i + 1, j]
+ A[i, j - 1] + A[i - 1, j])
return B
n = 100
self.check(test_impl_seq, test_impl1, n)
self.check(test_impl_seq, test_impl2, n)
@skip_unsupported
def test_stencil_call_1D(self):
"""Tests 1D numba.stencil calls.
"""
def test_impl(n):
A = np.arange(n)
B = np.zeros(n)
numba.stencil(lambda a: 0.3 * (a[-1] + a[0] + a[1]))(A, out=B)
return B
def test_impl_seq(n):
A = np.arange(n)
B = np.zeros(n)
for i in range(1, n - 1):
B[i] = 0.3 * (A[i - 1] + A[i] + A[i + 1])
return B
n = 100
self.check(test_impl_seq, test_impl, n)
@skip_unsupported
def test_stencil_call_const(self):
"""Tests numba.stencil call that has an index that can be inferred as
constant from a unary expr. Otherwise, this would raise an error since
neighborhood length is not specified.
"""
def test_impl1(n):
A = np.arange(n)
B = np.zeros(n)
c = 1
numba.stencil(lambda a,c : 0.3 * (a[-c] + a[0] + a[c]))(
A, c, out=B)
return B
def test_impl2(n):
A = np.arange(n)
B = np.zeros(n)
c = 2
numba.stencil(lambda a,c : 0.3 * (a[1-c] + a[0] + a[c-1]))(
A, c, out=B)
return B
# recursive expr case
def test_impl3(n):
A = np.arange(n)
B = np.zeros(n)
c = 2
numba.stencil(lambda a,c : 0.3 * (a[-c+1] + a[0] + a[c-1]))(
A, c, out=B)
return B
# multi-constant case
def test_impl4(n):
A = np.arange(n)
B = np.zeros(n)
d = 1
c = 2
numba.stencil(lambda a,c,d : 0.3 * (a[-c+d] + a[0] + a[c-d]))(
A, c, d, out=B)
return B
def test_impl_seq(n):
A = np.arange(n)
B = np.zeros(n)
c = 1
for i in range(1, n - 1):
B[i] = 0.3 * (A[i - c] + A[i] + A[i + c])
return B
n = 100
# constant inference is only possible in parallel path
cpfunc1 = self.compile_parallel(test_impl1, (types.intp,))
cpfunc2 = self.compile_parallel(test_impl2, (types.intp,))
cpfunc3 = self.compile_parallel(test_impl3, (types.intp,))
cpfunc4 = self.compile_parallel(test_impl4, (types.intp,))
expected = test_impl_seq(n)
# parfor result
parfor_output1 = cpfunc1.entry_point(n)
parfor_output2 = cpfunc2.entry_point(n)
parfor_output3 = cpfunc3.entry_point(n)
parfor_output4 = cpfunc4.entry_point(n)
np.testing.assert_almost_equal(parfor_output1, expected, decimal=3)
np.testing.assert_almost_equal(parfor_output2, expected, decimal=3)
np.testing.assert_almost_equal(parfor_output3, expected, decimal=3)
np.testing.assert_almost_equal(parfor_output4, expected, decimal=3)
# check error in regular Python path
with self.assertRaises(ValueError) as e:
test_impl4(4)
self.assertIn("stencil kernel index is not constant, "
"'neighborhood' option required", str(e.exception))
# check error in njit path
# TODO: ValueError should be thrown instead of LoweringError
with self.assertRaises(LoweringError) as e:
njit(test_impl4)(4)
self.assertIn("stencil kernel index is not constant, "
"'neighborhood' option required", str(e.exception))
@skip_unsupported
def test_stencil_parallel_off(self):
"""Tests 1D numba.stencil calls without parallel translation
turned off.
"""
def test_impl(A):
return numba.stencil(lambda a: 0.3 * (a[-1] + a[0] + a[1]))(A)
cpfunc = self.compile_parallel(test_impl, (numba.float64[:],), stencil=False)
self.assertNotIn('@do_scheduling', cpfunc.library.get_llvm_str())
@skip_unsupported
def test_stencil_nested1(self):
"""Tests whether nested stencil decorator works.
"""
@njit(parallel=True)
def test_impl(n):
@stencil
def fun(a):
c = 2
return a[-c+1]
B = fun(n)
return B
def test_impl_seq(n):
B = np.zeros(len(n), dtype=int)
for i in range(1, len(n)):
B[i] = n[i-1]
return B
n = np.arange(10)
np.testing.assert_equal(test_impl(n), test_impl_seq(n))
@skip_unsupported
def test_out_kwarg_w_cval(self):
""" Issue #3518, out kwarg did not work with cval."""
# test const value that matches the arg dtype, and one that can be cast
const_vals = [7, 7.0]
def kernel(a):
return (a[0, 0] - a[1, 0])
for const_val in const_vals:
stencil_fn = numba.stencil(kernel, cval=const_val)
def wrapped():
A = np.arange(12).reshape((3, 4))
ret = np.ones_like(A)
stencil_fn(A, out=ret)
return ret
# stencil function case
A = np.arange(12).reshape((3, 4))
expected = np.full_like(A, -4)
expected[-1, :] = const_val
ret = np.ones_like(A)
stencil_fn(A, out=ret)
np.testing.assert_almost_equal(ret, expected)
# wrapped function case, check njit, then njit(parallel=True)
impls = self.compile_all(wrapped,)
for impl in impls:
got = impl.entry_point()
np.testing.assert_almost_equal(got, expected)
# now check exceptions for cval dtype mismatch with out kwarg dtype
stencil_fn = numba.stencil(kernel, cval=1j)
def wrapped():
A = np.arange(12).reshape((3, 4))
ret = np.ones_like(A)
stencil_fn(A, out=ret)
return ret
A = np.arange(12).reshape((3, 4))
ret = np.ones_like(A)
with self.assertRaises(ValueError) as e:
stencil_fn(A, out=ret)
msg = "cval type does not match stencil return type."
self.assertIn(msg, str(e.exception))
for compiler in [self.compile_njit, self.compile_parallel]:
try:
compiler(wrapped,())
except(ValueError, LoweringError) as e:
self.assertIn(msg, str(e))
else:
raise AssertionError("Expected error was not raised")
@skip_unsupported
def test_out_kwarg_w_cval_np_attr(self):
""" Test issue #7286 where the cval is a np attr/string-based numerical
constant"""
for cval in (np.nan, np.inf, -np.inf, float('inf'), -float('inf')):
def kernel(a):
return (a[0, 0] - a[1, 0])
stencil_fn = numba.stencil(kernel, cval=cval)
def wrapped():
A = np.arange(12.).reshape((3, 4))
ret = np.ones_like(A)
stencil_fn(A, out=ret)
return ret
# stencil function case
A = np.arange(12.).reshape((3, 4))
expected = np.full_like(A, -4)
expected[-1, :] = cval
ret = np.ones_like(A)
stencil_fn(A, out=ret)
np.testing.assert_almost_equal(ret, expected)
# wrapped function case, check njit, then njit(parallel=True)
impls = self.compile_all(wrapped,)
for impl in impls:
got = impl.entry_point()
np.testing.assert_almost_equal(got, expected)
class pyStencilGenerator:
"""
Holds the classes and methods needed to generate a python stencil
implementation from a kernel purely using AST transforms.
"""
class Builder:
"""
Provides code generation for the AST manipulation pipeline.
The class methods largely produce AST nodes/trees.
"""
def __init__(self):
self.__state = 0
        # class-level pool of single-letter names used to label generated
        # loop induction variables (referenced later as self.ids)
        ids = [chr(ord(v) + x) for v in ['a', 'A'] for x in range(26)]
def varidx(self):
"""
a monotonically increasing index for use in labelling variables.
"""
tmp = self.__state
self.__state = self.__state + 1
return tmp
# builder functions
def gen_alloc_return(self, orig, var, dtype_var, init_val=0):
"""
Generates an AST equivalent to:
`var = np.full(orig.shape, init_val, dtype = dtype_var)`
"""
new = ast.Assign(
targets=[
ast.Name(
id=var,
ctx=ast.Store())],
value=ast.Call(
func=ast.Attribute(
value=ast.Name(
id='np',
ctx=ast.Load()),
attr='full',
ctx=ast.Load()),
args=[
ast.Attribute(
value=ast.Name(
id=orig,
ctx=ast.Load()),
attr='shape',
ctx=ast.Load()),
self.gen_num(init_val)],
keywords=[ast.keyword(arg='dtype',
value=self.gen_call('type', [dtype_var.id]).value)],
starargs=None,
kwargs=None),
)
return new
def gen_assign(self, var, value, index_names):
"""
Generates an AST equivalent to:
`retvar[(*index_names,)] = value[<already present indexing>]`
"""
elts_info = [ast.Name(id=x, ctx=ast.Load()) for x in index_names]
new = ast.Assign(
targets=[
ast.Subscript(
value=ast.Name(
id=var,
ctx=ast.Load()),
slice=ast.Index(
value=ast.Tuple(
elts=elts_info,
ctx=ast.Load())),
ctx=ast.Store())],
value=value)
return new
def gen_loop(self, var, start=0, stop=0, body=None):
"""
Generates an AST equivalent to a loop in `var` from
`start` to `stop` with body `body`.
"""
if isinstance(start, int):
start_val = ast.Num(n=start)
else:
start_val = start
if isinstance(stop, int):
stop_val = ast.Num(n=stop)
else:
stop_val = stop
return ast.For(
target=ast.Name(id=var, ctx=ast.Store()),
iter=ast.Call(
func=ast.Name(id='range', ctx=ast.Load()),
args=[start_val, stop_val],
keywords=[],
starargs=None, kwargs=None),
body=body, orelse=[])
def gen_return(self, var):
"""
Generates an AST equivalent to `return var`
"""
return ast.Return(value=ast.Name(id=var, ctx=ast.Load()))
def gen_slice(self, value):
"""Generates an Index with the given value"""
return ast.Index(value=ast.Num(n=value))
def gen_attr(self, name, attr):
"""
Generates AST equivalent to `name.attr`
"""
return ast.Attribute(
value=ast.Name(id=name, ctx=ast.Load()),
attr=attr, ctx=ast.Load())
def gen_subscript(self, name, attr, index, offset=None):
"""
Generates an AST equivalent to a subscript, something like:
name.attr[slice(index) +/- offset]
"""
attribute = self.gen_attr(name, attr)
slise = self.gen_slice(index)
ss = ast.Subscript(value=attribute, slice=slise, ctx=ast.Load())
if offset:
pm = ast.Add() if offset >= 0 else ast.Sub()
ss = ast.BinOp(left=ss, op=pm, right=ast.Num(n=abs(offset)))
return ss
def gen_num(self, value):
"""
Generates an ast.Num of value `value`
"""
# pretend bools are ints, ast has no boolean literal support
if isinstance(value, bool):
return ast.Num(int(value))
if abs(value) >= 0:
return ast.Num(value)
else:
return ast.UnaryOp(ast.USub(), ast.Num(-value))
def gen_call(self, call_name, args, kwargs=None):
"""
Generates an AST equivalent to a call, something like:
`call_name(*args, **kwargs)
"""
fixed_args = [ast.Name(id='%s' % x, ctx=ast.Load()) for x in args]
if kwargs is not None:
keywords = [ast.keyword(
arg='%s' %
x, value=ast.parse(str(x)).body[0].value)
for x in kwargs]
else:
keywords = []
func = ast.Name(id=call_name, ctx=ast.Load())
return ast.Expr(value=ast.Call(
func=func, args=fixed_args,
keywords=keywords,
starargs=None, kwargs=None), ctx=ast.Load())
# AST transformers
class FoldConst(ast.NodeTransformer, Builder):
"""
        Folds constant expressions so that constant expressions appearing in
        the relative indexes are easier to handle.
"""
# just support a few for testing purposes
supported_ops = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: operator.mul,
}
def visit_BinOp(self, node):
# does const expr folding
node = self.generic_visit(node)
op = self.supported_ops.get(node.op.__class__)
lhs = getattr(node, 'left', None)
rhs = getattr(node, 'right', None)
if not (lhs and rhs and op):
return node
if (isinstance(lhs, ast.Num) and
isinstance(rhs, ast.Num)):
return ast.Num(op(node.left.n, node.right.n))
else:
return node
class FixRelIndex(ast.NodeTransformer, Builder):
""" Fixes the relative indexes to be written in as
induction index + relative index
"""
def __init__(self, argnames, const_assigns,
standard_indexing, neighborhood, *args, **kwargs):
ast.NodeTransformer.__init__(self, *args, **kwargs)
pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
self._argnames = argnames
self._const_assigns = const_assigns
self._idx_len = -1
self._mins = None
self._maxes = None
self._imin = np.iinfo(int).min
self._imax = np.iinfo(int).max
self._standard_indexing = standard_indexing \
if standard_indexing else []
self._neighborhood = neighborhood
self._id_pat = '__%sn' if neighborhood else '__%s'
def get_val_from_num(self, node):
"""
Gets the literal value from a Num or UnaryOp
"""
if isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.UnaryOp):
return -node.operand.n
else:
raise ValueError(
"get_val_from_num: Unknown indexing operation")
def visit_Subscript(self, node):
"""
Transforms subscripts of the form `a[x]` and `a[x, y, z, ...]`
where `x, y, z` are relative indexes, to forms such as:
`a[x + i]` and `a[x + i, y + j, z + k]` for use in loop induced
indexing.
"""
def handle2dindex(node):
idx = []
for x, val in enumerate(node.slice.value.elts):
useval = self._const_assigns.get(val, val)
idx.append(
ast.BinOp(
left=ast.Name(
id=self._id_pat % self.ids[x],
ctx=ast.Load()),
op=ast.Add(),
right=useval,
ctx=ast.Load()))
if self._idx_len == -1:
self._idx_len = len(idx)
else:
if(self._idx_len != len(idx)):
raise ValueError(
"Relative indexing mismatch detected")
if isinstance(node.ctx, ast.Store):
msg = ("Assignments to array passed to "
"stencil kernels is not allowed")
raise ValueError(msg)
context = ast.Load()
newnode = ast.Subscript(
value=node.value,
slice=ast.Index(
value=ast.Tuple(
elts=idx,
ctx=ast.Load()),
ctx=ast.Load()),
ctx=context)
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
# now work out max/min for index ranges i.e. stencil size
if self._mins is None and self._maxes is None:
# first pass
self._mins = [self._imax] * self._idx_len
self._maxes = [self._imin] * self._idx_len
if not self._neighborhood:
for x, lnode in enumerate(node.slice.value.elts):
if isinstance(lnode, ast.Num) or\
isinstance(lnode, ast.UnaryOp):
relvalue = self.get_val_from_num(lnode)
elif (hasattr(lnode, 'id') and
lnode.id in self._const_assigns):
relvalue = self._const_assigns[lnode.id]
else:
raise ValueError(
"Cannot interpret indexing value")
if relvalue < self._mins[x]:
self._mins[x] = relvalue
if relvalue > self._maxes[x]:
self._maxes[x] = relvalue
else:
for x, lnode in enumerate(self._neighborhood):
self._mins[x] = self._neighborhood[x][0]
self._maxes[x] = self._neighborhood[x][1]
return newnode
def handle1dindex(node):
useval = self._const_assigns.get(
node.slice.value, node.slice.value)
idx = ast.BinOp(left=ast.Name(
id=self._id_pat % self.ids[0],
ctx=ast.Load()),
op=ast.Add(),
right=useval,
ctx=ast.Load())
if self._idx_len == -1:
self._idx_len = 1
else:
if(self._idx_len != 1):
raise ValueError(
"Relative indexing mismatch detected")
if isinstance(node.ctx, ast.Store):
msg = ("Assignments to array passed to "
"stencil kernels is not allowed")
raise ValueError(msg)
context = ast.Load()
newnode = ast.Subscript(
value=node.value,
slice=ast.Index(
value=idx,
ctx=ast.Load()),
ctx=context)
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
# now work out max/min for index ranges i.e. stencil size
if self._mins is None and self._maxes is None:
# first pass
self._mins = [self._imax, ]
self._maxes = [self._imin, ]
if not self._neighborhood:
if isinstance(node.slice.value, ast.Num) or\
isinstance(node.slice.value, ast.UnaryOp):
relvalue = self.get_val_from_num(node.slice.value)
elif (hasattr(node.slice.value, 'id') and
node.slice.value.id in self._const_assigns):
relvalue = self._const_assigns[node.slice.value.id]
else:
raise ValueError("Cannot interpret indexing value")
if relvalue < self._mins[0]:
self._mins[0] = relvalue
if relvalue > self._maxes[0]:
self._maxes[0] = relvalue
else:
self._mins[0] = self._neighborhood[0][0]
self._maxes[0] = self._neighborhood[0][1]
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
return newnode
def computeSlice(i, node):
def gen_idx(val, x):
useval = self._const_assigns.get(val, val)
value = self.get_val_from_num(val)
tmp = ast.BinOp(
left=ast.Name(
id=self._id_pat % self.ids[x],
ctx=ast.Load()),
op=ast.Add(),
right=useval,
ctx=ast.Load())
ast.copy_location(tmp, node)
ast.fix_missing_locations(tmp)
return tmp
newnode = ast.Slice(gen_idx(node.lower, i),
gen_idx(node.upper, i),
node.step)
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
return newnode
def computeIndex(i, node):
useval = self._const_assigns.get(node.value, node.value)
idx = ast.BinOp(left=ast.Name(
id=self._id_pat % self.ids[i],
ctx=ast.Load()),
op=ast.Add(),
right=useval,
ctx=ast.Load())
newnode = ast.Index(value=idx, ctx=ast.Load())
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
return newnode
def handleExtSlice(node):
idx = []
for i, val in enumerate(node.slice.dims):
if isinstance(val, ast.Slice):
idx.append(computeSlice(i, val))
if isinstance(val, ast.Index):
idx.append(computeIndex(i, val))
# TODO: handle more node types
if self._idx_len == -1:
self._idx_len = len(node.slice.dims)
else:
if(self._idx_len != len(node.slice.dims)):
raise ValueError(
"Relative indexing mismatch detected")
if isinstance(node.ctx, ast.Store):
msg = ("Assignments to array passed to "
"stencil kernels is not allowed")
raise ValueError(msg)
context = ast.Load()
newnode = ast.Subscript(
value=node.value,
slice=ast.ExtSlice(
dims=idx,
ctx=ast.Load()),
ctx=context
)
# now work out max/min for index ranges i.e. stencil size
if self._mins is None and self._maxes is None:
# first pass
self._mins = [self._imax] * self._idx_len
self._maxes = [self._imin] * self._idx_len
if not self._neighborhood:
for x, anode in enumerate(node.slice.dims):
if isinstance(anode, ast.Slice):
for lnode in [anode.lower, anode.upper]:
if isinstance(lnode, ast.Num) or\
isinstance(lnode, ast.UnaryOp):
relvalue = self.get_val_from_num(lnode)
elif (hasattr(lnode, 'id') and
lnode.id in self._const_assigns):
relvalue = self._const_assigns[lnode.id]
else:
raise ValueError(
"Cannot interpret indexing value")
if relvalue < self._mins[x]:
self._mins[x] = relvalue
if relvalue > self._maxes[x]:
self._maxes[x] = relvalue
else:
val = anode.value
if isinstance(val, ast.Num) or\
isinstance(val, ast.UnaryOp):
relvalue = self.get_val_from_num(val)
elif (hasattr(val, 'id') and
val.id in self._const_assigns):
relvalue = self._const_assigns[val.id]
else:
raise ValueError(
"Cannot interpret indexing value")
if relvalue < self._mins[x]:
self._mins[x] = relvalue
if relvalue > self._maxes[x]:
self._maxes[x] = relvalue
else:
for x, lnode in enumerate(self._neighborhood):
self._mins[x] = self._neighborhood[x][0]
self._maxes[x] = self._neighborhood[x][1]
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
return newnode
def handleSlice(node):
idx = computeSlice(0, node.slice)
idx.ctx=ast.Load()
if isinstance(node.ctx, ast.Store):
msg = ("Assignments to array passed to "
"stencil kernels is not allowed")
raise ValueError(msg)
context = ast.Load()
newnode = ast.Subscript(
value=node.value,
slice=idx,
ctx=context)
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
if self._idx_len == -1:
self._idx_len = 1
else:
if(self._idx_len != 1):
raise ValueError(
"Relative indexing mismatch detected")
# now work out max/min for index ranges i.e. stencil size
if self._mins is None and self._maxes is None:
# first pass
self._mins = [self._imax]
self._maxes = [self._imin]
if not self._neighborhood:
if isinstance(node.slice.value, ast.Num) or\
isinstance(node.slice.value, ast.UnaryOp):
relvalue = self.get_val_from_num(node.slice.value)
elif (hasattr(node.slice.value, 'id') and
node.slice.value.id in self._const_assigns):
relvalue = self._const_assigns[node.slice.value.id]
else:
raise ValueError("Cannot interpret indexing value")
if relvalue < self._mins[0]:
self._mins[0] = relvalue
if relvalue > self._maxes[0]:
self._maxes[0] = relvalue
else:
self._mins[0] = self._neighborhood[0][0]
self._maxes[0] = self._neighborhood[0][1]
return newnode
node = self.generic_visit(node)
if (node.value.id in self._argnames) and (
node.value.id not in self._standard_indexing):
# fancy slice
if isinstance(node.slice, ast.ExtSlice):
return handleExtSlice(node)
# plain slice
if isinstance(node.slice, ast.Slice):
return handleSlice(node)
# 2D index
if isinstance(node.slice.value, ast.Tuple):
return handle2dindex(node)
# 1D index
elif isinstance(node.slice, ast.Index):
return handle1dindex(node)
else: # unknown
raise ValueError("Unhandled subscript")
else:
return node
@property
def idx_len(self):
if self._idx_len == -1:
raise ValueError(
'Transform has not been run/no indexes found')
else:
return self._idx_len
@property
def maxes(self):
return self._maxes
@property
def mins(self):
return self._mins
@property
def id_pattern(self):
return self._id_pat
class TransformReturns(ast.NodeTransformer, Builder):
"""
Transforms return nodes into assignments.
"""
def __init__(self, relidx_info, *args, **kwargs):
ast.NodeTransformer.__init__(self, *args, **kwargs)
pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
self._relidx_info = relidx_info
self._ret_var_idx = self.varidx()
retvar = '__b%s' % self._ret_var_idx
self._retvarname = retvar
def visit_Return(self, node):
self.generic_visit(node)
nloops = self._relidx_info.idx_len
var_pattern = self._relidx_info.id_pattern
return self.gen_assign(
self._retvarname, node.value,
[var_pattern % self.ids[l] for l in range(nloops)])
@property
def ret_var_name(self):
return self._retvarname
class FixFunc(ast.NodeTransformer, Builder):
""" The main function rewriter, takes the body of the kernel and generates:
* checking function calls
* return value allocation
* loop nests
* return site
* Function definition as an entry point
"""
def __init__(self, kprops, relidx_info, ret_info,
cval, standard_indexing, neighborhood, *args, **kwargs):
ast.NodeTransformer.__init__(self, *args, **kwargs)
pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
self._original_kernel = kprops.original_kernel
self._argnames = kprops.argnames
self._retty = kprops.retty
self._relidx_info = relidx_info
self._ret_info = ret_info
self._standard_indexing = standard_indexing \
if standard_indexing else []
self._neighborhood = neighborhood if neighborhood else tuple()
self._relidx_args = [
x for x in self._argnames if x not in self._standard_indexing]
# switch cval to python type
if hasattr(cval, 'dtype'):
self.cval = cval.tolist()
else:
self.cval = cval
self.stencil_arr = self._argnames[0]
def visit_FunctionDef(self, node):
"""
Transforms the kernel function into a function that will perform
the stencil like behaviour on the kernel.
"""
self.generic_visit(node)
# this function validates arguments and is injected into the top
# of the stencil call
def check_stencil_arrays(*args, **kwargs):
# the first has to be an array due to parfors requirements
neighborhood = kwargs.get('neighborhood')
init_shape = args[0].shape
if neighborhood is not None:
if len(init_shape) != len(neighborhood):
raise ValueError("Invalid neighborhood supplied")
for x in args[1:]:
if hasattr(x, 'shape'):
if init_shape != x.shape:
raise ValueError(
"Input stencil arrays do not commute")
checksrc = inspect.getsource(check_stencil_arrays)
check_impl = ast.parse(
checksrc.strip()).body[0] # don't need module
ast.fix_missing_locations(check_impl)
checker_call = self.gen_call(
'check_stencil_arrays',
self._relidx_args,
kwargs=['neighborhood'])
nloops = self._relidx_info.idx_len
def computebound(mins, maxs):
minlim = 0 if mins >= 0 else -mins
maxlim = -maxs if maxs > 0 else 0
return (minlim, maxlim)
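            # e.g. a kernel touching a[-1] and a[1] gives mins=-1, maxes=1, so
            # computebound returns (1, -1) and the loop generated below runs
            # over range(1, a.shape[d] - 1), keeping every relative access in
            # bounds.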
var_pattern = self._relidx_info.id_pattern
loop_body = node.body
# create loop nests
loop_count = 0
for l in range(nloops):
minlim, maxlim = computebound(
self._relidx_info.mins[loop_count],
self._relidx_info.maxes[loop_count])
minbound = minlim
maxbound = self.gen_subscript(
self.stencil_arr, 'shape', loop_count, maxlim)
loops = self.gen_loop(
var_pattern % self.ids[loop_count],
minbound, maxbound, body=loop_body)
loop_body = [loops]
loop_count += 1
# patch loop location
ast.copy_location(loops, node)
_rettyname = self._retty.targets[0]
# allocate a return
retvar = self._ret_info.ret_var_name
allocate = self.gen_alloc_return(
self.stencil_arr, retvar, _rettyname, self.cval)
ast.copy_location(allocate, node)
# generate the return
returner = self.gen_return(retvar)
ast.copy_location(returner, node)
add_kwarg = [ast.arg('neighborhood', None)]
defaults = []
newargs = ast.arguments(
args=node.args.args +
add_kwarg,
defaults=defaults,
vararg=None,
kwarg=None,
kwonlyargs=[],
kw_defaults=[],
posonlyargs=[])
new = ast.FunctionDef(
name='__%s' %
node.name,
args=newargs,
body=[
check_impl,
checker_call,
self._original_kernel,
self._retty,
allocate,
loops,
returner],
decorator_list=[])
ast.copy_location(new, node)
return new
class GetKernelProps(ast.NodeVisitor, Builder):
""" Gets the argument names and other properties
of the original kernel.
"""
def __init__(self, *args, **kwargs):
ast.NodeVisitor.__init__(self, *args, **kwargs)
pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
self._argnames = None
self._kwargnames = None
self._retty = None
self._original_kernel = None
self._const_assigns = {}
def visit_FunctionDef(self, node):
if self._argnames is not None or self._kwargnames is not None:
raise RuntimeError("multiple definition of function/args?")
attr = 'arg'
self._argnames = [getattr(x, attr) for x in node.args.args]
if node.args.kwarg:
self._kwargnames = [x.arg for x in node.args.kwarg]
compute_retdtype = self.gen_call(node.name, self._argnames)
self._retty = ast.Assign(targets=[ast.Name(
id='__retdtype',
ctx=ast.Store())], value=compute_retdtype.value)
self._original_kernel = ast.fix_missing_locations(deepcopy(node))
self.generic_visit(node)
def visit_Assign(self, node):
self.generic_visit(node)
tgt = node.targets
if len(tgt) == 1:
target = tgt[0]
if isinstance(target, ast.Name):
if isinstance(node.value, ast.Num):
self._const_assigns[target.id] = node.value.n
                    elif isinstance(node.value, ast.UnaryOp):
                        # the UnaryOp wraps the literal, so inspect .op and
                        # .operand rather than the UnaryOp node itself
                        if isinstance(node.value.op, ast.UAdd):
                            self._const_assigns[target.id] = node.value.operand.n
                        else:
                            self._const_assigns[target.id] = -node.value.operand.n
@property
def argnames(self):
"""
The names of the arguments to the function
"""
return self._argnames
@property
def const_assigns(self):
"""
A map of variable name to constant for variables that are simple
constant assignments
"""
return self._const_assigns
@property
def retty(self):
"""
The return type
"""
return self._retty
@property
def original_kernel(self):
"""
The original unmutated kernel
"""
return self._original_kernel
class FixCalls(ast.NodeTransformer):
""" Fixes call sites for astor (in case it is in use) """
def visit_Call(self, node):
self.generic_visit(node)
# Add in starargs and kwargs to calls
new = ast.Call(
func=node.func,
args=node.args,
keywords=node.keywords,
starargs=None,
kwargs=None)
return new
def generate_stencil_tree(
self, func, cval, standard_indexing, neighborhood):
"""
Generates the AST tree for a stencil from:
func - a python stencil kernel
cval, standard_indexing and neighborhood as per the @stencil decorator
"""
src = inspect.getsource(func)
tree = ast.parse(src.strip())
# Prints debugging information if True.
# If astor is installed the decompilation of the AST is also printed
DEBUG = False
if DEBUG:
print("ORIGINAL")
print(ast.dump(tree))
def pipeline(tree):
""" the pipeline of manipulations """
# get the arg names
kernel_props = self.GetKernelProps()
kernel_props.visit(tree)
argnm = kernel_props.argnames
const_asgn = kernel_props.const_assigns
if standard_indexing:
for x in standard_indexing:
if x not in argnm:
msg = ("Non-existent variable "
"specified in standard_indexing")
raise ValueError(msg)
# fold consts
fold_const = self.FoldConst()
fold_const.visit(tree)
# rewrite the relative indices as induced indices
relidx_fixer = self.FixRelIndex(
argnm, const_asgn, standard_indexing, neighborhood)
relidx_fixer.visit(tree)
# switch returns into assigns
return_transformer = self.TransformReturns(relidx_fixer)
return_transformer.visit(tree)
# generate the function body and loop nests and assemble
fixer = self.FixFunc(
kernel_props,
relidx_fixer,
return_transformer,
cval,
standard_indexing,
neighborhood)
fixer.visit(tree)
# fix up the call sites so they work better with astor
callFixer = self.FixCalls()
callFixer.visit(tree)
ast.fix_missing_locations(tree.body[0])
# run the pipeline of transforms on the tree
pipeline(tree)
if DEBUG:
print("\n\n\nNEW")
print(ast.dump(tree, include_attributes=True))
try:
import astor
print(astor.to_source(tree))
except ImportError:
pass
return tree
def pyStencil(func_or_mode='constant', **options):
"""
A pure python implementation of (a large subset of) stencil functionality,
equivalent to StencilFunc.
"""
if not isinstance(func_or_mode, str):
mode = 'constant' # default style
func = func_or_mode
else:
assert isinstance(func_or_mode, str), """stencil mode should be
a string"""
mode = func_or_mode
func = None
for option in options:
if option not in ["cval", "standard_indexing", "neighborhood"]:
raise ValueError("Unknown stencil option " + option)
if mode != 'constant':
raise ValueError("Unsupported mode style " + mode)
cval = options.get('cval', 0)
standard_indexing = options.get('standard_indexing', None)
neighborhood = options.get('neighborhood', None)
# generate a new AST tree from the kernel func
gen = pyStencilGenerator()
tree = gen.generate_stencil_tree(func, cval, standard_indexing,
neighborhood)
# breathe life into the tree
mod_code = compile(tree, filename="<ast>", mode="exec")
func_code = mod_code.co_consts[0]
full_func = pytypes.FunctionType(func_code, globals())
return full_func
@skip_unsupported
class TestManyStencils(TestStencilBase):
def __init__(self, *args, **kwargs):
super(TestManyStencils, self).__init__(*args, **kwargs)
def check(self, pyfunc, *args, **kwargs):
"""
For a given kernel:
The expected result is computed from a pyStencil version of the
stencil.
The following results are then computed:
* from a pure @stencil decoration of the kernel.
* from the njit of a trivial wrapper function around the pure @stencil
decorated function.
* from the njit(parallel=True) of a trivial wrapper function around
the pure @stencil decorated function.
The results are then compared.
"""
options = kwargs.get('options', dict())
expected_exception = kwargs.get('expected_exception')
# DEBUG print output arrays
DEBUG_OUTPUT = False
# collect fails
should_fail = []
should_not_fail = []
# runner that handles fails
@contextmanager
def errorhandler(exty=None, usecase=None):
try:
yield
except Exception as e:
if exty is not None:
lexty = exty if hasattr(exty, '__iter__') else [exty, ]
found = False
for ex in lexty:
found |= isinstance(e, ex)
if not found:
raise
else:
should_not_fail.append(
(usecase, "%s: %s" %
(type(e), str(e))))
else:
if exty is not None:
should_fail.append(usecase)
if isinstance(expected_exception, dict):
pystencil_ex = expected_exception['pyStencil']
stencil_ex = expected_exception['stencil']
njit_ex = expected_exception['njit']
parfor_ex = expected_exception['parfor']
else:
pystencil_ex = expected_exception
stencil_ex = expected_exception
njit_ex = expected_exception
parfor_ex = expected_exception
stencil_args = {'func_or_mode': pyfunc}
stencil_args.update(options)
expected_present = True
try:
# ast impl
ast_impl = pyStencil(func_or_mode=pyfunc, **options)
expected = ast_impl(
*args, neighborhood=options.get('neighborhood'))
if DEBUG_OUTPUT:
print("\nExpected:\n", expected)
except Exception as ex:
# check exception is expected
with errorhandler(pystencil_ex, "pyStencil"):
raise ex
pyStencil_unhandled_ex = ex
expected_present = False
stencilfunc_output = None
with errorhandler(stencil_ex, "@stencil"):
stencil_func_impl = stencil(**stencil_args)
# stencil result
stencilfunc_output = stencil_func_impl(*args)
# wrapped stencil impl, could this be generated?
if len(args) == 1:
def wrap_stencil(arg0):
return stencil_func_impl(arg0)
elif len(args) == 2:
def wrap_stencil(arg0, arg1):
return stencil_func_impl(arg0, arg1)
elif len(args) == 3:
def wrap_stencil(arg0, arg1, arg2):
return stencil_func_impl(arg0, arg1, arg2)
else:
raise ValueError(
"Up to 3 arguments can be provided, found %s" %
len(args))
sig = tuple([numba.typeof(x) for x in args])
njit_output = None
with errorhandler(njit_ex, "njit"):
wrapped_cfunc = self.compile_njit(wrap_stencil, sig)
# njit result
njit_output = wrapped_cfunc.entry_point(*args)
parfor_output = None
with errorhandler(parfor_ex, "parfors"):
wrapped_cpfunc = self.compile_parallel(wrap_stencil, sig)
# parfor result
parfor_output = wrapped_cpfunc.entry_point(*args)
if DEBUG_OUTPUT:
print("\n@stencil_output:\n", stencilfunc_output)
print("\nnjit_output:\n", njit_output)
print("\nparfor_output:\n", parfor_output)
if expected_present:
try:
if not stencil_ex:
np.testing.assert_almost_equal(
stencilfunc_output, expected, decimal=1)
self.assertEqual(expected.dtype, stencilfunc_output.dtype)
except Exception as e:
should_not_fail.append(
('@stencil', "%s: %s" %
(type(e), str(e))))
print("@stencil failed: %s" % str(e))
try:
if not njit_ex:
np.testing.assert_almost_equal(
njit_output, expected, decimal=1)
self.assertEqual(expected.dtype, njit_output.dtype)
except Exception as e:
should_not_fail.append(('njit', "%s: %s" % (type(e), str(e))))
print("@njit failed: %s" % str(e))
try:
if not parfor_ex:
np.testing.assert_almost_equal(
parfor_output, expected, decimal=1)
self.assertEqual(expected.dtype, parfor_output.dtype)
try:
self.assertIn(
'@do_scheduling',
wrapped_cpfunc.library.get_llvm_str())
except AssertionError:
msg = 'Could not find `@do_scheduling` in LLVM IR'
raise AssertionError(msg)
except Exception as e:
should_not_fail.append(
('parfors', "%s: %s" %
(type(e), str(e))))
print("@njit(parallel=True) failed: %s" % str(e))
if DEBUG_OUTPUT:
print("\n\n")
if should_fail:
msg = ["%s" % x for x in should_fail]
raise RuntimeError(("The following implementations should have "
"raised an exception but did not:\n%s") % msg)
if should_not_fail:
impls = ["%s" % x[0] for x in should_not_fail]
errs = ''.join(["%s: Message: %s\n\n" %
x for x in should_not_fail])
str1 = ("The following implementations should not have raised an "
"exception but did:\n%s\n" % impls)
str2 = "Errors were:\n\n%s" % errs
raise RuntimeError(str1 + str2)
if not expected_present:
if expected_exception is None:
raise RuntimeError(
"pyStencil failed, was not caught/expected",
pyStencil_unhandled_ex)
def exception_dict(self, **kwargs):
d = dict()
d['pyStencil'] = None
d['stencil'] = None
d['njit'] = None
d['parfor'] = None
for k, v in kwargs.items():
d[k] = v
return d
def test_basic00(self):
"""rel index"""
def kernel(a):
return a[0, 0]
a = np.arange(12).reshape(3, 4)
self.check(kernel, a)
def test_basic01(self):
"""rel index add const"""
def kernel(a):
return a[0, 1]
a = np.arange(12.).reshape(3, 4)
self.check(kernel, a)
def test_basic02(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[0, -1]
self.check(kernel, a)
def test_basic03(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[1, 0]
self.check(kernel, a)
def test_basic04(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, 0]
self.check(kernel, a)
def test_basic05(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, 1]
self.check(kernel, a)
def test_basic06(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[1, -1]
self.check(kernel, a)
def test_basic07(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[1, 1]
self.check(kernel, a)
def test_basic08(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, -1]
self.check(kernel, a)
def test_basic09(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-2, 2]
self.check(kernel, a)
def test_basic10(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[1, 0]
self.check(kernel, a)
def test_basic11(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, 0] + a[1, 0]
self.check(kernel, a)
def test_basic12(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, 1] + a[1, -1]
self.check(kernel, a)
def test_basic13(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, -1] + a[1, 1]
self.check(kernel, a)
def test_basic14(self):
"""rel index add domain change const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + 1j
self.check(kernel, a)
def test_basic14b(self):
"""rel index add domain change const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
t = 1.j
return a[0, 0] + t
self.check(kernel, a)
def test_basic15(self):
"""two rel index, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[1, 0] + 1.
self.check(kernel, a)
def test_basic16(self):
"""two rel index OOB, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[10, 0] + 1.
# only pyStencil bounds checks
ex = self.exception_dict(pyStencil=IndexError)
self.check(kernel, a, expected_exception=ex)
def test_basic17(self):
"""two rel index boundary test, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[2, 0] + 1.
self.check(kernel, a)
def test_basic18(self):
"""two rel index boundary test, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[-2, 0] + 1.
self.check(kernel, a)
def test_basic19(self):
"""two rel index boundary test, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[0, 3] + 1.
self.check(kernel, a)
def test_basic20(self):
"""two rel index boundary test, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[0, -3] + 1.
self.check(kernel, a)
def test_basic21(self):
"""same rel, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[0, 0] + 1.
self.check(kernel, a)
def test_basic22(self):
"""rel idx const expr folding, add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[1 + 0, 0] + a[0, 0] + 1.
self.check(kernel, a)
def test_basic23(self):
"""rel idx, work in body"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
x = np.sin(10 + a[2, 1])
return a[1 + 0, 0] + a[0, 0] + x
self.check(kernel, a)
def test_basic23a(self):
"""rel idx, dead code should not impact rel idx"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
x = np.sin(10 + a[2, 1])
return a[1 + 0, 0] + a[0, 0]
self.check(kernel, a)
def test_basic24(self):
"""1d idx on 2d arr"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0] + 1.
self.check(kernel, a, expected_exception=[ValueError, TypingError])
def test_basic25(self):
"""no idx on 2d arr"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return 1.
self.check(kernel, a, expected_exception=[ValueError, LoweringError])
def test_basic26(self):
"""3d arr"""
a = np.arange(64).reshape(4, 8, 2)
def kernel(a):
return a[0, 0, 0] - a[0, 1, 0] + 1.
self.check(kernel, a)
def test_basic27(self):
"""4d arr"""
a = np.arange(128).reshape(4, 8, 2, 2)
def kernel(a):
return a[0, 0, 0, 0] - a[0, 1, 0, -1] + 1.
self.check(kernel, a)
def test_basic28(self):
"""type widen """
a = np.arange(12).reshape(3, 4).astype(np.float32)
def kernel(a):
return a[0, 0] + np.float64(10.)
self.check(kernel, a)
def test_basic29(self):
"""const index from func """
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[0, int(np.cos(0))]
self.check(kernel, a, expected_exception=[ValueError, LoweringError])
def test_basic30(self):
"""signed zeros"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-0, -0]
self.check(kernel, a)
def test_basic31(self):
"""does a const propagate? 2D"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
t = 1
return a[t, 0]
self.check(kernel, a)
@unittest.skip("constant folding not implemented")
def test_basic31b(self):
"""does a const propagate?"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
s = 1
t = 1 - s
return a[t, 0]
self.check(kernel, a)
def test_basic31c(self):
"""does a const propagate? 1D"""
a = np.arange(12.)
def kernel(a):
t = 1
return a[t]
self.check(kernel, a)
def test_basic32(self):
"""typed int index"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[np.int8(1), 0]
self.check(kernel, a, expected_exception=[ValueError, LoweringError])
def test_basic33(self):
"""add 0d array"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[0, 0] + np.array(1)
self.check(kernel, a)
def test_basic34(self):
"""More complex rel index with dependency on addition rel index"""
def kernel(a):
g = 4. + a[0, 1]
return g + (a[0, 1] + a[1, 0] + a[0, -1] + np.sin(a[-2, 0]))
a = np.arange(144).reshape(12, 12)
self.check(kernel, a)
def test_basic35(self):
"""simple cval """
def kernel(a):
return a[0, 1]
a = np.arange(12.).reshape(3, 4)
ex = self.exception_dict(
stencil=ValueError,
parfor=ValueError,
njit=LoweringError)
self.check(kernel, a, options={'cval': 5}, expected_exception=ex)
def test_basic36(self):
"""more complex with cval"""
def kernel(a):
return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
a = np.arange(12.).reshape(3, 4)
self.check(kernel, a, options={'cval': 5.})
def test_basic37(self):
"""cval is expr"""
def kernel(a):
return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
a = np.arange(12.).reshape(3, 4)
self.check(kernel, a, options={'cval': 5 + 63.})
def test_basic38(self):
"""cval is complex"""
def kernel(a):
return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
a = np.arange(12.).reshape(3, 4)
ex = self.exception_dict(
stencil=ValueError,
parfor=ValueError,
njit=LoweringError)
self.check(kernel, a, options={'cval': 1.j}, expected_exception=ex)
def test_basic39(self):
"""cval is func expr"""
def kernel(a):
return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
a = np.arange(12.).reshape(3, 4)
self.check(kernel, a, options={'cval': np.sin(3.) + np.cos(2)})
def test_basic40(self):
"""2 args!"""
def kernel(a, b):
return a[0, 1] + b[0, -2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b)
def test_basic41(self):
"""2 args! rel arrays wildly not same size!"""
def kernel(a, b):
return a[0, 1] + b[0, -2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(1.).reshape(1, 1)
self.check(
kernel, a, b, expected_exception=[
ValueError, AssertionError])
def test_basic42(self):
"""2 args! rel arrays very close in size"""
def kernel(a, b):
return a[0, 1] + b[0, -2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(9.).reshape(3, 3)
self.check(
kernel, a, b, expected_exception=[
ValueError, AssertionError])
def test_basic43(self):
"""2 args more complexity"""
def kernel(a, b):
return a[0, 1] + a[1, 2] + b[-2, 0] + b[0, -1]
a = np.arange(30.).reshape(5, 6)
b = np.arange(30.).reshape(5, 6)
self.check(kernel, a, b)
def test_basic44(self):
"""2 args, has assignment before use"""
def kernel(a, b):
a[0, 1] = 12
return a[0, 1]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel, a, b, expected_exception=[
ValueError, LoweringError])
def test_basic45(self):
"""2 args, has assignment and then cross dependency"""
def kernel(a, b):
a[0, 1] = 12
return a[0, 1] + a[1, 0]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel, a, b, expected_exception=[
ValueError, LoweringError])
def test_basic46(self):
"""2 args, has cross relidx assignment"""
def kernel(a, b):
a[0, 1] = b[1, 2]
return a[0, 1] + a[1, 0]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel, a, b, expected_exception=[
ValueError, LoweringError])
def test_basic47(self):
"""3 args"""
def kernel(a, b, c):
return a[0, 1] + b[1, 0] + c[-1, 0]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
c = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, c)
# matches pyStencil, but all ought to fail
# probably hard to detect?
def test_basic48(self):
"""2 args, has assignment before use via memory alias"""
def kernel(a):
c = a.T
c[:, :] = 10
return a[0, 1]
a = np.arange(12.).reshape(3, 4)
self.check(kernel, a)
def test_basic49(self):
"""2 args, standard_indexing on second"""
def kernel(a, b):
return a[0, 1] + b[0, 3]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
@unittest.skip("dynamic range checking not implemented")
def test_basic50(self):
"""2 args, standard_indexing OOB"""
def kernel(a, b):
return a[0, 1] + b[0, 15]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel,
a,
b,
options={
'standard_indexing': 'b'},
expected_exception=IndexError)
def test_basic51(self):
"""2 args, standard_indexing, no relidx"""
def kernel(a, b):
return a[0, 1] + b[0, 2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel, a, b, options={
'standard_indexing': [
'a', 'b']}, expected_exception=[
ValueError, LoweringError])
def test_basic52(self):
"""3 args, standard_indexing on middle arg """
def kernel(a, b, c):
return a[0, 1] + b[0, 1] + c[1, 2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(4.).reshape(2, 2)
c = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, c, options={'standard_indexing': 'b'})
def test_basic53(self):
"""2 args, standard_indexing on variable that does not exist"""
def kernel(a, b):
return a[0, 1] + b[0, 2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
ex = self.exception_dict(
pyStencil=ValueError,
stencil=Exception,
parfor=ValueError,
njit=Exception)
self.check(
kernel,
a,
b,
options={
'standard_indexing': 'c'},
expected_exception=ex)
def test_basic54(self):
"""2 args, standard_indexing, index from var"""
def kernel(a, b):
t = 2
return a[0, 1] + b[0, t]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
def test_basic55(self):
"""2 args, standard_indexing, index from more complex var"""
def kernel(a, b):
s = 1
t = 2 - s
return a[0, 1] + b[0, t]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
def test_basic56(self):
"""2 args, standard_indexing, added complexity """
def kernel(a, b):
s = 1
acc = 0
for k in b[0, :]:
acc += k
t = 2 - s - 1
return a[0, 1] + b[0, t] + acc
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
def test_basic57(self):
"""2 args, standard_indexing, split index operation """
def kernel(a, b):
c = b[0]
return a[0, 1] + c[1]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
def test_basic58(self):
"""2 args, standard_indexing, split index with broadcast mutation """
def kernel(a, b):
c = b[0] + 1
return a[0, 1] + c[1]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
def test_basic59(self):
"""3 args, mix of array, relative and standard indexing and const"""
def kernel(a, b, c):
return a[0, 1] + b[1, 1] + c
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
c = 10
self.check(kernel, a, b, c, options={'standard_indexing': ['b', 'c']})
def test_basic60(self):
"""3 args, mix of array, relative and standard indexing,
tuple pass through"""
def kernel(a, b, c):
return a[0, 1] + b[1, 1] + c[0]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
c = (10,)
# parfors does not support tuple args for stencil kernels
ex = self.exception_dict(parfor=ValueError)
self.check(
kernel, a, b, c, options={
'standard_indexing': [
'b', 'c']}, expected_exception=ex)
def test_basic61(self):
"""2 args, standard_indexing on first"""
def kernel(a, b):
return a[0, 1] + b[1, 1]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel,
a,
b,
options={
'standard_indexing': 'a'},
expected_exception=Exception)
def test_basic62(self):
"""2 args, standard_indexing and cval"""
def kernel(a, b):
return a[0, 1] + b[1, 1]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel,
a,
b,
options={
'standard_indexing': 'b',
'cval': 10.})
def test_basic63(self):
"""2 args, standard_indexing applied to relative, should fail,
non-const idx"""
def kernel(a, b):
return a[0, b[0, 1]]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12).reshape(3, 4)
ex = self.exception_dict(
pyStencil=ValueError,
stencil=ValueError,
parfor=ValueError,
njit=LoweringError)
self.check(
kernel,
a,
b,
options={
'standard_indexing': 'b'},
expected_exception=ex)
# stencil, njit, parfors all fail. Does this make sense?
def test_basic64(self):
"""1 arg that uses standard_indexing"""
def kernel(a):
return a[0, 0]
a = np.arange(12.).reshape(3, 4)
self.check(
kernel,
a,
options={
'standard_indexing': 'a'},
expected_exception=[
ValueError,
LoweringError])
def test_basic65(self):
"""basic induced neighborhood test"""
def kernel(a):
cumul = 0
for i in range(-29, 1):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-29, 0),)})
# Should this work? a[0] is out of neighborhood?
def test_basic66(self):
"""basic const neighborhood test"""
def kernel(a):
cumul = 0
for i in range(-29, 1):
cumul += a[0]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-29, 0),)})
def test_basic67(self):
"""basic 2d induced neighborhood test"""
def kernel(a):
cumul = 0
for i in range(-5, 1):
for j in range(-10, 1):
cumul += a[i, j]
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})
def test_basic67b(self):
"""basic 2d induced 1D neighborhood"""
def kernel(a):
cumul = 0
for j in range(-10, 1):
cumul += a[0, j]
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(
kernel,
a,
options={
'neighborhood': (
(-10,
0),
)},
expected_exception=[
TypingError,
ValueError])
# Should this work or is it UB? a[i, 0] is out of neighborhood?
def test_basic68(self):
"""basic 2d one induced, one cost neighborhood test"""
def kernel(a):
cumul = 0
for i in range(-5, 1):
for j in range(-10, 1):
cumul += a[i, 0]
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})
# Should this work or is it UB? a[0, 0] is out of neighborhood?
def test_basic69(self):
"""basic 2d two cost neighborhood test"""
def kernel(a):
cumul = 0
for i in range(-5, 1):
for j in range(-10, 1):
cumul += a[0, 0]
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})
def test_basic70(self):
"""neighborhood adding complexity"""
def kernel(a):
cumul = 0
zz = 12.
for i in range(-5, 1):
t = zz + i
for j in range(-10, 1):
cumul += a[i, j] + t
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})
def test_basic71(self):
"""neighborhood, type change"""
def kernel(a):
cumul = 0
for i in range(-29, 1):
k = 0.
if i > -15:
k = 1j
cumul += a[i] + k
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-29, 0),)})
def test_basic72(self):
"""neighborhood, narrower range than specified"""
def kernel(a):
cumul = 0
for i in range(-19, -3):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-29, 0),)})
def test_basic73(self):
"""neighborhood, +ve range"""
def kernel(a):
cumul = 0
for i in range(5, 11):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((5, 10),)})
def test_basic73b(self):
"""neighborhood, -ve range"""
def kernel(a):
cumul = 0
for i in range(-10, -4):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-10, -5),)})
def test_basic74(self):
"""neighborhood, -ve->+ve range span"""
def kernel(a):
cumul = 0
for i in range(-5, 11):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-5, 10),)})
def test_basic75(self):
"""neighborhood, -ve->-ve range span"""
def kernel(a):
cumul = 0
for i in range(-10, -1):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-10, -2),)})
def test_basic76(self):
"""neighborhood, mixed range span"""
def kernel(a):
cumul = 0
zz = 12.
for i in range(-3, 0):
t = zz + i
for j in range(-3, 4):
cumul += a[i, j] + t
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'neighborhood': ((-3, -1), (-3, 3),)})
def test_basic77(self):
""" neighborhood, two args """
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[i, j]
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, b, options={'neighborhood': ((-3, 0), (-3, 0),)})
def test_basic78(self):
""" neighborhood, two args, -ve range, -ve range """
def kernel(a, b):
cumul = 0
for i in range(-6, -2):
for j in range(-7, -1):
cumul += a[i, j] + b[i, j]
return cumul / (9.)
a = np.arange(15. * 20.).reshape(15, 20)
b = np.arange(15. * 20.).reshape(15, 20)
self.check(
kernel, a, b, options={
'neighborhood': (
(-6, -3), (-7, -2),)})
def test_basic78b(self):
""" neighborhood, two args, -ve range, +ve range """
def kernel(a, b):
cumul = 0
for i in range(-6, -2):
for j in range(2, 10):
cumul += a[i, j] + b[i, j]
return cumul / (9.)
a = np.arange(15. * 20.).reshape(15, 20)
b = np.arange(15. * 20.).reshape(15, 20)
self.check(kernel, a, b, options={'neighborhood': ((-6, -3), (2, 9),)})
def test_basic79(self):
""" neighborhood, two incompatible args """
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[i, j]
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = np.arange(10. * 20.).reshape(10, 10, 2)
ex = self.exception_dict(
pyStencil=ValueError,
stencil=TypingError,
parfor=TypingError,
njit=TypingError)
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0),)}, expected_exception=ex)
def test_basic80(self):
""" neighborhood, type change """
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = 12.j
self.check(kernel, a, b, options={'neighborhood': ((-3, 0), (-3, 0))})
def test_basic81(self):
""" neighborhood, dimensionally incompatible arrays """
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[i]
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = a[0].copy()
ex = self.exception_dict(
pyStencil=ValueError,
stencil=TypingError,
parfor=AssertionError,
njit=TypingError)
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0))}, expected_exception=ex)
def test_basic82(self):
""" neighborhood, with standard_indexing"""
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[1, 3]
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = a.copy()
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0)), 'standard_indexing': 'b'})
def test_basic83(self):
""" neighborhood, with standard_indexing and cval"""
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[1, 3]
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = a.copy()
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0)), 'standard_indexing': 'b', 'cval': 1.5})
def test_basic84(self):
""" kernel calls njit """
def kernel(a):
return a[0, 0] + addone_njit(a[0, 1])
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a)
def test_basic85(self):
""" kernel calls njit(parallel=True)"""
def kernel(a):
return a[0, 0] + addone_pjit(a[0, 1])
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a)
# njit/parfors fail correctly, but the error message isn't very informative
def test_basic86(self):
""" bad kwarg """
def kernel(a):
return a[0, 0]
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'bad': 10},
expected_exception=[ValueError, TypingError])
def test_basic87(self):
""" reserved arg name in use """
def kernel(__sentinel__):
return __sentinel__[0, 0]
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a)
def test_basic88(self):
""" use of reserved word """
def kernel(a, out):
return out * a[0, 1]
a = np.arange(12.).reshape(3, 4)
ex = self.exception_dict(
pyStencil=ValueError,
stencil=ValueError,
parfor=ValueError,
njit=LoweringError)
self.check(
kernel,
a,
1.0,
options={},
expected_exception=ex)
def test_basic89(self):
""" basic multiple return"""
def kernel(a):
if a[0, 1] > 10:
return 10.
elif a[0, 3] < 8:
return a[0, 0]
else:
return 7.
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a)
def test_basic90(self):
""" neighborhood, with standard_indexing and cval, multiple returns"""
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[1, 3]
res = cumul / (9.)
if res > 200.0:
return res + 1.0
else:
return res
a = np.arange(10. * 20.).reshape(10, 20)
b = a.copy()
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0)), 'standard_indexing': 'b', 'cval': 1.5})
def test_basic91(self):
""" Issue #3454, const(int) == const(int) evaluating incorrectly. """
def kernel(a):
b = 0
if(2 == 0):
b = 2
return a[0, 0] + b
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a)
def test_basic92(self):
""" Issue #3497, bool return type evaluating incorrectly. """
def kernel(a):
return (a[-1, -1] ^ a[-1, 0] ^ a[-1, 1] ^
a[0, -1] ^ a[0, 0] ^ a[0, 1] ^
a[1, -1] ^ a[1, 0] ^ a[1, 1])
A = np.array(np.arange(20) % 2).reshape(4, 5).astype(np.bool_)
self.check(kernel, A)
def test_basic93(self):
""" Issue #3497, bool return type evaluating incorrectly. """
def kernel(a):
return (a[-1, -1] ^ a[-1, 0] ^ a[-1, 1] ^
a[0, -1] ^ a[0, 0] ^ a[0, 1] ^
a[1, -1] ^ a[1, 0] ^ a[1, 1])
A = np.array(np.arange(20) % 2).reshape(4, 5).astype(np.bool_)
self.check(kernel, A, options={'cval': True})
def test_basic94(self):
""" Issue #3528. Support for slices. """
def kernel(a):
return np.median(a[-1:2, -1:2])
a = np.arange(20, dtype=np.uint32).reshape(4, 5)
self.check(kernel, a, options={'neighborhood': ((-1, 1), (-1, 1),)})
@unittest.skip("not yet supported")
def test_basic95(self):
""" Slice, calculate neighborhood. """
def kernel(a):
return np.median(a[-1:2, -3:4])
a = np.arange(20, dtype=np.uint32).reshape(4, 5)
self.check(kernel, a)
def test_basic96(self):
""" 1D slice. """
def kernel(a):
return np.median(a[-1:2])
a = np.arange(20, dtype=np.uint32)
self.check(kernel, a, options={'neighborhood': ((-1, 1),)})
@unittest.skip("not yet supported")
def test_basic97(self):
""" 2D slice and index. """
def kernel(a):
return np.median(a[-1:2, 3])
a = np.arange(20, dtype=np.uint32).reshape(4, 5)
self.check(kernel, a)
def test_basic98(self):
""" Test issue #7286 where the cval is a np attr/string-based numerical
constant"""
for cval in (np.nan, np.inf, -np.inf, float('inf'), -float('inf')):
def kernel(a):
return a[0, 0]
a = np.arange(6.).reshape((2, 3))
self.check(kernel, a, options={'neighborhood': ((-1, 1), (-1, 1),),
'cval':cval})
if __name__ == "__main__":
unittest.main()
|
cpcloud/numba
|
numba/tests/test_stencils.py
|
Python
|
bsd-2-clause
| 98,211
|
[
"VisIt"
] |
ffed9ebab5c60a24cd8ebc5f62cd988c4c67a6b177a2d5870ea1baf4605b9c4d
|
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import lxml.etree
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from pywps import __version__
from pywps import Process
from pywps.inout import LiteralInput, LiteralOutput, ComplexInput, ComplexOutput, BoundingBoxInput, BoundingBoxOutput
from pywps.inout import Format
from pywps.app.Common import Metadata
import re
import logging
logging.disable(logging.CRITICAL)
class DocExampleProcess(Process):
"""This first line is going to be skipped by the :skiplines:1 option.
Notes
-----
This is additional documentation that can be added following the Numpy docstring convention.
"""
def __init__(self):
inputs = [LiteralInput('literal_input', "Literal input title", 'integer', "Literal input value abstract.",
min_occurs=0, max_occurs=1, uoms=['meters', 'feet'], default=1),
LiteralInput('date_input', 'The title is shown when no abstract is provided.', 'date',
allowed_values=['2000-01-01', '2018-01-01']),
ComplexInput('complex_input', 'Complex input title',
[Format('application/json'), Format('application/x-netcdf')],
abstract="Complex input abstract.", ),
BoundingBoxInput('bb_input', 'BoundingBox input title', ['EPSG:4326', ],
metadata=[Metadata('EPSG.io', 'http://epsg.io/'), ]),
]
outputs = [LiteralOutput('literal_output', 'Literal output title', 'boolean', 'Boolean output abstract.',),
ComplexOutput('complex_output', 'Complex output', [Format('text/plain'), ], ),
BoundingBoxOutput('bb_output', 'BoundingBox output title', ['EPSG:4326', ])]
super(DocExampleProcess, self).__init__(
self._handler,
identifier='doc_example_process_identifier',
title="Process title",
abstract="Multiline process abstract.",
version="4.0",
metadata=[Metadata('PyWPS docs', 'http://pywps.org'),
Metadata('NumPy docstring conventions',
'https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt')],
inputs=inputs,
outputs=outputs,
)
def _handler(self, request, response):
pass
class WpsClient(Client):
def post_xml(self, *args, **kwargs):
doc = kwargs.pop('doc')
data = lxml.etree.tostring(doc, pretty_print=True)
kwargs['data'] = data
return self.post(*args, **kwargs)
class WpsTestResponse(BaseResponse):
def __init__(self, *args):
super(WpsTestResponse, self).__init__(*args)
if re.match(r'text/xml(;\s*charset=.*)?', self.headers.get('Content-Type')):
self.xml = lxml.etree.fromstring(self.get_data())
def xpath(self, path):
version = self.xml.attrib["version"]
if version == "2.0.0":
from pywps import namespaces200
namespaces = namespaces200
else:
from pywps import namespaces100
namespaces = namespaces100
return self.xml.xpath(path, namespaces=namespaces)
def xpath_text(self, path):
return ' '.join(e.text for e in self.xpath(path))
def client_for(service):
return WpsClient(service, WpsTestResponse)
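# Example (a minimal sketch, not part of this module; assumes pywps.Service is
# available and wraps the DocExampleProcess defined above):
#
#     from pywps import Service
#     client = client_for(Service(processes=[DocExampleProcess()]))
#     resp = client.get('?service=WPS&request=GetCapabilities&version=1.0.0')
#     assert resp.status_code == 200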
def assert_response_accepted(resp):
assert resp.status_code == 200
assert re.match(r'text/xml(;\s*charset=.*)?', resp.headers['Content-Type'])
success = resp.xpath_text('/wps:ExecuteResponse'
'/wps:Status'
'/wps:ProcessAccepted')
    assert success  # xpath_text always returns a string, so check it is non-empty
# TODO: assert status URL is present
def assert_process_started(resp):
assert resp.status_code == 200
assert re.match(r'text/xml(;\s*charset=.*)?', resp.headers['Content-Type'])
    success = resp.xpath_text('/wps:ExecuteResponse'
                              '/wps:Status'
                              '/wps:ProcessStarted')
    # Is it still like this in PyWPS-4 ?
    assert success.split()[0] == "processstarted"
def assert_response_success(resp):
assert resp.status_code == 200
assert re.match(r'text/xml(;\s*charset=.*)?', resp.headers['Content-Type'])
success = resp.xpath('/wps:ExecuteResponse/wps:Status/wps:ProcessSucceeded')
assert len(success) == 1
def assert_process_exception(resp, code=None):
assert resp.status_code == 400
assert re.match(r'text/xml(;\s*charset=.*)?', resp.headers['Content-Type'])
elem = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert elem[0].attrib['exceptionCode'] == code
def assert_pywps_version(resp):
    # the PyWPS version comment is the sibling immediately preceding the root element
root_firstchild = resp.xpath('/*')[0].getprevious()
assert isinstance(root_firstchild, lxml.etree._Comment)
tokens = root_firstchild.text.split()
assert len(tokens) == 2
assert tokens[0] == 'PyWPS'
assert tokens[1] == __version__
def assert_wps_version(response, version="1.0.0"):
elem = response.xpath('/wps:Capabilities'
'/ows:ServiceIdentification'
'/ows:ServiceTypeVersion')
found_version = elem[0].text
assert version == found_version
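    # dump the capabilities document to disk for offline inspection (debugging aid)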
with open("/tmp/out.xml", "wb") as out:
out.writelines(response.response)
|
tomkralidis/pywps
|
pywps/tests.py
|
Python
|
mit
| 5,694
|
[
"NetCDF"
] |
a4b69406140c0e9e03bfa4e63c8362edd6835398a6eca4d4722d6fecab8da88e
|
# This is a modification of GPflow/vgp.py by Keisuke Fujii.
#
# The original source file is distributed at
# https://github.com/GPflow/GPflow/blob/master/GPflow/svgp.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
from GPflow.densities import gaussian
from GPflow.model import GPModel
from GPflow import transforms,kullback_leiblers
from GPflow.param import AutoFlow
from GPflow.tf_wraps import eye
from GPflow._settings import settings
from .mean_functions import Zero
from .param import Param, DataHolder
from . import conditionals
float_type = settings.dtypes.float_type
np_float_type = np.float32 if float_type is tf.float32 else np.float64
class StVGP(GPModel):
"""
Stochastic approximation of the Variational Gaussian process
"""
def __init__(self, X, Y, kern, likelihood,
mean_function=None, num_latent=None,
q_diag=False,
KL_analytic=False,
num_samples=20):
"""
X is a data matrix, size N x D
Y is a data matrix, size N x R
kern, likelihood, mean_function are appropriate GPflow objects
q_diag: True for diagonal approximation of q.
KL_analytic: True for the use of the analytical expression for KL.
num_samples: number of samples to approximate the posterior.
"""
self.num_data = X.shape[0] # number of data, n
        self.num_latent = num_latent or Y.shape[1] # number of latent functions, R
self.num_samples = num_samples # number of samples to approximate integration, N
if mean_function is None:
mean_function = Zero(self.num_latent)
# if minibatch_size is not None, Y is stored as MinibatchData.
# Note that X is treated as DataHolder.
Y = DataHolder(Y, on_shape_change='recompile')
X = DataHolder(X, on_shape_change='recompile')
GPModel.__init__(self, X, Y, kern, likelihood, mean_function)
# variational parameter.
# Mean of the posterior
self.q_mu = Param(np.zeros((self.num_data, self.num_latent)))
        # If true, a mean-field approximation is made.
self.q_diag = q_diag
# Sqrt of the covariance of the posterior
if self.q_diag:
self.q_sqrt = Param(np.ones((self.num_data, self.num_latent)),
transforms.positive)
else:
q_sqrt = np.array([np.eye(self.num_data)
for _ in range(self.num_latent)]).swapaxes(0, 2)
            self.q_sqrt = Param(q_sqrt)  # transform temporarily removed: transforms.LowerTriangular(q_sqrt.shape[2])
self.KL_analytic = KL_analytic
def _compile(self, optimizer=None, **kw):
"""
Before calling the standard compile function, check to see if the size
of the data has changed and add variational parameters appropriately.
This is necessary because the shape of the parameters depends on the
shape of the data.
"""
if not self.num_data == self.X.shape[0]:
self.num_data = self.X.shape[0]
self.q_mu = Param(np.zeros((self.num_data, self.num_latent)))
if self.q_diag:
self.q_sqrt = Param(np.ones((self.num_data, self.num_latent)),
transforms.positive)
else:
q_sqrt = np.array([np.eye(self.num_data)
for _ in range(self.num_latent)]).swapaxes(0, 2)
                self.q_sqrt = Param(q_sqrt)  # transform temporarily removed: transforms.LowerTriangular(q_sqrt.shape[2])
return super(StVGP, self)._compile(optimizer=optimizer, **kw)
def build_likelihood(self):
"""
This method computes the variational lower bound on the likelihood, with
stochastic approximation.
"""
f_samples = self._sample(self.num_samples)
        # In the likelihood, the dimensions of f_samples and self.Y must match.
lik = tf.reduce_sum(self.likelihood.logp(f_samples, self.Y))
if not self.KL_analytic:
return (lik - self._KL)/self.num_samples
else:
return lik/self.num_samples - self._analytical_KL()
def build_predict(self, Xnew, full_cov=False):
"""
Prediction of the latent functions.
        The posterior is approximated by a multivariate Gaussian distribution.
:param tf.tensor Xnew: Coordinate where the prediction should be made.
:param bool full_cov: True for return full covariance.
        :return tf.tensor mean: The posterior mean sized [n,R]
        :return tf.tensor var: The posterior variance sized [n,R] for full_cov=False,
                                        [n,n,R] for full_cov=True.
"""
mu, var = conditionals.conditional(Xnew, self.X, self.kern, self.q_mu,
q_sqrt=self.q_sqrt, full_cov=full_cov, whiten=True)
return mu + self.mean_function(Xnew), var
@AutoFlow((tf.int32, []))
def sample_F(self, n_sample):
"""
Get samples of the latent function values at the observation points.
:param integer n_sample: number of samples.
:return tf.tensor: Samples sized [N,n,R]
"""
f_samples = self._sample(n_sample[0])
return self.likelihood.sample_F(f_samples)
@AutoFlow((tf.int32, []))
def sample_Y(self, n_sample):
"""
        Get samples of the observations at the observation points.
:param integer n_sample: number of samples.
:return tf.tensor: Samples sized [N,n,R]
"""
f_samples = self._sample(n_sample[0])
return self.likelihood.sample_Y(f_samples)
def _sample(self, N):
"""
:param integer N: number of samples
:Returns
samples picked from the variational posterior.
            The Kullback-Leibler divergence is stored as self._KL
"""
n = self.num_data
R = self.num_latent
# Match dimension of the posterior variance to the data.
if self.q_diag:
sqrt = tf.batch_matrix_diag(tf.transpose(self.q_sqrt)) # [R,n,n]
else:
sqrt = tf.batch_matrix_band_part(
tf.transpose(self.q_sqrt,[2,0,1]), -1, 0) # [R,n,n]
# Log determinant of matrix S = q_sqrt * q_sqrt^T
logdet_S = tf.cast(N, float_type)*tf.reduce_sum(
tf.log(tf.square(tf.batch_matrix_diag_part(sqrt))))
sqrt = tf.tile(tf.expand_dims(sqrt, 1), [1,N,1,1]) # [R,N,n,n]
        # normal random samples, [R,N,n,1]
v_samples = tf.random_normal([R,N,n,1], dtype=float_type)
# Match dimension of the posterior mean, [R,N,n,1]
mu = tf.tile(tf.expand_dims(tf.expand_dims(
tf.transpose(self.q_mu), 1), -1), [1,N,1,1])
u_samples = mu + tf.batch_matmul(sqrt, v_samples)
        # Stochastic approximation of the Kullback-Leibler divergence KL[q(f)||p(f)]
self._KL = - 0.5 * logdet_S\
- 0.5 * tf.reduce_sum(tf.square(v_samples)) \
+ 0.5 * tf.reduce_sum(tf.square(u_samples))
# Cholesky factor of kernel [R,N,n,n]
L = tf.tile(tf.expand_dims(
tf.transpose(self.kern.Cholesky(self.X), [2,0,1]),1), [1,N,1,1])
# mean, sized [N,n,R]
mean = tf.tile(tf.expand_dims(
self.mean_function(self.X),
0), [N,1,1])
# sample from posterior, [N,n,R]
f_samples = tf.transpose(
tf.squeeze(tf.batch_matmul(L, u_samples),[-1]), # [R,N,n]
[1,2,0]) + mean
        # return the posterior samples, sized [N,n,R]
return f_samples
def _analytical_KL(self):
"""
Analytically evaluate KL
"""
if self.q_diag:
KL = kullback_leiblers.gauss_kl_white_diag(self.q_mu, self.q_sqrt)
else:
KL = kullback_leiblers.gauss_kl_white(self.q_mu, self.q_sqrt)
return KL
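# Example (a minimal usage sketch, not part of this module; the kernel must expose
# a Cholesky() method as used in _sample above, so a GPinv kernel and likelihood
# are assumed; the module paths below are hypothetical):
#
#     import numpy as np
#     from GPinv.kernels import RBF            # assumed path
#     from GPinv.likelihoods import Gaussian   # assumed path
#     X = np.random.rand(30, 1)
#     Y = np.sin(12. * X) + 0.1 * np.random.randn(30, 1)
#     m = StVGP(X, Y, kern=RBF(1), likelihood=Gaussian(), num_samples=10)
#     m.optimize(maxiter=200)
#     mean, var = m.predict_f(np.linspace(0., 1., 100)[:, None])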
|
fujii-team/GPinv
|
GPinv/stvgp.py
|
Python
|
apache-2.0
| 8,670
|
[
"Gaussian"
] |
8f8cee003baa6daa30f20d5db9c1e293004a3d7fe99909d4cd4bb36363242f68
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# This script shows the result of an ideal highpass filter in spatial domain
# Image pipeline
createReader = vtk.vtkImageReader2Factory()
filename = str(VTK_DATA_ROOT) + "/Data/fullhead15.png"
reader = createReader.CreateImageReader2(filename)
reader.SetFileName(filename)
fft = vtk.vtkImageFFT()
fft.SetInputConnection(reader.GetOutputPort())
highPass = vtk.vtkImageIdealHighPass()
highPass.SetInputConnection(fft.GetOutputPort())
highPass.SetXCutOff(0.1)
highPass.SetYCutOff(0.1)
highPass.ReleaseDataFlagOff()
rfft = vtk.vtkImageRFFT()
rfft.SetInputConnection(highPass.GetOutputPort())
real = vtk.vtkImageExtractComponents()
real.SetInputConnection(rfft.GetOutputPort())
real.SetComponents(0)
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(real.GetOutputPort())
viewer.SetColorWindow(500)
viewer.SetColorLevel(0)
viewer.Render()
reader.UnRegister(viewer) # not needed in python
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Imaging/Core/Testing/Python/IdealHighPass.py
|
Python
|
bsd-3-clause
| 1,105
|
[
"VTK"
] |
55fd4b5d8dc498ed19afc2d4af8cd786040dca1c8e1432cc2f41f8851717ea64
|
###
### GPAW benchmark: Carbon Nanotube
###
from __future__ import print_function
from gpaw.mpi import size, rank
from gpaw import GPAW, Mixer, PoissonSolver, ConvergenceError
from gpaw.occupations import FermiDirac
try:
from ase.build import nanotube
except ImportError:
from ase.structure import nanotube
try:
from gpaw import use_mic
except ImportError:
use_mic = False
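# enable CUDA whenever the gpaw build exposes the use_cuda flag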
try:
from gpaw import use_cuda
use_cuda = True
except ImportError:
use_cuda = False
use_cpu = not (use_mic or use_cuda)
# dimensions of the nanotube
n = 6
m = 6
length = 10
# other parameters
txt = 'output.txt'
maxiter = 16
conv = {'eigenstates' : 1e-4, 'density' : 1e-2, 'energy' : 1e-3}
# uncomment to use ScaLAPACK
#parallel = {'sl_auto': True}
# output benchmark parameters
if rank == 0:
print("#"*60)
print("GPAW benchmark: Carbon Nanotube")
print(" nanotube dimensions: n=%d, m=%d, length=%d" % (n, m, length))
print(" MPI tasks: %d" % size)
print(" using CUDA (GPGPU): " + str(use_cuda))
print(" using pyMIC (KNC) : " + str(use_mic))
print(" using CPU (or KNL): " + str(use_cpu))
print("#"*60)
print("")
# setup parameters
args = {'h': 0.2,
'nbands': -60,
'occupations': FermiDirac(0.1),
'mixer': Mixer(0.1, 5, 50),
'poissonsolver': PoissonSolver(eps=1e-12),
'eigensolver': 'rmm-diis',
'maxiter': maxiter,
'convergence': conv,
'xc_thread': False,
'txt': txt}
if use_cuda:
args['gpu'] = {'cuda': True, 'hybrid_blas': True}
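# pass ScaLAPACK settings to GPAW if the optional 'parallel' dict was defined above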
try:
    args['parallel'] = parallel
except NameError:
    pass
# setup the system
atoms = nanotube(n, m, length)
atoms.center(vacuum=4.068, axis=0)
atoms.center(vacuum=4.068, axis=1)
calc = GPAW(**args)
atoms.set_calculator(calc)
# execute the run
try:
atoms.get_potential_energy()
except ConvergenceError:
pass
|
mlouhivu/gpaw-accelerator-benchmarks
|
carbon-nanotube/input.py
|
Python
|
mit
| 1,861
|
[
"ASE",
"GPAW"
] |
321276664b823418772c37b910bbca7ac6c1731b7dfea717fe88b153d152c233
|
#!/usr/bin/env python
# import modules
from netCDF4 import Dataset as nc
from optparse import OptionParser
import os, stat, datetime, csv, re
from collections import OrderedDict as od
from numpy import zeros, array, concatenate, savetxt, double, logical_and
# search for patterns in variable list
def isin(var, varlist):
vararr = array(varlist)
patt = re.compile(var + '_*')
matches = array([bool(patt.match(v)) for v in vararr])
return list(vararr[matches])
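# e.g. isin('pr', ['pr', 'pr_1', 'tmax']) -> ['pr', 'pr_1']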
# parse inputs
parser = OptionParser()
parser.add_option("-i", "--input", dest = "inputfile", default = "Generic.psims.nc", type = "string",
help = "NetCDF3 file to parse", metavar = "FILE")
parser.add_option("-v", "--variables", dest = "variables", default = "tmin,tmax,pr,rsds", type = "string",
help = "Comma-separated list of variables to parse", metavar = "FILE")
parser.add_option("-c", "--conc", dest = "conc", default = "NASA.CO2.Annual.1850-2013.csv", type = "string",
help = "CSV file containing annual CO2 concentration", metavar = "FILE")
parser.add_option("-o", "--output", dest = "outputfile", default = "Generic.met", type = "string",
help = "Output met file", metavar = "FILE")
(options, args) = parser.parse_args()
# open netcdf file for reading
infile = nc(options.inputfile, 'r', format = 'NETCDF3_CLASSIC')
# variable list
variables = options.variables.split(',')
# read the annual CO2 concentration CSV file
csvfile = csv.reader(open(options.conc))
csvdata = []
for row in csvfile:
csvdata.append(row)
csvdata = array(csvdata[2 :])
csvyears = csvdata[:, 0].astype(int)
csvco2 = csvdata[:, 1].astype(double)
# get time
vlist = infile.variables.keys()
if 'time' in vlist: # make sure time is in file
time = infile.variables['time'][:]
time_units = infile.variables['time'].units
nt = len(time)
else:
raise Exception('Missing variable time')
# get all data
var_lists = od([('radn', ['solar', 'rad', 'rsds', 'srad']), \
('maxt', ['tmax', 'tasmax']), \
('mint', ['tmin', 'tasmin']), \
('rain', ['precip', 'pr', 'rain']), \
('hur', ['hur', 'rh']), \
('wind', ['wind', 'windspeed'])])
var_names = array(var_lists.keys()); nv = len(var_names)
alldata = zeros((nt, nv)) # one column per met variable
alldata[:, 1 : 4] = 999. # missing-data flag for maxt, mint, rain
for i in range(nv):
var_name = var_names[i]
var_list = var_lists[var_name]
found_var = False
for v in var_list:
matchvar = isin(v, variables)
if matchvar != []:
matchvar = matchvar[0] # take first match
if matchvar in vlist:
alldata[:, i] = infile.variables[matchvar][:].squeeze()
if 'units' in infile.variables[matchvar].ncattrs():
units = infile.variables[matchvar].units
# convert units, if necessary
if var_name == 'radn' and units == 'W m-2': # solar (MJ m-2)
alldata[:, i] *= 0.0864
elif (var_name == 'maxt' or var_name == 'mint') and units == 'K': # temperature (oC)
alldata[:, i] -= 273.15
elif var_name == 'rain' and units == 'kg m-2 s-1': # precip (mm)
alldata[:, i] *= 86400
elif var_name == 'hur' and units == '%': # hur (0-1)
alldata[:, i] /= 100.
elif var_name == 'wind': # wind (m s-1)
if units == 'km day-1':
alldata[:, i] *= 1000. / 86400
elif units == 'km h-1':
alldata[:, i] *= 1000. / 3600
elif units == 'miles h-1':
alldata[:, i] *= 1609.34 / 3600
found_var = True
break
if not found_var and var_name in ['radn', 'maxt', 'mint', 'rain']:
raise Exception('Missing necessary variable {:s}'.format(var_name))
# close input file
infile.close()
# compute day, month, year for every entry
yr0, mth0, day0 = time_units.split('days since ')[1].split(' ')[0].split('-')[0 : 3]
hr0, min0, sec0 = time_units.split('days since ')[1].split(' ')[1].split(':')[0 : 3]
ref = datetime.datetime(int(yr0), int(mth0), int(day0), int(hr0), int(min0), int(sec0))
datear = array([ref + datetime.timedelta(int(t)) for t in time])
days = array([d.day for d in datear]).reshape((nt, 1)) # convert to numpy array
months = array([d.month for d in datear]).reshape((nt, 1))
years = array([d.year for d in datear]).reshape((nt, 1))
# write output file
yj = array([int(d.strftime('%Y%j')) for d in datear]) # year + julian day
with open(options.outputfile, 'w') as f:
for y in range(years[0], years[-1] + 1):
# first day
idx = yj == y * 1000 + 1
ymd = array([y, 1, 1]).reshape(1, 3)
co2 = csvco2[csvyears == y].reshape(1, 1)
mat = concatenate((ymd, alldata[idx], co2), axis = 1)
savetxt(f, mat, fmt = ['%6d', '%4d', '%4d'] + ['%6.1f'] * 4 + ['%6.2f', '%6.1f', ' %-.1f'], delimiter = '')
# subsequent days
idx = logical_and(yj > y * 1000 + 1, yj < y * 1000 + 1001)
ymd = array(zip(years[idx], months[idx], days[idx])).reshape(sum(idx), 3)
mat = concatenate((ymd, alldata[idx]), axis = 1)
savetxt(f, mat, fmt = ['%6d', '%4d', '%4d'] + ['%6.1f'] * 4 + ['%6.2f', '%6.1f'], delimiter = '')
# change permissions
f = os.open(options.outputfile, os.O_RDONLY)
os.fchmod(f, stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
os.close(f)
|
glotter/psims
|
data/tapps/psims2dly.py
|
Python
|
agpl-3.0
| 5,653
|
[
"NetCDF"
] |
26b4c9117dd4bbc3f7994bd8b8c6cd618de0248ccbcd4ad7dc063f432aa96865
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=protected-access
import os
import argparse
from azure.cli.core.commands.validators import validate_key_value_pairs
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.util import get_file_json, shell_safe_json_parse
from azure.cli.command_modules.storage._client_factory import (get_storage_data_service_client,
blob_data_service_factory,
file_data_service_factory,
storage_client_factory,
cf_adls_file_system)
from azure.cli.command_modules.storage.util import glob_files_locally, guess_content_type
from azure.cli.command_modules.storage.sdkutil import get_table_data_type
from azure.cli.command_modules.storage.url_quote_util import encode_for_url
from azure.cli.command_modules.storage.oauth_token_util import TokenUpdater
from knack.log import get_logger
from knack.util import CLIError
storage_account_key_options = {'primary': 'key1', 'secondary': 'key2'}
logger = get_logger(__name__)
# Utilities
# pylint: disable=inconsistent-return-statements,too-many-lines
def _query_account_key(cli_ctx, account_name):
"""Query the storage account key. This is used when the customer doesn't offer account key but name."""
rg, scf = _query_account_rg(cli_ctx, account_name)
t_storage_account_keys = get_sdk(
cli_ctx, ResourceType.MGMT_STORAGE, 'models.storage_account_keys#StorageAccountKeys')
logger.debug('Disable HTTP logging to avoid having storage keys in debug logs')
if t_storage_account_keys:
return scf.storage_accounts.list_keys(rg, account_name, logging_enable=False).key1
# of type: models.storage_account_list_keys_result#StorageAccountListKeysResult
return scf.storage_accounts.list_keys(rg, account_name, logging_enable=False).keys[0].value # pylint: disable=no-member
def _query_account_rg(cli_ctx, account_name):
"""Query the storage account's resource group, which the mgmt sdk requires."""
scf = storage_client_factory(cli_ctx)
acc = next((x for x in scf.storage_accounts.list() if x.name == account_name), None)
if acc:
from msrestazure.tools import parse_resource_id
return parse_resource_id(acc.id)['resource_group'], scf
raise ValueError("Storage account '{}' not found.".format(account_name))
def _create_token_credential(cli_ctx):
from knack.cli import EVENT_CLI_POST_EXECUTE
TokenCredential = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#TokenCredential')
token_credential = TokenCredential()
updater = TokenUpdater(token_credential, cli_ctx)
def _cancel_timer_event_handler(_, **__):
updater.cancel()
cli_ctx.register_event(EVENT_CLI_POST_EXECUTE, _cancel_timer_event_handler)
return token_credential
# region PARAMETER VALIDATORS
def parse_storage_account(cmd, namespace):
"""Parse storage account which can be either account name or account id"""
from msrestazure.tools import parse_resource_id, is_valid_resource_id
if namespace.account_name and is_valid_resource_id(namespace.account_name):
namespace.resource_group_name = parse_resource_id(namespace.account_name)['resource_group']
namespace.account_name = parse_resource_id(namespace.account_name)['name']
elif namespace.account_name and not is_valid_resource_id(namespace.account_name) and \
not namespace.resource_group_name:
namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
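# Example (illustrative only): if namespace.account_name holds a full resource id
# such as '/subscriptions/<sub-id>/resourceGroups/rg1/providers/Microsoft.Storage/storageAccounts/acct1',
# the validator rewrites account_name to 'acct1' and resource_group_name to 'rg1'.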
def process_resource_group(cmd, namespace):
"""Processes the resource group parameter from the account name"""
if namespace.account_name and not namespace.resource_group_name:
namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
def validate_table_payload_format(cmd, namespace):
t_table_payload = get_table_data_type(cmd.cli_ctx, 'table', 'TablePayloadFormat')
if namespace.accept:
formats = {
'none': t_table_payload.JSON_NO_METADATA,
'minimal': t_table_payload.JSON_MINIMAL_METADATA,
'full': t_table_payload.JSON_FULL_METADATA
}
namespace.accept = formats[namespace.accept.lower()]
def validate_bypass(namespace):
if namespace.bypass:
namespace.bypass = ', '.join(namespace.bypass) if isinstance(namespace.bypass, list) else namespace.bypass
def validate_hns_migration_type(namespace):
if namespace.request_type and namespace.request_type.lower() == 'validation':
namespace.request_type = 'HnsOnValidationRequest'
if namespace.request_type and namespace.request_type.lower() == 'upgrade':
namespace.request_type = 'HnsOnHydrationRequest'
def get_config_value(cmd, section, key, default):
logger.info("Try to get %s %s value from environment variables or config file.", section, key)
return cmd.cli_ctx.config.get(section, key, default)
def is_storagev2(import_prefix):
return import_prefix.startswith('azure.multiapi.storagev2.')
def validate_client_parameters(cmd, namespace):
""" Retrieves storage connection parameters from environment variables and parses out connection string into
account name and key """
n = namespace
if hasattr(n, 'auth_mode'):
auth_mode = n.auth_mode or get_config_value(cmd, 'storage', 'auth_mode', None)
del n.auth_mode
if not n.account_name:
n.account_name = get_config_value(cmd, 'storage', 'account', None)
if auth_mode == 'login':
prefix = cmd.command_kwargs['resource_type'].value[0]
            # is_storagev2() is used to distinguish if the command is in track2 SDK
# If yes, we will use get_login_credentials() as token credential
if is_storagev2(prefix):
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx=cmd.cli_ctx)
n.token_credential, _, _ = profile.get_login_credentials(subscription_id=n._subscription)
# Otherwise, we will assume it is in track1 and keep previous token updater
else:
n.token_credential = _create_token_credential(cmd.cli_ctx)
if hasattr(n, 'token_credential') and n.token_credential:
# give warning if there are account key args being ignored
account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
n.connection_string and "--connection-string"]
account_key_args = [arg for arg in account_key_args if arg]
if account_key_args:
logger.warning('In "login" auth mode, the following arguments are ignored: %s',
' ,'.join(account_key_args))
return
# When there is no input for credential, we will read environment variable
if not n.connection_string and not n.account_key and not n.sas_token:
n.connection_string = get_config_value(cmd, 'storage', 'connection_string', None)
# if connection string supplied or in environment variables, extract account key and name
if n.connection_string:
conn_dict = validate_key_value_pairs(n.connection_string)
n.account_name = conn_dict.get('AccountName')
n.account_key = conn_dict.get('AccountKey')
n.sas_token = conn_dict.get('SharedAccessSignature')
# otherwise, simply try to retrieve the remaining variables from environment variables
if not n.account_name:
n.account_name = get_config_value(cmd, 'storage', 'account', None)
if not n.account_key and not n.sas_token:
n.account_key = get_config_value(cmd, 'storage', 'key', None)
if not n.sas_token:
n.sas_token = get_config_value(cmd, 'storage', 'sas_token', None)
    # strip the '?' from the sas token; the portal and the command line return the
    # sas token in different forms
if n.sas_token:
n.sas_token = n.sas_token.lstrip('?')
# account name with secondary
if n.account_name and n.account_name.endswith('-secondary'):
n.location_mode = 'secondary'
n.account_name = n.account_name[:-10]
# if account name is specified but no key, attempt to query
if n.account_name and not n.account_key and not n.sas_token:
message = """
There are no credentials provided in your command and environment, we will query for account key for your storage account.
It is recommended to provide --connection-string, --account-key or --sas-token in your command as credentials.
"""
if 'auth_mode' in cmd.arguments:
message += """
You also can add `--auth-mode login` in your command to use Azure Active Directory (Azure AD) for authorization if your login account is assigned required RBAC roles.
For more information about RBAC roles in storage, visit https://docs.microsoft.com/azure/storage/common/storage-auth-aad-rbac-cli.
"""
logger.warning('%s\nIn addition, setting the corresponding environment variables can avoid inputting '
'credentials in your command. Please use --help to get more information about environment '
'variable usage.', message)
try:
n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("\nSkip querying account key due to failure: %s", ex)
def validate_encryption_key(cmd, namespace):
encryption_key_source = cmd.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE)
if namespace.key_source == encryption_key_source.microsoft_key_vault and \
not namespace.key_uri:
raise CLIError("usage error: Please specify --key-uri when using {} as key source."
.format(encryption_key_source.microsoft_key_vault))
if namespace.key_source != encryption_key_source.microsoft_key_vault and namespace.key_uri:
raise CLIError("usage error: Specify `--key-source={}` and --key-uri to configure key vault properties."
.format(encryption_key_source.microsoft_key_vault))
def process_blob_source_uri(cmd, namespace):
"""
Validate the parameters referenced to a blob source and create the source URI from them.
"""
from .util import create_short_lived_blob_sas
usage_string = \
'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
'\n\t --source-uri' \
'\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & sas] ' \
'\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & key] '
ns = vars(namespace)
# source as blob
container = ns.pop('source_container', None)
blob = ns.pop('source_blob', None)
snapshot = ns.pop('source_snapshot', None)
# source credential clues
source_account_name = ns.pop('source_account_name', None)
source_account_key = ns.pop('source_account_key', None)
sas = ns.pop('source_sas', None)
# source in the form of an uri
uri = ns.get('copy_source', None)
if uri:
if any([container, blob, sas, snapshot, source_account_name, source_account_key]):
raise ValueError(usage_string.format('Unused parameters are given in addition to the '
'source URI'))
# simplest scenario--no further processing necessary
return
validate_client_parameters(cmd, namespace) # must run first to resolve storage account
# determine if the copy will happen in the same storage account
if not source_account_name and source_account_key:
raise ValueError(usage_string.format('Source account key is given but account name is not'))
if not source_account_name and not source_account_key:
# neither source account name or key is given, assume that user intends to copy blob in
# the same account
source_account_name = ns.get('account_name', None)
source_account_key = ns.get('account_key', None)
elif source_account_name and not source_account_key:
if source_account_name == ns.get('account_name', None):
# the source account name is same as the destination account name
source_account_key = ns.get('account_key', None)
else:
# the source account is different from destination account but the key is missing
# try to query one.
try:
source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
except ValueError:
raise ValueError('Source storage account {} not found.'.format(source_account_name))
# else: both source account name and key are given by user
if not source_account_name:
raise ValueError(usage_string.format('Storage account name not found'))
if not sas:
sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
query_params = []
if sas:
query_params.append(sas)
if snapshot:
query_params.append('snapshot={}'.format(snapshot))
uri = 'https://{}.blob.{}/{}/{}{}{}'.format(source_account_name,
cmd.cli_ctx.cloud.suffixes.storage_endpoint,
container,
blob,
'?' if query_params else '',
'&'.join(query_params))
namespace.copy_source = uri
def validate_source_uri(cmd, namespace): # pylint: disable=too-many-statements
from .util import create_short_lived_blob_sas, create_short_lived_file_sas
usage_string = \
'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
'\n\t --source-uri [--source-sas]' \
'\n\tOR --source-container --source-blob [--source-account-name & sas] [--source-snapshot]' \
'\n\tOR --source-container --source-blob [--source-account-name & key] [--source-snapshot]' \
'\n\tOR --source-share --source-path' \
'\n\tOR --source-share --source-path [--source-account-name & sas]' \
'\n\tOR --source-share --source-path [--source-account-name & key]'
ns = vars(namespace)
# source as blob
container = ns.pop('source_container', None)
blob = ns.pop('source_blob', None)
snapshot = ns.pop('source_snapshot', None)
# source as file
share = ns.pop('source_share', None)
path = ns.pop('source_path', None)
file_snapshot = ns.pop('file_snapshot', None)
# source credential clues
source_account_name = ns.pop('source_account_name', None)
source_account_key = ns.pop('source_account_key', None)
source_sas = ns.pop('source_sas', None)
# source in the form of an uri
uri = ns.get('copy_source', None)
if uri:
if any([container, blob, snapshot, share, path, file_snapshot, source_account_name,
source_account_key]):
raise ValueError(usage_string.format('Unused parameters are given in addition to the '
'source URI'))
if source_sas:
source_sas = source_sas.lstrip('?')
uri = '{}{}{}'.format(uri, '?', source_sas)
namespace.copy_source = uri
return
# ensure either a file or blob source is specified
valid_blob_source = container and blob and not share and not path and not file_snapshot
valid_file_source = share and path and not container and not blob and not snapshot
if not valid_blob_source and not valid_file_source:
        raise ValueError(usage_string.format('Neither a valid blob nor a valid file source is specified'))
if valid_blob_source and valid_file_source:
raise ValueError(usage_string.format('Ambiguous parameters, both blob and file sources are '
'specified'))
validate_client_parameters(cmd, namespace) # must run first to resolve storage account
if not source_account_name:
if source_account_key:
raise ValueError(usage_string.format('Source account key is given but account name is not'))
# assume that user intends to copy blob in the same account
source_account_name = ns.get('account_name', None)
# determine if the copy will happen in the same storage account
same_account = False
if not source_account_key and not source_sas:
if source_account_name == ns.get('account_name', None):
same_account = True
source_account_key = ns.get('account_key', None)
source_sas = ns.get('sas_token', None)
else:
# the source account is different from destination account but the key is missing try to query one.
try:
source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
except ValueError:
raise ValueError('Source storage account {} not found.'.format(source_account_name))
# Both source account name and either key or sas (or both) are now available
if not source_sas:
# generate a sas token even in the same account when the source and destination are not the same kind.
if valid_file_source and (ns.get('container_name', None) or not same_account):
dir_name, file_name = os.path.split(path) if path else (None, '')
source_sas = create_short_lived_file_sas(cmd, source_account_name, source_account_key, share,
dir_name, file_name)
elif valid_blob_source and (ns.get('share_name', None) or not same_account):
source_sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
query_params = []
if source_sas:
query_params.append(source_sas.lstrip('?'))
if snapshot:
query_params.append('snapshot={}'.format(snapshot))
if file_snapshot:
query_params.append('sharesnapshot={}'.format(file_snapshot))
uri = 'https://{0}.{1}.{6}/{2}/{3}{4}{5}'.format(
source_account_name,
'blob' if valid_blob_source else 'file',
container if valid_blob_source else share,
encode_for_url(blob if valid_blob_source else path),
'?' if query_params else '',
'&'.join(query_params),
cmd.cli_ctx.cloud.suffixes.storage_endpoint)
namespace.copy_source = uri
def validate_source_url(cmd, namespace): # pylint: disable=too-many-statements, too-many-locals
from .util import create_short_lived_blob_sas, create_short_lived_blob_sas_v2, create_short_lived_file_sas
from azure.cli.core.azclierror import InvalidArgumentValueError, RequiredArgumentMissingError, \
MutuallyExclusiveArgumentError
usage_string = \
'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
'\n\t --source-uri [--source-sas]' \
'\n\tOR --source-container --source-blob [--source-account-name & sas] [--source-snapshot]' \
'\n\tOR --source-container --source-blob [--source-account-name & key] [--source-snapshot]' \
'\n\tOR --source-share --source-path' \
'\n\tOR --source-share --source-path [--source-account-name & sas]' \
'\n\tOR --source-share --source-path [--source-account-name & key]'
ns = vars(namespace)
# source as blob
container = ns.pop('source_container', None)
blob = ns.pop('source_blob', None)
snapshot = ns.pop('source_snapshot', None)
# source as file
share = ns.pop('source_share', None)
path = ns.pop('source_path', None)
file_snapshot = ns.pop('file_snapshot', None)
# source credential clues
source_account_name = ns.pop('source_account_name', None)
source_account_key = ns.pop('source_account_key', None)
source_sas = ns.pop('source_sas', None)
# source in the form of an uri
uri = ns.get('source_url', None)
if uri:
if any([container, blob, snapshot, share, path, file_snapshot, source_account_name,
source_account_key]):
raise InvalidArgumentValueError(usage_string.format(
'Unused parameters are given in addition to the source URI'))
if source_sas:
source_sas = source_sas.lstrip('?')
uri = '{}{}{}'.format(uri, '?', source_sas)
namespace.copy_source = uri
return
# ensure either a file or blob source is specified
valid_blob_source = container and blob and not share and not path and not file_snapshot
valid_file_source = share and path and not container and not blob and not snapshot
if not valid_blob_source and not valid_file_source:
        raise RequiredArgumentMissingError(usage_string.format('Neither a valid blob nor a valid file source is specified'))
if valid_blob_source and valid_file_source:
raise MutuallyExclusiveArgumentError(usage_string.format(
'Ambiguous parameters, both blob and file sources are specified'))
validate_client_parameters(cmd, namespace) # must run first to resolve storage account
if not source_account_name:
if source_account_key:
raise RequiredArgumentMissingError(usage_string.format(
'Source account key is given but account name is not'))
# assume that user intends to copy blob in the same account
source_account_name = ns.get('account_name', None)
# determine if the copy will happen in the same storage account
same_account = False
if not source_account_key and not source_sas:
if source_account_name == ns.get('account_name', None):
same_account = True
source_account_key = ns.get('account_key', None)
source_sas = ns.get('sas_token', None)
else:
# the source account is different from destination account but the key is missing try to query one.
try:
source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
except ValueError:
raise RequiredArgumentMissingError('Source storage account {} not found.'.format(source_account_name))
# Both source account name and either key or sas (or both) are now available
if not source_sas:
# generate a sas token even in the same account when the source and destination are not the same kind.
if valid_file_source and (ns.get('container_name', None) or not same_account):
dir_name, file_name = os.path.split(path) if path else (None, '')
source_sas = create_short_lived_file_sas(cmd, source_account_name, source_account_key, share,
dir_name, file_name)
elif valid_blob_source and (ns.get('share_name', None) or not same_account):
prefix = cmd.command_kwargs['resource_type'].value[0]
# is_storagev2() is used to distinguish if the command is in track2 SDK
# If yes, we will use get_login_credentials() as token credential
if is_storagev2(prefix):
source_sas = create_short_lived_blob_sas_v2(cmd, source_account_name, source_account_key, container,
blob)
else:
source_sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
query_params = []
if source_sas:
query_params.append(source_sas.lstrip('?'))
if snapshot:
query_params.append('snapshot={}'.format(snapshot))
if file_snapshot:
query_params.append('sharesnapshot={}'.format(file_snapshot))
uri = 'https://{0}.{1}.{6}/{2}/{3}{4}{5}'.format(
source_account_name,
'blob' if valid_blob_source else 'file',
container if valid_blob_source else share,
encode_for_url(blob if valid_blob_source else path),
'?' if query_params else '',
'&'.join(query_params),
cmd.cli_ctx.cloud.suffixes.storage_endpoint)
namespace.source_url = uri
def validate_blob_type(namespace):
if not namespace.blob_type:
namespace.blob_type = 'page' if namespace.file_path.endswith('.vhd') else 'block'
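# Example (illustrative only): uploading 'disk.vhd' defaults the blob type to
# 'page', while any other file name defaults to 'block'.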
def validate_storage_data_plane_list(namespace):
if namespace.num_results == '*':
namespace.num_results = None
else:
namespace.num_results = int(namespace.num_results)
def get_content_setting_validator(settings_class, update, guess_from_file=None):
def _class_name(class_type):
return class_type.__module__ + "." + class_type.__class__.__name__
def validator(cmd, namespace):
t_base_blob_service, t_file_service, t_blob_content_settings, t_file_content_settings = cmd.get_models(
'blob.baseblobservice#BaseBlobService',
'file#FileService',
'blob.models#ContentSettings',
'file.models#ContentSettings')
# must run certain validators first for an update
if update:
validate_client_parameters(cmd, namespace)
if update and _class_name(settings_class) == _class_name(t_file_content_settings):
get_file_path_validator()(namespace)
ns = vars(namespace)
clear_content_settings = ns.pop('clear_content_settings', False)
# retrieve the existing object properties for an update
if update and not clear_content_settings:
account = ns.get('account_name')
key = ns.get('account_key')
cs = ns.get('connection_string')
sas = ns.get('sas_token')
token_credential = ns.get('token_credential')
if _class_name(settings_class) == _class_name(t_blob_content_settings):
client = get_storage_data_service_client(cmd.cli_ctx,
service=t_base_blob_service,
name=account,
key=key, connection_string=cs, sas_token=sas,
token_credential=token_credential)
container = ns.get('container_name')
blob = ns.get('blob_name')
lease_id = ns.get('lease_id')
props = client.get_blob_properties(container, blob, lease_id=lease_id).properties.content_settings
elif _class_name(settings_class) == _class_name(t_file_content_settings):
client = get_storage_data_service_client(cmd.cli_ctx, t_file_service, account, key, cs, sas)
share = ns.get('share_name')
directory = ns.get('directory_name')
filename = ns.get('file_name')
props = client.get_file_properties(share, directory, filename).properties.content_settings
# create new properties
new_props = settings_class(
content_type=ns.pop('content_type', None),
content_disposition=ns.pop('content_disposition', None),
content_encoding=ns.pop('content_encoding', None),
content_language=ns.pop('content_language', None),
content_md5=ns.pop('content_md5', None),
cache_control=ns.pop('content_cache_control', None)
)
# if update, fill in any None values with existing
if update:
if not clear_content_settings:
for attr in ['content_type', 'content_disposition', 'content_encoding', 'content_language',
'content_md5', 'cache_control']:
if getattr(new_props, attr) is None:
setattr(new_props, attr, getattr(props, attr))
else:
if guess_from_file:
new_props = guess_content_type(ns[guess_from_file], new_props, settings_class)
ns['content_settings'] = new_props
return validator
def validate_custom_domain(namespace):
if namespace.use_subdomain and not namespace.custom_domain:
raise ValueError('usage error: --custom-domain DOMAIN [--use-subdomain]')
def validate_encryption_services(cmd, namespace):
"""
Builds up the encryption services object for storage account operations based on the list of services passed in.
"""
if namespace.encryption_services:
t_encryption_services, t_encryption_service = get_sdk(cmd.cli_ctx, ResourceType.MGMT_STORAGE,
'EncryptionServices', 'EncryptionService', mod='models')
services = {service: t_encryption_service(enabled=True) for service in namespace.encryption_services}
namespace.encryption_services = t_encryption_services(**services)
def validate_encryption_source(namespace):
if namespace.encryption_key_source == 'Microsoft.Keyvault' and \
not (namespace.encryption_key_name and namespace.encryption_key_vault):
raise ValueError('--encryption-key-name and --encryption-key-vault are required '
'when --encryption-key-source=Microsoft.Keyvault is specified.')
if namespace.encryption_key_name or namespace.encryption_key_version is not None or namespace.encryption_key_vault:
if namespace.encryption_key_source and namespace.encryption_key_source != 'Microsoft.Keyvault':
raise ValueError('--encryption-key-name, --encryption-key-vault, and --encryption-key-version are not '
'applicable without Microsoft.Keyvault key-source.')
def validate_entity(namespace):
""" Converts a list of key value pairs into a dictionary. Ensures that required
RowKey and PartitionKey are converted to the correct case and included. """
values = dict(x.split('=', 1) for x in namespace.entity)
keys = values.keys()
for key in list(keys):
if key.lower() == 'rowkey':
val = values[key]
del values[key]
values['RowKey'] = val
elif key.lower() == 'partitionkey':
val = values[key]
del values[key]
values['PartitionKey'] = val
keys = values.keys()
missing_keys = 'RowKey ' if 'RowKey' not in keys else ''
missing_keys = '{}PartitionKey'.format(missing_keys) \
if 'PartitionKey' not in keys else missing_keys
if missing_keys:
raise argparse.ArgumentError(
None, 'incorrect usage: entity requires: {}'.format(missing_keys))
def cast_val(key, val):
""" Attempts to cast numeric values (except RowKey and PartitionKey) to numbers so they
can be queried correctly. """
if key in ['PartitionKey', 'RowKey']:
return val
def try_cast(to_type):
try:
return to_type(val)
except ValueError:
return None
return try_cast(int) or try_cast(float) or val
# ensure numbers are converted from strings so querying will work correctly
values = {key: cast_val(key, val) for key, val in values.items()}
namespace.entity = values
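# Example (illustrative sketch only):
#
#     ns = argparse.Namespace(entity=['partitionkey=pk1', 'rowkey=rk1', 'Value=42'])
#     validate_entity(ns)
#     # ns.entity == {'PartitionKey': 'pk1', 'RowKey': 'rk1', 'Value': 42}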
def validate_marker(namespace):
""" Converts a list of key value pairs into a dictionary. Ensures that required
nextrowkey and nextpartitionkey are included. """
if not namespace.marker:
return
marker = dict(x.split('=', 1) for x in namespace.marker)
expected_keys = {'nextrowkey', 'nextpartitionkey'}
for key in list(marker.keys()):
new_key = key.lower()
if new_key in expected_keys:
expected_keys.remove(key.lower())
val = marker[key]
del marker[key]
marker[new_key] = val
if expected_keys:
raise argparse.ArgumentError(
None, 'incorrect usage: marker requires: {}'.format(' '.join(expected_keys)))
namespace.marker = marker
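# Example (illustrative sketch only):
#
#     ns = argparse.Namespace(marker=['NextPartitionKey=pk', 'NextRowKey=rk'])
#     validate_marker(ns)
#     # ns.marker == {'nextpartitionkey': 'pk', 'nextrowkey': 'rk'}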
def get_file_path_validator(default_file_param=None):
""" Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'.
Allows another path-type parameter to be named which can supply a default filename. """
def validator(namespace):
if not hasattr(namespace, 'path'):
return
path = namespace.path
dir_name, file_name = os.path.split(path) if path else (None, '')
if default_file_param and '.' not in file_name:
dir_name = path
file_name = os.path.split(getattr(namespace, default_file_param))[1]
dir_name = None if dir_name in ('', '.') else dir_name
namespace.directory_name = dir_name
namespace.file_name = file_name
del namespace.path
return validator
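# Example (illustrative sketch only):
#
#     ns = argparse.Namespace(path='dir/sub/file.txt')
#     get_file_path_validator()(ns)
#     # ns.directory_name == 'dir/sub', ns.file_name == 'file.txt'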
def validate_included_datasets(cmd, namespace):
if namespace.include:
include = namespace.include
if set(include) - set('cmsd'):
help_string = '(c)opy-info (m)etadata (s)napshots (d)eleted'
raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
t_blob_include = cmd.get_models('blob#Include')
namespace.include = t_blob_include('s' in include, 'm' in include, False, 'c' in include, 'd' in include)
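# Example (illustrative only): --include 'sm' selects (s)napshots and (m)etadata.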
def get_include_help_string(include_list):
if include_list is None:
return ''
item = []
for include in include_list:
if include.value == 'uncommittedblobs':
continue
        item.append('(' + include.value[0] + ')' + include.value[1:])
return ', '.join(item)
def validate_included_datasets_validator(include_class):
allowed_values = [x.lower() for x in dir(include_class) if not x.startswith('__')]
allowed_string = ''.join(x[0] for x in allowed_values)
def validator(namespace):
if namespace.include:
if set(namespace.include) - set(allowed_string):
help_string = get_include_help_string(include_class)
raise ValueError(
'valid values are {} or a combination thereof.'.format(help_string))
include = []
if 's' in namespace.include:
include.append(include_class.snapshots)
if 'm' in namespace.include:
include.append(include_class.metadata)
if 'c' in namespace.include:
include.append(include_class.copy)
if 'd' in namespace.include:
include.append(include_class.deleted)
if 'v' in namespace.include:
include.append(include_class.versions)
if 't' in namespace.include:
include.append(include_class.tags)
namespace.include = include
return validator
def validate_key_name(namespace):
key_options = {'primary': '1', 'secondary': '2'}
if hasattr(namespace, 'key_type') and namespace.key_type:
namespace.key_name = namespace.key_type + key_options[namespace.key_name]
else:
namespace.key_name = storage_account_key_options[namespace.key_name]
if hasattr(namespace, 'key_type'):
del namespace.key_type
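# Example (illustrative only): key_name='primary' maps to 'key1'; with
# key_type='kerb' it becomes 'kerb1'.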
def validate_metadata(namespace):
if namespace.metadata:
namespace.metadata = dict(x.split('=', 1) for x in namespace.metadata)
def get_permission_help_string(permission_class):
allowed_values = get_permission_allowed_values(permission_class)
return ' '.join(['({}){}'.format(x[0], x[1:]) for x in allowed_values])
def get_permission_allowed_values(permission_class):
if permission_class:
instance = permission_class()
allowed_values = [x.lower() for x in dir(instance) if not x.startswith('_')]
if 'from_string' in allowed_values:
allowed_values.remove('from_string')
for i, item in enumerate(allowed_values):
if item == 'delete_previous_version':
allowed_values[i] = 'x' + item
if item == 'manage_access_control':
allowed_values[i] = 'permissions'
if item == 'manage_ownership':
allowed_values[i] = 'ownership'
return allowed_values
return None
def get_permission_validator(permission_class):
allowed_values = get_permission_allowed_values(permission_class)
allowed_string = ''.join(x[0] for x in allowed_values)
def validator(namespace):
if namespace.permission:
if set(namespace.permission) - set(allowed_string):
help_string = get_permission_help_string(permission_class)
raise ValueError(
'valid values are {} or a combination thereof.'.format(help_string))
if hasattr(permission_class, 'from_string'):
namespace.permission = permission_class.from_string(namespace.permission)
else:
namespace.permission = permission_class(_str=namespace.permission)
return validator
def table_permission_validator(cmd, namespace):
""" A special case for table because the SDK associates the QUERY permission with 'r' """
t_table_permissions = get_table_data_type(cmd.cli_ctx, 'table', 'TablePermissions')
if namespace.permission:
if set(namespace.permission) - set('raud'):
help_string = '(r)ead/query (a)dd (u)pdate (d)elete'
raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
namespace.permission = t_table_permissions(_str=namespace.permission)
def validate_container_public_access(cmd, namespace):
from .sdkutil import get_container_access_type
t_base_blob_svc = cmd.get_models('blob.baseblobservice#BaseBlobService')
if namespace.public_access:
namespace.public_access = get_container_access_type(cmd.cli_ctx, namespace.public_access.lower())
if hasattr(namespace, 'signed_identifiers'):
# must retrieve the existing ACL to simulate a patch operation because these calls
# are needlessly conflated
ns = vars(namespace)
validate_client_parameters(cmd, namespace)
account = ns.get('account_name')
key = ns.get('account_key')
cs = ns.get('connection_string')
sas = ns.get('sas_token')
client = get_storage_data_service_client(cmd.cli_ctx, t_base_blob_svc, account, key, cs, sas)
container = ns.get('container_name')
lease_id = ns.get('lease_id')
ns['signed_identifiers'] = client.get_container_acl(container, lease_id=lease_id)
def validate_container_nfsv3_squash(cmd, namespace):
t_root_squash = cmd.get_models('RootSquashType', resource_type=ResourceType.MGMT_STORAGE)
if namespace.root_squash and namespace.root_squash == t_root_squash.NO_ROOT_SQUASH:
namespace.enable_nfs_v3_root_squash = False
namespace.enable_nfs_v3_all_squash = False
elif namespace.root_squash and namespace.root_squash == t_root_squash.ROOT_SQUASH:
namespace.enable_nfs_v3_root_squash = True
namespace.enable_nfs_v3_all_squash = False
elif namespace.root_squash and namespace.root_squash == t_root_squash.ALL_SQUASH:
namespace.enable_nfs_v3_all_squash = True
del namespace.root_squash
def validate_fs_public_access(cmd, namespace):
from .sdkutil import get_fs_access_type
if namespace.public_access:
namespace.public_access = get_fs_access_type(cmd.cli_ctx, namespace.public_access.lower())
def validate_select(namespace):
if namespace.select:
namespace.select = ','.join(namespace.select)
# pylint: disable=too-many-statements
def get_source_file_or_blob_service_client(cmd, namespace):
"""
Create the second file service or blob service client for batch copy command, which is used to
list the source files or blobs. If both the source account and source URI are omitted, it
indicates that user want to copy files or blobs in the same storage account, therefore the
destination client will be set None hence the command will use destination client.
"""
t_file_svc, t_block_blob_svc = cmd.get_models('file#FileService', 'blob.blockblobservice#BlockBlobService')
usage_string = 'invalid usage: supply only one of the following argument sets:' + \
'\n\t --source-uri [--source-sas]' + \
'\n\tOR --source-container' + \
'\n\tOR --source-container --source-account-name --source-account-key' + \
'\n\tOR --source-container --source-account-name --source-sas' + \
'\n\tOR --source-share --source-account-name --source-account-key' + \
'\n\tOR --source-share --source-account-name --source-account-sas'
ns = vars(namespace)
source_account = ns.pop('source_account_name', None)
source_key = ns.pop('source_account_key', None)
source_uri = ns.pop('source_uri', None)
source_sas = ns.get('source_sas', None)
source_container = ns.get('source_container', None)
source_share = ns.get('source_share', None)
if source_uri and source_account:
raise ValueError(usage_string)
if not source_uri and bool(source_container) == bool(source_share): # must be container or share
raise ValueError(usage_string)
if (not source_account) and (not source_uri):
# Set the source_client to None if neither source_account or source_uri is given. This
# indicates the command that the source files share or blob container is in the same storage
# account as the destination file share or blob container.
#
# The command itself should create the source service client since the validator can't
# access the destination client through the namespace.
#
# A few arguments check will be made as well so as not to cause ambiguity.
if source_key or source_sas:
raise ValueError('invalid usage: --source-account-name is missing; the source account is assumed to be the'
' same as the destination account. Do not provide --source-sas or --source-account-key')
ns['source_client'] = None
if 'token_credential' not in ns: # not using oauth
return
# oauth is only possible through destination, must still get source creds
source_account, source_key, source_sas = ns['account_name'], ns['account_key'], ns['sas_token']
if source_account:
if not (source_key or source_sas):
# when neither storage account key or SAS is given, try to fetch the key in the current
# subscription
source_key = _query_account_key(cmd.cli_ctx, source_account)
if source_container:
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_block_blob_svc, name=source_account, key=source_key, sas_token=source_sas)
elif source_share:
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_file_svc, name=source_account, key=source_key, sas_token=source_sas)
elif source_uri:
if source_key or source_container or source_share:
raise ValueError(usage_string)
from .storage_url_helpers import StorageResourceIdentifier
if source_sas:
source_uri = '{}{}{}'.format(source_uri, '?', source_sas.lstrip('?'))
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, source_uri)
nor_container_or_share = not identifier.container and not identifier.share
if not identifier.is_url():
raise ValueError('incorrect usage: --source-uri expects a URI')
if identifier.blob or identifier.directory or identifier.filename or nor_container_or_share:
raise ValueError('incorrect usage: --source-uri has to be blob container or file share')
if identifier.sas_token:
ns['source_sas'] = identifier.sas_token
else:
source_key = _query_account_key(cmd.cli_ctx, identifier.account_name)
if identifier.container:
ns['source_container'] = identifier.container
if identifier.account_name != ns.get('account_name'):
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_block_blob_svc, name=identifier.account_name, key=source_key,
sas_token=identifier.sas_token)
elif identifier.share:
ns['source_share'] = identifier.share
if identifier.account_name != ns.get('account_name'):
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_file_svc, name=identifier.account_name, key=source_key,
sas_token=identifier.sas_token)
def add_progress_callback(cmd, namespace):
def _update_progress(current, total):
message = getattr(_update_progress, 'message', 'Alive')
reuse = getattr(_update_progress, 'reuse', False)
if total:
hook.add(message=message, value=current, total_val=total)
if total == current and not reuse:
hook.end()
hook = cmd.cli_ctx.get_progress_controller(det=True)
_update_progress.hook = hook
if not namespace.no_progress:
namespace.progress_callback = _update_progress
del namespace.no_progress
def process_container_delete_parameters(cmd, namespace):
"""Process the parameters for storage container delete command"""
# check whether to use mgmt or data-plane
if namespace.bypass_immutability_policy:
# use management-plane
namespace.processed_account_name = namespace.account_name
namespace.processed_resource_group, namespace.mgmt_client = _query_account_rg(
cmd.cli_ctx, namespace.account_name)
del namespace.auth_mode
else:
# use data-plane, like before
validate_client_parameters(cmd, namespace)
def process_blob_download_batch_parameters(cmd, namespace):
"""Process the parameters for storage blob download command"""
# 1. quick check
if not os.path.exists(namespace.destination) or not os.path.isdir(namespace.destination):
raise ValueError('incorrect usage: destination must be an existing directory')
# 2. try to extract account name and container name from source string
_process_blob_batch_container_parameters(cmd, namespace)
# 3. Call validators
add_progress_callback(cmd, namespace)
def process_blob_upload_batch_parameters(cmd, namespace):
"""Process the source and destination of storage blob upload command"""
# 1. quick check
if not os.path.exists(namespace.source) or not os.path.isdir(namespace.source):
raise ValueError('incorrect usage: source must be an existing directory')
# 2. try to extract account name and container name from destination string
_process_blob_batch_container_parameters(cmd, namespace, source=False)
# 3. collect the files to be uploaded
namespace.source = os.path.realpath(namespace.source)
namespace.source_files = list(glob_files_locally(namespace.source, namespace.pattern))
# 4. determine blob type
if namespace.blob_type is None:
vhd_files = [f for f in namespace.source_files if f[0].endswith('.vhd')]
if any(vhd_files) and len(vhd_files) == len(namespace.source_files):
# when all the listed files are vhd files use page
namespace.blob_type = 'page'
elif any(vhd_files):
# source files contain vhd files but not all of them
raise CLIError("""Fail to guess the required blob type. Type of the files to be
uploaded are not consistent. Default blob type for .vhd files is "page", while
others are "block". You can solve this problem by either explicitly set the blob
type or ensure the pattern matches a correct set of files.""")
else:
namespace.blob_type = 'block'
# 5. call other validators
validate_metadata(namespace)
t_blob_content_settings = cmd.loader.get_sdk('blob.models#ContentSettings')
get_content_setting_validator(t_blob_content_settings, update=False)(cmd, namespace)
add_progress_callback(cmd, namespace)
def process_blob_delete_batch_parameters(cmd, namespace):
_process_blob_batch_container_parameters(cmd, namespace)
def _process_blob_batch_container_parameters(cmd, namespace, source=True):
"""Process the container parameters for storage blob batch commands before populating args from environment."""
if source:
container_arg, container_name_arg = 'source', 'source_container_name'
else:
# destination
container_arg, container_name_arg = 'destination', 'destination_container_name'
# try to extract account name and container name from source string
from .storage_url_helpers import StorageResourceIdentifier
container_arg_val = getattr(namespace, container_arg) # either a url or name
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, container_arg_val)
if not identifier.is_url():
setattr(namespace, container_name_arg, container_arg_val)
elif identifier.blob:
raise ValueError('incorrect usage: {} should be either a container URL or name'.format(container_arg))
else:
setattr(namespace, container_name_arg, identifier.container)
if namespace.account_name is None:
namespace.account_name = identifier.account_name
elif namespace.account_name != identifier.account_name:
raise ValueError('The given storage account name is not consistent with the '
'account name in the destination URL')
# if no sas-token is given and the container url contains one, use it
if not namespace.sas_token and identifier.sas_token:
namespace.sas_token = identifier.sas_token
# Finally, grab missing storage connection parameters from environment variables
validate_client_parameters(cmd, namespace)
def process_file_upload_batch_parameters(cmd, namespace):
"""Process the parameters of storage file batch upload command"""
# 1. quick check
if not os.path.exists(namespace.source):
raise ValueError('incorrect usage: source {} does not exist'.format(namespace.source))
if not os.path.isdir(namespace.source):
raise ValueError('incorrect usage: source must be a directory')
# 2. try to extract account name and container name from destination string
from .storage_url_helpers import StorageResourceIdentifier
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.destination)
if identifier.is_url():
if identifier.filename or identifier.directory:
raise ValueError('incorrect usage: destination must be a file share url')
namespace.destination = identifier.share
if not namespace.account_name:
namespace.account_name = identifier.account_name
namespace.source = os.path.realpath(namespace.source)
def process_file_download_batch_parameters(cmd, namespace):
"""Process the parameters for storage file batch download command"""
# 1. quick check
if not os.path.exists(namespace.destination) or not os.path.isdir(namespace.destination):
raise ValueError('incorrect usage: destination must be an existing directory')
# 2. try to extract account name and share name from source string
process_file_batch_source_parameters(cmd, namespace)
def process_file_batch_source_parameters(cmd, namespace):
from .storage_url_helpers import StorageResourceIdentifier
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.source)
if identifier.is_url():
if identifier.filename or identifier.directory:
raise ValueError('incorrect usage: source should be either share URL or name')
namespace.source = identifier.share
if not namespace.account_name:
namespace.account_name = identifier.account_name
def process_file_download_namespace(namespace):
get_file_path_validator()(namespace)
dest = namespace.file_path
if not dest or os.path.isdir(dest):
namespace.file_path = os.path.join(dest, namespace.file_name) \
if dest else namespace.file_name
def process_metric_update_namespace(namespace):
namespace.hour = namespace.hour == 'true'
namespace.minute = namespace.minute == 'true'
namespace.api = namespace.api == 'true' if namespace.api else None
if namespace.hour is None and namespace.minute is None:
raise argparse.ArgumentError(
None, 'incorrect usage: must specify --hour and/or --minute')
if (namespace.hour or namespace.minute) and namespace.api is None:
raise argparse.ArgumentError(
None, 'incorrect usage: specify --api when hour or minute metrics are enabled')
def validate_subnet(cmd, namespace):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
subnet = namespace.subnet
subnet_is_id = is_valid_resource_id(subnet)
vnet = namespace.vnet_name
if (subnet_is_id and not vnet) or (not subnet and not vnet):
return
if subnet and not subnet_is_id and vnet:
namespace.subnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet,
child_type_1='subnets',
child_name_1=subnet)
else:
raise CLIError('incorrect usage: [--subnet ID | --subnet NAME --vnet-name NAME]')
def get_datetime_type(to_string):
""" Validates UTC datetime. Examples of accepted forms:
    2017-12-31T01:11:59Z, 2017-12-31T01:11Z, 2017-12-31T01Z, or 2017-12-31 """
from datetime import datetime
def datetime_type(string):
""" Validates UTC datetime. Examples of accepted forms:
        2017-12-31T01:11:59Z, 2017-12-31T01:11Z, 2017-12-31T01Z, or 2017-12-31 """
accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ',
'%Y-%m-%dT%HZ', '%Y-%m-%d']
for form in accepted_date_formats:
try:
if to_string:
return datetime.strptime(string, form).strftime(form)
return datetime.strptime(string, form)
except ValueError:
continue
raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))
return datetime_type
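# Illustrative sketch (not wired into any command): the callable returned by
# get_datetime_type is meant to be used as an argparse `type=`; the sample
# strings below are placeholders chosen only for demonstration.
def _example_get_datetime_type_usage():
    parse_to_datetime = get_datetime_type(to_string=False)
    parse_to_string = get_datetime_type(to_string=True)
    dt = parse_to_datetime('2017-12-31T01:11:59Z')  # datetime.datetime(2017, 12, 31, 1, 11, 59)
    s = parse_to_string('2017-12-31')               # '2017-12-31'
    return dt, s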
def get_api_version_type():
""" Examples of accepted forms: 2017-12-31 """
from datetime import datetime
def api_version_type(string):
""" Validates api version format. Examples of accepted form: 2017-12-31 """
accepted_format = '%Y-%m-%d'
try:
return datetime.strptime(string, accepted_format).strftime(accepted_format)
except ValueError:
from azure.cli.core.azclierror import InvalidArgumentValueError
raise InvalidArgumentValueError("Input '{}' not valid. Valid example: 2008-10-27.".format(string))
return api_version_type
def ipv4_range_type(string):
""" Validates an IPv4 address or address range. """
import re
ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
if not re.match("^{}$".format(ip_format), string):
if not re.match("^{ip_format}-{ip_format}$".format(ip_format=ip_format), string):
raise CLIError("Please use the following format to specify ip range: '{ip1}-{ip2}'.")
return string
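# Illustrative sketch: ipv4_range_type returns the input unchanged when it is a
# single IPv4 address or a hyphen-separated range, and raises CLIError otherwise.
# The addresses below are placeholders.
def _example_ipv4_range_type_usage():
    single = ipv4_range_type('10.0.0.1')                  # '10.0.0.1'
    ip_range = ipv4_range_type('10.0.0.1-10.0.0.255')     # '10.0.0.1-10.0.0.255'
    return single, ip_range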
def resource_type_type(loader):
""" Returns a function which validates that resource types string contains only a combination of service,
container, and object. Their shorthand representations are s, c, and o. """
def impl(string):
t_resources = loader.get_models('common.models#ResourceTypes')
if set(string) - set("sco"):
raise ValueError
return t_resources(_str=''.join(set(string)))
return impl
def services_type(loader):
""" Returns a function which validates that services string contains only a combination of blob, queue, table,
and file. Their shorthand representations are b, q, t, and f. """
def impl(string):
t_services = loader.get_models('common.models#Services')
if set(string) - set("bqtf"):
raise ValueError
return t_services(_str=''.join(set(string)))
return impl
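# Illustrative sketch, assuming `loader` is the command loader these factories
# expect (anything exposing get_models as used above): both factories return
# argparse-style validators that accept shorthand strings such as 'sco' or 'bqtf'.
def _example_resource_and_services_types(loader):
    resource_types = resource_type_type(loader)('sco')    # service + container + object
    services = services_type(loader)('bf')                # blob + file
    return resource_types, services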
def get_char_options_validator(types, property_name):
def _validator(namespace):
service_types = set(getattr(namespace, property_name, []))
if not service_types:
raise ValueError('Missing options --{}.'.format(property_name.replace('_', '-')))
if service_types - set(types):
raise ValueError(
'--{}: only valid values are: {}.'.format(property_name.replace('_', '-'), ', '.join(types)))
setattr(namespace, property_name, service_types)
return _validator
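# Illustrative sketch: get_char_options_validator builds a namespace validator for
# a character-flag attribute; the attribute name 'services' and the value 'bq'
# below are assumptions used only for demonstration.
def _example_char_options_validator():
    import argparse
    ns = argparse.Namespace(services='bq')
    get_char_options_validator('bqtf', 'services')(ns)
    return ns.services   # {'b', 'q'}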
def page_blob_tier_validator(cmd, namespace):
if not namespace.tier:
return
if namespace.blob_type != 'page' and namespace.tier:
raise ValueError('Blob tier is only applicable to page blobs on premium storage accounts.')
try:
if is_storagev2(cmd.command_kwargs['resource_type'].value[0]):
namespace.tier = getattr(cmd.get_models('_models#PremiumPageBlobTier'), namespace.tier)
else:
namespace.tier = getattr(cmd.get_models('blob.models#PremiumPageBlobTier'), namespace.tier)
except AttributeError:
from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
raise ValueError('Unknown premium page blob tier name. Choose among {}'.format(', '.join(
get_blob_tier_names(cmd.cli_ctx, 'PremiumPageBlobTier'))))
def block_blob_tier_validator(cmd, namespace):
if not namespace.tier:
return
if namespace.blob_type != 'block' and namespace.tier:
raise ValueError('Blob tier is only applicable to block blobs on standard storage accounts.')
try:
if is_storagev2(cmd.command_kwargs['resource_type'].value[0]):
namespace.tier = getattr(cmd.get_models('_models#StandardBlobTier'), namespace.tier)
else:
namespace.tier = getattr(cmd.get_models('blob.models#StandardBlobTier'), namespace.tier)
except AttributeError:
from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
raise ValueError('Unknown block blob tier name. Choose among {}'.format(', '.join(
get_blob_tier_names(cmd.cli_ctx, 'StandardBlobTier'))))
def blob_tier_validator(cmd, namespace):
if namespace.blob_type == 'page':
page_blob_tier_validator(cmd, namespace)
elif namespace.blob_type == 'block':
block_blob_tier_validator(cmd, namespace)
else:
raise ValueError('Blob tier is only applicable to block or page blob.')
def blob_download_file_path_validator(namespace):
if os.path.isdir(namespace.file_path):
from azure.cli.core.azclierror import FileOperationError
raise FileOperationError('File is expected, not a directory: {}'.format(namespace.file_path))
def blob_rehydrate_priority_validator(namespace):
if namespace.blob_type == 'page' and namespace.rehydrate_priority:
raise ValueError('--rehydrate-priority is only applicable to block blob.')
if namespace.tier == 'Archive' and namespace.rehydrate_priority:
raise ValueError('--rehydrate-priority is only applicable to rehydrate blob data from the archive tier.')
if namespace.rehydrate_priority is None:
namespace.rehydrate_priority = 'Standard'
def validate_azcopy_upload_destination_url(cmd, namespace):
client = blob_data_service_factory(cmd.cli_ctx, {
'account_name': namespace.account_name, 'connection_string': namespace.connection_string})
destination_path = namespace.destination_path
if not destination_path:
destination_path = ''
url = client.make_blob_url(namespace.destination_container, destination_path)
namespace.destination = url
del namespace.destination_container
del namespace.destination_path
def validate_azcopy_remove_arguments(cmd, namespace):
usage_string = \
'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
'\n\t --container-name [--name]' \
'\n\tOR --share-name [--path]'
ns = vars(namespace)
# source as blob
container = ns.pop('container_name', None)
blob = ns.pop('blob_name', None)
# source as file
share = ns.pop('share_name', None)
path = ns.pop('path', None)
# ensure either a file or blob source is specified
valid_blob = container and not share
valid_file = share and not container
if not valid_blob and not valid_file:
        raise ValueError(usage_string.format('Neither a valid blob nor a valid file source is specified'))
if valid_blob and valid_file:
raise ValueError(usage_string.format('Ambiguous parameters, both blob and file sources are '
'specified'))
if valid_blob:
client = blob_data_service_factory(cmd.cli_ctx, {
'account_name': namespace.account_name})
if not blob:
blob = ''
url = client.make_blob_url(container, blob)
namespace.service = 'blob'
namespace.target = url
if valid_file:
client = file_data_service_factory(cmd.cli_ctx, {
'account_name': namespace.account_name,
'account_key': namespace.account_key})
dir_name, file_name = os.path.split(path) if path else (None, '')
dir_name = None if dir_name in ('', '.') else dir_name
url = client.make_file_url(share, dir_name, file_name)
namespace.service = 'file'
namespace.target = url
def as_user_validator(namespace):
if hasattr(namespace, 'token_credential') and not namespace.as_user:
raise CLIError('incorrect usage: specify --as-user when --auth-mode login is used to get user delegation key.')
if namespace.as_user:
if namespace.expiry is None:
raise argparse.ArgumentError(
None, 'incorrect usage: specify --expiry when as-user is enabled')
expiry = get_datetime_type(False)(namespace.expiry)
from datetime import datetime, timedelta
if expiry > datetime.utcnow() + timedelta(days=7):
raise argparse.ArgumentError(
None, 'incorrect usage: --expiry should be within 7 days from now')
if ((not hasattr(namespace, 'token_credential') or namespace.token_credential is None) and
(not hasattr(namespace, 'auth_mode') or namespace.auth_mode != 'login')):
raise argparse.ArgumentError(
None, "incorrect usage: specify '--auth-mode login' when as-user is enabled")
def validator_change_feed_retention_days(namespace):
enable = namespace.enable_change_feed
days = namespace.change_feed_retention_days
from azure.cli.core.azclierror import InvalidArgumentValueError
if enable is False and days is not None:
raise InvalidArgumentValueError("incorrect usage: "
"'--change-feed-retention-days' is invalid "
"when '--enable-change-feed' is set to false")
if enable is None and days is not None:
raise InvalidArgumentValueError("incorrect usage: "
"please specify '--enable-change-feed true' if you "
"want to set the value for '--change-feed-retention-days'")
if days is not None:
if days < 1:
raise InvalidArgumentValueError("incorrect usage: "
"'--change-feed-retention-days' must be greater than or equal to 1")
if days > 146000:
raise InvalidArgumentValueError("incorrect usage: "
"'--change-feed-retention-days' must be less than or equal to 146000")
def validator_delete_retention_days(namespace, enable=None, days=None):
enable_param = '--' + enable.replace('_', '-')
    days_param = '--' + days.replace('_', '-')
enable = getattr(namespace, enable)
days = getattr(namespace, days)
if enable is True and days is None:
raise ValueError(
"incorrect usage: you have to provide value for '{}' when '{}' "
"is set to true".format(days_param, enable_param))
if enable is False and days is not None:
raise ValueError(
"incorrect usage: '{}' is invalid when '{}' is set to false".format(days_param, enable_param))
if enable is None and days is not None:
raise ValueError(
"incorrect usage: please specify '{} true' if you want to set the value for "
"'{}'".format(enable_param, days_param))
if days or days == 0:
if days < 1:
raise ValueError(
"incorrect usage: '{}' must be greater than or equal to 1".format(days_param))
if days > 365:
raise ValueError(
"incorrect usage: '{}' must be less than or equal to 365".format(days_param))
def validate_container_delete_retention_days(namespace):
validator_delete_retention_days(namespace, enable='enable_container_delete_retention',
days='container_delete_retention_days')
def validate_delete_retention_days(namespace):
validator_delete_retention_days(namespace, enable='enable_delete_retention',
days='delete_retention_days')
def validate_file_delete_retention_days(namespace):
from azure.cli.core.azclierror import ValidationError
if namespace.enable_delete_retention is True and namespace.delete_retention_days is None:
raise ValidationError(
"incorrect usage: you have to provide value for '--delete-retention-days' when '--enable-delete-retention' "
"is set to true")
if namespace.enable_delete_retention is False and namespace.delete_retention_days is not None:
raise ValidationError(
"incorrect usage: '--delete-retention-days' is invalid when '--enable-delete-retention' is set to false")
# pylint: disable=too-few-public-methods
class BlobRangeAddAction(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
if not namespace.blob_ranges:
namespace.blob_ranges = []
if isinstance(values, list):
values = ' '.join(values)
BlobRange = namespace._cmd.get_models('BlobRestoreRange', resource_type=ResourceType.MGMT_STORAGE)
try:
start_range, end_range = values.split(' ')
except (ValueError, TypeError):
raise CLIError('usage error: --blob-range VARIABLE OPERATOR VALUE')
namespace.blob_ranges.append(BlobRange(
start_range=start_range,
end_range=end_range
))
def validate_private_endpoint_connection_id(cmd, namespace):
if namespace.connection_id:
from azure.cli.core.util import parse_proxy_resource_id
result = parse_proxy_resource_id(namespace.connection_id)
namespace.resource_group_name = result['resource_group']
namespace.account_name = result['name']
namespace.private_endpoint_connection_name = result['child_name_1']
if namespace.account_name and not namespace.resource_group_name:
namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
if not all([namespace.account_name, namespace.resource_group_name, namespace.private_endpoint_connection_name]):
raise CLIError('incorrect usage: [--id ID | --name NAME --account-name NAME]')
del namespace.connection_id
def pop_data_client_auth(ns):
del ns.auth_mode
del ns.account_key
del ns.connection_string
del ns.sas_token
def validate_client_auth_parameter(cmd, ns):
from .sdkutil import get_container_access_type
if ns.public_access:
ns.public_access = get_container_access_type(cmd.cli_ctx, ns.public_access.lower())
if ns.default_encryption_scope and ns.prevent_encryption_scope_override is not None:
# simply try to retrieve the remaining variables from environment variables
if not ns.account_name:
ns.account_name = get_config_value(cmd, 'storage', 'account', None)
if ns.account_name and not ns.resource_group_name:
ns.resource_group_name = _query_account_rg(cmd.cli_ctx, account_name=ns.account_name)[0]
pop_data_client_auth(ns)
elif (ns.default_encryption_scope and ns.prevent_encryption_scope_override is None) or \
(not ns.default_encryption_scope and ns.prevent_encryption_scope_override is not None):
raise CLIError("usage error: You need to specify both --default-encryption-scope and "
"--prevent-encryption-scope-override to set encryption scope information "
"when creating container.")
else:
validate_client_parameters(cmd, ns)
validate_metadata(ns)
def validate_encryption_scope_client_params(ns):
if ns.encryption_scope:
# will use track2 client and socket_timeout is unused
del ns.socket_timeout
def validate_access_control(namespace):
if namespace.acl and namespace.permissions:
raise CLIError('usage error: invalid when specifying both --acl and --permissions.')
def validate_service_type(services, service_type):
if service_type == 'table':
return 't' in services
if service_type == 'blob':
return 'b' in services
if service_type == 'queue':
return 'q' in services
def validate_logging_version(namespace):
if validate_service_type(namespace.services, 'table') and namespace.version and namespace.version != 1.0:
raise CLIError(
'incorrect usage: for table service, the supported version for logging is `1.0`. For more information, '
'please refer to https://docs.microsoft.com/rest/api/storageservices/storage-analytics-log-format.')
def validate_match_condition(namespace):
from .track2_util import _if_match, _if_none_match
if namespace.if_match:
namespace = _if_match(if_match=namespace.if_match, **namespace)
del namespace.if_match
if namespace.if_none_match:
namespace = _if_none_match(if_none_match=namespace.if_none_match, **namespace)
del namespace.if_none_match
def validate_or_policy(cmd, namespace):
from msrestazure.tools import is_valid_resource_id, resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
error_elements = []
if namespace.properties is None:
error_msg = "Please provide --policy in JSON format or the following arguments: "
if namespace.source_account is None:
error_elements.append("--source-account")
# Apply account name when there is no destination account provided
if namespace.destination_account is None:
namespace.destination_account = namespace.account_name
if error_elements:
error_msg += ", ".join(error_elements)
error_msg += " to initialize Object Replication Policy for storage account."
raise ValueError(error_msg)
else:
if os.path.exists(namespace.properties):
or_policy = get_file_json(namespace.properties)
else:
or_policy = shell_safe_json_parse(namespace.properties)
try:
namespace.source_account = or_policy["sourceAccount"]
except KeyError:
namespace.source_account = or_policy["source_account"]
if namespace.source_account is None:
error_elements.append("source_account")
try:
namespace.destination_account = or_policy["destinationAccount"]
except KeyError:
namespace.destination_account = or_policy["destination_account"]
if "rules" not in or_policy.keys() or not or_policy["rules"]:
error_elements.append("rules")
error_msg = "Missing input parameters: "
if error_elements:
error_msg += ", ".join(error_elements)
error_msg += " in properties to initialize Object Replication Policy for storage account."
raise ValueError(error_msg)
namespace.properties = or_policy
if "policyId" in or_policy.keys() and or_policy["policyId"]:
namespace.policy_id = or_policy['policyId']
if not is_valid_resource_id(namespace.source_account):
namespace.source_account = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=namespace.source_account)
if not is_valid_resource_id(namespace.destination_account):
namespace.destination_account = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=namespace.destination_account)
def get_url_with_sas(cmd, namespace, url=None, container=None, blob=None, share=None, file_path=None):
import re
# usage check
if not container and blob:
raise CLIError('incorrect usage: please specify container information for your blob resource.')
if not share and file_path:
raise CLIError('incorrect usage: please specify share information for your file resource.')
    if url and container:
        raise CLIError('incorrect usage: specify either url or container information, not both.')
    if url and share:
        raise CLIError('incorrect usage: specify either url or share information, not both.')
    if share and container:
        raise CLIError('incorrect usage: specify either share or container information, not both.')
# get url
storage_endpoint = cmd.cli_ctx.cloud.suffixes.storage_endpoint
service = None
if url is not None:
# validate source is uri or local path
storage_pattern = re.compile(r'https://(.*?)\.(blob|dfs|file).%s' % storage_endpoint)
result = re.findall(storage_pattern, url)
if result: # source is URL
storage_info = result[0]
namespace.account_name = storage_info[0]
if storage_info[1] in ['blob', 'dfs']:
service = 'blob'
elif storage_info[1] in ['file']:
service = 'file'
else:
                raise ValueError('{} is not a valid storage endpoint.'.format(url))
        else:
            logger.info("%s is not an Azure storage URL.", url)
return service, url
# validate credential
validate_client_parameters(cmd, namespace)
kwargs = {'account_name': namespace.account_name,
'account_key': namespace.account_key,
'connection_string': namespace.connection_string,
'sas_token': namespace.sas_token}
if container:
client = blob_data_service_factory(cmd.cli_ctx, kwargs)
if blob is None:
blob = ''
url = client.make_blob_url(container, blob)
service = 'blob'
elif share:
client = file_data_service_factory(cmd.cli_ctx, kwargs)
dir_name, file_name = os.path.split(file_path) if file_path else (None, '')
dir_name = None if dir_name in ('', '.') else dir_name
url = client.make_file_url(share, dir_name, file_name)
service = 'file'
elif not any([url, container, share]): # In account level, only blob service is supported
service = 'blob'
url = 'https://{}.{}.{}'.format(namespace.account_name, service, storage_endpoint)
return service, url
def _is_valid_uri(uri):
if not uri:
return False
if os.path.isdir(os.path.dirname(uri)) or os.path.isdir(uri):
return uri
if "?" in uri: # sas token exists
logger.debug("Find ? in %s. ", uri)
return uri
return False
def _add_sas_for_url(cmd, url, account_name, account_key, sas_token, service, resource_types, permissions):
from azure.cli.command_modules.storage.azcopy.util import _generate_sas_token
if sas_token:
sas_token = sas_token.lstrip('?')
else:
try:
sas_token = _generate_sas_token(cmd, account_name, account_key, service,
resource_types=resource_types, permissions=permissions)
except Exception as ex: # pylint: disable=broad-except
logger.info("Cannot generate sas token. %s", ex)
sas_token = None
if sas_token:
        return '{}?{}'.format(url, sas_token)
return url
def validate_azcopy_credential(cmd, namespace):
# Get destination uri
if not _is_valid_uri(namespace.destination):
namespace.url = namespace.destination
service, namespace.destination = get_url_with_sas(
cmd, namespace, url=namespace.destination,
container=namespace.destination_container, blob=namespace.destination_blob,
share=namespace.destination_share, file_path=namespace.destination_file_path)
namespace.destination = _add_sas_for_url(cmd, url=namespace.destination, account_name=namespace.account_name,
account_key=namespace.account_key, sas_token=namespace.sas_token,
service=service, resource_types='co', permissions='wac')
if not _is_valid_uri(namespace.source):
# determine if source account is same with destination
if not namespace.source_account_key and not namespace.source_sas and not namespace.source_connection_string:
if namespace.source_account_name == namespace.account_name:
namespace.source_account_key = namespace.account_key
namespace.source_sas = namespace.sas_token
namespace.source_connection_string = namespace.connection_string
namespace.account_name = namespace.source_account_name
namespace.account_key = namespace.source_account_key
namespace.sas_token = namespace.source_sas
namespace.connection_string = namespace.source_connection_string
# Get source uri
namespace.url = namespace.source
service, namespace.source = get_url_with_sas(
cmd, namespace, url=namespace.source,
container=namespace.source_container, blob=namespace.source_blob,
share=namespace.source_share, file_path=namespace.source_file_path)
namespace.source = _add_sas_for_url(cmd, url=namespace.source, account_name=namespace.account_name,
account_key=namespace.account_key, sas_token=namespace.sas_token,
service=service, resource_types='sco', permissions='rl')
def is_directory(props):
return 'hdi_isfolder' in props.metadata.keys() and props.metadata['hdi_isfolder'] == 'true'
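# Illustrative sketch: is_directory only inspects the 'hdi_isfolder' metadata flag
# set by ADLS Gen2, so a minimal stand-in object is enough to demonstrate it.
def _example_is_directory():
    from collections import namedtuple
    FakeProps = namedtuple('FakeProps', ['metadata'])   # hypothetical stand-in for file properties
    return is_directory(FakeProps(metadata={'hdi_isfolder': 'true'}))   # True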
def validate_fs_directory_upload_destination_url(cmd, namespace):
kwargs = {'account_name': namespace.account_name,
'account_key': namespace.account_key,
'connection_string': namespace.connection_string,
'sas_token': namespace.sas_token,
'file_system_name': namespace.destination_fs}
client = cf_adls_file_system(cmd.cli_ctx, kwargs)
url = client.url
if namespace.destination_path:
from azure.core.exceptions import AzureError
from azure.cli.core.azclierror import InvalidArgumentValueError
file_client = client.get_file_client(file_path=namespace.destination_path)
try:
props = file_client.get_file_properties()
if not is_directory(props):
raise InvalidArgumentValueError('usage error: You are specifying --destination-path with a file name, '
'not directory name. Please change to a valid directory name. '
'If you want to upload to a file, please use '
'`az storage fs file upload` command.')
except AzureError:
pass
url = file_client.url
if _is_valid_uri(url):
namespace.destination = url
else:
namespace.destination = _add_sas_for_url(cmd, url=url, account_name=namespace.account_name,
account_key=namespace.account_key, sas_token=namespace.sas_token,
service='blob', resource_types='co', permissions='rwdlac')
del namespace.destination_fs
del namespace.destination_path
def validate_fs_directory_download_source_url(cmd, namespace):
kwargs = {'account_name': namespace.account_name,
'account_key': namespace.account_key,
'connection_string': namespace.connection_string,
'sas_token': namespace.sas_token,
'file_system_name': namespace.source_fs}
client = cf_adls_file_system(cmd.cli_ctx, kwargs)
url = client.url
if namespace.source_path:
file_client = client.get_file_client(file_path=namespace.source_path)
url = file_client.url
if _is_valid_uri(url):
namespace.source = url
else:
namespace.source = _add_sas_for_url(cmd, url=url, account_name=namespace.account_name,
account_key=namespace.account_key, sas_token=namespace.sas_token,
service='blob', resource_types='co', permissions='rl')
del namespace.source_fs
del namespace.source_path
def validate_text_configuration(cmd, ns):
DelimitedTextDialect = cmd.get_models('_models#DelimitedTextDialect', resource_type=ResourceType.DATA_STORAGE_BLOB)
DelimitedJsonDialect = cmd.get_models('_models#DelimitedJsonDialect', resource_type=ResourceType.DATA_STORAGE_BLOB)
if ns.input_format == 'csv':
ns.input_config = DelimitedTextDialect(
delimiter=ns.in_column_separator,
quotechar=ns.in_quote_char,
lineterminator=ns.in_record_separator,
escapechar=ns.in_escape_char,
has_header=ns.in_has_header)
if ns.input_format == 'json':
ns.input_config = DelimitedJsonDialect(delimiter=ns.in_line_separator)
if ns.output_format == 'csv':
ns.output_config = DelimitedTextDialect(
delimiter=ns.out_column_separator,
quotechar=ns.out_quote_char,
lineterminator=ns.out_record_separator,
escapechar=ns.out_escape_char,
has_header=ns.out_has_header)
if ns.output_format == 'json':
ns.output_config = DelimitedJsonDialect(delimiter=ns.out_line_separator)
del ns.input_format, ns.in_line_separator, ns.in_column_separator, ns.in_quote_char, ns.in_record_separator, \
ns.in_escape_char, ns.in_has_header
del ns.output_format, ns.out_line_separator, ns.out_column_separator, ns.out_quote_char, ns.out_record_separator, \
ns.out_escape_char, ns.out_has_header
def add_acl_progress_hook(namespace):
if namespace.progress_hook:
return
failed_entries = []
# the progress callback is invoked each time a batch is completed
def progress_callback(acl_changes):
# keep track of failed entries if there are any
print(acl_changes.batch_failures)
failed_entries.append(acl_changes.batch_failures)
namespace.progress_hook = progress_callback
def get_not_none_validator(attribute_name):
def validate_not_none(cmd, namespace):
attribute = getattr(namespace, attribute_name, None)
options_list = cmd.arguments[attribute_name].type.settings.get('options_list')
if attribute in (None, ''):
from azure.cli.core.azclierror import InvalidArgumentValueError
raise InvalidArgumentValueError('Argument {} should be specified'.format('/'.join(options_list)))
return validate_not_none
def validate_policy(namespace):
if namespace.id is not None:
logger.warning("\nPlease do not specify --expiry and --permissions if they are already specified in your "
"policy.")
def validate_immutability_arguments(namespace):
from azure.cli.core.azclierror import InvalidArgumentValueError
if not namespace.enable_alw:
if any([namespace.immutability_period_since_creation_in_days,
namespace.immutability_policy_state, namespace.allow_protected_append_writes is not None]):
raise InvalidArgumentValueError("Incorrect usage: To enable account level immutability, "
"need to specify --enable-alw true. "
"Cannot set --enable_alw to false and specify "
"--immutability-period --immutability-state "
"--allow-append")
def validate_allow_protected_append_writes_all(namespace):
from azure.cli.core.azclierror import InvalidArgumentValueError
if namespace.allow_protected_append_writes_all and namespace.allow_protected_append_writes:
raise InvalidArgumentValueError("usage error: The 'allow-protected-append-writes' "
"and 'allow-protected-append-writes-all' "
"properties are mutually exclusive. 'allow-protected-append-writes-all' allows "
"new blocks to be written to both Append and Block Blobs, while "
"'allow-protected-append-writes' allows new blocks to be written to "
"Append Blobs only.")
def validate_blob_name_for_upload(namespace):
if not namespace.blob_name:
namespace.blob_name = os.path.basename(namespace.file_path)
def validate_share_close_handle(namespace):
from azure.cli.core.azclierror import InvalidArgumentValueError
if namespace.close_all and namespace.handle_id:
raise InvalidArgumentValueError("usage error: Please only specify either --handle-id or --close-all, not both.")
if not namespace.close_all and not namespace.handle_id:
raise InvalidArgumentValueError("usage error: Please specify either --handle-id or --close-all.")
|
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/storage/_validators.py
|
Python
|
mit
| 87,495
|
[
"VisIt"
] |
2bb88edaf11b5323a89caaaaef57088dc8e7bd8fecaf519e1d417d0ceb8773b7
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
Pedro Cerqueira
github: @pedrorvc
DESCRIPTION
    This script serves to create the xml files containing the information necessary
    for the execution of BRIG (BLAST Ring Image Generator), reducing the time spent
    on the tedious task of setting up all the information in the GUI and providing
    a quick way to produce an image.
    The arguments of this script expose some (but not all) of the available BRIG
    options, namely the ones I change most often.
USAGE:
brigaid.py -q reference_sequence.fna -rfd path/to/reference/dir -od path/to/output/dir -of path/to/output/dir/output_file
-oi path/to/output/BRIG/output_image -t Image_title -a annotation_file.gbk --genes genes_of_interest.txt
--contig-order contig_order.tsv
"""
import argparse
import csv
import os
import xml.etree.ElementTree as ET
from collections import OrderedDict
from xml.dom import minidom
from Bio import SeqIO
from matplotlib import cm
def listdir_fullpath(path):
""" Gets the full path of the files from a directory
Args:
path (str): full path to a directory
Returns:
list containing the full path of every file contained in the input directory
"""
return [os.path.join(path, f) for f in os.listdir(path)]
def ring_attributes(colour, name, position):
""" Creates ring attributes.
Args:
colour (str): color of the ring.
name (str): name of the ring.
position (str): position of the ring.
Returns:
ring_attrs (dict): attributes of any regular ring of the BRIG xml.
"""
ring_attrs = {"colour" : colour,
"name": name,
"position" : position,
"upperInt" : "90",
"lowerInt" : "70",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return ring_attrs
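# Illustrative sketch with placeholder values: ring_attributes only wraps the
# caller-supplied colour, name and position in the BRIG ring defaults.
def _example_ring_attributes():
    attrs = ring_attributes('222,149,220', 'genome_A', '2')
    assert attrs['blastType'] == 'blastn' and attrs['position'] == '2'
    return attrs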
def annotation_ring_attributes(position):
""" Creates annotation ring attributes.
Args:
position (str): position of the ring.
Returns:
annotation_ring_attrs (dict): attributes of the annotation ring of the BRIG xml.
"""
annotation_ring_attrs = {"colour" : '172,14,225',
"name": 'null',
"position" : position,
"upperInt" : "70",
"lowerInt" : "50",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return annotation_ring_attrs
def create_feature_attrs(label, colour, decoration, start, stop):
""" Create attributes for the Feature SubElements of the annotation ring.
Args:
label (str): name of the gene/CDS to annotate
colour (str): colour of the decoration for the annotation
decoration (str): shape of the gene/CDS to annotate, for example, 'clockwise-arrow'
start (str): start of the gene/CDS to annotate
stop (str): stop of the gene/CDS to annotate
Results:
feature_element_attrs (dict): attributes of the feature element.
feature_range_element_attrs (dict): attributes of the feature range element
"""
feature_element_attrs = {'label' : label,
'colour' : colour,
'decoration' : decoration}
feature_range_element_attrs = {'start' : start,
'stop' : stop}
return feature_element_attrs, feature_range_element_attrs
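# Illustrative sketch with placeholder coordinates: the two dictionaries returned
# by create_feature_attrs map directly onto the 'feature' and 'featureRange' tags.
def _example_create_feature_attrs():
    feature_attrs, range_attrs = create_feature_attrs('gyrA', 'black', 'clockwise-arrow', '100', '2800')
    return feature_attrs, range_attrs   # ({'label': 'gyrA', ...}, {'start': '100', 'stop': '2800'})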
def create_annotation_ring_tsv(annotation_ring, annotation_file):
""" Uses a tsv file to annotate the reference genome.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
"""
with open(annotation_file) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Obtain the annotations from the file contents
for row in reader:
start = row['#START']
stop = row['STOP']
label = row['Label']
colour = row['Colour']
decoration = row['Decoration']
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, colour, decoration, start, stop)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
def annotation_ring_feature_elements_gbk_concat(annotation_ring, record, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided.
Boolean (False) when a concatenated Genbank is provided.
"""
#if type(genome_size) == int:
# Obtain the features of the Genbank file records
for fea in record.features:
# Get the start and end position of the genome
# Also get the strand
if fea.type == 'CDS':
start = str(fea.location.start.position)
end = str(fea.location.end.position)
strand = fea.location.strand
# Get the label of the gene or product
if 'gene' in fea.qualifiers:
label = str(fea.qualifiers['gene'][0])
elif 'product' in fea.qualifiers:
product = fea.qualifiers['product'][0]
label = str(product)
else:
continue
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if fea.type == 'source':
size = fea.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, record, genes, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file
and specific gene annotations.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
        genes (list): names of the genes/CDS products of interest to annotate.
        genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided.
Boolean (False) when a concatenated Genbank is provided.
"""
for f in record.features:
if f.type == 'CDS':
# Find the 'gene' tag and determine if the gene belongs to the specified genes to be annotated
if 'gene' in f.qualifiers and f.qualifiers['gene'][0] in genes:
label = f.qualifiers['gene'][0]
elif 'product' in f.qualifiers and f.qualifiers['product'][0] in genes:
product = f.qualifiers['product'][0]
label = product
else:
continue
# Determine the start, stop and strand of the gene
start = str(f.location.start.position + genome_size)
end = str(f.location.end.position + genome_size)
strand = f.location.strand
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if f.type == "source":
size = f.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records):
""" Create annotation ring using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
"""
if genes_of_interest != []:
        # Get the genes to search in the Genbank file
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
# Create feature elements of the annotation ring
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order):
""" Create annotation ring using a Genbank annotation file divided by contigs.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
contig_order (str): Full path to the file containing the order of the contigs.
"""
if contig_order != []:
with open(contig_order) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Create an OrderedDict with the contents of the file
            # The keys are numbers representing the order of the contigs
# The values are the names of the contigs
content_dict = OrderedDict()
for r in reader:
content_dict[r["order"]] = r["contig"]
# Create an OrderedDict with the content of each contig
# The keys are the names of the contigs
            # The values are SeqRecord objects from BioPython
seq_records_dict = OrderedDict()
for record in records:
seq_records_dict[record.id] = record
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, ord_record, genes, genome_size)
genome_size = gsize
else:
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_gbk_concat(annotation_ring, ord_record, genome_size)
genome_size = gsize
else:
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def write_xml(root_elem, output_file):
""" Writes a xml file.
Args:
root_elem is a ElementTree Element object containing all the information
required for the output file.
output_file (str): full path to the output file
"""
xml_file = ET.tostring(root_elem, encoding='utf8').decode('utf8')
pretty_xml_file = minidom.parseString(xml_file).toprettyxml(indent=' ')
output_file = output_file + ".xml"
with open(output_file, "w") as f:
f.write(pretty_xml_file)
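# Illustrative sketch: building a minimal tree and writing it with write_xml.
# The output path is an assumption; '.xml' is appended by write_xml itself.
def _example_write_xml():
    root = ET.Element('BRIG', attrib={'title': 'example'})
    ET.SubElement(root, 'ring', attrib=ring_attributes('225,0,0', 'demo', '1'))
    write_xml(root, '/tmp/brig_example')   # creates /tmp/brig_example.xml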
####### Create xml elements
# Create root element
def create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format):
"""
Creates the root element of the xml file and its attributes.
Args:
blast_options (str): additional options for blast, for example, -evalue or num_threads
legend_position (str): position of the legend on the image
query_file (str): full path to the query file
output_folder (str): full path to the output folder
image_output_file (str): full path to the image output file
title (str): title of the output image
image_format (str): format of the image output file
Returns:
root: ElementTree Element object containing the BRIG tag and its attributes
"""
root_attrs = {"blastOptions" : blast_options,
"legendPosition" : legend_position,
"queryFile" : query_file,
"outputFolder" : output_folder,
"blastPlus" : "yes",
"outputFile" : os.path.join(output_folder, image_output_file),
"title" : title,
"imageFormat" : image_format,
"queryFastaFile" : query_file,
"cgXML" : os.path.join(output_folder + "/scratch", os.path.basename(query_file) + ".xml")}
root = ET.Element('BRIG', attrib=root_attrs)
return root
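# Illustrative sketch with placeholder paths and options, showing the expected
# argument order for create_root_element; nothing here is validated against a
# real BRIG installation.
def _example_create_root_element():
    return create_root_element('-evalue 0.001 -num_threads 6', 'middle-right',
                               'reference.fna', 'brig_output', 'image.jpg',
                               'Example title', 'jpg')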
#### Create root children
# Create cgview_settings element
def create_cgview_settings_element(root, height, width):
""" Creates the cgview_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
height (str): height of the output image in pixels
width (str): width of the output image in pixels
Returns:
cgview_settings: ElementTree SubElement object containing the cgview settings tag and its attributes
"""
cgview_settings_attrs = {"arrowheadLength" : "medium",
"backboneColor" : "black",
"backboneRadius" : "600",
"backboneThickness" : "medium",
"backgroundColor" : "white",
"borderColor" : "black",
"featureSlotSpacing" : "medium",
"featureThickness" : "30",
"giveFeaturePositions" : "false",
"globalLabel" : "true",
"height" : height,
"isLinear" : "false",
"labelFont" : "SansSerif,plain,25",
"labelLineLength" : "medium",
"labelLineThickness" : "medium",
"labelPlacementQuality" : "best",
"labelsToKeep" : "1000",
"longTickColor" : "black",
"minimumFeatureLength" : "medium",
"moveInnerLabelsToOuter" :"true",
"origin" : "12",
"rulerFont" : "SansSerif,plain,35",
"rulerFontColor" : "black",
"rulerPadding" : "40",
"rulerUnits" : "bases",
"shortTickColor" : "black",
"shortTickThickness" : "medium",
"showBorder" : "false",
"showShading" : "true",
"showWarning" : "false",
"tickDensity" : "0.2333",
"tickThickness" : "medium",
"titleFont" : "SansSerif,plain,45",
"titleFontColor" : "black",
"useColoredLabelBackgrounds" : "false",
"useInnerLabels" : "true",
"warningFont" : "Default,plain,35",
"warningFontColor" : "black",
"width" : width,
"zeroTickColor" : "black",
"tickLength" : "medium"}
cgview_settings = ET.SubElement(root, 'cgview_settings', attrib=cgview_settings_attrs)
return cgview_settings
# Create brig_settings element
def create_brig_settings_element(root, java_memory):
""" Creates the brig_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
java_memory (str): amount of memory (in bytes) java is allowed to use for BRIG
Returns:
brig_settings: ElementTree SubElement object containing the brig settings tag and its attributes
"""
brig_settings_attrs = {"Ring1" : "172,14,225",
"Ring2" : "222,149,220",
"Ring3" : "161,221,231",
"Ring4" : "49,34,221",
"Ring5" : "116,152,226",
"Ring6" : "224,206,38",
"Ring7" : "40,191,140",
"Ring8" : "158,223,139",
"Ring9" : "226,38,122",
"Ring10" :"211,41,77",
"defaultUpper" : "70",
"defaultLower" : "50",
"defaultMinimum" : "50",
"genbankFiles" : "gbk,gb,genbank",
"fastaFiles" : "fna,faa,fas,fasta,fa",
"emblFiles" : "embl",
"blastLocation" : "",
"divider" : "3",
"multiplier" : "3",
"memory" : java_memory,
"defaultSpacer" : "0"}
brig_settings = ET.SubElement(root,
"brig_settings",
attrib=brig_settings_attrs)
return brig_settings
## Create special element
def create_special_element(root):
"""Creates the 'special' element of the xml file and its attributes
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
Returns:
gc_content_special: ElementTree SubElement object containing the 'special' tag and its attributes
gc_skew_special: ElementTree SubElement object containing the 'special' tag and its attributes
"""
gc_content_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Content'})
gc_skew_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Skew'})
return gc_content_special, gc_skew_special
# Create reference dir element
def create_reference_directory_element(root, reference_directory):
""" Creates the 'reference directory' element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
Returns:
ref_file: ElementTree SubElement object containing the 'refFile' tag and its attributes
"""
ref_dir = ET.SubElement(root,
"refDir",
attrib={"location" : reference_directory})
# Obtain the full path for all the files in the directory
ref_dir_list = listdir_fullpath(reference_directory)
for f in ref_dir_list:
ref_file = ET.SubElement(ref_dir,
"refFile",
attrib={"location" : f})
return ref_file
# Create the ring where the annotations are defined
def create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order):
""" Creates the ring that will contain the annotations for the reference genome.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing a list of specific genes.
contig_order (str): Full path to the tab-delimited file containing the order of the contigs.
"""
# Determine the position of the annotation ring, which will be the position after the last reference genome
ring_position = len(os.listdir(reference_directory)) + 2
# Create the annotation ring element
annotation_ring = ET.SubElement(root, 'ring', attrib=annotation_ring_attributes(str(ring_position)))
# Check for tab-delimited annotation file input
if list(SeqIO.parse(annotation_file, "genbank")) == []:
create_annotation_ring_tsv(annotation_ring, annotation_file)
else:
# Get the records of the Genbank file
records = [r for r in SeqIO.parse(annotation_file, "genbank")]
### Check if a contig order file has been provided
if len(records) > 1: # If more than 1 record exists, then the Genbank file is divided by contigs
create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order)
else:
create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records)
## Create remaining rings
def create_ring_element(root, reference_directory, colormap):
""" Creates the ring elements of the xml file, containing the position and color of the rings.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
colormap (str): name of the colormap (available in matplotlib) to use for the color of the rings
Returns:
ring_number_element: ElementTree SubElement object containing the 'ring' tag and its attributes
ring_sequence_element: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
ref_dir_list = listdir_fullpath(reference_directory)
# Gets the colormap from matplotlib with as many colors as the number of files
cmap = cm.get_cmap(colormap, len(ref_dir_list))
list_colormap = cmap.colors.tolist()
# Remove the fourth element (transparency) because it is not necessary
colors_to_use = []
for l in list_colormap:
convert = [round(x * 255) for x in l]
convert.pop()
colors_to_use.append(convert)
#reversed_colors_to_use = colors_to_use[::-1]
# Check if the user provided an order for the rings
has_digit = [os.path.basename(x).split("_")[0].isdigit() for x in ref_dir_list]
if True in has_digit:
# Obtain the ring positions
ring_positions = [os.path.basename(x).split("_")[0] for x in ref_dir_list]
# Reverse sort the positions of the rings, because they will be created
# in a descending order of their positions
ring_positions.sort(reverse=True)
ref_dir_list.sort(reverse=True)
for ring in range(len(ref_dir_list)):
# The ring positions start at 2 due to the special rings (GC Content and GC Skew)
ring_position = int(ring_positions[ring]) + 1
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[1]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_position)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
else:
# Sort files by lowercase
ref_dir_list.sort(key=lambda y: y.lower())
# The number of rings starts at 2 due to the GC Content and GC Skew
ring_number = len(ref_dir_list) + 1
for ring in range(len(ref_dir_list)):
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[0]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_number)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
ring_number -= 1
return ring_number_element, ring_sequence_element
## Create special rings
def create_special_ring_element(root):
""" Create the 'special' ring element and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
Returns:
gc_content_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
gc_skew_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
# Create ring attributes
gc_content_ring_attrs = ring_attributes('225,0,0', "GC Content", "0")
gc_skew_ring_attrs = ring_attributes('225,0,0', "GC Skew", "1")
# Add ring element to root
gc_skew_ring = ET.SubElement(root, 'ring', attrib=gc_skew_ring_attrs)
gc_content_ring = ET.SubElement(root, 'ring', attrib=gc_content_ring_attrs)
# Add sequence element to ring
gc_content_location = ET.SubElement(gc_content_ring, 'sequence', attrib={'location' : 'GC Content'})
gc_skew_location = ET.SubElement(gc_skew_ring, 'sequence', attrib={'location' : 'GC Skew'})
return gc_content_location, gc_skew_location
def main(query_file, reference_directory, output_folder, output_xml, image_output_file, title, annotation_file,
genes_of_interest, contig_order, blast_options, legend_position, image_format, height, width, java_memory, colormap):
root = create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format)
cgview_settings = create_cgview_settings_element(root, height, width)
brig_settings = create_brig_settings_element(root, java_memory)
special = create_special_element(root)
refdir = create_reference_directory_element(root, reference_directory)
if annotation_file:
create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order)
rings = create_ring_element(root, reference_directory, colormap)
special_ring = create_special_ring_element(root)
write_xml(root, output_xml)
print("\n File written to {}".format(output_xml))
def parse_arguments():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-q', '--query', type=str, required=True, dest='query_file',
help='Path to the query/reference FASTA file.')
parser.add_argument('-rfd', '--ref_dir', type=str, required=True, dest='reference_directory',
help='Path to the directory where the FASTA files to compare against the reference are located.')
parser.add_argument('-od', '--out_dir', type=str, required=True, dest='output_folder',
help='Path to the output directory for the results of BRIG.')
parser.add_argument('-of', '--out_xml', type=str, required=True, dest='output_file',
help='Path to the output of this script.')
parser.add_argument('-oi', '--out_img', type=str, required=True, dest='image_output_file',
help='Path to the output file of the resulting image of BRIG.')
parser.add_argument('-t', '--title', type=str, required=True, dest='title',
help='Title of the resulting image from BRIG.')
parser.add_argument('-a', '--annotation', type=str, required=False, dest='annotation_file', default=False,
help='File containing annotations for the reference genome. '
                        help='File containing annotations for the reference genome. '
                             'The annotation file can be a tab-delimited file (.tsv) or a Genbank format file (.gbk, .gb).')
parser.add_argument('--genes', type=str, required=False, dest='genes_of_interest', default=[],
help='File containing a list of specific genes (one gene per line) to search when a Genbank annotation file is provided. ')
parser.add_argument('--contig_order', type=str, required=False, dest='contig_order', default=[],
help='Tab-delimited file containing the order of the contigs when a Genbank (divided by contigs) annotation file is provided. '
'Example: order contig '
'1 Contig8')
parser.add_argument('-b', '--blast_options', type=str, required=False, dest="blast_options", default="-evalue 0.001 -num_threads 6",
help='Options for running BLAST.')
parser.add_argument('-l', '--legend_pos', type=str, required=False, dest="legend_position", default="middle-right",
                        help='Position of the legend on the resulting image. '
                             'The options available are upper, center or lower, '
                             'paired with left, center or right.')
parser.add_argument('-if', '--image_format', type=str, required=False, dest="image_format", default="jpg",
                        help='Format of the resulting image file. '
                             'The available options are: jpg, png, svg or svgz.')
parser.add_argument('-ht', '--height', type=str, required=False, dest="height", default="3000",
help='Height (in pixels) of the resulting image.')
parser.add_argument('-wd', '--width', type=str, required=False, dest="width", default="3000",
help='Width (in pixels) of the resulting image.')
parser.add_argument('-jm', '--java_memory', type=str, required=False, dest="java_memory", default="1500",
                        help='Amount of memory (in megabytes) that Java is allowed to use for BRIG.')
parser.add_argument('-cm', '--colormap', type=str, required=False, dest="colormap", default="viridis",
                        help='Colormap from matplotlib to use for the color of the rings. '
                             'The available options are: viridis, plasma, inferno, magma and cividis. '
                             'More options for colormaps at: '
'https://matplotlib.org/users/colormaps.html')
args = parser.parse_args()
return [args.query_file, args.reference_directory, args.output_folder, args.output_file,
args.image_output_file, args.title, args.annotation_file, args.genes_of_interest, args.contig_order,
args.blast_options, args.legend_position, args.image_format, args.height, args.width, args.java_memory, args.colormap]
if __name__ == '__main__':
args = parse_arguments()
    main(*args)
|
TAMU-CPT/galaxy-tools
|
tools/genome_viz/brigaid.py
|
Python
|
gpl-3.0
| 36,126
|
[
"BLAST",
"Biopython"
] |
5425645f852becaeb43c78c4feafd26722e00b0ba5a63d49540e1aa08ca01096
|
# -*- coding: utf-8 -*-
#
# escpos/impl/epson.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import re
import time
import six
from six.moves import range
from .. import barcode
from .. import constants
from .. import feature
from ..exceptions import CashDrawerException
from ..helpers import ByteValue
from ..helpers import is_value_in
from ..helpers import _Model
VENDOR = 'Seiko-Epson Corporation'
FONT_A = b'\x00'
FONT_B = b'\x01'
FONT_C = b'\x02'
FONT_D = b'\x03'
FONT_E = b'\x04'
FONT_SPECIAL_A = b'\x61'
FONT_SPECIAL_B = b'\x62'
AVAILABLE_FONTS = (
(FONT_A, 'Font A'),
(FONT_B, 'Font B'),
(FONT_C, 'Font C'),
(FONT_D, 'Font D'),
(FONT_E, 'Font E'),
(FONT_SPECIAL_A, 'Special Font A'),
(FONT_SPECIAL_B, 'Special Font B'),
)
QRCODE_ERROR_CORRECTION_MAP = {
barcode.QRCODE_ERROR_CORRECTION_L: b'\x30', # 48d (~7%, default)
barcode.QRCODE_ERROR_CORRECTION_M: b'\x31', # 49d (~15%)
barcode.QRCODE_ERROR_CORRECTION_Q: b'\x32', # 50d (~25%)
barcode.QRCODE_ERROR_CORRECTION_H: b'\x33', # 51d (~30%)
}
QRCODE_MODULE_SIZE_MAP = {
barcode.QRCODE_MODULE_SIZE_4: b'\x04',
barcode.QRCODE_MODULE_SIZE_5: b'\x05',
barcode.QRCODE_MODULE_SIZE_6: b'\x06',
barcode.QRCODE_MODULE_SIZE_7: b'\x07',
barcode.QRCODE_MODULE_SIZE_8: b'\x08',
}
class GenericESCPOS(object):
"""The ESC/POS base class implementation.
.. todo::
Provide default 'GS k' symbology: UPC-A.
.. todo::
Provide default 'GS k' symbology: UPC-E.
.. todo::
Provide default 'GS k' symbology: Code 39.
.. todo::
Provide default 'GS k' symbology: ITF-14.
.. todo::
Provide default 'GS k' symbology: Codabar NW-7.
.. todo::
Provide default 'GS k' symbology: Code 93.
.. todo::
Provide default 'GS k' symbology: GS1-128 (UCC/EAN-128).
.. todo::
Provide default 'GS k' symbology: GS1 DataBar Omnidirectional.
.. todo::
Provide default 'GS k' symbology: GS1 DataBar Truncated.
.. todo::
Provide default 'GS k' symbology: GS1 DataBar Limited.
.. todo::
Provide default 'GS k' symbology: GS1 DataBar Expanded.
"""
device = None
"""The device where ESCPOS commands will be written.
    Indeed, it is an instance of a connection that represents a real device on
    the other end. It may be a serial RS232 connection, a bluetooth connection,
    a USB connection, a network connection, or any other channel we can
    ``catch``, ``write`` to and ``read`` from.
"""
hardware_features = None
"""A mapping of hardware features."""
model = _Model(name='Generic ESC/POS', vendor=VENDOR)
"""Basic metadata with vendor and model name."""
encoding = constants.DEFAULT_ENCODING
"""Default encoding used to encode data before sending to device."""
encoding_errors = constants.DEFAULT_ENCODING_ERRORS
"""How to deal with ``UnicodeEncodingError``.
See ``errors`` argument to ``str.encode()`` for details.
"""
def __init__(
self,
device,
features=None,
encoding=constants.DEFAULT_ENCODING,
encoding_errors=constants.DEFAULT_ENCODING_ERRORS):
super(GenericESCPOS, self).__init__()
self._feature_attrs = feature.FeatureAttributes(self)
self.hardware_features = feature._SET.copy()
self.hardware_features.update(features or {})
self.encoding = encoding
self.encoding_errors = encoding_errors
self.device = device
self.device.catch()
@property
def feature(self):
return self._feature_attrs
def init(self):
self.device.write(b'\x1B\x40')
def lf(self, lines=1):
"""Line feed. Issues a line feed to printer *n*-times."""
for i in range(lines):
self.device.write(b'\x0A')
def textout(self, text):
"""Write text without line feed."""
self.device.write(text.encode(self.encoding, self.encoding_errors))
def text(self, text):
"""Write text followed by a line feed."""
self.textout(text)
self.lf()
def text_center(self, text):
"""Shortcut method for print centered text."""
self.justify_center()
self.text(text)
def justify_center(self):
self.device.write(b'\x1B\x61\x01')
def justify_left(self):
self.device.write(b'\x1B\x61\x00')
def justify_right(self):
self.device.write(b'\x1B\x61\x02')
def set_code_page(self, code_page):
"""Set code page for character printing.
Default code page values are described on page 8 from Epson's
`FAQ about ESC/POS <http://content.epson.de/fileadmin/content/files/RSD/downloads/escpos.pdf>`_
transcribed here for convenience:
+-----------+----------------------------------+
| Code Page | Character Code |
+===========+==================================+
| ``0`` | PC437 (USA: Standard Europe) |
+-----------+----------------------------------+
        | ``1``     | Katakana                         |
+-----------+----------------------------------+
| ``2`` | PC850 (Multilingual) |
+-----------+----------------------------------+
| ``3`` | PC860 (Portuguese) |
+-----------+----------------------------------+
| ``4`` | PC863 (Canadian-French) |
+-----------+----------------------------------+
| ``5`` | PC865 (Nordic) |
+-----------+----------------------------------+
| ``16`` | WPC1252 |
+-----------+----------------------------------+
| ``17`` | PC866 (Cyrillic #2) |
+-----------+----------------------------------+
| ``18`` | PC852 (Latin 2) |
+-----------+----------------------------------+
| ``19`` | PC858 (Euro) |
+-----------+----------------------------------+
| ``20`` | Thai character code 42 |
+-----------+----------------------------------+
| ``21`` | Thai character code 11 |
+-----------+----------------------------------+
| ``22`` | Thai character code 13 |
+-----------+----------------------------------+
| ``23`` | Thai character code 14 |
+-----------+----------------------------------+
| ``24`` | Thai character code 16 |
+-----------+----------------------------------+
| ``25`` | Thai character code 17 |
+-----------+----------------------------------+
| ``26`` | Thai character code 18 |
+-----------+----------------------------------+
| ``254`` | User-defined page |
+-----------+----------------------------------+
| ``255`` | User-defined page |
+-----------+----------------------------------+
.. note::
Be aware of "encoding" attribute versus the code page set.
Usually they must match, unless you know what you are doing.
For example, if your encoding is "cp850", then the code page
set should be "PC850", according to the above table.
Take a look at the Python documentation for the ``codec``'s
module `Standard Encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_.
        Also, check your printer's manual for the code page table.
        :param int code_page: The code page to set. This must be an integer
            ranging from 0 to 255, whose meaning depends upon your printer
            model.
""" # noqa: E501
if not 0 <= code_page <= 255:
raise ValueError((
                'Code page value should be between 0 and 255; '
'got: {!r}'
).format(code_page))
self.device.write(b'\x1B\x74' + six.int2byte(code_page))
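    # A hedged usage sketch (assumes a 'printer' instance already constructed
    # with a working transport); pairs a cp850 text encoding with code page 2:
    #
    #     printer.encoding = 'cp850'
    #     printer.set_code_page(2)  # PC850 (Multilingual), per the table above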
def set_font(self, font=FONT_A):
"""Set font to one of :attr:`AVAILABLE_FONTS`."""
valid_fonts = [param for param, value in AVAILABLE_FONTS]
if font not in valid_fonts:
raise ValueError(
(
'Invalid font: {!r} (valid fonts are {!r})'
).format(font, valid_fonts))
self.device.write(b'\x1B\x4D' + font) # ESC M <n>
def set_mode(
self,
font=FONT_A,
emphasized=False,
underline=False,
expanded=False):
"""Set font, emphasized mode, underline mode and expanded mode."""
commands = []
param = ByteValue()
if font in (FONT_A, FONT_B):
if font == FONT_B:
param.set_bit(0)
else:
# set character font using ESC M (after ESC !)
commands.append(b'\x1B\x4D' + font)
if emphasized:
param.set_bit(3)
if underline:
# TODO: control underline thickness using ESC -
# https://reference.epson-biz.com/modules/ref_escpos/index.php?content_id=24
param.set_bit(7)
if expanded:
param.set_bit(4)
param.set_bit(5)
commands.insert(0, b'\x1B\x21' + param.byte) # ESC !
for cmd in commands:
self.device.write(cmd)
def set_text_size(self, width, height):
"""Set text size to ``width`` and ``height``.
        :param int width: An integer ranging from 0 to 7 (inclusive) whose
            meaning is the magnification of the text in the horizontal
            direction, that is, ``0`` for 1x (normal text), ``1`` for 2x,
            and so on.
        :param int height: An integer ranging from 0 to 7 (inclusive) whose
            meaning is the magnification of the text in the vertical
            direction, that is, ``0`` for 1x (normal text), ``1`` for 2x,
            and so on.
"""
if (0 <= width <= 7) and (0 <= height <= 7):
size = 16 * width + height
self.device.write(b'\x1D\x21' + six.int2byte(size))
else:
raise ValueError((
'Width and height should be between 0 and 7 '
'(1x through 8x of magnification); '
'got: width={!r}, height={!r}'
).format(width, height))
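    # Worked example for the 'GS !' size byte computed above: width=1 and
    # height=1 give size = 16*1 + 1 = 17 (0x11), i.e. 2x in both directions.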
def set_expanded(self, flag):
"""Turns on/off expanded mode. Usually this means a text size of 2x
magnification in both horizontal and vertical directions.
:param bool flag: If ``True`` sets expanded on.
"""
param = ByteValue()
if flag:
# set character size to double width and height
param.set_bit(4) # bits 6, 5, 4 = 0, 0, 1 (x2 width)
param.set_bit(0) # bits 2, 1, 0 = 0, 0, 1 (x2 height)
self.device.write(b'\x1D\x21' + param.byte) # GS !
def set_condensed(self, flag):
"""Turns on/off condensed mode by switching between :attr:`FONT_A`
(normal) and :attr:`FONT_B` (condensed).
:param bool flag: If ``True`` sets condensed on.
"""
param = FONT_B if flag else FONT_A
self.set_font(font=param)
def set_emphasized(self, flag):
"""Turns on/off emphasized mode. See :meth:`set_double_strike`.
:param bool flag: If ``True`` sets emphasized on.
"""
param = b'\x01' if flag else b'\x00'
self.device.write(b'\x1B\x45' + param) # ESC E
def set_double_strike(self, flag):
"""Turns on/off double strike mode. In practice, double strike and
emphasized modes produces same results.
:param bool flag: If ``True`` sets double strike on.
"""
param = b'\x01' if flag else b'\x00'
self.device.write(b'\x1B\x47' + param) # ESC G
def ean8(self, data, **kwargs):
"""Render given data as **JAN-8/EAN-8** barcode symbology.
:param str data: The JAN-8/EAN-8 data to be rendered.
"""
        if not re.match(r'^\d{8}$', data):
raise ValueError((
'JAN-8/EAN-8 symbology requires 8 digits of data; '
'got {:d} digits: {!r}'
).format(len(data), data))
barcode.validate_barcode_args(**kwargs)
return self._ean8_impl(data, **kwargs)
def _ean8_impl(self, data, **kwargs):
ean8_data = data.encode(self.encoding, self.encoding_errors)
commands = barcode.gs_k_barcode(
barcode.JAN8_EAN8,
ean8_data,
**kwargs
)
for cmd in commands:
self.device.write(cmd)
time.sleep(0.25) # wait for barcode to be printed
return self.device.read()
def ean13(self, data, **kwargs):
"""Render given data as **JAN-13/EAN-13** barcode symbology.
:param str data: The JAN-13/EAN-13 data to be rendered.
"""
        if not re.match(r'^\d{13}$', data):
raise ValueError((
'JAN-13/EAN-13 symbology requires 13 digits of '
'data; got {:d} digits: {!r}'
).format(len(data), data))
barcode.validate_barcode_args(**kwargs)
return self._ean13_impl(data, **kwargs)
def _ean13_impl(self, data, **kwargs):
ean13_data = data.encode(self.encoding, self.encoding_errors)
commands = barcode.gs_k_barcode(
barcode.JAN13_EAN13,
ean13_data,
**kwargs
)
for cmd in commands:
self.device.write(cmd)
time.sleep(0.25) # wait for barcode to be printed
return self.device.read()
def code128(self, data, **kwargs):
"""Renders given data as **Code 128** barcode symbology.
:param str data: The Code 128 data to be rendered.
:param bytes codeset: Optional. Keyword argument for the subtype (code
set) to render. Defaults to :attr:`escpos.barcode.CODE128_A`.
.. warning::
You should draw up your data according to the subtype (code set).
The default is **Code 128 A** and there is no way (yet) to mix code
sets in a single barcode rendering (at least not uniformly).
Implementations may simply ignore the code set.
"""
if not re.match(r'^[\x20-\x7F]+$', data):
raise ValueError((
'Invalid Code 128 symbology. Code 128 can encode any '
'ASCII character ranging from 32 (20h) to 127 (7Fh); '
'got: {!r}'
).format(data))
codeset = kwargs.pop('codeset', barcode.CODE128_A)
barcode.validate_barcode_args(**kwargs)
return self._code128_impl(data, codeset=codeset, **kwargs)
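    # A hedged usage sketch (assumes a configured 'printer' instance):
    #
    #     printer.code128('HELLO-123', codeset=barcode.CODE128_A)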
def _code128_impl(self, data, **kwargs):
codeset = kwargs.get('codeset', barcode.CODE128_A)
if not is_value_in(barcode.CODE128_CODESETS, codeset):
raise ValueError('Unknown Code 128 code set: {!r}'.format(codeset))
encoded_data = (
b'\x7B'
+ codeset
+ data.encode(self.encoding, self.encoding_errors)
) # {<codeset><data>
commands = barcode.gs_k_barcode(
barcode.CODE128,
encoded_data,
**kwargs
)
for cmd in commands:
self.device.write(cmd)
time.sleep(0.25) # wait for barcode to be printed
return self.device.read()
def qrcode(self, data, **kwargs):
"""Render given data as `QRCode <http://www.qrcode.com/en/>`_.
:param str data: Data (QRCode contents) to be rendered.
"""
barcode.validate_qrcode_args(**kwargs)
return self._qrcode_impl(data, **kwargs)
def _qrcode_impl(self, data, **kwargs):
qr_data = data.encode(self.encoding, self.encoding_errors)
# compute HI,LO bytes for the number of bytes (parameters) after `pH`;
# this is possibly the safest way, but alternatives are:
#
# size_H = num_bytes // 256 # (!) integer division (rounding down)
# size_L = num_bytes % 256
#
# or:
#
# size_H, size_L = divmod(num_bytes, 256)
#
num_bytes = 3 + len(qr_data) # 3 is the number of bytes after `pH`
size_H = (num_bytes >> 8) & 0xff
size_L = num_bytes & 0xff
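        # e.g. 300 bytes of QR data give num_bytes = 303, so
        # size_H = 303 >> 8 = 1 and size_L = 303 & 0xff = 47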
commands = [
b'\x1D\x28\x6B' # GS(k
+ six.int2byte(size_L)
+ six.int2byte(size_H)
+ b'\x31' # cn (49 <=> 0x31 <=> QRCode)
+ b'\x50' # fn (80 <=> 0x50 <=> store symbol in memory)
+ b'\x30' # m (48 <=> 0x30 <=> literal value)
+ qr_data
]
commands.append(
b'\x1D\x28\x6B' # GS(k
+ b'\x03' # pL
+ b'\x00' # pH
+ b'\x31' # cn (49 <=> 0x31 <=> QRCode)
+ b'\x45' # fn (69 <=> 0x45 <=> error correction)
+ _get_qrcode_error_correction(**kwargs)
)
commands.append(
b'\x1D\x28\x6B' # GS(k
+ b'\x03' # pL
+ b'\x00' # pH
+ b'\x31' # cn (49 <=> 0x31 <=> QRCode)
+ b'\x43' # fn (67 <=> 0x43 <=> module size)
+ _get_qrcode_module_size(**kwargs)
)
commands.append(
b'\x1D\x28\x6B' # GS(k
+ b'\x03' # pL
+ b'\x00' # pH
+ b'\x31' # cn (49 <=> 0x31 <=> QRCode)
+ b'\x51' # fn (81 <=> 0x51 <=> print 2D symbol)
+ b'\x30' # m (48 <=> 0x30 <=> literal value)
)
for cmd in commands:
self.device.write(cmd)
time.sleep(1) # sleeps one second for qrcode to be printed
return self.device.read()
def cut(self, partial=True, feed=0):
"""Trigger cutter to perform partial (default) or full paper cut.
:param bool partial: Optional. Indicates a partial (``True``, the
default value) or a full cut (``False``).
:param int feed: Optional. Value from 0 (default) to 255 (inclusive).
This value should be multiple of the vertical motion unit, feeding
paper "until current position reaches the cutter".
For details, visit `GS V <https://www.epson-biz.com/modules/ref_escpos/index.php?content_id=87>`_
command documentation.
""" # noqa: E501
if self.hardware_features.get(feature.CUTTER, False):
# TODO: implement hardware alternative for unavailable features
# For example:
#
# self.hardware_alternatives.get('cutter-full-cut')(self)
#
# So, implementations or end-user-applications can replace
# certain hardware functionalites, based on available features.
#
# The above mapping can replace full cuts with line feeds for
            # printer hardware that does not have an automatic paper cutter:
#
# self.hardware_alternatives.update({
# # skip 7 lines if we do not have a paper cutter
# 'cutter-full-cut': lambda impl: impl.lf(7)
# })
#
func_b = b'\x42' if partial else b'\x41' # function B
feed_n = six.int2byte(feed)
self.device.write(b'\x1D\x56' + func_b + feed_n) # GS V m n
def kick_drawer(self, port=0, **kwargs):
"""Kick drawer connected to the given port.
In this implementation, cash drawers are identified according to the
port in which they are connected. This relation between drawers and
ports does not exists in the ESC/POS specification and it is just a
design decision to normalize cash drawers handling. From the user
application perspective, drawers are simply connected to port 0, 1, 2,
and so on.
If printer does not have this feature then no exception should be
raised.
:param int number: The port number to kick drawer (default is ``0``).
:raises CashDrawerException: If given port does not exists.
"""
if self.hardware_features.get(feature.CASHDRAWER_PORTS, False):
# if feature is available assume at least one port is available
max_ports = self.hardware_features.get(
feature.CASHDRAWER_AVAILABLE_PORTS, 1
)
if port not in range(max_ports):
raise CashDrawerException((
'invalid cash drawer port: {!r} (available '
'ports are {!r})'
).format(port, list(range(max_ports))))
return self._kick_drawer_impl(port=port, **kwargs)
def _kick_drawer_impl(self, port=0, **kwargs):
if port not in range(2):
raise CashDrawerException((
'invalid cash drawer port: {!r}'
).format(port))
param = b'\x00' if port == 0 else b'\x01' # pulse to pin 2 or 5
self.device.write(b'\x1B\x70' + param)
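        # Note: the full ESC/POS pulse command is 'ESC p m t1 t2', where t1/t2
        # set the pulse ON/OFF durations; this implementation omits them, which
        # some printers tolerate (check your printer's command reference).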
class TMT20(GenericESCPOS):
"""Epson TM-T20 thermal printer."""
model = _Model(name='Epson TM-T20', vendor=VENDOR)
    def __init__(self, device, features=None, **kwargs):
        # avoid a mutable default argument; treat missing features as empty
        super(TMT20, self).__init__(device, **kwargs)
        self.hardware_features.update({
            feature.CUTTER: True,
            feature.CASHDRAWER_PORTS: True,
            feature.CASHDRAWER_AVAILABLE_PORTS: 1,
        })
        self.hardware_features.update(features or {})
def _get_qrcode_error_correction(**kwargs):
# adapt from PyESCPOS to Epson's own QRCode ECC level byte value
return QRCODE_ERROR_CORRECTION_MAP.get(
kwargs.get(
'qrcode_ecc_level',
barcode.QRCODE_ERROR_CORRECTION_L
)
)
def _get_qrcode_module_size(**kwargs):
# adapt from PyESCPOS to Epson's own QRCode module size byte value
return QRCODE_MODULE_SIZE_MAP.get(
kwargs.get(
'qrcode_module_size',
barcode.QRCODE_MODULE_SIZE_4
)
)
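# A minimal usage sketch (hedged; 'SomeDevice' is a hypothetical transport
# exposing the catch/write/read interface described on GenericESCPOS.device):
#
#     printer = GenericESCPOS(SomeDevice())
#     printer.init()
#     printer.set_mode(font=FONT_B, emphasized=True)
#     printer.text('Hello, ESC/POS!')
#     printer.qrcode('https://example.com/')
#     printer.cut()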
|
base4sistemas/pyescpos
|
escpos/impl/epson.py
|
Python
|
apache-2.0
| 23,294
|
[
"VisIt"
] |
b7af3798723994a309b0d0ceeb6c519d4c8ed161aa20adfd7f7ca9c08d95eb01
|
'''
IMPORTANT NOTES:
In the .oms file, the first and last RA/DEC represent a reference slit at the bottom of the mask and the center of the mask, respectively.
Please list the calibration lamp(s) used during your observations here
'''
cal_lamp = ['Xenon','Argon'] #'Xenon','Argon','HgNe','Neon'
print 'Using calibration lamps: ', cal_lamp
import numpy as np
from astropy.io import fits as pyfits
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.widgets import RadioButtons, Button, CheckButtons
import scipy.signal as signal
from pyds9 import *
import sys
import re
import subprocess
import pandas as pd
import copy
import os
import fnmatch
import time
from testopt import *
import pickle
import pdb
from scipy import fftpack
from scipy.signal import argrelextrema #used below; made explicit here
from scipy.optimize import curve_fit #used below; made explicit here
from get_photoz import *
from zpy import *
#from redshift_estimate import *
from sncalc import *
#from redshift_checker import *
from gal_trace import *
from slit_find import *
import pprint
def getch():
import tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd,termios.TCSADRAIN,old_settings)
return ch
def filter_image(img):
    """Replace likely cosmic-ray hits with the local 5x5 median.

    Pixels whose deviation from the median-filtered image exceeds
    8*sigma (sigma=2.0, i.e. 16 counts) are replaced by the median value.
    """
    img_sm = signal.medfilt(np.float64(img),5)
    sigma = 2.0
    bad = np.abs(img-img_sm) / sigma > 8.0
    img_cr = img.copy()
    img_cr[bad] = img_sm[bad]
    return img_cr
pixscale = 0.273 #pixel scale for OSMOS
xbin = 1
ybin = 1
yshift = 13.0
wm = []
fm = []
if 'Xenon' in cal_lamp:
wm_Xe,fm_Xe = np.loadtxt('osmos_Xenon.dat',usecols=(0,2),unpack=True)
wm_Xe = air_to_vacuum(wm_Xe)
wm.extend(wm_Xe)
fm.extend(fm_Xe)
if 'Argon' in cal_lamp:
wm_Ar,fm_Ar = np.loadtxt('osmos_Argon.dat',usecols=(0,2),unpack=True)
wm_Ar = air_to_vacuum(wm_Ar)
wm.extend(wm_Ar)
fm.extend(fm_Ar)
if 'HgNe' in cal_lamp:
wm_HgNe,fm_HgNe = np.loadtxt('osmos_HgNe.dat',usecols=(0,2),unpack=True)
wm_HgNe = air_to_vacuum(wm_HgNe)
wm.extend(wm_HgNe)
fm.extend(fm_HgNe)
if 'Neon' in cal_lamp:
wm_Ne,fm_Ne = np.loadtxt('osmos_Ne.dat',usecols=(0,2),unpack=True)
wm_Ne = air_to_vacuum(wm_Ne)
wm.extend(wm_Ne)
fm.extend(fm_Ne)
fm = np.array(fm)[np.argsort(wm)]
wm = np.array(wm)[np.argsort(wm)]
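#wm/fm now form a single merged line list sorted by wavelength, so the
#calibration steps below can treat all selected lamps uniformly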
###################
#Define Cluster ID#
###################
try:
id_import = str(sys.argv[1])
clus_id = id_import
except:
print "Cluster Name Error: You must enter a cluster name to perform reduction"
print ' '
idnew = str(raw_input("Cluster ID: "))
clus_id = idnew
print 'Reducing cluster: ',clus_id
###############################################################
#ask if you want to only reduce sdss galaxies with spectra
try:
sdss_check = str(sys.argv[2])
if sdss_check == 'sdss':
sdss_check = True
else:
raise Exception(sdss_check+' is not an accepted input. \'sdss\' is the only accepted input here.')
except IndexError:
sdss_check = False
############################
#Import Cluster .fits files#
############################
for file in os.listdir('./'+clus_id+'/'): #search and import all mosaics
if fnmatch.fnmatch(file, 'mosaic_*'):
image_file = file
#create reduced files if they don't exist
def reduce_files(filetype):
for file in os.listdir('./'+clus_id+'/'+filetype+'/'):
if fnmatch.fnmatch(file, '*.????.fits'):
if not os.path.isfile(clus_id+'/'+filetype+'/'+file[:-5]+'b.fits'):
print 'Creating '+clus_id+'/'+filetype+'/'+file[:-5]+'b.fits'
p = subprocess.Popen('python proc4k.py '+clus_id+'/'+filetype+'/'+file,shell=True)
p.wait()
else:
print 'Reduced '+filetype+' files exist'
filetypes = ['science','arcs','flats']
for filetype in filetypes:
reduce_files(filetype)
#import, clean, and add science fits files
sciencefiles = np.array([])
hdulists_science = np.array([])
for file in os.listdir('./'+clus_id+'/science/'): #search and import all science filenames
if fnmatch.fnmatch(file, '*b.fits'):
sciencefiles = np.append(sciencefiles,file)
scifits = pyfits.open(clus_id+'/science/'+file)
hdulists_science = np.append(hdulists_science,scifits)
print sciencefiles
science_file = sciencefiles[0]
hdulist_science = pyfits.open(clus_id+'/science/'+science_file)
naxis1 = hdulist_science[0].header['NAXIS1']
naxis2 = hdulist_science[0].header['NAXIS2']
#import flat data
flatfiles = np.array([])
hdulists_flat = np.array([])
for file in os.listdir('./'+clus_id+'/flats/'): #search and import all flat filenames
if fnmatch.fnmatch(file, '*b.fits'):
flatfiles = np.append(flatfiles,file)
flatfits = pyfits.open(clus_id+'/flats/'+file)
hdulists_flat = np.append(hdulists_flat,flatfits)
if len(hdulists_flat) < 1:
raise Exception('proc4k.py did not detect any flat files')
#import arc data
arcfiles = np.array([])
hdulists_arc = np.array([])
for file in os.listdir('./'+clus_id+'/arcs/'): #search and import all arc filenames
if fnmatch.fnmatch(file, '*b.fits'):
arcfiles = np.append(arcfiles,file)
arcfits = pyfits.open(clus_id+'/arcs/'+file)
hdulists_arc = np.append(hdulists_arc,arcfits)
if len(hdulists_arc) < 1:
raise Exception('proc4k.py did not detect any arc files')
###############################################################
#########################################################
#Need to parse .oms file for ra,dec and slit information#
#########################################################
RA = np.array([])
DEC = np.array([])
SLIT_NUM = np.array([])
SLIT_WIDTH = np.array([])
SLIT_LENGTH = np.array([])
SLIT_X = np.array([])
SLIT_Y = np.array([])
for file in os.listdir('./'+clus_id+'/'):
if fnmatch.fnmatch(file, '*.oms'):
omsfile = file
inputfile = open(clus_id+'/'+omsfile)
alltext = inputfile.readlines()
for line in alltext:
RAmatch = re.search('TARG(.*)\.ALPHA\s*(..)(..)(.*)',line)
DECmatch = re.search('DELTA\s*(...)(..)(.*)',line)
WIDmatch = re.search('WID\s\s*(.*)',line)
LENmatch = re.search('LEN\s\s*(.*)',line)
Xmatch = re.search('XMM\s\s*(.*)',line)
Ymatch = re.search('YMM\s\s*(.*)',line)
if RAmatch:
SLIT_NUM = np.append(SLIT_NUM,RAmatch.group(1))
RA = np.append(RA,RAmatch.group(2)+':'+RAmatch.group(3)+':'+RAmatch.group(4))
if DECmatch:
DEC = np.append(DEC,DECmatch.group(1)+':'+DECmatch.group(2)+':'+DECmatch.group(3))
if WIDmatch:
SLIT_WIDTH = np.append(SLIT_WIDTH,WIDmatch.group(1))
if LENmatch:
SLIT_LENGTH = np.append(SLIT_LENGTH,LENmatch.group(1))
if Xmatch:
SLIT_X = np.append(SLIT_X,0.5*naxis1+np.float(Xmatch.group(1))*(11.528)/(pixscale))
if Ymatch:
SLIT_Y = np.append(SLIT_Y,0.5*naxis2+np.float(Ymatch.group(1))*(11.528)/(pixscale)+yshift)
#drop the throw-away reference-slit row (see IMPORTANT NOTES above) and dump into Gal_dat dataframe
Gal_dat = pd.DataFrame({'RA':RA[1:SLIT_WIDTH.size],'DEC':DEC[1:SLIT_WIDTH.size],'SLIT_WIDTH':SLIT_WIDTH[1:],'SLIT_LENGTH':SLIT_LENGTH[1:],'SLIT_X':SLIT_X[1:],'SLIT_Y':SLIT_Y[1:]})
###############################################################
############################
#Query SDSS for galaxy data#
############################
if os.path.isfile(clus_id+'/'+clus_id+'_sdssinfo.csv'):
redshift_dat = pd.read_csv(clus_id+'/'+clus_id+'_sdssinfo.csv')
else:
#returns a Pandas dataframe with columns
#objID','SpecObjID','ra','dec','umag','gmag','rmag','imag','zmag','redshift','photo_z','extra'
redshift_dat = query_galaxies(Gal_dat.RA,Gal_dat.DEC)
redshift_dat.to_csv(clus_id+'/'+clus_id+'_sdssinfo.csv',index=False)
#merge into Gal_dat
Gal_dat = Gal_dat.join(redshift_dat)
gal_z = Gal_dat['spec_z']
gal_gmag = Gal_dat['gmag']
gal_rmag = Gal_dat['rmag']
gal_imag = Gal_dat['imag']
####################
#Open images in ds9#
####################
p = subprocess.Popen('ds9 '+clus_id+'/'+image_file+' -geometry 1200x900 -scale sqrt -scale mode zscale -fits '+clus_id+'/arcs/'+arcfiles[0],shell=True)
#p = subprocess.Popen('ds9 '+clus_id+'/'+image_file+' -geometry 1200x900 -scale sqrt -scale mode zscale -fits '+clus_id+'/arcs/'+arcfiles[0],shell=True)
time.sleep(3)
print "Have the images loaded? (y/n)"
while True: #check to see if images have loaded correctly
char = getch()
if char.lower() in ("y", "n"):
if char.lower() == "y":
print 'Image has been loaded'
break
else:
sys.exit('Check to make sure file '+image_file+' exists in '+clus_id+'/')
d = ds9() #start pyds9 and set parameters
d.set('frame 1')
d.set('single')
d.set('zscale contrast 9.04')
d.set('zscale bias 0.055')
d.set('zoom 2')
d.set('cmap Heat')
d.set('regions sky fk5')
#################################################################
####################################################################################
#Loop through mosaic image and decide if objects are galaxies, stars, sky, or other#
####################################################################################
reassign = 'n'
keys = np.arange(0,Gal_dat.SLIT_WIDTH.size,1).astype('string')
if os.path.isfile(clus_id+'/'+clus_id+'_slittypes.pkl'):
reassign = raw_input('Detected slit types file in path. Do you wish to use this (y) or remove and re-assign slit types (n)? ')
if reassign == 'n':
slit_type = {}
print 'Is this a galaxy (g), a reference star (r), or empty sky (s)?'
for i in range(len(Gal_dat)):
d.set('pan to '+Gal_dat.RA[i]+' '+Gal_dat.DEC[i]+' wcs fk5')
if Gal_dat.SLIT_WIDTH[i] == '1.0':
d.set('regions command {box('+Gal_dat.RA[i]+' '+Gal_dat.DEC[i]+' 3 24) #color=green}')
else:
d.set('regions command {box('+Gal_dat.RA[i]+' '+Gal_dat.DEC[i]+' 12 12) #color=green}')
while True:
char = getch()
if char.lower() in ("g", "r", "s"):
break
slit_type[keys[i]] = char.lower()
pickle.dump(slit_type,open(clus_id+'/'+clus_id+'_slittypes.pkl','wb'))
else:
slit_type = pickle.load(open(clus_id+'/'+clus_id+'_slittypes.pkl','rb'))
stypes = pd.DataFrame(slit_type.values(),index=np.array(slit_type.keys()).astype('int'),columns=['slit_type'])
Gal_dat = Gal_dat.join(stypes)
##################################################################
d.set('frame 2')
d.set('zscale contrast 0.25')
d.set('zoom 0.40')
#######################################
#Reduction steps to prep science image#
#######################################
redo = 'n'
if os.path.isfile(clus_id+'/science/'+clus_id+'_science.cr.fits'):
redo = raw_input('Detected cosmic ray filtered file exists. Do you wish to use this (y) or remove and re-calculate (n)? ')
if redo == 'n':
try:
os.remove(clus_id+'/science/'+clus_id+'_science.cr.fits')
except: pass
scifits_c = copy.copy(hdulists_science[0]) #copy I will use to hold the smoothed and added results
scifits_c.data = np.multiply(0.0,scifits_c.data, casting="unsafe")
print 'SCIENCE REDUCTION'
for scifits in hdulists_science:
filt = filter_image(scifits.data)
scifits_c.data += filt + np.abs(np.nanmin(filt))
scifits_c.writeto(clus_id+'/science/'+clus_id+'_science.cr.fits')
else:
scifits_c = pyfits.open(clus_id+'/science/'+clus_id+'_science.cr.fits')[0]
print 'loading pre-prepared cosmic ray filtered files...'
print 'FLAT REDUCTION'
if redo == 'n':
try:
os.remove(clus_id+'/flats/'+clus_id+'_flat.cr.fits')
except: pass
flatfits_c = copy.copy(hdulists_flat[0]) #copy I will use to hold the smoothed and added results
flat_data = np.zeros((hdulists_flat.size,naxis1,naxis2))
i = 0
for flatfits in hdulists_flat:
filt = filter_image(flatfits.data)
flat_data[i] = (filt+np.abs(np.nanmin(filt)))/np.max(filt+np.abs(np.nanmin(filt)))
i += 1
flatfits_c.data = np.median(flat_data,axis=0)
flatfits_c.writeto(clus_id+'/flats/'+clus_id+'_flat.cr.fits')
else: flatfits_c = pyfits.open(clus_id+'/flats/'+clus_id+'_flat.cr.fits')[0]
print 'ARC REDUCTION'
if redo == 'n':
try:
os.remove(clus_id+'/arcs/'+clus_id+'_arc.cr.fits')
except: pass
arcfits_c = copy.copy(hdulists_arc[0]) #copy I will use to hold the smoothed and added results
arcfits_c.data = np.multiply(arcfits_c.data,0.0,casting="unsafe")
for arcfits in hdulists_arc:
filt = arcfits.data#filter_image(arcfits.data)
arcfits_c.data += filt + np.abs(np.nanmin(filt))
arcfits_c.writeto(clus_id+'/arcs/'+clus_id+'_arc.cr.fits')
else: arcfits_c = pyfits.open(clus_id+'/arcs/'+clus_id+'_arc.cr.fits')[0]
##################################################################
#Loop through regions and shift regions for maximum effectiveness#
##################################################################
reassign = 'n'
if os.path.isfile(clus_id+'/'+clus_id+'_slit_pos_qual.tab'):
reassign = raw_input('Detected slit position and quality file in path. Do you wish to use this (y) or remove and re-adjust (n)? ')
if reassign == 'n':
good_spectra = np.array(['n']*len(Gal_dat))
FINAL_SLIT_X = np.zeros(len(Gal_dat))
FINAL_SLIT_Y = np.zeros(len(Gal_dat))
SLIT_WIDTH = np.zeros(len(Gal_dat))
lower_lim = int(0.0)
upper_lim = int(100.0)
spectra = {}
print 'If needed, move region box to desired location. To increase the size, drag on corners'
for i in range(SLIT_WIDTH.size):
print 'SLIT ',i
d.set('pan to 1150.0 '+str(Gal_dat.SLIT_Y[i])+' physical')
print 'Galaxy at ',Gal_dat.RA[i],Gal_dat.DEC[i]
d.set('regions command {box(2000 '+str(Gal_dat.SLIT_Y[i])+' 4300 65) #color=green highlite=1}')
#raw_input('Once done: hit ENTER')
if Gal_dat.slit_type[i] == 'g':
if sdss_check:
if Gal_dat.spec_z[i] != 0.0: skipgal = False
else: skipgal = True
else: skipgal = False
if not skipgal:
good = False
loops = 1
while not good and loops <=3:
good = True
print 'Move/stretch region box. Hit (y) when ready'
while True:
char = getch()
if char.lower() in ("y"):
break
newpos_str = d.get('regions').split('\n')
for n_string in newpos_str:
if n_string[:3] == 'box':
newpos = re.search('box\(.*,(.*),.*,(.*),.*\)',n_string)
FINAL_SLIT_X[i] = Gal_dat.SLIT_X[i]
FINAL_SLIT_Y[i] = newpos.group(1)
SLIT_WIDTH[i] = newpos.group(2)
print FINAL_SLIT_X[i], FINAL_SLIT_Y[i], SLIT_WIDTH[i]
##
#Sky subtract code
##
try:
istart = int(FINAL_SLIT_Y[i]-SLIT_WIDTH[i]/2.0)
iend = int(FINAL_SLIT_Y[i]+SLIT_WIDTH[i]/2.0)
result = slit_find(flatfits_c.data[istart:iend,:],scifits_c.data[istart:iend,:],arcfits_c.data[istart:iend,:],lower_lim,upper_lim)
science_spec = result[0]
arc_spec = result[1]
gal_spec = result[2]
gal_cuts = result[3]
lower_lim = result[4]
upper_lim = result[5]
spectra[keys[i]] = {'science_spec':science_spec,'arc_spec':arc_spec,'gal_spec':gal_spec,'gal_cuts':gal_cuts}
                        print 'Is this spectrum good (y) or bad (n)?'
while True:
char = getch()
if char.lower() in ("y","n"):
break
plt.close()
good_spectra[i] = char.lower()
break
except:
print 'Fit did not fall within the chosen box. Please re-define the area of interest.'
good = False
loops += 1
if loops == 4:
good_spectra[i] = 'n'
FINAL_SLIT_X[i] = Gal_dat.SLIT_X[i]
FINAL_SLIT_Y[i] = Gal_dat.SLIT_Y[i]
SLIT_WIDTH[i] = 40
else:
good_spectra[i] = 'n'
FINAL_SLIT_X[i] = Gal_dat.SLIT_X[i]
FINAL_SLIT_Y[i] = Gal_dat.SLIT_Y[i]
SLIT_WIDTH[i] = 40
else:
good_spectra[i] = 'n'
FINAL_SLIT_X[i] = Gal_dat.SLIT_X[i]
FINAL_SLIT_Y[i] = Gal_dat.SLIT_Y[i]
SLIT_WIDTH[i] = 40
print FINAL_SLIT_X[i],FINAL_SLIT_Y[i],SLIT_WIDTH[i]
d.set('regions delete all')
print FINAL_SLIT_X
np.savetxt(clus_id+'/'+clus_id+'_slit_pos_qual.tab',np.array(zip(FINAL_SLIT_X,FINAL_SLIT_Y,SLIT_WIDTH,good_spectra),dtype=[('float',float),('float2',float),('int',int),('str','|S1')]),delimiter='\t',fmt='%10.2f %10.2f %3d %s')
pickle.dump(spectra,open(clus_id+'/'+clus_id+'_reduced_spectra.pkl','wb'))
else:
FINAL_SLIT_X,FINAL_SLIT_Y,SLIT_WIDTH = np.loadtxt(clus_id+'/'+clus_id+'_slit_pos_qual.tab',dtype='float',usecols=(0,1,2),unpack=True)
good_spectra = np.loadtxt(clus_id+'/'+clus_id+'_slit_pos_qual.tab',dtype='string',usecols=(3,),unpack=True)
spectra = pickle.load(open(clus_id+'/'+clus_id+'_reduced_spectra.pkl','rb'))
Gal_dat['FINAL_SLIT_X'],Gal_dat['FINAL_SLIT_Y'],Gal_dat['SLIT_WIDTH'],Gal_dat['good_spectra'] = FINAL_SLIT_X,FINAL_SLIT_Y,SLIT_WIDTH,good_spectra
#Need to flip FINAL_SLIT_X coords to account for reverse wavelength spectra
Gal_dat['FINAL_SLIT_X_FLIP'] = 4064 - Gal_dat.FINAL_SLIT_X
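#e.g. a slit at FINAL_SLIT_X=1000 lands at 4064-1000=3064 in the flipped frame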
####################################################################
########################
#Wavelength Calibration#
########################
reassign = 'n'
#wave = np.zeros((len(Gal_dat),4064))
if os.path.isfile(clus_id+'/'+clus_id+'_stretchshift.tab'):
reassign = raw_input('Detected file with stretch and shift parameters for each spectra. Do you wish to use this (y) or remove and re-adjust (n)? ')
if reassign == 'n':
#create write file
f = open(clus_id+'/'+clus_id+'_stretchshift.tab','w')
f.write('#X_SLIT_FLIP Y_SLIT SHIFT STRETCH QUAD CUBE FOURTH FIFTH WIDTH \n')
#initialize polynomial arrays
fifth,fourth,cube,quad,stretch,shift = np.zeros((6,len(Gal_dat)))
shift_est = 4.71e-6*(Gal_dat['FINAL_SLIT_X'] - 2500.0)**2 + 4.30e-6*(Gal_dat['FINAL_SLIT_Y'] - 2000)**2 + 4469.72
stretch_est = -9.75e-9*(Gal_dat['FINAL_SLIT_X'] - 1800.0)**2 - 2.84e-9*(Gal_dat['FINAL_SLIT_Y'] - 2000)**2 + 0.7139
quad_est = 8.43e-9*(Gal_dat['FINAL_SLIT_X'] - 1800.0) + 1.55e-10*(Gal_dat['FINAL_SLIT_Y'] - 2000) + 1.3403e-5
cube_est = 7.76e-13*(Gal_dat['FINAL_SLIT_X'] - 1800.0) + 4.23e-15*(Gal_dat['FINAL_SLIT_Y'] - 2000) - 5.96e-9
fifth_est,fourth_est = np.zeros((2,len(Gal_dat)))
calib_data = arcfits_c.data
p_x = np.arange(0,4064,1)
ii = 0
#do reduction for initial galaxy
while ii < stretch.size:
if good_spectra[ii]=='y':
f_x = np.sum(spectra[keys[ii]]['arc_spec'],axis=0)
d.set('pan to 1150.0 '+str(Gal_dat.FINAL_SLIT_Y[ii])+' physical')
d.set('regions command {box(2000 '+str(Gal_dat.FINAL_SLIT_Y[ii])+' 4500 '+str(Gal_dat.SLIT_WIDTH[ii])+') #color=green highlite=1}')
#initial stretch and shift
stretch_est[ii],shift_est[ii],quad_est[ii] = interactive_plot(p_x,f_x,stretch_est[ii],shift_est[ii],quad_est[ii],cube_est[ii],fourth_est[ii],fifth_est[ii],Gal_dat.FINAL_SLIT_X_FLIP[ii],wm,fm)
#run peak identifier and match lines to peaks
line_matches = {'lines':[],'peaks_p':[],'peaks_w':[],'peaks_h':[]}
est_features = [fifth_est[ii],fourth_est[ii],cube_est[ii],quad_est[ii],stretch_est[ii],shift_est[ii]]
xspectra = fifth_est[ii]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii])**5 + fourth_est[ii]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii])**4 + cube_est[ii]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii])**3 + quad_est[ii]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii])**2 + stretch_est[ii]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii]) + shift_est[ii]
fydat = f_x[::-1] - signal.medfilt(f_x[::-1],171) #used to find noise
fyreal = (f_x[::-1]-f_x.min())/10.0
peaks = argrelextrema(fydat,np.greater) #find peaks
fxpeak = xspectra[peaks] #peaks in wavelength
fxrpeak = p_x[peaks] #peaks in pixels
fypeak = fydat[peaks] #peaks heights (for noise)
fyrpeak = fyreal[peaks] #peak heights
here = int(np.round(fydat.size*0.5))
noise = np.std(np.sort(fydat)[:here]) #noise level
peaks = peaks[0][fypeak>noise]
fxpeak = fxpeak[fypeak>noise] #significant peaks in wavelength
fxrpeak = fxrpeak[fypeak>noise] #significant peaks in pixels
fypeak = fyrpeak[fypeak>noise] #significant peaks height
for j in range(wm.size):
line_matches['lines'].append(wm[j]) #line positions
line_matches['peaks_p'].append(fxrpeak[np.argsort(np.abs(wm[j]-fxpeak))][0]) #closest peak (in pixels)
line_matches['peaks_w'].append(fxpeak[np.argsort(np.abs(wm[j]-fxpeak))][0]) #closest peak (in wavelength)
line_matches['peaks_h'].append(fypeak[np.argsort(np.abs(wm[j]-fxpeak))][0]) #closest peak (height)
#Pick lines for initial parameter fit
cal_states = {'Xe':True,'Ar':False,'HgNe':False,'Ne':False}
fig,ax = plt.subplots(1)
#maximize window
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.subplots_adjust(right=0.8,left=0.05,bottom=0.20)
vlines = []
for j in range(wm.size):
vlines.append(ax.axvline(wm[j],color='r',alpha=0.5))
line, = ax.plot(wm,np.zeros(wm.size),'ro')
yspectra = (f_x[::-1]-f_x.min())/10.0
fline, = plt.plot(xspectra,yspectra,'b',lw=1.5,picker=5)
browser = LineBrowser(fig,ax,est_features,wm,fm,p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii],Gal_dat.FINAL_SLIT_X_FLIP[ii],vlines,fline,xspectra,yspectra,peaks,fxpeak,fxrpeak,fypeak,line_matches,cal_states)
fig.canvas.mpl_connect('button_press_event', browser.onclick)
fig.canvas.mpl_connect('key_press_event',browser.onpress)
finishax = plt.axes([0.83,0.85,0.15,0.1])
finishbutton = Button(finishax,'Finish',hovercolor='0.975')
finishbutton.on_clicked(browser.finish)
closeax = plt.axes([0.83, 0.65, 0.15, 0.1])
button = Button(closeax, 'Replace (m)', hovercolor='0.975')
button.on_clicked(browser.replace_b)
nextax = plt.axes([0.83, 0.45, 0.15, 0.1])
nextbutton = Button(nextax, 'Next (n)', hovercolor='0.975')
nextbutton.on_clicked(browser.next_go)
deleteax = plt.axes([0.83,0.25,0.15,0.1])
delete_button = Button(deleteax,'Delete (j)',hovercolor='0.975')
delete_button.on_clicked(browser.delete_b)
#stateax = plt.axes([0.83,0.05,0.15,0.1])
#states = CheckButtons(stateax,cal_states.keys(), cal_states.values())
#states.on_clicked(browser.set_calib_lines)
fig.canvas.draw()
plt.show()
#fit 5th order polynomial to peak/line selections
params,pcov = curve_fit(polyfour,(np.sort(browser.line_matches['peaks_p'])-Gal_dat.FINAL_SLIT_X_FLIP[ii]),np.sort(browser.line_matches['lines']),p0=[shift_est[ii],stretch_est[ii],quad_est[ii],cube_est[ii],1e-12,1e-12])
#cube_est = cube_est + params[3]
fourth_est = fourth_est + params[4]
fifth_est = fifth_est + params[5]
#make calibration and clip on lower anchor point. Apply to Flux as well
wave_model = params[0]+params[1]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii])+params[2]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii])**2+params[3]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii])**3.0+params[4]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii])**4.0+params[5]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[ii])**5.0
spectra[keys[ii]]['wave'] = wave_model
spectra[keys[ii]]['wave2'] = wave_model[p_x >= np.sort(browser.line_matches['peaks_p'])[0]]
spectra[keys[ii]]['gal_spec2'] = ((np.array(spectra[keys[ii]]['gal_spec']).T[::-1])[p_x >= np.sort(browser.line_matches['peaks_p'])[0]]).T
flu = f_x - np.min(f_x)
flu = flu[::-1][p_x >= np.sort(browser.line_matches['peaks_p'])[0]]
Flux = flu/signal.medfilt(flu,201)
fifth[ii],fourth[ii],cube[ii],quad[ii],stretch[ii],shift[ii] = params[5],params[4],params[3],params[2],params[1],params[0]
plt.plot(spectra[keys[ii]]['wave2'],Flux/np.max(Flux[np.isfinite(Flux)]))
plt.plot(wm,fm/np.max(fm),'ro')
for j in range(browser.wm.size):
plt.axvline(browser.wm[j],color='r')
plt.xlim(3800,6000)
try:
plt.savefig(clus_id+'/figs/'+str(ii)+'.wave.png')
except:
os.mkdir(clus_id+'/figs')
plt.savefig(clus_id+'/figs/'+str(ii)+'.wave.png')
plt.show()
f.write(str(Gal_dat.FINAL_SLIT_X_FLIP[ii])+'\t')
f.write(str(Gal_dat.FINAL_SLIT_Y[ii])+'\t')
f.write(str(shift[ii])+'\t')
f.write(str(stretch[ii])+'\t')
f.write(str(quad[ii])+'\t')
f.write(str(cube[ii])+'\t')
f.write(str(fourth[ii])+'\t')
f.write(str(fifth[ii])+'\t')
f.write(str(Gal_dat.SLIT_WIDTH[ii])+'\t')
f.write('\n')
print 'Wave calib',ii
ii += 1
break
f.write(str(Gal_dat.FINAL_SLIT_X_FLIP[ii])+'\t')
f.write(str(Gal_dat.FINAL_SLIT_Y[ii])+'\t')
f.write(str(shift[ii])+'\t')
f.write(str(stretch[ii])+'\t')
f.write(str(quad[ii])+'\t')
f.write(str(cube[ii])+'\t')
f.write(str(fourth[ii])+'\t')
f.write(str(fifth[ii])+'\t')
f.write(str(Gal_dat.SLIT_WIDTH[ii])+'\t')
f.write('\n')
ii+=1
#estimate stretch,shift,quad terms with sliders for 2nd - all galaxies
for i in range(ii,len(Gal_dat)):
print 'Calibrating',i,'of',stretch.size
if Gal_dat.good_spectra[i] == 'y':
if sdss_check:
if Gal_dat.spec_z[i] != 0.0: skipgal = False
else: skipgal = True
else: skipgal = False
if not skipgal:
p_x = np.arange(0,4064,1)
f_x = np.sum(spectra[keys[i]]['arc_spec'],axis=0)
d.set('pan to 1150.0 '+str(Gal_dat.FINAL_SLIT_Y[i])+' physical')
d.set('regions command {box(2000 '+str(Gal_dat.FINAL_SLIT_Y[i])+' 4500 '+str(Gal_dat.SLIT_WIDTH[i])+') #color=green highlite=1}')
#stretch_est[i],shift_est[i],quad_est[i] = interactive_plot(p_x,f_x,stretch_est[i-1],shift_est[i-1]-(Gal_dat.FINAL_SLIT_X_FLIP[i]*stretch_est[0]-Gal_dat.FINAL_SLIT_X_FLIP[i-1]*stretch_est[i-1]),quad[i-1],cube[i-1],fourth[i-1],fifth[i-1],Gal_dat.FINAL_SLIT_X_FLIP[i])
reduced_slits = np.where(stretch != 0.0)
stretch_est[i],shift_est[i],quad_est[i] = interactive_plot(p_x,f_x,stretch_est[i],shift_est[i],quad_est[i],cube_est[i],fourth_est[i],fifth_est[i],Gal_dat.FINAL_SLIT_X_FLIP[i],wm,fm)
est_features = [fifth_est[i],fourth_est[i],cube_est[i],quad_est[i],stretch_est[i],shift_est[i]]
#run peak identifier and match lines to peaks
line_matches = {'lines':[],'peaks_p':[],'peaks_w':[],'peaks_h':[]}
xspectra = fifth_est[i]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[i])**5 + fourth_est[i]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[i])**4 + cube_est[i]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[i])**3 + quad_est[i]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[i])**2 + stretch_est[i]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[i]) + shift_est[i]
fydat = f_x[::-1] - signal.medfilt(f_x[::-1],171) #used to find noise
fyreal = (f_x[::-1]-f_x.min())/10.0
peaks = argrelextrema(fydat,np.greater) #find peaks
fxpeak = xspectra[peaks] #peaks in wavelength
fxrpeak = p_x[peaks] #peaks in pixels
fypeak = fydat[peaks] #peaks heights (for noise)
fyrpeak = fyreal[peaks] #peak heights
here = int(np.round(fydat.size*0.5))
noise = np.std(np.sort(fydat)[:here]) #noise level
peaks = peaks[0][fypeak>noise]
fxpeak = fxpeak[fypeak>noise] #significant peaks in wavelength
fxrpeak = fxrpeak[fypeak>noise] #significant peaks in pixels
fypeak = fyrpeak[fypeak>noise] #significant peaks height
for j in range(wm.size):
line_matches['lines'].append(wm[j]) #line positions
line_matches['peaks_p'].append(fxrpeak[np.argsort(np.abs(wm[j]-fxpeak))][0]) #closest peak (in pixels)
line_matches['peaks_w'].append(fxpeak[np.argsort(np.abs(wm[j]-fxpeak))][0]) #closest peak (in wavelength)
line_matches['peaks_h'].append(fypeak[np.argsort(np.abs(wm[j]-fxpeak))][0]) #closest peak (height)
#Pick lines for initial parameter fit
cal_states = {'Xe':True,'Ar':False,'HgNe':False,'Ne':False}
fig,ax = plt.subplots(1)
#maximize window
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.subplots_adjust(right=0.8,left=0.05,bottom=0.20)
vlines = []
for j in range(wm.size):
vlines.append(ax.axvline(wm[j],color='r'))
line, = ax.plot(wm,fm/2.0,'ro',picker=5)# 5 points tolerance
yspectra = (f_x[::-1]-f_x.min())/10.0
fline, = plt.plot(xspectra,yspectra,'b',lw=1.5,picker=5)
            peaks_p = np.array(line_matches['peaks_p']) #list arithmetic would raise TypeError
            estx = quad_est[i]*(peaks_p-Gal_dat.FINAL_SLIT_X_FLIP[i])**2 + stretch_est[i]*(peaks_p-Gal_dat.FINAL_SLIT_X_FLIP[i]) + shift_est[i]
browser = LineBrowser(fig,ax,est_features,wm,fm,p_x-Gal_dat.FINAL_SLIT_X_FLIP[i],Gal_dat.FINAL_SLIT_X_FLIP[i],vlines,fline,xspectra,yspectra,peaks,fxpeak,fxrpeak,fypeak,line_matches,cal_states)
fig.canvas.mpl_connect('button_press_event', browser.onclick)
fig.canvas.mpl_connect('key_press_event',browser.onpress)
finishax = plt.axes([0.83,0.85,0.15,0.1])
finishbutton = Button(finishax,'Finish',hovercolor='0.975')
finishbutton.on_clicked(browser.finish)
closeax = plt.axes([0.83, 0.65, 0.15, 0.1])
button = Button(closeax, 'Replace (m)', hovercolor='0.975')
button.on_clicked(browser.replace_b)
nextax = plt.axes([0.83, 0.45, 0.15, 0.1])
nextbutton = Button(nextax, 'Next (n)', hovercolor='0.975')
nextbutton.on_clicked(browser.next_go)
deleteax = plt.axes([0.83,0.25,0.15,0.1])
delete_button = Button(deleteax,'Delete (j)',hovercolor='0.975')
delete_button.on_clicked(browser.delete_b)
#stateax = plt.axes([0.83,0.05,0.15,0.1])
#states = CheckButtons(stateax,cal_states.keys(), cal_states.values())
#states.on_clicked(browser.set_calib_lines)
fig.canvas.draw()
plt.show()
#fit 5th order polynomial to peak/line selections
try:
params,pcov = curve_fit(polyfour,(np.sort(browser.line_matches['peaks_p'])-Gal_dat.FINAL_SLIT_X_FLIP[i]),np.sort(browser.line_matches['lines']),p0=[shift_est[i],stretch_est[i],quad_est[i],1e-8,1e-12,1e-12])
cube_est[i] = params[3]
fourth_est[i] = params[4]
fifth_est[i] = params[5]
except TypeError:
params = [shift_est[i],stretch_est[i],quad_est[i],cube_est[i-1],fourth_est[i-1],fifth_est[i-1]]
#make calibration and clip on lower anchor point. Apply to Flux as well
wave_model = params[0]+params[1]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[i])+params[2]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[i])**2+params[3]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[i])**3.0+params[4]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[i])**4.0+params[5]*(p_x-Gal_dat.FINAL_SLIT_X_FLIP[i])**5.0
spectra[keys[i]]['wave'] = wave_model
spectra[keys[i]]['wave2'] = wave_model[p_x >= np.sort(browser.line_matches['peaks_p'])[0]]
spectra[keys[i]]['gal_spec2'] = ((np.array(spectra[keys[i]]['gal_spec']).T[::-1])[p_x >= np.sort(browser.line_matches['peaks_p'])[0]]).T
flu = f_x[p_x >= np.sort(browser.line_matches['peaks_p'])[0]] - np.min(f_x[p_x >= np.sort(browser.line_matches['peaks_p'])[0]])
flu = flu[::-1]
Flux = flu/signal.medfilt(flu,201)
fifth[i],fourth[i],cube[i],quad[i],stretch[i],shift[i] = params[5],params[4],params[3],params[2],params[1],params[0]
plt.plot(spectra[keys[i]]['wave2'],Flux/np.max(Flux))
plt.plot(wm,fm/np.max(fm),'ro')
for j in range(browser.wm.size):
plt.axvline(browser.wm[j],color='r')
plt.xlim(3800,6000)
try:
plt.savefig(clus_id+'/figs/'+str(i)+'.wave.png')
except:
os.mkdir(clus_id+'/figs')
plt.savefig(clus_id+'/figs/'+str(i)+'.wave.png')
plt.close()
f.write(str(Gal_dat.FINAL_SLIT_X_FLIP[i])+'\t')
f.write(str(Gal_dat.FINAL_SLIT_Y[i])+'\t')
f.write(str(shift[i])+'\t')
f.write(str(stretch[i])+'\t')
f.write(str(quad[i])+'\t')
f.write(str(cube[i])+'\t')
f.write(str(fourth[i])+'\t')
f.write(str(fifth[i])+'\t')
f.write(str(Gal_dat.SLIT_WIDTH[i])+'\t')
f.write('\n')
f.close()
pickle.dump(spectra,open(clus_id+'/'+clus_id+'_reduced_spectra_wavecal.pkl','wb'))
else:
xslit,yslit,shift,stretch,quad,cube,fourth,fifth,wd = np.loadtxt(clus_id+'/'+clus_id+'_stretchshift.tab',dtype='float',usecols=(0,1,2,3,4,5,6,7,8),unpack=True)
spectra = pickle.load(open(clus_id+'/'+clus_id+'_reduced_spectra_wavecal.pkl','rb'))
#summed science slits + filtering to see spectra
#Flux_science_old = np.array([np.sum(scifits_c2.data[Gal_dat.FINAL_SLIT_Y[i]-Gal_dat.SLIT_WIDTH[i]/2.0:Gal_dat.FINAL_SLIT_Y[i]+Gal_dat.SLIT_WIDTH[i]/2.0,:],axis=0)[::-1] for i in range(len(Gal_dat))])
#Flux_science = np.array([np.sum(spectra[keys[i]]['gal_spec'],axis=0)[::-1] for i in range(len(Gal_dat))])
Flux_science = []
for i in range(len(Gal_dat)):
try:
Flux_science.append(np.sum(spectra[keys[i]]['gal_spec2'],axis=0))
except KeyError:
if i != 0:
Flux_science.append(np.zeros(len(Flux_science[i-1])))
else:
Flux_science.append(np.zeros(4064))
Flux_science = np.array(Flux_science)
#Add parameters to Dataframe
Gal_dat['shift'],Gal_dat['stretch'],Gal_dat['quad'],Gal_dat['cube'],Gal_dat['fourth'],Gal_dat['fifth'] = shift,stretch,quad,cube,fourth,fifth
####################
#Redshift Calibrate#
####################
#Import template spectrum (SDSS early type) and continuum subtract the flux
early_type = pyfits.open('spDR2-023.fit')
coeff0 = early_type[0].header['COEFF0']
coeff1 = early_type[0].header['COEFF1']
early_type_flux = early_type[0].data[0] - signal.medfilt(early_type[0].data[0],171)
early_type_wave = 10**(coeff0 + coeff1*np.arange(0,early_type_flux.size,1))
#initialize data arrays
redshift_est = np.zeros(len(Gal_dat))
cor = np.zeros(len(Gal_dat))
HSN = np.zeros(len(Gal_dat))
KSN = np.zeros(len(Gal_dat))
GSN = np.zeros(len(Gal_dat))
SNavg = np.zeros(len(Gal_dat))
SNHKmin = np.zeros(len(Gal_dat))
sdss_elem = np.where(Gal_dat.spec_z > 0.0)[0]
sdss_red = Gal_dat[Gal_dat.spec_z > 0.0].spec_z
qualityval = {'Clear':np.zeros(len(Gal_dat))}
median_sdss_redshift = np.median(Gal_dat.spec_z[Gal_dat.spec_z > 0.0])
print 'Median SDSS redshift',median_sdss_redshift
R = z_est()
for k in range(len(Gal_dat)):
if Gal_dat.slit_type[k] == 'g' and Gal_dat.good_spectra[k] == 'y':
if sdss_check:
if Gal_dat.spec_z[k] != 0.0: skipgal = False
else: skipgal = True
else: skipgal = False
if not skipgal:
F1 = fftpack.rfft(Flux_science[k])
cut = F1.copy()
W = fftpack.rfftfreq(spectra[keys[k]]['wave2'].size,d=spectra[keys[k]]['wave2'][1001]-spectra[keys[k]]['wave2'][1000])
cut[np.where(W>0.15)] = 0
Flux_science2 = fftpack.irfft(cut)
Flux_sc = Flux_science2 - signal.medfilt(Flux_science2,171)
d.set('pan to 1150.0 '+str(Gal_dat.FINAL_SLIT_Y[k])+' physical')
d.set('regions command {box(2000 '+str(Gal_dat.FINAL_SLIT_Y[k])+' 4500 '+str(Gal_dat.SLIT_WIDTH[k])+') #color=green highlite=1}')
redshift_est[k],cor[k],ztest,corr_val,qualityval['Clear'][k] = R.redshift_estimate(early_type_wave,early_type_flux,spectra[keys[k]]['wave2'],Flux_science2,gal_prior=None)
try:
HSN[k],KSN[k],GSN[k] = sncalc(redshift_est[k],spectra[keys[k]]['wave2'],Flux_sc)
except ValueError:
HSN[k],KSN[k],GSN[k] = 0.0,0.0,0.0
SNavg[k] = np.average(np.array([HSN[k],KSN[k],GSN[k]]))
SNHKmin[k] = np.min(np.array([HSN[k],KSN[k]]))
else:
redshift_est[k] = 0.0
cor[k] = 0.0
if k in sdss_elem.astype('int') and redshift_est[k] > 0:
print 'Estimate: %.5f'%(redshift_est[k]), 'SDSS: %.5f'%(sdss_red.values[np.where(sdss_elem==k)][0])
print 'z found for galaxy '+str(k+1)+' of '+str(len(Gal_dat))
print ''
#Add redshift estimates, SN, Corr, and qualityflag to the Dataframe
Gal_dat['est_z'],Gal_dat['cor'],Gal_dat['HSN'],Gal_dat['KSN'],Gal_dat['GSN'],Gal_dat['quality_flag'] = redshift_est,cor,HSN,KSN,GSN,qualityval['Clear']
plt.plot(Gal_dat['spec_z'],Gal_dat['est_z'],'ro')
#plt.plot(sdss_red,redshift_est2[sdss_elem.astype('int')],'bo')
#plt.plot(sdss_red,redshift_est3[sdss_elem.astype('int')],'o',c='purple')
plt.plot(sdss_red,sdss_red,'k')
plt.savefig(clus_id+'/redshift_compare.png')
plt.show()
f = open(clus_id+'/estimated_redshifts.tab','w')
f.write('#RA DEC Z_est Z_sdss correlation H S/N K S/N G S/N gal_gmag gal_rmag gal_imag\n')
for k in range(redshift_est.size):
f.write(Gal_dat.RA[k]+'\t')
f.write(Gal_dat.DEC[k]+'\t')
f.write(str(Gal_dat.est_z[k])+'\t')
f.write(str(Gal_dat.spec_z[k])+'\t')
#if k in sdss_elem.astype('int'):
# f.write(str(sdss_red[sdss_elem==k].values[0])+'\t')
#else:
# f.write(str(0.000)+'\t')
f.write(str(cor[k])+'\t')
f.write(str(HSN[k])+'\t')
f.write(str(KSN[k])+'\t')
f.write(str(GSN[k])+'\t')
f.write(str(gal_gmag[k])+'\t')
f.write(str(gal_rmag[k])+'\t')
f.write(str(gal_imag[k])+'\t')
f.write('\n')
f.close()
#Output dataframe
Gal_dat.to_csv(clus_id+'/results.csv')
|
giffordw/OSMOSreduce
|
OSMOSreduce.py
|
Python
|
bsd-3-clause
| 40,034
|
[
"Galaxy"
] |
f46b41c4b7008ee159f6af5524e90536a361a3d03f34dbda4856b92ad1e80c3e
|
# Copyright 2013-2017, Brian May
#
# This file is part of python-alogger.
#
# python-alogger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-alogger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-alogger If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, unicode_literals
import csv
import datetime
import logging
import subprocess
import sys
import time
from ..base import BaseParser, TextLog
logger = logging.getLogger(__name__)
if sys.version_info < (3, 0):
# Python2: csv module does not support unicode, we must use byte strings.
def _input_csv(csv_data):
for line in csv_data:
assert isinstance(line, bytes)
yield line
def _output_csv(csv_line):
for i, column in enumerate(csv_line):
csv_line[i] = column.decode("ascii", errors='ignore')
assert isinstance(csv_line[i], unicode) # NOQA
else:
# Python3: csv module does support unicode, we must use strings everywhere,
# not byte strings
def _input_csv(unicode_csv_data):
for line in unicode_csv_data:
assert isinstance(line, bytes)
line = line.decode("ascii", errors='ignore')
assert isinstance(line, str)
yield line
def _output_csv(csv_line):
for column in csv_line:
assert isinstance(column, str)
def slurm_suffix_to_megabytes(memory_string):
return slurm_suffix(memory_string) / 1024 / 1024
def slurm_suffix_to_kilobytes(memory_string):
return slurm_suffix(memory_string) / 1024
def slurm_suffix(memory_string):
if memory_string.endswith('K'):
return int(float(memory_string[:-1]) * 1024)
elif memory_string.endswith('M'):
return int(float(memory_string[:-1]) * 1024 * 1024)
elif memory_string.endswith('G'):
return int(float(memory_string[:-1]) * 1024 * 1024 * 1024)
elif memory_string.endswith('T'):
return int(float(memory_string[:-1]) * 1024 * 1024 * 1024 * 1024)
else:
return int(memory_string)
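# Worked examples (added for exposition, not in the original source):
#   slurm_suffix('512') == 512
#   slurm_suffix('4G') == 4294967296
#   slurm_suffix_to_megabytes('2G') == 2048
#   slurm_suffix_to_kilobytes('1M') == 1024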
# Maybe there is something in datetime that takes an ISO std string but I
# cannot find it, DRB.
def DateTime_from_String(datetimeSt):
"""Gets a date time string like 2010-09-10T15:54:18 and retuns a datetime
object raises a ValueError if it all goes wrong """
DayTime = datetimeSt.split('T')
if len(DayTime) != 2:
raise ValueError
Date = DayTime[0].split('-')
if len(Date) != 3:
raise ValueError
Time = DayTime[1].split(':')
if len(Time) != 3:
raise ValueError
dt = datetime.datetime(
year=int(Date[0]),
month=int(Date[1]),
day=int(Date[2]),
hour=int(Time[0]),
minute=int(Time[1]),
second=int(Time[2])
)
return dt
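# (Note, added for exposition: the standard library can do this parse in one
# call, datetime.datetime.strptime(datetimeSt, '%Y-%m-%dT%H:%M:%S'); the
# hand-rolled version above is kept unchanged.)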
def SecondsFromSlurmTime(timeString):
"""This function could be merged into get_in_seconds above but its here to
leave clear break between the Slurm addition and original. It deals with
the fact that slurm may return est_wall_time as 00nnn, 00:00:00 or
0-00:00:00. """
if timeString.find(':') == -1: # straight second format
return int(timeString)
if timeString.find('-') == -1: # must be a (eg) 10:00:00 case
Seconds = (
(int(timeString.split(':')[0]) * 3600)
+ ((int(timeString.split(':')[1]) * 60))
+ int(timeString.split(':')[2])
)
else:
DayRest = timeString.split('-')
Seconds = int(DayRest[0]) * 3600 * 24
Seconds = Seconds + (int(DayRest[1].split(':')[0]) * 3600)
Seconds = Seconds + ((int(DayRest[1].split(':')[1]) * 60))
Seconds = Seconds + int(DayRest[1].split(':')[2])
return Seconds
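# Worked examples (added for exposition, not in the original source):
#   SecondsFromSlurmTime('90') == 90
#   SecondsFromSlurmTime('10:00:00') == 36000
#   SecondsFromSlurmTime('1-02:03:04') == 86400 + 7380 + 4 == 93784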
class Parser(BaseParser):
def read_log(self, date, cfg):
date_from = date
date_to = date + datetime.timedelta(days=1)
cmd = []
cmd.append(cfg['sacct_path'])
cmd.extend([
'-o', 'Cluster,JobID,User,UID,Group,GID,Account,JobName,'
'State,Partition,Timelimit,Elapsed,Start,End,Submit,Eligible,'
'AllocCPUS,NodeList,ReqMem'])
cmd.extend(['-p', '-X', '-s', 'CA,CD,F,NF,TO'])
cmd.append('--starttime='+date_from.strftime('%Y%m%d'))
cmd.append('--endtime='+date_to.strftime('%Y%m%d'))
command = cmd
logger.debug("Cmd %s" % command)
with open('/dev/null', 'w') as null:
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=null)
reader = csv.reader(_input_csv(process.stdout), delimiter=str("|"))
try:
headers = next(reader)
logger.debug("<-- headers %s" % headers)
except StopIteration:
logger.debug("Cmd %s headers not found" % command)
headers = []
with TextLog(date, cfg) as log:
for row in reader:
_output_csv(row)
logger.debug("<-- row %s" % row)
this_row = {}
for i in range(0, len(headers)):
key = headers[i]
value = row[i]
this_row[key] = value
JobID = "%s.%s%s" % (
this_row['JobID'], this_row['Cluster'],
cfg.get("jobid_postfix", ""))
AllocCPUS = int(this_row['AllocCPUS'])
State = this_row['State'].split(" ")
if AllocCPUS > 0:
results = []
results.append("JobId=%s" % JobID)
results.append("UserId=%s(%s)"
% (this_row['User'], this_row['UID']))
results.append("GroupId=%s(%s)"
% (this_row['Group'], this_row['GID']))
results.append("Account=%s" % (this_row['Account']))
results.append("Name=%s" % (this_row['JobName']))
results.append("JobState=%s" % (State[0]))
results.append("Partition=%s" % (this_row['Partition']))
results.append("TimeLimit=%s" % (this_row['Timelimit']))
results.append("RunTime=%s" % (this_row['Elapsed']))
results.append("StartTime=%s" % (this_row['Start']))
results.append("EndTime=%s" % (this_row['End']))
results.append("SubmitTime=%s" % (this_row['Submit']))
results.append("EligibleTime=%s" % (this_row['Eligible']))
results.append("ProcCnt=%s" % (this_row['AllocCPUS']))
results.append("NodeList=%s" % (this_row['NodeList']))
if this_row['ReqMem'][-1] == 'n':
results.append(
"MinMemoryNode=%s" % (this_row['ReqMem'][0:-1]))
elif this_row['ReqMem'][-1] == 'c':
results.append(
"MinMemoryCPU=%s" % (this_row['ReqMem'][0:-1]))
line = " ".join(results)
log.line(line)
yield self.line_to_dict(line)
process.stdout.close()
retcode = process.wait()
if retcode != 0:
logger.error("<-- Cmd %s returned %d (error)" % (command, retcode))
raise subprocess.CalledProcessError(retcode, command)
if len(headers) == 0:
logger.error("Cmd %s didn't return any headers." % command)
raise RuntimeError("Cmd %s didn't return any headers." % command)
logger.debug("<-- Returned: %d (good)" % (retcode))
def line_to_dict(self, line):
"""Parses a Slurm log file into dictionary"""
raw_data = line.strip().split(' ')
data = {}
formatted_data = {}
# break up line into a temp dictionary
last_key = False
for d in raw_data:
try:
key, value = d.split('=')
data[key] = value
last_key = key
except ValueError:
if last_key:
data[last_key] = "%s %s" % (data[last_key], d)
continue
# Note that the order these are done in is important !
formatted_data['jobid'] = data['JobId']
formatted_data['cores'] = int(data['ProcCnt'])
# 'mike(543)' - remove the uid in brackets.
formatted_data['user'] = data['UserId'][:data['UserId'].find('(')]
formatted_data['project'] = data['Account']
start_time = DateTime_from_String(data['StartTime'])
end_time = DateTime_from_String(data['EndTime'])
        # If SubmitTime is invalid or non-existent use StartTime instead.
try:
# '2010-07-30T15:34:39'
submit_time = DateTime_from_String(data['SubmitTime'])
except (ValueError, KeyError):
submit_time = start_time
# '2010-07-30T15:34:39'
formatted_data['qtime'] = submit_time.isoformat(str(' '))
# for practical purposes, same as etime here.
formatted_data['ctime'] = submit_time.isoformat(str(' '))
# If data['StartTime'] or data['EndTime'] is bad or not given, the
# following statements will fail
formatted_data['start'] = start_time.isoformat(str(' '))
# formatted_data['etime'] # don't care
formatted_data['act_wall_time'] = \
int(time.mktime(end_time.timetuple())) \
- int(time.mktime(start_time.timetuple()))
formatted_data['record_time'] = start_time.isoformat(str(' '))
formatted_data['cpu_usage'] = \
formatted_data['act_wall_time'] * formatted_data['cores']
# Note that this is the name of the script, not --jobname
formatted_data['jobname'] = data['Name']
try:
# might be 5-00:00:00 or 18:00:00
formatted_data['est_wall_time'] = \
SecondsFromSlurmTime(data['TimeLimit'])
except ValueError:
# Sometimes returns 'UNLIMITED' !
formatted_data['est_wall_time'] = -1
try:
# might be "COMPLETED", "CANCELLED", "TIMEOUT" and may have
# multiple entries per line !
formatted_data['exit_status'] = int(data['JobState'])
except ValueError:
# Watch out, Sam says dbase expects an int !!!
formatted_data['exit_status'] = 0
formatted_data['queue'] = data['Partition']
formatted_data['vmem'] = 0
if 'MinMemoryCPU' in data:
formatted_data['list_pmem'] = \
slurm_suffix_to_megabytes(data['MinMemoryCPU'])
else:
formatted_data['list_pmem'] = 0
if 'MinMemoryNode' in data:
formatted_data['list_mem'] = \
slurm_suffix_to_megabytes(data['MinMemoryNode'])
else:
formatted_data['list_mem'] = 0
if 'ReqMem' in data:
if data['ReqMem'].endswith('c'):
formatted_data['list_pmem'] = \
slurm_suffix_to_megabytes(data['ReqMem'][:-1])
elif data['ReqMem'].endswith('n'):
formatted_data['list_mem'] = \
slurm_suffix_to_megabytes(data['ReqMem'][:-1])
else:
logger.error("Weird formatting of ReqMem")
if 'MaxVMSize' in data:
formatted_data['mem'] = \
slurm_suffix_to_kilobytes(data['MaxVMSize'])
else:
formatted_data['mem'] = 0
formatted_data['list_vmem'] = 0
formatted_data['list_pvmem'] = 0
formatted_data['etime'] = formatted_data['qtime']
# Things we don't seem to have available, would like qtime and
# est_wall_time mem, qtime, list_pmem, list_pvmem, queue, vmem,
# list_vmem, jobname. Note that "out of the box" slurm does not report
# on Queue or Creation time.
return formatted_data
|
Karaage-Cluster/python-alogger
|
alogger/parsers/slurm.py
|
Python
|
gpl-3.0
| 12,468
|
[
"Brian"
] |
2fa0ac787e17353dacc6e6c5155aab9f607ddb8c9338c41c0ee63bc2e4854b40
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 19 21:29:43 2016
@author: manon
"""
import numpy as np
import csv
from lien_apriori import dataFromFile,runApriori,printResults
from echantillonnage_apriori import echantillonnage,echantillonnage_glucose
from Bayesian_method import Bayesian_method
data_labels=['Homocysteinemie'] #--
data_labels.append('Glucose_sang') #--
data_labels.append('Glucose_LCS') #--
data_labels.append('Hemoglobine_A1C') #--
data_labels.append('Hemoglobine') #--
data_labels.append('SEGA_total') #--
data_labels.append('PIC_basal') #6
data_labels.append('PIC_plateau')
data_labels.append('AMP_basal')
data_labels.append('AMP_plateau')
data_labels.append('Pss')
data_labels.append('Rcsf_dynamique')
data_labels.append('Rcsf_statique')
data_labels.append('debit_prod_LCS')
data_labels.append('RAP_basal') #14
data_labels.append('resistance') #--
data_labels.append('erreur_normalisee') #--
data_labels.append('PVI') #17
data_labels.append('elastance') #18
data_labels.append('MeanCort_L')
data_labels.append('MeanCort_R')
data_labels.append('MeanCort_gen')
data_labels.append('Indice_diabetique')
#data_labels=['PIC_basal']
#data_labels.append('PIC_plateau')
#data_labels.append('AMP_basal')
#data_labels.append('AMP_plateau')
#data_labels.append('Rcsf')
#data_labels.append('Elastance')
#data_labels.append('PVI')
#data_labels.append('Pss')
#data_labels.append('Debit_prod_LCS')
#data_labels.append('Glucose_sang')
#data_labels.append('Hemoglobine_A1C')
#data_labels.append('Glucose_LCS')
Moy_RS=True
#---------------parameters-----------------------
Filedata='data_22.csv'
data_labels=data_labels
Nbr_max_parents=3
Nbr_sampling=3
SamplingType='Rank'
Weight_Sampling = False
method='Simulated_annealing'
Bs_file = 'None'
NbrTests = 1000
NbrLinks_default = True
Draw_graph=False
Nbr_iter=30
SaveFig=False
GeneratesFiles_toGraph=True
Nbr_Simulations=1000
useDiabet=True
NbrTests_annealing=10
withApriori=True
minSupport=0.15
minConfidence=0.9
#-------------------Sampling---------------------
if useDiabet :
array=echantillonnage_glucose(Filedata)
else :
array=echantillonnage(Filedata)
with open('to_apriori.csv', 'wb') as f:
csv.writer(f).writerows(array)
#-------------------Apriori-----------------------------
inFile=dataFromFile('to_apriori.csv')
items, rules = runApriori(inFile, minSupport, minConfidence)
printResults(items, rules)
#----------------- Probability matrix ------------------
def proba_matrix (rules,rlen):
    matrix=np.ones([rlen,rlen])
    for i in range(rlen):
        matrix[i,i]=0
    for j in range(len(rules)):
        # np.genfromtxt yields floats; cast to int before using as indices
        a=int(rules[j,0])
        b=int(rules[j,1])
        matrix[a,b]=5
    return matrix
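#(Illustrative example, added for exposition: with rules = np.array([[0,1],[2,0]])
# and rlen = 3, proba_matrix returns a 3x3 matrix of ones with a zero diagonal
# and the value 5 at positions (0,1) and (2,0).)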
#-----------------------------------------------------------
#-----------------------------------------------------------
data = np.genfromtxt(Filedata,delimiter=',')
clen, rlen = data.shape
rules=np.genfromtxt('CoupleAssocRules.csv',delimiter=',')
with open('Matrix_probabilite.csv', 'wb') as f:
csv.writer(f).writerows(proba_matrix(rules,rlen))
Bayesian_method (Filedata, data_labels, Nbr_max_parents, Nbr_sampling, SamplingType, Weight_Sampling, method,Bs_file, NbrTests, NbrLinks_default,Draw_graph,Nbr_iter,SaveFig,GeneratesFiles_toGraph,Nbr_Simulations,useDiabet,NbrTests_annealing,withApriori,minSupport,minConfidence)
if Moy_RS :
    #----------Average of the 10 simulated annealing runs, keeping only links>0.5
    # + generate the files of this network for Cytoscape
    # Load the ten simulated annealing runs and average them
    Bs_runs=[np.genfromtxt('Bs_SimA_'+Filedata[:-4]+'_%d.csv' % i,delimiter=',')
             for i in range(10)]
    Bs_moy=sum(Bs_runs)/10
Bs=np.zeros(Bs_moy.shape)
Bs[Bs_moy>0.5]=Bs_moy[Bs_moy>0.5]
Nbr_data=len(data_labels)
labels={}
for i in range(len(data_labels)):
labels[i]=data_labels[i]
new_tab=np.zeros([Nbr_data,2],dtype=object)
for i in range(0,Nbr_data):
new_tab[i,0]=i+1
new_tab[i,1]=labels[i]
with open('to_cytoscape_labels_RS_moy_23.csv', 'wb') as f:
csv.writer(f).writerows(new_tab)
#--------------
new=np.empty([1,2],dtype=object)
for j in range(rlen) :
nnz=np.nonzero(Bs[j,:])[0]
if len(nnz)!=0:
for a in range(0,len(nnz)):
new=np.append(new,[[j+1,nnz[:][a]+1]],axis=0)
else :
new=np.append(new,[[j+1,-1]],axis=0)
new=np.delete(new, 0, 0)
np.place(new,new==-1,None)
with open('to_cytoscape_Network_RS_moy_23.csv', 'wb') as f:
csv.writer(f).writerows(new)
|
manonverdier/Bayesian_Network
|
simulated_annealing_test.py
|
Python
|
gpl-3.0
| 5,309
|
[
"Cytoscape"
] |
89e938a927cfd6989663a01dd1d12619a8f13a99d9013eb5461a4f8f67a6ac9b
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import platform
from pyglet.gl.base import Config, CanvasConfig, Context
from pyglet.gl import ContextException
from pyglet.gl import gl
from pyglet.gl import agl
from pyglet.canvas.cocoa import CocoaCanvas
from pyglet.libs.darwin.cocoapy import *
NSOpenGLPixelFormat = ObjCClass('NSOpenGLPixelFormat')
NSOpenGLContext = ObjCClass('NSOpenGLContext')
"""Version is based on Darwin kernel, not OS-X version.
OS-X / Darwin version history
http://en.wikipedia.org/wiki/Darwin_(operating_system)#Release_history
pre-release: 0.1, 0.2, 1.0, 1.1,
kodiak: 1.2.1,
cheetah: 1.3.1,
puma: 1.4.1, 5.1 -> 5.5
jaguar: 6.0.1 -> 6.8
panther: 7.0 -> 7.9
tiger: 8.0 -> 8.11
leopard: 9.0 -> 9.8
snow_leopard: 10.0 -> 10.8
lion: 11.0 -> 11.4
mountain_lion: 12.0 -> ?
"""
os_x_release = {
'pre-release': (0,1),
'kodiak': (1,2,1),
'cheetah': (1,3,1),
    'puma': (1,4,1),
'jaguar': (6,0,1),
'panther': (7,),
'tiger': (8,),
'leopard': (9,),
'snow_leopard': (10,),
'lion': (11,),
'mountain_lion': (12,),
}
def os_x_version():
    # platform.release() is the Darwin kernel version, e.g. '11.4.0'; cast
    # each component to int so comparisons against os_x_release are numeric
    # rather than lexicographic.
    version = tuple(int(i) for i in platform.release().split('.'))
    # ensure we return a tuple
    if len(version) > 0:
        return version
    return (version,)
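# (Worked example, added for exposition: on Lion, platform.release() returns
# something like '11.4.0', which os_x_version() maps to (11, 4, 0), so
# (11, 4, 0) >= os_x_release['lion'] holds.)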
_os_x_version = os_x_version()
# Valid names for GL attributes and their corresponding NSOpenGL constant.
_gl_attributes = {
'double_buffer': NSOpenGLPFADoubleBuffer,
'stereo': NSOpenGLPFAStereo,
'buffer_size': NSOpenGLPFAColorSize,
'sample_buffers': NSOpenGLPFASampleBuffers,
'samples': NSOpenGLPFASamples,
'aux_buffers': NSOpenGLPFAAuxBuffers,
'alpha_size': NSOpenGLPFAAlphaSize,
'depth_size': NSOpenGLPFADepthSize,
'stencil_size': NSOpenGLPFAStencilSize,
# Not exposed by pyglet API (set internally)
'all_renderers': NSOpenGLPFAAllRenderers,
'fullscreen': NSOpenGLPFAFullScreen,
'minimum_policy': NSOpenGLPFAMinimumPolicy,
'maximum_policy': NSOpenGLPFAMaximumPolicy,
'screen_mask' : NSOpenGLPFAScreenMask,
# Not supported in current pyglet API
'color_float': NSOpenGLPFAColorFloat,
'offscreen': NSOpenGLPFAOffScreen,
'sample_alpha': NSOpenGLPFASampleAlpha,
'multisample': NSOpenGLPFAMultisample,
'supersample': NSOpenGLPFASupersample,
}
# NSOpenGL constants which do not require a value.
_boolean_gl_attributes = frozenset([
NSOpenGLPFAAllRenderers,
NSOpenGLPFADoubleBuffer,
NSOpenGLPFAStereo,
NSOpenGLPFAMinimumPolicy,
NSOpenGLPFAMaximumPolicy,
NSOpenGLPFAOffScreen,
NSOpenGLPFAFullScreen,
NSOpenGLPFAColorFloat,
NSOpenGLPFAMultisample,
NSOpenGLPFASupersample,
NSOpenGLPFASampleAlpha,
])
# Attributes for which no NSOpenGLPixelFormatAttribute name exists.
# We could probably compute actual values for these using
# NSOpenGLPFAColorSize / 4 and NSOpenGLPFAAccumSize / 4, but I'm not that
# confident I know what I'm doing.
_fake_gl_attributes = {
'red_size': 0,
'green_size': 0,
'blue_size': 0,
'accum_red_size': 0,
'accum_green_size': 0,
'accum_blue_size': 0,
'accum_alpha_size': 0
}
class CocoaConfig(Config):
def match(self, canvas):
# Construct array of attributes for NSOpenGLPixelFormat
attrs = []
for name, value in self.get_gl_attributes():
attr = _gl_attributes.get(name)
if not attr or not value:
continue
attrs.append(attr)
if attr not in _boolean_gl_attributes:
attrs.append(int(value))
# Support for RAGE-II, which is not compliant.
attrs.append(NSOpenGLPFAAllRenderers)
# Force selection policy.
attrs.append(NSOpenGLPFAMaximumPolicy)
# NSOpenGLPFAFullScreen is always supplied so we can switch to and
# from fullscreen without losing the context. Also must supply the
# NSOpenGLPFAScreenMask attribute with appropriate display ID.
# Note that these attributes aren't necessary to render in fullscreen
# on Mac OS X 10.6, because there we are simply rendering into a
# screen sized window. See:
# http://developer.apple.com/library/mac/#documentation/GraphicsImaging/Conceptual/OpenGL-MacProgGuide/opengl_fullscreen/opengl_cgl.html%23//apple_ref/doc/uid/TP40001987-CH210-SW6
# this fails on Lion with the OpenGLProfile
# values set
if _os_x_version < os_x_release['lion']:
attrs.append(NSOpenGLPFAFullScreen)
attrs.append(NSOpenGLPFAScreenMask)
attrs.append(quartz.CGDisplayIDToOpenGLDisplayMask(quartz.CGMainDisplayID()))
else:
# check for opengl profile
# This requires OS-X Lion (Darwin 11) or higher
version = (
getattr(self, 'major_version', None),
getattr(self, 'minor_version', None)
)
# tell os-x we want to request a profile
attrs.append(NSOpenGLPFAOpenGLProfile)
# check if we're wanting core or legacy
# as of OS-X (Mountain)Lion, there is only
# Legacy and Core 3.2
if version == (3, 2):
attrs.append(int(NSOpenGLProfileVersion3_2Core))
else:
attrs.append(int(NSOpenGLProfileVersionLegacy))
# Terminate the list.
attrs.append(0)
# Create the pixel format.
attrsArrayType = c_uint32 * len(attrs)
attrsArray = attrsArrayType(*attrs)
pixel_format = NSOpenGLPixelFormat.alloc().initWithAttributes_(attrsArray)
# Return the match list.
if pixel_format is None:
return []
else:
return [CocoaCanvasConfig(canvas, self, pixel_format)]
class CocoaCanvasConfig(CanvasConfig):
def __init__(self, canvas, config, pixel_format):
super(CocoaCanvasConfig, self).__init__(canvas, config)
self._pixel_format = pixel_format
# Query values for the attributes of the pixel format, and then set the
# corresponding attributes of the canvas config.
for name, attr in _gl_attributes.items():
vals = c_int()
self._pixel_format.getValues_forAttribute_forVirtualScreen_(byref(vals), attr, 0)
setattr(self, name, vals.value)
# Set these attributes so that we can run pyglet.info.
for name, value in _fake_gl_attributes.items():
setattr(self, name, value)
# Update the minor/major version from profile if Lion
if _os_x_version >= os_x_release['lion']:
vals = c_int()
            # getValues_... fills `vals` in place and does not return the
            # profile, so inspect vals.value rather than the call's result.
            self._pixel_format.getValues_forAttribute_forVirtualScreen_(
                byref(vals),
                NSOpenGLPFAOpenGLProfile,
                0
            )
            if vals.value == NSOpenGLProfileVersion3_2Core:
setattr(self, "major_version", 3)
setattr(self, "minor_version", 2)
else:
setattr(self, "major_version", 2)
setattr(self, "minor_version", 1)
def create_context(self, share):
# Determine the shared NSOpenGLContext.
if share:
share_context = share._nscontext
else:
share_context = None
# Create a new NSOpenGLContext.
nscontext = NSOpenGLContext.alloc().initWithFormat_shareContext_(
self._pixel_format,
share_context)
return CocoaContext(self, nscontext, share)
def compatible(self, canvas):
return isinstance(canvas, CocoaCanvas)
class CocoaContext(Context):
def __init__(self, config, nscontext, share):
super(CocoaContext, self).__init__(config, share)
self.config = config
self._nscontext = nscontext
def attach(self, canvas):
# See if we want OpenGL 3 in a non-Lion OS
if _os_x_version < os_x_release['lion'] and self.config._requires_gl_3():
raise ContextException('OpenGL 3 not supported')
super(CocoaContext, self).attach(canvas)
# The NSView instance should be attached to a nondeferred window before calling
# setView, otherwise you get an "invalid drawable" message.
self._nscontext.setView_(canvas.nsview)
self.set_current()
def detach(self):
super(CocoaContext, self).detach()
self._nscontext.clearDrawable()
def set_current(self):
self._nscontext.makeCurrentContext()
super(CocoaContext, self).set_current()
def update_geometry(self):
# Need to call this method whenever the context drawable (an NSView)
# changes size or location.
self._nscontext.update()
def set_full_screen(self):
self._nscontext.makeCurrentContext()
self._nscontext.setFullScreen()
def destroy(self):
super(CocoaContext, self).destroy()
self._nscontext.release()
self._nscontext = None
def set_vsync(self, vsync=True):
vals = c_int(vsync)
self._nscontext.setValues_forParameter_(byref(vals), NSOpenGLCPSwapInterval)
def get_vsync(self):
vals = c_int()
self._nscontext.getValues_forParameter_(byref(vals), NSOpenGLCPSwapInterval)
return vals.value
def flip(self):
self._nscontext.flushBuffer()
|
adamlwgriffiths/Pyglet
|
pyglet/gl/cocoa.py
|
Python
|
bsd-3-clause
| 9,549
|
[
"Jaguar"
] |
134060f1e8ac3618f92e708200d291fcad579d450b987d7347b0ca707dff2e82
|
from __future__ import print_function
from copy import deepcopy as dc
import numpy as np
from ase.io.trajectory import Trajectory
from ase.optimize.optimize import Optimizer
from numpy.random import RandomState
from pyiid.adp import has_adp
__author__ = 'christopher'
def leapfrog(atoms, step, center=True):
"""
Propagate the dynamics of the system via the leapfrog algorithm one step
Parameters
-----------
atoms: ase.Atoms
The atomic configuration for the system
step: float
The step size for the simulation, the new momentum/velocity is step *
the force
center: bool
If true, center the atoms in the cell after moving them
Returns
-------
ase.Atoms
The new atomic positions and velocities
"""
latoms = dc(atoms)
adps = None
if has_adp(latoms):
adps = has_adp(latoms)
latoms.set_momenta(latoms.get_momenta() + 0.5 * step * latoms.get_forces())
if adps is not None:
adps.set_momenta(adps.get_momenta() +
0.5 * step * adps.get_forces(latoms))
latoms.set_positions(latoms.get_positions() +
step * latoms.get_velocities())
if adps is not None:
adps.set_positions(adps.get_positions() +
0.5 * step * adps.get_velocities())
latoms.set_momenta(latoms.get_momenta() +
0.5 * step * latoms.get_forces())
if adps is not None:
adps.set_momenta(adps.get_momenta() +
0.5 * step * adps.get_forces(latoms))
if center:
latoms.center()
return latoms
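# Illustrative sketch (added for exposition, not part of the original API;
# the name below is hypothetical): the same kick-drift-kick pattern on a
# 1-D harmonic oscillator with plain numpy, mirroring leapfrog() above.
def _toy_leapfrog_example(step=0.1, n_steps=10):
    """Propagate x'' = -x with the leapfrog scheme used above."""
    x, p = np.array([1.0]), np.array([0.0])
    force = lambda pos: -pos
    for _ in range(n_steps):
        p = p + 0.5 * step * force(x)  # half kick
        x = x + step * p               # full drift (unit mass)
        p = p + 0.5 * step * force(x)  # half kick with the updated force
    return x, p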
class Ensemble(Optimizer):
def __init__(self, atoms, restart=None, logfile=None, trajectory=None,
seed=None,
verbose=False):
Optimizer.__init__(self, atoms, restart, logfile, trajectory=None)
atoms.get_forces()
atoms.get_potential_energy()
if seed is None:
seed = np.random.randint(0, 2 ** 31)
self.verbose = verbose
self.random_state = RandomState(seed)
self.starting_atoms = dc(atoms)
self.pe = []
self.metadata = {'seed': seed}
self.traj = [dc(atoms)]
print(self.traj[0].get_momenta())
if trajectory is not None:
self.trajectory = Trajectory(trajectory, mode='w')
self.trajectory.write(self.traj[-1])
if self.verbose:
print('Trajectory written', len(self.traj))
else:
self.trajectory = None
if verbose:
print('trajectory file', self.trajectory)
def check_eq(self, eq_steps, tol):
ret = np.cumsum(self.pe, dtype=float)
ret[eq_steps:] = ret[eq_steps:] - ret[:-eq_steps]
ret = ret[eq_steps - 1:] / eq_steps
return np.sum(np.gradient(ret[eq_steps:])) < tol
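    # (Exposition note: check_eq takes an eq_steps-wide running mean of the
    # recorded potential energies and declares equilibrium once the summed
    # gradient of that mean drops below tol.)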
def run(self, steps=100000000, eq_steps=None, eq_tol=None, **kwargs):
self.metadata['planned iterations'] = steps
i = 0
while i < steps:
# Check if we are at equilibrium, if we want that
if eq_steps is not None:
if self.check_eq(eq_steps, eq_tol):
break
# Verboseness
if self.verbose:
print('iteration number', i)
try:
self.step()
i += 1
            # If interrupted, return the data gathered so far and exit
            # gracefully
            except KeyboardInterrupt:
                print('Interrupted, returning data')
return self.traj, self.metadata
if self.trajectory is not None:
self.trajectory.close()
return self.traj, self.metadata
def step(self):
pass
def estimate_simulation_duration(self, atoms, iterations):
pass
|
CJ-Wright/pyIID
|
pyiid/sim/__init__.py
|
Python
|
bsd-3-clause
| 3,830
|
[
"ASE"
] |
fc561c20ce39fd2aa6f05ffbc611580913d131a46a532af3a8daf28af567fde0
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import logging
import unittest
from telemetry.core import util
from telemetry.core.platform.profiler import perf_profiler
from telemetry.unittest import options_for_unittests
from telemetry.unittest import simple_mock
class TestPerfProfiler(unittest.TestCase):
def testPerfProfiler(self):
options = options_for_unittests.GetCopy()
if not perf_profiler.PerfProfiler.is_supported(options.browser_type):
logging.warning('PerfProfiler is not supported. Skipping test')
return
profile_file = os.path.join(
util.GetUnittestDataDir(), 'perf_report_output.txt')
perf_report_output = open(profile_file, 'r').read()
mock_popen = simple_mock.MockObject()
mock_popen.ExpectCall('communicate').WillReturn([perf_report_output])
mock_subprocess = simple_mock.MockObject()
mock_subprocess.ExpectCall(
'Popen').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_popen)
setattr(mock_subprocess, 'PIPE', simple_mock.MockObject())
real_subprocess = perf_profiler.subprocess
perf_profiler.subprocess = mock_subprocess
try:
self.assertEqual(
perf_profiler.PerfProfiler.GetTopSamples('linux', profile_file, 10),
{ 'v8::internal::StaticMarkingVisitor::MarkMapContents': 63615201,
'v8::internal::RelocIterator::next': 38271931,
'v8::internal::LAllocator::MeetConstraintsBetween': 42913933,
'v8::internal::FlexibleBodyVisitor::Visit': 31909537,
'v8::internal::LiveRange::CreateAssignedOperand': 42913933,
'void v8::internal::RelocInfo::Visit': 96878864,
'WebCore::HTMLTokenizer::nextToken': 48240439,
'v8::internal::Scanner::ScanIdentifierOrKeyword': 46054550,
'sk_memset32_SSE2': 45121317,
'v8::internal::HeapObject::Size': 39786862
})
finally:
perf_profiler.subprocess = real_subprocess
|
mogoweb/chromium-crosswalk
|
tools/telemetry/telemetry/core/platform/profiler/perf_profiler_unittest.py
|
Python
|
bsd-3-clause
| 2,072
|
[
"VisIt"
] |
0be4cab768dc931c56f06e7708d85c3221e4783e96a7234b74d61f17441ee56f
|
from logging import getLogger
try:
import xml.etree.cElementTree as et
except:
import xml.etree.ElementTree as et
try:
from galaxy.model import Job
job_states = Job.states
except ImportError:
# Not in Galaxy, map Galaxy job states to Pulsar ones.
from galaxy.util import enum
job_states = enum(RUNNING='running', OK='complete', QUEUED='queued')
from ..job import BaseJobExec
log = getLogger(__name__)
ERROR_MESSAGE_UNRECOGNIZED_ARG = 'Unrecognized long argument passed to Torque CLI plugin: %s'
argmap = {'destination': '-q',
'Execution_Time': '-a',
'Account_Name': '-A',
'Checkpoint': '-c',
'Error_Path': '-e',
'Group_List': '-g',
'Hold_Types': '-h',
'Join_Paths': '-j',
'Keep_Files': '-k',
'Resource_List': '-l',
'Mail_Points': '-m',
'Mail_Users': '-M',
'Job_Name': '-N',
'Output_Path': '-o',
'Priority': '-p',
'Rerunable': '-r',
'Shell_Path_List': '-S',
'job_array_request': '-t',
'User_List': '-u',
'Variable_List': '-v'}
class Torque(BaseJobExec):
def __init__(self, **params):
self.params = {}
for k, v in params.items():
self.params[k] = v
def job_script_kwargs(self, ofile, efile, job_name):
pbsargs = {'-o': ofile,
'-e': efile,
'-N': job_name}
for k, v in self.params.items():
if k == 'plugin':
continue
try:
if not k.startswith('-'):
k = argmap[k]
pbsargs[k] = v
except KeyError:
log.warning(ERROR_MESSAGE_UNRECOGNIZED_ARG % k)
template_pbsargs = ''
for k, v in pbsargs.items():
template_pbsargs += '#PBS %s %s\n' % (k, v)
return dict(headers=template_pbsargs)
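    # (Illustrative, added for exposition: Torque(destination='batch')
    # .job_script_kwargs('out.log', 'err.log', 'myjob')['headers'] renders
    # one "#PBS <flag> <value>" line per option, e.g. "#PBS -o out.log",
    # "#PBS -e err.log", "#PBS -N myjob" and "#PBS -q batch".)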
def submit(self, script_file):
return 'qsub %s' % script_file
def delete(self, job_id):
return 'qdel %s' % job_id
def get_status(self, job_ids=None):
return 'qstat -x'
def get_single_status(self, job_id):
return 'qstat -f %s' % job_id
def parse_status(self, status, job_ids):
        # in case there's noise in the output, find the big blob of XML
tree = None
rval = {}
for line in status.strip().splitlines():
try:
tree = et.fromstring(line.strip())
assert tree.tag == 'Data'
break
except Exception:
tree = None
if tree is None:
log.warning('No valid qstat XML return from `qstat -x`, got the following: %s' % status)
return None
else:
for job in tree.findall('Job'):
id = job.find('Job_Id').text
if id in job_ids:
state = job.find('job_state').text
# map PBS job states to Galaxy job states.
rval[id] = self._get_job_state(state)
return rval
def parse_single_status(self, status, job_id):
for line in status.splitlines():
line = line.split(' = ')
if line[0] == 'job_state':
return self._get_job_state(line[1].strip())
# no state found, job has exited
return job_states.OK
    def _get_job_state(self, state):
        try:
            # Index (rather than .get) so an unknown code raises KeyError
            # and reaches the message below.
            return {
                'E': job_states.RUNNING,
                'R': job_states.RUNNING,
                'Q': job_states.QUEUED,
                'C': job_states.OK
            }[state]
        except KeyError:
            raise KeyError("Failed to map torque status code [%s] to job state." % state)
__all__ = ('Torque',)
|
ssorgatem/pulsar
|
pulsar/managers/util/cli/job/torque.py
|
Python
|
apache-2.0
| 3,802
|
[
"Galaxy"
] |
dc16f14308c3c82ff70e383f64539de4ec27244e3548ab319b422b3f12f5fcbe
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# Mip Agent
#
# @author Simon Andreas Frimann Lund
#
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import logging
import mip
import socket
from binascii import hexlify
from struct import pack, unpack
logging.basicConfig(filename='proxyd.log',level=logging.DEBUG)
HOST = 'amigos18.diku.dk'
PORT = 8113
request = mip.request('jegersimon', 80)
print " [%s,%d] " % (hexlify(request), len(request))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.send(request)
while 1: # Wait for setup requests
    data = s.recv(1024)
    if not data:  # peer closed the connection
        break
    print 'Received', repr(data)
s.close()
|
heromod/migrid
|
mig/vm-proxy/deprecated/proxy/mipagent.py
|
Python
|
gpl-2.0
| 1,429
|
[
"Brian"
] |
c9273c7d7ec7d6f3a58bc8144401ae060a34e132491ced77884f6ecc91f5def0
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
class Thresholding:
def __init__(self):
return
def simple(self,img):
thresh = 50
ret,thresh1 = cv2.threshold(img,thresh,255,cv2.THRESH_BINARY)
ret,thresh2 = cv2.threshold(img,thresh,255,cv2.THRESH_BINARY_INV)
ret,thresh3 = cv2.threshold(img,thresh,255,cv2.THRESH_TRUNC)
ret,thresh4 = cv2.threshold(img,thresh,255,cv2.THRESH_TOZERO)
ret,thresh5 = cv2.threshold(img,thresh,255,cv2.THRESH_TOZERO_INV)
# titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
# images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
titles = ['Original Image','BINARY']
images = [img, cv2.bitwise_not(thresh1)]
for i in range(2):
plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
plt.title(titles[i])
plt.xticks([]),plt.yticks([])
plt.show()
return
def adaptive(self, img):
blur = cv2.GaussianBlur(img,(5,5),0)
# img = cv2.medianBlur(img,5)
C = 0
blocksize = 11
ret1,th1 = cv2.threshold(img,50,255,cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
cv2.THRESH_BINARY,blocksize,C)
th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,blocksize,C)
        titles = ['Original Image', 'Global Thresholding (v = 50)',
'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [img, cv2.bitwise_not(th1), cv2.bitwise_not(th2), cv2.bitwise_not(th3)]
for i in range(4):
plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')
plt.title(titles[i])
plt.xticks([]),plt.yticks([])
plt.show()
return
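    # (Note, added for exposition: blocksize is the odd-sized pixel
    # neighbourhood used to compute each local threshold, and C is a constant
    # subtracted from that local mean / Gaussian-weighted mean.)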
def otsu(self, img):
# global thresholding
ret1,th1 = cv2.threshold(img,50,255,cv2.THRESH_BINARY)
per = np.percentile(img.ravel(), np.linspace(0,100,100))
print("percentile = {}".format(per))
# plt.hist(img.ravel(), 256)
# plt.figure()
# Otsu's thresholding
ret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Otsu's thresholding after Gaussian filtering
blur = cv2.GaussianBlur(img,(5,5),0)
ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
print("global = {}, ostu={}, gaussinaostu={}".format(ret1, ret2, ret3))
# plot all the images and their histograms
images = [img, 0, cv2.bitwise_not(th1),
img, 0, cv2.bitwise_not(th2),
blur, 0, cv2.bitwise_not(th3)]
        titles = ['Original Noisy Image','Histogram','Global Thresholding (v=50)',
'Original Noisy Image','Histogram',"Otsu's Thresholding",
'Gaussian filtered Image','Histogram',"Otsu's Thresholding"]
for i in range(3):
plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
plt.title(titles[i*3])
plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
plt.title(titles[i*3+1])
plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
plt.title(titles[i*3+2])
plt.show()
return
def run(self):
img = cv2.imread('/home/levin/workspace/snrprj/snr/data/banknotes/train/batch_1/20171017163713/F018F26785.jpg',0)
# img = cv2.imread('2.jpg',0)
# self.simple(img)
self.adaptive(img)
# self.otsu(img)
return
if __name__ == "__main__":
obj= Thresholding()
obj.run()
|
LevinJ/CodeSamples
|
imageprocessing/thresholding/thresholding.py
|
Python
|
gpl-2.0
| 3,784
|
[
"Gaussian"
] |
c13b33ca199fa6561c3768fb7ad9525cea495774518a506139167bdc5fe442cb
|
import array
from MB import music
from util import dlinkedlist
debug=True
class Phrase:
"""
Defines the head and tail within a list of events.
events in the list should have a
ev.time --- time of event
ev.data --- (toks,data)
"""
def __init__(self,head,tail):
self.head=head
self.tail=tail
class Phrasifier:
"""
    Observes a list of events and creates a list of phrases.
    The parser is responsible for interpreting the list of events into note events.
    tbreak is the time of silence between 2 phrases.
    [optional] A client is notified when a phrase is detected.
    To run in real time, visit must be called periodically.
Phrases reference the original eventlist (start and end pointers)
"""
def __init__(self,eventlist,parser,tbreak,client):
"""
        eventlist -- the list of events to be broken into phrases
parser
client is informed about phrases
tbreak -- off time to trigger a break.
"""
self.list=eventlist
self.notesOn=NotesOn(parser)
self.phrases=[]
self.ptr=self.list.head
# pointer to the first event in the current phrase.
self.phrase_start=None
self.parser=parser
self.tbreak=tbreak
self.client=client
def visit_next_event(self):
"""
Advance the self.ptr through the event list.
If a phrase is detected the client is notified.
"""
if debug:
print("visit_next")
nxt=self.ptr.next
if nxt == None:
return False
pitch,vel=self.parser.parse(nxt.data[0],nxt.data[1])
onPrev=self.notesOn.isPlaying()
self.notesOn.process(pitch,vel)
onNow=self.notesOn.isPlaying()
if self.phrase_start == None:
assert vel > 0
assert not onPrev
assert onNow
self.phrase_start=nxt
elif not onPrev and onNow:
if (nxt.time-self.ptr.time)> self.tbreak:
phrase=Phrase(self.phrase_start,self.ptr)
self.phrases.append(phrase)
self.phrase_start=nxt
if debug:
print( "Notify " )
if self.client:
self.client.notify(phrase)
self.ptr=nxt
def visit(self,tNow):
""" process pending events up to tNow
"""
while self.ptr.next !=None:
if self.ptr.next.time > tNow:
return
self.visit_next_event()
""" playing then obviously not a phrase
"""
if self.notesOn.isPlaying():
return
""" No phrase start
"""
if self.phrase_start == None:
return
if tNow-self.ptr.time > self.tbreak:
phrase=Phrase(self.phrase_start,self.ptr)
self.phrases.append(phrase)
self.phrase_start=None
print( "phrased" )
if self.client:
self.client.notify(phrase)
class BasicParser:
"""
    takes a message of tokens and maps it onto pitch and velocity.
"""
def parse(self,toks,data):
val=float(data[0])
pitch=int(toks[0])+32
print( val,pitch )
vel=int(val*127)
return pitch,vel
class NotesOn:
    def __init__(self,parser):
        # if self.score:
        #     beat=self.seq.get_stamp()
        #     pitch=self.score.get_tonality(beat).get_note_of_scale(i,self.score.key)+36
        # else:
        #     pitch=i+48
        #
        # store the parser; play_toks below relies on it
        self.parser=parser
        self.notesOn={}
def play_toks(self,toks,data):
pitch,vel=self.parser.parse(toks,data)
self.process(pitch,vel)
def process(self,pitch,vel):
if vel != 0:
assert not self.notesOn.get(pitch)
self.notesOn[pitch]=vel
else:
assert self.notesOn.get(pitch)
del self.notesOn[pitch]
def isPlaying(self):
return len(self.notesOn)
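    # (Illustrative usage, added for exposition: after process(60, 100) the
    # tracker reports playing; after the matching process(60, 0) it is
    # silent again, since note-off events carry velocity 0.)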
class Player:
"""
can play a melody instrument using the OSC message
if memory=True records events
"""
def __init__(self,inst,context,parser=None,seq=None,memory=True,beat_client=None):
self.parser=parser
self.context=context
if parser == None:
self.parser=BasicParser()
self.inst=inst
self.seq=seq
self.messenger=music.Messenger(inst)
if memory:
self.list=dlinkedlist.OrderedDLinkedList()
# put a dummy head to avoid special cases.
self.list.append(0.0,None)
else:
self.list=None
self.beat_client=beat_client
def play(self,toks,data):
"""
Interpret the OSC message and play it
"""
if toks[0] == 'xy':
x=int(float(data[1])*127)
y=int(float(data[0])*127)
#print( " melody xy",x,y
self.messenger.inst.set_cc(12,x)
            self.messenger.inst.set_cc(13,y)
return
pitch,vel=self.parser.parse(toks,data)
#print( "play",pitch,vel
if vel != 0:
self.messenger.inst.note_on(pitch,vel)
else:
self.messenger.inst.note_off(pitch)
stamp=self.seq.get_real_stamp()
if self.list != None:
self.list.append(stamp,(toks,data))
# beat=band.seq.get_beat()
# print( "STOMP",self.stamp
if self.beat_client and vel > 0:
self.beat_client.stomp(stamp)
def set_instrument(self,name):
self.name=name
def get_name(self):
return self.name
def quit(self):
if self.beat_client:
            self.beat_client.quit()
def play_phrase(self,phrase,start,period):
self.phrasePlayer=PhrasePlayer(phrase,self.seq,self)
self.phrasePlayer.start(start,period)
def set_ghost(self,ghost_player=None):
"""
Not sure about this
"""
# install a ghost to monitor events and take over if need be.
if not ghost_player:
ghost_player=self.create_ghost()
echoPlayerFirer=PhrasePlayerFirer(ghost_player,self.context)
# detects a phrase then
phraser=Phrasifier(self.list,self.parser,1.0,echoPlayerFirer)
self.context.callback(phraser.visit,0,0.2)
def create_ghost(self):
"""
create another memoryless player using the same parser and instrument
"""
return Player(self.inst,self.context,parser=self.parser,seq=self.seq,memory=False,beat_client=None)
class ChordPlayer:
def __init__(self,inst,score,seq):
self.pitches=[]
self.template=[0,2,4,6]
self.inst=inst
self.score=score
self.seq=seq
def play(self,toks,data):
# print( "chord",toks,data
if toks == "xy":
y=int(float(data[0])*127)
x=int(float(data[1])*127)
self.inst.set_volume(y)
return
for pitch in self.pitches:
self.inst.note_off(pitch)
self.pitches=[]
# inversion=int(toks[1]) # TODO
vel=int(float(data[0])*127)
if vel == 0:
return
beat=self.seq.get_beat()
tonality=self.score.get_tonality(beat)
for p in range(len(self.template)):
pit=tonality.get_note_of_chord(self.template[p],self.score.key)+48
self.inst.note_on(pit,vel)
self.pitches.append(pit)
# Play notes (shift up +12 if pitch is too low)
# for p in self.pitches:
# while p < self.lowest:
# p += 12
#
class MelodyPlayer:
"""
plays a melody instrument using the OSC message
"""
def __init__(self,inst,context,score=None,seq=None):
self.score=score
self.inst=inst
self.seq=seq
self.context=context
self.player=music.Messenger(inst)
def play(self,toks,data):
if toks[0] == 'xy':
x=int(float(data[1])*127)
y=int(float(data[0])*127)
print( " melody xy",x,y )
self.player.inst.set_cc(12,x)
self.player.inst.set_cc(13,y)
return
val=float(data[0])
# assert len(toks) >1
i=int(toks[0])
#print( "Melody ",val,i
vel=int(val*127)
if self.score:
beat=self.seq.get_stamp()
pitch=self.score.get_tonality(beat).get_note_of_scale(i,self.score.key)+36
else:
pitch=i+48
#print( "play",pitch,vel
if vel != 0:
self.player.inst.note_on(pitch,vel)
else:
self.player.inst.note_off(pitch)
# schedule the note off
#playable = music.Playable(music.NoteOff(pitch), self.player)
#self.seq.schedule(beat+0.05, playable)
class DelayedPlayer:
"""
sched()
fire()
"""
def __init__(self,list,seq,player,delay,poll_dt):
self.list=list
self.seq=seq
self.player=player
self.delay=delay
self.poll_dt=poll_dt
assert poll_dt < delay
def start(self):
self.last=None
self.tNow=self.seq.get_stamp()
self.time1=self.tNow+self.delay
self.grazer=dlinkedlist.DLinkedListGrazer(self.list)
# hack to avoid special case of an empty list.
        # list must contain an event in the past so we can have a self.last
assert self.list.head != None
assert self.list.head.time < self.time1
self.last=self.list.head
while self.last.next != None and self.last.next.time <self.time1:
self.last=self.last.next
self.sched()
def sched(self):
"""
schedule to fire at next event in list OR after self.delay
"""
# schedule a fire at next event or after a delay if none
tSched=self.tNow+self.poll_dt
if self.last != None and self.last.next != None:
tNext=self.last.next.time+self.delay
assert tNext > self.seq.get_stamp()
tSched=min(tSched,tNext)
self.seq.schedule(tSched,self)
def fire(self,tt):
"""
tt is the time according to the sequencer
"""
self.tNow=self.seq.get_stamp()
# play all events between time1 and time2
time2=self.seq.get_stamp()-self.delay
self.grazer.set_range(self.time1,time2)
while True:
node=self.grazer.next()
if node and node.data:
toks=node.data[0]
data=node.data[1]
# print( "---- PLAY ",self.tNow,toks,data
self.player.play(toks,data)
self.last=node
else:
break
self.time1=time2
self.sched()
class PhrasePlayer:
"""
sched()
fire()
"""
    def __init__(self,phrase,seq,player):
        # callers pass (phrase, seq, player); seq is stored although the
        # methods below reach the sequencer through self.player.seq
        self.phrase=phrase
        self.seq=seq
        self.player=player
def start(self,t_shift,tloop=None):
"""
start playing the phrase shifted by t_shift
that is as if the time play = event.time+t_shift
"""
if tloop:
assert tloop >= self.phrase.tail.time-self.phrase.head.time
tNow=self.player.seq.get_stamp()
self.t_shift=t_shift
self.ptr=self.phrase.head
tNext=self.ptr.time+t_shift
self.tloop=tloop
while tNext<tNow:
print( "ooops PhrasePlayer:start: too late")
            tNext+=tloop
self.sched()
def sched(self):
"""
schedule to fire at next event in list OR after self.delay
"""
tEvent=self.ptr.time+self.t_shift
self.player.seq.schedule(tEvent,self)
def fire(self,tt):
"""
tt is the time according to the sequencer
"""
self.tNow=self.player.seq.get_stamp()
while True:
toks=self.ptr.data[0]
data=self.ptr.data[1]
# print( "---- PLAY ",self.tNow,toks,data
self.player.play(toks,data)
if self.ptr == self.phrase.tail:
if self.tloop:
self.t_shift=self.t_shift+self.tloop
self.ptr=self.phrase.head
break
return
self.ptr=self.ptr.next
if self.ptr.time+self.t_shift > tt:
break
self.sched()
class PhrasePlayerFirer:
"""
This is used to start playing the last phrase stored in a phraser
"""
def __init__(self,player,context):
"""
player is responsible for playing the phrase.
"""
self.player=player
self.delay=None
self.context=context
def notify(self,phraser):
"""
Start playing the last phrase in the phraser.
Attempts to sync the start so it is on a bar boundary.
"""
context=self.context
seq=self.player.seq
tNow=seq.get_stamp()
self.phrase=phraser.phrases[-1]
tHead=self.phrase.head.time # time of first event in phrase
tTail=self.phrase.tail.time
if self.delay == None:
context.freeze()
self.delay=context.get_barlength()
print( "Setting delay to bar length ",self.delay)
phraseLen=tNow-tTail
if phraseLen < self.delay:
print( "Phrase ",phraseLen," is less than bar length estimate ",self.delay)
tloop=self.delay
else:
ii=int(phraseLen/self.delay)
tloop=self.delay*(ii+1)
self.pPlayer=PhrasePlayer(self.phrase,seq,self.player)
self.pPlayer.start(tloop,tloop)
|
pauljohnleonard/MusicBox
|
src/MB/players.py
|
Python
|
gpl-2.0
| 16,489
|
[
"VisIt"
] |
abcad2776e1c65d2705218d2f4521c0643a9626c43f2533aa2e559995c8b6929
|
# Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Ingest module for Autopsy with GUI
#
# Difference between other modules in this folder is that it has a GUI
# for user options. This is not needed for very basic modules. If you
# don't need a configuration UI, start with the other sample module.
#
# Search for TODO for the things that you need to change
# See http://sleuthkit.org/autopsy/docs/api-docs/4.4/index.html for documentation
import jarray
import inspect
from java.lang import System
from java.util.logging import Level
from javax.swing import JCheckBox
from javax.swing import BoxLayout
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import FileIngestModule
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestModuleIngestJobSettings
from org.sleuthkit.autopsy.ingest import IngestModuleIngestJobSettingsPanel
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import IngestModuleGlobalSettingsPanel
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.autopsy.coreutils import Logger
from java.lang import IllegalArgumentException
# TODO: Rename this to something more specific
class SampleFileIngestModuleWithUIFactory(IngestModuleFactoryAdapter):
def __init__(self):
self.settings = None
# TODO: give it a unique name. Will be shown in module list, logs, etc.
moduleName = "Sample Data Source Module with UI"
def getModuleDisplayName(self):
return self.moduleName
# TODO: Give it a description
def getModuleDescription(self):
return "Sample module that does X, Y, and Z."
def getModuleVersionNumber(self):
return "1.0"
# TODO: Update class name to one that you create below
def getDefaultIngestJobSettings(self):
return SampleFileIngestModuleWithUISettings()
# TODO: Keep enabled only if you need ingest job-specific settings UI
def hasIngestJobSettingsPanel(self):
return True
# TODO: Update class names to ones that you create below
def getIngestJobSettingsPanel(self, settings):
if not isinstance(settings, SampleFileIngestModuleWithUISettings):
raise IllegalArgumentException("Expected settings argument to be instanceof SampleIngestModuleSettings")
self.settings = settings
return SampleFileIngestModuleWithUISettingsPanel(self.settings)
def isFileIngestModuleFactory(self):
return True
# TODO: Update class name to one that you create below
def createFileIngestModule(self, ingestOptions):
return SampleFileIngestModuleWithUI(self.settings)
# File-level ingest module. One gets created per thread.
# TODO: Rename this to something more specific. Could just remove "Factory" from above name.
# Looks at the attributes of the passed in file.
class SampleFileIngestModuleWithUI(FileIngestModule):
_logger = Logger.getLogger(SampleFileIngestModuleWithUIFactory.moduleName)
def log(self, level, msg):
self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)
# Autopsy will pass in the settings from the UI panel
def __init__(self, settings):
self.local_settings = settings
# Where any setup and configuration is done
# TODO: Add any setup code that you need here.
def startUp(self, context):
# As an example, determine if user configured a flag in UI
if self.local_settings.getFlag():
self.log(Level.INFO, "flag is set")
else:
self.log(Level.INFO, "flag is not set")
# Throw an IngestModule.IngestModuleException exception if there was a problem setting up
# raise IngestModuleException("Oh No!")
pass
# Where the analysis is done. Each file will be passed into here.
# TODO: Add your analysis code in here.
def process(self, file):
# See code in pythonExamples/fileIngestModule.py for example code
return IngestModule.ProcessResult.OK
# Where any shutdown code is run and resources are freed.
# TODO: Add any shutdown code that you need here.
def shutDown(self):
pass
# Stores the settings that can be changed for each ingest job
# All fields in here must be serializable. It will be written to disk.
# TODO: Rename this class
class SampleFileIngestModuleWithUISettings(IngestModuleIngestJobSettings):
serialVersionUID = 1L
def __init__(self):
self.flag = False
    def getVersionNumber(self):
        return self.serialVersionUID
# TODO: Define getters and settings for data you want to store from UI
def getFlag(self):
return self.flag
def setFlag(self, flag):
self.flag = flag
# UI that is shown to user for each ingest job so they can configure the job.
# TODO: Rename this
class SampleFileIngestModuleWithUISettingsPanel(IngestModuleIngestJobSettingsPanel):
# Note, we can't use a self.settings instance variable.
# Rather, self.local_settings is used.
# https://wiki.python.org/jython/UserGuide#javabean-properties
# Jython Introspector generates a property - 'settings' on the basis
# of getSettings() defined in this class. Since only getter function
# is present, it creates a read-only 'settings' property. This auto-
# generated read-only property overshadows the instance-variable -
# 'settings'
# We get passed in a previous version of the settings so that we can
# prepopulate the UI
# TODO: Update this for your UI
def __init__(self, settings):
self.local_settings = settings
self.initComponents()
self.customizeComponents()
# TODO: Update this for your UI
def checkBoxEvent(self, event):
if self.checkbox.isSelected():
self.local_settings.setFlag(True)
else:
self.local_settings.setFlag(False)
# TODO: Update this for your UI
def initComponents(self):
self.setLayout(BoxLayout(self, BoxLayout.Y_AXIS))
self.checkbox = JCheckBox("Flag", actionPerformed=self.checkBoxEvent)
self.add(self.checkbox)
# TODO: Update this for your UI
def customizeComponents(self):
self.checkbox.setSelected(self.local_settings.getFlag())
# Return the settings used
def getSettings(self):
return self.local_settings
|
dgrove727/autopsy
|
pythonExamples/fileIngestModuleWithGui.py
|
Python
|
apache-2.0
| 8,201
|
[
"Brian"
] |
0848768f465eabd1f4f68904cd8db5c3f47df3c819a9f8aed70a0aaf4b92da36
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# gridstat - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Grid stats by Martin Rehr"""
import os
import fcntl
import datetime
from shared.defaults import default_vgrid, pending_states
from shared.fileio import pickle, unpickle, touch
from shared.serial import pickle as py_pickle
from shared.vgrid import validated_vgrid_list, job_fits_res_vgrid
class GridStat:
"""Stat class"""
# Stat types
VGRID = 'VGRID'
RESOURCE_TOTAL = 'RESOURCE_TOTAL'
RESOURCE_NODE = 'RESOURCE_EXE'
__gridstat_dict = None
__logger = None
__configuration = None
def __init__(self, configuration, logger):
self.__gridstat_dict = {}
self.__logger = logger
self.__configuration = configuration
def __check_dict(self, stattype_key, stattype_value):
"""Checking if dict exists on disk and loads it into memory"""
# If stattype doesn't exist create it
if not self.__gridstat_dict.has_key(stattype_key):
self.__gridstat_dict[stattype_key] = {}
# If stattype not in memory, check if pickled file exists
if not self.__gridstat_dict[stattype_key].has_key(stattype_value):
filename = self.__configuration.gridstat_files_dir\
+ stattype_key + os.sep + stattype_value.upper()\
+ '.pck'
if os.path.exists(filename):
stat_dict = unpickle(filename, self.__logger)
else:
stat_dict = None
if stat_dict:
self.__gridstat_dict[stattype_key][stattype_value] = \
stat_dict
else:
self.__gridstat_dict[stattype_key][stattype_value] = {}
def __add(
self,
stattype_key,
stattype_value,
key,
value,
):
"""Add value to the statistics"""
self.__check_dict(stattype_key, stattype_value)
if self.__gridstat_dict[stattype_key][stattype_value].has_key(key):
self.__gridstat_dict[stattype_key][stattype_value][key] += \
value
else:
self.__gridstat_dict[stattype_key][stattype_value][key] = \
value
def __addre(
self,
stattype_key,
stattype_value,
key,
value,
):
"""Add runtimeenvironment to the statistics"""
self.__check_dict(stattype_key, stattype_value)
if not self.__gridstat_dict[stattype_key][stattype_value].has_key('RUNTIMEENVIRONMENT'
):
self.__gridstat_dict[stattype_key][stattype_value]['RUNTIMEENVIRONMENT'
] = {}
if self.__gridstat_dict[stattype_key][stattype_value]['RUNTIMEENVIRONMENT'
].has_key(key):
self.__gridstat_dict[stattype_key][stattype_value]['RUNTIMEENVIRONMENT'
][key] += value
else:
self.__gridstat_dict[stattype_key][stattype_value]['RUNTIMEENVIRONMENT'
][key] = value
def __add_resource(
self,
unique_resource_name,
resource_id,
key,
value,
):
"""Add resource node to the statistics"""
# Old mRSL files lack the UNIQUE_RESOURCE_NAME field
if unique_resource_name:
self.__add(self.RESOURCE_TOTAL, unique_resource_name, key,
value)
# Old mRSL files lack the RESOURCE_ID field
        # Old mRSL files have resource_id == unique_resource_name
if resource_id and resource_id != unique_resource_name:
self.__add(self.RESOURCE_NODE, resource_id, key, value)
def __set(
self,
stattype_key,
stattype_value,
key,
value,
):
"""Set a specific value"""
self.__check_dict(stattype_key, stattype_value)
if self.__gridstat_dict[stattype_key][stattype_value].has_key(key):
self.__gridstat_dict[stattype_key][stattype_value][key] = \
value
def __flush(self):
"""Dumps the statistics to disk and clears memory"""
# Flush dict to file in the statistics
for stat_type in self.__gridstat_dict.keys():
for stat_value in self.__gridstat_dict[stat_type].keys():
filename = self.__configuration.gridstat_files_dir\
+ stat_type + os.sep + stat_value + '.pck'
filedir = os.path.dirname(filename)
if not os.path.exists(filedir):
os.makedirs(filedir)
pickle(self.__gridstat_dict[stat_type][stat_value],
filename, self.__logger)
# When dict has been flushed, clear it to prevent heavy memory load
self.__gridstat_dict = {}
def get_dict(self, stattype_key, stattype_value):
"""Get dict containing data about the stattype requested"""
result = {}
self.__check_dict(stattype_key, stattype_value)
if self.__gridstat_dict.has_key(stattype_key)\
and self.__gridstat_dict[stattype_key].has_key(stattype_value):
result = self.__gridstat_dict[stattype_key][stattype_value]
return result
def get_value(
self,
stattype_key,
stattype_value,
key,
default_value=0,
):
"""Get value from the statistic"""
result = default_value
self.__check_dict(stattype_key, stattype_value)
if self.__gridstat_dict.has_key(stattype_key)\
and self.__gridstat_dict[stattype_key].has_key(stattype_value)\
and self.__gridstat_dict[stattype_key][stattype_value].has_key(key):
result = \
self.__gridstat_dict[stattype_key][stattype_value][key]
return result
def get_cachetime(self):
"""Returns a datetime object containing info about last update"""
buildtimestamp_file = self.__configuration.gridstat_files_dir\
+ 'buildcache.timestamp'
timestamp = os.path.getmtime(buildtimestamp_file)
return datetime.datetime.fromtimestamp(timestamp)
def __update_statistics_from_job(
self,
job_id,
job_vgrid_name,
buildcache_dict,
job_dict,
):
"""The dirty details of what jobinfo is used in the
statistics and buildcache"""
# Fix legacy VGRIDs
job_dict['VGRID'] = validated_vgrid_list(self.__configuration,
job_dict)
# If the mRSL file was modified and this is the first time
# we have seen it, add the request info to the statistics.
if not buildcache_dict.has_key(job_id):
self.__add(self.VGRID, job_vgrid_name, 'NODECOUNT_REQ',
int(job_dict['NODECOUNT']))
self.__add(self.VGRID, job_vgrid_name, 'CPUTIME_REQ',
int(job_dict['CPUTIME']))
self.__add(self.VGRID, job_vgrid_name, 'CPUCOUNT_REQ',
int(job_dict['CPUCOUNT']))
self.__add(self.VGRID, job_vgrid_name, 'DISK_REQ',
int(job_dict['DISK']))
self.__add(self.VGRID, job_vgrid_name, 'MEMORY_REQ',
int(job_dict['MEMORY']))
self.__add(self.VGRID, job_vgrid_name, 'RUNTIMEENVIRONMENT_REQ',
len(job_dict['RUNTIMEENVIRONMENT']))
unique_resource_name = None
resource_id = None
if job_dict.has_key('RESOURCE_CONFIG'):
if job_dict.has_key('UNIQUE_RESOURCE_NAME'):
unique_resource_name = job_dict['UNIQUE_RESOURCE_NAME'
].upper()
if job_dict['RESOURCE_CONFIG'].has_key('RESOURCE_ID'):
resource_id = job_dict['RESOURCE_CONFIG']['RESOURCE_ID'
].upper()
if job_dict['STATUS'] == 'PARSE':
self.__add(self.VGRID, job_vgrid_name, 'PARSE', 1)
elif job_dict['STATUS'] == 'QUEUED':
self.__add(self.VGRID, job_vgrid_name, 'QUEUED', 1)
elif job_dict['STATUS'] == 'EXECUTING':
self.__add(self.VGRID, job_vgrid_name, 'EXECUTING', 1)
elif job_dict['STATUS'] == 'FAILED':
self.__add(self.VGRID, job_vgrid_name, 'FAILED', 1)
self.__add_resource(unique_resource_name, resource_id,
'FAILED', 1)
elif job_dict['STATUS'] == 'RETRY':
self.__add(self.VGRID, job_vgrid_name, 'RETRY', 1)
self.__add_resource(unique_resource_name, resource_id,
'RETRY', 1)
elif job_dict['STATUS'] == 'EXPIRED':
self.__add(self.VGRID, job_vgrid_name, 'EXPIRED', 1)
elif job_dict['STATUS'] == 'FROZEN':
self.__add(self.VGRID, job_vgrid_name, 'FROZEN', 1)
elif job_dict['STATUS'] == 'CANCELED':
self.__add(self.VGRID, job_vgrid_name, 'CANCELED', 1)
elif job_dict['STATUS'] == 'FINISHED':
# Recent jobs have the scheduled resource vgrid available in
# the RESOURCE_VGRID field. However, that vgrid may be a parent of
# the requested job vgrid due to inheritance.
# We repeat the vgrid match up to find the actual job vgrid here.
# Fall back to saved resource prioritized vgrid list for old jobs.
active_res_vgrid = job_dict.get('RESOURCE_VGRID', None)
if active_res_vgrid:
search_vgrids = [active_res_vgrid]
else:
print "WARNING: no RESOURCE_VGRID for job %(JOB_ID)s" % \
job_dict
resource_config = job_dict['RESOURCE_CONFIG']
# Fix legacy VGRIDs
resource_config['VGRID'] = validated_vgrid_list(
self.__configuration, resource_config)
search_vgrids = resource_config['VGRID']
(match, active_job_vgrid, _) = job_fits_res_vgrid(
job_dict['VGRID'], search_vgrids)
if not match:
# This should not happen - scheduled job to wrong vgrid!
print "ERROR: %s no match for vgrids: %s vs %s" % \
(job_dict['JOB_ID'], job_dict['VGRID'], search_vgrids)
active_job_vgrid = '__NO_SUCH_JOB_VGRID__'
active_vgrid = active_job_vgrid.upper()
if active_vgrid == job_vgrid_name:
# Compute used wall time
finished_timestamp = job_dict['FINISHED_TIMESTAMP']
finished_datetime = datetime.datetime(
finished_timestamp.tm_year,
finished_timestamp.tm_mon,
finished_timestamp.tm_mday,
finished_timestamp.tm_hour,
finished_timestamp.tm_min,
finished_timestamp.tm_sec,
)
starting_timestamp = job_dict['EXECUTING_TIMESTAMP']
starting_datetime = datetime.datetime(
starting_timestamp.tm_year,
starting_timestamp.tm_mon,
starting_timestamp.tm_mday,
starting_timestamp.tm_hour,
starting_timestamp.tm_min,
starting_timestamp.tm_sec,
)
used_walltime = finished_datetime - starting_datetime
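                # Note: used_walltime is a datetime.timedelta, not an int;
                # __add() still accumulates it across jobs because timedelta
                # supports '+', unlike the plain integer counters used for
                # the other keys.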
# VGrid stats
self.__add(self.VGRID, job_vgrid_name, 'FINISHED', 1)
self.__add(self.VGRID, job_vgrid_name, 'NODECOUNT_DONE',
int(job_dict['NODECOUNT']))
self.__add(self.VGRID, job_vgrid_name, 'CPUTIME_DONE',
int(job_dict['CPUTIME']))
self.__add(self.VGRID, job_vgrid_name, 'USED_WALLTIME',
used_walltime)
self.__add(self.VGRID, job_vgrid_name, 'CPUCOUNT_DONE',
int(job_dict['CPUCOUNT']))
self.__add(self.VGRID, job_vgrid_name, 'DISK_DONE',
int(job_dict['DISK']))
self.__add(self.VGRID, job_vgrid_name, 'MEMORY_DONE',
int(job_dict['MEMORY']))
self.__add(self.VGRID, job_vgrid_name, 'RUNTIMEENVIRONMENT_DONE',
len(job_dict['RUNTIMEENVIRONMENT']))
# Resource stats
self.__add_resource(unique_resource_name, resource_id,
'FINISHED', 1)
self.__add_resource(unique_resource_name, resource_id,
'USED_WALLTIME', used_walltime)
# RE stats
for runtime_env in job_dict['RUNTIMEENVIRONMENT']:
self.__addre(self.VGRID, job_vgrid_name, runtime_env, 1)
# Old mRSL files lack the UNIQUE_RESOURCE_NAME field
if unique_resource_name:
self.__addre(self.RESOURCE_TOTAL,
unique_resource_name, runtime_env, 1)
# Old mRSL files lack the RESOURCE_ID field
                    # Old mRSL files have resource_id == unique_resource_name
if resource_id and resource_id != unique_resource_name:
self.__addre(self.RESOURCE_NODE, resource_id,
runtime_env, 1)
else:
print 'Unknown status: ' + job_dict['STATUS']
        # Check and update the cache entry for the previous status
if buildcache_dict.has_key(job_id) and \
buildcache_dict[job_id] in pending_states:
self.__add(self.VGRID, job_vgrid_name, buildcache_dict[job_id], -1)
        # Cache the current status for use in the next iteration.
        # Note that the statuses CANCELED, FAILED, EXPIRED and FINISHED are
        # final stages, and therefore none of those should occur in
        # the cache, as the mRSL file should not be modified once it
        # reaches one of those stages.
if job_dict['STATUS'] in pending_states:
buildcache_dict[job_id] = job_dict['STATUS']
elif buildcache_dict.has_key(job_id):
del buildcache_dict[job_id]
def update(self):
"""Updates the statistics and cache from the mRSL files"""
self.__gridstat_dict = {}
# Cache and timestamp dirs
root_dir = self.__configuration.mrsl_files_dir
buildcache_file = self.__configuration.gridstat_files_dir\
+ 'buildcache.pck'
buildtimestamp_file = self.__configuration.gridstat_files_dir\
+ 'buildcache.timestamp'
# We lock the buildcache, to make sure that only one vgrid is
# updated at a time
if os.path.exists(buildcache_file):
try:
file_handle = open(buildcache_file, 'r+w')
fcntl.flock(file_handle.fileno(), fcntl.LOCK_EX)
buildcache_dict = py_pickle.load(file_handle)
except Exception, err:
msg = 'gridstat::update(): %s could not be loaded! %s'\
% (buildcache_file, err)
print msg
self.__logger.error(msg)
return False
else:
buildcache_dict = {}
try:
file_handle = open(buildcache_file, 'w')
fcntl.flock(file_handle.fileno(), fcntl.LOCK_EX)
except Exception, err:
msg = \
'gridstat::update(): %s could not be opened/locked! %s'\
% (buildcache_file, err)
self.__logger.error(msg)
return False
# Get timestamp for last build and create timestamp for this
# build.
        # This is done here to avoid races in the cache
        # between mRSL files that are being modified while the cache is
        # being built.
last_buildtime = 0
if os.path.exists(buildtimestamp_file):
last_buildtime = os.path.getmtime(buildtimestamp_file)
# Touch buildtimestamp file, so the modified time of it is
# updated
touch(buildtimestamp_file)
# Traverse mRSL dir and update cache
for (root, _, files) in os.walk(root_dir, topdown=True):
# skip all dot dirs - they are from repos etc and _not_ jobs
if root.find(os.sep + '.') != -1:
continue
for name in files:
filename = os.path.join(root, name)
                # Only files modified since the last update are checked
if os.path.getmtime(filename) > last_buildtime:
job_dict = unpickle(filename, self.__logger)
if not job_dict:
msg = 'gridstat::update() could not load: %s '\
% filename
self.__logger.error(msg)
continue
job_vgrids = validated_vgrid_list(self.__configuration,
job_dict)
for job_vgrid_name in job_vgrids:
# Update the statistics and cache
# from the job details
job_vgrid_name = job_vgrid_name.upper()
self.__update_statistics_from_job(name,
job_vgrid_name, buildcache_dict,
job_dict)
# Flush cache and unlock files
try:
file_handle.seek(0, 0)
py_pickle.dump(buildcache_dict, file_handle, 0)
self.__flush()
fcntl.flock(file_handle.fileno(), fcntl.LOCK_UN)
file_handle.close()
except Exception, err:
self.__gridstat_dict = {}
msg = 'gridstat::update(): %s could not be pickled! %s'\
% (buildcache_file, err)
self.__logger.error(msg)
return False
return True
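# Illustrative sketch (not part of the original module): how a caller might
# drive GridStat. The configuration and logger objects are assumed to come
# from the standard MiG setup; everything else only uses names defined above.
def _example_gridstat_usage(configuration, logger):
    """Hypothetical helper showing a typical GridStat round trip"""
    gridstat = GridStat(configuration, logger)
    # Rebuild the statistics and build cache from the mRSL job files
    if not gridstat.update():
        return None
    # Read back a single counter for the default vgrid (stat values are
    # stored upper-cased by update())
    return gridstat.get_value(GridStat.VGRID, default_vgrid.upper(),
                              'FINISHED')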
if __name__ == '__main__':
import sys
import fnmatch
from shared.conf import get_configuration_object
configuration = get_configuration_object()
root_dir = configuration.mrsl_files_dir
job_id = '*_2012_*'
search_runtime = "GAP-4.5.X-1"
if sys.argv[1:]:
job_id = sys.argv[1]
if sys.argv[2:]:
search_runtime = sys.argv[2]
matches = []
for (root, _, files) in os.walk(root_dir, topdown=True):
# skip all dot dirs - they are from repos etc and _not_ jobs
if root.find(os.sep + '.') != -1:
continue
for name in fnmatch.filter(files, job_id+'.mRSL'):
filename = os.path.join(root, name)
job_dict = unpickle(filename, configuration.logger)
if not job_dict:
print "could not load %s" % filename
continue
runtime_envs = job_dict["RUNTIMEENVIRONMENT"]
if not search_runtime in runtime_envs:
continue
job_vgrids = validated_vgrid_list(configuration, job_dict)
#print "DEBUG: found matching job %s with vgrids: %s" % (filename,
# job_vgrids)
matches.append(job_dict)
print "DEBUG: found %d matching jobs" % len(matches)
finished_count = 0
default_vgrid_count = 0
resource_vgrid_map = {}
job_vgrid_map = {}
explicit_vgrids = {}
implicit_vgrids = {}
parent_vgrids = {}
for job_dict in matches:
job_id = job_dict['JOB_ID']
job_vgrid = job_dict['VGRID']
job_vgrid_string = str(job_vgrid)
if job_dict['STATUS'] == 'FINISHED':
finished_count += 1
active_res_vgrid = job_dict.get('RESOURCE_VGRID', None)
if active_res_vgrid:
search_vgrids = [active_res_vgrid]
else:
search_vgrids = job_dict['RESOURCE_CONFIG']['VGRID']
(match, active_job_vgrid, _) = job_fits_res_vgrid(
job_dict['VGRID'], search_vgrids)
resource_vgrid_map[active_res_vgrid] = resource_vgrid_map.get(
active_res_vgrid, 0) + 1
job_vgrid_map[active_job_vgrid] = job_vgrid_map.get(
active_job_vgrid, 0) + 1
if active_res_vgrid not in job_vgrid:
implicit_vgrids[active_res_vgrid] = implicit_vgrids.get(
active_res_vgrid, 0) + 1
parent_vgrids[job_vgrid_string] = parent_vgrids.get(
job_vgrid_string, 0) + 1
else:
explicit_vgrids[active_res_vgrid] = explicit_vgrids.get(
active_res_vgrid, 0) + 1
print "DEBUG: found %d finished jobs" % \
finished_count
print "resource vgrid distribution:"
for (key, val) in resource_vgrid_map.items():
print "\t%s: %d" % (key, val)
print " * explicit:"
for (key, val) in explicit_vgrids.items():
print "\t%s: %d" % (key, val)
print " * implicit:"
for (key, val) in implicit_vgrids.items():
print "\t%s: %d" % (key, val)
print " * parent:"
for (key, val) in parent_vgrids.items():
print "\t%s: %d" % (key, val)
print "job vgrid distribution:"
for (key, val) in job_vgrid_map.items():
print "\t%s: %d" % (key, val)
|
heromod/migrid
|
mig/shared/gridstat.py
|
Python
|
gpl-2.0
| 22,530
|
[
"Brian"
] |
c4881563b4255d96954f2c590aa1d5dce250e6767a3485c47c80004c3278a70b
|
# Copyright (C) 2015-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
import logging
import random
import select
from typing import Any, Dict, Iterable, List, Optional, Tuple
from swh.core.db import BaseDb
from swh.core.db.db_utils import execute_values_generator
from swh.core.db.db_utils import jsonize as _jsonize
from swh.core.db.db_utils import stored_procedure
from swh.model.hashutil import DEFAULT_ALGORITHMS
from swh.model.model import SHA1_SIZE, OriginVisit, OriginVisitStatus, Sha1Git
from swh.model.swhids import ObjectType
from swh.storage.interface import ListOrder
logger = logging.getLogger(__name__)
def jsonize(d):
return _jsonize(dict(d) if d is not None else None)
class Db(BaseDb):
"""Proxy to the SWH DB, with wrappers around stored procedures
"""
current_version = 182
def mktemp_dir_entry(self, entry_type, cur=None):
self._cursor(cur).execute(
"SELECT swh_mktemp_dir_entry(%s)", (("directory_entry_%s" % entry_type),)
)
@stored_procedure("swh_mktemp_revision")
def mktemp_revision(self, cur=None):
pass
@stored_procedure("swh_mktemp_release")
def mktemp_release(self, cur=None):
pass
@stored_procedure("swh_mktemp_snapshot_branch")
def mktemp_snapshot_branch(self, cur=None):
pass
def register_listener(self, notify_queue, cur=None):
"""Register a listener for NOTIFY queue `notify_queue`"""
self._cursor(cur).execute("LISTEN %s" % notify_queue)
def listen_notifies(self, timeout):
"""Listen to notifications for `timeout` seconds"""
if select.select([self.conn], [], [], timeout) == ([], [], []):
return
else:
self.conn.poll()
while self.conn.notifies:
yield self.conn.notifies.pop(0)
@stored_procedure("swh_content_add")
def content_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_directory_add")
def directory_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_skipped_content_add")
def skipped_content_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_revision_add")
def revision_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_extid_add")
def extid_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_release_add")
def release_add_from_temp(self, cur=None):
pass
def content_update_from_temp(self, keys_to_update, cur=None):
cur = self._cursor(cur)
cur.execute(
"""select swh_content_update(ARRAY[%s] :: text[])""" % keys_to_update
)
content_get_metadata_keys = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"status",
]
content_add_keys = content_get_metadata_keys + ["ctime"]
skipped_content_keys = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"reason",
"status",
"origin",
]
def content_get_metadata_from_hashes(
self, hashes: List[bytes], algo: str, cur=None
):
cur = self._cursor(cur)
assert algo in DEFAULT_ALGORITHMS
query = f"""
select {", ".join(self.content_get_metadata_keys)}
from (values %s) as t (hash)
inner join content on (content.{algo}=hash)
"""
yield from execute_values_generator(
cur, query, ((hash_,) for hash_ in hashes),
)
def content_get_range(self, start, end, limit=None, cur=None):
"""Retrieve contents within range [start, end].
"""
cur = self._cursor(cur)
query = """select %s from content
where %%s <= sha1 and sha1 <= %%s
order by sha1
limit %%s""" % ", ".join(
self.content_get_metadata_keys
)
cur.execute(query, (start, end, limit))
yield from cur
content_hash_keys = ["sha1", "sha1_git", "sha256", "blake2s256"]
def content_missing_from_list(self, contents, cur=None):
cur = self._cursor(cur)
keys = ", ".join(self.content_hash_keys)
equality = " AND ".join(
("t.%s = c.%s" % (key, key)) for key in self.content_hash_keys
)
yield from execute_values_generator(
cur,
"""
SELECT %s
FROM (VALUES %%s) as t(%s)
WHERE NOT EXISTS (
SELECT 1 FROM content c
WHERE %s
)
"""
% (keys, keys, equality),
(tuple(c[key] for key in self.content_hash_keys) for c in contents),
)
def content_missing_per_sha1(self, sha1s, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT t.sha1 FROM (VALUES %s) AS t(sha1)
WHERE NOT EXISTS (
SELECT 1 FROM content c WHERE c.sha1 = t.sha1
)""",
((sha1,) for sha1 in sha1s),
)
def content_missing_per_sha1_git(self, contents, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT t.sha1_git FROM (VALUES %s) AS t(sha1_git)
WHERE NOT EXISTS (
SELECT 1 FROM content c WHERE c.sha1_git = t.sha1_git
)""",
((sha1,) for sha1 in contents),
)
def skipped_content_missing(self, contents, cur=None):
if not contents:
return []
cur = self._cursor(cur)
query = """SELECT * FROM (VALUES %s) AS t (%s)
WHERE not exists
(SELECT 1 FROM skipped_content s WHERE
s.sha1 is not distinct from t.sha1::sha1 and
s.sha1_git is not distinct from t.sha1_git::sha1 and
s.sha256 is not distinct from t.sha256::bytea);""" % (
(", ".join("%s" for _ in contents)),
", ".join(self.content_hash_keys),
)
cur.execute(
query,
[tuple(cont[key] for key in self.content_hash_keys) for cont in contents],
)
yield from cur
def snapshot_exists(self, snapshot_id, cur=None):
"""Check whether a snapshot with the given id exists"""
cur = self._cursor(cur)
cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,))
return bool(cur.fetchone())
def snapshot_missing_from_list(self, snapshots, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM snapshot d WHERE d.id = t.id
)
""",
((id,) for id in snapshots),
)
def snapshot_add(self, snapshot_id, cur=None):
"""Add a snapshot from the temporary table"""
cur = self._cursor(cur)
cur.execute("""SELECT swh_snapshot_add(%s)""", (snapshot_id,))
snapshot_count_cols = ["target_type", "count"]
def snapshot_count_branches(
self, snapshot_id, branch_name_exclude_prefix=None, cur=None,
):
cur = self._cursor(cur)
query = """\
SELECT %s FROM swh_snapshot_count_branches(%%s, %%s)
""" % ", ".join(
self.snapshot_count_cols
)
cur.execute(query, (snapshot_id, branch_name_exclude_prefix))
yield from cur
snapshot_get_cols = ["snapshot_id", "name", "target", "target_type"]
def snapshot_get_by_id(
self,
snapshot_id,
branches_from=b"",
branches_count=None,
target_types=None,
branch_name_include_substring=None,
branch_name_exclude_prefix=None,
cur=None,
):
cur = self._cursor(cur)
query = """\
SELECT %s
FROM swh_snapshot_get_by_id(%%s, %%s, %%s, %%s :: snapshot_target[], %%s, %%s)
""" % ", ".join(
self.snapshot_get_cols
)
cur.execute(
query,
(
snapshot_id,
branches_from,
branches_count,
target_types,
branch_name_include_substring,
branch_name_exclude_prefix,
),
)
yield from cur
def snapshot_get_random(self, cur=None):
return self._get_random_row_from_table("snapshot", ["id"], "id", cur)
content_find_cols = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"ctime",
"status",
]
def content_find(
self,
sha1: Optional[bytes] = None,
sha1_git: Optional[bytes] = None,
sha256: Optional[bytes] = None,
blake2s256: Optional[bytes] = None,
cur=None,
):
"""Find the content optionally on a combination of the following
checksums sha1, sha1_git, sha256 or blake2s256.
Args:
sha1: sha1 content
git_sha1: the sha1 computed `a la git` sha1 of the content
sha256: sha256 content
blake2s256: blake2s256 content
Returns:
The tuple (sha1, sha1_git, sha256, blake2s256) if found or None.
"""
cur = self._cursor(cur)
checksum_dict = {
"sha1": sha1,
"sha1_git": sha1_git,
"sha256": sha256,
"blake2s256": blake2s256,
}
query_parts = [f"SELECT {','.join(self.content_find_cols)} FROM content WHERE "]
query_params = []
where_parts = []
        # Add only the keys whose values are set
for algorithm in checksum_dict:
if checksum_dict[algorithm] is not None:
where_parts.append(f"{algorithm} = %s")
query_params.append(checksum_dict[algorithm])
query_parts.append(" AND ".join(where_parts))
query = "\n".join(query_parts)
cur.execute(query, query_params)
content = cur.fetchall()
return content
def content_get_random(self, cur=None):
return self._get_random_row_from_table("content", ["sha1_git"], "sha1_git", cur)
def directory_missing_from_list(self, directories, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM directory d WHERE d.id = t.id
)
""",
((id,) for id in directories),
)
directory_ls_cols = [
"dir_id",
"type",
"target",
"name",
"perms",
"status",
"sha1",
"sha1_git",
"sha256",
"length",
]
def directory_walk_one(self, directory, cur=None):
cur = self._cursor(cur)
cols = ", ".join(self.directory_ls_cols)
query = "SELECT %s FROM swh_directory_walk_one(%%s)" % cols
cur.execute(query, (directory,))
yield from cur
def directory_walk(self, directory, cur=None):
cur = self._cursor(cur)
cols = ", ".join(self.directory_ls_cols)
query = "SELECT %s FROM swh_directory_walk(%%s)" % cols
cur.execute(query, (directory,))
yield from cur
def directory_entry_get_by_path(self, directory, paths, cur=None):
"""Retrieve a directory entry by path.
"""
cur = self._cursor(cur)
cols = ", ".join(self.directory_ls_cols)
query = "SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)" % cols
cur.execute(query, (directory, paths))
data = cur.fetchone()
if set(data) == {None}:
return None
return data
directory_get_entries_cols = ["type", "target", "name", "perms"]
def directory_get_entries(self, directory: Sha1Git, cur=None) -> List[Tuple]:
cur = self._cursor(cur)
cur.execute(
"SELECT * FROM swh_directory_get_entries(%s::sha1_git)", (directory,)
)
return list(cur)
def directory_get_raw_manifest(
self, directory_ids: List[Sha1Git], cur=None
) -> Iterable[Tuple[Sha1Git, bytes]]:
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT t.id, raw_manifest FROM (VALUES %s) as t(id)
INNER JOIN directory ON (t.id=directory.id)
""",
((id_,) for id_ in directory_ids),
)
def directory_get_random(self, cur=None):
return self._get_random_row_from_table("directory", ["id"], "id", cur)
def revision_missing_from_list(self, revisions, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM revision r WHERE r.id = t.id
)
""",
((id,) for id in revisions),
)
revision_add_cols = [
"id",
"date",
"date_offset",
"date_neg_utc_offset",
"date_offset_bytes",
"committer_date",
"committer_date_offset",
"committer_date_neg_utc_offset",
"committer_date_offset_bytes",
"type",
"directory",
"message",
"author_fullname",
"author_name",
"author_email",
"committer_fullname",
"committer_name",
"committer_email",
"metadata",
"synthetic",
"extra_headers",
"raw_manifest",
]
revision_get_cols = revision_add_cols + ["parents"]
def origin_visit_add(self, origin, ts, type, cur=None):
"""Add a new origin_visit for origin origin at timestamp ts.
Args:
origin: origin concerned by the visit
ts: the date of the visit
type: type of loader for the visit
Returns:
The new visit index step for that origin
"""
cur = self._cursor(cur)
self._cursor(cur).execute(
"SELECT swh_origin_visit_add(%s, %s, %s)", (origin, ts, type)
)
return cur.fetchone()[0]
origin_visit_status_cols = [
"origin",
"visit",
"date",
"type",
"status",
"snapshot",
"metadata",
]
def origin_visit_status_add(
self, visit_status: OriginVisitStatus, cur=None
) -> None:
"""Add new origin visit status
"""
assert self.origin_visit_status_cols[0] == "origin"
assert self.origin_visit_status_cols[-1] == "metadata"
cols = self.origin_visit_status_cols[1:-1]
cur = self._cursor(cur)
cur.execute(
f"WITH origin_id as (select id from origin where url=%s) "
f"INSERT INTO origin_visit_status "
f"(origin, {', '.join(cols)}, metadata) "
f"VALUES ((select id from origin_id), "
f"{', '.join(['%s']*len(cols))}, %s) "
f"ON CONFLICT (origin, visit, date) do nothing",
[visit_status.origin]
+ [getattr(visit_status, key) for key in cols]
+ [jsonize(visit_status.metadata)],
)
origin_visit_cols = ["origin", "visit", "date", "type"]
def origin_visit_add_with_id(self, origin_visit: OriginVisit, cur=None) -> None:
"""Insert origin visit when id are already set
"""
ov = origin_visit
assert ov.visit is not None
cur = self._cursor(cur)
query = """INSERT INTO origin_visit ({cols})
VALUES ((select id from origin where url=%s), {values})
ON CONFLICT (origin, visit) DO NOTHING""".format(
cols=", ".join(self.origin_visit_cols),
values=", ".join("%s" for col in self.origin_visit_cols[1:]),
)
cur.execute(query, (ov.origin, ov.visit, ov.date, ov.type))
origin_visit_get_cols = [
"origin",
"visit",
"date",
"type",
"status",
"metadata",
"snapshot",
]
origin_visit_select_cols = [
"o.url AS origin",
"ov.visit",
"ov.date",
"ov.type AS type",
"ovs.status",
"ovs.snapshot",
"ovs.metadata",
]
origin_visit_status_select_cols = [
"o.url AS origin",
"ovs.visit",
"ovs.date",
"ovs.type AS type",
"ovs.status",
"ovs.snapshot",
"ovs.metadata",
]
def _make_origin_visit_status(
self, row: Optional[Tuple[Any]]
) -> Optional[Dict[str, Any]]:
"""Make an origin_visit_status dict out of a row
"""
if not row:
return None
return dict(zip(self.origin_visit_status_cols, row))
def origin_visit_status_get_latest(
self,
origin_url: str,
visit: int,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
cur=None,
) -> Optional[Dict[str, Any]]:
"""Given an origin visit id, return its latest origin_visit_status
"""
cur = self._cursor(cur)
query_parts = [
"SELECT %s" % ", ".join(self.origin_visit_status_select_cols),
"FROM origin_visit_status ovs ",
"INNER JOIN origin o ON o.id = ovs.origin",
]
query_parts.append("WHERE o.url = %s")
query_params: List[Any] = [origin_url]
query_parts.append("AND ovs.visit = %s")
query_params.append(visit)
if require_snapshot:
query_parts.append("AND ovs.snapshot is not null")
if allowed_statuses:
query_parts.append("AND ovs.status IN %s")
query_params.append(tuple(allowed_statuses))
query_parts.append("ORDER BY ovs.date DESC LIMIT 1")
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
row = cur.fetchone()
return self._make_origin_visit_status(row)
def origin_visit_status_get_range(
self,
origin: str,
visit: int,
date_from: Optional[datetime.datetime],
order: ListOrder,
limit: int,
cur=None,
):
"""Retrieve visit_status rows for visit (origin, visit) in a paginated way.
"""
cur = self._cursor(cur)
query_parts = [
f"SELECT {', '.join(self.origin_visit_status_select_cols)} "
"FROM origin_visit_status ovs ",
"INNER JOIN origin o ON o.id = ovs.origin ",
]
query_parts.append("WHERE o.url = %s AND ovs.visit = %s ")
query_params: List[Any] = [origin, visit]
if date_from is not None:
op_comparison = ">=" if order == ListOrder.ASC else "<="
query_parts.append(f"and ovs.date {op_comparison} %s ")
query_params.append(date_from)
if order == ListOrder.ASC:
query_parts.append("ORDER BY ovs.date ASC ")
elif order == ListOrder.DESC:
query_parts.append("ORDER BY ovs.date DESC ")
else:
assert False
query_parts.append("LIMIT %s")
query_params.append(limit)
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
yield from cur
def origin_visit_get_range(
self, origin: str, visit_from: int, order: ListOrder, limit: int, cur=None,
):
cur = self._cursor(cur)
origin_visit_cols = ["o.url as origin", "ov.visit", "ov.date", "ov.type"]
query_parts = [
f"SELECT {', '.join(origin_visit_cols)} FROM origin_visit ov ",
"INNER JOIN origin o ON o.id = ov.origin ",
]
query_parts.append("WHERE o.url = %s")
query_params: List[Any] = [origin]
if visit_from > 0:
op_comparison = ">" if order == ListOrder.ASC else "<"
query_parts.append(f"and ov.visit {op_comparison} %s")
query_params.append(visit_from)
if order == ListOrder.ASC:
query_parts.append("ORDER BY ov.visit ASC")
elif order == ListOrder.DESC:
query_parts.append("ORDER BY ov.visit DESC")
query_parts.append("LIMIT %s")
query_params.append(limit)
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
yield from cur
def origin_visit_get(self, origin_id, visit_id, cur=None):
"""Retrieve information on visit visit_id of origin origin_id.
Args:
origin_id: the origin concerned
visit_id: The visit step for that origin
Returns:
The origin_visit information
"""
cur = self._cursor(cur)
query = """\
SELECT %s
FROM origin_visit ov
INNER JOIN origin o ON o.id = ov.origin
INNER JOIN origin_visit_status ovs USING (origin, visit)
WHERE o.url = %%s AND ov.visit = %%s
ORDER BY ovs.date DESC
LIMIT 1
""" % (
", ".join(self.origin_visit_select_cols)
)
cur.execute(query, (origin_id, visit_id))
r = cur.fetchall()
if not r:
return None
return r[0]
def origin_visit_find_by_date(self, origin, visit_date, cur=None):
cur = self._cursor(cur)
cur.execute(
"SELECT * FROM swh_visit_find_by_date(%s, %s)", (origin, visit_date)
)
rows = cur.fetchall()
if rows:
visit = dict(zip(self.origin_visit_get_cols, rows[0]))
visit["origin"] = origin
return visit
def origin_visit_exists(self, origin_id, visit_id, cur=None):
"""Check whether an origin visit with the given ids exists"""
cur = self._cursor(cur)
query = "SELECT 1 FROM origin_visit where origin = %s AND visit = %s"
cur.execute(query, (origin_id, visit_id))
return bool(cur.fetchone())
def origin_visit_get_latest(
self,
origin_id: str,
type: Optional[str],
allowed_statuses: Optional[Iterable[str]],
require_snapshot: bool,
cur=None,
):
"""Retrieve the most recent origin_visit of the given origin,
with optional filters.
Args:
origin_id: the origin concerned
type: Optional visit type to filter on
allowed_statuses: the visit statuses allowed for the returned visit
require_snapshot (bool): If True, only a visit with a known
snapshot will be returned.
Returns:
The origin_visit information, or None if no visit matches.
"""
cur = self._cursor(cur)
query_parts = [
"SELECT %s" % ", ".join(self.origin_visit_select_cols),
"FROM origin_visit ov ",
"INNER JOIN origin o ON o.id = ov.origin",
"INNER JOIN origin_visit_status ovs USING (origin, visit)",
]
query_parts.append("WHERE o.url = %s")
query_params: List[Any] = [origin_id]
if type is not None:
query_parts.append("AND ov.type = %s")
query_params.append(type)
if require_snapshot:
query_parts.append("AND ovs.snapshot is not null")
if allowed_statuses:
query_parts.append("AND ovs.status IN %s")
query_params.append(tuple(allowed_statuses))
query_parts.append(
"ORDER BY ov.date DESC, ov.visit DESC, ovs.date DESC LIMIT 1"
)
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
r = cur.fetchone()
if not r:
return None
return r
def origin_visit_get_random(self, type, cur=None):
"""Randomly select one origin visit that was full and in the last 3
months
"""
cur = self._cursor(cur)
columns = ",".join(self.origin_visit_select_cols)
query = f"""select {columns}
from origin_visit ov
inner join origin o on ov.origin=o.id
inner join origin_visit_status ovs using (origin, visit)
where ovs.status='full'
and ov.type=%s
and ov.date > now() - '3 months'::interval
and random() < 0.1
limit 1
"""
cur.execute(query, (type,))
return cur.fetchone()
@staticmethod
def mangle_query_key(key, main_table, ignore_displayname=False):
if key == "id":
return "t.id"
if key == "parents":
return """
ARRAY(
SELECT rh.parent_id::bytea
FROM revision_history rh
WHERE rh.id = t.id
ORDER BY rh.parent_rank
)"""
if "_" not in key:
return f"{main_table}.{key}"
head, tail = key.split("_", 1)
if head not in ("author", "committer") or tail not in (
"name",
"email",
"id",
"fullname",
):
return f"{main_table}.{key}"
if ignore_displayname:
return f"{head}.{tail}"
else:
if tail == "id":
return f"{head}.{tail}"
elif tail in ("name", "email"):
# These fields get populated again from fullname by
# converters.db_to_author if they're None, so we can just NULLify them
# when displayname is set.
return (
f"CASE"
f" WHEN {head}.displayname IS NULL THEN {head}.{tail} "
f" ELSE NULL "
f"END AS {key}"
)
elif tail == "fullname":
return f"COALESCE({head}.displayname, {head}.fullname) AS {key}"
assert False, "All cases should have been handled here"
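    # Illustrative note (not part of the upstream file), with hypothetical
    # inputs: mangle_query_key("id", "revision") simply yields "t.id" for the
    # VALUES join alias, while mangle_query_key("author_name", "revision")
    # yields a CASE expression that NULLs the name whenever the person row
    # carries a displayname override.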
def revision_get_from_list(self, revisions, ignore_displayname=False, cur=None):
cur = self._cursor(cur)
query_keys = ", ".join(
self.mangle_query_key(k, "revision", ignore_displayname)
for k in self.revision_get_cols
)
yield from execute_values_generator(
cur,
"""
SELECT %s FROM (VALUES %%s) as t(sortkey, id)
LEFT JOIN revision ON t.id = revision.id
LEFT JOIN person author ON revision.author = author.id
LEFT JOIN person committer ON revision.committer = committer.id
ORDER BY sortkey
"""
% query_keys,
((sortkey, id) for sortkey, id in enumerate(revisions)),
)
extid_cols = ["extid", "extid_version", "extid_type", "target", "target_type"]
def extid_get_from_extid_list(
self, extid_type: str, ids: List[bytes], version: Optional[int] = None, cur=None
):
cur = self._cursor(cur)
query_keys = ", ".join(
self.mangle_query_key(k, "extid") for k in self.extid_cols
)
filter_query = ""
if version is not None:
filter_query = cur.mogrify(
f"WHERE extid_version={version}", (version,)
).decode()
sql = f"""
SELECT {query_keys}
FROM (VALUES %s) as t(sortkey, extid, extid_type)
LEFT JOIN extid USING (extid, extid_type)
{filter_query}
ORDER BY sortkey
"""
yield from execute_values_generator(
cur,
sql,
(((sortkey, extid, extid_type) for sortkey, extid in enumerate(ids))),
)
def extid_get_from_swhid_list(
self,
target_type: str,
ids: List[bytes],
extid_version: Optional[int] = None,
extid_type: Optional[str] = None,
cur=None,
):
cur = self._cursor(cur)
target_type = ObjectType(
target_type
).name.lower() # aka "rev" -> "revision", ...
query_keys = ", ".join(
self.mangle_query_key(k, "extid") for k in self.extid_cols
)
filter_query = ""
if extid_version is not None and extid_type is not None:
filter_query = cur.mogrify(
"WHERE extid_version=%s AND extid_type=%s", (extid_version, extid_type,)
).decode()
sql = f"""
SELECT {query_keys}
FROM (VALUES %s) as t(sortkey, target, target_type)
LEFT JOIN extid USING (target, target_type)
{filter_query}
ORDER BY sortkey
"""
yield from execute_values_generator(
cur,
sql,
(((sortkey, target, target_type) for sortkey, target in enumerate(ids))),
template=b"(%s,%s,%s::object_type)",
)
def revision_log(
self, root_revisions, ignore_displayname=False, limit=None, cur=None
):
cur = self._cursor(cur)
query = """\
SELECT %s
FROM swh_revision_log(
"root_revisions" := %%s, num_revs := %%s, "ignore_displayname" := %%s
)""" % ", ".join(
self.revision_get_cols
)
cur.execute(query, (root_revisions, limit, ignore_displayname))
yield from cur
revision_shortlog_cols = ["id", "parents"]
def revision_shortlog(self, root_revisions, limit=None, cur=None):
cur = self._cursor(cur)
query = """SELECT %s
FROM swh_revision_list(%%s, %%s)
""" % ", ".join(
self.revision_shortlog_cols
)
cur.execute(query, (root_revisions, limit))
yield from cur
def revision_get_random(self, cur=None):
return self._get_random_row_from_table("revision", ["id"], "id", cur)
def release_missing_from_list(self, releases, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM release r WHERE r.id = t.id
)
""",
((id,) for id in releases),
)
object_find_by_sha1_git_cols = ["sha1_git", "type"]
def object_find_by_sha1_git(self, ids, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
WITH t (sha1_git) AS (VALUES %s),
known_objects as ((
select
id as sha1_git,
'release'::object_type as type,
object_id
from release r
where exists (select 1 from t where t.sha1_git = r.id)
) union all (
select
id as sha1_git,
'revision'::object_type as type,
object_id
from revision r
where exists (select 1 from t where t.sha1_git = r.id)
) union all (
select
id as sha1_git,
'directory'::object_type as type,
object_id
from directory d
where exists (select 1 from t where t.sha1_git = d.id)
) union all (
select
sha1_git as sha1_git,
'content'::object_type as type,
object_id
from content c
where exists (select 1 from t where t.sha1_git = c.sha1_git)
))
select t.sha1_git as sha1_git, k.type
from t
left join known_objects k on t.sha1_git = k.sha1_git
""",
((id,) for id in ids),
)
def stat_counters(self, cur=None):
cur = self._cursor(cur)
cur.execute("SELECT * FROM swh_stat_counters()")
yield from cur
def origin_add(self, url, cur=None):
"""Insert a new origin and return the new identifier."""
insert = """INSERT INTO origin (url) values (%s)
ON CONFLICT DO NOTHING
"""
cur.execute(insert, (url,))
return cur.rowcount
origin_cols = ["url"]
def origin_get_by_url(self, origins, cur=None):
"""Retrieve origin `(type, url)` from urls if found."""
cur = self._cursor(cur)
query = """SELECT %s FROM (VALUES %%s) as t(url)
LEFT JOIN origin ON t.url = origin.url
""" % ",".join(
"origin." + col for col in self.origin_cols
)
yield from execute_values_generator(cur, query, ((url,) for url in origins))
def origin_get_by_sha1(self, sha1s, cur=None):
"""Retrieve origin urls from sha1s if found."""
cur = self._cursor(cur)
query = """SELECT %s FROM (VALUES %%s) as t(sha1)
LEFT JOIN origin ON t.sha1 = digest(origin.url, 'sha1')
""" % ",".join(
"origin." + col for col in self.origin_cols
)
yield from execute_values_generator(cur, query, ((sha1,) for sha1 in sha1s))
def origin_id_get_by_url(self, origins, cur=None):
"""Retrieve origin `(type, url)` from urls if found."""
cur = self._cursor(cur)
query = """SELECT id FROM (VALUES %s) as t(url)
LEFT JOIN origin ON t.url = origin.url
"""
for row in execute_values_generator(cur, query, ((url,) for url in origins)):
yield row[0]
origin_get_range_cols = ["id", "url"]
def origin_get_range(self, origin_from: int = 1, origin_count: int = 100, cur=None):
"""Retrieve ``origin_count`` origins whose ids are greater
or equal than ``origin_from``.
Origins are sorted by id before retrieving them.
Args:
origin_from: the minimum id of origins to retrieve
origin_count: the maximum number of origins to retrieve
"""
cur = self._cursor(cur)
query = """SELECT %s
FROM origin WHERE id >= %%s
ORDER BY id LIMIT %%s
""" % ",".join(
self.origin_get_range_cols
)
cur.execute(query, (origin_from, origin_count))
yield from cur
def _origin_query(
self,
url_pattern,
count=False,
offset=0,
limit=50,
regexp=False,
with_visit=False,
visit_types=None,
cur=None,
):
"""
Method factorizing query creation for searching and counting origins.
"""
cur = self._cursor(cur)
if count:
origin_cols = "COUNT(*)"
order_clause = ""
else:
origin_cols = ",".join(self.origin_cols)
order_clause = "ORDER BY id"
if not regexp:
operator = "ILIKE"
query_params = [f"%{url_pattern}%"]
else:
operator = "~*"
query_params = [url_pattern]
query = f"""
WITH filtered_origins AS (
SELECT *
FROM origin
WHERE url {operator} %s
{order_clause}
)
SELECT {origin_cols}
FROM filtered_origins AS o
"""
if with_visit or visit_types:
visit_predicat = (
"""
INNER JOIN origin_visit_status ovs USING (origin, visit)
INNER JOIN snapshot ON ovs.snapshot=snapshot.id
"""
if with_visit
else ""
)
type_predicat = (
f"AND ov.type=any(ARRAY{visit_types})" if visit_types else ""
)
query += f"""
WHERE EXISTS (
SELECT 1
FROM origin_visit ov
{visit_predicat}
WHERE ov.origin=o.id {type_predicat}
)
"""
if not count:
query += "OFFSET %s LIMIT %s"
query_params.extend([offset, limit])
cur.execute(query, query_params)
def origin_search(
self,
url_pattern: str,
offset: int = 0,
limit: int = 50,
regexp: bool = False,
with_visit: bool = False,
visit_types: Optional[List[str]] = None,
cur=None,
):
"""Search for origins whose urls contain a provided string pattern
or match a provided regular expression.
The search is performed in a case insensitive way.
Args:
url_pattern: the string pattern to search for in origin urls
offset: number of found origins to skip before returning
results
limit: the maximum number of found origins to return
regexp: if True, consider the provided pattern as a regular
expression and returns origins whose urls match it
with_visit: if True, filter out origins with no visit
"""
self._origin_query(
url_pattern,
offset=offset,
limit=limit,
regexp=regexp,
with_visit=with_visit,
visit_types=visit_types,
cur=cur,
)
yield from cur
def origin_count(self, url_pattern, regexp=False, with_visit=False, cur=None):
"""Count origins whose urls contain a provided string pattern
or match a provided regular expression.
The pattern search in origin urls is performed in a case insensitive
way.
Args:
url_pattern (str): the string pattern to search for in origin urls
regexp (bool): if True, consider the provided pattern as a regular
expression and returns origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
"""
self._origin_query(
url_pattern, count=True, regexp=regexp, with_visit=with_visit, cur=cur
)
return cur.fetchone()[0]
release_add_cols = [
"id",
"target",
"target_type",
"date",
"date_offset",
"date_neg_utc_offset",
"date_offset_bytes",
"name",
"comment",
"synthetic",
"raw_manifest",
"author_fullname",
"author_name",
"author_email",
]
release_get_cols = release_add_cols
def origin_snapshot_get_all(self, origin_url: str, cur=None) -> Iterable[Sha1Git]:
cur = self._cursor(cur)
query = f"""\
SELECT DISTINCT snapshot FROM origin_visit_status ovs
INNER JOIN origin o ON o.id = ovs.origin
WHERE o.url = '{origin_url}' and snapshot IS NOT NULL;
"""
cur.execute(query)
yield from map(lambda row: row[0], cur)
def release_get_from_list(self, releases, ignore_displayname=False, cur=None):
cur = self._cursor(cur)
query_keys = ", ".join(
self.mangle_query_key(k, "release", ignore_displayname)
for k in self.release_get_cols
)
yield from execute_values_generator(
cur,
"""
SELECT %s FROM (VALUES %%s) as t(sortkey, id)
LEFT JOIN release ON t.id = release.id
LEFT JOIN person author ON release.author = author.id
ORDER BY sortkey
"""
% query_keys,
((sortkey, id) for sortkey, id in enumerate(releases)),
)
def release_get_random(self, cur=None):
return self._get_random_row_from_table("release", ["id"], "id", cur)
_raw_extrinsic_metadata_context_cols = [
"origin",
"visit",
"snapshot",
"release",
"revision",
"path",
"directory",
]
"""The list of context columns for all artifact types."""
_raw_extrinsic_metadata_insert_cols = [
"id",
"type",
"target",
"authority_id",
"fetcher_id",
"discovery_date",
"format",
"metadata",
*_raw_extrinsic_metadata_context_cols,
]
"""List of columns of the raw_extrinsic_metadata table, used when writing
metadata."""
_raw_extrinsic_metadata_insert_query = f"""
INSERT INTO raw_extrinsic_metadata
({', '.join(_raw_extrinsic_metadata_insert_cols)})
VALUES ({', '.join('%s' for _ in _raw_extrinsic_metadata_insert_cols)})
ON CONFLICT (id)
DO NOTHING
"""
raw_extrinsic_metadata_get_cols = [
"raw_extrinsic_metadata.target",
"raw_extrinsic_metadata.type",
"discovery_date",
"metadata_authority.type",
"metadata_authority.url",
"metadata_fetcher.id",
"metadata_fetcher.name",
"metadata_fetcher.version",
*_raw_extrinsic_metadata_context_cols,
"format",
"raw_extrinsic_metadata.metadata",
]
"""List of columns of the raw_extrinsic_metadata, metadata_authority,
and metadata_fetcher tables, used when reading object metadata."""
_raw_extrinsic_metadata_select_query = f"""
SELECT
{', '.join(raw_extrinsic_metadata_get_cols)}
FROM raw_extrinsic_metadata
INNER JOIN metadata_authority
ON (metadata_authority.id=authority_id)
INNER JOIN metadata_fetcher ON (metadata_fetcher.id=fetcher_id)
"""
def raw_extrinsic_metadata_add(
self,
id: bytes,
type: str,
target: str,
discovery_date: datetime.datetime,
authority_id: int,
fetcher_id: int,
format: str,
metadata: bytes,
origin: Optional[str],
visit: Optional[int],
snapshot: Optional[str],
release: Optional[str],
revision: Optional[str],
path: Optional[bytes],
directory: Optional[str],
cur,
):
query = self._raw_extrinsic_metadata_insert_query
args: Dict[str, Any] = dict(
id=id,
type=type,
target=target,
authority_id=authority_id,
fetcher_id=fetcher_id,
discovery_date=discovery_date,
format=format,
metadata=metadata,
origin=origin,
visit=visit,
snapshot=snapshot,
release=release,
revision=revision,
path=path,
directory=directory,
)
params = [args[col] for col in self._raw_extrinsic_metadata_insert_cols]
cur.execute(query, params)
def raw_extrinsic_metadata_get(
self,
target: str,
authority_id: int,
after_time: Optional[datetime.datetime],
after_fetcher: Optional[int],
limit: int,
cur,
):
query_parts = [self._raw_extrinsic_metadata_select_query]
query_parts.append("WHERE raw_extrinsic_metadata.target=%s AND authority_id=%s")
args = [target, authority_id]
if after_fetcher is not None:
assert after_time
query_parts.append("AND (discovery_date, fetcher_id) > (%s, %s)")
args.extend([after_time, after_fetcher])
elif after_time is not None:
query_parts.append("AND discovery_date > %s")
args.append(after_time)
query_parts.append("ORDER BY discovery_date, fetcher_id")
if limit:
query_parts.append("LIMIT %s")
args.append(limit)
cur.execute(" ".join(query_parts), args)
yield from cur
def raw_extrinsic_metadata_get_by_ids(self, ids: List[Sha1Git], cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
self._raw_extrinsic_metadata_select_query
+ "INNER JOIN (VALUES %s) AS t(id) ON t.id = raw_extrinsic_metadata.id",
[(id_,) for id_ in ids],
)
def raw_extrinsic_metadata_get_authorities(self, id: str, cur=None):
cur = self._cursor(cur)
cur.execute(
"""
SELECT
DISTINCT metadata_authority.type, metadata_authority.url
FROM raw_extrinsic_metadata
INNER JOIN metadata_authority
ON (metadata_authority.id=authority_id)
WHERE raw_extrinsic_metadata.target = %s
""",
(id,),
)
yield from cur
metadata_fetcher_cols = ["name", "version"]
def metadata_fetcher_add(self, name: str, version: str, cur=None) -> None:
cur = self._cursor(cur)
cur.execute(
"INSERT INTO metadata_fetcher (name, version) "
"VALUES (%s, %s) ON CONFLICT DO NOTHING",
(name, version),
)
def metadata_fetcher_get(self, name: str, version: str, cur=None):
cur = self._cursor(cur)
cur.execute(
f"SELECT {', '.join(self.metadata_fetcher_cols)} "
f"FROM metadata_fetcher "
f"WHERE name=%s AND version=%s",
(name, version),
)
return cur.fetchone()
def metadata_fetcher_get_id(
self, name: str, version: str, cur=None
) -> Optional[int]:
cur = self._cursor(cur)
cur.execute(
"SELECT id FROM metadata_fetcher WHERE name=%s AND version=%s",
(name, version),
)
row = cur.fetchone()
if row:
return row[0]
else:
return None
metadata_authority_cols = ["type", "url"]
def metadata_authority_add(self, type: str, url: str, cur=None) -> None:
cur = self._cursor(cur)
cur.execute(
"INSERT INTO metadata_authority (type, url) "
"VALUES (%s, %s) ON CONFLICT DO NOTHING",
(type, url),
)
def metadata_authority_get(self, type: str, url: str, cur=None):
cur = self._cursor(cur)
cur.execute(
f"SELECT {', '.join(self.metadata_authority_cols)} "
f"FROM metadata_authority "
f"WHERE type=%s AND url=%s",
(type, url),
)
return cur.fetchone()
def metadata_authority_get_id(self, type: str, url: str, cur=None) -> Optional[int]:
cur = self._cursor(cur)
cur.execute(
"SELECT id FROM metadata_authority WHERE type=%s AND url=%s", (type, url)
)
row = cur.fetchone()
if row:
return row[0]
else:
return None
def _get_random_row_from_table(self, table_name, cols, id_col, cur=None):
random_sha1 = bytes(random.randint(0, 255) for _ in range(SHA1_SIZE))
cur = self._cursor(cur)
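        # Sample a uniformly random point in sha1 space: the first branch of
        # the UNION takes the closest row at or above it, and the second
        # branch falls back to the closest row below it, so a non-empty table
        # always yields a row (the selection is only approximately uniform).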
query = """
(SELECT {cols} FROM {table} WHERE {id_col} >= %s
ORDER BY {id_col} LIMIT 1)
UNION
(SELECT {cols} FROM {table} WHERE {id_col} < %s
ORDER BY {id_col} DESC LIMIT 1)
LIMIT 1
""".format(
cols=", ".join(cols), table=table_name, id_col=id_col
)
cur.execute(query, (random_sha1, random_sha1))
row = cur.fetchone()
if row:
return row[0]
dbversion_cols = ["version", "release", "description"]
def dbversion(self):
with self.transaction() as cur:
cur.execute(
f"""
SELECT {', '.join(self.dbversion_cols)}
FROM dbversion
ORDER BY version DESC
LIMIT 1
"""
)
return dict(zip(self.dbversion_cols, cur.fetchone()))
def check_dbversion(self):
dbversion = self.dbversion()["version"]
if dbversion != self.current_version:
logger.warning(
"database dbversion (%s) != %s current_version (%s)",
dbversion,
__name__,
self.current_version,
)
return dbversion == self.current_version
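# Illustrative sketch (not part of swh-storage itself): how a caller holding a
# Db instance might look up content rows through content_find() above. Only
# names defined in this module are used; the surrounding connection handling
# is an assumption.
def _example_content_find(db, sha1_bytes):
    """Hypothetical helper: return matching content rows as dicts."""
    with db.transaction() as cur:
        rows = db.content_find(sha1=sha1_bytes, cur=cur)
    # Each returned row follows the Db.content_find_cols column order
    return [dict(zip(db.content_find_cols, row)) for row in rows]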
|
SoftwareHeritage/swh-storage
|
swh/storage/postgresql/db.py
|
Python
|
gpl-3.0
| 48,474
|
[
"VisIt"
] |
141143d439144daef6be240cf79482264612f6f365c28ab42cd768406d84c62d
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBumphunter(RPackage):
"""Bump Hunter.
Tools for finding bumps in genomic data"""
homepage = "https://bioconductor.org/packages/bumphunter"
git = "https://git.bioconductor.org/packages/bumphunter.git"
version('1.26.0', commit='606bee8708a0911ced3efb197970b4c9fa52f2fa')
version('1.24.5', commit='29b874033a38e86103b58ef2d4a55f285758147b')
version('1.22.0', commit='fb71b193f4ef7fa12d100441e6eb498765f7afde')
version('1.20.0', commit='c9d8e7ab0c19299988e5d7fa74970312e9a1eac0')
version('1.16.0', commit='1c3ab4d1fd2d75b1586ccef12665960b3602080a')
depends_on('r@2.10:', type=('build', 'run'))
depends_on('r-s4vectors@0.9.25:', type=('build', 'run'))
depends_on('r-iranges@2.3.23:', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-foreach', type=('build', 'run'))
depends_on('r-iterators', type=('build', 'run'))
depends_on('r-locfit', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
depends_on('r-dorng', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-genomicfeatures', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r@3.4:', when='@1.20.0:', type=('build', 'run'))
depends_on('r@3.5:', when='@1.24.5:', type=('build', 'run'))
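    # Illustrative note (not part of the upstream package): the two trailing
    # depends_on lines tighten the minimum R version only for newer bumphunter
    # releases, e.g. 'r@3.5:' applies when the spec satisfies '@1.24.5:',
    # while the unconditional 'r@2.10:' constraint always holds.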
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-bumphunter/package.py
|
Python
|
lgpl-2.1
| 1,713
|
[
"Bioconductor"
] |
edf7a751b6ce53f70cb219189b31397788406289f1f12891a861eb8f1910a5c5
|
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
import configparser
from sysconfig import get_config_vars, get_path
from setuptools import SetuptoolsDeprecationWarning
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
from setuptools.command import bdist_egg, egg_info
from setuptools.wheel import Wheel
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
return struct.calcsize("P") == 8
def samefile(p1, p2):
"""
Determine if two paths reference the same file.
Augments os.path.samefile to work on Windows and
suppresses errors if the path doesn't exist.
"""
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
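# Illustrative sketch (not part of setuptools): samefile() above falls back to
# comparing normalized paths when os.path.samefile is unavailable or either
# path does not exist, so equivalent spellings still compare equal.
def _samefile_example():
    """Hypothetical illustration of the normalization fallback (the paths are
    made up and need not exist)."""
    return samefile('/tmp/pkg/../pkg/setup.py', '/tmp/pkg/setup.py')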
def _to_bytes(s):
return s.encode('utf8')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
def _one_liner(text):
return textwrap.dedent(text).strip().replace('\n', '; ')
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=', 'S', "list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed"),
('user', None, "install in user site-package '%s'" % site.USER_SITE)
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version',
'user'
]
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
# the --user option seems to be an opt-in one,
# so the default should be False.
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = '{}.{}'.format(*sys.version_info)
dist = get_distribution('setuptools')
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
print(tmpl.format(**locals()))
raise SystemExit()
def finalize_options(self):
self.version and self._render_version()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
# Derive from sys.version_info so two-digit minors (e.g. 3.10) are not
# mangled the way naive string slicing of sys.version would mangle them.
'py_version_short': '{}.{}'.format(*sys.version_info[:2]),
'py_version_nodot': '{}{}'.format(*sys.version_info[:2]),
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
elif self.user:
log.warn("WARNING: The user site-packages directory is disabled.")
self._fix_install_dir_for_user_site()
self.expand_basedirs()
self.expand_dirs()
self._expand(
'install_dir', 'script_dir', 'build_directory',
'site_dirs',
)
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Materialize as a set: a bare map() iterator would be exhausted by the
# first membership test below, silently breaking later --site-dirs checks.
normpath = set(map(normalize_path, sys.path))
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.org/simple/"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, str):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError as e:
raise DistutilsOptionError(
"--optimize must be 0, 1, or 2"
) from e
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
dirs = [
'install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',
]
self._expand_attrs(dirs)
def run(self, show_deprecation=True):
if show_deprecation:
self.announce(
"WARNING: The easy_install command is deprecated "
"and will be removed in a future version.",
log.WARN,
)
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except Exception:
pid = random.randint(0, sys.maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir, 'easy-install.pth')
if not os.path.exists(instdir):
try:
os.makedirs(instdir)
except (OSError, IOError):
self.cant_write_to_target()
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname() + '.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists:
os.unlink(testfile)
open(testfile, 'w').close()
os.unlink(testfile)
except (OSError, IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir with easy_install
pythonpath = os.environ.get('PYTHONPATH', '')
log.warn(self.__no_default_msg, self.install_dir, pythonpath)
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
if self.multi_version and not os.path.exists(pth_file):
self.pth_file = None # don't create a .pth file
self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip() # noqa
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip() # noqa
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://setuptools.readthedocs.io/en/latest/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip() # noqa
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname() + ".pth"
ok_file = pth_file + '.ok'
ok_exists = os.path.exists(ok_file)
tmpl = _one_liner("""
import os
f = open({ok_file!r}, 'w')
f.write('OK')
f.close()
""") + '\n'
try:
if ok_exists:
os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
os.makedirs(dirname, exist_ok=True)
f = open(pth_file, 'w')
except (OSError, IOError):
self.cant_write_to_target()
else:
try:
f.write(tmpl.format(**locals()))
f.close()
f = None
executable = sys.executable
if os.name == 'nt':
dirname, basename = os.path.split(executable)
alt = os.path.join(dirname, 'pythonw.exe')
use_alt = (
basename.lower() == 'python.exe' and
os.path.exists(alt)
)
if use_alt:
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable, '-E', '-c', 'pass'], 0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
@contextlib.contextmanager
def _tmpdir(self):
tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
try:
# cast to str as workaround for #709 and #710 and #712
yield str(tmpdir)
finally:
os.path.exists(tmpdir) and rmtree(tmpdir)
def easy_install(self, spec, deps=False):
with self._tmpdir() as tmpdir:
if not isinstance(spec, Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
dl = self.package_index.download(spec, tmpdir)
return self.install_item(None, dl, tmpdir, deps, True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable,
not self.always_copy, self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg += " (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence == DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if the download is in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location == download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
if dist in self.local_index[dist.key]:
self.local_index.remove(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = Requirement(str(distreq))
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound as e:
raise DistutilsError(str(e)) from e
except VersionConflict as e:
raise DistutilsError(e.report()) from e
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = (
"%r already exists in %s; build directory %s will not be kept"
)
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename) == setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents) == 1:
dist_filename = os.path.join(setup_base, contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script:
body = self._load_template(dev_path) % locals()
script_text = ScriptWriter.get_header(script_text) + body
self.write_script(script_name, _to_bytes(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://github.com/pypa/setuptools/issues/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
if self.dry_run:
return
mask = current_umask()
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.whl'):
return [self.install_wheel(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" %
os.path.abspath(dist_filename)
)
if len(setups) > 1:
raise DistutilsError(
"Multiple setup scripts in %s" %
os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(
self.install_dir,
os.path.basename(egg_path),
)
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
try:
new_dist_is_zipped = False
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f, m = self.unpack_and_compile, "Extracting"
else:
new_dist_is_zipped = True
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copy2, "Copying"
self.execute(
f,
(egg_path, destination),
(m + " %s to %s") % (
os.path.basename(egg_path),
os.path.dirname(destination)
),
)
update_dist_caches(
destination,
fix_zipimporter_caches=new_dist_is_zipped,
)
except Exception:
update_dist_caches(destination, fix_zipimporter_caches=False)
raise
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
dist.location = egg_path
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf, 'w')
f.write('Metadata-Version: 1.0\n')
for k, v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
# delete entry-point scripts to avoid duping
self.delete_blockers([
os.path.join(script_dir, args[0])
for args in ScriptWriter.get_args(dist)
])
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
f = open(txt, 'w')
f.write('\n'.join(locals()[name]) + '\n')
f.close()
def install_wheel(self, wheel_path, tmpdir):
wheel = Wheel(wheel_path)
assert wheel.is_compatible()
destination = os.path.join(self.install_dir, wheel.egg_name())
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
try:
self.execute(
wheel.install_as_egg,
(destination,),
("Installing %s to %s") % (
os.path.basename(wheel_path),
os.path.dirname(destination)
),
)
finally:
update_dist_caches(destination, fix_zipimporter_caches=False)
self.add_output(destination)
return self.egg_distribution(destination)
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip() # noqa
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""") # noqa
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += '\n' + self.__mv_warning
if self.install_dir not in map(normalize_path, sys.path):
msg += '\n' + self.__id_warning
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip() # noqa
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return '\n' + self.__editable_msg % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit as v:
raise DistutilsError(
"Setup script exited with %s" % (v.args[0],)
) from v
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist,
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
return dst if not self.dry_run else None  # skip writing files on dry runs
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
def byte_compile(self, to_compile):
if sys.dont_write_bytecode:
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run,
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.
""").strip()
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in self.config_vars.items():
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
for attr, val in scheme.items():
if getattr(self, attr, None) is None:
setattr(self, attr, val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
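# A short sketch of the $-substitution performed by _expand above:
# distutils' subst_vars fills placeholders from config_vars (the values
# here are hypothetical).
def _demo_scheme_expansion():
    from distutils.util import subst_vars
    template = '$base/lib/python$py_version_short/site-packages'
    return subst_vars(template, {'base': '/opt/py', 'py_version_short': '3.8'})
    # -> '/opt/py/lib/python3.8/site-packages'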
def _pythonpath():
items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
return filter(None, items)
def get_site_dirs():
"""
Return a list of 'site' dirs
"""
sitedirs = []
# start with PYTHONPATH
sitedirs.extend(_pythonpath())
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([
os.path.join(
prefix,
"lib",
"python{}.{}".format(*sys.version_info),
"site-packages",
),
os.path.join(prefix, "lib", "site-python"),
])
else:
sitedirs.extend([
prefix,
os.path.join(prefix, "lib", "site-packages"),
])
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
home_sp = os.path.join(
home,
'Library',
'Python',
'{}.{}'.format(*sys.version_info),
'site-packages',
)
sitedirs.append(home_sp)
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs:
sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
try:
sitedirs.extend(site.getsitepackages())
except AttributeError:
pass
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname, name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a configparser.RawConfigParser, or None
"""
f = open(dist_filename, 'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended - 12)
tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended - (12 + cfglen))
init = {'version': '', 'target_version': ''}
cfg = configparser.RawConfigParser(init)
try:
part = f.read(cfglen)
# Read up to the first null byte.
config = part.split(b'\0', 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.read_file(io.StringIO(config))  # readfp() was removed in Python 3.12
except configparser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
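# A worked sketch of the footer parsed above (values hypothetical): a
# bdist_wininst .exe ends with [config text][bitmap][12-byte footer], the
# footer being struct.pack('<iii', tag, cfglen, bmlen).
def _demo_wininst_footer():
    tag, cfglen, bmlen = 0x1234567A, 256, 0
    footer = struct.pack('<iii', tag, cfglen, bmlen)
    return struct.unpack('<iii', footer) == (tag, cfglen, bmlen)  # True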
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''),
('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts) == 3 and parts[2] == 'PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name).decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\', '/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
finally:
z.close()
prefixes = [(x.lower(), y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename, 'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir, path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
rel_paths = list(map(self.make_relative, self.paths))
if rel_paths:
log.debug("Saving %s", self.filename)
lines = self._wrap_lines(rel_paths)
data = '\n'.join(lines) + '\n'
if os.path.islink(self.filename):
os.unlink(self.filename)
with open(self.filename, 'wt') as f:
f.write(data)
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
@staticmethod
def _wrap_lines(lines):
return lines
def add(self, dist):
"""Add `dist` to the distribution map"""
new_path = (
dist.location not in self.paths and (
dist.location not in self.sitedirs or
# account for '.' being in PYTHONPATH
dist.location == os.getcwd()
)
)
if new_path:
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self, path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = '/' if os.altsep == '/' else os.sep
while len(npath) >= baselen:
if npath == self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
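# A minimal sketch of make_relative (hypothetical paths): entries under the
# .pth file's own directory are rewritten relative to it, keeping the
# install tree relocatable, while anything outside stays absolute:
#
#     pth = PthDistributions('/site/easy-install.pth')
#     pth.make_relative('/site/foo-1.0.egg')   # -> './foo-1.0.egg'
#     pth.make_relative('/elsewhere/bar.egg')  # -> '/elsewhere/bar.egg'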
class RewritePthDistributions(PthDistributions):
@classmethod
def _wrap_lines(cls, lines):
yield cls.prelude
for line in lines:
yield line
yield cls.postlude
prelude = _one_liner("""
import sys
sys.__plen = len(sys.path)
""")
postlude = _one_liner("""
import sys
new = sys.path[sys.__plen:]
del sys.path[sys.__plen:]
p = getattr(sys, '__egginsert', 0)
sys.path[p:p] = new
sys.__egginsert = p + len(new)
""")
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
PthDistributions = RewritePthDistributions
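# An illustration of the 'rewrite' technique (file content hypothetical):
# the prelude records len(sys.path), ordinary .pth processing appends the
# entries, and the postlude splices them to a stable insertion point shared
# by every rewritten .pth file:
#
#     import sys; sys.__plen = len(sys.path)
#     ./foo-1.0-py3.8.egg
#     ./bar-2.1-py3.8.egg
#     import sys; new = sys.path[sys.__plen:]; del sys.path[sys.__plen:]; ...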
def _first_line_re():
"""
Return a regular expression based on first_line_re suitable for matching
strings.
"""
if isinstance(first_line_re.pattern, str):
return first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
return re.compile(first_line_re.pattern.decode())
def auto_chmod(func, arg, exc):
    """rmtree error handler: on Windows, clear the read-only bit and retry
    the failed removal; otherwise re-raise the active exception."""
    if func in [os.unlink, os.remove] and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    _, ev, tb = sys.exc_info()
    # Raising a bare tuple (the Python 2 idiom formerly used here) is a
    # TypeError on Python 3; re-raise the original exception instead.
    raise ev.with_traceback(tb)
def update_dist_caches(dist_path, fix_zipimporter_caches):
"""
Fix any globally cached `dist_path` related data
`dist_path` should be a path of a newly installed egg distribution (zipped
or unzipped).
sys.path_importer_cache contains finder objects that have been cached when
importing data from the original distribution. Any such finders need to be
cleared since the replacement distribution might be packaged differently,
e.g. a zipped egg distribution might get replaced with an unzipped egg
folder or vice versa. Having the old finders cached may then cause Python
to attempt loading modules from the replacement distribution using an
incorrect loader.
zipimport.zipimporter objects are Python loaders charged with importing
data packaged inside zip archives. If stale loaders referencing the
original distribution, are left behind, they can fail to load modules from
the replacement distribution. E.g. if an old zipimport.zipimporter instance
is used to load data from a new zipped egg archive, it may cause the
operation to attempt to locate the requested data in the wrong location -
one indicated by the original distribution's zip archive directory
information. Such an operation may then fail outright, e.g. report having
read a 'bad local file header', or even worse, it may fail silently &
return invalid data.
zipimport._zip_directory_cache contains cached zip archive directory
information for all existing zipimport.zipimporter instances and all such
instances connected to the same archive share the same cached directory
information.
If asked, and the underlying Python implementation allows it, we can fix
all existing zipimport.zipimporter instances instead of having to track
them down and remove them one by one, by updating their shared cached zip
archive directory information. This, of course, assumes that the
replacement distribution is packaged as a zipped egg.
If not asked to fix existing zipimport.zipimporter instances, we still do
our best to clear any remaining zipimport.zipimporter related cached data
that might somehow later get used when attempting to load data from the new
distribution and thus cause such load operations to fail. Note that when
tracking down such remaining stale data, we can not catch every conceivable
usage from here, and we clear only those that we know of and have found to
cause problems if left alive. Any remaining caches should be updated by
whomever is in charge of maintaining them, i.e. they should be ready to
handle us replacing their zip archives with new distributions at runtime.
"""
# There are several other known sources of stale zipimport.zipimporter
# instances that we do not clear here, but might if ever given a reason to
# do so:
# * Global setuptools pkg_resources.working_set (a.k.a. 'master working
# set') may contain distributions which may in turn contain their
# zipimport.zipimporter loaders.
# * Several zipimport.zipimporter loaders held by local variables further
# up the function call stack when running the setuptools installation.
# * Already loaded modules may have their __loader__ attribute set to the
# exact loader instance used when importing them. Python 3.4 docs state
# that this information is intended mostly for introspection and so is
# not expected to cause us problems.
normalized_path = normalize_path(dist_path)
_uncache(normalized_path, sys.path_importer_cache)
if fix_zipimporter_caches:
_replace_zip_directory_cache_data(normalized_path)
else:
# Here, even though we do not want to fix existing and now stale
# zipimporter cache information, we still want to remove it. Related to
# Python's zip archive directory information cache, we clear each of
# its stale entries in two phases:
# 1. Clear the entry so attempting to access zip archive information
# via any existing stale zipimport.zipimporter instances fails.
# 2. Remove the entry from the cache so any newly constructed
# zipimport.zipimporter instances do not end up using old stale
# zip archive directory information.
# This whole stale data removal step does not seem strictly necessary,
# but has been left in because it was done before we started replacing
# the zip archive directory information cache content if possible, and
# there are no relevant unit tests that we can depend on to tell us if
# this is really needed.
_remove_and_clear_zip_directory_cache_data(normalized_path)
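# A minimal usage sketch (hypothetical path): after swapping an egg on
# disk, flush the stale finder/loader caches; fix_zipimporter_caches=True
# is only meaningful when the replacement is itself a zipped egg.
#
#     update_dist_caches('/site/demo-1.0-py3.8.egg',
#                        fix_zipimporter_caches=True)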
def _collect_zipimporter_cache_entries(normalized_path, cache):
"""
Return zipimporter cache entry keys related to a given normalized path.
Alternative path spellings (e.g. those using different character case or
those using alternative path separators) related to the same path are
included. Any sub-path entries are included as well, i.e. those
corresponding to zip archives embedded in other zip archives.
"""
result = []
prefix_len = len(normalized_path)
for p in cache:
np = normalize_path(p)
if (np.startswith(normalized_path) and
np[prefix_len:prefix_len + 1] in (os.sep, '')):
result.append(p)
return result
def _update_zipimporter_cache(normalized_path, cache, updater=None):
"""
Update zipimporter cache data for a given normalized path.
Any sub-path entries are processed as well, i.e. those corresponding to zip
archives embedded in other zip archives.
Given updater is a callable taking a cache entry key and the original entry
(after already removing the entry from the cache), and expected to update
the entry and possibly return a new one to be inserted in its place.
Returning None indicates that the entry should not be replaced with a new
one. If no updater is given, the cache entries are simply removed without
any additional processing, the same as if the updater simply returned None.
"""
for p in _collect_zipimporter_cache_entries(normalized_path, cache):
# N.B. pypy's custom zipimport._zip_directory_cache implementation does
# not support the complete dict interface:
# * Does not support item assignment, thus not allowing this function
# to be used only for removing existing cache entries.
# * Does not support the dict.pop() method, forcing us to use the
# get/del patterns instead. For more detailed information see the
# following links:
# https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
# http://bit.ly/2h9itJX
old_entry = cache[p]
del cache[p]
new_entry = updater and updater(p, old_entry)
if new_entry is not None:
cache[p] = new_entry
def _uncache(normalized_path, cache):
_update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
old_entry.clear()
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
_replace_zip_directory_cache_data = \
_remove_and_clear_zip_directory_cache_data
else:
def _replace_zip_directory_cache_data(normalized_path):
def replace_cached_zip_archive_directory_data(path, old_entry):
# N.B. In theory, we could load the zip directory information just
# once for all updated path spellings, and then copy it locally and
# update its contained path strings to contain the correct
# spelling, but that seems like a way too invasive move (this cache
# structure is not officially documented anywhere and could in
# theory change with new Python releases) for no significant
# benefit.
old_entry.clear()
zipimport.zipimporter(path)
old_entry.update(zipimport._zip_directory_cache[path])
return old_entry
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
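# For instance (a sketch): compile() accepts any syntactically valid
# source, so a True result only means "parses as Python":
def _demo_is_python():
    assert is_python('print("hi")')
    assert not is_python('echo "hi" && exit 0')  # shell syntax, not Python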
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
with io.open(executable, encoding='latin-1') as fp:
magic = fp.read(2)
except (OSError, IOError):
return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
return subprocess.list2cmdline([arg])
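# For example (a sketch): list2cmdline applies the MS C runtime quoting
# rules, so arguments with spaces gain surrounding quotes.
def _demo_nt_quote_arg():
    assert nt_quote_arg('simple') == 'simple'
    assert nt_quote_arg('has space') == '"has space"'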
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args):
pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error as e:
log.debug("chmod failed: %s", e)
class CommandSpec(list):
"""
A command spec for a #! header, specified as a list of arguments akin to
those passed to Popen.
"""
options = []
split_args = dict()
@classmethod
def best(cls):
"""
Choose the best CommandSpec class based on environmental conditions.
"""
return cls
@classmethod
def _sys_executable(cls):
_default = os.path.normpath(sys.executable)
return os.environ.get('__PYVENV_LAUNCHER__', _default)
@classmethod
def from_param(cls, param):
"""
Construct a CommandSpec from a parameter to build_scripts, which may
be None.
"""
if isinstance(param, cls):
return param
if isinstance(param, list):
return cls(param)
if param is None:
return cls.from_environment()
# otherwise, assume it's a string.
return cls.from_string(param)
@classmethod
def from_environment(cls):
return cls([cls._sys_executable()])
@classmethod
def from_string(cls, string):
"""
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
"""
items = shlex.split(string, **cls.split_args)
return cls(items)
def install_options(self, script_text):
self.options = shlex.split(self._extract_options(script_text))
cmdline = subprocess.list2cmdline(self)
if not isascii(cmdline):
self.options[:0] = ['-x']
@staticmethod
def _extract_options(orig_script):
"""
Extract any options from the first line of the script.
"""
first = (orig_script + '\n').splitlines()[0]
match = _first_line_re().match(first)
options = (match.group(1) or '') if match else ''
return options.strip()
def as_header(self):
return self._render(self + list(self.options))
@staticmethod
def _strip_quotes(item):
_QUOTES = '"\''
for q in _QUOTES:
if item.startswith(q) and item.endswith(q):
return item[1:-1]
return item
@staticmethod
def _render(items):
cmdline = subprocess.list2cmdline(
CommandSpec._strip_quotes(item.strip()) for item in items)
return '#!' + cmdline + '\n'
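# A small sketch of building a #! header from a command spec (interpreter
# path hypothetical):
def _demo_command_spec_header():
    cmd = CommandSpec.from_string('/usr/bin/python3 -E')
    return cmd.as_header()  # -> '#!/usr/bin/python3 -E\n'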
# For pbr compat; will be removed in a future version.
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
split_args = dict(posix=False)
class ScriptWriter:
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent(r"""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = %(spec)r
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)())
""").lstrip()
command_spec_class = CommandSpec
@classmethod
def get_script_args(cls, dist, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_args", EasyInstallDeprecationWarning)
writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
header = cls.get_script_header("", executable, wininst)
return writer.get_args(dist, header)
@classmethod
def get_script_header(cls, script_text, executable=None, wininst=False):
# for backward compatibility
warnings.warn(
"Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
if wininst:
executable = "python.exe"
return cls.get_header(script_text, executable)
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = cls.template % locals()
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
@staticmethod
def _ensure_safe_name(name):
"""
Prevent paths in *_scripts entry point names.
"""
has_path_sep = re.search(r'[\\/]', name)
if has_path_sep:
raise ValueError("Path separators not allowed in script names")
@classmethod
def get_writer(cls, force_windows):
# for backward compatibility
warnings.warn("Use best", EasyInstallDeprecationWarning)
return WindowsScriptWriter.best() if force_windows else cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter for this environment.
"""
if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
return WindowsScriptWriter.best()
else:
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header + script_text)
@classmethod
def get_header(cls, script_text="", executable=None):
"""Create a #! line, getting options (if any) from script_text"""
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
command_spec_class = WindowsCommandSpec
@classmethod
def get_writer(cls):
# for backward compatibility
warnings.warn("Use best", EasyInstallDeprecationWarning)
return cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
msg = (
"{ext} not listed in PATHEXT; scripts will not be "
"recognized as executables."
).format(**locals())
warnings.warn(msg, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield name + ext, header + script_text, 't', blockers
@classmethod
def _adjust_header(cls, type_, orig_header):
"""
Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
return new_header if cls._use_header(new_header) else orig_header
@staticmethod
def _use_header(new_header):
"""
Should _adjust_header use the replaced header?
On non-Windows systems, always use it. On
Windows systems, only use the replaced header if it resolves
to an executable on the system.
"""
clean_header = new_header[2:-1].strip('"')
return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_ == 'gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py', '.pyc', '.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield (name + ext, hdr + script_text, 't', blockers)
yield (
name + '.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
return shutil.rmtree(path, ignore_errors, onerror)
def current_umask():
tmp = os.umask(0o022)
os.umask(tmp)
return tmp
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self, *args, **kw):
with _patch_usage():
Distribution._show_help(self, *args, **kw)
if argv is None:
argv = sys.argv[1:]
with _patch_usage():
setup(
script_args=['-q', 'easy_install', '-v'] + argv,
script_name=sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands,
**kw
)
@contextlib.contextmanager
def _patch_usage():
import distutils.core
USAGE = textwrap.dedent("""
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
""").lstrip()
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
saved = distutils.core.gen_usage
distutils.core.gen_usage = gen_usage
try:
yield
finally:
distutils.core.gen_usage = saved
class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
"""
Warning for EasyInstall deprecations, bypassing suppression.
"""
| RalfBarkow/Zettelkasten | venv/lib/python3.9/site-packages/setuptools/command/easy_install.py | Python | gpl-3.0 | 86,430 | ["VisIt"] | 94c4cc05bc2c35d7fbbd83898cf0996025015d5a8eccab1df8d084d15efa7675 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2011 Edgewall Software
# Copyright (C) 2003-2007 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
"""Trac Environment model and related APIs."""
from __future__ import with_statement
import os.path
import setuptools
import sys
from urlparse import urlsplit
from trac import db_default
from trac.admin import AdminCommandError, IAdminCommandProvider
from trac.cache import CacheManager
from trac.config import *
from trac.core import Component, ComponentManager, implements, Interface, \
ExtensionPoint, TracError
from trac.db.api import (DatabaseManager, QueryContextManager,
TransactionContextManager, with_transaction)
from trac.util import copytree, create_file, get_pkginfo, lazy, makedirs
from trac.util.concurrency import threading
from trac.util.text import exception_to_unicode, path_to_unicode, printerr, \
printout
from trac.util.translation import _, N_
from trac.versioncontrol import RepositoryManager
from trac.web.href import Href
__all__ = ['Environment', 'IEnvironmentSetupParticipant', 'open_environment']
class ISystemInfoProvider(Interface):
"""Provider of system information, displayed in the "About Trac"
page and in internal error reports.
"""
def get_system_info():
"""Yield a sequence of `(name, version)` tuples describing the
name and version information of external packages used by a
component.
"""
class IEnvironmentSetupParticipant(Interface):
"""Extension point interface for components that need to
participate in the creation and upgrading of Trac environments,
for example to create additional database tables."""
def environment_created():
"""Called when a new Trac environment is created."""
def environment_needs_upgrade(db):
"""Called when Trac checks whether the environment needs to be
upgraded.
Should return `True` if this participant needs an upgrade to
be performed, `False` otherwise.
"""
def upgrade_environment(db):
"""Actually perform an environment upgrade.
Implementations of this method don't need to commit any
database transactions. This is done implicitly for each
participant if the upgrade succeeds without an error being
raised.
However, if the `upgrade_environment` consists of small,
restartable, steps of upgrade, it can decide to commit on its
own after each successful step.
"""
class Environment(Component, ComponentManager):
"""Trac environment manager.
Trac stores project information in a Trac environment. It consists
of a directory structure containing among other things:
* a configuration file,
* project-specific templates and plugins,
* the wiki and ticket attachments files,
* the SQLite database file (stores tickets, wiki pages...)
in case the database backend is sqlite
"""
implements(ISystemInfoProvider)
required = True
system_info_providers = ExtensionPoint(ISystemInfoProvider)
setup_participants = ExtensionPoint(IEnvironmentSetupParticipant)
components_section = ConfigSection('components',
"""This section is used to enable or disable components
provided by plugins, as well as by Trac itself. The component
to enable/disable is specified via the name of the
option. Whether its enabled is determined by the option value;
setting the value to `enabled` or `on` will enable the
component, any other value (typically `disabled` or `off`)
will disable the component.
The option name is either the fully qualified name of the
components or the module/package prefix of the component. The
former enables/disables a specific component, while the latter
enables/disables any component in the specified
package/module.
Consider the following configuration snippet:
{{{
[components]
trac.ticket.report.ReportModule = disabled
webadmin.* = enabled
}}}
The first option tells Trac to disable the
[wiki:TracReports report module].
The second option instructs Trac to enable all components in
the `webadmin` package. Note that the trailing wildcard is
required for module/package matching.
To view the list of active components, go to the ''Plugins''
page on ''About Trac'' (requires `CONFIG_VIEW`
[wiki:TracPermissions permissions]).
See also: TracPlugins
""")
shared_plugins_dir = PathOption('inherit', 'plugins_dir', '',
"""Path to the //shared plugins directory//.
Plugins in that directory are loaded in addition to those in
the directory of the environment `plugins`, with this one
taking precedence.
(''since 0.11'')""")
base_url = Option('trac', 'base_url', '',
"""Reference URL for the Trac deployment.
This is the base URL that will be used when producing
documents that will be used outside of the web browsing
context, like for example when inserting URLs pointing to Trac
resources in notification e-mails.""")
base_url_for_redirect = BoolOption('trac', 'use_base_url_for_redirect',
False,
"""Optionally use `[trac] base_url` for redirects.
In some configurations, usually involving running Trac behind
an HTTP proxy, Trac can't automatically reconstruct the URL
that is used to access it. You may need to use this option to
force Trac to use the `base_url` setting also for
redirects. This introduces the obvious limitation that this
environment will only be usable when accessible from that URL,
as redirects are frequently used. ''(since 0.10.5)''""")
secure_cookies = BoolOption('trac', 'secure_cookies', False,
"""Restrict cookies to HTTPS connections.
When true, set the `secure` flag on all cookies so that they
are only sent to the server on HTTPS connections. Use this if
your Trac instance is only accessible through HTTPS. (''since
0.11.2'')""")
project_name = Option('project', 'name', 'My Project',
"""Name of the project.""")
project_description = Option('project', 'descr', 'My example project',
"""Short description of the project.""")
project_url = Option('project', 'url', '',
"""URL of the main project web site, usually the website in
which the `base_url` resides. This is used in notification
e-mails.""")
project_admin = Option('project', 'admin', '',
"""E-Mail address of the project's administrator.""")
project_admin_trac_url = Option('project', 'admin_trac_url', '.',
"""Base URL of a Trac instance where errors in this Trac
should be reported.
This can be an absolute or relative URL, or '.' to reference
this Trac instance. An empty value will disable the reporting
buttons. (''since 0.11.3'')""")
project_footer = Option('project', 'footer',
N_('Visit the Trac open source project at<br />'
'<a href="http://trac.edgewall.org/">'
'http://trac.edgewall.org/</a>'),
"""Page footer text (right-aligned).""")
project_icon = Option('project', 'icon', 'common/trac.ico',
"""URL of the icon of the project.""")
log_type = Option('logging', 'log_type', 'none',
"""Logging facility to use.
Should be one of (`none`, `file`, `stderr`, `syslog`, `winlog`).""")
log_file = Option('logging', 'log_file', 'trac.log',
"""If `log_type` is `file`, this should be a path to the
log-file. Relative paths are resolved relative to the `log`
directory of the environment.""")
log_level = Option('logging', 'log_level', 'DEBUG',
"""Level of verbosity in log.
Should be one of (`CRITICAL`, `ERROR`, `WARN`, `INFO`, `DEBUG`).""")
log_format = Option('logging', 'log_format', None,
"""Custom logging format.
If nothing is set, the following will be used:
Trac[$(module)s] $(levelname)s: $(message)s
In addition to regular key names supported by the Python
logger library (see
http://docs.python.org/library/logging.html), one could use:
- $(path)s the path for the current environment
- $(basename)s the last path component of the current environment
- $(project)s the project name
Note the usage of `$(...)s` instead of `%(...)s` as the latter form
would be interpreted by the ConfigParser itself.
Example:
`($(thread)d) Trac[$(basename)s:$(module)s] $(levelname)s: $(message)s`
''(since 0.10.5)''""")
def __init__(self, path, create=False, options=[]):
"""Initialize the Trac environment.
:param path: the absolute path to the Trac environment
:param create: if `True`, the environment is created and
populated with default data; otherwise, the
environment is expected to already exist.
:param options: A list of `(section, name, value)` tuples that
define configuration options
"""
ComponentManager.__init__(self)
self.path = path
self.systeminfo = []
self._href = self._abs_href = None
if create:
self.create(options)
else:
self.verify()
self.setup_config()
if create:
for setup_participant in self.setup_participants:
setup_participant.environment_created()
def get_systeminfo(self):
"""Return a list of `(name, version)` tuples describing the
name and version information of external packages used by Trac
and plugins.
"""
info = self.systeminfo[:]
for provider in self.system_info_providers:
info.extend(provider.get_system_info() or [])
info.sort(key=lambda (name, version): (name != 'Trac', name.lower()))
return info
# ISystemInfoProvider methods
def get_system_info(self):
from trac import core, __version__ as VERSION
yield 'Trac', get_pkginfo(core).get('version', VERSION)
yield 'Python', sys.version
yield 'setuptools', setuptools.__version__
from trac.util.datefmt import pytz
if pytz is not None:
yield 'pytz', pytz.__version__
def component_activated(self, component):
"""Initialize additional member variables for components.
Every component activated through the `Environment` object
gets three member variables: `env` (the environment object),
`config` (the environment configuration) and `log` (a logger
object)."""
component.env = self
component.config = self.config
component.log = self.log
def _component_name(self, name_or_class):
name = name_or_class
if not isinstance(name_or_class, basestring):
name = name_or_class.__module__ + '.' + name_or_class.__name__
return name.lower()
@property
def _component_rules(self):
try:
return self._rules
except AttributeError:
self._rules = {}
for name, value in self.components_section.options():
if name.endswith('.*'):
name = name[:-2]
self._rules[name.lower()] = value.lower() in ('enabled', 'on')
return self._rules
def is_component_enabled(self, cls):
"""Implemented to only allow activation of components that are
not disabled in the configuration.
This is called by the `ComponentManager` base class when a
component is about to be activated. If this method returns
`False`, the component does not get activated. If it returns
`None`, the component only gets activated if it is located in
the `plugins` directory of the environment.
"""
component_name = self._component_name(cls)
# Disable the pre-0.11 WebAdmin plugin
# Please note that there's no recommendation to uninstall the
# plugin because doing so would obviously break the backwards
# compatibility that the new integration administration
# interface tries to provide for old WebAdmin extensions
if component_name.startswith('webadmin.'):
self.log.info("The legacy TracWebAdmin plugin has been "
"automatically disabled, and the integrated "
"administration interface will be used "
"instead.")
return False
rules = self._component_rules
cname = component_name
while cname:
enabled = rules.get(cname)
if enabled is not None:
return enabled
idx = cname.rfind('.')
if idx < 0:
break
cname = cname[:idx]
# By default, all components in the trac package are enabled
return component_name.startswith('trac.') or None
def enable_component(self, cls):
"""Enable a component or module."""
self._component_rules[self._component_name(cls)] = True
def verify(self):
"""Verify that the provided path points to a valid Trac environment
directory."""
with open(os.path.join(self.path, 'VERSION'), 'r') as fd:
assert fd.read(26) == 'Trac Environment Version 1'
def get_db_cnx(self):
"""Return a database connection from the connection pool
:deprecated: Use :meth:`db_transaction` or :meth:`db_query` instead
`db_transaction` for obtaining the `db` database connection
which can be used for performing any query
(SELECT/INSERT/UPDATE/DELETE)::
with env.db_transaction as db:
...
`db_query` for obtaining a `db` database connection which can
be used for performing SELECT queries only::
with env.db_query as db:
...
"""
return DatabaseManager(self).get_connection()
@lazy
def db_exc(self):
"""Return an object (typically a module) containing all the
backend-specific exception types as attributes, named
according to the Python Database API
(http://www.python.org/dev/peps/pep-0249/).
To catch a database exception, use the following pattern::
try:
with env.db_transaction as db:
...
except env.db_exc.IntegrityError, e:
...
"""
return DatabaseManager(self).get_exceptions()
def with_transaction(self, db=None):
"""Decorator for transaction functions :deprecated:"""
return with_transaction(self, db)
def get_read_db(self):
"""Return a database connection for read purposes :deprecated:
See `trac.db.api.get_read_db` for detailed documentation."""
return DatabaseManager(self).get_connection(readonly=True)
@property
def db_query(self):
"""Return a context manager which can be used to obtain a
read-only database connection.
Example::
with env.db_query as db:
cursor = db.cursor()
cursor.execute("SELECT ...")
for row in cursor.fetchall():
...
Note that a connection retrieved this way can be "called"
directly in order to execute a query::
with env.db_query as db:
for row in db("SELECT ..."):
...
If you don't need to manipulate the connection itself, this
can even be simplified to::
for row in env.db_query("SELECT ..."):
...
:warning: after a `with env.db_query as db` block, though the
`db` variable is still available, you shouldn't use it as it
might have been closed when exiting the context, if this
context was the outermost context (`db_query` or
`db_transaction`).
"""
return QueryContextManager(self)
@property
def db_transaction(self):
"""Return a context manager which can be used to obtain a
writable database connection.
Example::
with env.db_transaction as db:
cursor = db.cursor()
cursor.execute("UPDATE ...")
Upon successful exit of the context, the context manager will
commit the transaction. In case of nested contexts, only the
outermost context performs a commit. However, should an
exception happen, any context manager will perform a rollback.
Like for its read-only counterpart, you can directly execute a
DML query on the `db`::
with env.db_transaction as db:
db("UPDATE ...")
If you don't need to manipulate the connection itself, this
can also be simplified to::
env.db_transaction("UPDATE ...")
:warning: after a `with env.db_transaction as db` block,
though the `db` variable is still available, you shouldn't
use it as it might have been closed when exiting the
context, if this context was the outermost context
(`db_query` or `db_transaction`).
"""
return TransactionContextManager(self)
def shutdown(self, tid=None):
"""Close the environment."""
RepositoryManager(self).shutdown(tid)
DatabaseManager(self).shutdown(tid)
if tid is None:
self.log.removeHandler(self._log_handler)
self._log_handler.flush()
self._log_handler.close()
del self._log_handler
def get_repository(self, reponame=None, authname=None):
"""Return the version control repository with the given name,
or the default repository if `None`.
The standard way of retrieving repositories is to use the
methods of `RepositoryManager`. This method is retained here
for backward compatibility.
:param reponame: the name of the repository
:param authname: the user name for authorization (not used
anymore, left here for compatibility with
0.11)
"""
return RepositoryManager(self).get_repository(reponame)
def create(self, options=[]):
"""Create the basic directory structure of the environment,
initialize the database and populate the configuration file
with default values.
If options contains ('inherit', 'file'), default values will
not be loaded; they are expected to be provided by that file
or other options.
"""
# Create the directory structure
if not os.path.exists(self.path):
os.mkdir(self.path)
os.mkdir(self.get_log_dir())
os.mkdir(self.get_htdocs_dir())
os.mkdir(os.path.join(self.path, 'plugins'))
# Create a few files
create_file(os.path.join(self.path, 'VERSION'),
'Trac Environment Version 1\n')
create_file(os.path.join(self.path, 'README'),
'This directory contains a Trac environment.\n'
'Visit http://trac.edgewall.org/ for more information.\n')
# Setup the default configuration
os.mkdir(os.path.join(self.path, 'conf'))
create_file(os.path.join(self.path, 'conf', 'trac.ini.sample'))
config = Configuration(os.path.join(self.path, 'conf', 'trac.ini'))
for section, name, value in options:
config.set(section, name, value)
config.save()
self.setup_config()
if not any((section, option) == ('inherit', 'file')
for section, option, value in options):
self.config.set_defaults(self)
self.config.save()
# Create the database
DatabaseManager(self).init_db()
def get_version(self, db=None, initial=False):
"""Return the current version of the database. If the
optional argument `initial` is set to `True`, the version of
the database used at the time of creation will be returned.
In practice, for databases created before 0.11, this will
return `False`, which is "older" than any db version number.
:since: 0.11
:since 0.13: deprecation warning: the `db` parameter is no
longer used and will be removed in version 0.14
"""
rows = self.db_query("""
SELECT value FROM system WHERE name='%sdatabase_version'
""" % ('initial_' if initial else ''))
return rows and int(rows[0][0])
def setup_config(self):
"""Load the configuration file."""
self.config = Configuration(os.path.join(self.path, 'conf', 'trac.ini'),
{'envname': os.path.basename(self.path)})
self.setup_log()
from trac.loader import load_components
plugins_dir = self.shared_plugins_dir
load_components(self, plugins_dir and (plugins_dir,))
def get_templates_dir(self):
"""Return absolute path to the templates directory."""
return os.path.join(self.path, 'templates')
def get_htdocs_dir(self):
"""Return absolute path to the htdocs directory."""
return os.path.join(self.path, 'htdocs')
def get_log_dir(self):
"""Return absolute path to the log directory."""
return os.path.join(self.path, 'log')
def setup_log(self):
"""Initialize the logging sub-system."""
from trac.log import logger_handler_factory
logtype = self.log_type
logfile = self.log_file
if logtype == 'file' and not os.path.isabs(logfile):
logfile = os.path.join(self.get_log_dir(), logfile)
format = self.log_format
if format:
format = format.replace('$(', '%(') \
.replace('%(path)s', self.path) \
.replace('%(basename)s', os.path.basename(self.path)) \
.replace('%(project)s', self.project_name)
self.log, self._log_handler = logger_handler_factory(
logtype, logfile, self.log_level, self.path, format=format)
from trac import core, __version__ as VERSION
self.log.info('-' * 32 + ' environment startup [Trac %s] ' + '-' * 32,
get_pkginfo(core).get('version', VERSION))
def get_known_users(self, cnx=None):
"""Generator that yields information about all known users,
i.e. users that have logged in to this Trac environment and
possibly set their name and email.
This function generates one tuple for every user, of the form
(username, name, email) ordered alpha-numerically by username.
:param cnx: the database connection; if omitted, a new
connection is retrieved
:since 0.13: deprecation warning: the `cnx` parameter is no
longer used and will be removed in version 0.14
"""
for username, name, email in self.db_query("""
SELECT DISTINCT s.sid, n.value, e.value
FROM session AS s
LEFT JOIN session_attribute AS n ON (n.sid=s.sid
and n.authenticated=1 AND n.name = 'name')
LEFT JOIN session_attribute AS e ON (e.sid=s.sid
AND e.authenticated=1 AND e.name = 'email')
WHERE s.authenticated=1 ORDER BY s.sid
"""):
yield username, name, email
def backup(self, dest=None):
"""Create a backup of the database.
:param dest: Destination file; if not specified, the backup is
stored in a file called db_name.trac_version.bak
"""
return DatabaseManager(self).backup(dest)
def needs_upgrade(self):
"""Return whether the environment needs to be upgraded."""
with self.db_query as db:
for participant in self.setup_participants:
if participant.environment_needs_upgrade(db):
self.log.warn("Component %s requires environment upgrade",
participant)
return True
return False
def upgrade(self, backup=False, backup_dest=None):
"""Upgrade database.
:param backup: whether or not to backup before upgrading
:param backup_dest: name of the backup file
:return: whether the upgrade was performed
"""
upgraders = []
with self.db_query as db:
for participant in self.setup_participants:
if participant.environment_needs_upgrade(db):
upgraders.append(participant)
if not upgraders:
return
if backup:
self.backup(backup_dest)
for participant in upgraders:
self.log.info("%s.%s upgrading...", participant.__module__,
participant.__class__.__name__)
with self.db_transaction as db:
participant.upgrade_environment(db)
# Database schema may have changed, so close all connections
DatabaseManager(self).shutdown()
return True
@property
def href(self):
"""The application root path"""
if not self._href:
self._href = Href(urlsplit(self.abs_href.base)[2])
return self._href
@property
def abs_href(self):
"""The application URL"""
if not self._abs_href:
if not self.base_url:
self.log.warn("base_url option not set in configuration, "
"generated links may be incorrect")
self._abs_href = Href('')
else:
self._abs_href = Href(self.base_url)
return self._abs_href
class EnvironmentSetup(Component):
"""Manage automatic environment upgrades."""
required = True
implements(IEnvironmentSetupParticipant)
# IEnvironmentSetupParticipant methods
def environment_created(self):
"""Insert default data into the database."""
with self.env.db_transaction as db:
for table, cols, vals in db_default.get_data(db):
db.executemany("INSERT INTO %s (%s) VALUES (%s)"
% (table, ','.join(cols), ','.join(['%s' for c in cols])),
vals)
self._update_sample_config()
def environment_needs_upgrade(self, db):
dbver = self.env.get_version(db)
if dbver == db_default.db_version:
return False
elif dbver > db_default.db_version:
raise TracError(_('Database newer than Trac version'))
self.log.info("Trac database schema version is %d, should be %d",
dbver, db_default.db_version)
return True
def upgrade_environment(self, db):
"""Each db version should have its own upgrade module, named
upgrades/dbN.py, where 'N' is the version number (int).
"""
cursor = db.cursor()
dbver = self.env.get_version()
for i in range(dbver + 1, db_default.db_version + 1):
name = 'db%i' % i
try:
upgrades = __import__('upgrades', globals(), locals(), [name])
script = getattr(upgrades, name)
except AttributeError:
raise TracError(_("No upgrade module for version %(num)i "
"(%(version)s.py)", num=i, version=name))
script.do_upgrade(self.env, i, cursor)
cursor.execute("""
UPDATE system SET value=%s WHERE name='database_version'
""", (i,))
self.log.info("Upgraded database version from %d to %d", i - 1, i)
db.commit()
self._update_sample_config()
# Internal methods
def _update_sample_config(self):
filename = os.path.join(self.env.path, 'conf', 'trac.ini.sample')
if not os.path.isfile(filename):
return
config = Configuration(filename)
for section, default_options in config.defaults().iteritems():
for name, value in default_options.iteritems():
config.set(section, name, value)
try:
config.save()
self.log.info("Wrote sample configuration file with the new "
"settings and their default values: %s",
filename)
except IOError, e:
self.log.warn("Couldn't write sample configuration file (%s)", e,
exc_info=True)
env_cache = {}
env_cache_lock = threading.Lock()
def open_environment(env_path=None, use_cache=False):
"""Open an existing environment object, and verify that the database is up
to date.
:param env_path: absolute path to the environment directory; if
omitted, the value of the `TRAC_ENV` environment
variable is used
:param use_cache: whether the environment should be cached for
subsequent invocations of this function
:return: the `Environment` object
"""
if not env_path:
env_path = os.getenv('TRAC_ENV')
if not env_path:
raise TracError(_('Missing environment variable "TRAC_ENV". '
'Trac requires this variable to point to a valid '
'Trac environment.'))
env_path = os.path.normcase(os.path.normpath(env_path))
if use_cache:
with env_cache_lock:
env = env_cache.get(env_path)
if env and env.config.parse_if_needed():
# The environment configuration has changed, so shut it down
# and remove it from the cache so that it gets reinitialized
env.log.info('Reloading environment due to configuration '
'change')
env.shutdown()
del env_cache[env_path]
env = None
if env is None:
env = env_cache.setdefault(env_path, open_environment(env_path))
else:
CacheManager(env).reset_metadata()
else:
env = Environment(env_path)
needs_upgrade = False
try:
needs_upgrade = env.needs_upgrade()
except Exception, e: # e.g. no database connection
env.log.error("Exception caught while checking for upgrade: %s",
exception_to_unicode(e, traceback=True))
if needs_upgrade:
raise TracError(_('The Trac Environment needs to be upgraded.\n\n'
'Run "trac-admin %(path)s upgrade"',
path=env_path))
return env
class EnvironmentAdmin(Component):
"""trac-admin command provider for environment administration."""
implements(IAdminCommandProvider)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('deploy', '<directory>',
'Extract static resources from Trac and all plugins',
None, self._do_deploy)
yield ('hotcopy', '<backupdir> [--no-database]',
"""Make a hot backup copy of an environment
The database is backed up to the 'db' directory of the
destination, unless the --no-database option is
specified.
""",
None, self._do_hotcopy)
yield ('upgrade', '',
'Upgrade database to current version',
None, self._do_upgrade)
def _do_deploy(self, dest):
target = os.path.normpath(dest)
chrome_target = os.path.join(target, 'htdocs')
script_target = os.path.join(target, 'cgi-bin')
# Copy static content
makedirs(target, overwrite=True)
makedirs(chrome_target, overwrite=True)
from trac.web.chrome import Chrome
printout(_("Copying resources from:"))
for provider in Chrome(self.env).template_providers:
paths = list(provider.get_htdocs_dirs() or [])
if not len(paths):
continue
printout(' %s.%s' % (provider.__module__,
provider.__class__.__name__))
for key, root in paths:
if not root:
continue
source = os.path.normpath(root)
printout(' ', source)
if os.path.exists(source):
dest = os.path.join(chrome_target, key)
copytree(source, dest, overwrite=True)
# Create and copy scripts
makedirs(script_target, overwrite=True)
printout(_("Creating scripts."))
data = {'env': self.env, 'executable': sys.executable}
for script in ('cgi', 'fcgi', 'wsgi'):
dest = os.path.join(script_target, 'trac.' + script)
template = Chrome(self.env).load_template('deploy_trac.' + script,
'text')
stream = template.generate(**data)
with open(dest, 'w') as out:
stream.render('text', out=out, encoding='utf-8')
def _do_hotcopy(self, dest, no_db=None):
if no_db not in (None, '--no-database'):
raise AdminCommandError(_("Invalid argument '%(arg)s'", arg=no_db),
show_usage=True)
if os.path.exists(dest):
raise TracError(_("hotcopy can't overwrite existing '%(dest)s'",
dest=path_to_unicode(dest)))
import shutil
# Bogus statement to lock the database while copying files
with self.env.db_transaction as db:
db("UPDATE system SET name=NULL WHERE name IS NULL")
printout(_("Hotcopying %(src)s to %(dst)s ...",
src=path_to_unicode(self.env.path),
dst=path_to_unicode(dest)))
db_str = self.env.config.get('trac', 'database')
prefix, db_path = db_str.split(':', 1)
skip = []
if prefix == 'sqlite':
db_path = os.path.join(self.env.path, os.path.normpath(db_path))
# don't copy the journal (also, this would fail on Windows)
skip = [db_path + '-journal', db_path + '-stmtjrnl']
if no_db:
skip.append(db_path)
try:
copytree(self.env.path, dest, symlinks=1, skip=skip)
retval = 0
except shutil.Error, e:
retval = 1
printerr(_("The following errors happened while copying "
"the environment:"))
for (src, dst, err) in e.args[0]:
if src in err:
printerr(' %s' % err)
else:
printerr(" %s: '%s'" % (err, path_to_unicode(src)))
# db backup for non-sqlite
if prefix != 'sqlite' and not no_db:
printout(_("Backing up database ..."))
sql_backup = os.path.join(dest, 'db',
'%s-db-backup.sql' % prefix)
self.env.backup(sql_backup)
printout(_("Hotcopy done."))
return retval
def _do_upgrade(self, no_backup=None):
if no_backup not in (None, '-b', '--no-backup'):
raise AdminCommandError(_("Invalid arguments"), show_usage=True)
if not self.env.needs_upgrade():
printout(_("Database is up to date, no upgrade necessary."))
return
try:
self.env.upgrade(backup=no_backup is None)
except TracError, e:
raise TracError(_("Backup failed: %(msg)s.\nUse '--no-backup' to "
"upgrade without doing a backup.",
msg=unicode(e)))
# Remove wiki-macros if it is empty and warn if it isn't
wiki_macros = os.path.join(self.env.path, 'wiki-macros')
try:
entries = os.listdir(wiki_macros)
except OSError:
pass
else:
if entries:
printerr(_("Warning: the wiki-macros directory in the "
"environment is non-empty, but Trac\n"
"doesn't load plugins from there anymore. "
"Please remove it by hand."))
else:
try:
os.rmdir(wiki_macros)
except OSError, e:
printerr(_("Error while removing wiki-macros: %(err)s\n"
"Trac doesn't load plugins from wiki-macros "
"anymore. Please remove it by hand.",
err=exception_to_unicode(e)))
printout(_("Upgrade done.\n\n"
"You may want to upgrade the Trac documentation now by "
"running:\n\n trac-admin %(path)s wiki upgrade",
path=path_to_unicode(self.env.path)))
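# A minimal usage sketch (illustrative only; not part of Trac itself): open an
# existing environment and iterate over its known users through the API
# documented above. The environment path is a placeholder.
def _example_list_known_users(env_path='/path/to/trac-env'):
    env = open_environment(env_path)
    try:
        # get_known_users() runs a read-only query through env.db_query
        return [(username, name, email)
                for username, name, email in env.get_known_users()]
    finally:
        env.shutdown()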
| moreati/trac-gitsvn | trac/env.py | Python | bsd-3-clause | 38,387 | ["VisIt"] | b9f02baeece5235d7257d58e5b984cb5e979db004638f424fcaaa9dc32ece164 |
#!/usr/bin/env python
'''
setup board.h for chibios
'''
import argparse
import sys
import fnmatch
import os
import dma_resolver
import shlex
import pickle
import re
import shutil
parser = argparse.ArgumentParser("chibios_pins.py")
parser.add_argument(
'-D', '--outdir', type=str, default=None, help='Output directory')
parser.add_argument(
'--bootloader', action='store_true', default=False, help='configure for bootloader')
parser.add_argument(
'hwdef', type=str, default=None, help='hardware definition file')
parser.add_argument(
'--params', type=str, default=None, help='user default params path')
args = parser.parse_args()
# output variables for each pin
f4f7_vtypes = ['MODER', 'OTYPER', 'OSPEEDR', 'PUPDR', 'ODR', 'AFRL', 'AFRH']
f1_vtypes = ['CRL', 'CRH', 'ODR']
f1_input_sigs = ['RX', 'MISO', 'CTS']
f1_output_sigs = ['TX', 'MOSI', 'SCK', 'RTS', 'CH1', 'CH2', 'CH3', 'CH4']
af_labels = ['USART', 'UART', 'SPI', 'I2C', 'SDIO', 'SDMMC', 'OTG', 'JT', 'TIM', 'CAN']
vtypes = []
# number of pins in each port
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16,
'F': 16,
'G': 16,
'H': 2,
'I': 0,
'J': 0,
'K': 0
}
ports = pincount.keys()
portmap = {}
# dictionary of all config lines, indexed by first word
config = {}
# alternate pin mappings
altmap = {}
# list of all pins in config file order
allpins = []
# list of configs by type
bytype = {}
# list of alt configs by type
alttype = {}
# list of configs by label
bylabel = {}
# list of alt configs by label
altlabel = {}
# list of SPI devices
spidev = []
# dictionary of ROMFS files
romfs = {}
# SPI bus list
spi_list = []
# all config lines in order
alllines = []
# allow for extra env vars
env_vars = {}
# build flags for ChibiOS makefiles
build_flags = []
# sensor lists
imu_list = []
compass_list = []
baro_list = []
all_lines = []
mcu_type = None
dual_USB_enabled = False
def is_int(str):
'''check if a string is an integer'''
try:
int(str)
except Exception:
return False
return True
def error(str):
'''show an error and exit'''
print("Error: " + str)
sys.exit(1)
def get_mcu_lib(mcu):
'''get library file for the chosen MCU'''
import importlib
try:
return importlib.import_module(mcu)
except ImportError:
error("Unable to find module for MCU %s" % mcu)
def setup_mcu_type_defaults():
'''setup defaults for given mcu type'''
global pincount, ports, portmap, vtypes, mcu_type
lib = get_mcu_lib(mcu_type)
if hasattr(lib, 'pincount'):
pincount = lib.pincount
if mcu_series.startswith("STM32F1"):
vtypes = f1_vtypes
else:
vtypes = f4f7_vtypes
ports = pincount.keys()
# setup default as input pins
for port in ports:
portmap[port] = []
for pin in range(pincount[port]):
portmap[port].append(generic_pin(port, pin, None, 'INPUT', []))
def get_alt_function(mcu, pin, function):
'''return alternative function number for a pin'''
lib = get_mcu_lib(mcu)
if function.endswith('_TXINV') or function.endswith('_RXINV'):
# RXINV and TXINV are special labels for inversion pins, not alt-functions
return None
if hasattr(lib, "AltFunction_map"):
alt_map = lib.AltFunction_map
else:
# just check if Alt Func is available or not
for l in af_labels:
if function.startswith(l):
return 0
return None
if function and function.endswith("_RTS") and (
function.startswith('USART') or function.startswith('UART')):
# we do software RTS
return None
for l in af_labels:
if function.startswith(l):
s = pin + ":" + function
if s not in alt_map:
error("Unknown pin function %s for MCU %s" % (s, mcu))
return alt_map[s]
return None
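# Illustrative sketch only: the per-MCU library returned by get_mcu_lib() is
# expected to expose an AltFunction_map keyed by "PIN:FUNCTION" strings, as
# looked up above. The entries and AF numbers below are hypothetical and not
# taken from any real MCU definition file.
def _example_alt_function_lookup():
    example_alt_map = {
        'PA9:USART1_TX': 7,   # hypothetical alternate-function number
        'PA10:USART1_RX': 7,  # hypothetical alternate-function number
    }
    key = 'PA9' + ':' + 'USART1_TX'
    return example_alt_map.get(key)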
def have_type_prefix(ptype):
'''return True if we have a peripheral starting with the given peripheral type'''
for t in list(bytype.keys()) + list(alttype.keys()):
if t.startswith(ptype):
return True
return False
def get_ADC1_chan(mcu, pin):
'''return ADC1 channel for an analog pin'''
import importlib
try:
lib = importlib.import_module(mcu)
ADC1_map = lib.ADC1_map
except ImportError:
error("Unable to find ADC1_Map for MCU %s" % mcu)
if pin not in ADC1_map:
error("Unable to find ADC1 channel for pin %s" % pin)
return ADC1_map[pin]
class generic_pin(object):
'''class to hold pin definition'''
def __init__(self, port, pin, label, type, extra):
global mcu_series
self.portpin = "P%s%u" % (port, pin)
self.port = port
self.pin = pin
self.label = label
self.type = type
self.extra = extra
self.af = None
if type == 'OUTPUT':
self.sig_dir = 'OUTPUT'
else:
self.sig_dir = 'INPUT'
if mcu_series.startswith("STM32F1") and self.label is not None:
self.f1_pin_setup()
# check that labels and pin types are consistent
for prefix in ['USART', 'UART', 'TIM']:
if label is None or type is None:
continue
if type.startswith(prefix):
a1 = label.split('_')
a2 = type.split('_')
if a1[0] != a2[0]:
error("Peripheral prefix mismatch for %s %s %s" % (self.portpin, label, type))
def f1_pin_setup(self):
for label in af_labels:
if self.label.startswith(label):
if self.label.endswith(tuple(f1_input_sigs)):
self.sig_dir = 'INPUT'
self.extra.append('FLOATING')
elif self.label.endswith(tuple(f1_output_sigs)):
self.sig_dir = 'OUTPUT'
elif label == 'I2C':
self.sig_dir = 'OUTPUT'
elif label == 'OTG':
self.sig_dir = 'OUTPUT'
else:
error("Unknown signal type %s:%s for %s!" % (self.portpin, self.label, mcu_type))
def has_extra(self, v):
'''return true if we have the given extra token'''
return v in self.extra
def extra_prefix(self, prefix):
'''find an extra token starting with the given prefix'''
for e in self.extra:
if e.startswith(prefix):
return e
return None
def extra_value(self, name, type=None, default=None):
'''find an extra value of given type'''
v = self.extra_prefix(name)
if v is None:
return default
if v[len(name)] != '(' or v[-1] != ')':
error("Badly formed value for %s: %s\n" % (name, v))
ret = v[len(name) + 1:-1]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed value for %s: %s\n" % (name, ret))
return ret
def is_RTS(self):
'''return true if this is a RTS pin'''
if self.label and self.label.endswith("_RTS") and (
self.type.startswith('USART') or self.type.startswith('UART')):
return True
return False
def is_CS(self):
'''return true if this is a CS pin'''
return self.has_extra("CS") or self.type == "CS"
def get_MODER_value(self):
'''return one of ALTERNATE, OUTPUT, ANALOG, INPUT'''
if self.af is not None:
v = "ALTERNATE"
elif self.type == 'OUTPUT':
v = "OUTPUT"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT"
elif self.is_RTS():
v = "OUTPUT"
else:
v = "INPUT"
return v
def get_MODER(self):
'''return one of ALTERNATE, OUTPUT, ANALOG, INPUT'''
return "PIN_MODE_%s(%uU)" % (self.get_MODER_value(), self.pin)
def get_OTYPER_value(self):
'''return one of PUSHPULL, OPENDRAIN'''
v = 'PUSHPULL'
if self.type.startswith('I2C'):
# default I2C to OPENDRAIN
v = 'OPENDRAIN'
values = ['PUSHPULL', 'OPENDRAIN']
for e in self.extra:
if e in values:
v = e
return v
def get_OTYPER(self):
'''return one of PUSHPULL, OPENDRAIN'''
return "PIN_OTYPE_%s(%uU)" % (self.get_OTYPER_value(), self.pin)
def get_OSPEEDR_value(self):
'''return one of SPEED_VERYLOW, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH'''
# on STM32F4 these speeds correspond to 2MHz, 25MHz, 50MHz and 100MHz
values = ['SPEED_VERYLOW', 'SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in values:
v = e
return v
def get_OSPEEDR_int(self):
'''return value from 0 to 3 for speed'''
values = ['SPEED_VERYLOW', 'SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = self.get_OSPEEDR_value()
if v not in values:
error("Bad OSPEED %s" % v)
return values.index(v)
def get_OSPEEDR(self):
'''return one of SPEED_VERYLOW, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH'''
return "PIN_O%s(%uU)" % (self.get_OSPEEDR_value(), self.pin)
def get_PUPDR_value(self):
'''return one of FLOATING, PULLUP, PULLDOWN'''
values = ['FLOATING', 'PULLUP', 'PULLDOWN']
v = 'FLOATING'
if self.is_CS():
v = "PULLUP"
# generate pullups for UARTs
if (self.type.startswith('USART') or
self.type.startswith('UART')) and (
(self.label.endswith('_TX') or
self.label.endswith('_RX') or
self.label.endswith('_CTS') or
self.label.endswith('_RTS'))):
v = "PULLUP"
# generate pullups for SDIO and SDMMC
if (self.type.startswith('SDIO') or
self.type.startswith('SDMMC')) and (
(self.label.endswith('_D0') or
self.label.endswith('_D1') or
self.label.endswith('_D2') or
self.label.endswith('_D3') or
self.label.endswith('_CMD'))):
v = "PULLUP"
for e in self.extra:
if e in values:
v = e
return v
def get_PUPDR(self):
'''return one of FLOATING, PULLUP, PULLDOWN wrapped in PIN_PUPDR_ macro'''
return "PIN_PUPDR_%s(%uU)" % (self.get_PUPDR_value(), self.pin)
def get_ODR_F1_value(self):
'''return one of LOW, HIGH'''
values = ['LOW', 'HIGH']
v = 'HIGH'
if self.type == 'OUTPUT':
v = 'LOW'
elif self.label is not None and self.label.startswith('I2C'):
v = 'LOW'
for e in self.extra:
if e in values:
v = e
# for some controllers, input pull-up/pull-down is selected via ODR
if self.type == "INPUT":
v = 'LOW'
if 'PULLUP' in self.extra:
v = "HIGH"
return v
def get_ODR_value(self):
'''return one of LOW, HIGH'''
if mcu_series.startswith("STM32F1"):
return self.get_ODR_F1_value()
values = ['LOW', 'HIGH']
v = 'HIGH'
for e in self.extra:
if e in values:
v = e
return v
def get_ODR(self):
'''return one of LOW, HIGH wrapped in PIN_ODR macro'''
return "PIN_ODR_%s(%uU)" % (self.get_ODR_value(), self.pin)
def get_AFIO_value(self):
'''return AFIO'''
af = self.af
if af is None:
af = 0
return af
def get_AFIO(self):
'''return AFIO wrapped in PIN_AFIO_AF macro'''
return "PIN_AFIO_AF(%uU, %uU)" % (self.pin, self.get_AFIO_value())
def get_AFRL(self):
'''return AFIO low 8'''
if self.pin >= 8:
return None
return self.get_AFIO()
def get_AFRH(self):
'''return AFIO high 8'''
if self.pin < 8:
return None
return self.get_AFIO()
def get_CR_F1(self):
'''return CR FLAGS for STM32F1xx'''
# Check Speed
if self.sig_dir != "INPUT" or self.af is not None:
speed_values = ['SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in speed_values:
v = e
speed_str = "PIN_%s(%uU) |" % (v, self.pin)
elif self.is_CS():
speed_str = "PIN_SPEED_LOW(%uU) |" % (self.pin)
else:
speed_str = ""
if self.af is not None:
if self.label.endswith('_RX'):
# uart RX is configured as an input, and can be pullup, pulldown or floating
if 'PULLUP' in self.extra or 'PULLDOWN' in self.extra:
v = 'PUD'
else:
v = "NOPULL"
elif self.label.startswith('I2C'):
v = "AF_OD"
else:
v = "AF_PP"
elif self.is_CS():
v = "OUTPUT_PP"
elif self.sig_dir == 'OUTPUT':
if 'OPENDRAIN' in self.extra:
v = 'OUTPUT_OD'
else:
v = "OUTPUT_PP"
elif self.type.startswith('ADC'):
v = "ANALOG"
else:
v = "PUD"
if 'FLOATING' in self.extra:
v = "NOPULL"
mode_str = "PIN_MODE_%s(%uU)" % (v, self.pin)
return "%s %s" % (speed_str, mode_str)
def get_CR(self):
'''return CR FLAGS'''
if mcu_series.startswith("STM32F1"):
return self.get_CR_F1()
if self.sig_dir != "INPUT":
speed_values = ['SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in speed_values:
v = e
speed_str = "PIN_%s(%uU) |" % (v, self.pin)
else:
speed_str = ""
# Check Alternate function
if self.type.startswith('I2C'):
v = "AF_OD"
elif self.sig_dir == 'OUTPUT':
if self.af is not None:
v = "AF_PP"
else:
v = "OUTPUT_PP"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT_PP"
elif self.is_RTS():
v = "OUTPUT_PP"
else:
v = "PUD"
if 'FLOATING' in self.extra:
v = "NOPULL"
mode_str = "PIN_MODE_%s(%uU)" % (v, self.pin)
return "%s %s" % (speed_str, mode_str)
def get_CRH(self):
if self.pin < 8:
return None
return self.get_CR()
def get_CRL(self):
if self.pin >= 8:
return None
return self.get_CR()
def pal_modeline(self):
'''return a mode line suitable for palSetModeLine()'''
# MODER, OTYPER, OSPEEDR, PUPDR, ODR, AFRL, AFRH
ret = 'PAL_STM32_MODE_' + self.get_MODER_value()
ret += '|PAL_STM32_OTYPE_' + self.get_OTYPER_value()
ret += '|PAL_STM32_SPEED(%u)' % self.get_OSPEEDR_int()
ret += '|PAL_STM32_PUPDR_' + self.get_PUPDR_value()
af = self.get_AFIO_value()
if af != 0:
ret += '|PAL_STM32_ALTERNATE(%u)' % af
return ret
def __str__(self):
str = ''
if self.af is not None:
str += " AF%u" % self.af
if self.type.startswith('ADC1'):
str += " ADC1_IN%u" % get_ADC1_chan(mcu_type, self.portpin)
if self.extra_value('PWM', type=int):
str += " PWM%u" % self.extra_value('PWM', type=int)
return "P%s%u %s %s%s" % (self.port, self.pin, self.label, self.type,
str)
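# A minimal usage sketch (never called): shows how a generic_pin entry is turned
# into ChibiOS PAL macros by the methods above. The mcu_series value, pin label,
# type and AF number are hypothetical; in the real flow they come from the
# hwdef.dat parser.
def _example_pin_macros():
    global mcu_series
    mcu_series = 'STM32F405'  # assumption for illustration only
    pin = generic_pin('A', 9, 'USART1_TX', 'USART1', [])
    pin.af = 7  # hypothetical alternate-function number
    # get_MODER() gives e.g. "PIN_MODE_ALTERNATE(9U)"; pal_modeline() gives a
    # PAL_STM32_* mode string suitable for palSetModeLine()
    return pin.get_MODER(), pin.pal_modeline()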
def get_config(name, column=0, required=True, default=None, type=None, spaces=False, aslist=False):
'''get a value from config dictionary'''
if name not in config:
if required and default is None:
error("missing required value %s in hwdef.dat" % name)
return default
if aslist:
return config[name]
if len(config[name]) < column + 1:
if not required:
return None
error("missing required value %s in hwdef.dat (column %u)" % (name,
column))
if spaces:
ret = ' '.join(config[name][column:])
else:
ret = config[name][column]
if type is not None:
if type == int and ret.startswith('0x'):
try:
ret = int(ret, 16)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
else:
try:
ret = type(ret)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
return ret
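# Sketch of the data get_config() operates on (hypothetical values): each
# hwdef.dat line is stored in the module-level 'config' dict, keyed by its
# first token with the remaining tokens kept as a list.
_EXAMPLE_CONFIG_ENTRY = {'OSCILLATOR_HZ': ['8000000']}
# With 'config' populated like that, get_config('OSCILLATOR_HZ') would return
# the string '8000000' (column 0), and get_config('OSCILLATOR_HZ', type=int)
# would return the integer 8000000.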
def get_mcu_config(name, required=False):
'''get a value from the mcu dictionary'''
lib = get_mcu_lib(mcu_type)
if not hasattr(lib, 'mcu'):
error("Missing mcu config for %s" % mcu_type)
if name not in lib.mcu:
if required:
error("Missing required mcu config %s for %s" % (name, mcu_type))
return None
return lib.mcu[name]
def make_line(label):
'''return a line for a label'''
if label in bylabel:
p = bylabel[label]
line = 'PAL_LINE(GPIO%s,%uU)' % (p.port, p.pin)
else:
line = "0"
return line
def enable_can(f, num_ifaces):
'''setup for a CAN enabled board'''
f.write('#define HAL_NUM_CAN_IFACES %d\n' % num_ifaces)
env_vars['HAL_NUM_CAN_IFACES'] = str(num_ifaces)
def has_sdcard_spi():
'''check for sdcard connected to spi bus'''
for dev in spidev:
if(dev[0] == 'sdcard'):
return True
return False
def write_mcu_config(f):
'''write MCU config defines'''
f.write('// MCU type (ChibiOS define)\n')
f.write('#define %s_MCUCONF\n' % get_config('MCU'))
mcu_subtype = get_config('MCU', 1)
if mcu_subtype.endswith('xx'):
f.write('#define %s_MCUCONF\n\n' % mcu_subtype[:-2])
f.write('#define %s\n\n' % mcu_subtype)
f.write('// crystal frequency\n')
f.write('#define STM32_HSECLK %sU\n\n' % get_config('OSCILLATOR_HZ'))
f.write('// UART used for stdout (printf)\n')
if get_config('STDOUT_SERIAL', required=False):
f.write('#define HAL_STDOUT_SERIAL %s\n\n' % get_config('STDOUT_SERIAL'))
f.write('// baudrate used for stdout (printf)\n')
f.write('#define HAL_STDOUT_BAUDRATE %u\n\n' % get_config('STDOUT_BAUDRATE', type=int))
if have_type_prefix('SDIO'):
f.write('// SDIO available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
build_flags.append('USE_FATFS=yes')
elif have_type_prefix('SDMMC'):
f.write('// SDMMC available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
f.write('#define STM32_SDC_USE_SDMMC1 TRUE\n')
build_flags.append('USE_FATFS=yes')
elif has_sdcard_spi():
f.write('// MMC via SPI available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_MMC_SPI TRUE\n')
f.write('#define HAL_USE_SDC FALSE\n')
f.write('#define HAL_SDCARD_SPI_HOOK TRUE\n')
build_flags.append('USE_FATFS=yes')
else:
f.write('#define HAL_USE_SDC FALSE\n')
build_flags.append('USE_FATFS=no')
env_vars['DISABLE_SCRIPTING'] = True
if 'OTG1' in bytype:
f.write('#define STM32_USB_USE_OTG1 TRUE\n')
f.write('#define HAL_USE_USB TRUE\n')
f.write('#define HAL_USE_SERIAL_USB TRUE\n')
if 'OTG2' in bytype:
f.write('#define STM32_USB_USE_OTG2 TRUE\n')
if get_config('PROCESS_STACK', required=False):
env_vars['PROCESS_STACK'] = get_config('PROCESS_STACK')
else:
env_vars['PROCESS_STACK'] = "0x2000"
if get_config('MAIN_STACK', required=False):
env_vars['MAIN_STACK'] = get_config('MAIN_STACK')
else:
env_vars['MAIN_STACK'] = "0x400"
if get_config('IOMCU_FW', required=False):
env_vars['IOMCU_FW'] = get_config('IOMCU_FW')
else:
env_vars['IOMCU_FW'] = 0
if get_config('PERIPH_FW', required=False):
env_vars['PERIPH_FW'] = get_config('PERIPH_FW')
else:
env_vars['PERIPH_FW'] = 0
# write any custom STM32 defines
using_chibios_can = False
for d in alllines:
if d.startswith('STM32_'):
f.write('#define %s\n' % d)
if d.startswith('define '):
if 'HAL_USE_CAN' in d:
using_chibios_can = True
f.write('#define %s\n' % d[7:])
if have_type_prefix('CAN') and not using_chibios_can:
if 'CAN1' in bytype and 'CAN2' in bytype:
enable_can(f, 2)
else:
enable_can(f, 1)
flash_size = get_config('FLASH_SIZE_KB', type=int)
f.write('#define BOARD_FLASH_SIZE %u\n' % flash_size)
env_vars['BOARD_FLASH_SIZE'] = flash_size
f.write('#define CRT1_AREAS_NUMBER 1\n')
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
f.write('\n// location of loaded firmware\n')
f.write('#define FLASH_LOAD_ADDRESS 0x%08x\n' % (0x08000000 + flash_reserve_start*1024))
if args.bootloader:
f.write('#define FLASH_BOOTLOADER_LOAD_KB %u\n' % get_config('FLASH_BOOTLOADER_LOAD_KB', type=int))
f.write('#define FLASH_RESERVE_END_KB %u\n' % get_config('FLASH_RESERVE_END_KB', default=0, type=int))
f.write('#define APP_START_OFFSET_KB %u\n' % get_config('APP_START_OFFSET_KB', default=0, type=int))
f.write('\n')
ram_map = get_mcu_config('RAM_MAP', True)
f.write('// memory regions\n')
regions = []
total_memory = 0
for (address, size, flags) in ram_map:
regions.append('{(void*)0x%08x, 0x%08x, 0x%02x }' % (address, size*1024, flags))
total_memory += size
f.write('#define HAL_MEMORY_REGIONS %s\n' % ', '.join(regions))
f.write('#define HAL_MEMORY_TOTAL_KB %u\n' % total_memory)
f.write('#define HAL_RAM0_START 0x%08x\n' % ram_map[0][0])
ram_reserve_start = get_config('RAM_RESERVE_START', default=0, type=int)
if ram_reserve_start > 0:
f.write('#define HAL_RAM_RESERVE_START 0x%08x\n' % ram_reserve_start)
f.write('\n// CPU serial number (12 bytes)\n')
f.write('#define UDID_START 0x%08x\n\n' % get_mcu_config('UDID_START', True))
f.write('\n// APJ board ID (for bootloaders)\n')
f.write('#define APJ_BOARD_ID %s\n' % get_config('APJ_BOARD_ID'))
lib = get_mcu_lib(mcu_type)
build_info = lib.build
if get_mcu_config('CPU_FLAGS') and get_mcu_config('CORTEX'):
# CPU flags specified in mcu file
cortex = get_mcu_config('CORTEX')
env_vars['CPU_FLAGS'] = get_mcu_config('CPU_FLAGS').split()
build_info['MCU'] = cortex
print("MCU Flags: %s %s" % (cortex, env_vars['CPU_FLAGS']))
elif mcu_series.startswith("STM32F1"):
cortex = "cortex-m3"
env_vars['CPU_FLAGS'] = ["-mcpu=%s" % cortex]
build_info['MCU'] = cortex
else:
cortex = "cortex-m4"
env_vars['CPU_FLAGS'] = ["-mcpu=%s" % cortex, "-mfpu=fpv4-sp-d16", "-mfloat-abi=hard"]
build_info['MCU'] = cortex
if get_mcu_config('EXPECTED_CLOCK'):
f.write('#define HAL_EXPECTED_SYSCLOCK %u\n' % get_mcu_config('EXPECTED_CLOCK'))
env_vars['CORTEX'] = cortex
if not args.bootloader:
if cortex == 'cortex-m4':
env_vars['CPU_FLAGS'].append('-DARM_MATH_CM4')
elif cortex == 'cortex-m7':
env_vars['CPU_FLAGS'].append('-DARM_MATH_CM7')
if not mcu_series.startswith("STM32F1") and not args.bootloader:
env_vars['CPU_FLAGS'].append('-u_printf_float')
build_info['ENV_UDEFS'] = "-DCHPRINTF_USE_FLOAT=1"
# setup build variables
for v in build_info.keys():
build_flags.append('%s=%s' % (v, build_info[v]))
# setup for bootloader build
if args.bootloader:
f.write('''
#define HAL_BOOTLOADER_BUILD TRUE
#define HAL_USE_ADC FALSE
#define HAL_USE_EXT FALSE
#define HAL_NO_UARTDRIVER
#define HAL_NO_PRINTF
#define HAL_NO_CCM
#define CH_DBG_STATISTICS FALSE
#define CH_CFG_USE_TM FALSE
#define CH_CFG_USE_REGISTRY FALSE
#define CH_CFG_USE_WAITEXIT FALSE
#define CH_CFG_USE_DYNAMIC FALSE
#define CH_CFG_USE_MEMPOOLS FALSE
#define CH_CFG_USE_OBJ_FIFOS FALSE
#define CH_DBG_FILL_THREADS FALSE
#define CH_CFG_USE_SEMAPHORES FALSE
#define CH_CFG_USE_HEAP FALSE
#define CH_CFG_USE_MUTEXES FALSE
#define CH_CFG_USE_CONDVARS FALSE
#define CH_CFG_USE_CONDVARS_TIMEOUT FALSE
#define CH_CFG_USE_EVENTS FALSE
#define CH_CFG_USE_EVENTS_TIMEOUT FALSE
#define CH_CFG_USE_MESSAGES FALSE
#define CH_CFG_USE_MAILBOXES FALSE
#define CH_CFG_USE_FACTORY FALSE
#define CH_CFG_USE_MEMCORE FALSE
#define HAL_USE_I2C FALSE
#define HAL_USE_PWM FALSE
#define CH_DBG_ENABLE_STACK_CHECK FALSE
''')
if env_vars.get('ROMFS_UNCOMPRESSED', False):
f.write('#define HAL_ROMFS_UNCOMPRESSED\n')
if 'AP_PERIPH' in env_vars:
f.write('''
#define CH_DBG_ENABLE_STACK_CHECK FALSE
''')
def write_ldscript(fname):
'''write ldscript.ld for this board'''
flash_size = get_config('FLASH_USE_MAX_KB', type=int, default=0)
if flash_size == 0:
flash_size = get_config('FLASH_SIZE_KB', type=int)
# space to reserve for bootloader and storage at start of flash
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
env_vars['FLASH_RESERVE_START_KB'] = str(flash_reserve_start)
# space to reserve for storage at end of flash
flash_reserve_end = get_config('FLASH_RESERVE_END_KB', default=0, type=int)
# ram layout
ram_map = get_mcu_config('RAM_MAP', True)
flash_base = 0x08000000 + flash_reserve_start * 1024
if not args.bootloader:
flash_length = flash_size - (flash_reserve_start + flash_reserve_end)
else:
flash_length = get_config('FLASH_BOOTLOADER_LOAD_KB', type=int)
env_vars['FLASH_TOTAL'] = flash_length * 1024
print("Generating ldscript.ld")
f = open(fname, 'w')
ram0_start = ram_map[0][0]
ram0_len = ram_map[0][1] * 1024
# possibly reserve some memory for app/bootloader comms
ram_reserve_start = get_config('RAM_RESERVE_START', default=0, type=int)
ram0_start += ram_reserve_start
ram0_len -= ram_reserve_start
f.write('''/* generated ldscript.ld */
MEMORY
{
flash : org = 0x%08x, len = %uK
ram0 : org = 0x%08x, len = %u
}
INCLUDE common.ld
''' % (flash_base, flash_length, ram0_start, ram0_len))
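# Illustrative sketch (hypothetical board values): with FLASH_SIZE_KB 1024,
# the default FLASH_RESERVE_START_KB of 16, no end reserve and a RAM_MAP
# starting at 0x20000000 with 128KB, the generated ldscript.ld would read:
#
#   MEMORY
#   {
#       flash : org = 0x08004000, len = 1008K
#       ram0  : org = 0x20000000, len = 131072
#   }
#   INCLUDE common.ld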
def copy_common_linkerscript(outdir, hwdef):
dirpath = os.path.dirname(hwdef)
shutil.copy(os.path.join(dirpath, "../common/common.ld"),
os.path.join(outdir, "common.ld"))
def get_USB_IDs():
'''return tuple of USB VID/PID'''
global dual_USB_enabled
if dual_USB_enabled:
# use pidcodes allocated ID
default_vid = 0x1209
default_pid = 0x5740
else:
default_vid = 0x1209
default_pid = 0x5741
return (get_config('USB_VENDOR', type=int, default=default_vid), get_config('USB_PRODUCT', type=int, default=default_pid))
def write_USB_config(f):
'''write USB config defines'''
if not have_type_prefix('OTG'):
return
f.write('// USB configuration\n')
(USB_VID, USB_PID) = get_USB_IDs()
f.write('#define HAL_USB_VENDOR_ID 0x%04x\n' % int(USB_VID))
f.write('#define HAL_USB_PRODUCT_ID 0x%04x\n' % int(USB_PID))
f.write('#define HAL_USB_STRING_MANUFACTURER %s\n' % get_config("USB_STRING_MANUFACTURER", default="\"ArduPilot\""))
default_product = "%BOARD%"
if args.bootloader:
default_product += "-BL"
f.write('#define HAL_USB_STRING_PRODUCT %s\n' % get_config("USB_STRING_PRODUCT", default="\"%s\""%default_product))
f.write('#define HAL_USB_STRING_SERIAL %s\n' % get_config("USB_STRING_SERIAL", default="\"%SERIAL%\""))
f.write('\n\n')
def write_SPI_table(f):
'''write SPI device table'''
f.write('\n// SPI device table\n')
devlist = []
for dev in spidev:
if len(dev) != 7:
error("Badly formed SPIDEV line %s" % dev)
name = '"' + dev[0] + '"'
bus = dev[1]
devid = dev[2]
cs = dev[3]
mode = dev[4]
lowspeed = dev[5]
highspeed = dev[6]
if not bus.startswith('SPI') or bus not in spi_list:
error("Bad SPI bus in SPIDEV line %s" % dev)
if not devid.startswith('DEVID') or not is_int(devid[5:]):
error("Bad DEVID in SPIDEV line %s" % dev)
if cs not in bylabel or not bylabel[cs].is_CS():
error("Bad CS pin in SPIDEV line %s" % dev)
if mode not in ['MODE0', 'MODE1', 'MODE2', 'MODE3']:
error("Bad MODE in SPIDEV line %s" % dev)
if not lowspeed.endswith('*MHZ') and not lowspeed.endswith('*KHZ'):
error("Bad lowspeed value %s in SPIDEV line %s" % (lowspeed, dev))
if not highspeed.endswith('*MHZ') and not highspeed.endswith('*KHZ'):
error("Bad highspeed value %s in SPIDEV line %s" % (highspeed,
dev))
cs_pin = bylabel[cs]
pal_line = 'PAL_LINE(GPIO%s,%uU)' % (cs_pin.port, cs_pin.pin)
devidx = len(devlist)
f.write(
'#define HAL_SPI_DEVICE%-2u SPIDesc(%-17s, %2u, %2u, %-19s, SPIDEV_%s, %7s, %7s)\n'
% (devidx, name, spi_list.index(bus), int(devid[5:]), pal_line,
mode, lowspeed, highspeed))
devlist.append('HAL_SPI_DEVICE%u' % devidx)
f.write('#define HAL_SPI_DEVICE_LIST %s\n\n' % ','.join(devlist))
for dev in spidev:
f.write("#define HAL_WITH_SPI_%s 1\n" % dev[0].upper().replace("-","_"))
f.write("\n")
def write_SPI_config(f):
'''write SPI config defines'''
global spi_list
for t in list(bytype.keys()) + list(alttype.keys()):
if t.startswith('SPI'):
spi_list.append(t)
spi_list = sorted(spi_list)
if len(spi_list) == 0:
f.write('#define HAL_USE_SPI FALSE\n')
return
devlist = []
for dev in spi_list:
n = int(dev[3:])
devlist.append('HAL_SPI%u_CONFIG' % n)
f.write(
'#define HAL_SPI%u_CONFIG { &SPID%u, %u, STM32_SPI_SPI%u_DMA_STREAMS }\n'
% (n, n, n, n))
f.write('#define HAL_SPI_BUS_LIST %s\n\n' % ','.join(devlist))
write_SPI_table(f)
def parse_spi_device(dev):
'''parse a SPI:xxx device item'''
a = dev.split(':')
if len(a) != 2:
error("Bad SPI device: %s" % dev)
return 'hal.spi->get_device("%s")' % a[1]
def parse_i2c_device(dev):
'''parse a I2C:xxx:xxx device item'''
a = dev.split(':')
if len(a) != 3:
error("Bad I2C device: %s" % dev)
busaddr = int(a[2], base=0)
if a[1] == 'ALL_EXTERNAL':
return ('FOREACH_I2C_EXTERNAL(b)', 'GET_I2C_DEVICE(b,0x%02x)' % (busaddr))
elif a[1] == 'ALL_INTERNAL':
return ('FOREACH_I2C_INTERNAL(b)', 'GET_I2C_DEVICE(b,0x%02x)' % (busaddr))
elif a[1] == 'ALL':
return ('FOREACH_I2C(b)', 'GET_I2C_DEVICE(b,0x%02x)' % (busaddr))
busnum = int(a[1])
return ('', 'GET_I2C_DEVICE(%u,0x%02x)' % (busnum, busaddr))
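# Illustrative examples (not executed here) of the two parsers above:
#   parse_spi_device('SPI:mpu9250')
#       -> 'hal.spi->get_device("mpu9250")'
#   parse_i2c_device('I2C:ALL_EXTERNAL:0x77')
#       -> ('FOREACH_I2C_EXTERNAL(b)', 'GET_I2C_DEVICE(b,0x77)')
#   parse_i2c_device('I2C:0:0x1e')
#       -> ('', 'GET_I2C_DEVICE(0,0x1e)')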
def seen_str(dev):
'''return string representation of device for checking for duplicates'''
return str(dev[:2])
def write_IMU_config(f):
'''write IMU config defines'''
global imu_list
devlist = []
wrapper = ''
seen = set()
for dev in imu_list:
if seen_str(dev) in seen:
error("Duplicate IMU: %s" % seen_str(dev))
seen.add(seen_str(dev))
driver = dev[0]
for i in range(1, len(dev)):
if dev[i].startswith("SPI:"):
dev[i] = parse_spi_device(dev[i])
elif dev[i].startswith("I2C:"):
(wrapper, dev[i]) = parse_i2c_device(dev[i])
n = len(devlist)+1
devlist.append('HAL_INS_PROBE%u' % n)
f.write(
'#define HAL_INS_PROBE%u %s ADD_BACKEND(AP_InertialSensor_%s::probe(*this,%s))\n'
% (n, wrapper, driver, ','.join(dev[1:])))
if len(devlist) > 0:
f.write('#define HAL_INS_PROBE_LIST %s\n\n' % ';'.join(devlist))
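# Illustrative sketch (hypothetical hwdef line): an IMU entry such as
#   IMU Invensense SPI:mpu9250 ROTATION_NONE
# would emit (the wrapper is empty for SPI devices):
#   #define HAL_INS_PROBE1 ADD_BACKEND(AP_InertialSensor_Invensense::probe(*this,hal.spi->get_device("mpu9250"),ROTATION_NONE))
#   #define HAL_INS_PROBE_LIST HAL_INS_PROBE1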
def write_MAG_config(f):
'''write MAG config defines'''
global compass_list
devlist = []
seen = set()
for dev in compass_list:
if seen_str(dev) in seen:
error("Duplicate MAG: %s" % seen_str(dev))
seen.add(seen_str(dev))
driver = dev[0]
probe = 'probe'
wrapper = ''
a = driver.split(':')
driver = a[0]
if len(a) > 1 and a[1].startswith('probe'):
probe = a[1]
for i in range(1, len(dev)):
if dev[i].startswith("SPI:"):
dev[i] = parse_spi_device(dev[i])
elif dev[i].startswith("I2C:"):
(wrapper, dev[i]) = parse_i2c_device(dev[i])
n = len(devlist)+1
devlist.append('HAL_MAG_PROBE%u' % n)
f.write(
'#define HAL_MAG_PROBE%u %s ADD_BACKEND(DRIVER_%s, AP_Compass_%s::%s(%s))\n'
% (n, wrapper, driver, driver, probe, ','.join(dev[1:])))
if len(devlist) > 0:
f.write('#define HAL_MAG_PROBE_LIST %s\n\n' % ';'.join(devlist))
def write_BARO_config(f):
'''write barometer config defines'''
global baro_list
devlist = []
seen = set()
for dev in baro_list:
if seen_str(dev) in seen:
error("Duplicate BARO: %s" % seen_str(dev))
seen.add(seen_str(dev))
driver = dev[0]
probe = 'probe'
wrapper = ''
a = driver.split(':')
driver = a[0]
if len(a) > 1 and a[1].startswith('probe'):
probe = a[1]
for i in range(1, len(dev)):
if dev[i].startswith("SPI:"):
dev[i] = parse_spi_device(dev[i])
elif dev[i].startswith("I2C:"):
(wrapper, dev[i]) = parse_i2c_device(dev[i])
if dev[i].startswith('hal.i2c_mgr'):
dev[i] = 'std::move(%s)' % dev[i]
n = len(devlist)+1
devlist.append('HAL_BARO_PROBE%u' % n)
args = ['*this'] + dev[1:]
f.write(
'#define HAL_BARO_PROBE%u %s ADD_BACKEND(AP_Baro_%s::%s(%s))\n'
% (n, wrapper, driver, probe, ','.join(args)))
if len(devlist) > 0:
f.write('#define HAL_BARO_PROBE_LIST %s\n\n' % ';'.join(devlist))
def write_board_validate_macro(f):
'''write board validation macro'''
global config
validate_string = ''
validate_dict = {}
if 'BOARD_VALIDATE' in config:
for check in config['BOARD_VALIDATE']:
check_name = check
check_string = check
while True:
def substitute_alias(m):
return '(' + get_config(m.group(1), spaces=True) + ')'
output = re.sub(r'\$(\w+|\{([^}]*)\})', substitute_alias, check_string)
if (output == check_string):
break
check_string = output
validate_dict[check_name] = check_string
# Finally create check conditional
for check_name in validate_dict:
validate_string += "!" + validate_dict[check_name] + "?" + "\"" + check_name + "\"" + ":"
validate_string += "nullptr"
f.write('#define HAL_VALIDATE_BOARD (%s)\n\n' % validate_string)
def get_gpio_bylabel(label):
'''get GPIO(n) setting on a pin label, or -1'''
p = bylabel.get(label)
if p is None:
return -1
return p.extra_value('GPIO', type=int, default=-1)
def get_extra_bylabel(label, name, default=None):
'''get extra setting for a label by name'''
p = bylabel.get(label)
if p is None:
return default
return p.extra_value(name, type=str, default=default)
def get_UART_ORDER():
'''get UART_ORDER from SERIAL_ORDER option'''
if get_config('UART_ORDER', required=False, aslist=True) is not None:
error('Please convert UART_ORDER to SERIAL_ORDER')
serial_order = get_config('SERIAL_ORDER', required=False, aslist=True)
if serial_order is None:
return None
if args.bootloader:
# in bootloader SERIAL_ORDER is treated the same as UART_ORDER
return serial_order
map = [ 0, 3, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]
while len(serial_order) < 4:
serial_order += ['EMPTY']
uart_order = []
for i in range(len(serial_order)):
uart_order.append(serial_order[map[i]])
return uart_order
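# Illustrative sketch (hypothetical helper, not in the upstream script): the
# remap above keeps SERIAL0 first but moves SERIAL3 into the second UART slot,
# matching the legacy UART_ORDER convention. For example:
def _example_serial_to_uart_order():
    '''show the SERIAL_ORDER -> UART_ORDER remap for a sample board'''
    serial_order = ['OTG1', 'USART2', 'USART1', 'UART4']  # SERIAL0..SERIAL3
    remap = [0, 3, 1, 2]  # prefix of the map[] table above
    return [serial_order[i] for i in remap]  # ['OTG1', 'UART4', 'USART2', 'USART1']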
def write_UART_config(f):
'''write UART config defines'''
global dual_USB_enabled
uart_list = get_UART_ORDER()
if uart_list is None:
return
f.write('\n// UART configuration\n')
# write out driver declarations for HAL_ChibOS_Class.cpp
devnames = "ABCDEFGH"
sdev = 0
idx = 0
num_empty_uarts = 0
for dev in uart_list:
if dev == 'EMPTY':
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
num_empty_uarts += 1
else:
f.write(
'#define HAL_UART%s_DRIVER ChibiOS::UARTDriver uart%sDriver(%u)\n'
% (devnames[idx], devnames[idx], sdev))
sdev += 1
idx += 1
for idx in range(len(uart_list), len(devnames)):
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
if 'IOMCU_UART' in config:
f.write('#define HAL_WITH_IO_MCU 1\n')
idx = len(uart_list)
f.write('#define HAL_UART_IOMCU_IDX %u\n' % idx)
f.write(
'#define HAL_UART_IO_DRIVER ChibiOS::UARTDriver uart_io(HAL_UART_IOMCU_IDX)\n'
)
uart_list.append(config['IOMCU_UART'][0])
f.write('#define HAL_HAVE_SERVO_VOLTAGE 1\n') # assume the IOMCU guarantees servo voltage monitoring
# all IOMCU capable boards have SBUS out
f.write('#define AP_FEATURE_SBUS_OUT 1\n')
else:
f.write('#define HAL_WITH_IO_MCU 0\n')
f.write('\n')
need_uart_driver = False
OTG2_index = None
devlist = []
have_rts_cts = False
for dev in uart_list:
if dev.startswith('UART'):
n = int(dev[4:])
elif dev.startswith('USART'):
n = int(dev[5:])
elif dev.startswith('OTG'):
n = int(dev[3:])
elif dev.startswith('EMPTY'):
continue
else:
error("Invalid element %s in UART_ORDER" % dev)
devlist.append('HAL_%s_CONFIG' % dev)
tx_line = make_line(dev + '_TX')
rx_line = make_line(dev + '_RX')
rts_line = make_line(dev + '_RTS')
if rts_line != "0":
have_rts_cts = True
if dev.startswith('OTG2'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU2, true, false, 0, 0, false, 0, 0}\n'
% dev)
OTG2_index = uart_list.index(dev)
dual_USB_enabled = True
elif dev.startswith('OTG'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU1, true, false, 0, 0, false, 0, 0}\n'
% dev)
else:
need_uart_driver = True
f.write(
"#define HAL_%s_CONFIG { (BaseSequentialStream*) &SD%u, false, "
% (dev, n))
if mcu_series.startswith("STM32F1"):
f.write("%s, %s, %s, " % (tx_line, rx_line, rts_line))
else:
f.write("STM32_%s_RX_DMA_CONFIG, STM32_%s_TX_DMA_CONFIG, %s, %s, %s, " %
(dev, dev, tx_line, rx_line, rts_line))
# add inversion pins, if any
f.write("%d, " % get_gpio_bylabel(dev + "_RXINV"))
f.write("%s, " % get_extra_bylabel(dev + "_RXINV", "POL", "0"))
f.write("%d, " % get_gpio_bylabel(dev + "_TXINV"))
f.write("%s}\n" % get_extra_bylabel(dev + "_TXINV", "POL", "0"))
if have_rts_cts:
f.write('#define AP_FEATURE_RTSCTS 1\n')
if OTG2_index is not None:
f.write('#define HAL_OTG2_UART_INDEX %d\n' % OTG2_index)
f.write('''
#if HAL_NUM_CAN_IFACES
#ifndef HAL_OTG2_PROTOCOL
#define HAL_OTG2_PROTOCOL SerialProtocol_SLCAN
#endif
#define HAL_SERIAL%d_PROTOCOL HAL_OTG2_PROTOCOL
#define HAL_SERIAL%d_BAUD 115200
#endif
''' % (OTG2_index, OTG2_index))
f.write('#define HAL_HAVE_DUAL_USB_CDC 1\n')
f.write('#define HAL_UART_DEVICE_LIST %s\n\n' % ','.join(devlist))
if not need_uart_driver and not args.bootloader:
f.write('''
#ifndef HAL_USE_SERIAL
#define HAL_USE_SERIAL HAL_USE_SERIAL_USB
#endif
''')
num_uarts = len(devlist)
if 'IOMCU_UART' in config:
num_uarts -= 1
if num_uarts > 8:
error("Exceeded max num UARTs of 8 (%u)" % num_uarts)
f.write('#define HAL_UART_NUM_SERIAL_PORTS %u\n' % (num_uarts+num_empty_uarts))
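# Illustrative sketch (hypothetical pins): for USART2 with TX on PA2, RX on
# PA3 and no RTS or inversion pins, the loop above emits (wrapped here for
# readability):
#   #define HAL_USART2_CONFIG { (BaseSequentialStream*) &SD2, false,
#       STM32_USART2_RX_DMA_CONFIG, STM32_USART2_TX_DMA_CONFIG,
#       PAL_LINE(GPIOA,2U), PAL_LINE(GPIOA,3U), 0, -1, 0, -1, 0}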
def write_UART_config_bootloader(f):
'''write UART config defines'''
uart_list = get_UART_ORDER()
if uart_list is None:
return
f.write('\n// UART configuration\n')
devlist = []
have_uart = False
OTG2_index = None
for u in uart_list:
if u.startswith('OTG2'):
devlist.append('(BaseChannel *)&SDU2')
OTG2_index = uart_list.index(u)
elif u.startswith('OTG'):
devlist.append('(BaseChannel *)&SDU1')
else:
unum = int(u[-1])
devlist.append('(BaseChannel *)&SD%u' % unum)
have_uart = True
if len(devlist) > 0:
f.write('#define BOOTLOADER_DEV_LIST %s\n' % ','.join(devlist))
if OTG2_index is not None:
f.write('#define HAL_OTG2_UART_INDEX %d\n' % OTG2_index)
if not have_uart:
f.write('''
#ifndef HAL_USE_SERIAL
#define HAL_USE_SERIAL FALSE
#endif
''')
def write_I2C_config(f):
'''write I2C config defines'''
if not have_type_prefix('I2C'):
print("No I2C peripherals")
f.write('''
#ifndef HAL_USE_I2C
#define HAL_USE_I2C FALSE
#endif
''')
return
if 'I2C_ORDER' not in config:
print("Missing I2C_ORDER config")
return
i2c_list = config['I2C_ORDER']
f.write('// I2C configuration\n')
if len(i2c_list) == 0:
error("I2C_ORDER invalid")
devlist = []
# write out config structures
for dev in i2c_list:
if not dev.startswith('I2C') or dev[3] not in "1234":
error("Bad I2C_ORDER element %s" % dev)
n = int(dev[3:])
devlist.append('HAL_I2C%u_CONFIG' % n)
sda_line = make_line('I2C%u_SDA' % n)
scl_line = make_line('I2C%u_SCL' % n)
f.write('''
#if defined(STM32_I2C_I2C%u_RX_DMA_STREAM) && defined(STM32_I2C_I2C%u_TX_DMA_STREAM)
#define HAL_I2C%u_CONFIG { &I2CD%u, STM32_I2C_I2C%u_RX_DMA_STREAM, STM32_I2C_I2C%u_TX_DMA_STREAM, %s, %s }
#else
#define HAL_I2C%u_CONFIG { &I2CD%u, SHARED_DMA_NONE, SHARED_DMA_NONE, %s, %s }
#endif
'''
% (n, n, n, n, n, n, scl_line, sda_line, n, n, scl_line, sda_line))
f.write('\n#define HAL_I2C_DEVICE_LIST %s\n\n' % ','.join(devlist))
def parse_timer(str):
'''parse a timer channel string, e.g. TIM8_CH2N'''
result = re.match(r'TIM([0-9]*)_CH([1234])(N?)', str)
if result:
tim = int(result.group(1))
chan = int(result.group(2))
compl = result.group(3) == 'N'
if tim < 1 or tim > 17:
error("Bad timer number %s in %s" % (tim, str))
return (tim, chan, compl)
else:
error("Bad timer definition %s" % str)
def write_PWM_config(f):
'''write PWM config defines'''
rc_in = None
rc_in_int = None
alarm = None
pwm_out = []
pwm_timers = []
for l in bylabel.keys():
p = bylabel[l]
if p.type.startswith('TIM'):
if p.has_extra('RCIN'):
rc_in = p
elif p.has_extra('RCININT'):
rc_in_int = p
elif p.has_extra('ALARM'):
alarm = p
else:
if p.extra_value('PWM', type=int) is not None:
pwm_out.append(p)
if p.type not in pwm_timers:
pwm_timers.append(p.type)
if not pwm_out and not alarm:
print("No PWM output defined")
f.write('''
#ifndef HAL_USE_PWM
#define HAL_USE_PWM FALSE
#endif
''')
if rc_in is not None:
(n, chan, compl) = parse_timer(rc_in.label)
if compl:
# it is an inverted channel
f.write('#define HAL_RCIN_IS_INVERTED\n')
if chan not in [1, 2]:
error(
"Bad channel number, only channel 1 and 2 supported for RCIN")
f.write('// RC input config\n')
f.write('#define HAL_USE_ICU TRUE\n')
f.write('#define STM32_ICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCIN_ICU_TIMER ICUD%u\n' % n)
f.write('#define RCIN_ICU_CHANNEL ICU_CHANNEL_%u\n' % chan)
f.write('#define STM32_RCIN_DMA_STREAM STM32_TIM_TIM%u_CH%u_DMA_STREAM\n' % (n, chan))
f.write('#define STM32_RCIN_DMA_CHANNEL STM32_TIM_TIM%u_CH%u_DMA_CHAN\n' % (n, chan))
f.write('\n')
if rc_in_int is not None:
(n, chan, compl) = parse_timer(rc_in_int.label)
if compl:
error('Complementary channel is not supported for RCININT %s' % rc_in_int.label)
f.write('// RC input config\n')
f.write('#define HAL_USE_EICU TRUE\n')
f.write('#define STM32_EICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCININT_EICU_TIMER EICUD%u\n' % n)
f.write('#define RCININT_EICU_CHANNEL EICU_CHANNEL_%u\n' % chan)
f.write('\n')
if alarm is not None:
(n, chan, compl) = parse_timer(alarm.label)
if compl:
error("Complementary channel is not supported for ALARM %s" % alarm.label)
f.write('\n')
f.write('// Alarm PWM output config\n')
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
pwm_clock = 1000000
period = 1000
f.write('''#define HAL_PWM_ALARM \\
{ /* pwmGroup */ \\
%u, /* Timer channel */ \\
{ /* PWMConfig */ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ /* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, \\
0, 0 \\
}, \\
&PWMD%u /* PWMDriver* */ \\
}\n''' %
(chan-1, pwm_clock, period, chan_mode[0],
chan_mode[1], chan_mode[2], chan_mode[3], n))
else:
f.write('\n')
f.write('// No Alarm output pin defined\n')
f.write('#undef HAL_PWM_ALARM\n')
f.write('\n')
f.write('// PWM timer config\n')
for t in sorted(pwm_timers):
n = int(t[3:])
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
f.write('\n')
f.write('// PWM output config\n')
groups = []
have_complementary = False
for t in sorted(pwm_timers):
group = len(groups) + 1
n = int(t[3:])
chan_list = [255, 255, 255, 255]
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
alt_functions = [0, 0, 0, 0]
pal_lines = ['0', '0', '0', '0']
for p in pwm_out:
if p.type != t:
continue
(n, chan, compl) = parse_timer(p.label)
pwm = p.extra_value('PWM', type=int)
chan_list[chan - 1] = pwm - 1
if compl:
chan_mode[chan - 1] = 'PWM_COMPLEMENTARY_OUTPUT_ACTIVE_HIGH'
have_complementary = True
else:
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
alt_functions[chan - 1] = p.af
pal_lines[chan - 1] = 'PAL_LINE(GPIO%s, %uU)' % (p.port, p.pin)
groups.append('HAL_PWM_GROUP%u' % group)
if n in [1, 8]:
# only the advanced timers do 8MHz clocks
advanced_timer = 'true'
else:
advanced_timer = 'false'
pwm_clock = 1000000
period = 20000 * pwm_clock / 1000000
f.write('''#if defined(STM32_TIM_TIM%u_UP_DMA_STREAM) && defined(STM32_TIM_TIM%u_UP_DMA_CHAN)
# define HAL_PWM%u_DMA_CONFIG true, STM32_TIM_TIM%u_UP_DMA_STREAM, STM32_TIM_TIM%u_UP_DMA_CHAN
#else
# define HAL_PWM%u_DMA_CONFIG false, 0, 0
#endif\n''' % (n, n, n, n, n, n))
f.write('''#define HAL_PWM_GROUP%u { %s, \\
{%u, %u, %u, %u}, \\
/* Group Initial Config */ \\
{ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ \\
/* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, 0, 0}, &PWMD%u, \\
HAL_PWM%u_DMA_CONFIG, \\
{ %u, %u, %u, %u }, \\
{ %s, %s, %s, %s }}\n''' %
(group, advanced_timer,
chan_list[0], chan_list[1], chan_list[2], chan_list[3],
pwm_clock, period,
chan_mode[0], chan_mode[1], chan_mode[2], chan_mode[3],
n, n,
alt_functions[0], alt_functions[1], alt_functions[2], alt_functions[3],
pal_lines[0], pal_lines[1], pal_lines[2], pal_lines[3]))
f.write('#define HAL_PWM_GROUPS %s\n\n' % ','.join(groups))
if have_complementary:
f.write('#define STM32_PWM_USE_ADVANCED TRUE\n')
def write_ADC_config(f):
'''write ADC config defines'''
f.write('// ADC config\n')
adc_chans = []
for l in bylabel:
p = bylabel[l]
if not p.type.startswith('ADC'):
continue
chan = get_ADC1_chan(mcu_type, p.portpin)
scale = p.extra_value('SCALE', default=None)
if p.label == 'VDD_5V_SENS':
f.write('#define ANALOG_VCC_5V_PIN %u\n' % chan)
f.write('#define HAL_HAVE_BOARD_VOLTAGE 1\n')
if p.label == 'FMU_SERVORAIL_VCC_SENS':
f.write('#define FMU_SERVORAIL_ADC_CHAN %u\n' % chan)
f.write('#define HAL_HAVE_SERVO_VOLTAGE 1\n')
adc_chans.append((chan, scale, p.label, p.portpin))
adc_chans = sorted(adc_chans)
vdd = get_config('STM32_VDD', default='330U')
if vdd[-1] == 'U':
vdd = vdd[:-1]
vdd = float(vdd) * 0.01
f.write('#define HAL_ANALOG_PINS { \\\n')
for (chan, scale, label, portpin) in adc_chans:
scale_str = '%.2f/4096' % vdd
if scale is not None and scale != '1':
scale_str = scale + '*' + scale_str
f.write('{ %2u, %12s }, /* %s %s */ \\\n' % (chan, scale_str, portpin,
label))
f.write('}\n\n')
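# Illustrative note (hypothetical pin): with the default STM32_VDD of '330U'
# (3.30V) and a pin carrying SCALE(2), the loop above writes an entry like
#   { 14, 2*3.30/4096 }, /* PC4 BATT_VOLTAGE_SENS */ \
# i.e. the raw 12-bit reading is converted to volts at the pin, then scaled
# back up by the external divider ratio.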
def write_GPIO_config(f):
'''write GPIO config defines'''
f.write('// GPIO config\n')
gpios = []
gpioset = set()
for l in bylabel:
p = bylabel[l]
gpio = p.extra_value('GPIO', type=int)
if gpio is None:
continue
if gpio in gpioset:
error("Duplicate GPIO value %u" % gpio)
gpioset.add(gpio)
# see if it is also a PWM pin
pwm = p.extra_value('PWM', type=int, default=0)
port = p.port
pin = p.pin
gpios.append((gpio, pwm, port, pin, p))
gpios = sorted(gpios)
for (gpio, pwm, port, pin, p) in gpios:
f.write('#define HAL_GPIO_LINE_GPIO%u PAL_LINE(GPIO%s, %2uU)\n' % (gpio, port, pin))
f.write('#define HAL_GPIO_PINS { \\\n')
for (gpio, pwm, port, pin, p) in gpios:
f.write('{ %3u, true, %2u, PAL_LINE(GPIO%s, %2uU)}, /* %s */ \\\n' %
(gpio, pwm, port, pin, p))
# and write #defines for use by config code
f.write('}\n\n')
f.write('// full pin define list\n')
last_label = None
for l in sorted(list(set(bylabel.keys()))):
p = bylabel[l]
label = p.label
label = label.replace('-', '_')
if label == last_label:
continue
last_label = label
f.write('#define HAL_GPIO_PIN_%-20s PAL_LINE(GPIO%s,%uU)\n' %
(label, p.port, p.pin))
f.write('\n')
def bootloader_path():
# always embed a bootloader if it is available
this_dir = os.path.realpath(__file__)
rootdir = os.path.relpath(os.path.join(this_dir, "../../../../.."))
hwdef_dirname = os.path.basename(os.path.dirname(args.hwdef))
bootloader_filename = "%s_bl.bin" % (hwdef_dirname,)
bootloader_path = os.path.join(rootdir,
"Tools",
"bootloaders",
bootloader_filename)
if os.path.exists(bootloader_path):
return os.path.realpath(bootloader_path)
return None
def add_bootloader():
'''add the bootloader to ROMFS if one is available'''
bp = bootloader_path()
if bp is not None:
romfs["bootloader.bin"] = bp
def write_ROMFS(outdir):
'''create ROMFS embedded header'''
romfs_list = []
for k in romfs.keys():
romfs_list.append((k, romfs[k]))
env_vars['ROMFS_FILES'] = romfs_list
def setup_apj_IDs():
'''setup the APJ board IDs'''
env_vars['APJ_BOARD_ID'] = get_config('APJ_BOARD_ID')
env_vars['APJ_BOARD_TYPE'] = get_config('APJ_BOARD_TYPE', default=mcu_type)
(USB_VID, USB_PID) = get_USB_IDs()
env_vars['USBID'] = '0x%04x/0x%04x' % (USB_VID, USB_PID)
def write_peripheral_enable(f):
'''write peripheral enable lines'''
f.write('// peripherals enabled\n')
for type in sorted(list(bytype.keys()) + list(alttype.keys())):
if type.startswith('USART') or type.startswith('UART'):
dstr = 'STM32_SERIAL_USE_%-6s' % type
f.write('#ifndef %s\n' % dstr)
f.write('#define %s TRUE\n' % dstr)
f.write('#endif\n')
if type.startswith('SPI'):
f.write('#define STM32_SPI_USE_%s TRUE\n' % type)
if type.startswith('OTG'):
f.write('#define STM32_USB_USE_%s TRUE\n' % type)
if type.startswith('I2C'):
f.write('#define STM32_I2C_USE_%s TRUE\n' % type)
def get_dma_exclude(periph_list):
'''return list of DMA devices to exclude from DMA'''
dma_exclude = []
for periph in periph_list:
if periph in bylabel:
p = bylabel[periph]
if p.has_extra('NODMA'):
dma_exclude.append(periph)
if periph in altlabel:
p = altlabel[periph]
if p.has_extra('NODMA'):
dma_exclude.append(periph)
return dma_exclude
def write_alt_config(f):
'''write out alternate config settings'''
if len(altmap.keys()) == 0:
# no alt configs
return
f.write('''
/* alternative configurations */
#define PAL_STM32_SPEED(n) ((n&3U)<<3U)
#define PAL_STM32_HIGH 0x8000U
#define HAL_PIN_ALT_CONFIG { \\
''')
for alt in altmap.keys():
for pp in altmap[alt].keys():
p = altmap[alt][pp]
f.write(" { %u, %s, PAL_LINE(GPIO%s,%uU)}, /* %s */ \\\n" % (alt, p.pal_modeline(), p.port, p.pin, str(p)))
f.write('}\n\n')
def write_all_lines(hwdat):
f = open(hwdat, 'w')
f.write('\n'.join(all_lines))
f.close()
if 'AP_PERIPH' not in env_vars:
romfs["hwdef.dat"] = hwdat
def write_hwdef_header(outfilename):
'''write hwdef header file'''
print("Writing hwdef setup in %s" % outfilename)
f = open(outfilename, 'w')
f.write('''/*
generated hardware definitions from hwdef.dat - DO NOT EDIT
*/
#pragma once
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
''')
write_mcu_config(f)
write_SPI_config(f)
write_ADC_config(f)
write_GPIO_config(f)
write_IMU_config(f)
write_MAG_config(f)
write_BARO_config(f)
write_board_validate_macro(f)
write_peripheral_enable(f)
dma_unassigned = dma_resolver.write_dma_header(f, periph_list, mcu_type,
dma_exclude=get_dma_exclude(periph_list),
dma_priority=get_config('DMA_PRIORITY', default='TIM* SPI*', spaces=True),
dma_noshare=get_config('DMA_NOSHARE', default='', spaces=True))
if not args.bootloader:
write_PWM_config(f)
write_I2C_config(f)
write_UART_config(f)
else:
write_UART_config_bootloader(f)
setup_apj_IDs()
write_USB_config(f)
add_bootloader()
if len(romfs) > 0:
f.write('#define HAL_HAVE_AP_ROMFS_EMBEDDED_H 1\n')
if mcu_series.startswith('STM32F1'):
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_OUTPUT_PP(n) (0U << (((n) & 7) * 4))
#define PIN_MODE_OUTPUT_OD(n) (4U << (((n) & 7) * 4))
#define PIN_MODE_AF_PP(n) (8U << (((n) & 7) * 4))
#define PIN_MODE_AF_OD(n) (12U << (((n) & 7) * 4))
#define PIN_MODE_ANALOG(n) (0U << (((n) & 7) * 4))
#define PIN_MODE_NOPULL(n) (4U << (((n) & 7) * 4))
#define PIN_MODE_PUD(n) (8U << (((n) & 7) * 4))
#define PIN_SPEED_MEDIUM(n) (1U << (((n) & 7) * 4))
#define PIN_SPEED_LOW(n) (2U << (((n) & 7) * 4))
#define PIN_SPEED_HIGH(n) (3U << (((n) & 7) * 4))
#define PIN_ODR_HIGH(n) (1U << (((n) & 15)))
#define PIN_ODR_LOW(n) (0U << (((n) & 15)))
#define PIN_PULLUP(n) (1U << (((n) & 15)))
#define PIN_PULLDOWN(n) (0U << (((n) & 15)))
#define PIN_UNDEFINED(n) PIN_INPUT_PUD(n)
''')
else:
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_INPUT(n) (0U << ((n) * 2U))
#define PIN_MODE_OUTPUT(n) (1U << ((n) * 2U))
#define PIN_MODE_ALTERNATE(n) (2U << ((n) * 2U))
#define PIN_MODE_ANALOG(n) (3U << ((n) * 2U))
#define PIN_ODR_LOW(n) (0U << (n))
#define PIN_ODR_HIGH(n) (1U << (n))
#define PIN_OTYPE_PUSHPULL(n) (0U << (n))
#define PIN_OTYPE_OPENDRAIN(n) (1U << (n))
#define PIN_OSPEED_VERYLOW(n) (0U << ((n) * 2U))
#define PIN_OSPEED_LOW(n) (1U << ((n) * 2U))
#define PIN_OSPEED_MEDIUM(n) (2U << ((n) * 2U))
#define PIN_OSPEED_HIGH(n) (3U << ((n) * 2U))
#define PIN_PUPDR_FLOATING(n) (0U << ((n) * 2U))
#define PIN_PUPDR_PULLUP(n) (1U << ((n) * 2U))
#define PIN_PUPDR_PULLDOWN(n) (2U << ((n) * 2U))
#define PIN_AFIO_AF(n, v) ((v) << (((n) % 8U) * 4U))
''')
for port in sorted(ports):
f.write("/* PORT%s:\n" % port)
for pin in range(pincount[port]):
p = portmap[port][pin]
if p.label is not None:
f.write(" %s\n" % p)
f.write("*/\n\n")
if pincount[port] == 0:
# handle blank ports
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s 0x0\n" % (port,
vtype))
f.write("\n\n\n")
continue
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s (" % (p.port, vtype))
first = True
for pin in range(pincount[port]):
p = portmap[port][pin]
modefunc = getattr(p, "get_" + vtype)
v = modefunc()
if v is None:
continue
if not first:
f.write(" | \\\n ")
f.write(v)
first = False
if first:
# there were no pin definitions, use 0
f.write("0")
f.write(")\n\n")
write_alt_config(f)
if not mcu_series.startswith("STM32F1"):
dma_required = ['SPI*', 'ADC*']
if 'IOMCU_UART' in config:
dma_required.append(config['IOMCU_UART'][0] + '*')
for d in dma_unassigned:
for r in dma_required:
if fnmatch.fnmatch(d, r):
error("Missing required DMA for %s" % d)
def build_peripheral_list():
'''build a list of peripherals for DMA resolver to work on'''
peripherals = []
done = set()
prefixes = ['SPI', 'USART', 'UART', 'I2C']
periph_pins = allpins[:]
for alt in altmap.keys():
for p in altmap[alt].keys():
periph_pins.append(altmap[alt][p])
for p in periph_pins:
type = p.type
if type in done:
continue
for prefix in prefixes:
if type.startswith(prefix):
ptx = type + "_TX"
prx = type + "_RX"
if prefix in ['SPI', 'I2C']:
# in the DMA map, I2C and SPI have RX and TX suffixes
if ptx not in bylabel:
bylabel[ptx] = p
if prx not in bylabel:
bylabel[prx] = p
if prx in bylabel or prx in altlabel:
peripherals.append(prx)
if ptx in bylabel or ptx in altlabel:
peripherals.append(ptx)
if type.startswith('ADC'):
peripherals.append(type)
if type.startswith('SDIO') or type.startswith('SDMMC'):
if not mcu_series.startswith("STM32H7"):
peripherals.append(type)
if type.startswith('TIM'):
if p.has_extra('RCIN'):
label = p.label
if label[-1] == 'N':
label = label[:-1]
peripherals.append(label)
elif not p.has_extra('ALARM') and not p.has_extra('RCININT'):
# get the TIMn_UP DMA channels for DShot
label = type + '_UP'
if label not in peripherals and not p.has_extra('NODMA'):
peripherals.append(label)
done.add(type)
return peripherals
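# Illustrative note (hypothetical board): for a board with SPI1, USART2, one
# ADC pin and a DShot-capable TIM3 output, the returned list might be
#   ['SPI1_RX', 'SPI1_TX', 'USART2_RX', 'USART2_TX', 'ADC1', 'TIM3_UP']
# which is the set of peripherals the DMA resolver allocates streams for.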
def write_env_py(filename):
'''write out env.py for environment variables to control the build process'''
# check whether the board has a defaults.parm file or a --default-parameters file was specified
defaults_filename = os.path.join(os.path.dirname(args.hwdef), 'defaults.parm')
defaults_path = os.path.join(os.path.dirname(args.hwdef), args.params)
if not args.bootloader:
if os.path.exists(defaults_path):
env_vars['DEFAULT_PARAMETERS'] = os.path.abspath(defaults_path)
print("Default parameters path from command line: %s" % defaults_path)
elif os.path.exists(defaults_filename):
env_vars['DEFAULT_PARAMETERS'] = os.path.abspath(defaults_filename)
print("Default parameters path from hwdef: %s" % defaults_filename)
else:
print("No default parameter file found")
# CHIBIOS_BUILD_FLAGS is passed to the ChibiOS makefile
env_vars['CHIBIOS_BUILD_FLAGS'] = ' '.join(build_flags)
pickle.dump(env_vars, open(filename, "wb"))
def romfs_add(romfs_filename, filename):
'''add a file to ROMFS'''
romfs[romfs_filename] = filename
def romfs_wildcard(pattern):
'''add a set of files to ROMFS by wildcard'''
base_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
(pattern_dir, pattern) = os.path.split(pattern)
for f in os.listdir(os.path.join(base_path, pattern_dir)):
if fnmatch.fnmatch(f, pattern):
romfs[f] = os.path.join(pattern_dir, f)
def romfs_add_dir(subdirs):
'''add a filesystem directory to ROMFS'''
for dirname in subdirs:
romfs_dir = os.path.join(os.path.dirname(args.hwdef), dirname)
if not args.bootloader and os.path.exists(romfs_dir):
for root, d, files in os.walk(romfs_dir):
for f in files:
if fnmatch.fnmatch(f, '*~'):
# skip editor backup files
continue
fullpath = os.path.join(root, f)
relpath = os.path.normpath(os.path.join(dirname, os.path.relpath(root, romfs_dir), f))
romfs[relpath] = fullpath
def process_line(line):
'''process one line of pin definition file'''
global allpins, imu_list, compass_list, baro_list
global mcu_type, mcu_series
all_lines.append(line)
a = shlex.split(line, posix=False)
# keep all config lines for later use
alllines.append(line)
p = None
if a[0].startswith('P') and a[0][1] in ports:
# it is a port/pin definition
try:
port = a[0][1]
pin = int(a[0][2:])
label = a[1]
type = a[2]
extra = a[3:]
except Exception:
error("Bad pin line: %s" % a)
return
p = generic_pin(port, pin, label, type, extra)
af = get_alt_function(mcu_type, a[0], label)
if af is not None:
p.af = af
alt = p.extra_value("ALT", type=int, default=0)
if alt != 0:
if mcu_series.startswith("STM32F1"):
error("Alt config not allowed for F1 MCU")
if alt not in altmap:
altmap[alt] = {}
if p.portpin in altmap[alt]:
error("Pin %s ALT(%u) redefined" % (p.portpin, alt))
altmap[alt][p.portpin] = p
# we need to add alt pins into bytype[] so they are enabled in chibios config
if type not in alttype:
alttype[type] = []
alttype[type].append(p)
altlabel[label] = p
return
if a[0] in config:
error("Pin %s redefined" % a[0])
if p is None and line.find('ALT(') != -1:
error("ALT() invalid for %s" % a[0])
config[a[0]] = a[1:]
if p is not None:
# add to set of pins for primary config
portmap[port][pin] = p
allpins.append(p)
if type not in bytype:
bytype[type] = []
bytype[type].append(p)
bylabel[label] = p
elif a[0] == 'MCU':
mcu_type = a[2]
mcu_series = a[1]
setup_mcu_type_defaults()
elif a[0] == 'SPIDEV':
spidev.append(a[1:])
elif a[0] == 'IMU':
imu_list.append(a[1:])
elif a[0] == 'COMPASS':
compass_list.append(a[1:])
elif a[0] == 'BARO':
baro_list.append(a[1:])
elif a[0] == 'ROMFS':
romfs_add(a[1], a[2])
elif a[0] == 'ROMFS_WILDCARD':
romfs_wildcard(a[1])
elif a[0] == 'undef':
print("Removing %s" % a[1])
config.pop(a[1], '')
bytype.pop(a[1], '')
bylabel.pop(a[1], '')
# also remove any occurrences of defines in previous lines
for line in alllines[:]:
if line.startswith('define') and a[1] == line.split()[1]:
alllines.remove(line)
newpins = []
for pin in allpins:
if pin.type == a[1] or pin.label == a[1] or pin.portpin == a[1]:
portmap[pin.port][pin.pin] = generic_pin(pin.port, pin.pin, None, 'INPUT', [])
continue
newpins.append(pin)
allpins = newpins
if a[1] == 'IMU':
imu_list = []
if a[1] == 'COMPASS':
compass_list = []
if a[1] == 'BARO':
baro_list = []
elif a[0] == 'env':
print("Adding environment %s" % ' '.join(a[1:]))
if len(a[1:]) < 2:
error("Bad env line for %s" % a[0])
env_vars[a[1]] = ' '.join(a[2:])
def process_file(filename):
'''process a hwdef.dat file'''
try:
f = open(filename, "r")
except Exception:
error("Unable to open file %s" % filename)
for line in f.readlines():
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
a = shlex.split(line)
if a[0] == "include" and len(a) > 1:
include_file = a[1]
if include_file[0] != '/':
dir = os.path.dirname(filename)
include_file = os.path.normpath(
os.path.join(dir, include_file))
print("Including %s" % include_file)
process_file(include_file)
else:
process_line(line)
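# Illustrative sketch (hypothetical hwdef.dat fragment) of the line formats
# handled by process_line() above:
#   MCU STM32F4xx STM32F405xx
#   APJ_BOARD_ID 9
#   PA2 USART2_TX USART2
#   PA3 USART2_RX USART2
#   PA4 MPU9250_CS CS
#   SPIDEV mpu9250 SPI1 DEVID1 MPU9250_CS MODE3 2*MHZ 8*MHZ
#   SERIAL_ORDER OTG1 USART2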
# process input file
process_file(args.hwdef)
outdir = args.outdir
if outdir is None:
outdir = '/tmp'
if "MCU" not in config:
error("Missing MCU type in config")
mcu_type = get_config('MCU', 1)
print("Setup for MCU %s" % mcu_type)
# build a list for peripherals for DMA resolver
periph_list = build_peripheral_list()
# write out hw.dat for ROMFS
write_all_lines(os.path.join(outdir, "hw.dat"))
# write out hwdef.h
write_hwdef_header(os.path.join(outdir, "hwdef.h"))
# write out ldscript.ld
write_ldscript(os.path.join(outdir, "ldscript.ld"))
romfs_add_dir(['scripts'])
write_ROMFS(outdir)
# copy the shared linker script into the build directory; it must
# exist in the same directory as the ldscript.ld file we generate.
copy_common_linkerscript(outdir, args.hwdef)
write_env_py(os.path.join(outdir, "env.py"))
| squilter/ardupilot | libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py | Python | gpl-3.0 | 70394 | ["CRYSTAL"] | 83feecc19cb93108eefc18d21d511bf11bcc358f496bf7286f03305ff8462bfb |
# -*- coding: utf-8 -*-
"""
sphinx.quickstart
~~~~~~~~~~~~~~~~~
Quickly set up a documentation source to work with Sphinx.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
from __future__ import absolute_import
import re
import os
import sys
import optparse
import time
from os import path
from io import open
# try to import readline, unix specific enhancement
try:
import readline
if readline.__doc__ and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
except ImportError:
pass
from six import PY2, PY3, text_type, binary_type
from six.moves import input
from six.moves.urllib.parse import quote as urlquote
from docutils.utils import column_width
from sphinx import __display_version__
from sphinx.util.osutil import make_filename
from sphinx.util.console import purple, bold, red, turquoise, \
nocolor, color_terminal
from sphinx.util import texescape
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
# function to get input from terminal -- overridden by the test suite
term_input = input
DEFAULT_VALUE = {
'path': '.',
'sep': False,
'dot': '_',
'language': None,
'suffix': '.rst',
'master': 'index',
'epub': False,
'ext_autodoc': False,
'ext_doctest': False,
'ext_todo': False,
'makefile': True,
'batchfile': True,
}
EXTENSIONS = ('autodoc', 'doctest', 'intersphinx', 'todo', 'coverage',
'imgmath', 'mathjax', 'ifconfig', 'viewcode', 'githubpages')
PROMPT_PREFIX = '> '
if PY3:
# prevent the file from being checked for Python 2.x syntax
QUICKSTART_CONF = u'#!/usr/bin/env python3\n'
else:
QUICKSTART_CONF = u''
QUICKSTART_CONF += u'''\
# -*- coding: utf-8 -*-
#
# %(project)s documentation build configuration file, created by
# sphinx-quickstart on %(now)s.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [%(extensions)s]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['%(dot)stemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '%(suffix)s'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = '%(master_str)s'
# General information about the project.
project = u'%(project_str)s'
copyright = u'%(copyright_str)s'
author = u'%(author_str)s'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'%(version_str)s'
# The full version, including alpha/beta/rc tags.
release = u'%(release_str)s'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = %(language)r
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%%B %%d, %%Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = [%(exclude_patterns)s]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = %(ext_todo)s
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'%(project_str)s v%(release_str)s'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['%(dot)sstatic']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%%b %%d, %%Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = '%(project_fn)sdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '%(project_fn)s.tex', u'%(project_doc_texescaped_str)s',
u'%(author_texescaped_str)s', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, '%(project_manpage)s', u'%(project_doc_str)s',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, '%(project_fn)s', u'%(project_doc_str)s',
author, '%(project_fn)s', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
'''
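# Illustrative sketch (assumption about the generator, which is defined later
# in this module): the conf.py text is rendered with old-style %-formatting
# from the collected answers, along the lines of
#   conf_text = QUICKSTART_CONF % {'project': 'demo', 'now': time.asctime(), ...}
# EPUB_CONFIG below is appended to the same template when epub output is
# selected.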
EPUB_CONFIG = u'''
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
'''
INTERSPHINX_CONFIG = u'''
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
'''
MASTER_FILE = u'''\
.. %(project)s documentation master file, created by
sphinx-quickstart on %(now)s.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to %(project)s's documentation!
===========%(project_underline)s=================
Contents:
.. toctree::
:maxdepth: %(mastertocmaxdepth)s
%(mastertoctree)s
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
'''
MAKEFILE = u'''\
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = %(rbuilddir)s
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) \
$(SPHINXOPTS) %(rsrcdir)s
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) %(rsrcdir)s
.PHONY: help
help:
\t@echo "Please use \\`make <target>' where <target> is one of"
\t@echo " html to make standalone HTML files"
\t@echo " dirhtml to make HTML files named index.html in directories"
\t@echo " singlehtml to make a single large HTML file"
\t@echo " pickle to make pickle files"
\t@echo " json to make JSON files"
\t@echo " htmlhelp to make HTML files and a HTML help project"
\t@echo " qthelp to make HTML files and a qthelp project"
\t@echo " applehelp to make an Apple Help Book"
\t@echo " devhelp to make HTML files and a Devhelp project"
\t@echo " epub to make an epub"
\t@echo " epub3 to make an epub3"
\t@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
\t@echo " latexpdf to make LaTeX files and run them through pdflatex"
\t@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
\t@echo " text to make text files"
\t@echo " man to make manual pages"
\t@echo " texinfo to make Texinfo files"
\t@echo " info to make Texinfo files and run them through makeinfo"
\t@echo " gettext to make PO message catalogs"
\t@echo " changes to make an overview of all changed/added/deprecated items"
\t@echo " xml to make Docutils-native XML files"
\t@echo " pseudoxml to make pseudoxml-XML files for display purposes"
\t@echo " linkcheck to check all external links for integrity"
\t@echo " doctest to run all doctests embedded in the documentation \
(if enabled)"
\t@echo " coverage to run coverage check of the documentation (if enabled)"
\t@echo " dummy to check syntax errors of document sources"
.PHONY: clean
clean:
\trm -rf $(BUILDDIR)/*
.PHONY: html
html:
\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
\t@echo
\t@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
.PHONY: dirhtml
dirhtml:
\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
\t@echo
\t@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
.PHONY: singlehtml
singlehtml:
\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
\t@echo
\t@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
.PHONY: pickle
pickle:
\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
\t@echo
\t@echo "Build finished; now you can process the pickle files."
.PHONY: json
json:
\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
\t@echo
\t@echo "Build finished; now you can process the JSON files."
.PHONY: htmlhelp
htmlhelp:
\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
\t@echo
\t@echo "Build finished; now you can run HTML Help Workshop with the" \\
\t ".hhp project file in $(BUILDDIR)/htmlhelp."
.PHONY: qthelp
qthelp:
\t$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
\t@echo
\t@echo "Build finished; now you can run "qcollectiongenerator" with the" \\
\t ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
\t@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/%(project_fn)s.qhcp"
\t@echo "To view the help file:"
\t@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/%(project_fn)s.qhc"
.PHONY: applehelp
applehelp:
\t$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
\t@echo
\t@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
\t@echo "N.B. You won't be able to view it unless you put it in" \\
\t "~/Library/Documentation/Help or install it in your application" \\
\t "bundle."
.PHONY: devhelp
devhelp:
\t$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
\t@echo
\t@echo "Build finished."
\t@echo "To view the help file:"
\t@echo "# mkdir -p $$HOME/.local/share/devhelp/%(project_fn)s"
\t@echo "# ln -s $(BUILDDIR)/devhelp\
$$HOME/.local/share/devhelp/%(project_fn)s"
\t@echo "# devhelp"
.PHONY: epub
epub:
\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
\t@echo
\t@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
.PHONY: epub3
epub3:
\t$(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3
\t@echo
\t@echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."
.PHONY: latex
latex:
\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
\t@echo
\t@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
\t@echo "Run \\`make' in that directory to run these through (pdf)latex" \\
\t "(use \\`make latexpdf' here to do that automatically)."
.PHONY: latexpdf
latexpdf:
\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
\t@echo "Running LaTeX files through pdflatex..."
\t$(MAKE) -C $(BUILDDIR)/latex all-pdf
\t@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
.PHONY: latexpdfja
latexpdfja:
\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
\t@echo "Running LaTeX files through platex and dvipdfmx..."
\t$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
\t@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
.PHONY: text
text:
\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
\t@echo
\t@echo "Build finished. The text files are in $(BUILDDIR)/text."
.PHONY: man
man:
\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
\t@echo
\t@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
.PHONY: texinfo
texinfo:
\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
\t@echo
\t@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
\t@echo "Run \\`make' in that directory to run these through makeinfo" \\
\t "(use \\`make info' here to do that automatically)."
.PHONY: info
info:
\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
\t@echo "Running Texinfo files through makeinfo..."
\tmake -C $(BUILDDIR)/texinfo info
\t@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
.PHONY: gettext
gettext:
\t$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
\t@echo
\t@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
.PHONY: changes
changes:
\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
\t@echo
\t@echo "The overview file is in $(BUILDDIR)/changes."
.PHONY: linkcheck
linkcheck:
\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
\t@echo
\t@echo "Link check complete; look for any errors in the above output " \\
\t "or in $(BUILDDIR)/linkcheck/output.txt."
.PHONY: doctest
doctest:
\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
\t@echo "Testing of doctests in the sources finished, look at the " \\
\t "results in $(BUILDDIR)/doctest/output.txt."
.PHONY: coverage
coverage:
\t$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
\t@echo "Testing of coverage in the sources finished, look at the " \\
\t "results in $(BUILDDIR)/coverage/python.txt."
.PHONY: xml
xml:
\t$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
\t@echo
\t@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
.PHONY: pseudoxml
pseudoxml:
\t$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
\t@echo
\t@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
.PHONY: dummy
dummy:
\t$(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy
\t@echo
\t@echo "Build finished. Dummy builder generates no files."
'''
BATCHFILE = u'''\
@ECHO OFF
REM Command file for Sphinx documentation
if "%%SPHINXBUILD%%" == "" (
\tset SPHINXBUILD=sphinx-build
)
set BUILDDIR=%(rbuilddir)s
set ALLSPHINXOPTS=-d %%BUILDDIR%%/doctrees %%SPHINXOPTS%% %(rsrcdir)s
set I18NSPHINXOPTS=%%SPHINXOPTS%% %(rsrcdir)s
if NOT "%%PAPER%%" == "" (
\tset ALLSPHINXOPTS=-D latex_paper_size=%%PAPER%% %%ALLSPHINXOPTS%%
\tset I18NSPHINXOPTS=-D latex_paper_size=%%PAPER%% %%I18NSPHINXOPTS%%
)
if "%%1" == "" goto help
if "%%1" == "help" (
\t:help
\techo.Please use `make ^<target^>` where ^<target^> is one of
\techo. html to make standalone HTML files
\techo. dirhtml to make HTML files named index.html in directories
\techo. singlehtml to make a single large HTML file
\techo. pickle to make pickle files
\techo. json to make JSON files
\techo. htmlhelp to make HTML files and a HTML help project
\techo. qthelp to make HTML files and a qthelp project
\techo. devhelp to make HTML files and a Devhelp project
\techo. epub to make an epub
\techo. epub3 to make an epub3
\techo. latex to make LaTeX files; you can set PAPER=a4 or PAPER=letter
\techo. text to make text files
\techo. man to make manual pages
\techo. texinfo to make Texinfo files
\techo. gettext to make PO message catalogs
\techo. changes to make an overview of all changed/added/deprecated items
\techo. xml to make Docutils-native XML files
\techo. pseudoxml to make pseudoxml-XML files for display purposes
\techo. linkcheck to check all external links for integrity
\techo. doctest to run all doctests embedded in the documentation if enabled
\techo. coverage to run coverage check of the documentation if enabled
\techo. dummy to check document sources for syntax errors
\tgoto end
)
if "%%1" == "clean" (
\tfor /d %%%%i in (%%BUILDDIR%%\*) do rmdir /q /s %%%%i
\tdel /q /s %%BUILDDIR%%\*
\tgoto end
)
REM Check if sphinx-build is available and fall back to the Python module if it is not
%%SPHINXBUILD%% 1>NUL 2>NUL
if errorlevel 9009 goto sphinx_python
goto sphinx_ok
:sphinx_python
set SPHINXBUILD=python -m sphinx.__init__
%%SPHINXBUILD%% 2> nul
if errorlevel 9009 (
\techo.
\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx
\techo.installed, then set the SPHINXBUILD environment variable to point
\techo.to the full path of the 'sphinx-build' executable. Alternatively you
\techo.may add the Sphinx directory to PATH.
\techo.
\techo.If you don't have Sphinx installed, grab it from
\techo.http://sphinx-doc.org/
\texit /b 1
)
:sphinx_ok
if "%%1" == "html" (
\t%%SPHINXBUILD%% -b html %%ALLSPHINXOPTS%% %%BUILDDIR%%/html
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The HTML pages are in %%BUILDDIR%%/html.
\tgoto end
)
if "%%1" == "dirhtml" (
\t%%SPHINXBUILD%% -b dirhtml %%ALLSPHINXOPTS%% %%BUILDDIR%%/dirhtml
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The HTML pages are in %%BUILDDIR%%/dirhtml.
\tgoto end
)
if "%%1" == "singlehtml" (
\t%%SPHINXBUILD%% -b singlehtml %%ALLSPHINXOPTS%% %%BUILDDIR%%/singlehtml
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The HTML pages are in %%BUILDDIR%%/singlehtml.
\tgoto end
)
if "%%1" == "pickle" (
\t%%SPHINXBUILD%% -b pickle %%ALLSPHINXOPTS%% %%BUILDDIR%%/pickle
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished; now you can process the pickle files.
\tgoto end
)
if "%%1" == "json" (
\t%%SPHINXBUILD%% -b json %%ALLSPHINXOPTS%% %%BUILDDIR%%/json
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished; now you can process the JSON files.
\tgoto end
)
if "%%1" == "htmlhelp" (
\t%%SPHINXBUILD%% -b htmlhelp %%ALLSPHINXOPTS%% %%BUILDDIR%%/htmlhelp
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %%BUILDDIR%%/htmlhelp.
\tgoto end
)
if "%%1" == "qthelp" (
\t%%SPHINXBUILD%% -b qthelp %%ALLSPHINXOPTS%% %%BUILDDIR%%/qthelp
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %%BUILDDIR%%/qthelp, like this:
\techo.^> qcollectiongenerator %%BUILDDIR%%\\qthelp\\%(project_fn)s.qhcp
\techo.To view the help file:
\techo.^> assistant -collectionFile %%BUILDDIR%%\\qthelp\\%(project_fn)s.qhc
\tgoto end
)
if "%%1" == "devhelp" (
\t%%SPHINXBUILD%% -b devhelp %%ALLSPHINXOPTS%% %%BUILDDIR%%/devhelp
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished.
\tgoto end
)
if "%%1" == "epub" (
\t%%SPHINXBUILD%% -b epub %%ALLSPHINXOPTS%% %%BUILDDIR%%/epub
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The epub file is in %%BUILDDIR%%/epub.
\tgoto end
)
if "%%1" == "epub3" (
\t%%SPHINXBUILD%% -b epub3 %%ALLSPHINXOPTS%% %%BUILDDIR%%/epub3
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The epub3 file is in %%BUILDDIR%%/epub3.
\tgoto end
)
if "%%1" == "latex" (
\t%%SPHINXBUILD%% -b latex %%ALLSPHINXOPTS%% %%BUILDDIR%%/latex
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished; the LaTeX files are in %%BUILDDIR%%/latex.
\tgoto end
)
if "%%1" == "latexpdf" (
\t%%SPHINXBUILD%% -b latex %%ALLSPHINXOPTS%% %%BUILDDIR%%/latex
\tcd %%BUILDDIR%%/latex
\tmake all-pdf
\tcd %%~dp0
\techo.
\techo.Build finished; the PDF files are in %%BUILDDIR%%/latex.
\tgoto end
)
if "%%1" == "latexpdfja" (
\t%%SPHINXBUILD%% -b latex %%ALLSPHINXOPTS%% %%BUILDDIR%%/latex
\tcd %%BUILDDIR%%/latex
\tmake all-pdf-ja
\tcd %%~dp0
\techo.
\techo.Build finished; the PDF files are in %%BUILDDIR%%/latex.
\tgoto end
)
if "%%1" == "text" (
\t%%SPHINXBUILD%% -b text %%ALLSPHINXOPTS%% %%BUILDDIR%%/text
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The text files are in %%BUILDDIR%%/text.
\tgoto end
)
if "%%1" == "man" (
\t%%SPHINXBUILD%% -b man %%ALLSPHINXOPTS%% %%BUILDDIR%%/man
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The manual pages are in %%BUILDDIR%%/man.
\tgoto end
)
if "%%1" == "texinfo" (
\t%%SPHINXBUILD%% -b texinfo %%ALLSPHINXOPTS%% %%BUILDDIR%%/texinfo
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The Texinfo files are in %%BUILDDIR%%/texinfo.
\tgoto end
)
if "%%1" == "gettext" (
\t%%SPHINXBUILD%% -b gettext %%I18NSPHINXOPTS%% %%BUILDDIR%%/locale
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The message catalogs are in %%BUILDDIR%%/locale.
\tgoto end
)
if "%%1" == "changes" (
\t%%SPHINXBUILD%% -b changes %%ALLSPHINXOPTS%% %%BUILDDIR%%/changes
\tif errorlevel 1 exit /b 1
\techo.
\techo.The overview file is in %%BUILDDIR%%/changes.
\tgoto end
)
if "%%1" == "linkcheck" (
\t%%SPHINXBUILD%% -b linkcheck %%ALLSPHINXOPTS%% %%BUILDDIR%%/linkcheck
\tif errorlevel 1 exit /b 1
\techo.
\techo.Link check complete; look for any errors in the above output ^
or in %%BUILDDIR%%/linkcheck/output.txt.
\tgoto end
)
if "%%1" == "doctest" (
\t%%SPHINXBUILD%% -b doctest %%ALLSPHINXOPTS%% %%BUILDDIR%%/doctest
\tif errorlevel 1 exit /b 1
\techo.
\techo.Testing of doctests in the sources finished; look at the ^
results in %%BUILDDIR%%/doctest/output.txt.
\tgoto end
)
if "%%1" == "coverage" (
\t%%SPHINXBUILD%% -b coverage %%ALLSPHINXOPTS%% %%BUILDDIR%%/coverage
\tif errorlevel 1 exit /b 1
\techo.
\techo.Testing of coverage in the sources finished; look at the ^
results in %%BUILDDIR%%/coverage/python.txt.
\tgoto end
)
if "%%1" == "xml" (
\t%%SPHINXBUILD%% -b xml %%ALLSPHINXOPTS%% %%BUILDDIR%%/xml
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The XML files are in %%BUILDDIR%%/xml.
\tgoto end
)
if "%%1" == "pseudoxml" (
\t%%SPHINXBUILD%% -b pseudoxml %%ALLSPHINXOPTS%% %%BUILDDIR%%/pseudoxml
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. The pseudo-XML files are in %%BUILDDIR%%/pseudoxml.
\tgoto end
)
if "%%1" == "dummy" (
\t%%SPHINXBUILD%% -b dummy %%ALLSPHINXOPTS%% %%BUILDDIR%%/dummy
\tif errorlevel 1 exit /b 1
\techo.
\techo.Build finished. Dummy builder generates no files.
\tgoto end
)
:end
'''
# This will become the Makefile template for Sphinx 1.5.
MAKEFILE_NEW = u'''\
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = %(project_fn)s
SOURCEDIR = %(rsrcdir)s
BUILDDIR = %(rbuilddir)s
# "help" has to be an explicit target; otherwise plain "make" with no target would not show it.
help:
\t@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
# You can add custom targets here.
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%%:
\t@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
'''
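# Illustrative note on the make-mode template above: every target name is
# simply forwarded to sphinx-build, so for example running
#
#     make html
#
# effectively executes
#
#     sphinx-build -M html "<SOURCEDIR>" "<BUILDDIR>" <SPHINXOPTS>
#
# with SOURCEDIR/BUILDDIR substituted from the variables defined at the top.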
# This will become the make.bat template for Sphinx 1.5.
BATCHFILE_NEW = u'''\
@ECHO OFF
REM Command file for Sphinx documentation
if "%%SPHINXBUILD%%" == "" (
\tset SPHINXBUILD=sphinx-build
)
set SOURCEDIR=%(rsrcdir)s
set BUILDDIR=%(rbuilddir)s
set SPHINXPROJ=%(project_fn)s
if "%%1" == "" goto help
%%SPHINXBUILD%% >NUL 2>NUL
if errorlevel 9009 (
\techo.
\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx
\techo.installed, then set the SPHINXBUILD environment variable to point
\techo.to the full path of the 'sphinx-build' executable. Alternatively you
\techo.may add the Sphinx directory to PATH.
\techo.
\techo.If you don't have Sphinx installed, grab it from
\techo.http://sphinx-doc.org/
\texit /b 1
)
%%SPHINXBUILD%% -M %%1 %%SOURCEDIR%% %%BUILDDIR%% %%SPHINXOPTS%%
goto end
:help
%%SPHINXBUILD%% -M help %%SOURCEDIR%% %%BUILDDIR%% %%SPHINXOPTS%%
:end
'''
def mkdir_p(dir):
if path.isdir(dir):
return
os.makedirs(dir)
class ValidationError(Exception):
"""Raised for validation errors."""
def is_path(x):
x = path.expanduser(x)
if path.exists(x) and not path.isdir(x):
raise ValidationError("Please enter a valid path name.")
return x
def nonempty(x):
if not x:
raise ValidationError("Please enter some text.")
return x
def choice(*l):
def val(x):
if x not in l:
raise ValidationError('Please enter one of %s.' % ', '.join(l))
return x
return val
def boolean(x):
if x.upper() not in ('Y', 'YES', 'N', 'NO'):
raise ValidationError("Please enter either 'y' or 'n'.")
return x.upper() in ('Y', 'YES')
def suffix(x):
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError("Please enter a file suffix, "
"e.g. '.rst' or '.txt'.")
return x
def ok(x):
return x
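# Illustrative summary of the validator contract used with do_prompt() below:
# each validator receives the raw input string and either returns a (possibly
# cleaned) value or raises ValidationError. For example:
#
#     suffix('.rst')   ->  '.rst'
#     suffix('rst')    ->  raises ValidationError (missing leading dot)
#     boolean('Y')     ->  True
#     boolean('maybe') ->  raises ValidationError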
def term_decode(text):
if isinstance(text, text_type):
return text
# for Python 2.x, try to get a Unicode string out of it
if text.decode('ascii', 'replace').encode('ascii', 'replace') == text:
return text
if TERM_ENCODING:
text = text.decode(TERM_ENCODING)
else:
print(turquoise('* Note: non-ASCII characters entered '
'and terminal encoding unknown -- assuming '
'UTF-8 or Latin-1.'))
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
text = text.decode('latin1')
return text
def do_prompt(d, key, text, default=None, validator=nonempty):
while True:
if default:
prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default)
else:
prompt = PROMPT_PREFIX + text + ': '
if PY2:
# for Python 2.x, try to get a Unicode string out of it
if prompt.encode('ascii', 'replace').decode('ascii', 'replace') \
!= prompt:
if TERM_ENCODING:
prompt = prompt.encode(TERM_ENCODING)
else:
print(turquoise('* Note: non-ASCII default value provided '
'and terminal encoding unknown -- assuming '
'UTF-8 or Latin-1.'))
try:
prompt = prompt.encode('utf-8')
except UnicodeEncodeError:
prompt = prompt.encode('latin1')
prompt = purple(prompt)
x = term_input(prompt).strip()
if default and not x:
x = default
x = term_decode(x)
try:
x = validator(x)
except ValidationError as err:
print(red('* ' + str(err)))
continue
break
d[key] = x
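# Illustrative usage of do_prompt(), mirroring the calls in ask_user() below:
#
#     d = {}
#     do_prompt(d, 'suffix', 'Source file suffix', '.rst', suffix)
#
# The user is re-prompted until the validator accepts the input; the cleaned
# value (or the default '.rst' on an empty answer) ends up in d['suffix'].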
if PY3:
# remove Unicode literal prefixes
def _convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
return rex.sub('\\1', source)
for f in ['QUICKSTART_CONF', 'EPUB_CONFIG', 'INTERSPHINX_CONFIG']:
globals()[f] = _convert_python_source(globals()[f])
del _convert_python_source
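# Illustrative effect of the Python 3 rewrite above: Unicode literal prefixes
# are stripped from the embedded configuration templates, e.g. a hypothetical
# fragment such as u'.rst' inside QUICKSTART_CONF would be rewritten to '.rst'
# before the template is rendered.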
def ask_user(d):
"""Ask the user for quickstart values missing from *d*.
Values are:
* path: root path
* sep: separate source and build dirs (bool)
* dot: replacement for dot in _templates etc.
* project: project name
* author: author names
* version: version of project
* release: release of project
* language: document language
* suffix: source file suffix
* master: master document name
* epub: use epub (bool)
* ext_*: extensions to use (bools)
* makefile: make Makefile
* batchfile: make command file
"""
print(bold('Welcome to the Sphinx %s quickstart utility.') % __display_version__)
print('''
Please enter values for the following settings (just press Enter to
accept a default value, if one is given in brackets).''')
if 'path' in d:
print(bold('''
Selected root path: %s''' % d['path']))
else:
print('''
Enter the root path for documentation.''')
do_prompt(d, 'path', 'Root path for the documentation', '.', is_path)
while path.isfile(path.join(d['path'], 'conf.py')) or \
path.isfile(path.join(d['path'], 'source', 'conf.py')):
print()
print(bold('Error: an existing conf.py has been found in the '
'selected root path.'))
print('sphinx-quickstart will not overwrite existing Sphinx projects.')
print()
do_prompt(d, 'path', 'Please enter a new root path (or just Enter '
'to exit)', '', is_path)
if not d['path']:
sys.exit(1)
if 'sep' not in d:
print('''
You have two options for placing the build directory for Sphinx output.
Either, you use a directory "_build" within the root path, or you separate
"source" and "build" directories within the root path.''')
do_prompt(d, 'sep', 'Separate source and build directories (y/n)', 'n',
boolean)
if 'dot' not in d:
print('''
Inside the root directory, two more directories will be created: "_templates"
for custom HTML templates and "_static" for custom stylesheets and other static
files. You can enter another prefix (such as ".") to replace the underscore.''')
do_prompt(d, 'dot', 'Name prefix for templates and static dir', '_', ok)
if 'project' not in d:
print('''
The project name will occur in several places in the built documentation.''')
do_prompt(d, 'project', 'Project name')
if 'author' not in d:
do_prompt(d, 'author', 'Author name(s)')
if 'version' not in d:
print('''
Sphinx has the notion of a "version" and a "release" for the
software. Each version can have multiple releases. For example, for
Python the version is something like 2.5 or 3.0, while the release is
something like 2.5.1 or 3.0a1. If you don't need this dual structure,
just set both to the same value.''')
do_prompt(d, 'version', 'Project version')
if 'release' not in d:
do_prompt(d, 'release', 'Project release', d['version'])
if 'language' not in d:
print('''
If the documents are to be written in a language other than English,
you can select a language here by its language code. Sphinx will then
translate text that it generates into that language.
For a list of supported codes, see
http://sphinx-doc.org/config.html#confval-language.''')
do_prompt(d, 'language', 'Project language', 'en')
if d['language'] == 'en':
d['language'] = None
if 'suffix' not in d:
print('''
The file name suffix for source files. Commonly, this is either ".txt"
or ".rst". Only files with this suffix are considered documents.''')
do_prompt(d, 'suffix', 'Source file suffix', '.rst', suffix)
if 'master' not in d:
print('''
One document is special in that it is considered the top node of the
"contents tree", that is, it is the root of the hierarchical structure
of the documents. Normally, this is "index", but if your "index"
document is a custom template, you can also set this to another filename.''')
do_prompt(d, 'master', 'Name of your master document (without suffix)',
'index')
while path.isfile(path.join(d['path'], d['master']+d['suffix'])) or \
path.isfile(path.join(d['path'], 'source', d['master']+d['suffix'])):
print()
print(bold('Error: the master file %s has already been found in the '
'selected root path.' % (d['master']+d['suffix'])))
print('sphinx-quickstart will not overwrite the existing file.')
print()
do_prompt(d, 'master', 'Please enter a new file name, or rename the '
'existing file and press Enter', d['master'])
if 'epub' not in d:
print('''
Sphinx can also add configuration for epub output:''')
do_prompt(d, 'epub', 'Do you want to use the epub builder (y/n)',
'n', boolean)
if 'ext_autodoc' not in d:
print('''
Please indicate if you want to use one of the following Sphinx extensions:''')
do_prompt(d, 'ext_autodoc', 'autodoc: automatically insert docstrings '
'from modules (y/n)', 'n', boolean)
if 'ext_doctest' not in d:
do_prompt(d, 'ext_doctest', 'doctest: automatically test code snippets '
'in doctest blocks (y/n)', 'n', boolean)
if 'ext_intersphinx' not in d:
do_prompt(d, 'ext_intersphinx', 'intersphinx: link between Sphinx '
'documentation of different projects (y/n)', 'n', boolean)
if 'ext_todo' not in d:
do_prompt(d, 'ext_todo', 'todo: write "todo" entries '
'that can be shown or hidden on build (y/n)', 'n', boolean)
if 'ext_coverage' not in d:
do_prompt(d, 'ext_coverage', 'coverage: checks for documentation '
'coverage (y/n)', 'n', boolean)
if 'ext_imgmath' not in d:
do_prompt(d, 'ext_imgmath', 'imgmath: include math, rendered '
'as PNG or SVG images (y/n)', 'n', boolean)
if 'ext_mathjax' not in d:
do_prompt(d, 'ext_mathjax', 'mathjax: include math, rendered in the '
'browser by MathJax (y/n)', 'n', boolean)
if d['ext_imgmath'] and d['ext_mathjax']:
print('''Note: imgmath and mathjax cannot be enabled at the same time.
imgmath has been deselected.''')
d['ext_imgmath'] = False
if 'ext_ifconfig' not in d:
do_prompt(d, 'ext_ifconfig', 'ifconfig: conditional inclusion of '
'content based on config values (y/n)', 'n', boolean)
if 'ext_viewcode' not in d:
do_prompt(d, 'ext_viewcode', 'viewcode: include links to the source '
'code of documented Python objects (y/n)', 'n', boolean)
if 'ext_githubpages' not in d:
do_prompt(d, 'ext_githubpages', 'githubpages: create .nojekyll file '
'to publish the document on GitHub pages (y/n)', 'n', boolean)
if 'no_makefile' in d:
d['makefile'] = False
elif 'makefile' not in d:
print('''
A Makefile and a Windows command file can be generated for you so that you
only have to run e.g. `make html' instead of invoking sphinx-build
directly.''')
do_prompt(d, 'makefile', 'Create Makefile? (y/n)', 'y', boolean)
if 'no_batchfile' in d:
d['batchfile'] = False
elif 'batchfile' not in d:
do_prompt(d, 'batchfile', 'Create Windows command file? (y/n)',
'y', boolean)
print()
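# Illustrative only: after ask_user() returns, *d* contains one entry per key
# documented in its docstring, e.g. (with hypothetical project values)
#
#     {'path': '.', 'sep': False, 'dot': '_', 'project': 'MyProj',
#      'author': 'Jane Doe', 'version': '1.0', 'release': '1.0',
#      'language': None, 'suffix': '.rst', 'master': 'index', 'epub': False,
#      'makefile': True, 'batchfile': True, 'ext_autodoc': False, ...}
#
# generate() below consumes exactly this dictionary.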
def generate(d, overwrite=True, silent=False):
"""Generate project based on values in *d*."""
texescape.init()
indent = ' ' * 4
if 'mastertoctree' not in d:
d['mastertoctree'] = ''
if 'mastertocmaxdepth' not in d:
d['mastertocmaxdepth'] = 2
d['project_fn'] = make_filename(d['project'])
d['project_url'] = urlquote(d['project'].encode('idna'))
d['project_manpage'] = d['project_fn'].lower()
d['now'] = time.asctime()
d['project_underline'] = column_width(d['project']) * '='
extensions = (',\n' + indent).join(
repr('sphinx.ext.' + name)
for name in EXTENSIONS
if d.get('ext_' + name))
if extensions:
d['extensions'] = '\n' + indent + extensions + ',\n'
else:
d['extensions'] = extensions
d['copyright'] = time.strftime('%Y') + ', ' + d['author']
d['author_texescaped'] = text_type(d['author']).\
translate(texescape.tex_escape_map)
d['project_doc'] = d['project'] + ' Documentation'
d['project_doc_texescaped'] = text_type(d['project'] + ' Documentation').\
translate(texescape.tex_escape_map)
# escape backslashes and single quotes in strings that are put into
# a Python string literal
for key in ('project', 'project_doc', 'project_doc_texescaped',
'author', 'author_texescaped', 'copyright',
'version', 'release', 'master'):
d[key + '_str'] = d[key].replace('\\', '\\\\').replace("'", "\\'")
if not path.isdir(d['path']):
mkdir_p(d['path'])
srcdir = d['sep'] and path.join(d['path'], 'source') or d['path']
mkdir_p(srcdir)
if d['sep']:
builddir = path.join(d['path'], 'build')
d['exclude_patterns'] = ''
else:
builddir = path.join(srcdir, d['dot'] + 'build')
exclude_patterns = map(repr, [
d['dot'] + 'build',
'Thumbs.db', '.DS_Store',
])
d['exclude_patterns'] = ', '.join(exclude_patterns)
mkdir_p(builddir)
mkdir_p(path.join(srcdir, d['dot'] + 'templates'))
mkdir_p(path.join(srcdir, d['dot'] + 'static'))
def write_file(fpath, content, newline=None):
if overwrite or not path.isfile(fpath):
print('Creating file %s.' % fpath)
f = open(fpath, 'wt', encoding='utf-8', newline=newline)
try:
f.write(content)
finally:
f.close()
else:
print('File %s already exists, skipping.' % fpath)
conf_text = QUICKSTART_CONF % d
if d['epub']:
conf_text += EPUB_CONFIG % d
if d.get('ext_intersphinx'):
conf_text += INTERSPHINX_CONFIG
write_file(path.join(srcdir, 'conf.py'), conf_text)
masterfile = path.join(srcdir, d['master'] + d['suffix'])
write_file(masterfile, MASTER_FILE % d)
if d.get('make_mode') is True:
makefile_template = MAKEFILE_NEW
batchfile_template = BATCHFILE_NEW
else:
makefile_template = MAKEFILE
batchfile_template = BATCHFILE
if d['makefile'] is True:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
# force LF line endings so the Makefile is not written with \r\n on Windows
write_file(path.join(d['path'], 'Makefile'), makefile_template % d, u'\n')
if d['batchfile'] is True:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
write_file(path.join(d['path'], 'make.bat'), batchfile_template % d, u'\r\n')
if silent:
return
print()
print(bold('Finished: An initial directory structure has been created.'))
print('''
You should now populate your master file %s and create other documentation
source files. ''' % masterfile + ((d['makefile'] or d['batchfile']) and '''\
Use the Makefile to build the docs, like so:
make builder
''' or '''\
Use the sphinx-build command to build the docs, like so:
sphinx-build -b builder %s %s
''' % (srcdir, builddir)) + '''\
where "builder" is one of the supported builders, e.g. html, latex or linkcheck.
''')
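# Illustrative layout produced by generate() when d['sep'] is True (paths are
# relative to d['path']; exact names vary with d['dot'], d['master'], d['suffix']):
#
#     source/conf.py        source/index.rst (master document)
#     source/_templates/    source/_static/
#     build/                Makefile          make.bat
#
# Without separate directories, everything is created directly in d['path']
# and the build directory becomes d['dot'] + 'build' (by default "_build").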
def usage(argv, msg=None):
if msg:
print(msg, file=sys.stderr)
print(file=sys.stderr)
USAGE = """\
Sphinx v%s
Usage: %%prog [options] [projectdir]
""" % __display_version__
EPILOG = """\
For more information, visit <http://sphinx-doc.org/>.
"""
def valid_dir(d):
dir = d['path']
if not path.exists(dir):
return True
if not path.isdir(dir):
return False
if set(['Makefile', 'make.bat']) & set(os.listdir(dir)):
return False
if d['sep']:
dir = os.path.join(dir, 'source')
if not path.exists(dir):
return True
if not path.isdir(dir):
return False
reserved_names = [
'conf.py',
d['dot'] + 'static',
d['dot'] + 'templates',
d['master'] + d['suffix'],
]
if set(reserved_names) & set(os.listdir(dir)):
return False
return True
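# Illustrative behaviour of valid_dir(): it returns False (and main() then
# refuses to generate) when the chosen path already looks like a Sphinx
# project, e.g. when it contains a Makefile or make.bat, or when the source
# directory holds conf.py, the master document, or the static/templates dirs.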
class MyFormatter(optparse.IndentedHelpFormatter):
def format_usage(self, usage):
return usage
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
def main(argv=sys.argv):
if not color_terminal():
nocolor()
parser = optparse.OptionParser(USAGE, epilog=EPILOG,
version='Sphinx v%s' % __display_version__,
formatter=MyFormatter())
parser.add_option('-q', '--quiet', action='store_true', dest='quiet',
default=False,
help='quiet mode')
group = parser.add_option_group('Structure options')
group.add_option('--sep', action='store_true', dest='sep',
help='if specified, separate source and build dirs')
group.add_option('--dot', metavar='DOT', dest='dot',
help='replacement for dot in _templates etc.')
group = parser.add_option_group('Project basic options')
group.add_option('-p', '--project', metavar='PROJECT', dest='project',
help='project name')
group.add_option('-a', '--author', metavar='AUTHOR', dest='author',
help='author names')
group.add_option('-v', metavar='VERSION', dest='version',
help='version of project')
group.add_option('-r', '--release', metavar='RELEASE', dest='release',
help='release of project')
group.add_option('-l', '--language', metavar='LANGUAGE', dest='language',
help='document language')
group.add_option('--suffix', metavar='SUFFIX', dest='suffix',
help='source file suffix')
group.add_option('--master', metavar='MASTER', dest='master',
help='master document name')
group.add_option('--epub', action='store_true', dest='epub',
default=False,
help='use epub')
group = parser.add_option_group('Extension options')
for ext in EXTENSIONS:
group.add_option('--ext-' + ext, action='store_true',
dest='ext_' + ext, default=False,
help='enable %s extension' % ext)
group = parser.add_option_group('Makefile and Batchfile creation')
group.add_option('--makefile', action='store_true', dest='makefile',
default=False,
help='create makefile')
group.add_option('--no-makefile', action='store_true', dest='no_makefile',
default=False,
help='do not create makefile')
group.add_option('--batchfile', action='store_true', dest='batchfile',
default=False,
help='create batchfile')
group.add_option('--no-batchfile', action='store_true', dest='no_batchfile',
default=False,
help='do not create batchfile')
group.add_option('-M', '--no-use-make-mode', action='store_false', dest='make_mode',
help='do not use make-mode for Makefile/make.bat')
group.add_option('-m', '--use-make-mode', action='store_true', dest='make_mode',
help='use make-mode for Makefile/make.bat')
# parse options
try:
opts, args = parser.parse_args()
except SystemExit as err:
return err.code
if len(args) > 0:
opts.ensure_value('path', args[0])
d = vars(opts)
# drop option entries whose value is None or False
d = dict((k, v) for k, v in d.items() if not (v is None or v is False))
try:
if 'quiet' in d:
if not set(['project', 'author', 'version']).issubset(d):
print('''"quiet" is specified, but any of "project", \
"author" or "version" is not specified.''')
return
if set(['quiet', 'project', 'author', 'version']).issubset(d):
# quiet mode with all required params satisfied, use default
d.setdefault('release', d['version'])
d2 = DEFAULT_VALUE.copy()
d2.update(dict(("ext_"+ext, False) for ext in EXTENSIONS))
d2.update(d)
d = d2
if 'no_makefile' in d:
d['makefile'] = False
if 'no_batchfile' in d:
d['batchfile'] = False
if not valid_dir(d):
print()
print(bold('Error: specified path is not a directory, or Sphinx'
' files already exist.'))
print('sphinx-quickstart only generates into an empty directory.'
' Please specify a new root path.')
return
else:
ask_user(d)
except (KeyboardInterrupt, EOFError):
print()
print('[Interrupted.]')
return
# decode any values in d that are still byte strings
for key, value in d.items():
if isinstance(value, binary_type):
d[key] = term_decode(value)
generate(d)
if __name__ == '__main__':
sys.exit(main(sys.argv))
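# Illustrative invocations (hypothetical project values):
#
#     python quickstart.py                            # interactive, via ask_user()
#     python quickstart.py -q -p MyProj -a "Jane Doe" -v 1.0 docs
#
# In the quiet form, project, author, and version are required, the positional
# argument is used as the root path, and the release defaults to the version.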
| neerajvashistha/pa-dude | lib/python2.7/site-packages/sphinx/quickstart.py | Python | mit | 52,767 | ["VisIt"] | 60935a1158ab15f1422bbf7cf86db0aa4b2d10099c8dd821c547ef2a47dc5c34 |